x86-64: dirty ACPI implementation

This commit is contained in:
Mark Poliakov 2023-08-31 13:40:17 +03:00
parent 9b9a1f68b5
commit c4e8697042
18 changed files with 600 additions and 94 deletions

View File

@ -39,8 +39,9 @@ fdt-rs = { version = "0.4.3", default-features = false }
[target.'cfg(target_arch = "x86_64")'.dependencies]
yboot-proto = { git = "https://git.alnyan.me/yggdrasil/yboot-proto.git" }
aml = "0.16.4"
acpi = "4.1.1"
aml = { git = "https://github.com/alnyan/acpi.git", version = "0.16.4" }
acpi_lib = { git = "https://github.com/alnyan/acpi.git", version = "4.1.1", package = "acpi" }
acpi-system = { git = "https://github.com/alnyan/acpi-system.git", version = "0.1.0" }
[features]
default = []

View File

@ -47,6 +47,9 @@ cfg_if! {
pub enum CpuMessage {
/// Indicates that the sender CPU entered kernel panic and wants other CPUs to follow
Panic,
/// Indicates that the cores should either halt and wait for the caller to shut the system
/// down, or they should shut down by themselves, depending on the platform
Shutdown,
}
/// Interface for an architecture-specific facilities

393
src/arch/x86_64/acpi.rs Normal file
View File

@ -0,0 +1,393 @@
use core::{
alloc::{AllocError, Allocator, GlobalAlloc, Layout},
ptr::NonNull,
sync::atomic::Ordering,
time::Duration,
};
use acpi_lib::{AcpiHandler, AcpiTables, PhysicalMapping};
use acpi_system::{
AcpiInterruptMethod, AcpiSleepState, AcpiSystem, AcpiSystemError, EventAction, FixedEvent,
};
use alloc::boxed::Box;
use device_api::{
interrupt::{InterruptHandler, IpiDeliveryTarget},
Device,
};
use kernel_util::util::OneTimeInit;
use yggdrasil_abi::error::Error;
use crate::{
arch::{
x86_64::{smp::CPU_COUNT, IrqNumber, SHUTDOWN_FENCE},
Architecture, CpuMessage, ARCHITECTURE,
},
mem::{heap::GLOBAL_HEAP, ConvertAddress},
sync::IrqSafeSpinlock,
};
use super::intrinsics;
#[derive(Clone, Copy)]
pub struct AcpiAllocator;
#[derive(Clone, Copy)]
pub struct AcpiHandlerImpl;
struct SciHandler;
static ACPI_SYSTEM: OneTimeInit<IrqSafeSpinlock<AcpiSystem<AcpiHandlerImpl>>> = OneTimeInit::new();
// Device identity for the ACPI SCI (System Control Interrupt) handler, as
// required by the interrupt controller's registration API.
impl Device for SciHandler {
    fn display_name(&self) -> &'static str {
        "ACPI interrupt handler"
    }
}
impl InterruptHandler for SciHandler {
    // Invoked on every SCI. Forwards the event to acpi-system, which decides
    // what actually happened (power button, fixed event, GPE, ...).
    // Always reports the IRQ as handled.
    fn handle_irq(&self) -> bool {
        log::trace!("ACPI SCI received");
        ACPI_SYSTEM.get().lock().handle_sci();
        true
    }
}
// Allocator facade over the kernel's global heap, used by the `acpi` crate for
// its table-derived collections.
unsafe impl Allocator for AcpiAllocator {
    /// Allocates `layout` from the kernel's global heap.
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // `GlobalAlloc::alloc` is undefined behavior for zero-sized layouts,
        // while `Allocator::allocate` is required to support them: hand out a
        // dangling, properly-aligned empty slice without touching the heap.
        if layout.size() == 0 {
            // SAFETY: `layout.align()` is a non-zero power of two, so the
            // pointer is non-null and suitably aligned; no memory backs it,
            // which is fine for a zero-length slice.
            let dangling = unsafe { NonNull::new_unchecked(layout.align() as *mut u8) };
            return Ok(NonNull::slice_from_raw_parts(dangling, 0));
        }

        let ptr = unsafe { GLOBAL_HEAP.alloc(layout) };
        log::trace!("ACPI alloc: {:?} -> {:p}", layout, ptr);

        if ptr.is_null() {
            Err(AllocError)
        } else {
            // SAFETY: `ptr` was just checked to be non-null.
            unsafe {
                Ok(NonNull::slice_from_raw_parts(
                    NonNull::new_unchecked(ptr),
                    layout.size(),
                ))
            }
        }
    }

    /// Returns memory obtained from [`Self::allocate`] to the global heap.
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        log::trace!("ACPI dealloc: {:?}, {:?}", ptr, layout);
        // Zero-sized "allocations" never touched the heap — nothing to free,
        // and `GlobalAlloc::dealloc` must not be called for them.
        if layout.size() != 0 {
            GLOBAL_HEAP.dealloc(ptr.as_ptr(), layout);
        }
    }
}
impl acpi_system::Handler for AcpiHandlerImpl {
    /// Returns a `'static` byte-slice view of physical memory
    /// `[address, address + length)` via the kernel's fixed physical window.
    ///
    /// Only the low 4GiB is reachable through that window; anything else
    /// panics.
    unsafe fn map_slice(address: u64, length: u64) -> &'static [u8] {
        // NOTE(review): `<` rejects a range ending exactly at 4GiB — confirm
        // whether `<=` was intended here.
        if address + length < 0x100000000 {
            core::slice::from_raw_parts(
                (address as usize).virtualize() as *const u8,
                length as usize,
            )
        } else {
            panic!("Unhandled address: {:#x}", address)
        }
    }

    // Port I/O accessors: thin, trace-logged wrappers around the in*/out*
    // intrinsics.

    fn io_read_u8(port: u16) -> u8 {
        let value = unsafe { intrinsics::inb(port) };
        log::trace!("io_read_u8 {:#x} <- {:#x}", port, value);
        value
    }

    fn io_read_u16(port: u16) -> u16 {
        let value = unsafe { intrinsics::inw(port) };
        log::trace!("io_read_u16 {:#x} <- {:#x}", port, value);
        value
    }

    fn io_read_u32(port: u16) -> u32 {
        let value = unsafe { intrinsics::inl(port) };
        log::trace!("io_read_u32 {:#x} <- {:#x}", port, value);
        value
    }

    fn io_write_u8(port: u16, value: u8) {
        log::trace!("io_write_u8 {:#x}, {:#x}", port, value);
        unsafe { intrinsics::outb(port, value) }
    }

    fn io_write_u16(port: u16, value: u16) {
        log::trace!("io_write_u16 {:#x}, {:#x}", port, value);
        unsafe { intrinsics::outw(port, value) }
    }

    fn io_write_u32(port: u16, value: u32) {
        log::trace!("io_write_u32 {:#x}, {:#x}", port, value);
        unsafe { intrinsics::outl(port, value) }
    }

    // Physical-memory accessors. Addresses are translated through the kernel's
    // physical window; naturally-aligned accesses are volatile, unaligned ones
    // fall back to read_unaligned/write_unaligned (which are not volatile —
    // acceptable for ACPI table/register fields as used here).

    fn mem_read_u8(address: u64) -> u8 {
        let value = unsafe { (address as *const u8).virtualize().read_volatile() };
        log::trace!("mem_read_u8 {:#x} <- {:#x}", address, value);
        value
    }

    fn mem_read_u16(address: u64) -> u16 {
        let value = if address & 0x1 == 0 {
            unsafe { (address as *const u16).virtualize().read_volatile() }
        } else {
            unsafe { (address as *const u16).virtualize().read_unaligned() }
        };
        log::trace!("mem_read_u16 {:#x} <- {:#x}", address, value);
        value
    }

    fn mem_read_u32(address: u64) -> u32 {
        let value = if address & 0x3 == 0 {
            unsafe { (address as *const u32).virtualize().read_volatile() }
        } else {
            unsafe { (address as *const u32).virtualize().read_unaligned() }
        };
        log::trace!("mem_read_u32 {:#x} <- {:#x}", address, value);
        value
    }

    fn mem_read_u64(address: u64) -> u64 {
        let value = if address & 0x7 == 0 {
            unsafe { (address as *const u64).virtualize().read_volatile() }
        } else {
            unsafe { (address as *const u64).virtualize().read_unaligned() }
        };
        log::trace!("mem_read_u64 {:#x} <- {:#x}", address, value);
        value
    }

    fn mem_write_u8(address: u64, value: u8) {
        log::trace!("mem_write_u8 {:#x}, {:#x}", address, value);
        unsafe { (address as *mut u8).virtualize().write_volatile(value) };
    }

    fn mem_write_u16(address: u64, value: u16) {
        log::trace!("mem_write_u16 {:#x}, {:#x}", address, value);
        if address & 0x1 == 0 {
            unsafe { (address as *mut u16).virtualize().write_volatile(value) };
        } else {
            unsafe { (address as *mut u16).virtualize().write_unaligned(value) };
        }
    }

    fn mem_write_u32(address: u64, value: u32) {
        log::trace!("mem_write_u32 {:#x}, {:#x}", address, value);
        if address & 0x3 == 0 {
            unsafe { (address as *mut u32).virtualize().write_volatile(value) };
        } else {
            unsafe { (address as *mut u32).virtualize().write_unaligned(value) };
        }
    }

    fn mem_write_u64(address: u64, value: u64) {
        log::trace!("mem_write_u64 {:#x}, {:#x}", address, value);
        if address & 0x7 == 0 {
            unsafe { (address as *mut u64).virtualize().write_volatile(value) };
        } else {
            unsafe { (address as *mut u64).virtualize().write_unaligned(value) };
        }
    }

    /// Registers and enables the ACPI SCI handler for the given ISA IRQ.
    fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> {
        infoln!("Installing ACPI SCI handler at IRQ #{}", irq);

        let intc = ARCHITECTURE.external_interrupt_controller();
        // Deliberately leaked: the interrupt controller requires a 'static
        // handler reference, and the SCI handler lives forever.
        let handler = Box::leak(Box::new(SciHandler));
        let irq = IrqNumber::Isa(irq as _);

        intc.register_irq(irq, Default::default(), handler).unwrap();
        intc.enable_irq(irq).unwrap();

        Ok(())
    }
}
// AML interpreter backend. Port and memory accessors delegate to the
// `acpi_system::Handler` implementation so both ACPI layers share a single,
// trace-logged I/O path.
impl aml::Handler for AcpiHandlerImpl {
    fn read_io_u8(&self, port: u16) -> u8 {
        <Self as acpi_system::Handler>::io_read_u8(port)
    }

    fn read_io_u16(&self, port: u16) -> u16 {
        <Self as acpi_system::Handler>::io_read_u16(port)
    }

    fn read_io_u32(&self, port: u16) -> u32 {
        <Self as acpi_system::Handler>::io_read_u32(port)
    }

    fn write_io_u8(&self, port: u16, value: u8) {
        <Self as acpi_system::Handler>::io_write_u8(port, value)
    }

    fn write_io_u16(&self, port: u16, value: u16) {
        <Self as acpi_system::Handler>::io_write_u16(port, value)
    }

    fn write_io_u32(&self, port: u16, value: u32) {
        <Self as acpi_system::Handler>::io_write_u32(port, value)
    }

    fn read_u8(&self, address: usize) -> u8 {
        <Self as acpi_system::Handler>::mem_read_u8(address as u64)
    }

    fn read_u16(&self, address: usize) -> u16 {
        <Self as acpi_system::Handler>::mem_read_u16(address as u64)
    }

    fn read_u32(&self, address: usize) -> u32 {
        <Self as acpi_system::Handler>::mem_read_u32(address as u64)
    }

    fn read_u64(&self, address: usize) -> u64 {
        <Self as acpi_system::Handler>::mem_read_u64(address as u64)
    }

    fn write_u8(&self, address: usize, value: u8) {
        <Self as acpi_system::Handler>::mem_write_u8(address as u64, value)
    }

    fn write_u16(&self, address: usize, value: u16) {
        <Self as acpi_system::Handler>::mem_write_u16(address as u64, value)
    }

    fn write_u32(&self, address: usize, value: u32) {
        <Self as acpi_system::Handler>::mem_write_u32(address as u64, value)
    }

    fn write_u64(&self, address: usize, value: u64) {
        <Self as acpi_system::Handler>::mem_write_u64(address as u64, value)
    }

    // PCI configuration space is not wired up yet: reads return all-ones (the
    // conventional "no device present" pattern) and writes are dropped.

    fn read_pci_u8(&self, _segment: u16, _bus: u8, _device: u8, _function: u8, _offset: u16) -> u8 {
        0xFF
    }

    fn read_pci_u16(
        &self,
        _segment: u16,
        _bus: u8,
        _device: u8,
        _function: u8,
        _offset: u16,
    ) -> u16 {
        0xFFFF
    }

    fn read_pci_u32(
        &self,
        _segment: u16,
        _bus: u8,
        _device: u8,
        _function: u8,
        _offset: u16,
    ) -> u32 {
        0xFFFFFFFF
    }

    fn write_pci_u8(
        &self,
        _segment: u16,
        _bus: u8,
        _device: u8,
        _function: u8,
        _offset: u16,
        _value: u8,
    ) {
    }

    fn write_pci_u16(
        &self,
        _segment: u16,
        _bus: u8,
        _device: u8,
        _function: u8,
        _offset: u16,
        _value: u16,
    ) {
    }

    fn write_pci_u32(
        &self,
        _segment: u16,
        _bus: u8,
        _device: u8,
        _function: u8,
        _offset: u16,
        _value: u32,
    ) {
    }

    // Embedded controller access is not supported.

    fn read_ec_u8(&self, _address: u64) -> u8 {
        0x00
    }

    fn write_ec_u8(&self, _address: u64, _value: u8) {}

    /// AML `Sleep()` — no timer-backed implementation yet.
    // Parameter renamed to `_duration` to silence the unused-variable warning
    // until this is implemented.
    fn sleep(&self, _duration: Duration) {
        todo!()
    }
}
impl AcpiHandler for AcpiHandlerImpl {
    // No actual address space modification is performed
    unsafe fn map_physical_region<T>(
        &self,
        physical_address: usize,
        size: usize,
    ) -> PhysicalMapping<Self, T> {
        // Physical addresses below 4GiB are already visible through the
        // kernel's higher-half physical window, so "mapping" is just an
        // address translation.
        if physical_address <= 0xFFFFFFFF {
            PhysicalMapping::new(
                physical_address,
                // SAFETY: virtualize() of a <4GiB physical address yields a
                // non-null kernel virtual address.
                NonNull::new_unchecked(physical_address.virtualize() as *mut T),
                size,
                size,
                *self,
            )
        } else {
            // Tables above 4GiB would need an explicit page-table mapping.
            todo!()
        }
    }

    // Unmap nothing, these addresses are "virtualized" to high address space
    fn unmap_physical_region<T>(_region: &PhysicalMapping<Self, T>) {}
}
/// Initializes the ACPI subsystem from the parsed table set: brings up
/// `acpi-system`, switches SCI delivery to APIC mode and installs a
/// power-button handler that parks all CPUs and enters the S5 (soft-off)
/// state.
pub fn init_acpi(tables: &'static AcpiTables<AcpiHandlerImpl>) -> Result<(), Error> {
    let mut system = AcpiSystem::new(tables, Box::new(AcpiHandlerImpl)).unwrap();

    // I/O APICs are in use, so route SCIs through the APIC interrupt model
    system.initialize(AcpiInterruptMethod::Apic).unwrap();

    system
        .enable_fixed_event(
            &FixedEvent::POWER_BUTTON,
            Box::new(|_| {
                log::info!("Power button was pressed");

                // TODO the correct way would be to
                // 1. Nicely ask all the processes to quit
                // 2. Wait for some time
                // 3. Kill the remaining ones
                // 4. Halt other cores
                // 5. Sync filesystem
                // 6. Do something with the devices
                // 7. Actually enter the S5 state
                unsafe {
                    ARCHITECTURE
                        .send_ipi(IpiDeliveryTarget::OtherCpus, CpuMessage::Shutdown)
                        .unwrap();
                }

                // Wait until every CPU has checked in on the shutdown fence
                // before actually powering off
                SHUTDOWN_FENCE.signal();
                SHUTDOWN_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));

                log::info!("CPUs are parked, can shutdown now");

                EventAction::EnterSleepState(AcpiSleepState::S5)
            }),
        )
        .unwrap();

    // Published last, once fully initialized: the SCI handler reads this
    ACPI_SYSTEM.init(IrqSafeSpinlock::new(system));

    Ok(())
}

View File

@ -1,6 +1,6 @@
//! x86-64 I/O APIC driver implementation
use abi::error::Error;
use acpi::platform::interrupt::{Apic as AcpiApic, Polarity, TriggerMode};
use acpi_lib::platform::interrupt::{Apic as AcpiApic, Polarity, TriggerMode};
use device_api::{
interrupt::{
ExternalInterruptController, FixedInterruptTable, InterruptHandler, InterruptTable,
@ -10,7 +10,7 @@ use device_api::{
};
use crate::{
arch::x86_64::{apic::local::BSP_APIC_ID, IrqNumber},
arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber},
mem::ConvertAddress,
sync::IrqSafeSpinlock,
};
@ -225,7 +225,7 @@ impl ExternalInterruptController for IoApic {
impl IoApic {
/// Creates an I/O APIC instance from its ACPI definition
pub fn from_acpi(info: &AcpiApic) -> Result<Self, Error> {
pub fn from_acpi(info: &AcpiApic<AcpiAllocator>) -> Result<Self, Error> {
if info.io_apics.len() != 1 {
todo!();
}

View File

@ -1,5 +1,10 @@
//! x86-64 Local APIC driver implementation
use device_api::{interrupt::LocalInterruptController, Device};
use core::sync::atomic::Ordering;
use device_api::{
interrupt::{IpiDeliveryTarget, LocalInterruptController},
Device,
};
use kernel_util::util::OneTimeInit;
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
@ -8,11 +13,17 @@ use tock_registers::{
};
use crate::{
arch::{x86_64::registers::MSR_IA32_APIC_BASE, CpuMessage},
arch::{
x86_64::{registers::MSR_IA32_APIC_BASE, smp::CPU_COUNT},
CpuMessage,
},
mem::ConvertAddress,
task::Cpu,
};
use super::{APIC_LINT0_VECTOR, APIC_LINT1_VECTOR, APIC_SPURIOUS_VECTOR, APIC_TIMER_VECTOR};
use super::{
APIC_IPI_VECTOR, APIC_LINT0_VECTOR, APIC_LINT1_VECTOR, APIC_SPURIOUS_VECTOR, APIC_TIMER_VECTOR,
};
const TIMER_INTERVAL: u32 = 150000;
@ -136,10 +147,35 @@ impl LocalInterruptController for LocalApic {
fn send_ipi(
&self,
_target: device_api::interrupt::IpiDeliveryTarget,
_msg: Self::IpiMessage,
target: device_api::interrupt::IpiDeliveryTarget,
msg: Self::IpiMessage,
) -> Result<(), abi::error::Error> {
todo!()
while self.regs.ICR0.matches_all(ICR0::DeliveryStatus::SET) {
core::hint::spin_loop();
}
// TODO use NMI or regular interrupt depending on severity of the message
match target {
IpiDeliveryTarget::OtherCpus => {
let local = Cpu::local_id();
for i in 0..CPU_COUNT.load(Ordering::Acquire) {
if i != local as usize {
Cpu::push_ipi_queue(i as u32, msg);
}
}
self.regs.ICR1.write(ICR1::PhysicalDestination.val(0));
self.regs.ICR0.write(
ICR0::Vector.val(APIC_IPI_VECTOR + 32)
+ ICR0::Destination::NMI
+ ICR0::DestinationType::AllExceptThis,
);
Ok(())
}
IpiDeliveryTarget::ThisCpu => todo!(),
IpiDeliveryTarget::Specific(_) => todo!(),
}
}
unsafe fn init_ap(&self) -> Result<(), abi::error::Error> {
@ -257,29 +293,6 @@ impl LocalApic {
}
}
// /// Issues an interprocessor interrupt for the target.
// ///
// /// # Safety
// ///
// /// Unsafe: this function may break the control flow on the target processors.
// pub unsafe fn send_ipi(&self, target: IpiDeliveryTarget) {
// while self.regs.ICR0.matches_all(ICR0::DeliveryStatus::SET) {
// core::hint::spin_loop();
// }
// match target {
// IpiDeliveryTarget::AllExceptLocal => {
// self.regs.ICR1.write(ICR1::PhysicalDestination.val(0));
// self.regs.ICR0.write(
// ICR0::Vector.val(APIC_IPI_VECTOR + 32)
// + ICR0::Destination::NMI
// + ICR0::DestinationType::AllExceptThis,
// );
// }
// IpiDeliveryTarget::Specified(_) => todo!(),
// }
// }
#[inline]
fn base() -> usize {
MSR_IA32_APIC_BASE.read_base() as usize

View File

@ -25,7 +25,8 @@ use super::smp::CPU_COUNT;
// use super::ARCHITECTURE;
const BOOT_STACK_SIZE: usize = 65536;
const BOOT_STACK_SIZE: usize = 256 * 1024;
const HEAP_PAGES: usize = 256;
#[repr(C, align(0x20))]
struct BootStack {
@ -84,9 +85,9 @@ unsafe extern "C" fn __x86_64_upper_entry() -> ! {
.init_physical_memory()
.expect("Failed to initialize the physical memory manager");
let heap_base = phys::alloc_pages_contiguous(16, PageUsage::Used)
let heap_base = phys::alloc_pages_contiguous(HEAP_PAGES, PageUsage::Used)
.expect("Couldn't allocate memory for heap");
heap::init_heap(heap_base.virtualize(), 16 * 0x1000);
heap::init_heap(heap_base.virtualize(), HEAP_PAGES * 0x1000);
exception::init_exceptions(0);

View File

@ -88,7 +88,7 @@ impl TaskContextImpl for TaskContext {
const USER_STACK_EXTRA_ALIGN: usize = 8;
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 4;
const KERNEL_TASK_PAGES: usize = 32;
let stack_base = unsafe {
phys::alloc_pages_contiguous(KERNEL_TASK_PAGES, PageUsage::Used)?.virtualize()
};

View File

@ -1,16 +1,20 @@
//! Per-CPU information and data structures
use core::ptr::null_mut;
use core::{ptr::null_mut, sync::atomic::Ordering};
use alloc::boxed::Box;
use alloc::{boxed::Box, vec::Vec};
use kernel_util::util::OneTimeInit;
use tock_registers::interfaces::Writeable;
use crate::{
arch::x86_64::{gdt, registers::MSR_IA32_KERNEL_GS_BASE, syscall},
arch::{
x86_64::{gdt, registers::MSR_IA32_KERNEL_GS_BASE, syscall},
CpuMessage,
},
sync::IrqSafeSpinlock,
task::sched::CpuQueue,
};
use super::apic::local::LocalApic;
use super::{apic::local::LocalApic, smp::CPU_COUNT};
/// Per-CPU data structure, only visible to the executing CPU
#[repr(C, align(0x10))]
@ -28,6 +32,32 @@ pub struct Cpu {
queue: OneTimeInit<&'static CpuQueue>,
}
/// Single-slot mailbox used to deliver one [CpuMessage] to a specific CPU.
struct IpiQueue {
    // IRQ-safe lock: the slot is written by the sending CPU and drained from
    // the receiver's interrupt path.
    data: IrqSafeSpinlock<Option<CpuMessage>>,
}
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue>> = OneTimeInit::new();
impl IpiQueue {
    /// Constructs an empty message slot.
    pub const fn new() -> Self {
        Self {
            data: IrqSafeSpinlock::new(None),
        }
    }

    /// Deposits `msg` into the slot.
    ///
    /// Panics if a previous message has not been consumed yet — the mailbox
    /// holds at most one message.
    pub fn push(&self, msg: CpuMessage) {
        let mut slot = self.data.lock();
        assert!(slot.is_none());
        *slot = Some(msg);
    }

    /// Removes and returns the pending message, if any.
    pub fn pop(&self) -> Option<CpuMessage> {
        self.data.lock().take()
    }
}
impl Cpu {
/// Initializes the per-CPU data structure.
///
@ -101,4 +131,32 @@ impl Cpu {
pub fn init_queue(&self, queue: &'static CpuQueue) {
self.queue.init(queue);
}
/// Inserts an IPI message to the back of the target CPU's message queue
pub fn push_ipi_queue(cpu_id: u32, msg: CpuMessage) {
let ipi_queue = &IPI_QUEUES.get()[cpu_id as usize];
ipi_queue.push(msg);
}
/// Pops the first IPI message received for this CPU.
///
/// # Note
///
/// Currently the queue consists of only one entry, so the CPU will only receive the last one.
pub fn get_ipi(&self) -> Option<CpuMessage> {
let ipi_queue = &IPI_QUEUES.get()[self.id as usize];
ipi_queue.pop()
}
/// Sets up global list of interprocessor message queues
pub fn init_ipi_queues() {
IPI_QUEUES.init(Vec::from_iter(
(0..CPU_COUNT.load(Ordering::Acquire)).map(|_| IpiQueue::new()),
));
}
// /// Gets an IPI message from the processor's queue and takes corresponding actions. If there is
// /// none, this is treated as a spurious IPI and ignored. See [CpuMessage].
// pub fn handle_ipi(&self) {
// }
}

View File

@ -4,11 +4,13 @@ use core::{arch::global_asm, mem::size_of_val};
use abi::{arch::SavedFrame, primitive_enum, process::Signal};
use crate::{
arch::x86_64::apic,
arch::{x86_64::apic, CpuMessage},
panic,
task::{context::TaskFrame, process::Process},
task::{context::TaskFrame, process::Process, Cpu},
};
use super::ARCHITECTURE;
primitive_enum! {
enum ExceptionKind: u64 {
DivisionError = 0,
@ -319,7 +321,13 @@ extern "C" fn __x86_64_exception_handler(frame: *mut ExceptionFrame) {
}
} else {
if kind == ExceptionKind::NonMaskableInterrupt {
panic::panic_secondary();
let cpu = Cpu::local();
if let Some(msg) = cpu.get_ipi() {
unsafe {
ARCHITECTURE.handle_ipi(msg);
}
}
}
kernel_exception_inner(kind, frame)

View File

@ -74,14 +74,14 @@ pub unsafe fn inb(port: u16) -> u8 {
#[inline]
pub unsafe fn inw(port: u16) -> u16 {
let value: u16;
core::arch::asm!("inb %dx, %ax", in("dx") port, out("ax") value, options(att_syntax));
core::arch::asm!("inw %dx, %ax", in("dx") port, out("ax") value, options(att_syntax));
value
}
#[inline]
pub unsafe fn inl(port: u16) -> u32 {
let value: u32;
core::arch::asm!("inb %dx, %eax", in("dx") port, out("eax") value, options(att_syntax));
core::arch::asm!("inl %dx, %eax", in("dx") port, out("eax") value, options(att_syntax));
value
}

View File

@ -2,7 +2,7 @@
use core::{ptr::NonNull, sync::atomic::Ordering};
use abi::error::Error;
use acpi::{AcpiHandler, AcpiTables, HpetInfo, InterruptModel, PhysicalMapping};
use acpi_lib::{AcpiHandler, AcpiTables, HpetInfo, InterruptModel, PhysicalMapping};
use alloc::boxed::Box;
use cpu::Cpu;
use device_api::{
@ -35,10 +35,13 @@ use crate::{
phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
ConvertAddress,
},
panic,
sync::SpinFence,
CPU_INIT_FENCE,
};
use self::{
acpi::{AcpiAllocator, AcpiHandlerImpl},
apic::ioapic::IoApic,
intrinsics::{IoPort, IoPortAccess},
peripherals::{hpet::Hpet, ps2::PS2Controller},
@ -50,6 +53,7 @@ use super::{Architecture, CpuMessage};
#[macro_use]
pub mod intrinsics;
pub mod acpi;
pub mod apic;
pub mod boot;
pub mod context;
@ -119,33 +123,6 @@ impl<'a, T: IterableMemoryMap<'a> + 'a> AbstractMemoryMap<'a> for T {
}
}
#[derive(Clone, Copy)]
struct AcpiHandlerImpl;
impl AcpiHandler for AcpiHandlerImpl {
// No actual address space modification is performed
unsafe fn map_physical_region<T>(
&self,
physical_address: usize,
size: usize,
) -> PhysicalMapping<Self, T> {
if physical_address <= 0xFFFFFFFF {
PhysicalMapping::new(
physical_address,
NonNull::new_unchecked(physical_address.virtualize() as *mut T),
size,
size,
*self,
)
} else {
todo!()
}
}
// Unmap nothing, these addresses are "virtualized" to high address space
fn unmap_physical_region<T>(_region: &PhysicalMapping<Self, T>) {}
}
/// Describes which kind of bootloader data was provided to the kernel
pub enum BootData {
/// [yboot_proto::LoadProtocolV1]
@ -179,6 +156,8 @@ pub static ARCHITECTURE: X86_64 = X86_64 {
timer: OneTimeInit::new(),
};
static SHUTDOWN_FENCE: SpinFence = SpinFence::new();
impl Architecture for X86_64 {
const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
@ -236,7 +215,11 @@ impl Architecture for X86_64 {
unsafe fn start_application_processors(&self) {
if let Some(acpi) = self.acpi.try_get() {
let Some(pinfo) = acpi.platform_info().ok().and_then(|p| p.processor_info) else {
let Some(pinfo) = acpi
.platform_info_in(AcpiAllocator)
.ok()
.and_then(|p| p.processor_info)
else {
return;
};
@ -396,8 +379,8 @@ impl X86_64 {
Ok(())
}
unsafe fn init_platform_from_acpi(&self, acpi: &AcpiTables<AcpiHandlerImpl>) {
let platform_info = acpi.platform_info().unwrap();
unsafe fn init_platform_from_acpi(&self, acpi: &'static AcpiTables<AcpiHandlerImpl>) {
let platform_info = acpi.platform_info_in(AcpiAllocator).unwrap();
let InterruptModel::Apic(apic_info) = platform_info.interrupt_model else {
panic!("Processor does not have an APIC");
@ -407,6 +390,8 @@ impl X86_64 {
let hpet = HpetInfo::new(acpi).unwrap();
self.timer.init(Hpet::from_acpi(&hpet).unwrap());
acpi::init_acpi(acpi).unwrap();
}
unsafe fn init_framebuffer(&'static self) {
@ -512,4 +497,21 @@ impl X86_64 {
pic_master_cmd.write(0x20);
pic_slave_cmd.write(0x20);
}
    // Acts on an IPI message popped from this CPU's queue (called from the
    // NMI exception path).
    unsafe fn handle_ipi(&self, msg: CpuMessage) {
        match msg {
            // Another CPU panicked: run the secondary panic handler (never
            // returns)
            CpuMessage::Panic => panic::panic_secondary(),
            CpuMessage::Shutdown => {
                // Mask interrupts, report this core as parked on the shutdown
                // fence and halt forever; the initiating CPU performs the
                // actual power-off once all cores have checked in
                Self::set_interrupt_mask(true);

                let id = Cpu::local_id();
                infoln!("cpu{} shutdown", id);
                SHUTDOWN_FENCE.signal();
                loop {
                    Self::wait_for_interrupt();
                }
            }
        }
    }
}

View File

@ -2,7 +2,7 @@
use core::time::Duration;
use abi::error::Error;
use acpi::hpet::HpetInfo as AcpiHpet;
use acpi_lib::hpet::HpetInfo as AcpiHpet;
use device_api::{
interrupt::{InterruptHandler, IrqLevel, IrqOptions, IrqTrigger},
timer::MonotonicTimestampProviderDevice,

View File

@ -4,7 +4,7 @@ use core::{
sync::atomic::{AtomicUsize, Ordering},
};
use acpi::platform::{ProcessorInfo, ProcessorState};
use acpi_lib::platform::{ProcessorInfo, ProcessorState};
use crate::{
arch::{x86_64::boot::__x86_64_ap_entry, Architecture, ArchitectureImpl},
@ -15,7 +15,7 @@ use crate::{
task::Cpu,
};
use super::table::KERNEL_TABLES;
use super::{acpi::AcpiAllocator, table::KERNEL_TABLES};
/// The number of CPUs present in the system
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
@ -107,7 +107,7 @@ unsafe fn start_ap_core(apic_id: u32) {
/// # Safety
///
/// Only meant to be called once by the BSP.
pub unsafe fn start_ap_cores(info: &ProcessorInfo) {
pub unsafe fn start_ap_cores(info: &ProcessorInfo<AcpiAllocator>) {
let aps = &info.application_processors;
if aps.is_empty() {
@ -116,7 +116,7 @@ pub unsafe fn start_ap_cores(info: &ProcessorInfo) {
load_ap_bootstrap_code();
for ap in aps {
for ap in aps.iter() {
if ap.is_ap && ap.state == ProcessorState::WaitingForSipi {
start_ap_core(ap.local_apic_id);
}

View File

@ -513,11 +513,15 @@ pub fn add_console_autoflush(console: &'static dyn DisplayConsole) {
CONSOLES.lock().push(console);
}
/// Periodically flushes data from console buffers onto their displays
pub fn task_update_consoles() -> TaskFlow {
pub fn flush_consoles() {
for console in CONSOLES.lock().iter() {
let mut state = console.state().lock();
console.flush(&mut state);
}
}
/// Periodically flushes data from console buffers onto their displays
pub fn task_update_consoles() -> TaskFlow {
flush_consoles();
TaskFlow::Continue
}

View File

@ -24,6 +24,8 @@ fn setup_root() -> Result<VnodeRef, Error> {
/// This function is meant to be used as a kernel-space process after all the platform-specific
/// initialization has finished.
pub fn kinit() {
loop {}
infoln!("In main");
#[cfg(feature = "fb_console")]

View File

@ -10,7 +10,8 @@
const_mut_refs,
let_chains,
linked_list_cursors,
rustc_private
rustc_private,
allocator_api
)]
#![allow(clippy::new_without_default, clippy::fn_to_numeric_cast)]
#![warn(missing_docs)]
@ -23,6 +24,7 @@ use task::spawn_kernel_closure;
use crate::{
arch::{Architecture, ArchitectureImpl, ARCHITECTURE},
mem::heap,
task::Cpu,
};
extern crate yggdrasil_abi as abi;
@ -76,7 +78,7 @@ pub fn kernel_main() -> ! {
ARCHITECTURE.start_application_processors();
}
// Cpu::init_ipi_queues();
Cpu::init_ipi_queues();
// Wait until all APs initialize
CPU_INIT_FENCE.signal();

View File

@ -8,7 +8,7 @@ use core::{
use linked_list_allocator::Heap;
use spinning_top::Spinlock;
struct KernelAllocator {
pub struct KernelAllocator {
inner: Spinlock<Heap>,
}
@ -47,7 +47,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
}
#[global_allocator]
static GLOBAL_HEAP: KernelAllocator = KernelAllocator::empty();
pub static GLOBAL_HEAP: KernelAllocator = KernelAllocator::empty();
/// Sets up kernel's global heap with given memory range.
///

View File

@ -1,22 +1,25 @@
//! Kernel panic handler code
use core::sync::atomic::{AtomicBool, Ordering};
use core::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
use device_api::interrupt::IpiDeliveryTarget;
use crate::{
arch::{Architecture, ArchitectureImpl, CpuMessage, ARCHITECTURE},
debug::{debug_internal, LogLevel},
device::display::console::flush_consoles,
sync::{hack_locks, SpinFence},
task::{sched::CpuQueue, Cpu},
};
// Just a fence to ensure secondary panics don't trash the screen
static PANIC_FINISHED_FENCE: SpinFence = SpinFence::new();
static PANIC_HANDLED_FENCE: SpinFence = SpinFence::new();
// Just a simple sequencer to ensure secondary panics don't trash the screen
static PANIC_FINISHED_FENCE: SpinFence = SpinFence::new();
static PANIC_SEQUENCE: AtomicU32 = AtomicU32::new(0);
/// Panic handler for CPUs other than the one that initiated it
pub fn panic_secondary() -> ! {
let id = Cpu::local_id();
unsafe {
ArchitectureImpl::set_interrupt_mask(true);
}
@ -24,7 +27,14 @@ pub fn panic_secondary() -> ! {
PANIC_HANDLED_FENCE.signal();
PANIC_FINISHED_FENCE.wait_one();
while PANIC_SEQUENCE.load(Ordering::Acquire) != id {
core::hint::spin_loop();
}
log_print_raw!(LogLevel::Fatal, "X");
flush_consoles();
PANIC_SEQUENCE.fetch_add(1, Ordering::Release);
loop {
ArchitectureImpl::wait_for_interrupt();
@ -43,6 +53,7 @@ fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
.compare_exchange(false, true, Ordering::Release, Ordering::Acquire)
.is_ok()
{
let id = Cpu::local_id();
// Let other CPUs know we're screwed
unsafe {
ARCHITECTURE
@ -95,8 +106,16 @@ fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
log_print_raw!(LogLevel::Fatal, "--- END PANIC ---\n");
log_print_raw!(LogLevel::Fatal, "X");
PANIC_FINISHED_FENCE.signal();
while PANIC_SEQUENCE.load(Ordering::Acquire) != id {
core::hint::spin_loop();
}
log_print_raw!(LogLevel::Fatal, "X");
flush_consoles();
PANIC_SEQUENCE.fetch_add(1, Ordering::Release);
unsafe {
ARCHITECTURE.reset();