//! AArch64 architecture and platforms implementation

use core::sync::atomic::Ordering;

use aarch64_cpu::registers::{CNTP_CTL_EL0, CNTP_TVAL_EL0};
use abi::error::Error;
use device_api::{
    interrupt::{IpiDeliveryTarget, IpiMessage, Irq, LocalInterruptController},
    ResetDevice,
};
use device_tree::dt::{DevTreeIndexPropExt, DevTreeNodeInfo, DeviceTree, FdtMemoryRegionIter};
use git_version::git_version;
use kernel_arch_aarch64::{
    mem::{
        table::{L1, L3},
        EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1_COUNT,
    },
    ArchitectureImpl, PerCpuData,
};
use libk::{arch::Cpu, device::external_interrupt_controller};
use libk_mm::{
    address::PhysicalAddress,
    phys::PhysicalMemoryRegion,
    phys::{self, reserved::reserve_region},
    pointer::PhysicalRef,
    table::EntryLevelExt,
};
use libk_util::OneTimeInit;
use tock_registers::interfaces::Writeable;
use ygg_driver_pci::PciBusManager;

use crate::{
    debug,
    device::{self, power::arm_psci::Psci},
    fs::{Initrd, INITRD_DATA},
};

use self::gic::Gic;

use super::Platform;

pub mod boot;
pub mod exception;
pub mod gic;
pub mod smp;
pub mod timer;

const BOOT_STACK_SIZE: usize = 4096 * 32;

#[derive(Clone, Copy)]
#[repr(C, align(0x20))]
struct BootStack<const SIZE: usize> {
    data: [u8; SIZE],
}

/// AArch64 architecture implementation
pub struct AArch64 {
    dt: OneTimeInit<DeviceTree<'static>>,
    /// Optional instance of PSCI on this platform
    pub psci: OneTimeInit<&'static Psci>,
    reset: OneTimeInit<&'static dyn ResetDevice>,
    initrd: OneTimeInit<PhysicalRef<'static, [u8]>>,
}

impl<const SIZE: usize> BootStack<SIZE> {
    pub const fn zeroed() -> Self {
        Self { data: [0; SIZE] }
    }

    pub fn top_addr(&self) -> usize {
        unsafe { self.data.as_ptr().add(SIZE).addr() }
    }
}

impl Platform for AArch64 {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;

    type L3 = L3;

    unsafe fn start_application_processors(&self) {
        let dt = self.dt.get();
        if let Err(error) = smp::start_ap_cores(dt) {
            errorln!("Could not initialize AP CPUs: {:?}", error);
        }
    }

    unsafe fn send_ipi(&self, _target: IpiDeliveryTarget, _msg: IpiMessage) -> Result<(), Error> {
        Ok(())
        // XXX
        // if let Some(local_intc) = self.lintc.try_get() {
        //     local_intc.send_ipi(target, msg)
        // } else {
        //     Ok(())
        // }
    }

    fn register_reset_device(&self, reset: &'static dyn ResetDevice) -> Result<(), Error> {
        self.reset.init(reset);
        Ok(())
    }

    unsafe fn reset(&self) -> ! {
        if let Some(reset) = self.reset.try_get() {
            reset.reset()
        } else {
            let psci = self.psci.get();
            psci.reset()
        }
    }
}

static GIC: OneTimeInit<&'static Gic> = OneTimeInit::new();

impl AArch64 {
    fn set_gic(gic: &'static Gic) {
        GIC.init(gic);
    }

    fn extract_initrd_from_dt(
        &self,
        dt: &DeviceTree,
    ) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let chosen = dt.node_by_path("/chosen")?;
        let initrd_start = device_tree::find_prop(&chosen, "linux,initrd-start")?;
        let initrd_end = device_tree::find_prop(&chosen, "linux,initrd-end")?;

        let address_cells = dt.address_cells();

        let initrd_start = initrd_start.cell1_array_item(0, address_cells)?;
        let initrd_end = initrd_end.cell1_array_item(0, address_cells)?;

        let initrd_start = PhysicalAddress::from_u64(initrd_start);
        let initrd_end = PhysicalAddress::from_u64(initrd_end);

        Some((initrd_start, initrd_end))
    }

    fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
        _it: I,
        _memory_start: PhysicalAddress,
        memory_end: PhysicalAddress,
    ) -> Result<(), Error> {
        let end_l1i = memory_end.page_align_up::<L1>().page_index::<L1>();
        if end_l1i > RAM_MAPPING_L1_COUNT {
            panic!("TODO: partial physical memory mapping");
        }

        // Map 1GiB chunks
        for index in 0..end_l1i {
            unsafe {
                kernel_arch_aarch64::mem::map_ram_l1(index);
            }
        }

        MEMORY_LIMIT.store(memory_end.into_usize(), Ordering::Release);

        Ok(())
    }

    unsafe fn init_memory_management(&'static self, dtb: PhysicalAddress) -> Result<(), Error> {
        // Initialize the runtime mappings
        kernel_arch_aarch64::mem::init_fixed_tables();

        // Extract the size of the device tree
        let dtb_size = {
            let dtb_header = EarlyMapping::<u8>::map_slice(dtb, DeviceTree::MIN_HEADER_SIZE)?;
            DeviceTree::read_totalsize(dtb_header.as_ref()).unwrap()
        };

        reserve_region(
            "dtb",
            PhysicalMemoryRegion {
                base: dtb,
                size: (dtb_size + 0xFFF) & !0xFFF,
            },
        );

        let dtb_slice = EarlyMapping::<u8>::map_slice(dtb, dtb_size)?;
        let dt = DeviceTree::from_addr(dtb_slice.as_ptr() as usize);

        // Setup initrd from the dt
        let initrd = self.extract_initrd_from_dt(&dt);

        if let Some((start, end)) = initrd {
            let aligned_start = start.page_align_down::<L3>();
            let aligned_end = end.page_align_up::<L3>();
            let size = aligned_end - aligned_start;

            reserve_region(
                "initrd",
                PhysicalMemoryRegion {
                    base: aligned_start,
                    size,
                },
            );
        }

        // Initialize the physical memory
        let regions = FdtMemoryRegionIter::new(&dt);
        phys::init_from_iter(regions, Self::map_physical_memory)?;

        // EarlyMapping for DTB no longer needed, it lives in physical memory and can be obtained
        // through PhysicalRef
        let dtb_slice: PhysicalRef<'static, [u8]> = PhysicalRef::map_slice(dtb, dtb_size);
        let dt = DeviceTree::from_addr(dtb_slice.as_ptr() as usize);
        self.dt.init(dt);

        // Setup initrd
        if let Some((initrd_start, initrd_end)) = initrd {
            let aligned_start = initrd_start.page_align_down::<L3>();
            let aligned_end = initrd_end.page_align_up::<L3>();

            let len = initrd_end - initrd_start;
            let data = unsafe { PhysicalRef::map_slice(initrd_start, len) };
            self.initrd.init(data);

            INITRD_DATA.init(Initrd {
                phys_page_start: aligned_start,
                phys_page_len: aligned_end - aligned_start,
                data: self.initrd.get().as_ref(),
            });
        }

        Ok(())
    }

    unsafe fn init_platform(&self, is_bsp: bool) -> Result<(), Error> {
        let per_cpu = PerCpuData {
            gic: OneTimeInit::new(),
        };
        Cpu::init_local(None, per_cpu);

        if is_bsp {
            // Register known PCI device drivers
            ygg_driver_pci::register_vendor_driver(
                "Virtio PCI Network Device",
                0x1AF4,
                0x1000,
                ygg_driver_virtio_net::probe,
            );
            ygg_driver_pci::register_class_driver(
                "AHCI SATA Controller",
                0x01,
                Some(0x06),
                Some(0x01),
                ygg_driver_ahci::probe,
            );
            ygg_driver_pci::register_class_driver(
                "USB xHCI",
                0x0C,
                Some(0x03),
                Some(0x30),
                ygg_driver_usb_xhci::probe,
            );

            let dt = self.dt.get();
            let address_cells = dt.address_cells();
            let size_cells = dt.size_cells();

            // Setup /chosen.stdout-path to get early debug printing
            let chosen_stdout_path = dt.chosen_stdout_path();
            let chosen_stdout = chosen_stdout_path.and_then(|path| dt.node_by_path(path));

            if let Some(node) = chosen_stdout.clone() {
                let probe = DevTreeNodeInfo {
                    address_cells,
                    size_cells,
                    node,
                };

                if let Some((device, _)) =
                    device_tree::driver::probe_dt_node(&probe, device::register_device)
                {
                    device.init()?;
                }
            }

            debug::init();

            infoln!(
                "Yggdrasil v{} ({})",
                env!("CARGO_PKG_VERSION"),
                git_version!()
            );
            infoln!("Initializing aarch64 platform");

            let nodes = dt.root().children();
            if let Err(error) =
                device_tree::driver::enumerate_dt(address_cells, size_cells, nodes, |_, probe| {
                    // Skip chosen-stdout, already initialized
                    if let Some(ref chosen_stdout) = chosen_stdout
                        && chosen_stdout.name() == probe.node.name()
                    {
                        return Ok(());
                    }

                    if let Some((device, _)) =
                        device_tree::driver::probe_dt_node(&probe, device::register_device)
                    {
                        device.init()?;
                    }

                    Ok(())
                })
            {
                warnln!(
                    "{} errors encountered when initializing platform devices",
                    error
                );
            }

            // Initialize IRQs for the devices
            device::manager_lock().devices().for_each(|dev| unsafe {
                if let Err(error) = dev.init_irq() {
                    warnln!(
                        "Could not init IRQs for {:?}: {:?}",
                        dev.display_name(),
                        error
                    );
                }
            });

            infoln!("Enumerated devices:");
            device::manager_lock().devices().for_each(|dev| {
                infoln!("* {:?}", dev.display_name());
            });

            PciBusManager::setup_bus_devices()?;
        } else {
            // BSP already initialized everything needed

            // Setup timer and local interrupt controller
            if let Some(gic) = GIC.try_get() {
                unsafe {
                    gic.init_ap().unwrap();
                }
            }

            // TODO device-tree initialization for this
            CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::CLEAR);
            CNTP_TVAL_EL0.set(10000000);
            external_interrupt_controller()
                .enable_irq(Irq::Private(14))
                .unwrap();
        }

        if let Some(gic) = GIC.try_get() {
            let cpu_data = ArchitectureImpl::local_cpu_data().unwrap();
            cpu_data.gic.init(*gic);
        }

        Ok(())
    }
}

/// AArch64 implementation value
pub static PLATFORM: AArch64 = AArch64 {
    dt: OneTimeInit::new(),
    initrd: OneTimeInit::new(),
    psci: OneTimeInit::new(),
    reset: OneTimeInit::new(),
};