//! AArch64 architecture and platforms implementation

use core::sync::atomic::Ordering;

use aarch64_cpu::{
    asm::barrier,
    registers::{
        CNTP_CTL_EL0, CNTP_TVAL_EL0, DAIF, ID_AA64MMFR0_EL1, MAIR_EL1, SCTLR_EL1, TCR_EL1,
        TTBR0_EL1, TTBR1_EL1,
    },
};
use abi::error::Error;
use device_api::{
    interrupt::{
        ExternalInterruptController, IpiDeliveryTarget, IrqNumber, LocalInterruptController,
    },
    timer::MonotonicTimestampProviderDevice,
};
use fdt_rs::prelude::PropReader;
use git_version::git_version;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};

use crate::{
    arch::{
        aarch64::{cpu::Cpu, devtree::FdtMemoryRegionIter},
        Architecture,
    },
    debug::{self},
    device::{self, power::arm_psci::Psci, DevTreeNodeInfo},
    fs::{devfs, Initrd, INITRD_DATA},
    mem::{
        heap,
        phys::{self, reserved::reserve_region, PageUsage, PhysicalMemoryRegion},
        ConvertAddress,
    },
    task,
    util::OneTimeInit,
};

use self::{
    boot::CPU_INIT_FENCE,
    devtree::{DevTreeIndexPropExt, DeviceTree},
    smp::CPU_COUNT,
    table::{init_fixed_tables, KERNEL_TABLES},
};
use super::CpuMessage;

pub mod boot;
pub mod context;
pub mod cpu;
pub mod devtree;
pub mod exception;
pub mod gic;
pub mod smp;
pub mod table;
pub mod timer;

const BOOT_STACK_SIZE: usize = 65536;
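
/// Fixed-size, aligned stack area used during early kernel boot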
#[derive(Clone, Copy)]
#[repr(C, align(0x20))]
struct KernelStack {
    data: [u8; BOOT_STACK_SIZE],
}

/// AArch64 platform interface
pub struct AArch64 {
    dt: OneTimeInit<DeviceTree<'static>>,
    ext_intc: OneTimeInit<&'static dyn ExternalInterruptController>,
    local_intc: OneTimeInit<&'static dyn LocalInterruptController<IpiMessage = CpuMessage>>,
    mtimer: OneTimeInit<&'static dyn MonotonicTimestampProviderDevice>,

    // ARM-only devices
    /// ARM PSCI instance on this system (there may not be one)
    pub psci: OneTimeInit<&'static Psci>,
}

/// Global platform handle
pub static ARCHITECTURE: AArch64 = AArch64 {
    dt: OneTimeInit::new(),
    ext_intc: OneTimeInit::new(),
    local_intc: OneTimeInit::new(),
    mtimer: OneTimeInit::new(),

    // ARM-only devices
    psci: OneTimeInit::new(),
};

impl Architecture for AArch64 {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;

    unsafe fn init_mmu(&self, bsp: bool) {
        if bsp {
            init_fixed_tables();
        }

        let tables_phys = absolute_address!(KERNEL_TABLES).physicalize() as u64;

        if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::Supported) {
            todo!();
        }

        MAIR_EL1.write(
            // Attribute 0 -- normal memory
            MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc +
            MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc +
            // Attribute 1 -- device memory
            MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck,
        );

        TCR_EL1.modify(
            // General: 48-bit physical addresses
            TCR_EL1::IPS::Bits_48 +
            // TTBR0: 4KiB granule, 39-bit (64 - 25) address space, inner-shareable
            TCR_EL1::TG0::KiB_4 + TCR_EL1::T0SZ.val(25) + TCR_EL1::SH0::Inner +
            // TTBR1: 4KiB granule, 39-bit (64 - 25) address space, outer-shareable
            TCR_EL1::TG1::KiB_4 + TCR_EL1::T1SZ.val(25) + TCR_EL1::SH1::Outer,
        );

        barrier::dmb(barrier::ISH);

        TTBR0_EL1.set_baddr(tables_phys);
        TTBR1_EL1.set_baddr(tables_phys);
        barrier::isb(barrier::SY);

        // Enable address translation; instruction and data accesses are left non-cacheable here
        SCTLR_EL1
            .modify(SCTLR_EL1::M::Enable + SCTLR_EL1::I::NonCacheable + SCTLR_EL1::C::NonCacheable);
        barrier::isb(barrier::SY);
    }

    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error> {
        unsafe { KERNEL_TABLES.map_device_pages(phys, count) }
    }

    fn wait_for_interrupt() {
        aarch64_cpu::asm::wfi();
    }

    unsafe fn set_interrupt_mask(mask: bool) {
        if mask {
            DAIF.modify(DAIF::I::SET);
        } else {
            DAIF.modify(DAIF::I::CLEAR);
        }
    }

    fn interrupt_mask() -> bool {
        DAIF.read(DAIF::I) != 0
    }

    fn cpu_count() -> usize {
        CPU_COUNT.load(Ordering::Acquire)
    }

    fn register_external_interrupt_controller(
        &self,
        intc: &'static dyn ExternalInterruptController,
    ) -> Result<(), Error> {
        self.ext_intc.init(intc);
        Ok(())
    }

    fn register_local_interrupt_controller(
        &self,
        intc: &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage>,
    ) -> Result<(), Error> {
        self.local_intc.init(intc);
        Ok(())
    }

    fn register_monotonic_timer(
        &self,
        timer: &'static dyn MonotonicTimestampProviderDevice,
    ) -> Result<(), Error> {
        self.mtimer.init(timer);
        Ok(())
    }

    fn external_interrupt_controller(&self) -> &'static dyn ExternalInterruptController {
        *self.ext_intc.get()
    }

    fn local_interrupt_controller(
        &self,
    ) -> &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage> {
        *self.local_intc.get()
    }

    fn monotonic_timer(&self) -> &'static dyn MonotonicTimestampProviderDevice {
        *self.mtimer.get()
    }

    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error> {
        if let Some(local_intc) = self.local_intc.try_get() {
            local_intc.send_ipi(target, msg)
        } else {
            Ok(())
        }
    }

    unsafe fn reset(&self) -> ! {
        todo!()
    }
}

impl AArch64 {
    /// Initializes the architecture's device tree
    ///
    /// # Safety
    ///
    /// Must only be called once, during early kernel initialization.
    pub unsafe fn init_device_tree(&self, dtb_phys: usize) {
        let dt = DeviceTree::from_addr(dtb_phys.virtualize());
        self.dt.init(dt);
    }

    /// Returns the device tree
    ///
    /// # Panics
    ///
    /// Will panic if the device tree has not yet been initialized
    pub fn device_tree(&self) -> &DeviceTree {
        self.dt.get()
    }
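
    /// Reserves the initrd and DTB memory and initializes the physical memory manager from
    /// the memory regions described in the device tree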
    unsafe fn init_physical_memory(&self, dtb_phys: usize) -> Result<(), Error> {
        let dt = self.device_tree();

        if let Some(initrd) = INITRD_DATA.try_get() {
            reserve_region(
                "initrd",
                PhysicalMemoryRegion {
                    base: initrd.phys_page_start,
                    size: initrd.phys_page_len,
                },
            );
        }

        reserve_region(
            "dtb",
            PhysicalMemoryRegion {
                base: dtb_phys,
                size: (dt.size() + 0xFFF) & !0xFFF,
            },
        );

        let regions = FdtMemoryRegionIter::new(dt);
        phys::init_from_iter(regions)
    }
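
    /// Returns the value of the `stdout-path` property of the `/chosen` node, if present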
    fn chosen_stdout_path<'a>(dt: &'a DeviceTree) -> Option<&'a str> {
        let chosen = dt.node_by_path("/chosen")?;
        let prop = devtree::find_prop(&chosen, "stdout-path")?;
        prop.str().ok()
    }
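
    /// Initializes platform devices. On the BSP, probes and initializes devices from the
    /// device tree; on APs, only brings up the local interrupt controller and the CPU timer.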
    fn init_platform(&self, bsp: bool) {
        if bsp {
            let dt = self.device_tree();

            let address_cells = dt.address_cells();
            let size_cells = dt.size_cells();

            let chosen_stdout_path = Self::chosen_stdout_path(dt);
            let chosen_stdout = chosen_stdout_path.and_then(|path| dt.node_by_path(path));

            // Probe and initialize the /chosen.stdout-path device first
            if let Some(node) = chosen_stdout.clone() {
                let probe = DevTreeNodeInfo {
                    address_cells,
                    size_cells,
                    node,
                };

                if let Some((device, _)) = device::probe_dt_node(&probe) {
                    unsafe {
                        device.init().unwrap();
                    }
                }
            }

            debug::reset();

            // Print some stuff now that the output is initialized
            infoln!(
                "Yggdrasil v{} ({})",
                env!("CARGO_PKG_VERSION"),
                git_version!()
            );
            infoln!("Initializing aarch64 platform");

            // Probe and initialize the rest of the devices
            let nodes = dt.root().children();
            if let Err(error) = device::enumerate_dt(
                address_cells,
                size_cells,
                nodes,
                |_, probe| {
                    // Skip the node already initialized as the /chosen stdout
                    if let Some(ref chosen_stdout) = chosen_stdout
                        && chosen_stdout.name() == probe.node.name()
                    {
                        return Ok(());
                    }

                    if let Some((device, _)) = device::probe_dt_node(&probe) {
                        unsafe {
                            device.init()?;
                        }
                    }

                    Ok(())
                },
            ) {
                warnln!(
                    "{} errors encountered when initializing platform devices",
                    error
                );
            }

            // Initialize IRQs for the devices
            device::manager_lock().devices().for_each(|dev| unsafe {
                if let Err(error) = dev.init_irq() {
                    errorln!(
                        "Could not init interrupts for {:?}: {:?}",
                        dev.display_name(),
                        error
                    );
                }
            });

            // Print the device list
            infoln!("Enumerated devices:");
            device::manager_lock().devices().for_each(|dev| {
                infoln!("* {:?}", dev.display_name());
            });
        } else {
            // BSP already initialized everything needed
            // Set up the timer and the local interrupt controller
            let intc = self.local_intc.get();

            unsafe {
                intc.init_ap().unwrap();
            }

            // TODO device-tree initialization for this
            // Enable the per-CPU physical timer and arm it with an initial countdown value
            CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::CLEAR);
            CNTP_TVAL_EL0.set(10000000);

            self.ext_intc
                .get()
                .enable_irq(IrqNumber::Private(14))
                .unwrap();
        }
    }
}
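
/// Locates the initrd described by the `/chosen` node, if any, and registers it in
/// `INITRD_DATA`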
fn setup_initrd() {
    let dt = ARCHITECTURE.device_tree();

    let Some(chosen) = dt.node_by_path("/chosen") else {
        return;
    };

    let Some(initrd_start) = devtree::find_prop(&chosen, "linux,initrd-start") else {
        return;
    };
    let Some(initrd_end) = devtree::find_prop(&chosen, "linux,initrd-end") else {
        return;
    };

    let address_cells = dt.address_cells();

    let Some(initrd_start) = initrd_start.cell1_array_item(0, address_cells) else {
        infoln!("No initrd specified");
        return;
    };
    let Some(initrd_end) = initrd_end.cell1_array_item(0, address_cells) else {
        infoln!("No initrd specified");
        return;
    };

    let initrd_start = initrd_start as usize;
    let initrd_end = initrd_end as usize;

    // Align the start down and the end up so the reserved pages cover the whole initrd
    let start_aligned = initrd_start & !0xFFF;
    let end_aligned = (initrd_end + 0xFFF) & !0xFFF;

    let data = unsafe {
        core::slice::from_raw_parts(
            initrd_start.virtualize() as *const _,
            initrd_end - initrd_start,
        )
    };

    let initrd = Initrd {
        phys_page_start: start_aligned,
        phys_page_len: end_aligned - start_aligned,
        data,
    };

    INITRD_DATA.init(initrd);
}

/// AArch64 kernel main entry point
pub fn kernel_main(dtb_phys: usize) -> ! {
    // NOTE it is critical that the code does not panic until debug output is set up,
    // otherwise no message will be displayed

    // Unmap TTBR0
    TTBR0_EL1.set(0);
    barrier::isb(barrier::SY);

    unsafe {
        ARCHITECTURE.init_device_tree(dtb_phys);
        AArch64::set_interrupt_mask(true);

        exception::init_exceptions();

        // Setup initrd
        setup_initrd();

        ARCHITECTURE
            .init_physical_memory(dtb_phys)
            .expect("Failed to initialize the physical memory manager");

        // Setup heap
        let heap_base = phys::alloc_pages_contiguous(16, PageUsage::Used)
            .expect("Could not allocate a block for heap");
        heap::init_heap(heap_base.virtualize(), 16 * 0x1000);

        devfs::init();

        // Enumerate the device tree
        ARCHITECTURE.init_platform(true);

        barrier::isb(barrier::SY);
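
        // Enable SP-alignment and data-alignment checks; instruction and data accesses are
        // left non-cacheable, same as in init_mmu()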
        SCTLR_EL1.modify(
            SCTLR_EL1::SA::Enable
                + SCTLR_EL1::A::Enable
                + SCTLR_EL1::I::NonCacheable
                + SCTLR_EL1::C::NonCacheable,
        );
        barrier::isb(barrier::SY);

        debugln!("Heap: {:#x?}", heap::heap_range());

        Cpu::init_local();

        let dt = ARCHITECTURE.dt.get();
        if let Err(e) = smp::start_ap_cores(dt) {
            errorln!(
                "Could not initialize AP CPUs: {:?}. Will continue with one CPU.",
                e
            );
        }

        Cpu::init_ipi_queues();

        CPU_INIT_FENCE.signal();
        CPU_INIT_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));

        task::init().expect("Failed to initialize the scheduler");

        infoln!("All cpus ready");

        task::enter();
    }
}