aarch64: new memory arch impl

parent d0b8d99378
commit 75b1807e8e
lib/memtables/Cargo.toml:
@@ -7,3 +7,7 @@ edition = "2021"
 
 [dependencies]
 bytemuck = { version = "1.14.0", features = ["derive"] }
+
+[features]
+default = []
+all = []
lib/memtables/src/aarch64.rs (new file, 26 lines)
@@ -0,0 +1,26 @@
use bytemuck::{Pod, Zeroable};

use crate::RawTable;

pub const KERNEL_L3_COUNT: usize = 4;

#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct FixedTables {
    // 1GiB entries
    pub l1: RawTable,

    // 2MiB entries
    pub l2: RawTable,
    pub l3s: [RawTable; KERNEL_L3_COUNT],
}

impl FixedTables {
    pub const fn zeroed() -> Self {
        Self {
            l1: RawTable::zeroed(),
            l2: RawTable::zeroed(),
            l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
        }
    }
}
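Note: RawTable is one 4 KiB page (512 u64 entries, 4 KiB-aligned), and FixedTables derives bytemuck::Pod, so the whole structure can be handled as raw bytes; with KERNEL_L3_COUNT = 4 it is exactly six pages. A quick sanity check (illustrative only, not part of the commit):

    use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};

    // 1 x L1 + 1 x L2 + 4 x L3 tables, each one page (4096 bytes).
    assert_eq!(
        core::mem::size_of::<FixedTables>(),
        (2 + KERNEL_L3_COUNT) * 4096,
    );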
lib/memtables/src/any.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
use crate::{aarch64, x86_64};

pub enum AnyTables {
    X86_64(x86_64::FixedTables),
    AArch64(aarch64::FixedTables),
}

impl AnyTables {
    pub fn as_bytes(&self) -> &[u8] {
        match self {
            Self::X86_64(tables) => bytemuck::bytes_of(tables),
            Self::AArch64(tables) => bytemuck::bytes_of(tables),
        }
    }
}

impl From<x86_64::FixedTables> for AnyTables {
    fn from(value: x86_64::FixedTables) -> Self {
        Self::X86_64(value)
    }
}

impl From<aarch64::FixedTables> for AnyTables {
    fn from(value: aarch64::FixedTables) -> Self {
        Self::AArch64(value)
    }
}
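Note: AnyTables lets an architecture-independent caller (e.g. a host-side image builder) hold tables for either target and dump them as bytes. A minimal illustration, assuming the crate is built with the "all" feature:

    use memtables::{aarch64, any::AnyTables};

    // Build empty AArch64 tables and serialize them for writing into an image.
    let tables: AnyTables = aarch64::FixedTables::zeroed().into();
    let bytes: &[u8] = tables.as_bytes();
    assert_eq!(bytes.len() % 4096, 0);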
lib/memtables/src/lib.rs:
@@ -2,7 +2,20 @@
 
 use bytemuck::{Pod, Zeroable};
 
-pub const KERNEL_L3_COUNT: usize = 16;
+// AArch64
+#[cfg(any(feature = "all", target_arch = "aarch64"))]
+pub mod aarch64;
+#[cfg(all(not(feature = "all"), target_arch = "aarch64"))]
+pub use aarch64::FixedTables;
+
+// x86-64
+#[cfg(any(feature = "all", target_arch = "x86_64"))]
+pub mod x86_64;
+#[cfg(all(not(feature = "all"), target_arch = "x86_64"))]
+pub use x86_64::FixedTables;
+
+#[cfg(feature = "all")]
+pub mod any;
 
 #[derive(Clone, Copy, Pod, Zeroable)]
 #[repr(C, align(0x1000))]
@@ -10,30 +23,8 @@ pub struct RawTable {
     pub data: [u64; 512],
 }
 
-#[derive(Clone, Copy, Pod, Zeroable)]
-#[repr(C)]
-pub struct FixedTables {
-    pub l0: RawTable,
-
-    pub kernel_l1: RawTable,
-    pub kernel_l2: RawTable,
-    pub kernel_l3s: [RawTable; KERNEL_L3_COUNT],
-}
-
 impl RawTable {
     pub const fn zeroed() -> Self {
         Self { data: [0; 512] }
     }
 }
-
-impl FixedTables {
-    pub const fn zeroed() -> Self {
-        Self {
-            l0: RawTable::zeroed(),
-
-            kernel_l1: RawTable::zeroed(),
-            kernel_l2: RawTable::zeroed(),
-            kernel_l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
-        }
-    }
-}
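Note: the intent appears to be that a target kernel build sees exactly one FixedTables type re-exported at the crate root, while a host-side tool built with --features all gets both modules plus AnyTables. A small illustration of the target-side view (hypothetical caller, not from the commit):

    #[cfg(all(not(feature = "all"), target_arch = "aarch64"))]
    fn make_kernel_tables() -> memtables::FixedTables {
        // Resolves to memtables::aarch64::FixedTables via the re-export.
        memtables::FixedTables::zeroed()
    }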
lib/memtables/src/x86_64.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
use bytemuck::{Pod, Zeroable};

use crate::RawTable;

pub const KERNEL_L3_COUNT: usize = 16;

#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct FixedTables {
    pub l0: RawTable,

    pub kernel_l1: RawTable,
    pub kernel_l2: RawTable,
    pub kernel_l3s: [RawTable; KERNEL_L3_COUNT],
}

impl FixedTables {
    pub const fn zeroed() -> Self {
        Self {
            l0: RawTable::zeroed(),

            kernel_l1: RawTable::zeroed(),
            kernel_l2: RawTable::zeroed(),
            kernel_l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
        }
    }
}
src/arch/aarch64/entry.S:
@@ -13,6 +13,12 @@
     movk \reg, #((\value) >> 16), lsl #16
 .endm
 
+.macro MOV_ABS reg, sym
+    movz \reg, #:abs_g2:\sym
+    movk \reg, #:abs_g1_nc:\sym
+    movk \reg, #:abs_g0_nc:\sym
+.endm
+
 .macro LEAVE_EL2, ret_label
     mrs x8, CNTHCTL_EL2
     orr x8, x8, #(CNTHCTL_EL2_EL1PCTEN | CNTHCTL_EL2_EL1PCEN)
@@ -61,8 +67,9 @@ __aarch64_entry:
     isb
 
-    // Zero .bss
-    adr x8, __bss_start_phys
-    adr x9, __bss_end_phys
+    MOV_ABS x8, __bss_start_phys
+    MOV_ABS x9, __bss_end_phys
+
+    // Zero .bss
 1:
     cmp x8, x9
     beq 2f
@@ -84,7 +91,7 @@ __aarch64_entry:
 
 .section .text
 __aarch64_ap_entry:
-    // x0 -- stack pointer (lower address space)
+    // x0 -- physical sp
 
     mrs x8, CurrentEL
     lsr x8, x8, #2
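Note: adr is PC-relative with a roughly +/-1 MiB range, so it only works while executing near the symbol; the new MOV_ABS macro instead materializes the symbol's absolute link-time address from 16-bit chunks via the :abs_g2:/:abs_g1_nc:/:abs_g0_nc: relocations, independent of where the code runs. The decomposition, sketched in Rust for clarity (illustrative, not from the commit):

    // Split a 48-bit address the way movz/movk with :abs_g2/g1/g0: do.
    fn mov_abs_chunks(addr: u64) -> [u16; 3] {
        [
            (addr >> 32) as u16, // movz reg, #:abs_g2:sym  (bits 47:32)
            (addr >> 16) as u16, // movk reg, #:abs_g1_nc:sym (bits 31:16)
            addr as u16,         // movk reg, #:abs_g0_nc:sym (bits 15:0)
        ]
    }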
src/arch/aarch64/boot.rs:
@@ -1,135 +1,168 @@
 //! Main entry point for the AArch64 platforms
-use core::{
-    arch::{asm, global_asm},
-    sync::atomic::Ordering,
-};
+use core::{arch::global_asm, sync::atomic::Ordering};
 
 use aarch64_cpu::{
     asm::barrier,
-    registers::{CPACR_EL1, TTBR0_EL1},
+    registers::{CPACR_EL1, ID_AA64MMFR0_EL1, MAIR_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
 };
-use tock_registers::interfaces::{ReadWriteable, Writeable};
+use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
 
-use super::{cpu::Cpu, exception, AArch64, KernelStack, ARCHITECTURE, BOOT_STACK_SIZE};
+use super::{
+    exception, mem::load_fixed_tables, smp::CPU_COUNT, AArch64, BootStack, ARCHITECTURE,
+    BOOT_STACK_SIZE,
+};
 use crate::{
     absolute_address,
-    arch::{aarch64::smp::CPU_COUNT, Architecture, ArchitectureImpl},
+    arch::{aarch64::mem::table::L3, Architecture},
     fs::devfs,
     kernel_main, kernel_secondary_main,
-    mem::{
-        heap,
-        phys::{self, PageUsage},
-        ConvertAddress, KERNEL_VIRT_OFFSET,
-    },
+    mem::{address::IntoRaw, phys, table::EntryLevel, PhysicalAddress, KERNEL_VIRT_OFFSET},
     task::runtime,
 };
 
-extern "C" fn el1_bsp_lower_entry(dtb_phys: usize) -> ! {
-    // Unmask FP operations
-    CPACR_EL1.modify(CPACR_EL1::FPEN::TrapNothing);
-
-    unsafe {
-        ARCHITECTURE.init_mmu(true);
-    }
-
-    let sp = unsafe { BSP_STACK.data.as_ptr().add(BOOT_STACK_SIZE).virtualize() };
+unsafe fn pre_init_mmu() {
+    if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::Supported) {
+        // TODO early panic
+        loop {}
+    }
+
+    MAIR_EL1.write(
+        //// Attribute 0 -- normal memory
+        // Inner
+        MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc +
+        // Outer
+        MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc +
+        //// Attribute 1 -- device memory
+        MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck,
+    );
+
+    TCR_EL1.modify(
+        // General
+        TCR_EL1::IPS::Bits_48 +
+        // TTBR0
+        TCR_EL1::TG0::KiB_4 + TCR_EL1::T0SZ.val(25) + TCR_EL1::SH0::Inner +
+        // TTBR1
+        TCR_EL1::TG1::KiB_4 + TCR_EL1::T1SZ.val(25) + TCR_EL1::SH1::Outer,
+    );
+}
+
+unsafe fn enable_mmu() {
+    barrier::dmb(barrier::ISH);
+
+    SCTLR_EL1.modify(
+        // Enable translation
+        SCTLR_EL1::M::Enable +
+        // (TODO) Disable I + D caches
+        SCTLR_EL1::I::NonCacheable + SCTLR_EL1::C::NonCacheable,
+    );
+
+    barrier::isb(barrier::SY);
+}
+
+unsafe fn enter_higher_half(sp: usize, elr: usize, x0: usize) -> ! {
+    unsafe {
+        core::arch::asm!(r#"
+            mov sp, {sp}
+            mov x0, {x0}
+            mov lr, xzr
+            br {elr}
+        "#, elr = in(reg) elr, sp = in(reg) sp, x0 = in(reg) x0, options(noreturn));
+    }
+}
+
+// NOTE executes in "lower-half" address space, MMU not yet enabled
+unsafe extern "C" fn __aarch64_el1_bsp_lower_entry(dtb: PhysicalAddress) -> ! {
+    AArch64::set_interrupt_mask(true);
+
+    // Don't trap FP operations
+    CPACR_EL1.modify(CPACR_EL1::FPEN::TrapNothing);
+
+    // Setup MMU to jump to "higher-half" address space
+    pre_init_mmu();
+    load_fixed_tables();
+    enable_mmu();
+
+    // Safety: SP points to the .bss section, so it's +offset mapped
+    let sp = unsafe { BSP_STACK.data.as_ptr().add(BOOT_STACK_SIZE) as usize } + KERNEL_VIRT_OFFSET;
     let elr = absolute_address!(__aarch64_bsp_upper_entry);
 
     barrier::dsb(barrier::SY);
     barrier::isb(barrier::SY);
-    enter_higher_half(sp as usize, elr, dtb_phys);
+    // TODO pass dtb
+    enter_higher_half(sp, elr, dtb.into_raw());
 }
 
-unsafe extern "C" fn el1_ap_lower_entry(sp: usize) -> ! {
-    ArchitectureImpl::set_interrupt_mask(true);
+unsafe extern "C" fn __aarch64_bsp_upper_entry(dtb: PhysicalAddress) -> ! {
+    // Remove the "lower-half" mapping, no longer needed
+    TTBR0_EL1.set(0);
+
+    // Setup the "runtime" part of the kernel tables
+    ARCHITECTURE
+        .init_memory_management(dtb)
+        .expect("Could not initialize memory management");
+    barrier::isb(barrier::SY);
+
+    exception::init_exceptions();
+
+    // // Setup initrd
+    // super::setup_initrd();
+
+    devfs::init();
+
+    runtime::init_task_queue();
+
+    // Initialize the BSP CPU + the devices
+    ARCHITECTURE
+        .init_platform(true)
+        .expect("Could not initialize the platform");
+
+    kernel_main()
+}
+
+unsafe extern "C" fn __aarch64_el1_ap_lower_entry(sp: PhysicalAddress) -> ! {
+    const AP_STACK_PAGES: usize = 8;
+    AArch64::set_interrupt_mask(true);
 
     // Unmask FP operations
     CPACR_EL1.modify(CPACR_EL1::FPEN::TrapNothing);
 
-    unsafe {
-        ARCHITECTURE.init_mmu(false);
-    }
+    pre_init_mmu();
+    load_fixed_tables();
+    enable_mmu();
 
-    let sp = sp.virtualize();
+    let stack_pages = phys::alloc_pages_contiguous(AP_STACK_PAGES).unwrap();
+    let stack_base = stack_pages.virtualize_raw();
+    let sp = stack_base + L3::SIZE * AP_STACK_PAGES;
+
     let elr = absolute_address!(__aarch64_ap_upper_entry);
 
     enter_higher_half(sp, elr, 0);
 }
 
-fn enter_higher_half(sp: usize, elr: usize, arg: usize) -> ! {
-    unsafe {
-        asm!(r#"
-            mov sp, {sp}
-            mov x0, {arg}
-            br {entry}
-        "#, entry = in(reg) elr, arg = in(reg) arg, sp = in(reg) sp, options(noreturn));
-    }
-}
-
-unsafe extern "C" fn __aarch64_bsp_upper_entry(dtb_phys: usize) -> ! {
-    // NOTE it is critical that the code does not panic until the debug is set up, otherwise no
-    // message will be displayed
-
-    // Unmap TTBR0
-    TTBR0_EL1.set(0);
-
+extern "C" fn __aarch64_ap_upper_entry() -> ! {
     barrier::dmb(barrier::ISH);
     barrier::isb(barrier::SY);
 
-    ARCHITECTURE.init_device_tree(dtb_phys);
-
     AArch64::set_interrupt_mask(true);
-
-    exception::init_exceptions();
-
-    // Setup initrd
-    super::setup_initrd();
-
-    ARCHITECTURE
-        .init_physical_memory(dtb_phys)
-        .expect("Failed to initialize the physical memory manager");
-
-    // Setup heap
-    let heap_base = phys::alloc_pages_contiguous(16, PageUsage::Used)
-        .expect("Could not allocate a block for heap");
-
-    heap::init_heap(heap_base.virtualize(), 16 * 0x1000);
-
-    devfs::init();
-
-    // Enumerate the device tree
-    ARCHITECTURE.init_platform(true);
-
-    Cpu::init_local();
-
-    kernel_main()
-}
-
-extern "C" fn __aarch64_ap_upper_entry(_x0: usize) -> ! {
-    assert!(ArchitectureImpl::interrupt_mask());
 
     // Signal to BSP that we're up
-    CPU_COUNT.fetch_add(1, Ordering::SeqCst);
+    let cpu_id = CPU_COUNT.fetch_add(1, Ordering::SeqCst);
     aarch64_cpu::asm::sev();
 
+    infoln!("cpu{} initializing", cpu_id);
+
     exception::init_exceptions();
 
-    // Initialize CPU-local GIC and timer
-    unsafe {
-        ARCHITECTURE.init_platform(false);
-
-        Cpu::init_local();
-    }
-
     kernel_secondary_main()
 }
 
+#[link_section = ".bss"]
-static BSP_STACK: KernelStack = KernelStack {
+static BSP_STACK: BootStack = BootStack {
     data: [0; BOOT_STACK_SIZE],
 };
 
 global_asm!(
     include_str!("entry.S"),
-    kernel_lower_entry = sym el1_bsp_lower_entry,
-    kernel_ap_lower_entry = sym el1_ap_lower_entry,
+    kernel_lower_entry = sym __aarch64_el1_bsp_lower_entry,
+    kernel_ap_lower_entry = sym __aarch64_el1_ap_lower_entry,
     stack_bottom = sym BSP_STACK,
-    stack_size = const BOOT_STACK_SIZE,
-    kernel_virt_offset = const KERNEL_VIRT_OFFSET
+    kernel_virt_offset = const KERNEL_VIRT_OFFSET,
+    stack_size = const BOOT_STACK_SIZE
 );
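Note: T0SZ/T1SZ of 25 gives 64 - 25 = 39-bit address spaces (512 GiB) for both TTBR0 and TTBR1, which matches KERNEL_VIRT_OFFSET = 0xFFFFFF8000000000, the base of the top 512 GiB. A quick check (illustrative):

    const T1SZ: u64 = 25;
    const KERNEL_VIRT_OFFSET: u64 = 0xFFFFFF8000000000;

    // The TTBR1 region spans the top 2^(64 - T1SZ) bytes of the address space.
    assert_eq!(KERNEL_VIRT_OFFSET, 0u64.wrapping_sub(1u64 << (64 - T1SZ)));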
src/arch/aarch64/context.rs:
@@ -4,10 +4,7 @@ use core::{arch::global_asm, cell::UnsafeCell};
 use abi::error::Error;
 
 use crate::{
-    mem::{
-        phys::{self, PageUsage},
-        ConvertAddress,
-    },
+    mem::{address::IntoRaw, phys, PhysicalAddress},
     task::context::TaskContextImpl,
 };
 
@@ -64,8 +61,8 @@ impl StackBuilder {
         self.sp
     }
 
-    fn init_common(&mut self, entry: usize, ttbr0: usize) {
-        self.push(ttbr0); // ttbr0_el1
+    fn init_common(&mut self, entry: usize, ttbr0: u64) {
+        self.push(ttbr0 as _); // ttbr0_el1
         self.push(0); // tpidr_el0
 
         self.push(entry); // x30/lr
@@ -89,9 +86,8 @@ impl TaskContextImpl for TaskContext {
 
     fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
         const KERNEL_TASK_PAGES: usize = 8;
-        let stack_base = unsafe {
-            phys::alloc_pages_contiguous(KERNEL_TASK_PAGES, PageUsage::Used)?.virtualize()
-        };
+        let stack_base =
+            unsafe { phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw() };
 
         let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
 
@@ -112,10 +108,9 @@ impl TaskContextImpl for TaskContext {
         })
     }
 
-    fn user(entry: usize, arg: usize, ttbr0: usize, user_stack_sp: usize) -> Result<Self, Error> {
+    fn user(entry: usize, arg: usize, ttbr0: u64, user_stack_sp: usize) -> Result<Self, Error> {
         const USER_TASK_PAGES: usize = 16;
-        let stack_base =
-            unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES, PageUsage::Used)?.virtualize() };
+        let stack_base = unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw() };
 
         let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
 
src/arch/aarch64/cpu.rs:
@@ -122,6 +122,7 @@ impl Cpu {
 
         match msg {
             CpuMessage::Panic => panic::panic_secondary(),
+            CpuMessage::Shutdown => todo!(),
         }
     }
 }
src/arch/aarch64/exception.rs:
@@ -16,10 +16,7 @@ use abi::{
 use tock_registers::interfaces::{Readable, Writeable};
 
 use crate::{
-    arch::{
-        aarch64::{cpu::Cpu, table::AddressSpace},
-        Architecture, ArchitectureImpl,
-    },
+    arch::{aarch64::cpu::Cpu, Architecture, ArchitectureImpl},
     debug::LogLevel,
     syscall::raw_syscall_handler,
     task::{context::TaskFrame, process::Process},
@@ -179,14 +176,14 @@ fn dump_irrecoverable_exception(frame: &ExceptionFrame, ec: u64, iss: u64) {
         _ => (),
     }
 
-    unsafe {
-        let space = AddressSpace::from_phys_raw(TTBR0_EL1.get_baddr() as _);
-        let far = FAR_EL1.get() as usize;
-        space.walk(far, |level, raw| {
-            log_print_raw!(LogLevel::Fatal, "Level {}: entry={:#x}\n", level, raw);
-            true
-        });
-    }
+    // unsafe {
+    //     let space = AddressSpace::from_phys_raw(TTBR0_EL1.get_baddr() as _);
+    //     let far = FAR_EL1.get() as usize;
+    //     space.walk(far, |level, raw| {
+    //         log_print_raw!(LogLevel::Fatal, "Level {}: entry={:#x}\n", level, raw);
+    //         true
+    //     });
+    // }
 
     log_print_raw!(LogLevel::Fatal, "System register dump:\n");
     log_print_raw!(LogLevel::Fatal, "SCTLR_EL1 = {:#x}\n", SCTLR_EL1.get());
src/arch/aarch64/gic/gicc.rs:
@@ -36,11 +36,11 @@ register_structs! {
 }
 
 pub(super) struct Gicc {
-    regs: DeviceMemoryIo<GiccRegs>,
+    regs: DeviceMemoryIo<'static, GiccRegs>,
 }
 
 impl Gicc {
-    pub const fn new(regs: DeviceMemoryIo<GiccRegs>) -> Self {
+    pub const fn new(regs: DeviceMemoryIo<'static, GiccRegs>) -> Self {
         Self { regs }
     }
 
src/arch/aarch64/gic/gicd.rs:
@@ -65,8 +65,8 @@ register_structs! {
 }
 
 pub(super) struct Gicd {
-    shared_regs: Spinlock<DeviceMemoryIo<GicdSharedRegs>>,
-    banked_regs: DeviceMemoryIo<GicdBankedRegs>,
+    shared_regs: Spinlock<DeviceMemoryIo<'static, GicdSharedRegs>>,
+    banked_regs: DeviceMemoryIo<'static, GicdBankedRegs>,
 }
 
 impl GicdSharedRegs {
@@ -85,8 +85,8 @@ impl GicdSharedRegs {
 
 impl Gicd {
     pub const fn new(
-        shared_regs: DeviceMemoryIo<GicdSharedRegs>,
-        banked_regs: DeviceMemoryIo<GicdBankedRegs>,
+        shared_regs: DeviceMemoryIo<'static, GicdSharedRegs>,
+        banked_regs: DeviceMemoryIo<'static, GicdBankedRegs>,
     ) -> Self {
         let shared_regs = Spinlock::new(shared_regs);
         Self {
 
src/arch/aarch64/gic/mod.rs:
@@ -4,7 +4,7 @@ use core::sync::atomic::Ordering;
 
 use aarch64_cpu::asm::barrier;
 use abi::error::Error;
-use alloc::boxed::Box;
+use alloc::{boxed::Box, sync::Arc};
 use device_api::{
     interrupt::{
         ExternalInterruptController, FixedInterruptTable, InterruptHandler, InterruptTable,
@@ -18,7 +18,11 @@ use crate::{
     arch::{aarch64::IrqNumber, Architecture, CpuMessage},
     device::devtree::{self, DevTreeIndexPropExt},
     device_tree_driver,
-    mem::device::{DeviceMemory, DeviceMemoryIo},
+    mem::{
+        address::FromRaw,
+        device::{DeviceMemoryIo, DeviceMemoryMapping, RawDeviceMemoryMapping},
+        PhysicalAddress,
+    },
     sync::IrqSafeSpinlock,
 };
 
@@ -36,8 +40,8 @@ pub mod gicd;
 pub struct Gic {
     gicc: OneTimeInit<Gicc>,
     gicd: OneTimeInit<Gicd>,
-    gicd_base: usize,
-    gicc_base: usize,
+    gicd_base: PhysicalAddress,
+    gicc_base: PhysicalAddress,
     table: IrqSafeSpinlock<FixedInterruptTable<MAX_IRQ>>,
 }
 
@@ -47,10 +51,10 @@ impl Device for Gic {
     }
 
     unsafe fn init(&'static self) -> Result<(), Error> {
-        let gicd_mmio = DeviceMemory::map("GICv2 Distributor registers", self.gicd_base, 0x1000)?;
-        let gicd_mmio_shared = DeviceMemoryIo::new(gicd_mmio.clone());
-        let gicd_mmio_banked = DeviceMemoryIo::new(gicd_mmio);
-        let gicc_mmio = DeviceMemoryIo::map("GICv2 CPU registers", self.gicc_base)?;
+        let gicd_mmio = Arc::new(RawDeviceMemoryMapping::map(self.gicd_base, 0x1000)?);
+        let gicd_mmio_shared = DeviceMemoryIo::from_raw(gicd_mmio.clone())?;
+        let gicd_mmio_banked = DeviceMemoryIo::from_raw(gicd_mmio)?;
+        let gicc_mmio = DeviceMemoryIo::map(self.gicc_base)?;
 
         let gicd = Gicd::new(gicd_mmio_shared, gicd_mmio_banked);
         let gicc = Gicc::new(gicc_mmio);
@@ -169,7 +173,7 @@ impl Gic {
     /// # Safety
     ///
     /// The caller must ensure the addresses actually point to the GIC components.
-    pub const unsafe fn new(gicd_base: usize, gicc_base: usize) -> Self {
+    pub const unsafe fn new(gicd_base: PhysicalAddress, gicc_base: PhysicalAddress) -> Self {
         Self {
             gicc: OneTimeInit::new(),
             gicd: OneTimeInit::new(),
@@ -188,6 +192,9 @@ device_tree_driver! {
         let (gicc_base, _) = reg.cell2_array_item(0, dt.address_cells, dt.size_cells)?;
         let (gicd_base, _) = reg.cell2_array_item(1, dt.address_cells, dt.size_cells)?;
 
-        Some(Box::new(unsafe { Gic::new(gicc_base as usize, gicd_base as usize) }))
+        Some(Box::new(unsafe { Gic::new(
+            PhysicalAddress::from_raw(gicc_base),
+            PhysicalAddress::from_raw(gicd_base),
+        )}))
     }
 }
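Note: the GIC init now goes through the new mapping API: one RawDeviceMemoryMapping is created and shared via Arc, with typed DeviceMemoryIo<'static, T> views derived from it. Roughly (a sketch based only on the calls visible in this diff):

    // Two typed views over one shared 4 KiB GICD mapping, plus a direct GICC map.
    let gicd_mmio = Arc::new(RawDeviceMemoryMapping::map(gicd_base, 0x1000)?);
    let shared: DeviceMemoryIo<'static, GicdSharedRegs> =
        DeviceMemoryIo::from_raw(gicd_mmio.clone())?;
    let banked: DeviceMemoryIo<'static, GicdBankedRegs> =
        DeviceMemoryIo::from_raw(gicd_mmio)?;
    let gicc: DeviceMemoryIo<'static, GiccRegs> = DeviceMemoryIo::map(gicc_base)?;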
src/arch/aarch64/mem/mod.rs (new file, 374 lines)
@@ -0,0 +1,374 @@
use core::{
    alloc::Layout,
    ops::{Deref, DerefMut},
};

use abi::error::Error;
use cfg_if::cfg_if;
use kernel_util::util::OneTimeInit;
use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};

use aarch64_cpu::registers::{TTBR0_EL1, TTBR1_EL1};
use static_assertions::const_assert_eq;
use tock_registers::interfaces::Writeable;

use crate::mem::{
    address::{FromRaw, IntoRaw, KernelImageObject},
    device::RawDeviceMemoryMapping,
    table::EntryLevel,
    PhysicalAddress, KERNEL_VIRT_OFFSET,
};

use self::table::{PageAttributes, PageEntry, PageTable, L1, L2, L3};

pub mod process;
pub(super) mod table;

// TODO eliminate this requirement by using precomputed indices
const MAPPING_OFFSET: usize = KERNEL_VIRT_OFFSET;
cfg_if! {
    if #[cfg(feature = "aarch64_qemu")] {
        const KERNEL_PHYS_BASE: usize = 0x40080000;
    } else {
        const KERNEL_PHYS_BASE: usize = 0x40080000;
    }
}

// Precomputed mappings
const KERNEL_L1_INDEX: usize = L1::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_START_L2_INDEX: usize = L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_END_L2_INDEX: usize =
    L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE) + KERNEL_L3_COUNT;

// Must not be zero, should be at 4MiB
const_assert_eq!(KERNEL_START_L2_INDEX, 0);
// From static mapping
const_assert_eq!(KERNEL_L1_INDEX, 1);

// Runtime mappings
// 2MiB max
const EARLY_MAPPING_L2I: usize = KERNEL_END_L2_INDEX + 1;
// 1GiB max
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
// 1GiB max
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
// 16GiB max
pub(super) const RAM_MAPPING_L1_COUNT: usize = 16;
const RAM_MAPPING_START_L1I: usize = KERNEL_L1_INDEX + 3;
const RAM_MAPPING_END_L1I: usize = RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT;

const DEVICE_MAPPING_L3_COUNT: usize = 4;

// 2MiB for early mappings
const EARLY_MAPPING_OFFSET: usize =
    MAPPING_OFFSET | (KERNEL_L1_INDEX * L1::SIZE) | (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for heap mapping
pub(super) const HEAP_MAPPING_OFFSET: usize = MAPPING_OFFSET | (HEAP_MAPPING_L1I * L1::SIZE);
pub(super) static mut HEAP_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize = MAPPING_OFFSET | (DEVICE_MAPPING_L1I * L1::SIZE);
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
    [PageTable::zeroed(); DEVICE_MAPPING_L3_COUNT];
// 16GiB for RAM mapping
pub(super) const RAM_MAPPING_OFFSET: usize = MAPPING_OFFSET | (RAM_MAPPING_START_L1I * L1::SIZE);
pub(super) static MEMORY_LIMIT: OneTimeInit<usize> = OneTimeInit::new();

#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
    unsafe { KernelImageObject::new(FixedTables::zeroed()) };

pub struct EarlyMapping<'a, T: ?Sized> {
    value: &'a mut T,
    page_count: usize,
}

impl<'a, T: Sized> EarlyMapping<'a, T> {
    pub unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
        let layout = Layout::new::<T>();
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;

        let virt = map_early_pages(aligned, page_count)?;
        let value = &mut *((virt + offset) as *mut T);

        Ok(EarlyMapping { value, page_count })
    }

    pub unsafe fn map_slice(
        physical: PhysicalAddress,
        len: usize,
    ) -> Result<EarlyMapping<'a, [T]>, Error> {
        let layout = Layout::array::<T>(len).unwrap();
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;

        let virt = map_early_pages(aligned, page_count)?;
        let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);

        Ok(EarlyMapping { value, page_count })
    }
}

impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
    fn drop(&mut self) {
        let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);

        for i in 0..self.page_count {
            let page = address + i * L3::SIZE;

            unsafe {
                unmap_early_page(page);
            }
        }
    }
}
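Note: the page_count expression is a ceiling division: the mapping must cover the misaligned head (offset into the first page) plus the object itself, rounded up to whole 4 KiB pages. For example:

    // A 4100-byte object starting 8 bytes into a page still needs two pages:
    // ceil((8 + 4100) / 4096) = 2.
    let (offset, size) = (8usize, 4100usize);
    let page_count = (offset + size + 4096 - 1) / 4096;
    assert_eq!(page_count, 2);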
fn kernel_table_flags() -> PageAttributes {
    PageAttributes::TABLE
        | PageAttributes::ACCESS
        | PageAttributes::SH_INNER
        | PageAttributes::PAGE_ATTR_NORMAL
        | PageAttributes::PRESENT
}

fn ram_block_flags() -> PageAttributes {
    // TODO UXN, PXN
    PageAttributes::BLOCK
        | PageAttributes::ACCESS
        | PageAttributes::SH_INNER
        | PageAttributes::PAGE_ATTR_NORMAL
        | PageAttributes::PRESENT
}

// Early mappings
unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usize, Error> {
    for l3i in 0..512 {
        let mut taken = false;
        for i in 0..count {
            if EARLY_MAPPING_L3[i + l3i].is_present() {
                taken = true;
                break;
            }
        }

        if taken {
            continue;
        }

        for i in 0..count {
            let page = physical.add(i * L3::SIZE);
            // TODO NX, NC
            EARLY_MAPPING_L3[i + l3i] = PageEntry::normal_page(page, PageAttributes::empty());
        }

        return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}

unsafe fn unmap_early_page(address: usize) {
    if address < EARLY_MAPPING_OFFSET || address >= EARLY_MAPPING_OFFSET + L2::SIZE {
        panic!("Tried to unmap invalid early mapping: {:#x}", address);
    }

    let l3i = L3::index(address - EARLY_MAPPING_OFFSET);

    assert!(EARLY_MAPPING_L3[l3i].is_present());
    EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;

    // TODO invalidate tlb
}

pub(super) unsafe fn map_ram_l1(index: usize) {
    if index >= RAM_MAPPING_L1_COUNT {
        loop {}
    }
    assert_eq!(KERNEL_TABLES.l1.data[index + RAM_MAPPING_START_L1I], 0);

    KERNEL_TABLES.l1.data[index + RAM_MAPPING_START_L1I] =
        ((index * L1::SIZE) as u64) | ram_block_flags().bits();
}

pub(super) unsafe fn map_heap_l2(index: usize, page: PhysicalAddress) {
    if index >= 512 {
        loop {}
    }
    assert!(!HEAP_MAPPING_L2[index].is_present());
    // TODO UXN, PXN
    HEAP_MAPPING_L2[index] = PageEntry::normal_block(page, PageAttributes::empty());
}

// Device mappings
unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
    // TODO don't map pages if already mapped

    'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            // TODO NX, NC
            DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::device_page(base.add(j * L3::SIZE));
        }

        return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}

unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
    'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
        for j in 0..count {
            if DEVICE_MAPPING_L2[i + j].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            DEVICE_MAPPING_L2[i + j] = PageEntry::<L2>::device_block(base.add(j * L2::SIZE));
        }

        debugln!(
            "map l2s: base={:#x}, count={} -> {:#x}",
            base,
            count,
            DEVICE_MAPPING_OFFSET + i * L2::SIZE
        );
        return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
    }

    Err(Error::OutOfMemory)
}

pub(super) unsafe fn map_device_memory(
    base: PhysicalAddress,
    size: usize,
) -> Result<RawDeviceMemoryMapping, Error> {
    // debugln!("Map {}B @ {:#x}", size, base);
    let l3_aligned = base.page_align_down::<L3>();
    let l3_offset = L3::page_offset(base.into_raw());
    let page_count = (l3_offset + size + L3::SIZE - 1) / L3::SIZE;

    if page_count > 256 {
        // Large mapping, use L2 mapping instead
        let l2_aligned = base.page_align_down::<L2>();
        let l2_offset = L2::page_offset(base.into_raw());
        let page_count = (l2_offset + size + L2::SIZE - 1) / L2::SIZE;

        let base_address = map_device_memory_l2(l2_aligned, page_count)?;
        let address = base_address + l2_offset;

        Ok(RawDeviceMemoryMapping {
            address,
            base_address,
            page_count,
            page_size: L2::SIZE,
        })
    } else {
        // Just map the pages directly
        let base_address = map_device_memory_l3(l3_aligned, page_count)?;
        let address = base_address + l3_offset;

        Ok(RawDeviceMemoryMapping {
            address,
            base_address,
            page_count,
            page_size: L3::SIZE,
        })
    }
}

pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
    // debugln!(
    //     "Unmap {}B @ {:#x}",
    //     map.page_count * map.page_size,
    //     map.base_address
    // );
    match map.page_size {
        L3::SIZE => {
            for i in 0..map.page_count {
                let page = map.base_address + i * L3::SIZE;
                let l2i = L2::index(page);
                let l3i = L3::index(page);
                assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
                // TODO flush the TLB entry
                loop {}
                // intrinsics::flush_tlb_entry(page);
            }
        }
        L2::SIZE => todo!(),
        _ => unimplemented!(),
    }
}

/// (BSP-early init) loads precomputed kernel mapping tables for the kernel to jump to "higher-half"
///
/// # Safety
///
/// Unsafe, must only be called by BSP during its early init while still in "lower-half"
pub(super) unsafe fn load_fixed_tables() {
    let ttbr0 = KERNEL_TABLES.l1.data.as_ptr() as u64;
    TTBR0_EL1.set(ttbr0);
    TTBR1_EL1.set(ttbr0);
}

/// Sets up additional translation tables for kernel usage
///
/// # Safety
///
/// Unsafe, must only be called by BSP during its early init, must already be in "higher-half"
pub(super) unsafe fn init_fixed_tables() {
    // TODO this could be built in compile-time too?
    let early_mapping_l3_phys = &EARLY_MAPPING_L3 as *const _ as usize - KERNEL_VIRT_OFFSET;
    let device_mapping_l2_phys = &DEVICE_MAPPING_L2 as *const _ as usize - KERNEL_VIRT_OFFSET;
    let heap_mapping_l2_phys = &HEAP_MAPPING_L2 as *const _ as usize - KERNEL_VIRT_OFFSET;

    for i in 0..DEVICE_MAPPING_L3_COUNT {
        let device_mapping_l3_phys = PhysicalAddress::from_raw(
            &DEVICE_MAPPING_L3S[i] as *const _ as usize - KERNEL_VIRT_OFFSET,
        );
        DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
    }

    assert_eq!(KERNEL_TABLES.l2.data[EARLY_MAPPING_L2I], 0);
    KERNEL_TABLES.l2.data[EARLY_MAPPING_L2I] =
        (early_mapping_l3_phys as u64) | kernel_table_flags().bits();

    assert_eq!(KERNEL_TABLES.l1.data[HEAP_MAPPING_L1I], 0);
    KERNEL_TABLES.l1.data[HEAP_MAPPING_L1I] =
        (heap_mapping_l2_phys as u64) | kernel_table_flags().bits();

    assert_eq!(KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I], 0);
    KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I] =
        (device_mapping_l2_phys as u64) | kernel_table_flags().bits();
}
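Note: all runtime regions hang off fixed indices next to the kernel image (L1 index 1), so every virtual base is a compile-time constant. With L1::SIZE = 1 GiB, the layout works out as follows (illustrative arithmetic, not from the commit):

    const GIB: usize = 1 << 30;
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;

    // L1 index 1 holds the kernel image; heap, device MMIO and the
    // 16 GiB linear RAM window follow at the next 1 GiB slots.
    let heap_base = KERNEL_VIRT_OFFSET | (2 * GIB);
    let device_base = KERNEL_VIRT_OFFSET | (3 * GIB);
    let ram_base = KERNEL_VIRT_OFFSET | (4 * GIB);
    assert!(heap_base < device_base && device_base < ram_base);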
src/arch/aarch64/mem/process.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
use core::sync::atomic::{AtomicU8, Ordering};

use abi::error::Error;

use crate::mem::{
    address::{AsPhysicalAddress, IntoRaw},
    phys,
    pointer::PhysicalRefMut,
    process::ProcessAddressSpaceManager,
    table::{EntryLevel, MapAttributes, NextPageTable},
    PhysicalAddress,
};

use super::table::{PageEntry, PageTable, L1, L2, L3};

#[repr(C)]
pub struct ProcessAddressSpaceImpl {
    l1: PhysicalRefMut<'static, PageTable<L1>>,
    asid: u8,
}

impl ProcessAddressSpaceManager for ProcessAddressSpaceImpl {
    const PAGE_SIZE: usize = L3::SIZE;
    const LOWER_LIMIT_PFN: usize = 8;
    // 16GiB VM limit
    const UPPER_LIMIT_PFN: usize = (16 << 30) / Self::PAGE_SIZE;

    fn new() -> Result<Self, Error> {
        static LAST_ASID: AtomicU8 = AtomicU8::new(1);

        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);

        let mut l1 = unsafe { PhysicalRefMut::<'static, PageTable<L1>>::map(phys::alloc_page()?) };

        for i in 0..512 {
            l1[i] = PageEntry::INVALID;
        }

        Ok(Self { l1, asid })
    }

    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
        self.read_l3_entry(address).ok_or(Error::DoesNotExist)
    }

    unsafe fn map_page(
        &mut self,
        address: usize,
        physical: PhysicalAddress,
        flags: MapAttributes,
    ) -> Result<(), Error> {
        self.write_l3_entry(
            address,
            PageEntry::normal_page(physical, flags.into()),
            false,
        )
    }

    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
        self.pop_l3_entry(address)
    }

    fn as_address_with_asid(&self) -> u64 {
        unsafe { u64::from(self.l1.as_physical_address()) | ((self.asid as u64) << 48) }
    }
}

impl ProcessAddressSpaceImpl {
    // Write a single 4KiB entry
    fn write_l3_entry(
        &mut self,
        virt: usize,
        entry: PageEntry<L3>,
        overwrite: bool,
    ) -> Result<(), Error> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let mut l2 = self.l1.get_mut_or_alloc(l1i)?;
        let mut l3 = l2.get_mut_or_alloc(l2i)?;

        if l3[l3i].is_present() && !overwrite {
            todo!();
        }

        l3[l3i] = entry;
        tlb_flush_vaae1(virt);

        Ok(())
    }

    fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        // TODO somehow drop tables if they're known to be empty?
        let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;

        let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;

        l3[l3i] = PageEntry::INVALID;
        tlb_flush_vaae1(virt);

        Ok(page)
    }

    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l2 = self.l1.get(l1i)?;
        let l3 = l2.get(l2i)?;

        let page = l3[l3i].as_page()?;

        Some((page, l3[l3i].attributes().into()))
    }
}

#[inline]
fn tlb_flush_vaae1(mut page: usize) {
    page >>= 12;
    unsafe {
        core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
    }
}
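Note: as_address_with_asid packs the 8-bit ASID into the upper ASID bits of the TTBR0 value (the field starts at bit 48) next to the table's physical base, so a context switch can change translation and ASID in one register write. Sketch:

    // Compose a TTBR0_EL1 value: table base in the low bits, ASID at bit 48.
    fn ttbr0_value(l1_phys: u64, asid: u8) -> u64 {
        l1_phys | ((asid as u64) << 48)
    }
    assert_eq!(ttbr0_value(0x4_2000, 1), 0x0001_0000_0004_2000);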
src/arch/aarch64/mem/table.rs (new file, 295 lines)
@@ -0,0 +1,295 @@
use core::{
    marker::PhantomData,
    ops::{Index, IndexMut},
};

use abi::error::Error;
use bitflags::bitflags;

use crate::mem::{
    address::{AsPhysicalAddress, FromRaw, IntoRaw},
    phys,
    pointer::{PhysicalRef, PhysicalRefMut},
    table::{EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel},
    PhysicalAddress,
};

bitflags! {
    #[derive(Clone, Copy, PartialEq, Eq)]
    pub struct PageAttributes: u64 {
        const PRESENT = 1 << 0;

        const TABLE = 1 << 1;
        const PAGE = 1 << 1;
        const BLOCK = 0 << 1;

        const ACCESS = 1 << 10;

        const AP_KERNEL_READWRITE = 0 << 6;
        const AP_BOTH_READWRITE = 1 << 6;
        const AP_KERNEL_READONLY = 2 << 6;
        const AP_BOTH_READONLY = 3 << 6;
        const AP_ACCESS_MASK = 3 << 6;

        const SH_OUTER = 2 << 8;
        const SH_INNER = 3 << 8;

        const PAGE_ATTR_NORMAL = 0 << 2;
        const PAGE_ATTR_DEVICE = 1 << 2;

        const NON_GLOBAL = 1 << 11;

        const PXN = 1 << 53;
        const UXN = 1 << 54;
    }
}

#[derive(Clone, Copy)]
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
    entries: [PageEntry<L>; 512],
}

#[derive(Clone, Copy)]
pub struct PageEntry<L: EntryLevel>(u64, PhantomData<L>);

#[derive(Clone, Copy)]
pub struct L1;
#[derive(Clone, Copy)]
pub struct L2;
#[derive(Clone, Copy)]
pub struct L3;

impl const EntryLevel for L1 {
    const SHIFT: usize = 30;
}

impl NonTerminalEntryLevel for L1 {
    type NextLevel = L2;
}

impl const EntryLevel for L2 {
    const SHIFT: usize = 21;
}

impl NonTerminalEntryLevel for L2 {
    type NextLevel = L3;
}

impl const EntryLevel for L3 {
    const SHIFT: usize = 12;
}

impl<L: EntryLevel> PageTable<L> {
    pub const fn zeroed() -> Self {
        Self {
            entries: [PageEntry::INVALID; 512],
        }
    }

    pub fn new_zeroed<'a>() -> Result<PhysicalRefMut<'a, Self>, Error> {
        let physical = phys::alloc_page()?;
        let mut table = unsafe { PhysicalRefMut::<'a, Self>::map(physical) };

        for i in 0..512 {
            table[i] = PageEntry::INVALID;
        }

        Ok(table)
    }
}

impl<L: EntryLevel> PageEntry<L> {
    pub const INVALID: Self = Self(0, PhantomData);

    pub const fn is_present(self) -> bool {
        self.0 & PageAttributes::PRESENT.bits() != 0
    }

    pub fn attributes(self) -> PageAttributes {
        PageAttributes::from_bits_retain(self.0)
    }
}

impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
    type NextLevel = PageTable<L::NextLevel>;
    type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>>;
    type TableRefMut = PhysicalRefMut<'static, PageTable<L::NextLevel>>;

    fn get(&self, index: usize) -> Option<Self::TableRef> {
        self[index]
            .as_table()
            .map(|phys| unsafe { PhysicalRef::map(phys) })
    }

    fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
        self[index]
            .as_table()
            .map(|phys| unsafe { PhysicalRefMut::map(phys) })
    }

    fn get_mut_or_alloc(&mut self, index: usize) -> Result<Self::TableRefMut, Error> {
        let entry = self[index];

        if let Some(table) = entry.as_table() {
            Ok(unsafe { PhysicalRefMut::map(table) })
        } else {
            let table = PageTable::new_zeroed()?;
            self[index] = PageEntry::<L>::table(
                unsafe { table.as_physical_address() },
                PageAttributes::empty(),
            );
            Ok(table)
        }
    }
}

impl<L: NonTerminalEntryLevel> PageEntry<L> {
    pub fn table(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            IntoRaw::<u64>::into_raw(phys)
                | (PageAttributes::TABLE | PageAttributes::PRESENT | attrs).bits(),
            PhantomData,
        )
    }

    pub fn normal_block(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            IntoRaw::<u64>::into_raw(phys)
                | (PageAttributes::BLOCK
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_INNER
                    | PageAttributes::PAGE_ATTR_NORMAL
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    pub fn device_block(phys: PhysicalAddress) -> Self {
        Self(
            IntoRaw::<u64>::into_raw(phys)
                | (PageAttributes::BLOCK
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_OUTER
                    | PageAttributes::PAGE_ATTR_DEVICE
                    | PageAttributes::UXN
                    | PageAttributes::PXN)
                    .bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the table this entry refers to, returning None if it
    /// does not
    pub fn as_table(self) -> Option<PhysicalAddress> {
        if self.0 & PageAttributes::PRESENT.bits() != 0
            && self.0 & PageAttributes::BLOCK.bits() == 0
        {
            Some(PhysicalAddress::from_raw(self.0 & !0xFFF))
        } else {
            None
        }
    }
}

impl PageEntry<L3> {
    pub fn normal_page(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            IntoRaw::<u64>::into_raw(phys)
                | (PageAttributes::PAGE
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_INNER
                    | PageAttributes::PAGE_ATTR_NORMAL
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    pub fn device_page(phys: PhysicalAddress) -> Self {
        Self(
            IntoRaw::<u64>::into_raw(phys)
                | (PageAttributes::PAGE
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_OUTER
                    | PageAttributes::PAGE_ATTR_DEVICE
                    | PageAttributes::UXN
                    | PageAttributes::PXN)
                    .bits(),
            PhantomData,
        )
    }

    pub fn as_page(&self) -> Option<PhysicalAddress> {
        let mask = (PageAttributes::PRESENT | PageAttributes::PAGE).bits();
        if self.0 & mask == mask {
            Some(PhysicalAddress::from_raw(self.0 & !0xFFF))
        } else {
            None
        }
    }
}

impl<L: EntryLevel> Index<usize> for PageTable<L> {
    type Output = PageEntry<L>;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl From<MapAttributes> for PageAttributes {
    fn from(value: MapAttributes) -> Self {
        let mut out = PageAttributes::empty();
        // TODO kernel cannot write el0 readonly pages
        if value.contains(MapAttributes::USER_WRITE) {
            // Read/write
            out |= PageAttributes::AP_BOTH_READWRITE;
        } else if value.contains(MapAttributes::USER_READ) {
            // Read only
            out |= PageAttributes::AP_BOTH_READONLY;
        } else {
            // No read/write
            out |= PageAttributes::AP_KERNEL_READONLY;
        }

        if value.contains(MapAttributes::NON_GLOBAL) {
            out |= PageAttributes::NON_GLOBAL;
        }

        out
    }
}

impl From<PageAttributes> for MapAttributes {
    fn from(value: PageAttributes) -> Self {
        let mut out = MapAttributes::empty();

        out |= match value.intersection(PageAttributes::AP_ACCESS_MASK) {
            PageAttributes::AP_BOTH_READWRITE => {
                MapAttributes::USER_WRITE | MapAttributes::USER_READ
            }
            PageAttributes::AP_BOTH_READONLY => MapAttributes::USER_READ,
            PageAttributes::AP_KERNEL_READONLY => MapAttributes::empty(),
            PageAttributes::AP_KERNEL_READWRITE => panic!("This variant cannot be constructed"),
            _ => unreachable!(),
        };

        if value.contains(PageAttributes::NON_GLOBAL) {
            out |= MapAttributes::NON_GLOBAL;
        }

        out
    }
}
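Note: one thing worth flagging for review: BLOCK is defined as 0 << 1, i.e. an empty bitflag, so the `self.0 & PageAttributes::BLOCK.bits() == 0` test in as_table is vacuously true. In the ARMv8 VMSA descriptor format the table-vs-block distinction is bit 1 (1 = table/page, 0 = block), so the intended check, written over plain bits, would look like:

    // Bit 0 = valid, bit 1 = table (at L1/L2) or page (at L3) vs block.
    fn is_table_descriptor(raw: u64) -> bool {
        raw & 0b11 == 0b11
    }
    fn is_block_descriptor(raw: u64) -> bool {
        raw & 0b11 == 0b01
    }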
src/arch/aarch64/mod.rs:
@@ -1,49 +1,47 @@
 //! AArch64 architecture and platforms implementation
 
+pub mod mem;
+
 use core::sync::atomic::Ordering;
 
-use aarch64_cpu::{
-    asm::barrier,
-    registers::{
-        CNTP_CTL_EL0, CNTP_TVAL_EL0, DAIF, ID_AA64MMFR0_EL1, MAIR_EL1, SCTLR_EL1, TCR_EL1,
-        TTBR0_EL1, TTBR1_EL1,
-    },
-};
+use aarch64_cpu::registers::{CNTP_CTL_EL0, CNTP_TVAL_EL0, DAIF};
 use abi::error::Error;
 use device_api::{
     interrupt::{ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController},
     timer::MonotonicTimestampProviderDevice,
     ResetDevice,
 };
-use fdt_rs::prelude::PropReader;
+use fdt_rs::base::DevTree;
 use git_version::git_version;
 use kernel_util::util::OneTimeInit;
 use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
 
 use crate::{
-    arch::Architecture,
+    arch::aarch64::{
+        cpu::Cpu,
+        mem::table::{L2, L3},
+    },
     debug,
     device::{
         self,
-        devtree::{
-            self, DevTreeIndexNodePropGet, DevTreeIndexPropExt, DevTreeNodeInfo, DeviceTree,
-            FdtMemoryRegionIter,
-        },
+        devtree::{self, DevTreeIndexPropExt, DevTreeNodeInfo, DeviceTree, FdtMemoryRegionIter},
        power::arm_psci::Psci,
     },
     fs::{Initrd, INITRD_DATA},
     mem::{
+        address::{FromRaw, IntoRaw},
+        device::RawDeviceMemoryMapping,
         heap,
         phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
-        ConvertAddress,
+        pointer::PhysicalRef,
+        table::EntryLevel,
+        PhysicalAddress,
     },
 };
 
-use self::{
-    smp::CPU_COUNT,
-    table::{init_fixed_tables, KERNEL_TABLES},
-};
+use self::mem::{table::L1, EarlyMapping};
 
-use super::CpuMessage;
+use super::{Architecture, CpuMessage};
 
 pub mod boot;
 pub mod context;
@@ -51,111 +49,44 @@ pub mod cpu;
 pub mod exception;
 pub mod gic;
 pub mod smp;
-pub mod table;
 pub mod timer;
 
-const BOOT_STACK_SIZE: usize = 65536;
+const BOOT_STACK_SIZE: usize = 4096 * 32;
 
 #[derive(Clone, Copy)]
 #[repr(C, align(0x20))]
-struct KernelStack {
+struct BootStack {
     data: [u8; BOOT_STACK_SIZE],
 }
 
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub enum IrqNumber {
     Private(u32),
     Shared(u32),
 }
 
 /// AArch64 platform interface
 pub struct AArch64 {
     dt: OneTimeInit<DeviceTree<'static>>,
-    ext_intc: OneTimeInit<&'static dyn ExternalInterruptController<IrqNumber = IrqNumber>>,
-    local_intc: OneTimeInit<&'static dyn LocalInterruptController<IpiMessage = CpuMessage>>,
-    mtimer: OneTimeInit<&'static dyn MonotonicTimestampProviderDevice>,
-
-    pub psci: OneTimeInit<&'static Psci>,
     reset: OneTimeInit<&'static dyn ResetDevice>,
 
     // ARM-only devices
+    /// ARM PSCI instance on this system (there may not be one)
+    pub psci: OneTimeInit<&'static Psci>,
+    lintc: OneTimeInit<&'static dyn LocalInterruptController<IpiMessage = CpuMessage>>,
+    xintc: OneTimeInit<&'static dyn ExternalInterruptController<IrqNumber = IrqNumber>>,
+
+    mtimer: OneTimeInit<&'static dyn MonotonicTimestampProviderDevice>,
+
+    initrd: OneTimeInit<PhysicalRef<'static, [u8]>>,
 }
 
 /// Global platform handle
 pub static ARCHITECTURE: AArch64 = AArch64 {
     dt: OneTimeInit::new(),
-    ext_intc: OneTimeInit::new(),
-    local_intc: OneTimeInit::new(),
     mtimer: OneTimeInit::new(),
     reset: OneTimeInit::new(),
 
     // ARM-only devices
     psci: OneTimeInit::new(),
 };
 
 impl Architecture for AArch64 {
-    type IrqNumber = IrqNumber;
-
     const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
 
-    unsafe fn init_mmu(&self, bsp: bool) {
-        if bsp {
-            init_fixed_tables();
-        }
-
-        let tables_phys = absolute_address!(KERNEL_TABLES).physicalize() as u64;
-
-        if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::Supported) {
-            todo!();
-        }
-
-        MAIR_EL1.write(
-            // Attribute 0 -- normal memory
-            MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc +
-            MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc +
-            // Attribute 1 -- device memory
-            MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck,
-        );
-
-        TCR_EL1.modify(
-            // General
-            TCR_EL1::IPS::Bits_48 +
-            // TTBR0
-            TCR_EL1::TG0::KiB_4 + TCR_EL1::T0SZ.val(25) + TCR_EL1::SH0::Inner +
-            // TTBR1
-            TCR_EL1::TG1::KiB_4 + TCR_EL1::T1SZ.val(25) + TCR_EL1::SH1::Outer,
-        );
-
-        barrier::dmb(barrier::ISH);
-
-        TTBR0_EL1.set_baddr(tables_phys);
-        TTBR1_EL1.set_baddr(tables_phys);
-
-        barrier::isb(barrier::SY);
-
-        // Enable instruction cache, data cache and translation
-        SCTLR_EL1
-            .modify(SCTLR_EL1::M::Enable + SCTLR_EL1::I::NonCacheable + SCTLR_EL1::C::NonCacheable);
-
-        barrier::isb(barrier::SY);
-    }
+    type IrqNumber = IrqNumber;
 
     unsafe fn start_application_processors(&self) {
         let dt = self.dt.get();
-        if let Err(e) = smp::start_ap_cores(dt) {
-            errorln!(
-                "Could not initialize AP CPUs: {:?}. Will continue with one CPU.",
-                e
-            );
+        if let Err(error) = smp::start_ap_cores(dt) {
+            errorln!("Could not initialize AP CPUs: {:?}", error);
         }
     }
 
-    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error> {
-        unsafe { KERNEL_TABLES.map_device_pages(phys, count) }
-    }
-
-    fn wait_for_interrupt() {
-        aarch64_cpu::asm::wfi();
+    fn cpu_count() -> usize {
+        smp::CPU_COUNT.load(Ordering::Acquire)
     }
 
     unsafe fn set_interrupt_mask(mask: bool) {
@@ -170,26 +101,98 @@ impl Architecture for AArch64 {
         DAIF.read(DAIF::I) != 0
     }
 
-    fn cpu_count() -> usize {
-        CPU_COUNT.load(Ordering::Acquire)
+    fn wait_for_interrupt() {
+        aarch64_cpu::asm::wfi();
     }
 
-    fn register_external_interrupt_controller(
+    unsafe fn map_device_memory(
         &self,
-        intc: &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber>,
+        base: PhysicalAddress,
+        size: usize,
+    ) -> Result<RawDeviceMemoryMapping, Error> {
+        mem::map_device_memory(base, size)
+    }
+
+    unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping) {
+        mem::unmap_device_memory(map)
+    }
+
+    fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
+        &self,
+        _it: I,
+        _memory_start: PhysicalAddress,
+        memory_end: PhysicalAddress,
    ) -> Result<(), Error> {
-        self.ext_intc.init(intc);
+        let end_l1i = L1::index(memory_end.page_align_up::<L1>().into_raw());
+        if end_l1i > mem::RAM_MAPPING_L1_COUNT {
+            loop {}
+        }
+
+        // Map 1GiB chunks
+        for index in 0..end_l1i {
+            unsafe {
+                mem::map_ram_l1(index);
+            }
+        }
+
+        mem::MEMORY_LIMIT.init(memory_end.into_raw());
+
         Ok(())
     }
+
+    fn virtualize(address: PhysicalAddress) -> Result<usize, Error> {
+        let raw: usize = address.into_raw();
+        if raw < *mem::MEMORY_LIMIT.get() {
+            Ok(raw + mem::RAM_MAPPING_OFFSET)
+        } else {
+            errorln!("Invalid physical address: {:#x}", address);
+            Err(Error::InvalidMemoryOperation)
+        }
+    }
+
+    fn physicalize(address: usize) -> Result<PhysicalAddress, Error> {
+        if address < mem::RAM_MAPPING_OFFSET
+            || address - mem::RAM_MAPPING_OFFSET >= *mem::MEMORY_LIMIT.get()
+        {
+            errorln!("Not a virtualized physical address: {:#x}", address);
+            return Err(Error::InvalidMemoryOperation);
+        }
+
+        Ok(PhysicalAddress::from_raw(address - mem::RAM_MAPPING_OFFSET))
+    }
||||
fn local_interrupt_controller(
|
||||
&'static self,
|
||||
) -> &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage> {
|
||||
*self.lintc.get()
|
||||
}
|
||||
|
||||
fn external_interrupt_controller(
|
||||
&'static self,
|
||||
) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> {
|
||||
*self.xintc.get()
|
||||
}
|
||||
|
||||
fn register_local_interrupt_controller(
|
||||
&self,
|
||||
intc: &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage>,
|
||||
) -> Result<(), Error> {
|
||||
self.local_intc.init(intc);
|
||||
self.lintc.init(intc);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn register_external_interrupt_controller(
|
||||
&self,
|
||||
intc: &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber>,
|
||||
) -> Result<(), Error> {
|
||||
self.xintc.init(intc);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice {
|
||||
*self.mtimer.get()
|
||||
}
|
||||
|
||||
fn register_monotonic_timer(
|
||||
&self,
|
||||
timer: &'static dyn MonotonicTimestampProviderDevice,
|
||||
@ -198,107 +201,143 @@ impl Architecture for AArch64 {
        Ok(())
    }

    fn register_reset_device(&self, reset: &'static dyn ResetDevice) -> Result<(), Error> {
        self.reset.init(reset);
        Ok(())
    }

    fn external_interrupt_controller(
        &self,
    ) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> {
        *self.ext_intc.get()
    }

    fn local_interrupt_controller(
        &self,
    ) -> &'static dyn LocalInterruptController<IpiMessage = super::CpuMessage> {
        *self.local_intc.get()
    }

    fn monotonic_timer(&self) -> &'static dyn MonotonicTimestampProviderDevice {
        *self.mtimer.get()
    }

    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error> {
        if let Some(local_intc) = self.local_intc.try_get() {
        if let Some(local_intc) = self.lintc.try_get() {
            local_intc.send_ipi(target, msg)
        } else {
            Ok(())
        }
    }

    fn register_reset_device(&self, reset: &'static dyn ResetDevice) -> Result<(), Error> {
        self.reset.init(reset);
        Ok(())
    }

    unsafe fn reset(&self) -> ! {
        if let Some(reset) = self.reset.try_get() {
            reset.reset();
            reset.reset()
        } else {
            let psci = self.psci.get();
            psci.reset();
            psci.reset()
        }
    }
}

impl AArch64 {
    /// Initializes the architecture's device tree
    ///
    /// # Safety
    ///
    /// Only makes sense to call during the early initialization, once.
    pub unsafe fn init_device_tree(&self, dtb_phys: usize) {
        let dt = DeviceTree::from_addr(dtb_phys.virtualize());
        self.dt.init(dt);
    fn extract_initrd_from_dt(
        &self,
        dt: &DeviceTree,
    ) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let chosen = dt.node_by_path("/chosen")?;
        let initrd_start = devtree::find_prop(&chosen, "linux,initrd-start")?;
        let initrd_end = devtree::find_prop(&chosen, "linux,initrd-end")?;

        let address_cells = dt.address_cells();

        let initrd_start = initrd_start.cell1_array_item(0, address_cells)?;
        let initrd_end = initrd_end.cell1_array_item(0, address_cells)?;

        let initrd_start = PhysicalAddress::from_raw(initrd_start);
        let initrd_end = PhysicalAddress::from_raw(initrd_end);

        Some((initrd_start, initrd_end))
    }

    /// Returns the device tree
    ///
    /// # Panics
    ///
    /// Will panic if the device tree has not yet been initialized
    pub fn device_tree(&self) -> &DeviceTree {
        self.dt.get()
    }
    unsafe fn init_memory_management(&'static self, dtb: PhysicalAddress) -> Result<(), Error> {
        // 16x2MiB
        const HEAP_PAGES: usize = 16;

    unsafe fn init_physical_memory(&self, dtb_phys: usize) -> Result<(), Error> {
        let dt = self.device_tree();
        // Initialize the runtime mappings
        mem::init_fixed_tables();

        if let Some(initrd) = INITRD_DATA.try_get() {
            reserve_region(
                "initrd",
                PhysicalMemoryRegion {
                    base: initrd.phys_page_start,
                    size: initrd.phys_page_len,
                },
            );
        }
        // Extract the size of the device tree
        let dtb_size = {
            let dtb_header = EarlyMapping::<u8>::map_slice(dtb, DevTree::MIN_HEADER_SIZE)?;
            DevTree::read_totalsize(dtb_header.as_ref()).unwrap()
        };

        reserve_region(
            "dtb",
            PhysicalMemoryRegion {
                base: dtb_phys,
                size: (dt.size() + 0xFFF) & !0xFFF,
                base: dtb,
                size: (dtb_size + 0xFFF) & !0xFFF,
            },
        );

        let regions = FdtMemoryRegionIter::new(dt);
        let dtb_slice = EarlyMapping::<u8>::map_slice(dtb, dtb_size)?;

        phys::init_from_iter(regions)
        let dt = DeviceTree::from_addr(dtb_slice.as_ptr() as usize);

        // Setup initrd from the dt
        let initrd = self.extract_initrd_from_dt(&dt);

        if let Some((start, end)) = initrd {
            let aligned_start = start.page_align_down::<L3>();
            let aligned_end = end.page_align_up::<L3>();

            let size = aligned_end - aligned_start;
            reserve_region(
                "initrd",
                PhysicalMemoryRegion {
                    base: aligned_start,
                    size,
                },
            );
        }

    fn chosen_stdout_path<'a>(dt: &'a DeviceTree) -> Option<&'a str> {
        let chosen = dt.node_by_path("/chosen")?;
        chosen.prop("stdout-path")
        // Initialize the physical memory
        let regions = FdtMemoryRegionIter::new(&dt);

        phys::init_from_iter(regions)?;

        // Setup the heap
        for i in 0..HEAP_PAGES {
            let l2_page = phys::alloc_2m_page()?;
            mem::map_heap_l2(i, l2_page);
        }

    fn init_platform(&self, bsp: bool) {
        if bsp {
            let dt = self.device_tree();
        heap::init_heap(mem::HEAP_MAPPING_OFFSET, HEAP_PAGES * L2::SIZE);

        // EarlyMapping for DTB no longer needed, it lives in physical memory and can be obtained
        // through PhysicalRef
        let dtb_slice: PhysicalRef<'static, [u8]> = PhysicalRef::map_slice(dtb, dtb_size);
        let dt = DeviceTree::from_addr(dtb_slice.as_ptr() as usize);

        self.dt.init(dt);

        // Setup initrd
        if let Some((initrd_start, initrd_end)) = initrd {
            let aligned_start = initrd_start.page_align_down::<L3>();
            let aligned_end = initrd_end.page_align_up::<L3>();
            let len = initrd_end - initrd_start;

            let data = unsafe { PhysicalRef::map_slice(initrd_start, len) };
            self.initrd.init(data);

            INITRD_DATA.init(Initrd {
                phys_page_start: aligned_start,
                phys_page_len: aligned_end - aligned_start,
                data: self.initrd.get().as_ref(),
            });
        }

        Ok(())
    }

    unsafe fn init_platform(&self, is_bsp: bool) -> Result<(), Error> {
        Cpu::init_local();

        if is_bsp {
            let dt = self.dt.get();

            let address_cells = dt.address_cells();
            let size_cells = dt.size_cells();

            let chosen_stdout_path = Self::chosen_stdout_path(dt);
            // Setup /chosen.stdout-path to get early debug printing
            let chosen_stdout_path = dt.chosen_stdout_path();
            let chosen_stdout = chosen_stdout_path.and_then(|path| dt.node_by_path(path));

            // Probe and initialize the /chosen.stdout-path device first
            if let Some(node) = chosen_stdout.clone() {
                let probe = DevTreeNodeInfo {
                    address_cells,
@ -307,15 +346,12 @@ impl AArch64 {
                };

                if let Some((device, _)) = devtree::probe_dt_node(&probe) {
                    unsafe {
                        device.init().unwrap();
                        device.init()?;
                    }
                }
            };

            debug::reset();
            debug::init();

            // Print some stuff now that the output is initialized
            infoln!(
                "Yggdrasil v{} ({})",
                env!("CARGO_PKG_VERSION"),
@ -323,23 +359,20 @@ impl AArch64 {
            );
            infoln!("Initializing aarch64 platform");

            // Probe and initialize the rest of devices
            let nodes = dt.root().children();
            if let Err(error) = devtree::enumerate_dt(
                address_cells,
                size_cells,
                nodes,
                |_, probe| {
                    // Ignore /chosen/stdout-path node
                    // Skip chosen-stdout, already initialized
                    if let Some(ref chosen_stdout) = chosen_stdout && chosen_stdout.name() == probe.node.name() {
                        return Ok(());
                    }

                    if let Some((device, _)) = devtree::probe_dt_node(&probe) {
                        unsafe {
                            device.init()?;
                        }
                    }

                    Ok(())
                },
@ -353,15 +386,14 @@ impl AArch64 {
            // Initialize IRQs for the devices
            device::manager_lock().devices().for_each(|dev| unsafe {
                if let Err(error) = dev.init_irq() {
                    errorln!(
                        "Could not init interrupts for {:?}: {:?}",
                    warnln!(
                        "Could not init IRQs for {:?}: {:?}",
                        dev.display_name(),
                        error
                    );
                }
            });

            // Print the device list
            infoln!("Enumerated devices:");
            device::manager_lock().devices().for_each(|dev| {
                infoln!("* {:?}", dev.display_name());
@ -369,7 +401,7 @@ impl AArch64 {
        } else {
            // BSP already initialized everything needed
            // Setup timer and local interrupt controller
            let intc = self.local_intc.get();
            let intc = self.lintc.get();

            unsafe {
                intc.init_ap().unwrap();
@ -378,57 +410,28 @@ impl AArch64 {
        // TODO device-tree initialization for this
        CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::CLEAR);
        CNTP_TVAL_EL0.set(10000000);
        self.ext_intc
            .get()
            .enable_irq(IrqNumber::Private(14))
            .unwrap();
    }
}
        self.xintc.get().enable_irq(IrqNumber::Private(14)).unwrap();
    }

fn setup_initrd() {
    let dt = ARCHITECTURE.device_tree();

    let Some(chosen) = dt.node_by_path("/chosen") else {
        return;
    };

    let Some(initrd_start) = devtree::find_prop(&chosen, "linux,initrd-start") else {
        return;
    };
    let Some(initrd_end) = devtree::find_prop(&chosen, "linux,initrd-end") else {
        return;
    };

    let address_cells = dt.address_cells();

    let Some(initrd_start) = initrd_start.cell1_array_item(0, address_cells) else {
        infoln!("No initrd specified");
        return;
    };
    let Some(initrd_end) = initrd_end.cell1_array_item(0, address_cells) else {
        infoln!("No initrd specified");
        return;
    };

    let initrd_start = initrd_start as usize;
    let initrd_end = initrd_end as usize;

    let start_aligned = initrd_start & !0xFFF;
    let end_aligned = initrd_end & !0xFFF;

    let data = unsafe {
        core::slice::from_raw_parts(
            initrd_start.virtualize() as *const _,
            initrd_end - initrd_start,
        )
    };

    let initrd = Initrd {
        phys_page_start: start_aligned,
        phys_page_len: end_aligned - start_aligned,
        data,
    };

    INITRD_DATA.init(initrd);
        Ok(())
    }
}

pub static ARCHITECTURE: AArch64 = AArch64 {
    dt: OneTimeInit::new(),
    initrd: OneTimeInit::new(),

    psci: OneTimeInit::new(),
    reset: OneTimeInit::new(),

    lintc: OneTimeInit::new(),
    xintc: OneTimeInit::new(),

    mtimer: OneTimeInit::new(),
};

#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IrqNumber {
    Private(u32),
    Shared(u32),
}

@ -1,103 +0,0 @@
//! Orange Pi 3 (Allwinner H6/sun50i-h6 SoC)
//!
//! # Booting using u-boot
//!
//! > fatload mmc 0:1 0x40000000 uRamdisk
//! > fatload mmc 0:1 0x4d000000 sun50i-h6-orangepi-3.dtb
//! > loady 0x44000000
//! ...
//! > bootm 0x44000000 0x40000000 0x4d000000
//!
use abi::error::Error;

use crate::{
    arch::CpuMessage,
    debug::{self, LogLevel},
    device::{
        interrupt::{ExternalInterruptController, InterruptSource, IpiDeliveryTarget},
        platform::Platform,
        timer::TimestampSource,
        InitializableDevice,
    },
    fs::devfs::{self, CharDeviceType},
};

use self::{r_wdog::RWatchdog, serial::Serial};

use super::{
    gic::{Gic, IrqNumber},
    timer::ArmTimer,
};

pub mod r_wdog;
pub mod serial;

/// Orange Pi 3 implementation
pub struct PlatformImpl {
    uart0: Serial,
    r_wdog: RWatchdog,
    /// ...
    pub gic: Gic,
    timer: ArmTimer,
}

impl Platform for PlatformImpl {
    type IrqNumber = IrqNumber;

    const KERNEL_PHYS_BASE: usize = 0x50000000;

    unsafe fn init(&'static self, is_bsp: bool) -> Result<(), Error> {
        if is_bsp {
            self.gic.init(())?;

            self.timer.init(())?;
            self.r_wdog.init(())?;

            self.timer.init_irq()?;
            self.uart0.init_irq()?;

            devfs::add_char_device(&self.uart0, CharDeviceType::TtySerial)?;
        } else {
            todo!();
        }

        Ok(())
    }

    unsafe fn init_debug(&'static self) {
        self.uart0.init(()).unwrap();
        debug::add_sink(&self.uart0, LogLevel::Debug);
    }

    fn interrupt_controller(
        &self,
    ) -> &dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> {
        &self.gic
    }

    fn timestamp_source(&self) -> &dyn TimestampSource {
        &self.timer
    }

    unsafe fn send_ipi(&self, _target: IpiDeliveryTarget, _msg: CpuMessage) -> Result<(), Error> {
        todo!();
    }

    fn name(&self) -> &'static str {
        "Orange Pi 3"
    }

    unsafe fn reset(&self) -> ! {
        self.r_wdog.reset_board();
    }
}

/// Orange Pi 3 platform implementation
pub static PLATFORM: PlatformImpl = unsafe {
    PlatformImpl {
        uart0: Serial::new(0x05000000, IrqNumber::new(32)),
        r_wdog: RWatchdog::new(0x07020400),
        gic: Gic::new(0x03021000, 0x03022000),
        timer: ArmTimer::new(IrqNumber::new(30)),
    }
};

@ -1,90 +0,0 @@
//! Allwinner H6 R Watchdog driver
use abi::error::Error;
use tock_registers::{
    interfaces::Writeable, register_bitfields, register_structs, registers::ReadWrite,
};

use crate::{
    device::InitializableDevice, mem::device::DeviceMemoryIo, sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

register_bitfields! {
    u32,
    CTRL [
        KEY OFFSET(1) NUMBITS(12) [
            Value = 0xA57
        ],
        RESTART OFFSET(0) NUMBITS(1) []
    ],
    CFG [
        CONFIG OFFSET(0) NUMBITS(2) [
            System = 1,
        ]
    ],
    MODE [
        EN OFFSET(0) NUMBITS(1) [],
    ]
}

register_structs! {
    #[allow(non_snake_case)]
    Regs {
        (0x00 => IRQ_EN: ReadWrite<u32>),
        (0x04 => IRQ_STA: ReadWrite<u32>),
        (0x08 => _0),
        (0x10 => CTRL: ReadWrite<u32, CTRL::Register>),
        (0x14 => CFG: ReadWrite<u32, CFG::Register>),
        (0x18 => MODE: ReadWrite<u32, MODE::Register>),
        (0x1C => @END),
    }
}

/// Allwinner H6 R Watchdog
pub struct RWatchdog {
    inner: OneTimeInit<IrqSafeSpinlock<DeviceMemoryIo<Regs>>>,
    base: usize,
}

impl InitializableDevice for RWatchdog {
    type Options = ();

    unsafe fn init(&self, _opts: Self::Options) -> Result<(), Error> {
        let regs = DeviceMemoryIo::map("r_wdog", self.base)?;

        self.inner.init(IrqSafeSpinlock::new(regs));

        Ok(())
    }
}

impl RWatchdog {
    /// Performs a reset through watchdog.
    ///
    /// # Safety
    ///
    /// Only meant to be called by platform reset code.
    pub unsafe fn reset_board(&self) -> ! {
        let regs = self.inner.get().lock();

        regs.CFG.write(CFG::CONFIG::System);
        regs.MODE.write(MODE::EN::SET);
        regs.CTRL.write(CTRL::KEY::Value + CTRL::RESTART::SET);

        loop {
            core::arch::asm!("wfe");
        }
    }

    /// Constructs an instance of the device at `base`.
    ///
    /// # Safety
    ///
    /// The caller must ensure the address is valid.
    pub const unsafe fn new(base: usize) -> Self {
        Self {
            inner: OneTimeInit::new(),
            base,
        }
    }
}

@ -1,160 +0,0 @@
//! Allwinner H6 UART driver
use abi::{error::Error, io::DeviceRequest};
use tock_registers::{
    interfaces::{Readable, Writeable},
    register_bitfields, register_structs,
    registers::{ReadOnly, ReadWrite},
};
use vfs::CharDevice;

use crate::{
    arch::aarch64::gic::IrqNumber,
    arch::PLATFORM,
    debug::DebugSink,
    device::{
        interrupt::InterruptSource,
        platform::Platform,
        serial::SerialDevice,
        tty::{CharRing, TtyDevice},
        Device, InitializableDevice,
    },
    mem::device::DeviceMemoryIo,
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

register_bitfields! {
    u32,
    USR [
        TFE OFFSET(2) NUMBITS(1) [],
        TFNF OFFSET(1) NUMBITS(1) []
    ]
}

register_structs! {
    #[allow(non_snake_case)]
    Regs {
        (0x00 => DLL: ReadWrite<u32>),
        (0x04 => _0),
        (0x7C => USR: ReadOnly<u32, USR::Register>),
        (0x80 => @END),
    }
}

struct Inner {
    regs: DeviceMemoryIo<Regs>,
}

/// Allwinner H6 UART
pub struct Serial {
    inner: OneTimeInit<IrqSafeSpinlock<Inner>>,
    ring: CharRing<16>,
    base: usize,
    irq: IrqNumber,
}

impl CharDevice for Serial {
    fn read(&'static self, _blocking: bool, data: &mut [u8]) -> Result<usize, Error> {
        self.line_read(data)
    }

    fn write(&self, _blocking: bool, data: &[u8]) -> Result<usize, Error> {
        self.line_write(data)
    }

    fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
        match req {
            &mut DeviceRequest::SetTerminalGroup(id) => {
                self.set_signal_group(id as _);
                Ok(())
            }
            _ => Err(Error::InvalidArgument),
        }
    }
}

impl TtyDevice<16> for Serial {
    fn ring(&self) -> &CharRing<16> {
        &self.ring
    }
}

impl InterruptSource for Serial {
    unsafe fn init_irq(&'static self) -> Result<(), Error> {
        let intc = PLATFORM.interrupt_controller();

        intc.register_handler(self.irq, self)?;
        intc.enable_irq(self.irq)?;

        Ok(())
    }

    fn handle_irq(&self) -> Result<bool, Error> {
        let byte = self.inner.get().lock().regs.DLL.get();

        self.recv_byte(byte as u8);

        Ok(true)
    }
}

impl DebugSink for Serial {
    fn putc(&self, c: u8) -> Result<(), Error> {
        self.send(c).ok();
        Ok(())
    }
}

impl SerialDevice for Serial {
    fn send(&self, byte: u8) -> Result<(), Error> {
        let inner = self.inner.get().lock();
        if byte == b'\n' {
            while inner.regs.USR.matches_all(USR::TFE::CLEAR) {
                core::hint::spin_loop();
            }
            inner.regs.DLL.set(b'\r' as u32);
        }
        while inner.regs.USR.matches_all(USR::TFE::CLEAR) {
            core::hint::spin_loop();
        }
        inner.regs.DLL.set(byte as u32);
        Ok(())
    }

    fn receive(&self, _blocking: bool) -> Result<u8, Error> {
        todo!()
    }
}

impl InitializableDevice for Serial {
    type Options = ();

    unsafe fn init(&self, _opts: Self::Options) -> Result<(), Error> {
        let regs = DeviceMemoryIo::<Regs>::map("h6-uart", self.base)?;
        self.inner.init(IrqSafeSpinlock::new(Inner { regs }));
        Ok(())
    }
}

impl Device for Serial {
    fn name(&self) -> &'static str {
        "Allwinner H6 UART"
    }
}

impl Serial {
    /// Constructs an instance of the device at `base`.
    ///
    /// # Safety
    ///
    /// The caller must ensure the address is valid.
    pub const unsafe fn new(base: usize, irq: IrqNumber) -> Self {
        Self {
            inner: OneTimeInit::new(),
            ring: CharRing::new(),

            base,
            irq,
        }
    }
}

@ -5,17 +5,18 @@ use abi::error::Error;
use device_api::CpuBringupDevice;
use fdt_rs::prelude::PropReader;

use crate::arch::Architecture;
use crate::mem::address::IntoRaw;
use crate::mem::KERNEL_VIRT_OFFSET;
use crate::{
    absolute_address,
    arch::ARCHITECTURE,
    mem::{
        phys::{self, PageUsage},
        ConvertAddress,
    },
    arch::{ArchitectureImpl, ARCHITECTURE},
    mem::phys,
};

use crate::device::devtree::{self, DevTreeIndexNodePropGet, DeviceTree};

use super::{BootStack, BOOT_STACK_SIZE};

#[derive(Debug)]
enum CpuEnableMethod {
    Psci,
@ -74,6 +75,12 @@ impl CpuEnableMethod {
/// Number of online CPUs, initially set to 1 (BSP processor is up)
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);

// TODO can be made smaller
#[link_section = ".bss"]
static AP_TRAMPOLINE_STACK: BootStack = BootStack {
    data: [0; BOOT_STACK_SIZE],
};

/// Starts application processors using the method specified in the device tree.
///
/// TODO: currently does not handle systems where APs are already started before entry.
@ -87,6 +94,9 @@ pub unsafe fn start_ap_cores(dt: &DeviceTree) -> Result<(), Error> {
        fn __aarch64_ap_entry();
    }

    // Safety: safe, the stack is inside the kernel
    let sp = AP_TRAMPOLINE_STACK.data.as_ptr() as usize - KERNEL_VIRT_OFFSET + BOOT_STACK_SIZE;

    for cpu in enumerate_cpus(dt).filter(|cpu| cpu.id != 0) {
        debugln!(
            "cpu{}: enable-method={:?}, compatible={:?}",
@ -95,19 +105,20 @@ pub unsafe fn start_ap_cores(dt: &DeviceTree) -> Result<(), Error> {
            cpu.compatible
        );

        const AP_STACK_PAGES: usize = 4;
        let stack_pages = phys::alloc_pages_contiguous(AP_STACK_PAGES, PageUsage::Used)?;
        debugln!(
            "cpu{} stack: {:#x}..{:#x}",
            cpu.id,
            stack_pages,
            stack_pages + AP_STACK_PAGES * 0x1000
        );
        // const AP_STACK_PAGES: usize = 4;
        // let stack_pages = phys::alloc_pages_contiguous(AP_STACK_PAGES)?;
        // debugln!(
        //     "cpu{} stack: {:#x}..{:#x}",
        //     cpu.id,
        //     stack_pages,
        //     stack_pages.add(AP_STACK_PAGES * 0x1000)
        // );
        // Wait for the CPU to come up
        let old_count = CPU_COUNT.load(Ordering::Acquire);

        let ip = absolute_address!(__aarch64_ap_entry).physicalize();
        let sp = stack_pages + AP_STACK_PAGES * 0x1000;
        // Safety: safe, the function is inside the kernel
        let ip = __aarch64_ap_entry as usize - KERNEL_VIRT_OFFSET;
        // let sp = stack_pages.add(AP_STACK_PAGES * 0x1000);
        if let Err(error) = cpu.enable_method.start_cpu(cpu.id as usize, ip, sp) {
            errorln!("Couldn't start cpu{} up: {:?}", cpu.id, error);
            continue;

@ -1,553 +0,0 @@
//! AArch64 virtual memory management facilities
use core::{
    marker::PhantomData,
    ops::{Index, IndexMut},
    sync::atomic::{AtomicU8, Ordering},
};

use aarch64_cpu::registers::DAIF;
use abi::error::Error;
use bitflags::bitflags;
use tock_registers::interfaces::Readable;

use crate::mem::{
    phys::{self, PageUsage},
    table::{
        EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel, VirtualMemoryManager,
    },
    ConvertAddress, KERNEL_VIRT_OFFSET,
};

/// TODO
#[derive(Clone)]
#[repr(C)]
pub struct AddressSpace {
    l1: *mut PageTable<L1>,
    asid: u8,
}

/// Page table representing a single level of address translation
#[derive(Clone)]
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
    data: [PageEntry<L>; 512],
}

/// Translation level 1: Entry is 1GiB page/table
#[derive(Clone, Copy)]
pub struct L1;
/// Translation level 2: Entry is 2MiB page/table
#[derive(Clone, Copy)]
pub struct L2;
/// Translation level 3: Entry is 4KiB page
#[derive(Clone, Copy)]
pub struct L3;

impl NonTerminalEntryLevel for L1 {
    type NextLevel = L2;
}
impl NonTerminalEntryLevel for L2 {
    type NextLevel = L3;
}

bitflags! {
    /// Describes how each page is mapped: access, presence, type of the mapping.
    #[derive(Clone, Copy)]
    pub struct PageAttributes: u64 {
        /// When set, the mapping is considered valid and assumed to point to a page/table
        const PRESENT = 1 << 0;

        /// For L1/L2 mappings, indicates that the mapping points to the next-level translation
        /// table
        const TABLE = 1 << 1;
        /// (Must be set) For L3 mappings, indicates that the mapping points to a page
        const PAGE = 1 << 1;
        /// For L1/L2 mappings, indicates that the mapping points to a page of given level's size
        const BLOCK = 0 << 1;

        /// (Must be set) For page/block mappings, indicates to the hardware that the page is
        /// accessed
        const ACCESS = 1 << 10;

        /// For page/block mappings, allows both user and kernel code to read/write to the page
        const AP_BOTH_READWRITE = 1 << 6;
        /// For page/block mappings, only allows read access for EL0/EL1
        const AP_BOTH_READONLY = 3 << 6;

        /// Indicates outer shareability domain
        const SH_OUTER = 2 << 8;
        /// Indicates inner shareability domain
        const SH_INNER = 3 << 8;

        /// For page/block mappings, indicates regular physical memory
        const PAGE_ATTR_NORMAL = 0 << 2;
        /// For page/block mappings, indicates device memory
        const PAGE_ATTR_DEVICE = 1 << 2;

        /// Indicates the mapping is unique to a specific ASID (important for proper TLB
        /// maintenance)
        const NON_GLOBAL = 1 << 11;
    }
}

impl const EntryLevel for L1 {
    fn index(addr: usize) -> usize {
        (addr >> 30) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x3FFFFFFF
    }
}
impl const EntryLevel for L2 {
    fn index(addr: usize) -> usize {
        (addr >> 21) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x1FFFFF
    }
}
impl const EntryLevel for L3 {
    fn index(addr: usize) -> usize {
        (addr >> 12) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0xFFF
    }
}
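Each of the three levels selects a 9-bit slice of the virtual address: bits 30..39 for 1 GiB L1 entries, bits 21..30 for 2 MiB L2 entries, bits 12..21 for 4 KiB L3 entries, with the remainder being the in-page offset. A small standalone check of that decomposition, mirroring the index()/page_offset() definitions above (the example address is arbitrary):

    fn decompose(addr: usize) -> (usize, usize, usize, usize) {
        let l1i = (addr >> 30) & 0x1FF; // which 1 GiB region
        let l2i = (addr >> 21) & 0x1FF; // which 2 MiB block within it
        let l3i = (addr >> 12) & 0x1FF; // which 4 KiB page within that
        let offset = addr & 0xFFF;      // byte offset inside the page
        (l1i, l2i, l3i, offset)
    }

    // decompose(0x4008_2345) == (1, 0, 0x82, 0x345):
    // second 1 GiB region, first 2 MiB block, page 0x82, offset 0x345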

/// Represents a single entry in a translation table
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct PageEntry<L>(u64, PhantomData<L>);

/// Fixed-layout kernel-space address mapping tables
pub struct FixedTables {
    l1: PageTable<L1>,
    device_l2: PageTable<L2>,
    device_l3: PageTable<L3>,

    device_l3i: usize,
}

impl PageEntry<L3> {
    /// Creates a 4KiB normal memory page mapping
    pub fn normal_page(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64)
                | (PageAttributes::PAGE
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_INNER
                    | PageAttributes::PAGE_ATTR_NORMAL
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    /// Creates a 4KiB device memory page mapping
    pub fn device_page(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64)
                | (PageAttributes::PAGE
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_OUTER
                    | PageAttributes::PAGE_ATTR_DEVICE
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the page this entry refers to, returning None if it does
    /// not
    pub fn as_page(self) -> Option<usize> {
        let mask = (PageAttributes::PRESENT | PageAttributes::PAGE).bits();

        if self.0 & mask == mask {
            Some((self.0 & !0xFFF) as usize)
        } else {
            None
        }
    }
}

impl<T: NonTerminalEntryLevel> PageEntry<T> {
    /// Creates a 2MiB page mapping
    pub fn normal_block(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64)
                | (PageAttributes::BLOCK
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | PageAttributes::SH_INNER
                    | PageAttributes::PAGE_ATTR_NORMAL
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    /// Creates a mapping pointing to the next-level translation table
    pub fn table(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64) | (PageAttributes::TABLE | PageAttributes::PRESENT | attrs).bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the table this entry refers to, returning None if it
    /// does not
    pub fn as_table(self) -> Option<usize> {
        if self.0 & (PageAttributes::TABLE | PageAttributes::PRESENT).bits()
            == (PageAttributes::TABLE | PageAttributes::PRESENT).bits()
        {
            Some((self.0 & !0xFFF) as usize)
        } else {
            None
        }
    }
}

impl<L: EntryLevel> PageEntry<L> {
    /// Represents an absent/invalid mapping in the table
    pub const INVALID: Self = Self(0, PhantomData);

    /// Converts a raw mapping value into this wrapper type
    ///
    /// # Safety
    ///
    /// The caller is responsible for making sure that `raw` is a valid mapping value for the
    /// current translation level.
    pub unsafe fn from_raw(raw: u64) -> Self {
        Self(raw, PhantomData)
    }

    /// Returns `true` if the entry refers to some table/block/page
    pub fn is_present(&self) -> bool {
        self.0 & PageAttributes::PRESENT.bits() != 0
    }
}

impl<L: NonTerminalEntryLevel> NextPageTable for PageTable<L> {
    type NextLevel = PageTable<L::NextLevel>;

    fn get_mut(&mut self, index: usize) -> Option<&'static mut Self::NextLevel> {
        let entry = self[index];

        entry
            .as_table()
            .map(|addr| unsafe { &mut *(addr.virtualize() as *mut Self::NextLevel) })
    }

    fn get_mut_or_alloc(&mut self, index: usize) -> Result<&'static mut Self::NextLevel, Error> {
        let entry = self[index];

        if let Some(table) = entry.as_table() {
            Ok(unsafe { &mut *(table.virtualize() as *mut Self::NextLevel) })
        } else {
            let table = PageTable::new_zeroed()?;
            self[index] = PageEntry::<L>::table(table.physical_address(), PageAttributes::empty());
            Ok(table)
        }
    }
}

impl<L: EntryLevel> PageTable<L> {
    /// Constructs a page table with all entries marked as invalid
    pub const fn zeroed() -> Self {
        Self {
            data: [PageEntry::INVALID; 512],
        }
    }

    /// Allocates a new page table, filling it with non-present entries
    pub fn new_zeroed() -> Result<&'static mut Self, Error> {
        let page = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() };
        let table = unsafe { &mut *(page as *mut Self) };
        for i in 0..512 {
            table[i] = PageEntry::INVALID;
        }
        Ok(table)
    }

    /// Returns a physical address pointing to this page table
    pub fn physical_address(&self) -> usize {
        // &self may already be a physical address
        let addr = self.data.as_ptr() as usize;
        if addr < KERNEL_VIRT_OFFSET {
            addr
        } else {
            unsafe { addr.physicalize() }
        }
    }
}

impl<L: EntryLevel> Index<usize> for PageTable<L> {
    type Output = PageEntry<L>;

    fn index(&self, index: usize) -> &Self::Output {
        &self.data[index]
    }
}

impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.data[index]
    }
}

impl FixedTables {
    /// Constructs an empty table group
    pub const fn zeroed() -> Self {
        Self {
            l1: PageTable::zeroed(),
            device_l2: PageTable::zeroed(),
            device_l3: PageTable::zeroed(),

            device_l3i: 0,
        }
    }

    /// Maps a physical memory region as device memory and returns its allocated base address
    pub fn map_device_pages(&mut self, phys: usize, count: usize) -> Result<usize, Error> {
        if count > 512 * 512 {
            panic!("Unsupported device memory mapping size");
        } else if count > 512 {
            // 2MiB mappings
            todo!();
        } else {
            // 4KiB mappings
            if self.device_l3i + count > 512 {
                return Err(Error::OutOfMemory);
            }

            let virt = DEVICE_VIRT_OFFSET + (self.device_l3i << 12);
            for i in 0..count {
                self.device_l3[self.device_l3i + i] =
                    PageEntry::device_page(phys + i * 0x1000, PageAttributes::empty());
            }
            self.device_l3i += count;

            Ok(virt)
        }
    }
}

impl From<MapAttributes> for PageAttributes {
    fn from(value: MapAttributes) -> Self {
        let mut res = Self::empty();
        if value.contains(MapAttributes::USER_WRITE) {
            res |= PageAttributes::AP_BOTH_READWRITE;
        } else {
            res |= PageAttributes::AP_BOTH_READONLY;
        }
        if value.contains(MapAttributes::NON_GLOBAL) {
            res |= PageAttributes::NON_GLOBAL;
        }
        res
    }
}

impl VirtualMemoryManager for AddressSpace {
    fn allocate(
        &self,
        hint: Option<usize>,
        len: usize,
        attrs: MapAttributes,
    ) -> Result<usize, Error> {
        assert_eq!(DAIF.read(DAIF::I), 1);

        if hint.is_some() {
            todo!();
        }

        const TRY_ALLOC_START: usize = 0x100000000;
        const TRY_ALLOC_END: usize = 0xF00000000;

        'l0: for base in (TRY_ALLOC_START..TRY_ALLOC_END - len * 0x1000).step_by(0x1000) {
            for i in 0..len {
                if self.translate(base + i * 0x1000).is_some() {
                    continue 'l0;
                }
            }

            for i in 0..len {
                let page = phys::alloc_page(PageUsage::Used)?;
                self.map_page(base + i * 0x1000, page, attrs)?;
            }

            return Ok(base);
        }

        Err(Error::OutOfMemory)
    }

    fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error> {
        assert_eq!(DAIF.read(DAIF::I), 1);

        for page in (addr..addr + len).step_by(0x1000) {
            let Some(phys) = self.translate(page) else {
                todo!(
                    "Tried to deallocate address not present in the table: {:#x}",
                    addr
                );
            };

            self.write_entry(page, PageEntry::INVALID, true)?;
            unsafe {
                phys::free_page(phys);
            }
        }

        Ok(())
    }

    fn map_page(&self, virt: usize, phys: usize, attrs: MapAttributes) -> Result<(), Error> {
        self.write_entry(virt, PageEntry::normal_page(phys, attrs.into()), true)
    }
}

impl AddressSpace {
    /// Allocates an empty address space with all entries marked as non-present
    pub fn new_empty() -> Result<Self, Error> {
        static LAST_ASID: AtomicU8 = AtomicU8::new(1);

        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);

        let l1 = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() as *mut PageTable<L1> };

        for i in 0..512 {
            unsafe {
                (*l1)[i] = PageEntry::INVALID;
            }
        }

        Ok(Self { l1, asid })
    }

    /// Interprets a physical address as an address space structure pointer.
    ///
    /// # Safety
    ///
    /// Unsafe: accepts arbitrary addresses and ignores ASIDs.
    pub unsafe fn from_phys_raw(value: usize) -> Self {
        let value = value.virtualize();
        Self {
            l1: value as *mut PageTable<L1>,
            asid: 0,
        }
    }

    /// Iterates through all the translation levels for a given address, invoking the callback
    /// function on each of them
    pub fn walk<F: Fn(usize, usize) -> bool>(&self, vaddr: usize, f: F) {
        let l1i = L1::index(vaddr);
        let l2i = L2::index(vaddr);
        let l3i = L3::index(vaddr);

        let l1 = unsafe { self.as_mut() };
        let l1e = l1[l1i];

        let Some(l2) = l1.get_mut(l1i) else {
            f(1, 0);
            return;
        };
        if !f(1, l1e.0 as _) {
            return;
        }
        let l2e = l2[l2i];

        let Some(l3) = l2.get_mut(l2i) else {
            f(2, 0);
            return;
        };
        if !f(2, l2e.0 as _) {
            return;
        }

        let l3e = l3[l3i];
        if !f(3, l3e.0 as _) {
            return;
        }
    }

    unsafe fn as_mut(&self) -> &'static mut PageTable<L1> {
        self.l1.as_mut().unwrap()
    }

    // TODO return page size and attributes
    /// Returns the physical address to which the `virt` address is mapped
    pub fn translate(&self, virt: usize) -> Option<usize> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l2 = unsafe { self.as_mut().get_mut(l1i) }?;
        let l3 = l2.get_mut(l2i)?;

        l3[l3i].as_page()
    }

    // Write a single 4KiB entry
    fn write_entry(&self, virt: usize, entry: PageEntry<L3>, overwrite: bool) -> Result<(), Error> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l2 = unsafe { self.as_mut().get_mut_or_alloc(l1i) }?;
        let l3 = l2.get_mut_or_alloc(l2i)?;

        if l3[l3i].is_present() && !overwrite {
            todo!()
        }
        l3[l3i] = entry;

        tlb_flush_vaae1(virt);

        Ok(())
    }

    /// Returns the physical address of the address space (to be used in a TTBRn_ELx)
    pub fn physical_address(&self) -> usize {
        unsafe { (self.l1 as usize).physicalize() | ((self.asid as usize) << 48) }
    }
}

/// Flushes a virtual address from the EL1/EL0 TLB for all ASIDs
pub fn tlb_flush_vaae1(mut page: usize) {
    page >>= 12;
    unsafe {
        core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
    }
}
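One caveat about the helper above: on ARMv8 a tlbi instruction is not by itself synchronizing, so code that immediately relies on the new translation typically follows it with dsb ish (wait for the invalidation to complete) and isb (resynchronize the instruction stream). A hedged sketch of a fully-fenced variant; whether the barriers belong in the helper or at its call sites is a design choice this commit does not spell out:

    pub fn tlb_flush_vaae1_sync(mut page: usize) {
        // TLBI VAAE1 takes the VA shifted right by 12 in its operand register
        page >>= 12;
        unsafe {
            core::arch::asm!(
                "tlbi vaae1, {page}", // invalidate this VA for all ASIDs, EL1&0
                "dsb ish",            // wait for the invalidation to complete
                "isb",                // flush the pipeline's stale translations
                page = in(reg) page,
            );
        }
    }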

/// Initializes mappings for the kernel and device memory tables.
///
/// # Safety
///
/// Only allowed to be called once, during the lower-half part of the initialization process.
pub unsafe fn init_fixed_tables() {
    // Map first 256GiB
    for i in 0..256 {
        KERNEL_TABLES.l1[i] = PageEntry::<L1>::normal_block(i << 30, PageAttributes::empty());
    }

    KERNEL_TABLES.l1[256] = PageEntry::<L1>::table(
        KERNEL_TABLES.device_l2.physical_address(),
        PageAttributes::empty(),
    );
    KERNEL_TABLES.device_l2[0] = PageEntry::<L2>::table(
        KERNEL_TABLES.device_l3.physical_address(),
        PageAttributes::empty(),
    );
}

/// Offset applied to device virtual memory mappings
pub const DEVICE_VIRT_OFFSET: usize = KERNEL_VIRT_OFFSET + (256 << 30);
/// Global kernel address space translation tables
pub static mut KERNEL_TABLES: FixedTables = FixedTables::zeroed();

@ -11,8 +11,7 @@ use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use crate::{
    arch::{aarch64::IrqNumber, Architecture, ARCHITECTURE},
    device_tree_driver,
    proc::wait,
    task::tasklet,
    task::runtime,
};

use super::cpu::Cpu;
@ -30,8 +29,7 @@ impl InterruptHandler for ArmTimer {
        CNTP_TVAL_EL0.set(TICK_INTERVAL);
        let now = self.monotonic_timestamp().unwrap();

        wait::tick(now);
        tasklet::tick(now);
        runtime::tick(now);

        unsafe {
            Cpu::local().queue().yield_cpu();

@ -20,7 +20,6 @@ macro_rules! absolute_address {
}

use cfg_if::cfg_if;
// pub use aarch64::{AArch64 as ArchitectureImpl, ARCHITECTURE};
use device_api::{
    interrupt::{ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController},
    timer::MonotonicTimestampProviderDevice,

@ -65,11 +65,11 @@ impl StackBuilder {
        self.sp
    }

    fn init_common(&mut self, entry: usize, cr3: PhysicalAddress) {
    fn init_common(&mut self, entry: usize, cr3: u64) {
        self.push(entry); // address for ret

        // End of common context
        self.push(cr3.into_raw()); // %cr3
        self.push(cr3 as _); // %cr3

        self.push(0); // %rbp
        self.push(0); // %fs (TODO)
@ -99,7 +99,7 @@ impl TaskContextImpl for TaskContext {
        stack.push(arg);

        stack.init_common(__x86_64_task_enter_kernel as _, unsafe {
            KERNEL_TABLES.as_physical_address()
            KERNEL_TABLES.as_physical_address().into_raw()
        });

        let sp = stack.build();
@ -114,12 +114,7 @@ impl TaskContextImpl for TaskContext {
        })
    }

    fn user(
        entry: usize,
        arg: usize,
        cr3: PhysicalAddress,
        user_stack_sp: usize,
    ) -> Result<Self, Error> {
    fn user(entry: usize, arg: usize, cr3: u64, user_stack_sp: usize) -> Result<Self, Error> {
        const USER_TASK_PAGES: usize = 8;

        let stack_base = phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw();

@ -3,7 +3,7 @@ use yggdrasil_abi::error::Error;
use crate::{
    arch::x86_64::intrinsics,
    mem::{
        address::AsPhysicalAddress,
        address::{AsPhysicalAddress, IntoRaw},
        phys,
        pointer::PhysicalRefMut,
        process::ProcessAddressSpaceManager,
@ -61,6 +61,11 @@ impl ProcessAddressSpaceManager for ProcessAddressSpaceImpl {
        self.read_l3_entry(address)
            .ok_or(Error::InvalidMemoryOperation)
    }

    fn as_address_with_asid(&self) -> u64 {
        // TODO x86-64 PCID/ASID?
        unsafe { self.l0.as_physical_address().into_raw() }
    }
}

impl ProcessAddressSpaceImpl {
@ -128,9 +133,3 @@ impl ProcessAddressSpaceImpl {
        Some((page, l3[l3i].attributes().into()))
    }
}

impl AsPhysicalAddress for ProcessAddressSpaceImpl {
    unsafe fn as_physical_address(&self) -> PhysicalAddress {
        self.l0.as_physical_address()
    }
}

@ -67,51 +67,19 @@ impl NonTerminalEntryLevel for L2 {
}

impl const EntryLevel for L0 {
    const SIZE: usize = 1 << 39;

    fn index(addr: usize) -> usize {
        (addr >> 39) & 0x1FF
    }

    fn page_offset(_addr: usize) -> usize {
        todo!()
    }
    const SHIFT: usize = 39;
}

impl const EntryLevel for L1 {
    const SIZE: usize = 1 << 30;

    fn index(addr: usize) -> usize {
        (addr >> 30) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x3FFFFFFF
    }
    const SHIFT: usize = 30;
}

impl const EntryLevel for L2 {
    const SIZE: usize = 1 << 21;

    fn index(addr: usize) -> usize {
        (addr >> 21) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x1FFFFF
    }
    const SHIFT: usize = 21;
}

impl const EntryLevel for L3 {
    const SIZE: usize = 1 << 12;

    fn index(addr: usize) -> usize {
        (addr >> 12) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0xFFF
    }
    const SHIFT: usize = 12;
}

impl PageEntry<L3> {

@ -10,7 +10,10 @@ use fdt_rs::{
    prelude::PropReader,
};

use crate::{debug::LogLevel, mem::phys::PhysicalMemoryRegion};
use crate::{
    debug::LogLevel,
    mem::{address::FromRaw, phys::PhysicalMemoryRegion, PhysicalAddress},
};

use super::register_device;

@ -162,6 +165,7 @@ impl<'a> DeviceTree<'a> {
    ///
    /// The caller must ensure the validity of the address.
    pub unsafe fn from_addr(virt: usize) -> Self {
        FDT_INDEX_BUFFER.0.fill(0);
        let tree = DevTree::from_raw_pointer(virt as _).unwrap();
        let index = DevTreeIndex::new(tree, &mut FDT_INDEX_BUFFER.0).unwrap();
        Self { tree, index }
@ -198,6 +202,12 @@ impl<'a> DeviceTree<'a> {
    pub fn root(&self) -> DevTreeIndexNode {
        self.index.root()
    }

    // Commonly used functions for convenience
    pub fn chosen_stdout_path(&self) -> Option<&str> {
        let chosen = self.node_by_path("/chosen")?;
        chosen.prop("stdout-path")
    }
}

impl<'a, 'i, 'dt> DevTreeIndexNodeExt for DevTreeIndexNode<'a, 'i, 'dt> {
@ -314,10 +324,10 @@ impl Iterator for FdtMemoryRegionIter<'_> {
            .cell2_array_item(0, self.address_cells, self.size_cells)
            .unwrap();

        break Some(PhysicalMemoryRegion {
            base: base as usize,
            size: size as usize,
        });
        let base = PhysicalAddress::from_raw(base);
        let size = size as usize;

        break Some(PhysicalMemoryRegion { base, size });
        }
    }
}
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
pub mod devtree;
|
||||
|
||||
// TODO bus device support on aarch64
|
||||
#[cfg(not(target_arch = "aarch64"))]
|
||||
pub mod bus;
|
||||
|
||||
pub mod display;
|
||||
// pub mod power;
|
||||
pub mod power;
|
||||
pub mod serial;
|
||||
pub mod timer;
|
||||
pub mod tty;
|
||||
|
@ -5,6 +5,6 @@ use cfg_if::cfg_if;
|
||||
cfg_if! {
|
||||
if #[cfg(target_arch = "aarch64")] {
|
||||
pub mod arm_psci;
|
||||
pub mod sunxi_rwdog;
|
||||
// pub mod sunxi_rwdog;
|
||||
}
|
||||
}
|
||||
|
@ -5,6 +5,6 @@ use cfg_if::cfg_if;
|
||||
cfg_if! {
|
||||
if #[cfg(target_arch = "aarch64")] {
|
||||
pub mod pl011;
|
||||
pub mod sunxi_uart;
|
||||
// pub mod sunxi_uart;
|
||||
}
|
||||
}
|
||||
|
@ -12,14 +12,15 @@ use vfs::CharDevice;
|
||||
|
||||
use crate::{
|
||||
arch::{aarch64::IrqNumber, Architecture, ARCHITECTURE},
|
||||
block,
|
||||
debug::{self, DebugSink, LogLevel},
|
||||
device::{
|
||||
devtree::{self, DevTreeIndexPropExt},
|
||||
tty::{CharRing, TtyDevice},
|
||||
tty::{TtyContext, TtyDevice},
|
||||
},
|
||||
device_tree_driver,
|
||||
fs::devfs::{self, CharDeviceType},
|
||||
mem::device::DeviceMemoryIo,
|
||||
mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
|
||||
sync::IrqSafeSpinlock,
|
||||
};
|
||||
|
||||
@ -62,15 +63,15 @@ register_structs! {
|
||||
}
|
||||
|
||||
struct Pl011Inner {
|
||||
regs: DeviceMemoryIo<Regs>,
|
||||
regs: DeviceMemoryIo<'static, Regs>,
|
||||
}
|
||||
|
||||
/// PL011 device instance
|
||||
pub struct Pl011 {
|
||||
inner: OneTimeInit<IrqSafeSpinlock<Pl011Inner>>,
|
||||
base: usize,
|
||||
base: PhysicalAddress,
|
||||
irq: IrqNumber,
|
||||
ring: CharRing<16>,
|
||||
context: TtyContext,
|
||||
}
|
||||
|
||||
impl Pl011Inner {
|
||||
@ -101,9 +102,9 @@ impl DebugSink for Pl011 {
|
||||
}
|
||||
}
|
||||
|
||||
impl TtyDevice<16> for Pl011 {
|
||||
fn ring(&self) -> &CharRing<16> {
|
||||
&self.ring
|
||||
impl TtyDevice for Pl011 {
|
||||
fn context(&self) -> &TtyContext {
|
||||
&self.context
|
||||
}
|
||||
}
|
||||
|
||||
@ -115,7 +116,12 @@ impl CharDevice for Pl011 {
|
||||
|
||||
fn read(&'static self, blocking: bool, data: &mut [u8]) -> Result<usize, Error> {
|
||||
assert!(blocking);
|
||||
self.line_read(data)
|
||||
match block! {
|
||||
self.line_read(data).await
|
||||
} {
|
||||
Ok(res) => res,
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
|
||||
@ -174,7 +180,7 @@ impl Device for Pl011 {
|
||||
|
||||
unsafe fn init(&'static self) -> Result<(), Error> {
|
||||
let mut inner = Pl011Inner {
|
||||
regs: DeviceMemoryIo::map("pl011 UART", self.base)?,
|
||||
regs: DeviceMemoryIo::map(self.base)?,
|
||||
};
|
||||
inner.init();
|
||||
|
||||
@ -207,8 +213,8 @@ device_tree_driver! {
|
||||
inner: OneTimeInit::new(),
|
||||
// TODO obtain IRQ from dt
|
||||
irq: IrqNumber::Shared(1),
|
||||
ring: CharRing::new(),
|
||||
base: base as usize
|
||||
context: TtyContext::new(),
|
||||
base: PhysicalAddress::from_raw(base)
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
@ -67,9 +67,15 @@ impl DeviceMemoryMapping {
|
||||
}
|
||||
|
||||
impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
|
||||
pub unsafe fn from_raw(raw: DeviceMemoryMapping) -> DeviceMemoryIo<'a, T> {
|
||||
// TODO
|
||||
loop {}
|
||||
pub unsafe fn from_raw(
|
||||
inner: Arc<RawDeviceMemoryMapping>,
|
||||
) -> Result<DeviceMemoryIo<'a, T>, Error> {
|
||||
if size_of::<T>() > inner.page_size * inner.page_count {
|
||||
todo!();
|
||||
}
|
||||
// TODO check align
|
||||
let value = &*(inner.address as *const T);
|
||||
Ok(DeviceMemoryIo { inner, value })
|
||||
}
|
||||
|
||||
pub unsafe fn map(base: PhysicalAddress) -> Result<DeviceMemoryIo<'a, T>, Error> {
|
||||
|

@ -22,12 +22,14 @@ pub(super) const TRACKED_PAGE_LIMIT: usize = (BITMAP_PAGE_COUNT * 4096) * 8;
pub struct PhysicalMemoryManager {
    bitmap: PhysicalRefMut<'static, [u64]>,
    last_free_bit: usize,
    offset: usize,
    page_count: usize,
}

impl PhysicalMemoryManager {
    pub unsafe fn new(
        bitmap_phys_base: PhysicalAddress,
        offset: usize,
        page_count: usize,
    ) -> PhysicalMemoryManager {
        let bitmap_len = (page_count + (BITMAP_WORD_SIZE - 1)) / BITMAP_WORD_SIZE;
@ -38,6 +40,7 @@ impl PhysicalMemoryManager {
        Self {
            bitmap,
            page_count,
            offset,
            last_free_bit: 0,
        }
    }
@ -67,7 +70,7 @@ impl PhysicalMemoryManager {
            self.last_free_bit = i + 1;
            self.mark_alloc(i);

            return Ok(PhysicalAddress::from_raw(i * 0x1000));
            return Ok(PhysicalAddress::from_raw(i * 0x1000 + self.offset));
        }

        if self.last_free_bit != 0 {
@ -93,7 +96,7 @@ impl PhysicalMemoryManager {
            }
            self.last_free_bit = i + 512;

            return Ok(PhysicalAddress::from_raw(i * 0x1000));
            return Ok(PhysicalAddress::from_raw(i * 0x1000 + self.offset));
        }

        if self.last_free_bit != 0 {
@ -118,7 +121,7 @@ impl PhysicalMemoryManager {
            }
            self.last_free_bit = i + count;

            return Ok(PhysicalAddress::from_raw(i * 0x1000));
            return Ok(PhysicalAddress::from_raw(i * 0x1000 + self.offset));
        }

        if self.last_free_bit != 0 {
@ -135,7 +138,9 @@ impl PhysicalMemoryManager {
    ///
    /// `addr` must be a page-aligned physical address previously allocated by this implementation.
    pub unsafe fn free_page(&mut self, page: PhysicalAddress) {
        let index = IntoRaw::<usize>::into_raw(page) / 0x1000;
        let page: usize = page.into_raw();
        assert!(page >= self.offset);
        let index = (page - self.offset) / 0x1000;

        assert!(self.is_alloc(index));
        self.mark_free(index);
@ -147,7 +152,9 @@ impl PhysicalMemoryManager {
    ///
    /// Will panic if the address does not point to a valid, reserved (and unallocated) page.
    pub fn add_available_page(&mut self, page: PhysicalAddress) {
        let index = IntoRaw::<usize>::into_raw(page) / 0x1000;
        let page: usize = page.into_raw();
        assert!(page >= self.offset);
        let index = (page - self.offset) / 0x1000;

        assert!(self.is_alloc(index));
        self.mark_free(index);
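The offset-relative index math above is the heart of this change: bit i of the bitmap now tracks the physical page at offset + i * 0x1000 instead of assuming tracked RAM starts at address zero. A small standalone sketch of the two directions of that mapping (4 KiB pages assumed; names are illustrative, not from the kernel):

    fn page_to_index(page: usize, offset: usize) -> usize {
        // Mirrors the assert + subtraction in free_page/add_available_page
        assert!(page >= offset, "address below the managed range");
        (page - offset) / 0x1000
    }

    fn index_to_page(index: usize, offset: usize) -> usize {
        // Mirrors the i * 0x1000 + self.offset in the alloc paths
        offset + index * 0x1000
    }

    // page_to_index(index_to_page(5, 0x4000_0000), 0x4000_0000) == 5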

@ -5,7 +5,10 @@ use kernel_util::util::OneTimeInit;

use crate::{
    arch::{Architecture, ARCHITECTURE},
    mem::phys::reserved::is_reserved,
    mem::{
        address::IntoRaw,
        phys::{self, reserved::is_reserved},
    },
    sync::IrqSafeSpinlock,
};

@ -162,7 +165,12 @@ pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
        },
    );

    let mut manager = PhysicalMemoryManager::new(page_bitmap_phys_base, total_count);
    if IntoRaw::<u64>::into_raw(phys_start) & 0x1FFFFFF != 0 {
        loop {}
    }

    let mut manager =
        PhysicalMemoryManager::new(page_bitmap_phys_base, phys_start.into_raw(), total_count);
    let mut collected = 0;
    const MAX_MEMORY: usize = 16 * 1024;

@ -2,20 +2,20 @@ use abi::error::Error;
use cfg_if::cfg_if;
use vmalloc::VirtualMemoryAllocator;

use crate::{arch::x86_64::mem::table::L3, mem::phys, sync::IrqSafeSpinlock};
use crate::{mem::phys, sync::IrqSafeSpinlock};

use super::{address::AsPhysicalAddress, table::MapAttributes, PhysicalAddress};

cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
        use crate::arch::aarch64::table::AddressSpace;
        use crate::arch::aarch64::mem::process::ProcessAddressSpaceImpl;
    } else if #[cfg(target_arch = "x86_64")] {
        use crate::arch::x86_64::mem::process::ProcessAddressSpaceImpl;
    }
}

/// Interface for virtual memory address space management
pub trait ProcessAddressSpaceManager: Sized + AsPhysicalAddress {
pub trait ProcessAddressSpaceManager: Sized {
    const PAGE_SIZE: usize;
    const LOWER_LIMIT_PFN: usize;
    const UPPER_LIMIT_PFN: usize;
@ -32,6 +32,8 @@ pub trait ProcessAddressSpaceManager: Sized + AsPhysicalAddress {
    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error>;

    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;

    fn as_address_with_asid(&self) -> u64;
}

struct Inner {
@ -186,7 +188,8 @@ impl ProcessAddressSpace {
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
        assert!(physical.is_aligned_for::<L3>());
        // XXX
        // assert!(physical.is_aligned_for::<L3>());

        self.inner
            .lock()
@ -211,22 +214,7 @@ impl ProcessAddressSpace {
        )
    }

    pub fn debug_dump(&self) {
        let lock = self.inner.lock();

        debugln!("Address space @ {:#x}", unsafe {
            lock.table.as_physical_address()
        });
        for (used, range) in lock.allocator.ranges() {
            let start = range.start_pfn() * ProcessAddressSpaceImpl::PAGE_SIZE;
            let end = range.end_pfn() * ProcessAddressSpaceImpl::PAGE_SIZE;
            debugln!("{:#x?}: {}", start..end, used);
        }
    }
}

impl AsPhysicalAddress for ProcessAddressSpace {
    unsafe fn as_physical_address(&self) -> PhysicalAddress {
        self.inner.lock().table.as_physical_address()
    pub fn as_address_with_asid(&self) -> u64 {
        self.inner.lock().table.as_address_with_asid()
    }
}

@ -4,6 +4,7 @@ use core::ops::{Deref, DerefMut};
use abi::error::Error;
use bitflags::bitflags;

// TODO EXECUTABLE
bitflags! {
    /// Describes how a page translation mapping should behave
    #[derive(Clone, Copy)]
@ -36,12 +37,29 @@ pub trait NextPageTable {
/// Interface for a single level of address translation
#[const_trait]
pub trait EntryLevel: Copy {
    const SIZE: usize;
    const SHIFT: usize;
    const SIZE: usize = 1 << Self::SHIFT;

    /// Returns the index into a page table for a given address
    fn index(addr: usize) -> usize;
    #[inline]
    fn index(addr: usize) -> usize {
        (addr >> Self::SHIFT) & 0x1FF
    }
    /// Returns the offset of an address from the page start at current level
    fn page_offset(addr: usize) -> usize;
    #[inline]
    fn page_offset(addr: usize) -> usize {
        addr & (Self::SIZE - 1)
    }

    #[inline]
    fn align_up(addr: usize) -> usize {
        (addr + Self::SIZE - 1) & !(Self::SIZE - 1)
    }

    #[inline]
    fn page_count(addr: usize) -> usize {
        (addr + Self::SIZE - 1) / Self::SIZE
    }
}
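With SHIFT as the only required item, adding a translation level becomes a one-liner while index(), page_offset(), align_up() and page_count() all stay mutually consistent. A sketch of what an implementor and the derived helpers look like; the kernel's real impls are `impl const EntryLevel` as above, a plain impl is shown here for brevity, and the 512-entries-per-level arithmetic is assumed:

    #[derive(Clone, Copy)]
    struct L2Demo;

    impl EntryLevel for L2Demo {
        const SHIFT: usize = 21; // 2 MiB entries
    }

    // Everything below is derived from SHIFT alone:
    // L2Demo::SIZE == 1 << 21
    // L2Demo::index(0x4030_0000) == 1      (second 2 MiB slot in its table)
    // L2Demo::align_up(0x20_0001) == 0x40_0000
    // L2Demo::page_count(0x20_0001) == 2   (one byte past a boundary needs 2 pages)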

/// Tag trait to mark that the page table level may point to a next-level table
@ -98,8 +98,6 @@ pub fn load_elf_from_file(space: &ProcessAddressSpace, file: FileRef) -> Result<

    let elf = ElfStream::<AnyEndian, _>::open_stream(file).map_err(from_parse_error)?;

    space.debug_dump();

    for phdr in elf.segments() {
        if phdr.p_type != PT_LOAD {
            continue;

@ -174,7 +174,7 @@ fn setup_binary<S: Into<String>>(
    let context = TaskContext::user(
        entry,
        virt_args_base,
        unsafe { space.as_physical_address() },
        unsafe { space.as_address_with_asid() },
        user_sp,
    )?;

@ -56,12 +56,7 @@ pub trait TaskContextImpl: Sized {

    /// Constructs a user thread context. The caller is responsible for allocating the userspace
    /// stack and setting up a valid address space for the context.
    fn user(
        entry: usize,
        arg: usize,
        cr3: PhysicalAddress,
        user_stack_sp: usize,
    ) -> Result<Self, Error>;
    fn user(entry: usize, arg: usize, cr3: u64, user_stack_sp: usize) -> Result<Self, Error>;

    /// Performs an entry into a context.
    ///
@ -6,7 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
memtables = { path = "../../lib/memtables" }
memtables = { path = "../../lib/memtables", features = ["all"] }

bytemuck = "1.14.0"
elf = "0.7.2"

193
tools/gentables/src/aarch64.rs
Normal file
@ -0,0 +1,193 @@
use core::fmt;
use std::{
    io::{Read, Seek},
    mem::offset_of,
};

use bitflags::bitflags;
use elf::{
    abi::{PF_W, PF_X, PT_LOAD},
    endian::AnyEndian,
    ElfStream,
};
use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};

use crate::{GenData, GenError};

bitflags! {
    #[derive(Clone, Copy)]
    struct PageFlags: u64 {
        const PRESENT = 1 << 0;
        const ACCESS = 1 << 10;
        const SH_INNER = 3 << 8;
        const PAGE_ATTR_NORMAL = 0 << 2;
        const AP_BOTH_READONLY = 3 << 6;
        const TABLE = 1 << 1;
        const PAGE = 1 << 1;
        const UXN = 1 << 54;
        const PXN = 1 << 53;
    }
}

impl PageFlags {
    pub fn kernel_table() -> Self {
        Self::PRESENT | Self::ACCESS | Self::SH_INNER | Self::PAGE_ATTR_NORMAL | Self::TABLE
    }

    pub fn kernel_page() -> Self {
        Self::PRESENT | Self::ACCESS | Self::SH_INNER | Self::PAGE_ATTR_NORMAL | Self::PAGE
    }
}
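TABLE and PAGE intentionally alias the same bit: in the VMSAv8-64 descriptor format, bit 1 set means "table" at levels 1 and 2 but "page" at level 3. A quick sanity check of the composed low bits (illustrative, not part of the diff):

#[test]
fn kernel_descriptor_low_bits() {
    // PRESENT | TABLE | SH_INNER | ACCESS = 0x1 | 0x2 | 0x300 | 0x400
    assert_eq!(PageFlags::kernel_table().bits(), 0x703);
    // PAGE aliases bit 1, so page descriptors share the same low bits
    assert_eq!(PageFlags::kernel_page().bits(), 0x703);
}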

pub struct AArch64Builder<F: Read + Seek> {
    elf: ElfStream<AnyEndian, F>,
    data: GenData,
    tables: FixedTables,

    l1i: usize,
    l2i_start: usize,
    l2i_end: usize,
}

impl PageFlags {
    fn from_elf(flags: u32) -> Self {
        let mut out = Self::UXN | Self::PXN;
        if flags & PF_X != 0 {
            out.remove(Self::PXN);
        }
        if flags & PF_W == 0 {
            out |= Self::AP_BOTH_READONLY;
        }
        out
    }
}

impl fmt::Display for PageFlags {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let x = if self.contains(Self::PXN) { '-' } else { 'x' };
        let w = if self.contains(Self::AP_BOTH_READONLY) {
            '-'
        } else {
            'w'
        };
        write!(f, "r{}{}", w, x)
    }
}

const L1_SHIFT: u64 = 30;
const L2_SHIFT: u64 = 21;
const L3_SHIFT: u64 = 12;
const L2_ENTRY_SIZE: u64 = 1 << L2_SHIFT;
const L3_ENTRY_SIZE: u64 = 1 << L3_SHIFT;

// TODO proper granularity
impl<F: Read + Seek> AArch64Builder<F> {
    pub fn new(elf: ElfStream<AnyEndian, F>, data: GenData) -> Result<Self, GenError> {
        let l1i = (data.kernel_start >> L1_SHIFT) as usize & 0x1FF;
        let l2i_start = (data.kernel_start >> L2_SHIFT) as usize & 0x1FF;
        let l2i_end = ((data.kernel_end + L2_ENTRY_SIZE - 1) >> L2_SHIFT) as usize & 0x1FF;

        if l2i_end - l2i_start > KERNEL_L3_COUNT {
            todo!()
        }

        Ok(Self {
            elf,
            data,

            tables: FixedTables::zeroed(),

            l1i,
            l2i_start,
            l2i_end,
        })
    }
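A worked instance of the index math above, under assumed addresses (a kernel spanning physical 0x4008_0000..0x4030_0000):

// l1i       = (0x4008_0000 >> 30) & 0x1FF = 1    one 1GiB slot in l1
// l2i_start = (0x4008_0000 >> 21) & 0x1FF = 0    first 2MiB slot in l2
// l2i_end   = ((0x4030_0000 + 0x1F_FFFF) >> 21) & 0x1FF = 2
// => two L3 tables needed, within this arch's KERNEL_L3_COUNT of 4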

    // TODO the build function is almost identical to the x86-64 one, but with slight changes,
    // so might want to unify this later
    pub fn build(mut self) -> Result<(FixedTables, u64), GenError> {
        assert_eq!(offset_of!(FixedTables, l1), 0);

        let l2_physical_address =
            self.data.table_physical_address + offset_of!(FixedTables, l2) as u64;

        // L1 -> L2
        self.tables.l1.data[self.l1i] = l2_physical_address | PageFlags::kernel_table().bits();

        // L2 -> L3s
        for l2i in self.l2i_start..self.l2i_end {
            let l3_table_index = l2i - self.l2i_start;
            let l3_physical_address = self.data.table_physical_address
                + (offset_of!(FixedTables, l3s) + 0x1000 * l3_table_index) as u64;

            self.tables.l2.data[l2i] = l3_physical_address | PageFlags::kernel_table().bits();
        }

        for (i, segment) in self.elf.segments().into_iter().enumerate() {
            if segment.p_type != PT_LOAD
                || segment.p_vaddr != segment.p_paddr + self.data.kernel_virt_offset
            {
                continue;
            }

            let aligned_virt_start = segment.p_vaddr & !(L3_ENTRY_SIZE - 1);
            let aligned_virt_end =
                (segment.p_vaddr + segment.p_memsz + L3_ENTRY_SIZE - 1) & !(L3_ENTRY_SIZE - 1);
            let aligned_phys_start = segment.p_paddr & !(L3_ENTRY_SIZE - 1);
            let count = (aligned_virt_end - aligned_virt_start) / 0x1000;

            let flags = PageFlags::from_elf(segment.p_flags);

            println!(
                "{}: {:#x?} -> {:#x} {}",
                i,
                aligned_virt_start..aligned_virt_end,
                aligned_phys_start,
                flags
            );
            Self::map_segment(
                self.l2i_start,
                &mut self.tables,
                aligned_virt_start,
                aligned_phys_start,
                count as usize,
                flags,
            )?;
        }

        Ok((self.tables, self.data.table_offset))
    }

    fn map_segment(
        start_l2i: usize,
        tables: &mut FixedTables,
        vaddr_start: u64,
        paddr_start: u64,
        count: usize,
        flags: PageFlags,
    ) -> Result<(), GenError> {
        for index in 0..count {
            let vaddr = vaddr_start + index as u64 * L3_ENTRY_SIZE;
            let paddr = paddr_start + index as u64 * L3_ENTRY_SIZE;

            let entry = paddr | (PageFlags::kernel_page() | flags).bits();

            // Mask the L2 index first, then rebase it onto this image's first L3 table;
            // without the parentheses, `-` would bind tighter than `&`.
            let l2i = ((vaddr >> L2_SHIFT) as usize & 0x1FF) - start_l2i;
            let l3i = (vaddr >> L3_SHIFT) as usize & 0x1FF;

            let l3 = &mut tables.l3s[l2i];

            if l3.data[l3i] != 0 {
                if l3.data[l3i] != entry {
                    todo!();
                } else {
                    continue;
                }
            }

            l3.data[l3i] = entry;
        }

        Ok(())
    }
}
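To make the index rebasing concrete, the same assumed layout as above (start_l2i = 0) gives:

// vaddr = 0x4028_0000:
//   l2i = ((0x4028_0000 >> 21) & 0x1FF) - 0 = 1    second fixed L3 table
//   l3i = (0x4028_0000 >> 12) & 0x1FF = 0x80       entry 128 within it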

@ -10,15 +10,16 @@ use std::{

use clap::Parser;
use elf::{
    abi::{EM_X86_64, PT_LOAD},
    abi::{EM_AARCH64, EM_X86_64, PT_LOAD},
    endian::AnyEndian,
    ElfStream,
};
use memtables::FixedTables;
use memtables::any::AnyTables;
use thiserror::Error;

use crate::x86_64::X8664Builder;
use crate::{aarch64::AArch64Builder, x86_64::X8664Builder};

mod aarch64;
mod x86_64;

#[derive(Error, Debug)]
@ -120,7 +121,11 @@ fn find_tables<F: Read + Seek>(elf: &mut ElfStream<AnyEndian, F>) -> Result<(u64
    Err(GenError::MissingSection(".data.tables"))
}

fn build_tables<F: Read + Seek>(file: F) -> Result<(FixedTables, u64), GenError> {
fn into_any<T: Into<AnyTables>, U>((l, r): (T, U)) -> (AnyTables, U) {
    (l.into(), r)
}

fn build_tables<F: Read + Seek>(file: F) -> Result<(AnyTables, u64), GenError> {
    let mut elf = ElfStream::<AnyEndian, F>::open_stream(file)?;

    let kernel_virt_offset = kernel_virt_offset(&mut elf)?;
@ -144,7 +149,20 @@ fn build_tables<F: Read + Seek>(file: F) -> Result<(FixedTables, u64), GenError>
                table_physical_address,
            },
        )?
        .build(),
        .build()
        .map(into_any),
        EM_AARCH64 => AArch64Builder::new(
            elf,
            GenData {
                kernel_virt_offset,
                kernel_start,
                kernel_end,
                table_offset,
                table_physical_address,
            },
        )?
        .build()
        .map(into_any),
        _ => todo!(),
    }
}
@ -152,11 +170,10 @@ fn build_tables<F: Read + Seek>(file: F) -> Result<(FixedTables, u64), GenError>
fn write_tables<F: Write + Seek>(
    mut file: F,
    offset: u64,
    tables: FixedTables,
    tables: AnyTables,
) -> Result<(), GenError> {
    let bytes = bytemuck::bytes_of(&tables);
    file.seek(SeekFrom::Start(offset))?;
    file.write_all(bytes)?;
    file.write_all(tables.as_bytes())?;
    Ok(())
}
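End to end, the tool now patches whichever architecture the input ELF targets. A sketch of the call flow (argument names are assumed, not taken from this diff):

let file = std::fs::OpenOptions::new()
    .read(true)
    .write(true)
    .open(&args.image)?;                       // kernel image, opened read-write
let (tables, offset) = build_tables(&file)?;   // dispatches on e_machine
write_tables(&file, offset, tables)?;          // serializes via AnyTables::as_bytes()

Passing &File works because Read, Write and Seek are implemented for shared references to File, so one handle serves both passes.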

@ -11,7 +11,7 @@ use elf::{
    endian::AnyEndian,
    ElfStream,
};
use memtables::{FixedTables, KERNEL_L3_COUNT};
use memtables::x86_64::{FixedTables, KERNEL_L3_COUNT};

use crate::{GenData, GenError};