use core::{
    alloc::Layout,
    ops::{Deref, DerefMut},
    ptr::addr_of,
    sync::atomic::{AtomicUsize, Ordering},
};

use aarch64_cpu::registers::{TTBR0_EL1, TTBR1_EL1};
use kernel_arch_interface::{
    mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
    KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
    address::{FromRaw, PhysicalAddress},
    table::{EntryLevel, EntryLevelExt},
    KernelImageObject,
};
use memtables::aarch64::{FixedTables, KERNEL_L3_COUNT};
use static_assertions::const_assert_eq;
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;

use self::table::{PageAttributes, PageEntry, PageTable, L1, L2, L3};

pub mod process;
pub mod table;
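
/// Kernel table manager for AArch64: translates between physical and
/// higher-half virtual addresses and hands out device MMIO mappings.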
#[derive(Debug)]
pub struct KernelTableManagerImpl;

// TODO eliminate this requirement by using precomputed indices
const MAPPING_OFFSET: usize = KERNEL_VIRT_OFFSET;
const KERNEL_PHYS_BASE: usize = 0x40080000;

// Precomputed mappings
const KERNEL_L1_INDEX: usize = (KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE).page_index::<L1>();
const KERNEL_START_L2_INDEX: usize = (KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE).page_index::<L2>();
const KERNEL_END_L2_INDEX: usize = KERNEL_START_L2_INDEX + KERNEL_L3_COUNT;

// KERNEL_PHYS_BASE lies within the first 2MiB block of its 1GiB region, so the
// kernel's L2 entries start at index 0 of that region
const_assert_eq!(KERNEL_START_L2_INDEX, 0);
// From static mapping
const_assert_eq!(KERNEL_L1_INDEX, 1);

// Runtime mappings
// 2MiB max
const EARLY_MAPPING_L2I: usize = KERNEL_END_L2_INDEX + 1;
// 1GiB max
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 16GiB max
const RAM_MAPPING_START_L1I: usize = KERNEL_L1_INDEX + 3;
pub const RAM_MAPPING_L1_COUNT: usize = 16;
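
// Resulting upper-half layout (summary of the indices above):
//   L1[KERNEL_L1_INDEX]         kernel image; early mapping window (2MiB)
//   L1[KERNEL_L1_INDEX + 2]     device MMIO window (up to 1GiB)
//   L1[KERNEL_L1_INDEX + 3..]   linear RAM mapping (RAM_MAPPING_L1_COUNT GiB)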

// 2MiB for early mappings
const EARLY_MAPPING_OFFSET: usize =
    MAPPING_OFFSET | (KERNEL_L1_INDEX * L1::SIZE) | (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize = MAPPING_OFFSET | (DEVICE_MAPPING_L1I * L1::SIZE);
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
    [PageTable::zeroed(); DEVICE_MAPPING_L3_COUNT];
// 16GiB for RAM mapping
pub const RAM_MAPPING_OFFSET: usize = MAPPING_OFFSET | (RAM_MAPPING_START_L1I * L1::SIZE);
pub static MEMORY_LIMIT: AtomicUsize = AtomicUsize::new(0);
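
/// Fixed kernel translation tables, placed in `.data.tables` so the boot code
/// can locate them within the kernel image.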
#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
    unsafe { KernelImageObject::new(FixedTables::zeroed()) };

impl KernelTableManager for KernelTableManagerImpl {
    fn virtualize(address: u64) -> usize {
        let address = address as usize;
        if address < MEMORY_LIMIT.load(Ordering::Acquire) {
            // RAM is linearly mapped into the RAM mapping window
            address + RAM_MAPPING_OFFSET
        } else {
            panic!("Invalid physical address: {:#x}", address);
        }
    }

    fn physicalize(address: usize) -> u64 {
        // Only addresses inside the linear RAM mapping window can be
        // converted back to physical ones
        if address < RAM_MAPPING_OFFSET
            || address - RAM_MAPPING_OFFSET >= MEMORY_LIMIT.load(Ordering::Acquire)
        {
            panic!("Not a virtualized physical address: {:#x}", address);
        }

        (address - RAM_MAPPING_OFFSET) as _
    }

    unsafe fn map_device_pages(
        base: u64,
        count: usize,
        attrs: DeviceMemoryAttributes,
    ) -> Result<RawDeviceMemoryMapping<Self>, Error> {
        map_device_memory(PhysicalAddress::from_raw(base), count, attrs)
    }

    unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
        unmap_device_memory(mapping)
    }
}
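
// The linear translation in action (illustrative only; 0x48000000 stands for
// an arbitrary physical address below MEMORY_LIMIT):
//
//     let virt = KernelTableManagerImpl::virtualize(0x48000000);
//     assert_eq!(virt, RAM_MAPPING_OFFSET + 0x48000000);
//     assert_eq!(KernelTableManagerImpl::physicalize(virt), 0x48000000);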

/// Memory mapping which may be used for performing early kernel initialization
pub struct EarlyMapping<'a, T: ?Sized> {
    value: &'a mut T,
    page_count: usize,
}

impl<'a, T: Sized> EarlyMapping<'a, T> {
    /// # Safety
    ///
    /// `physical` address provided must be a valid non-NULL address actually containing `T`.
    pub unsafe fn map_slice(
        physical: PhysicalAddress,
        len: usize,
    ) -> Result<EarlyMapping<'a, [T]>, Error> {
        let layout = Layout::array::<T>(len).unwrap();
        // Map whole pages, keeping the sub-page offset of `physical`
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        let page_count = (offset + layout.size()).page_count::<L3>();

        let virt = map_early_pages(aligned, page_count)?;
        let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);

        Ok(EarlyMapping { value, page_count })
    }
}
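
// A minimal usage sketch (illustrative only; `dtb_base` and `dtb_size` stand
// for values handed over by the bootloader, `parse_dtb` is hypothetical):
//
//     let dtb: EarlyMapping<[u8]> = unsafe { EarlyMapping::map_slice(dtb_base, dtb_size)? };
//     parse_dtb(&dtb);
//     // the pages are unmapped when `dtb` is dropped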

impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
    fn drop(&mut self) {
        // Mask off the sub-page offset to get the first mapped page
        let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);

        for i in 0..self.page_count {
            let page = address + i * L3::SIZE;

            unsafe {
                unmap_early_page(page);
            }
        }
    }
}
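
/// Attributes for kernel table descriptors pointing to next-level tables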
fn kernel_table_flags() -> PageAttributes {
    PageAttributes::TABLE
        | PageAttributes::ACCESS
        | PageAttributes::SH_INNER
        | PageAttributes::PAGE_ATTR_NORMAL
        | PageAttributes::PRESENT
}
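
/// Attributes for the 1GiB blocks of the linear RAM mapping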
fn ram_block_flags() -> PageAttributes {
    // TODO UXN, PXN
    PageAttributes::BLOCK
        | PageAttributes::ACCESS
        | PageAttributes::SH_INNER
        | PageAttributes::PAGE_ATTR_NORMAL
        | PageAttributes::PRESENT
}

// Early mappings
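/// Maps `count` contiguous pages starting at `physical` into the early mapping
/// window, returning the virtual address of the first page.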
unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usize, Error> {
    if count > 512 {
        return Err(Error::OutOfMemory);
    }

    // Find a contiguous span of `count` free entries; the upper bound keeps
    // `i + l3i` within the 512-entry table
    for l3i in 0..=512 - count {
        let mut taken = false;
        for i in 0..count {
            if EARLY_MAPPING_L3[i + l3i].is_present() {
                taken = true;
                break;
            }
        }

        if taken {
            continue;
        }

        for i in 0..count {
            let page = physical.add(i * L3::SIZE);
            // TODO NX, NC
            EARLY_MAPPING_L3[i + l3i] = PageEntry::normal_page(page, PageAttributes::empty());
        }

        return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}
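
/// Unmaps a single early-mapping page previously mapped by `map_early_pages`.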
unsafe fn unmap_early_page(address: usize) {
    if !(EARLY_MAPPING_OFFSET..EARLY_MAPPING_OFFSET + L2::SIZE).contains(&address) {
        panic!("Tried to unmap invalid early mapping: {:#x}", address);
    }

    let l3i = (address - EARLY_MAPPING_OFFSET).page_index::<L3>();

    assert!(EARLY_MAPPING_L3[l3i].is_present());
    EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;

    // TODO invalidate tlb
}

/// Installs the `index`-th 1GiB block of the linear RAM mapping
///
/// # Safety
///
/// Only meant to be used by the architecture initialization functions.
pub unsafe fn map_ram_l1(index: usize) {
    if index >= RAM_MAPPING_L1_COUNT {
        todo!()
    }
    assert_eq!(KERNEL_TABLES.l1.data[index + RAM_MAPPING_START_L1I], 0);

    KERNEL_TABLES.l1.data[index + RAM_MAPPING_START_L1I] =
        ((index * L1::SIZE) as u64) | ram_block_flags().bits();
}

// Device mappings
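/// Maps `count` 4KiB (L3) pages of device memory starting at `base` into the
/// device MMIO window, returning the virtual address of the first page.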
unsafe fn map_device_memory_l3(
    base: PhysicalAddress,
    count: usize,
    _attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
    // TODO don't map pages if already mapped
    if count > DEVICE_MAPPING_L3_COUNT * 512 {
        return Err(Error::OutOfMemory);
    }

    // The upper bound keeps `i + j` within the mapping tables
    'l0: for i in 0..=DEVICE_MAPPING_L3_COUNT * 512 - count {
        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            // TODO NX, NC
            DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::device_page(base.add(j * L3::SIZE));
        }

        return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}
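
/// Maps `count` 2MiB (L2) blocks of device memory starting at `base` into the
/// device MMIO window, returning the virtual address of the first block.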
unsafe fn map_device_memory_l2(
    base: PhysicalAddress,
    count: usize,
    _attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
    if count > 512 - DEVICE_MAPPING_L3_COUNT {
        return Err(Error::OutOfMemory);
    }

    // The first DEVICE_MAPPING_L3_COUNT entries refer to the L3 tables; the
    // upper bound keeps `i + j` within the 512-entry table
    'l0: for i in DEVICE_MAPPING_L3_COUNT..=512 - count {
        for j in 0..count {
            if DEVICE_MAPPING_L2[i + j].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            DEVICE_MAPPING_L2[i + j] = PageEntry::<L2>::device_block(base.add(j * L2::SIZE));
        }

        // log::debug!(
        //     "map l2s: base={:#x}, count={} -> {:#x}",
        //     base,
        //     count,
        //     DEVICE_MAPPING_OFFSET + i * L2::SIZE
        // );
        return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
    }

    Err(Error::OutOfMemory)
}
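
/// Maps `size` bytes of device memory at `base`, using 4KiB pages for small
/// ranges and 2MiB blocks for ranges above 1MiB (256 L3 pages).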
pub(crate) unsafe fn map_device_memory(
    base: PhysicalAddress,
    size: usize,
    attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
    // debugln!("Map {}B @ {:#x}", size, base);
    let l3_aligned = base.page_align_down::<L3>();
    let l3_offset = base.page_offset::<L3>();
    let page_count = (l3_offset + size).page_count::<L3>();

    if page_count > 256 {
        // Large mapping, use L2 mapping instead
        let l2_aligned = base.page_align_down::<L2>();
        let l2_offset = base.page_offset::<L2>();
        let page_count = (l2_offset + size).page_count::<L2>();

        let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
        let address = base_address + l2_offset;

        Ok(RawDeviceMemoryMapping::from_raw_parts(
            address,
            base_address,
            page_count,
            L2::SIZE,
        ))
    } else {
        // Just map the pages directly
        let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
        let address = base_address + l3_offset;

        Ok(RawDeviceMemoryMapping::from_raw_parts(
            address,
            base_address,
            page_count,
            L3::SIZE,
        ))
    }
}
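
/// Releases a device memory mapping created by [`map_device_memory`].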
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
    // debugln!(
    //     "Unmap {}B @ {:#x}",
    //     map.page_count * map.page_size,
    //     map.base_address
    // );
    match map.page_size {
        L3::SIZE => {
            for i in 0..map.page_count {
                let page = map.base_address + i * L3::SIZE;
                let l2i = page.page_index::<L2>();
                let l3i = page.page_index::<L3>();

                assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;

                tlb_flush_vaae1(page);
            }
        }
        L2::SIZE => todo!(),
        _ => unimplemented!(),
    }
}
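
/// Invalidates the TLB entry for the given virtual address across all ASIDs
/// at EL1 (`TLBI VAAE1`).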
#[inline]
pub fn tlb_flush_vaae1(mut page: usize) {
    // The instruction takes VA[55:12], so drop the page offset bits
    page >>= 12;
    unsafe {
        core::arch::asm!("tlbi vaae1, {page}", page = in(reg) page);
    }
}

/// (BSP early init) Loads the precomputed kernel mapping tables so the kernel
/// can jump to the "higher half"
///
/// # Safety
///
/// Unsafe, must only be called by the BSP during its early init, while still
/// running in the "lower half"
pub unsafe fn load_fixed_tables() {
    // Both TTBR0 and TTBR1 point at the same tables: the identity (lower-half)
    // mapping is still needed until the jump to the higher half is made
    let ttbr0 = KERNEL_TABLES.l1.data.as_ptr() as u64;
    TTBR0_EL1.set(ttbr0);
    TTBR1_EL1.set(ttbr0);
}

/// Sets up the additional translation tables used by the kernel: the early
/// mapping window and the device MMIO window
///
/// # Safety
///
/// Unsafe, must only be called by the BSP during its early init, after the
/// switch to the "higher half"
pub unsafe fn init_fixed_tables() {
    // TODO this could be built in compile-time too?
    let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
    let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;

    // Attach the device mapping L3 tables to their L2 table
    for i in 0..DEVICE_MAPPING_L3_COUNT {
        let device_mapping_l3_phys = PhysicalAddress::from_raw(
            &DEVICE_MAPPING_L3S[i] as *const _ as usize - KERNEL_VIRT_OFFSET,
        );
        DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
    }

    assert_eq!(KERNEL_TABLES.l2.data[EARLY_MAPPING_L2I], 0);
    KERNEL_TABLES.l2.data[EARLY_MAPPING_L2I] =
        (early_mapping_l3_phys as u64) | kernel_table_flags().bits();

    assert_eq!(KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I], 0);
    KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I] =
        (device_mapping_l2_phys as u64) | kernel_table_flags().bits();
}