use core::{
    alloc::Layout,
    ops::{Deref, DerefMut},
    ptr::addr_of,
    sync::atomic::{AtomicUsize, Ordering},
};

use kernel_arch_interface::mem::{
    DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping,
};
use libk_mm_interface::{
    address::{FromRaw, PhysicalAddress},
    table::{EntryLevel, EntryLevelExt},
    KernelImageObject,
};
use memtables::x86_64::FixedTables;
use static_assertions::{const_assert_eq, const_assert_ne};
use yggdrasil_abi::error::Error;

use crate::{registers::CR3, KERNEL_VIRT_OFFSET};

use self::table::{PageAttributes, PageEntry, PageTable, L0, L1, L2, L3};

pub mod process;
pub mod table;

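/// [KernelTableManager] implementation for the x86-64 fixed kernel tables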
#[derive(Debug)]
pub struct KernelTableManagerImpl;

const CANONICAL_ADDRESS_MASK: usize = 0xFFFF000000000000;
const KERNEL_PHYS_BASE: usize = 0x200000;

// Mapped at compile time
const KERNEL_MAPPING_BASE: usize = KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE;
const KERNEL_L0_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L0>();
const KERNEL_L1_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L1>();
const KERNEL_START_L2_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L2>();

// Must not be zero: the L2 slot immediately below the kernel (mapped at 2MiB)
// is reserved for early mappings
const_assert_ne!(KERNEL_START_L2_INDEX, 0);
// From static mapping
const_assert_eq!(KERNEL_L0_INDEX, 511);
const_assert_eq!(KERNEL_L1_INDEX, 0);

// Mapped at boot
const EARLY_MAPPING_L2I: usize = KERNEL_START_L2_INDEX - 1;
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;

const DEVICE_MAPPING_L3_COUNT: usize = 4;
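
// Worked example of how the virtual window offsets below are composed,
// assuming KERNEL_VIRT_OFFSET = 0xFFFFFF8000000000 (L0 index 511, L1 index 0):
//   RAM_MAPPING_OFFSET  = 0xFFFF000000000000 | 510 * L0::SIZE = 0xFFFFFF0000000000
//   HEAP_MAPPING_OFFSET = 0xFFFF000000000000 | 511 * L0::SIZE | 1 * L1::SIZE
//                       = 0xFFFFFF8040000000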

#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
    unsafe { KernelImageObject::new(FixedTables::zeroed()) };

// 2MiB for early mappings
const EARLY_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK
    | (KERNEL_L0_INDEX * L0::SIZE)
    | (KERNEL_L1_INDEX * L1::SIZE)
    | (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for heap mapping
pub const HEAP_MAPPING_OFFSET: usize =
    CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (HEAP_MAPPING_L1I * L1::SIZE);
pub(super) static mut HEAP_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize =
    CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (DEVICE_MAPPING_L1I * L1::SIZE);
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
    [PageTable::zeroed(); DEVICE_MAPPING_L3_COUNT];
// 512GiB for whole RAM mapping
pub const RAM_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK | (RAM_MAPPING_L0I * L0::SIZE);
pub static MEMORY_LIMIT: AtomicUsize = AtomicUsize::new(0);
pub static mut RAM_MAPPING_L1: PageTable<L1> = PageTable::zeroed();

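// Physical <-> virtual conversions below rely on the linear RAM mapping at
// RAM_MAPPING_OFFSET. MEMORY_LIMIT starts at zero and is expected to be set
// during early memory init, so conversions panic until then.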
impl KernelTableManager for KernelTableManagerImpl {
    fn virtualize(address: u64) -> usize {
        let address = address as usize;
        if address < MEMORY_LIMIT.load(Ordering::Acquire) {
            address + RAM_MAPPING_OFFSET
        } else {
            panic!("Invalid physical address: {:#x}", address);
        }
    }

    fn physicalize(address: usize) -> u64 {
        if address < RAM_MAPPING_OFFSET
            || address - RAM_MAPPING_OFFSET >= MEMORY_LIMIT.load(Ordering::Acquire)
        {
            panic!("Not a virtualized physical address: {:#x}", address);
        }

        (address - RAM_MAPPING_OFFSET) as _
    }

    unsafe fn map_device_pages(
        base: u64,
        count: usize,
        attrs: DeviceMemoryAttributes,
    ) -> Result<RawDeviceMemoryMapping<Self>, Error> {
        map_device_memory(PhysicalAddress::from_raw(base), count, attrs)
    }

    unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
        unmap_device_memory(mapping)
    }
}

// Early mappings
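/// Maps `count` contiguous 4KiB pages starting at `physical` into the
/// early-mapping window, returning the virtual address of the mapping.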
unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usize, Error> {
    for l3i in 0..512 {
        // Don't scan past the end of the table when the mapping would not fit
        if l3i + count > 512 {
            break;
        }

        let mut taken = false;
        for i in 0..count {
            if EARLY_MAPPING_L3[l3i + i].is_present() {
                taken = true;
                break;
            }
        }

        if taken {
            continue;
        }

        for i in 0..count {
            // TODO NX, NC
            EARLY_MAPPING_L3[l3i + i] =
                PageEntry::page(physical.add(i * L3::SIZE), PageAttributes::WRITABLE);
        }

        return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}
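
/// Unmaps a single early-mapping page previously mapped with `map_early_pages`.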
unsafe fn unmap_early_page(address: usize) {
    if !(EARLY_MAPPING_OFFSET..EARLY_MAPPING_OFFSET + L2::SIZE).contains(&address) {
        panic!("Tried to unmap invalid early mapping: {:#x}", address);
    }

    let l3i = (address - EARLY_MAPPING_OFFSET).page_index::<L3>();

    assert!(EARLY_MAPPING_L3[l3i].is_present());
    EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;
}

// Device mappings
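//
// Small regions (up to 256 4KiB pages) are mapped through DEVICE_MAPPING_L3S;
// larger regions fall back to 2MiB L2 blocks in DEVICE_MAPPING_L2.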

unsafe fn map_device_memory_l3(
    base: PhysicalAddress,
    count: usize,
    _attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
    // TODO don't map pages if already mapped

    'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
        // Don't scan past the end of the tables when the mapping would not fit
        if i + count > DEVICE_MAPPING_L3_COUNT * 512 {
            break;
        }

        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            let l2i = (i + j) / 512;
            let l3i = (i + j) % 512;

            // TODO NX, NC
            DEVICE_MAPPING_L3S[l2i][l3i] =
                PageEntry::page(base.add(j * L3::SIZE), PageAttributes::WRITABLE);
        }

        return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
    }

    Err(Error::OutOfMemory)
}

unsafe fn map_device_memory_l2(
    base: PhysicalAddress,
    count: usize,
    _attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
    'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
        // Don't scan past the end of the table when the mapping would not fit
        if i + count > 512 {
            break;
        }

        for j in 0..count {
            if DEVICE_MAPPING_L2[i + j].is_present() {
                continue 'l0;
            }
        }

        for j in 0..count {
            DEVICE_MAPPING_L2[i + j] =
                PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::WRITABLE);
        }

        // debugln!(
        //     "map l2s: base={:#x}, count={} -> {:#x}",
        //     base,
        //     count,
        //     DEVICE_MAPPING_OFFSET + i * L2::SIZE
        // );
        return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
    }

    Err(Error::OutOfMemory)
}
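
/// Maps a physical memory-mapped I/O region of `size` bytes and returns the
/// resulting virtual mapping. Regions larger than 1MiB are mapped with 2MiB
/// L2 blocks, smaller ones with 4KiB pages.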
unsafe fn map_device_memory(
    base: PhysicalAddress,
    size: usize,
    attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
    // debugln!("Map {}B @ {:#x}", size, base);
    let l3_aligned = base.page_align_down::<L3>();
    let l3_offset = base.page_offset::<L3>();
    let page_count = (l3_offset + size).page_count::<L3>();

    if page_count > 256 {
        // Large mapping, use L2 mapping instead
        let l2_aligned = base.page_align_down::<L2>();
        let l2_offset = base.page_offset::<L2>();
        let page_count = (l2_offset + size).page_count::<L2>();

        let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
        let address = base_address + l2_offset;

        Ok(RawDeviceMemoryMapping::from_raw_parts(
            address,
            base_address,
            page_count,
            L2::SIZE,
        ))
    } else {
        // Just map the pages directly
        let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
        let address = base_address + l3_offset;

        Ok(RawDeviceMemoryMapping::from_raw_parts(
            address,
            base_address,
            page_count,
            L3::SIZE,
        ))
    }
}
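
/// Releases a mapping created by `map_device_memory`. Unmapping of 2MiB block
/// mappings is not implemented yet.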
unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
    // debugln!(
    //     "Unmap {}B @ {:#x}",
    //     map.page_count * map.page_size,
    //     map.base_address
    // );
    match map.page_size {
        L3::SIZE => {
            for i in 0..map.page_count {
                let page = map.base_address + i * L3::SIZE;
                let l2i = page.page_index::<L2>();
                let l3i = page.page_index::<L3>();
                assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
                flush_tlb_entry(page);
            }
        }
        L2::SIZE => todo!(),
        _ => unimplemented!(),
    }
}
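
/// Maps a single 2MiB physical block for the kernel heap at
/// `HEAP_MAPPING_OFFSET + index * L2::SIZE`.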
pub unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {
    if !page.is_page_aligned_for::<L2>() {
        panic!("Attempted to map a misaligned 2MiB page");
    }
    assert!(index < 512);

    if HEAP_MAPPING_L2[index].is_present() {
        panic!("Page is already mapped: {:#x}", page);
    }

    // TODO NX
    HEAP_MAPPING_L2[index] = PageEntry::<L2>::block(page, PageAttributes::WRITABLE);
}

/// Memory mapping which may be used for performing early kernel initialization
pub struct EarlyMapping<'a, T: ?Sized> {
    value: &'a mut T,
    page_count: usize,
}
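
// Typical usage sketch (`BootData` and `phys` are placeholders, not real items):
//
//     let data: EarlyMapping<BootData> = unsafe { EarlyMapping::map(phys)? };
//     let _ = data.some_field; // Deref gives access to the mapped value
//     drop(data); // dropping unmaps the early pages again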

impl<'a, T: Sized> EarlyMapping<'a, T> {
    pub unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
        let layout = Layout::new::<T>();
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        let page_count = (offset + layout.size()).div_ceil(L3::SIZE);

        let virt = map_early_pages(aligned, page_count)?;
        let value = &mut *((virt + offset) as *mut T);

        Ok(EarlyMapping { value, page_count })
    }

    pub unsafe fn map_slice(
        physical: PhysicalAddress,
        len: usize,
    ) -> Result<EarlyMapping<'a, [T]>, Error> {
        let layout = Layout::array::<T>(len).unwrap();
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        let page_count = (offset + layout.size()).div_ceil(L3::SIZE);

        let virt = map_early_pages(aligned, page_count)?;
        let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);

        Ok(EarlyMapping { value, page_count })
    }
}

impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
    fn drop(&mut self) {
        let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);

        for i in 0..self.page_count {
            let page = address + i * L3::SIZE;

            unsafe {
                unmap_early_page(page);
            }
        }
    }
}
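
/// Copies the kernel mapping and linear RAM mapping L0 entries into `dst`, so
/// the kernel remains mapped in a newly created address space.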
pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
    unsafe {
        dst[KERNEL_L0_INDEX] = PageEntry::from_raw(KERNEL_TABLES.l0.data[KERNEL_L0_INDEX]);
        dst[RAM_MAPPING_L0I] = PageEntry::from_raw(KERNEL_TABLES.l0.data[RAM_MAPPING_L0I]);
    }
}

/// Sets up the following memory map:
/// ...: KERNEL_TABLES.l0:
/// * 0xFFFFFF0000000000 .. 0xFFFFFF8000000000 : RAM_MAPPING_L1
/// * 0xFFFFFF8000000000 .. ... : KERNEL_TABLES.kernel_l1:
/// * 0xFFFFFF8000000000 .. 0xFFFFFF8040000000 : KERNEL_TABLES.kernel_l2
/// * 0xFFFFFF8000000000 .. 0xFFFFFF8000200000 : EARLY_MAPPING_L3
/// * 0xFFFFFF8000200000 .. ... : KERNEL_TABLES.kernel_l3s
/// * 0xFFFFFF8040000000 .. 0xFFFFFF8080000000 : HEAP_MAPPING_L2
/// * 0xFFFFFF8080000000 .. 0xFFFFFF80C0000000 : DEVICE_MAPPING_L2
/// * 0xFFFFFF8080000000 .. 0xFFFFFF8080800000 : DEVICE_MAPPING_L3S
/// * 0xFFFFFF8080800000 .. 0xFFFFFF80C0000000 : ...
pub unsafe fn init_fixed_tables() {
    // TODO this could be built in compile-time too?
    let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
    let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
    let heap_mapping_l2_phys = addr_of!(HEAP_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
    let ram_mapping_l1_phys = addr_of!(RAM_MAPPING_L1) as usize - KERNEL_VIRT_OFFSET;

    for i in 0..DEVICE_MAPPING_L3_COUNT {
        let device_mapping_l3_phys = PhysicalAddress::from_raw(
            addr_of!(DEVICE_MAPPING_L3S[i]) as usize - KERNEL_VIRT_OFFSET,
        );
        DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::WRITABLE);
    }

    assert_eq!(KERNEL_TABLES.kernel_l2.data[EARLY_MAPPING_L2I], 0);
    KERNEL_TABLES.kernel_l2.data[EARLY_MAPPING_L2I] = (early_mapping_l3_phys as u64)
        | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();

    assert_eq!(KERNEL_TABLES.kernel_l1.data[HEAP_MAPPING_L1I], 0);
    KERNEL_TABLES.kernel_l1.data[HEAP_MAPPING_L1I] =
        (heap_mapping_l2_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
    assert_eq!(KERNEL_TABLES.kernel_l1.data[DEVICE_MAPPING_L1I], 0);
    KERNEL_TABLES.kernel_l1.data[DEVICE_MAPPING_L1I] = (device_mapping_l2_phys as u64)
        | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();

    assert_eq!(KERNEL_TABLES.l0.data[RAM_MAPPING_L0I], 0);
    KERNEL_TABLES.l0.data[RAM_MAPPING_L0I] =
        (ram_mapping_l1_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();

    // TODO ENABLE EFER.NXE
    let cr3 = addr_of!(KERNEL_TABLES.l0) as usize - KERNEL_VIRT_OFFSET;
    CR3.set_address(cr3);
}
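
/// Invalidates the TLB entry for the page containing `address` (x86 `invlpg`).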
#[inline]
pub unsafe fn flush_tlb_entry(address: usize) {
    core::arch::asm!("invlpg ({0})", in(reg) address, options(att_syntax));
}