use core::{
    marker::PhantomData,
    ops::{Index, IndexMut, Range},
};

use bitflags::bitflags;
use libk_mm_interface::{
    address::{AsPhysicalAddress, PhysicalAddress},
    pointer::{PhysicalRef, PhysicalRefMut},
    table::{
        EntryLevel, EntryLevelDrop, MapAttributes, NextPageTable, NonTerminalEntryLevel,
        TableAllocator,
    },
};
use yggdrasil_abi::error::Error;

use crate::KernelTableManagerImpl;

bitflags! {
    /// Describes how each page table entry is mapped
    pub struct PageAttributes: u32 {
        /// When set, the mapping is considered valid and pointing somewhere
        const PRESENT = 1 << 0;
        /// For tables, allows writes to further translation levels, for pages/blocks, allows
        /// writes to the region covered by the entry
        const WRITABLE = 1 << 1;
        /// When set for L0 (page directory) entries, the mapping specifies a 4MiB page instead
        /// of a page table reference
        const BLOCK = 1 << 7;
        /// For tables, allows user access to further translation levels, for pages/blocks, allows
        /// user access to the region covered by the entry
        const USER = 1 << 2;
    }
}

// TODO stuff for PAE?

/// Lowest translation level: maps 4KiB pages
#[derive(Debug, Clone, Copy)]
pub struct L3;
/// Upper translation level (page directory): each entry covers 4MiB
#[derive(Debug, Clone, Copy)]
pub struct L0;

/// Represents a single entry of a translation table
#[derive(Clone, Copy, Debug)]
pub struct PageEntry<L: EntryLevel>(u32, PhantomData<L>);

/// Represents a single level of the translation hierarchy (page directory or page table)
#[derive(Clone, Copy, Debug)]
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
    data: [PageEntry<L>; 1024],
}

impl EntryLevel for L3 {
    const SHIFT: usize = 12;
}

impl EntryLevel for L0 {
    const SHIFT: usize = 22;
}

impl NonTerminalEntryLevel for L0 {
    type NextLevel = L3;
}

impl PageEntry<L3> {
    /// Constructs an entry which maps a 4KiB page at `address`
    pub fn page(address: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            address.try_into_u32().unwrap() | (PageAttributes::PRESENT | attrs).bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the page this entry maps, if it is present
    pub fn as_page(&self) -> Option<PhysicalAddress> {
        if self.0 & PageAttributes::PRESENT.bits() != 0 {
            Some(PhysicalAddress::from_u32(self.0 & !0xFFF))
        } else {
            None
        }
    }
}

impl PageEntry<L0> {
    /// Constructs an entry which maps a 4MiB block at `address`
    pub fn block(address: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            address.try_into_u32().unwrap()
                | (PageAttributes::PRESENT | PageAttributes::BLOCK | attrs).bits(),
            PhantomData,
        )
    }

    /// Constructs an entry which points to a next-level page table at `address`
    pub fn table(address: PhysicalAddress, attrs: PageAttributes) -> Self {
        Self(
            address.try_into_u32().unwrap() | (PageAttributes::PRESENT | attrs).bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the page table this entry points to, if it is present
    /// and is not a block mapping
    pub fn as_table(&self) -> Option<PhysicalAddress> {
        if self.0 & PageAttributes::PRESENT.bits() != 0
            && self.0 & PageAttributes::BLOCK.bits() == 0
        {
            Some(PhysicalAddress::from_u32(self.0 & !0xFFF))
        } else {
            None
        }
    }
}

impl<L: EntryLevel> PageEntry<L> {
    /// An entry which maps nothing
    pub const INVALID: Self = Self(0, PhantomData);

    /// Returns `true` if the entry is marked as present
    pub fn is_present(&self) -> bool {
        self.0 & (1 << 0) != 0
    }

    /// Returns the attribute bits of the entry
    pub fn attributes(&self) -> PageAttributes {
        PageAttributes::from_bits_retain(self.0)
    }
}

impl<L: EntryLevel> PageTable<L> {
    /// Constructs a table filled with invalid (non-present) entries
    pub const fn zeroed() -> Self {
        Self {
            data: [PageEntry::INVALID; 1024],
        }
    }

    /// Allocates a new table through `TA` and fills it with invalid entries
    pub fn new_zeroed<'a, TA: TableAllocator>(
    ) -> Result<PhysicalRefMut<'a, Self, KernelTableManagerImpl>, Error> {
        let physical = TA::allocate_page_table()?;
        let mut table =
            unsafe { PhysicalRefMut::<'a, Self, KernelTableManagerImpl>::map(physical) };

        for i in 0..1024 {
            table[i] = PageEntry::INVALID;
        }

        Ok(table)
    }

    /// Recursively clears and deallocates the translation table.
    ///
    /// # Safety
    ///
    /// The caller must ensure the table is no longer in use and is not referenced anymore.
    pub unsafe fn free<TA: TableAllocator>(this: PhysicalRefMut<Self, KernelTableManagerImpl>) {
        let physical = this.as_physical_address();
        TA::free_page_table(physical);
    }
}

impl NextPageTable for PageTable<L0> {
    type NextLevel = PageTable<L3>;
    type TableRef = PhysicalRef<'static, Self::NextLevel, KernelTableManagerImpl>;
    type TableRefMut = PhysicalRefMut<'static, Self::NextLevel, KernelTableManagerImpl>;

    fn get(&self, index: usize) -> Option<Self::TableRef> {
        self[index]
            .as_table()
            .map(|addr| unsafe { PhysicalRef::map(addr) })
    }

    fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
        self[index]
            .as_table()
            .map(|addr| unsafe { PhysicalRefMut::map(addr) })
    }

    fn get_mut_or_alloc<TA: TableAllocator>(
        &mut self,
        index: usize,
    ) -> Result<Self::TableRefMut, Error> {
        let entry = self[index];

        if let Some(table) = entry.as_table() {
            Ok(unsafe { PhysicalRefMut::map(table) })
        } else {
            let table = PageTable::new_zeroed::<TA>()?;
            self[index] = PageEntry::<L0>::table(
                unsafe { table.as_physical_address() },
                PageAttributes::WRITABLE | PageAttributes::USER,
            );
            Ok(table)
        }
    }
}

impl<L: EntryLevel> Index<usize> for PageTable<L> {
    type Output = PageEntry<L>;

    fn index(&self, index: usize) -> &Self::Output {
        &self.data[index]
    }
}

impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.data[index]
    }
}

impl EntryLevelDrop for PageTable<L3> {
    const FULL_RANGE: Range<usize> = 0..1024;

    // Leaf-level tables contain no further tables, so there is nothing to recurse into
    unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
}

impl EntryLevelDrop for PageTable<L0> {
    const FULL_RANGE: Range<usize> = 0..1024;

    unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
        for index in range {
            let entry = self[index];

            if let Some(table) = entry.as_table() {
                let mut table_ref: PhysicalRefMut<PageTable<L3>, KernelTableManagerImpl> =
                    PhysicalRefMut::map(table);

                table_ref.drop_all::<TA>();

                TA::free_page_table(table);
            } else if entry.is_present() {
                // Memory must have been cleared beforehand, so no non-table entries should be present
                panic!(
                    "Expected a table containing only tables, got table[{}] = {:#x?}",
                    index, entry.0
                );
            }

            self[index] = PageEntry::INVALID;
        }
    }
}

impl From<MapAttributes> for PageAttributes {
    fn from(value: MapAttributes) -> Self {
        let mut res = PageAttributes::WRITABLE;
        if value.intersects(MapAttributes::USER_READ | MapAttributes::USER_WRITE) {
            res |= PageAttributes::USER;
        }
        res
    }
}

impl From<PageAttributes> for MapAttributes {
    fn from(value: PageAttributes) -> Self {
        let mut res = MapAttributes::empty();

        if value.contains(PageAttributes::USER) {
            res |= MapAttributes::USER_READ;

            if value.contains(PageAttributes::WRITABLE) {
                res |= MapAttributes::USER_WRITE;
            }
        }

        // TODO ???
        res |= MapAttributes::NON_GLOBAL;

        res
    }
}
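
// NOTE: the function below is an illustrative sketch, not part of the original module.
// It assumes some `TableAllocator` implementation is supplied by the caller and that
// `page_phys` refers to a previously allocated physical page (both are hypothetical
// names), and shows how the L0 directory, an L3 table and a 4KiB page entry fit together.
#[allow(dead_code)]
fn map_page_example<TA: TableAllocator>(page_phys: PhysicalAddress) -> Result<(), Error> {
    // Allocate and zero a fresh page directory
    let mut l0 = PageTable::<L0>::new_zeroed::<TA>()?;
    // Allocate (or reuse) the L3 table covering directory slot 0
    let mut l3 = l0.get_mut_or_alloc::<TA>(0)?;
    // Point the first 4KiB slot of that table at `page_phys`, writable, kernel-only
    l3[0] = PageEntry::<L3>::page(page_phys, PageAttributes::WRITABLE);
    // A real caller would keep track of the directory and eventually release it through
    // `drop_range`/`free`; here the allocated tables are simply leaked.
    Ok(())
}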