rv64: implement address space dropping
parent ca82e25cf6 · commit 822d4f891c
@@ -7,7 +7,9 @@ use libk_mm_interface::{
     address::{AsPhysicalAddress, PhysicalAddress},
     pointer::PhysicalRefMut,
     process::ProcessAddressSpaceManager,
-    table::{EntryLevel, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator},
+    table::{
+        EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
+    },
 };
 use memtables::riscv64::PageAttributes;
 use yggdrasil_abi::error::Error;
@@ -15,8 +17,8 @@ use yggdrasil_abi::error::Error;
 use crate::mem::{clone_kernel_tables, table::PageEntry};

 use super::{
-    table::{PageTable, L1, L2, L3},
-    tlb_flush_va_asid, KernelTableManagerImpl, USER_BOUNDARY,
+    table::{DroppableRange, PageTable, L1, L2, L3},
+    KernelTableManagerImpl, USER_BOUNDARY,
 };

 pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
@@ -81,7 +83,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
         (physical, self.asid as u64)
     }

-    unsafe fn clear(&mut self) {}
+    unsafe fn clear(&mut self) {
+        unsafe { self.l1.drop_range::<TA>(L1::DROPPABLE_RANGE) };
+    }
 }

 impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
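The `drop_range` call above comes from the `EntryLevelDrop` trait that this hunk begins importing from `libk_mm_interface`. Its real definition is not part of the diff; the following is only a sketch of the surface it appears to expose, reconstructed from how this commit uses it (`FULL_RANGE`, `drop_range`, `drop_all`), with a stub `TableAllocator` so the snippet stands on its own:

use core::ops::Range;

// Stub standing in for libk_mm_interface::table::TableAllocator, only so this
// sketch is self-contained; the real trait is the one imported in the diff.
pub trait TableAllocator {}

// Assumed shape of EntryLevelDrop, inferred from its usage in this commit;
// not the authoritative definition.
pub trait EntryLevelDrop {
    /// Entry-index range a whole-table drop should cover at this level.
    const FULL_RANGE: Range<usize>;

    /// Recursively frees any child page tables referenced by entries in `range`.
    ///
    /// # Safety
    /// Nothing may still refer to the mappings being torn down.
    unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>);

    /// Drops every droppable entry; plausibly a provided method over FULL_RANGE.
    unsafe fn drop_all<TA: TableAllocator>(&mut self) {
        unsafe { self.drop_range::<TA>(Self::FULL_RANGE) }
    }
}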
@@ -109,7 +113,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
         }

         l3[l3i] = entry;
-        tlb_flush_va_asid(virt, self.asid as usize);
+        super::tlb_flush_va_asid(virt, self.asid as usize);

         Ok(())
     }
@@ -126,7 +130,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
         let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;

         l3[l3i] = PageEntry::INVALID;
-        tlb_flush_va_asid(virt, self.asid as usize);
+        super::tlb_flush_va_asid(virt, self.asid as usize);

         Ok(page)
     }
@@ -155,14 +159,14 @@

 impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
     fn drop(&mut self) {
-        // TODO
-        // // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
-        // // is safe, no one refers to the memory
-        // unsafe {
-        //     self.clear();
-        //     let l1_phys = self.l1.as_physical_address();
-        //     TA::free_page_table(l1_phys);
-        // }
+        // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
+        // is safe, no one refers to the memory
+        unsafe {
+            self.clear();
+            let l1_phys = self.l1.as_physical_address();
+            TA::free_page_table(l1_phys);
+            super::tlb_flush_asid(self.asid as usize);
+        }
     }
 }
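With `clear` now doing real work, the previously commented-out `Drop` body is re-enabled and gains a final ASID-wide flush. Written out as an annotated sketch (every name below is taken from the diff above; only the ordering comments are added):

// Teardown order when a ProcessAddressSpaceImpl is dropped:
//
//   self.clear();                                // 1. walk the user half of L1 and free
//                                                //    all L2/L3 tables it references
//   let l1_phys = self.l1.as_physical_address();
//   TA::free_page_table(l1_phys);                // 2. free the root (L1) table itself
//   super::tlb_flush_asid(self.asid as usize);   // 3. discard cached translations for this
//                                                //    ASID, so a later reuse starts clean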
(The remaining hunks belong to a second file in this commit: the rv64 page table definitions that the code above imports.)

@@ -1,16 +1,19 @@
 use core::{
     marker::PhantomData,
-    ops::{Index, IndexMut},
+    ops::{Index, IndexMut, Range},
 };

 use libk_mm_interface::{
     address::{AsPhysicalAddress, PhysicalAddress},
     pointer::{PhysicalRef, PhysicalRefMut},
-    table::{EntryLevel, NextPageTable, NonTerminalEntryLevel, TableAllocator},
+    table::{
+        page_index, EntryLevel, EntryLevelDrop, NextPageTable, NonTerminalEntryLevel,
+        TableAllocator,
+    },
 };
 use yggdrasil_abi::error::Error;

-use super::KernelTableManagerImpl;
+use super::{KernelTableManagerImpl, USER_BOUNDARY};

 pub use memtables::riscv64::PageAttributes;
@@ -44,6 +47,18 @@ pub struct PageTable<L: EntryLevel> {
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);

+pub(super) trait DroppableRange {
+    const DROPPABLE_RANGE: Range<usize>;
+}
+
+impl DroppableRange for L1 {
+    const DROPPABLE_RANGE: Range<usize> = 0..page_index::<L1>(USER_BOUNDARY);
+}
+
+impl DroppableRange for L2 {
+    const DROPPABLE_RANGE: Range<usize> = 0..512;
+}
+
 impl NonTerminalEntryLevel for L1 {
     type NextLevel = L2;
 }
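`DROPPABLE_RANGE` bounds how much of a table the drop path is allowed to walk: an L2 table belongs entirely to the process, so all 512 entries are fair game, while the L1 (root) table also carries the kernel half of the address space copied in by `clone_kernel_tables`, so only entries below `USER_BOUNDARY` may be freed. A tiny numeric sketch under an assumed Sv39 layout; the boundary value below is hypothetical, not the kernel's actual constant:

// Hypothetical numbers for illustration only: Sv39, 1 GiB per L1 entry,
// and an assumed USER_BOUNDARY of 256 GiB. The real kernel constant may differ.
const L1_ENTRY_SPAN: usize = 1 << 30; // 1 GiB covered by each L1 entry
const ASSUMED_USER_BOUNDARY: usize = 256 << 30; // hypothetical user/kernel split

fn main() {
    // Index of the first non-droppable L1 entry, i.e. what
    // page_index::<L1>(USER_BOUNDARY) would be expected to yield here.
    let droppable = ASSUMED_USER_BOUNDARY / L1_ENTRY_SPAN;
    assert_eq!(droppable, 256);
    println!("L1 droppable range under these assumptions: 0..{droppable}");
}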
@@ -93,6 +108,48 @@ impl<L: EntryLevel> PageEntry<L> {
     }
 }

+impl<L: NonTerminalEntryLevel + DroppableRange> EntryLevelDrop for PageTable<L>
+where
+    PageTable<L::NextLevel>: EntryLevelDrop,
+{
+    const FULL_RANGE: Range<usize> = L::DROPPABLE_RANGE;
+
+    unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
+        for index in range {
+            let entry = self[index];
+
+            if let Some(table) = entry.as_table() {
+                unsafe {
+                    let mut table_ref: PhysicalRefMut<
+                        PageTable<L::NextLevel>,
+                        KernelTableManagerImpl,
+                    > = PhysicalRefMut::map(table);
+
+                    table_ref.drop_all::<TA>();
+
+                    TA::free_page_table(table);
+                }
+            } else if entry.is_present() {
+                // Memory must've been cleared beforehand, so no non-table entries must be present
+                panic!(
+                    "Expected a table containing only tables, got table[{}] = {:#x?}",
+                    index, entry.0
+                );
+            }
+
+            self[index] = PageEntry::INVALID;
+            // dc_cvac((&raw const self[index]).addr());
+        }
+    }
+}
+
+impl EntryLevelDrop for PageTable<L3> {
+    const FULL_RANGE: Range<usize> = 0..512;
+
+    // Do nothing
+    unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
+}
+
 impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
     type NextLevel = PageTable<L::NextLevel>;
     type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
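Taken together, the generic impl (covering L1 and L2 via the `NonTerminalEntryLevel + DroppableRange` bounds) and the explicit `PageTable<L3>` impl form a recursion with an explicit base case: intermediate levels free the child tables their entries point at, while the leaf level is a no-op because leaf pages must already have been unmapped — the `panic!` enforces that invariant. A rough trace of one full teardown, under the assumption that `drop_all` is equivalent to `drop_range(FULL_RANGE)`:

// Sketch of the recursion for one process address space (indices illustrative):
//
// l1.drop_range(L1::DROPPABLE_RANGE)          // user half of the root table only
//   for each present L1 entry (an L2 table):
//     l2.drop_all()                           // = drop_range(0..512)
//       for each present L2 entry (an L3 table):
//         l3.drop_all()                       // no-op: leaves were unmapped earlier
//         TA::free_page_table(l3_phys)
//     TA::free_page_table(l2_phys)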