use yggdrasil_abi::error::Error;

use crate::{
    arch::x86_64::intrinsics,
    mem::{
        address::{AsPhysicalAddress, IntoRaw},
        phys,
        pointer::PhysicalRefMut,
        process::ProcessAddressSpaceManager,
        table::{EntryLevel, MapAttributes, NextPageTable},
        PhysicalAddress,
    },
};

use super::{
    clone_kernel_tables,
    table::{PageEntry, PageTable, L0, L1, L2, L3},
};

/// Represents a process or kernel address space. Because x86-64 does not have cool stuff like
/// AArch64's TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space.
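///
/// A minimal usage sketch (the address and `flags` are illustrative placeholders, and the
/// physical page allocator is assumed to be initialized):
///
/// ```ignore
/// let mut space = ProcessAddressSpaceImpl::new()?;
/// let frame = phys::alloc_page()?;
/// // `flags: MapAttributes` is supplied by the caller
/// unsafe { space.map_page(0x1000_0000, frame, flags)? };
/// let (physical, attrs) = space.translate(0x1000_0000)?;
/// ```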
#[repr(C)]
pub struct ProcessAddressSpaceImpl {
    l0: PhysicalRefMut<'static, PageTable<L0>>,
}

impl ProcessAddressSpaceManager for ProcessAddressSpaceImpl {
    const PAGE_SIZE: usize = L3::SIZE;
    const LOWER_LIMIT_PFN: usize = 8;
    // 16GiB VM limit
    const UPPER_LIMIT_PFN: usize = (16 << 30) / Self::PAGE_SIZE;
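
    /// Constructs a new address space: allocates a fresh L0 table, clears its entries and
    /// clones the kernel tables into it.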
    fn new() -> Result<Self, Error> {
        // Allocate a page for the L0 table and obtain a mutable reference to it
        let mut l0 = unsafe { PhysicalRefMut::<'static, PageTable<L0>>::map(phys::alloc_page()?) };

        // Clear all 512 entries first...
        for i in 0..512 {
            l0[i] = PageEntry::INVALID;
        }

        // ...then copy the kernel's table entries in
        clone_kernel_tables(&mut l0);

        Ok(Self { l0 })
    }
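
    /// Maps the 4KiB page at virtual `address` to `physical` with the given attributes.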
    #[inline]
    unsafe fn map_page(
        &mut self,
        address: usize,
        physical: PhysicalAddress,
        flags: MapAttributes,
    ) -> Result<(), Error> {
        self.write_l3_entry(address, PageEntry::page(physical, flags.into()), false)
    }
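
    /// Unmaps the 4KiB page at virtual `address`, returning the physical page it pointed to.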
    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
        self.pop_l3_entry(address)
    }
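
    /// Translates a virtual `address` into its physical page and mapping attributes.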
    #[inline]
    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
        self.read_l3_entry(address)
            .ok_or(Error::InvalidMemoryOperation)
    }
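
    /// Returns the physical address of the L0 table, i.e. the value to load into CR3 when
    /// switching to this address space (no PCID/ASID bits yet).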
    fn as_address_with_asid(&self) -> u64 {
        // TODO x86-64 PCID/ASID?
        unsafe { self.l0.as_physical_address().into_raw() }
    }
}

impl ProcessAddressSpaceImpl {
    // Write a single 4KiB entry
    fn write_l3_entry(
        &mut self,
        virt: usize,
        entry: PageEntry<L3>,
        overwrite: bool,
    ) -> Result<(), Error> {
        // Split the virtual address into its per-level table indices
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        // Walk down to the L3 table, allocating intermediate tables as needed
        let mut l1 = self.l0.get_mut_or_alloc(l0i)?;
        let mut l2 = l1.get_mut_or_alloc(l1i)?;
        let mut l3 = l2.get_mut_or_alloc(l2i)?;

        if l3[l3i].is_present() && !overwrite {
            // Replacing a live mapping without `overwrite` is not handled yet
            todo!();
        }

        l3[l3i] = entry;
        unsafe {
            intrinsics::flush_tlb_entry(virt);
        }

        Ok(())
    }
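
    // Remove a single 4KiB entry, returning the physical page it mapped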
    fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        // TODO somehow drop tables if they're known to be empty?
        let mut l1 = self.l0.get_mut(l0i).ok_or(Error::DoesNotExist)?;
        let mut l2 = l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;

        let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;

        l3[l3i] = PageEntry::INVALID;
        unsafe {
            intrinsics::flush_tlb_entry(virt);
        }

        Ok(page)
    }
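
    // Read a single 4KiB entry, if present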
    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l1 = self.l0.get(l0i)?;
        let l2 = l1.get(l1i)?;
        let l3 = l2.get(l2i)?;

        let page = l3[l3i].as_page()?;

        Some((page, l3[l3i].attributes().into()))
    }
}