yggdrasil/src/mem/process.rs
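
//! Process virtual memory address space management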

use abi::error::Error;
use cfg_if::cfg_if;
use kernel_util::sync::IrqSafeSpinlock;
use vmalloc::VirtualMemoryAllocator;

use crate::mem::phys;

use super::{table::MapAttributes, PhysicalAddress};

cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
        use crate::arch::aarch64::mem::process::ProcessAddressSpaceImpl;
    } else if #[cfg(target_arch = "x86_64")] {
        use crate::arch::x86_64::mem::process::ProcessAddressSpaceImpl;
    }
}

/// Interface for virtual memory address space management
pub trait ProcessAddressSpaceManager: Sized {
    /// Size of a single page in this address space
    const PAGE_SIZE: usize;
    /// Lower bound on page frame numbers available to the virtual range allocator
    const LOWER_LIMIT_PFN: usize;
    /// Upper bound on page frame numbers available to the virtual range allocator
    const UPPER_LIMIT_PFN: usize;

    /// Constructs a new, empty address space
    fn new() -> Result<Self, Error>;

    /// Places a single PAGE_SIZE mapping into the address space.
    ///
    /// # Safety
    ///
    /// The caller must ensure the correct origin of the physical address being mapped.
    unsafe fn map_page(
        &mut self,
        address: usize,
        physical: PhysicalAddress,
        flags: MapAttributes,
    ) -> Result<(), Error>;

    /// Removes a single PAGE_SIZE mapping from the address space.
    ///
    /// # Safety
    ///
    /// The caller must ensure the process to which this address space belongs does not and
    /// will not access this page.
    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error>;

    /// Returns the physical address and mapping attributes of the given virtual address
    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;

    /// Returns the translation table address combined with the address space ID (ASID)
    fn as_address_with_asid(&self) -> u64;
}
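
/// Lock-protected state of a [ProcessAddressSpace]: the virtual range allocator and the
/// architecture-specific translation table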
struct Inner {
    allocator: VirtualMemoryAllocator,
    table: ProcessAddressSpaceImpl,
}
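
/// Process virtual address space: wraps the architecture-specific translation table and a
/// virtual page range allocator behind an IRQ-safe spinlock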
pub struct ProcessAddressSpace {
    inner: IrqSafeSpinlock<Inner>,
}

impl Inner {
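    /// Maps `page_count` pages starting at `address`, requesting each physical page from
    /// `get_page`. On failure, returns the index of the page that could not be mapped
    /// together with the error.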
    fn try_map_pages<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
        &mut self,
        address: usize,
        page_count: usize,
        get_page: F,
        attributes: MapAttributes,
    ) -> Result<(), (usize, Error)> {
        for i in 0..page_count {
            let virt = address + i * ProcessAddressSpaceImpl::PAGE_SIZE;
            let phys = match get_page(virt) {
                Ok(page) => page,
                Err(err) => {
                    return Err((i, err));
                }
            };

            if let Err(err) = unsafe { self.table.map_page(virt, phys, attributes) } {
                return Err((i, err));
            }
        }

        Ok(())
    }
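
    /// Reserves the fixed range `[address, address + page_count * PAGE_SIZE)` in the
    /// allocator and maps it, using `get_page` to obtain the physical pages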
    fn map_range<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
        &mut self,
        address: usize,
        page_count: usize,
        get_page: F,
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        // If inserting fails, the range cannot be mapped
        let start_pfn = address / ProcessAddressSpaceImpl::PAGE_SIZE;
        self.allocator.insert(start_pfn, page_count)?;

        if let Err(_e) = self.try_map_pages(address, page_count, get_page, attributes) {
            // TODO rollback & remove the range
            todo!();
        };

        Ok(())
    }
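
    /// Allocates a free range of `page_count` pages from the allocator, maps it using
    /// `get_page` to obtain the physical pages, and returns its starting virtual address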
    fn alloc_range<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
        &mut self,
        page_count: usize,
        get_page: F,
        attributes: MapAttributes,
    ) -> Result<usize, Error> {
        let start_pfn = self.allocator.allocate(page_count)?;
        let address = start_pfn * ProcessAddressSpaceImpl::PAGE_SIZE;

        if let Err(_e) = self.try_map_pages(address, page_count, get_page, attributes) {
            // TODO rollback
            todo!();
        };

        Ok(address)
    }
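
    /// Releases `page_count` pages starting at `start_address` from the allocator, unmaps
    /// them and passes each freed physical page to `free_page`.
    ///
    /// # Safety
    ///
    /// The caller must ensure the process no longer accesses the range being unmapped.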
    unsafe fn unmap_range<F: Fn(usize, PhysicalAddress)>(
        &mut self,
        start_address: usize,
        page_count: usize,
        free_page: F,
    ) -> Result<(), Error> {
        let start_pfn = start_address / ProcessAddressSpaceImpl::PAGE_SIZE;

        // Deallocate the range first
        self.allocator.free(start_pfn, page_count)?;

        // Then unmap it from the table
        for i in 0..page_count {
            let virt = start_address + i * ProcessAddressSpaceImpl::PAGE_SIZE;
            // This should not fail under normal circumstances
            // TODO handle failures here?
            let phys = self.table.unmap_page(virt).unwrap();

            free_page(virt, phys);
        }

        Ok(())
    }
}

impl ProcessAddressSpace {
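    /// Constructs a new, empty process address space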
    pub fn new() -> Result<Self, Error> {
        let table = ProcessAddressSpaceImpl::new()?;
        let allocator = VirtualMemoryAllocator::new(
            ProcessAddressSpaceImpl::LOWER_LIMIT_PFN,
            ProcessAddressSpaceImpl::UPPER_LIMIT_PFN,
        );
        Ok(Self {
            inner: IrqSafeSpinlock::new(Inner { table, allocator }),
        })
    }
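
    /// Allocates a contiguous virtual memory range of `size` bytes and maps it to physical
    /// pages provided by `get_page`, returning the start address of the range.
    ///
    /// `size` must be a multiple of the page size. The allocation hint is currently ignored.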
    pub fn allocate<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
        &self,
        _hint: Option<usize>,
        size: usize,
        get_page: F,
        attributes: MapAttributes,
    ) -> Result<usize, Error> {
        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.alloc_range(
            size / ProcessAddressSpaceImpl::PAGE_SIZE,
            get_page,
            attributes,
        )
    }
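
    /// Maps a fixed virtual memory range of `size` bytes starting at `address` to physical
    /// pages provided by `get_page`.
    ///
    /// Both `address` and `size` must be aligned to the page size.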
    pub fn map<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
        &self,
        address: usize,
        size: usize,
        get_page: F,
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.map_range(
            address,
            size / ProcessAddressSpaceImpl::PAGE_SIZE,
            get_page,
            attributes,
        )
    }
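
    /// Maps a single page at `address` to the given `physical` page.
    ///
    /// `address` must be aligned to the page size.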
    pub fn map_single(
        &self,
        address: usize,
        physical: PhysicalAddress,
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
        // XXX
        // assert!(physical.is_aligned_for::<L3>());

        self.inner
            .lock()
            .map_range(address, 1, |_| Ok(physical), attributes)
    }
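
    /// Returns the physical address the given virtual `address` is mapped to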
    pub fn translate(&self, address: usize) -> Result<PhysicalAddress, Error> {
        // Offset is handled at impl level
        self.inner.lock().table.translate(address).map(|e| e.0)
    }

    /// Removes the mappings for a `size`-byte range starting at `address` and frees the
    /// physical pages backing it.
    ///
    /// See [ProcessAddressSpaceManager::unmap_page].
    ///
    /// # Safety
    ///
    /// The caller must ensure the process to which this address space belongs does not and
    /// will not access these pages.
    pub unsafe fn unmap(&self, address: usize, size: usize) -> Result<(), Error> {
        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.unmap_range(
            address,
            size / ProcessAddressSpaceImpl::PAGE_SIZE,
            |_, paddr| phys::free_page(paddr),
        )
    }
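
    /// Returns the translation table address combined with the address space ID (ASID)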
    pub fn as_address_with_asid(&self) -> u64 {
        self.inner.lock().table.as_address_with_asid()
    }
}