use core::ops::{Deref, Range};
use alloc::sync::Arc;
use kernel_arch::ProcessAddressSpaceImpl;
use libk_mm_interface::{
address::PhysicalAddress,
process::ProcessAddressSpaceManager,
table::{MapAttributes, TableAllocator},
};
use libk_util::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
use vmalloc::{RangeData, VirtualMemoryAllocator};
use yggdrasil_abi::error::Error;
use crate::{
phys,
pointer::{PhysicalRef, PhysicalRefMut},
PageProvider, TableAllocatorImpl, L3_PAGE_SIZE,
};
/// Describes how the physical memory is provided for the mapping
#[derive(Clone)]
pub enum VirtualRangeBacking {
/// Memory is taken from regular "anonymous" physical memory
Anonymous,
/// Mapping is backed by file blocks/device memory
File(FileBacking),
}
/// Wrapper type for ensuring the translation table cannot be modified while performing accesses
/// to the inner [PhysicalAddress].
pub struct TranslateGuard<'a, TA: TableAllocator> {
address: PhysicalAddress,
_guard: IrqSafeSpinlockGuard<'a, Inner<TA>>,
}
/// Describes a file-backed memory range provider
#[derive(Clone)]
pub struct FileBacking {
offset: u64,
file: Arc<dyn PageProvider>,
}
impl VirtualRangeBacking {
/// Creates a file-backed memory range provider
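///
/// # Example
///
/// A minimal sketch (not compiled as a doctest); `provider` stands for any
/// `Arc<dyn PageProvider>` implementation and is not defined in this crate:
///
/// ```ignore
/// // Back a memory range with the provider's pages, starting at byte offset 0
/// let backing = VirtualRangeBacking::file(0, provider.clone())?;
/// ```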
pub fn file(offset: u64, file: Arc<dyn PageProvider>) -> Result<Self, Error> {
// XXX: the offset is expected to be L3 page aligned, but the check is currently disabled
// if !(offset as usize).is_page_aligned_for::<L3>() {
//     todo!();
// }
Ok(Self::File(FileBacking { offset, file }))
}
/// Creates an anonymous memory range provider
pub fn anonymous() -> Self {
Self::Anonymous
}
}
impl PageProvider for VirtualRangeBacking {
fn get_page(&self, offset: u64) -> Result<PhysicalAddress, Error> {
match self {
Self::Anonymous => phys::alloc_page(),
Self::File(f) => f.file.get_page(f.offset + offset),
}
}
fn release_page(&self, offset: u64, phys: PhysicalAddress) -> Result<(), Error> {
match self {
Self::Anonymous => unsafe {
phys::free_page(phys);
Ok(())
},
Self::File(f) => f.file.release_page(f.offset + offset, phys),
}
}
fn clone_page(
&self,
_offset: u64,
src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
match self {
Self::Anonymous => {
let dst_page = phys::alloc_page()?;
let src_map = unsafe { PhysicalRef::<[u8; L3_PAGE_SIZE]>::map(src_phys) };
let mut dst_map = unsafe { PhysicalRefMut::<[u8; L3_PAGE_SIZE]>::map(dst_page) };
dst_map.copy_from_slice(src_map.as_ref());
Ok(dst_page)
}
Self::File(_) => todo!(),
}
}
}
impl PartialEq for VirtualRangeBacking {
fn eq(&self, other: &Self) -> bool {
matches!(self, Self::Anonymous) && matches!(other, Self::Anonymous)
}
}
impl Eq for VirtualRangeBacking {}
impl RangeData for VirtualRangeBacking {}
impl core::fmt::Debug for VirtualRangeBacking {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Anonymous => f.debug_struct("VirtualRangeBacking::Anonymous").finish(),
Self::File(fb) => f
.debug_struct("VirtualRangeBacking::File")
.field("offset", &fb.offset)
.finish(),
}
}
}
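/// Lock-protected state of a [ProcessAddressSpace]: the virtual range allocator together
/// with the process translation table.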
struct Inner<TA: TableAllocator> {
allocator: VirtualMemoryAllocator<VirtualRangeBacking>,
table: ProcessAddressSpaceImpl<TA>,
}
/// Data structure for managing the address translation and allocation for a single process
pub struct ProcessAddressSpace<TA: TableAllocator = TableAllocatorImpl> {
inner: IrqSafeSpinlock<Inner<TA>>,
}
impl<TA: TableAllocator> Inner<TA> {
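/// Maps `page_count` pages starting at `address`, requesting each page from `backing`.
///
/// On failure, returns the number of pages that were successfully mapped along with the
/// error; rolling those pages back is left to the caller.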
fn try_map_pages(
&mut self,
address: usize,
page_count: usize,
backing: &VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<(), (usize, Error)> {
for i in 0..page_count {
let offset = (i * L3_PAGE_SIZE) as u64;
let virt = address + i * L3_PAGE_SIZE;
let phys = match backing.get_page(offset) {
Ok(page) => page,
Err(err) => {
return Err((i, err));
}
};
if let Err(err) = unsafe { self.table.map_page(virt, phys, attributes) } {
backing.release_page(offset, phys).unwrap();
return Err((i, err));
}
}
Ok(())
}
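/// Undoes a partially completed mapping: frees the `region_size`-page allocation starting
/// at `start_pfn`, unmapping and releasing only the first `pages_mapped` pages, which are
/// the ones that were actually mapped.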
unsafe fn rollback_allocation(
&mut self,
start_pfn: usize,
pages_mapped: usize,
region_size: usize,
) {
let unmap_range = start_pfn..start_pfn + pages_mapped;
self.allocator
.free(start_pfn, region_size, |origin_pfn, pfn_range, backing| {
for pfn in pfn_range {
if unmap_range.contains(&pfn) {
let offset = (pfn - origin_pfn) * L3_PAGE_SIZE;
let virt = pfn * L3_PAGE_SIZE;
let phys = self.table.unmap_page(virt)?;
backing.release_page(offset as u64, phys)?;
}
}
Ok(())
})
.unwrap();
}
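/// Inserts a `page_count`-page range at a fixed `address` into the allocator and maps its
/// pages, rolling the allocation back if any page fails to map.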
fn map_range(
&mut self,
address: usize,
page_count: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<(), Error> {
// If inserting fails, the range cannot be mapped
let start_pfn = address / L3_PAGE_SIZE;
self.allocator
.insert(start_pfn, page_count, backing.clone())?;
if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
{
debug_assert!(mapped < page_count);
unsafe {
self.rollback_allocation(start_pfn, mapped, page_count);
}
return Err(error);
};
Ok(())
}
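/// Inserts and maps a single page at `address`, returning the physical page backing it.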
fn map_single(
&mut self,
address: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<PhysicalAddress, Error> {
let start_pfn = address / L3_PAGE_SIZE;
self.allocator.insert(start_pfn, 1, backing.clone())?;
let phys = match backing.get_page(0) {
Ok(page) => page,
Err(err) => {
// Nothing to unmap or release, as no page has been mapped into this range yet
self.allocator.free(start_pfn, 1, |_, _, _| Ok(())).unwrap();
return Err(err);
}
};
if let Err(err) = unsafe { self.table.map_page(address, phys, attributes) } {
self.allocator
.free(start_pfn, 1, |_, _, _| {
// Deallocate the page, but do not unmap, as the mapping failed
unsafe {
phys::free_page(phys);
}
Ok(())
})
.unwrap();
return Err(err);
}
Ok(phys)
}
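/// Allocates a free `page_count`-page virtual range, maps its pages and returns the
/// range's base address, rolling the allocation back if any page fails to map.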
fn alloc_range(
&mut self,
page_count: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<usize, Error> {
let start_pfn = self.allocator.allocate(page_count, backing.clone())?;
let address = start_pfn * L3_PAGE_SIZE;
if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
{
debug_assert!(mapped < page_count);
unsafe {
self.rollback_allocation(start_pfn, mapped, page_count);
}
return Err(error);
};
Ok(address)
}
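/// Unmaps a `page_count`-page range starting at `start_address`, returning each page to
/// its backing and removing the range from the allocator.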
unsafe fn unmap_range(&mut self, start_address: usize, page_count: usize) -> Result<(), Error> {
let start_pfn = start_address / L3_PAGE_SIZE;
self.allocator
.free(start_pfn, page_count, |origin_pfn, pfn_range, backing| {
for pfn in pfn_range {
let offset = ((pfn - origin_pfn) * L3_PAGE_SIZE) as u64;
let virt = pfn * L3_PAGE_SIZE;
let phys = self.table.unmap_page(virt)?;
backing.release_page(offset, phys)?;
}
Ok(())
})?;
Ok(())
}
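/// Unmaps and releases every allocation in the address space, then drops the translation
/// tables.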
unsafe fn clear(&mut self) -> Result<(), Error> {
self.allocator.clear(|pfn_range, backing| {
let origin_pfn = pfn_range.start;
for pfn in pfn_range {
let offset = ((pfn - origin_pfn) * L3_PAGE_SIZE) as u64;
let virt = pfn * L3_PAGE_SIZE;
let phys = unsafe { self.table.unmap_page(virt)? };
backing.release_page(offset, phys)?;
}
Ok(())
})?;
// Drop the tables
self.table.clear();
Ok(())
}
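/// Clones a single region of `source` into this address space, letting the backing decide
/// how each mapped page is cloned (e.g. copied for anonymous memory).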
fn clone_range(
&mut self,
source: &Self,
pfn_range: Range<usize>,
backing: &VirtualRangeBacking,
) -> Result<(), Error> {
self.allocator
.insert(pfn_range.start, pfn_range.len(), backing.clone())
.unwrap();
let start = pfn_range.start * L3_PAGE_SIZE;
let end = pfn_range.end * L3_PAGE_SIZE;
log::debug!("clone_range({:#x?})", start..end);
for i in pfn_range {
let address = i * L3_PAGE_SIZE;
let offset = (address - start) as u64;
let (src_page, attrs) = source.table.translate(address).unwrap();
let dst_page = backing.clone_page(offset, src_page, attrs)?;
unsafe {
self.table.map_page(address, dst_page, attrs).unwrap();
}
}
Ok(())
}
}
impl<TA: TableAllocator> ProcessAddressSpace<TA> {
/// Constructs a new [ProcessAddressSpace]
pub fn new() -> Result<Self, Error> {
let table = ProcessAddressSpaceImpl::new()?;
let allocator = VirtualMemoryAllocator::new(
ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
);
let (physical, asid) = table.as_address_with_asid();
log::debug!("New AddressSpace {:#x}, asid {:#x}", physical, asid);
Ok(Self {
inner: IrqSafeSpinlock::new(Inner { table, allocator }),
})
}
/// Performs a "fork" operation of the address space, cloning all the mappings into a new one
pub fn fork(&self) -> Result<Self, Error> {
let src_inner = self.inner.lock();
let new_table = ProcessAddressSpaceImpl::new()?;
let mut new_inner = Inner {
allocator: VirtualMemoryAllocator::new(
ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
),
table: new_table,
};
log::debug!("fork address space!");
for (range, backing) in src_inner.allocator.regions() {
// If they are present in the existing allocator, there should be no
// problem adding them to the new one
new_inner.clone_range(&src_inner, range, backing)?;
}
for (range, _) in new_inner.allocator.regions() {
let start = range.start * L3_PAGE_SIZE;
let end = range.end * L3_PAGE_SIZE;
log::debug!("forked region: {:#x?}", start..end);
}
Ok(Self {
inner: IrqSafeSpinlock::new(new_inner),
})
}
/// Allocates a region of virtual memory within the address space and maps its pages to the
/// ones returned by the backing's `get_page` function
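///
/// # Example
///
/// A minimal sketch (not compiled as a doctest); `space` is an existing
/// [ProcessAddressSpace] and `attrs` stands for whatever [MapAttributes] the caller needs:
///
/// ```ignore
/// // Allocate and map four anonymous pages, then unmap them again
/// let address = space.allocate(None, 4 * L3_PAGE_SIZE, VirtualRangeBacking::anonymous(), attrs)?;
/// unsafe { space.unmap(address, 4 * L3_PAGE_SIZE)? };
/// ```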
pub fn allocate(
&self,
_hint: Option<usize>,
size: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<usize, Error> {
assert_eq!(size & (L3_PAGE_SIZE - 1), 0);
let mut lock = self.inner.lock();
lock.alloc_range(size / L3_PAGE_SIZE, backing, attributes)
}
/// Maps a region of memory in the address space
pub fn map(
&self,
address: usize,
size: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<(), Error> {
assert_eq!(address & (L3_PAGE_SIZE - 1), 0);
assert_eq!(size & (L3_PAGE_SIZE - 1), 0);
let mut lock = self.inner.lock();
lock.map_range(address, size / L3_PAGE_SIZE, backing, attributes)
}
/// Adds a single-page mapping to the address space
pub fn map_single(
&self,
address: usize,
backing: VirtualRangeBacking,
attributes: MapAttributes,
) -> Result<PhysicalAddress, Error> {
assert_eq!(address & (L3_PAGE_SIZE - 1), 0);
self.inner.lock().map_single(address, backing, attributes)
}
/// Returns the [PhysicalAddress] associated with the given virtual `address`,
/// if one is mapped
pub fn translate(&self, address: usize) -> Result<PhysicalAddress, Error> {
// Offset is handled at impl level
self.inner.lock().table.translate(address).map(|e| e.0)
}
/// Same as [ProcessAddressSpace::translate], except the lock on the address space is held
/// until the resulting [TranslateGuard] is dropped.
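///
/// A minimal sketch (not compiled as a doctest), where `space` and `virt` are placeholders:
///
/// ```ignore
/// let phys = space.translate_lock(virt)?;
/// // `*phys` is the PhysicalAddress; the address space stays locked until `phys` is dropped
/// ```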
pub fn translate_lock(&self, address: usize) -> Result<TranslateGuard<TA>, Error> {
let guard = self.inner.lock();
let address = guard.table.translate(address).map(|e| e.0)?;
Ok(TranslateGuard {
address,
_guard: guard,
})
}
/// Removes the mappings for the given page-aligned virtual address range from the address space.
///
/// See [ProcessAddressSpaceManager::unmap].
///
/// # Safety
///
/// The caller must ensure the process to which this address space belongs does not and
/// will not access this page.
pub unsafe fn unmap(&self, address: usize, size: usize) -> Result<(), Error> {
assert_eq!(address & (L3_PAGE_SIZE - 1), 0);
assert_eq!(size & (L3_PAGE_SIZE - 1), 0);
let mut lock = self.inner.lock();
lock.unmap_range(address, size / L3_PAGE_SIZE)
}
/// Returns the physical address of the translation table along with its ASID
pub fn as_address_with_asid(&self) -> (u64, u64) {
self.inner.lock().table.as_address_with_asid()
}
/// Removes all allocations and their associated mappings from the address space
pub fn clear(&self) -> Result<(), Error> {
let mut inner = self.inner.lock();
unsafe { inner.clear() }
}
}
impl<TA: TableAllocator> Drop for ProcessAddressSpace<TA> {
fn drop(&mut self) {
let (physical, asid) = self.as_address_with_asid();
log::debug!("Drop AddressSpace {:#x}, asid {:#x}", physical, asid);
self.clear().ok();
}
}
impl<TA: TableAllocator> Deref for TranslateGuard<'_, TA> {
type Target = PhysicalAddress;
fn deref(&self) -> &Self::Target {
&self.address
}
}