use core::ops::Range;

use kernel_arch::ProcessAddressSpaceImpl;
use libk_mm_interface::{
    address::PhysicalAddress,
    process::ProcessAddressSpaceManager,
    table::{MapAttributes, TableAllocator},
};
use libk_util::sync::IrqSafeSpinlock;
use vmalloc::{RangeData, VirtualMemoryAllocator};
use yggdrasil_abi::error::Error;

use crate::{phys, PageProvider, TableAllocatorImpl, L3_PAGE_SIZE};
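
/// State protected by a [ProcessAddressSpace]'s lock: the virtual page range allocator paired
/// with the architecture-specific translation table it keeps in sync.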
struct Inner<PP: PageProvider + RangeData, TA: TableAllocator> {
    allocator: VirtualMemoryAllocator<PP>,
    table: ProcessAddressSpaceImpl<TA>,
}

/// Data structure for managing the address translation and allocation for a single process
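///
/// `PP` supplies the physical pages that back each mapping (see [PageProvider]), while `TA`
/// provides allocation of translation-table pages for the architecture-specific
/// [ProcessAddressSpaceImpl].
///
/// # Example
///
/// A minimal usage sketch, assuming some `PageProvider + RangeData` implementation named
/// `AnonymousBacking` (hypothetical, not part of this module) and a suitable `attributes`
/// value:
///
/// ```ignore
/// let space: ProcessAddressSpace<AnonymousBacking> = ProcessAddressSpace::new()?;
/// // Reserve and map a 16-page region; the allocator picks the virtual address.
/// let virt = space.allocate(None, 16 * L3_PAGE_SIZE, AnonymousBacking::default(), attributes)?;
/// // Look up the physical page behind the first page of the region.
/// let phys = space.translate(virt)?;
/// // Release the region once the process can no longer access it.
/// unsafe { space.unmap(virt, 16 * L3_PAGE_SIZE)? };
/// ```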
pub struct ProcessAddressSpace<
    PP: PageProvider + RangeData,
    TA: TableAllocator = TableAllocatorImpl,
> {
    inner: IrqSafeSpinlock<Inner<PP, TA>>,
}

impl<PP: PageProvider + RangeData, TA: TableAllocator> Inner<PP, TA> {
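    // Maps `page_count` pages starting at `address`, taking the physical page for each one from
    // `backing` at the matching byte offset. On failure, returns the index of the page that could
    // not be mapped so the caller can roll back exactly the pages mapped so far.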
    fn try_map_pages(
        &mut self,
        address: usize,
        page_count: usize,
        backing: &PP,
        attributes: MapAttributes,
    ) -> Result<(), (usize, Error)> {
        for i in 0..page_count {
            let offset = (i * L3_PAGE_SIZE) as u64;
            let virt = address + i * L3_PAGE_SIZE;
            let phys = match backing.get_page(offset) {
                Ok(page) => page,
                Err(err) => {
                    return Err((i, err));
                }
            };

            if let Err(err) = unsafe { self.table.map_page(virt, phys, attributes) } {
                backing.release_page(offset, phys).unwrap();
                return Err((i, err));
            }
        }

        Ok(())
    }
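
    // Releases a partially mapped region: the whole `region_size`-page reservation is returned to
    // the allocator, but only the first `pages_mapped` pages are unmapped and handed back to the
    // backing object, since the rest were never mapped.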
    unsafe fn rollback_allocation(
        &mut self,
        start_pfn: usize,
        pages_mapped: usize,
        region_size: usize,
    ) {
        let unmap_range = start_pfn..start_pfn + pages_mapped;
        self.allocator
            .free(start_pfn, region_size, |origin_pfn, pfn_range, backing| {
                for pfn in pfn_range {
                    if unmap_range.contains(&pfn) {
                        let offset = (pfn - origin_pfn) * L3_PAGE_SIZE;
                        let virt = pfn * L3_PAGE_SIZE;

                        let phys = self.table.unmap_page(virt)?;

                        backing.release_page(offset as u64, phys)?;
                    }
                }

                Ok(())
            })
            .unwrap();
    }
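
    // Reserves `page_count` pages at the fixed `address` in the allocator, then maps them from
    // `backing`; if mapping fails partway, the reservation and any pages already mapped are
    // rolled back.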
    fn map_range(
        &mut self,
        address: usize,
        page_count: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        // If inserting fails, the range cannot be mapped
        let start_pfn = address / L3_PAGE_SIZE;
        self.allocator
            .insert(start_pfn, page_count, backing.clone())?;

        if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
        {
            debug_assert!(mapped < page_count);
            unsafe {
                self.rollback_allocation(start_pfn, mapped, page_count);
            }
            return Err(error);
        };

        Ok(())
    }
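
    // Single-page variant of `map_range`: reserves one page at `address`, maps offset 0 of
    // `backing` there and returns the physical address on success.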
    fn map_single(
        &mut self,
        address: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<PhysicalAddress, Error> {
        let start_pfn = address / L3_PAGE_SIZE;
        self.allocator.insert(start_pfn, 1, backing.clone())?;

        let phys = match backing.get_page(0) {
            Ok(page) => page,
            Err(err) => {
                // Do nothing, as the page has not been allocated to this range yet
                self.allocator.free(start_pfn, 1, |_, _, _| Ok(())).unwrap();
                return Err(err);
            }
        };

        if let Err(err) = unsafe { self.table.map_page(address, phys, attributes) } {
            self.allocator
                .free(start_pfn, 1, |_, _, _| {
                    // Deallocate the page, but do not unmap, as the mapping failed
                    unsafe {
                        phys::free_page(phys);
                    }
                    Ok(())
                })
                .unwrap();
            return Err(err);
        }

        Ok(phys)
    }
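
    // Like `map_range`, but lets the allocator choose a free region of `page_count` pages;
    // returns the virtual address of the start of the region.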
    fn alloc_range(
        &mut self,
        page_count: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<usize, Error> {
        let start_pfn = self.allocator.allocate(page_count, backing.clone())?;
        let address = start_pfn * L3_PAGE_SIZE;

        if let Err((mapped, error)) = self.try_map_pages(address, page_count, &backing, attributes)
        {
            debug_assert!(mapped < page_count);
            unsafe {
                self.rollback_allocation(start_pfn, mapped, page_count);
            }
            return Err(error);
        };

        Ok(address)
    }
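
    // Unmaps `page_count` pages starting at `start_address`, returning each physical page to its
    // backing object and releasing the range from the allocator.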
    unsafe fn unmap_range(&mut self, start_address: usize, page_count: usize) -> Result<(), Error> {
        let start_pfn = start_address / L3_PAGE_SIZE;

        self.allocator
            .free(start_pfn, page_count, |origin_pfn, pfn_range, backing| {
                for pfn in pfn_range {
                    let offset = ((pfn - origin_pfn) * L3_PAGE_SIZE) as u64;

                    let virt = pfn * L3_PAGE_SIZE;
                    let phys = self.table.unmap_page(virt)?;

                    backing.release_page(offset, phys)?;
                }

                Ok(())
            })?;

        Ok(())
    }
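
    // Tears down the whole address space: every region is unmapped and released back to its
    // backing object, then the translation tables themselves are dropped.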
    unsafe fn clear(&mut self) -> Result<(), Error> {
        self.allocator.clear(|pfn_range, backing| {
            let origin_pfn = pfn_range.start;
            for pfn in pfn_range {
                let offset = ((pfn - origin_pfn) * L3_PAGE_SIZE) as u64;

                let virt = pfn * L3_PAGE_SIZE;
                let phys = unsafe { self.table.unmap_page(virt)? };

                backing.release_page(offset, phys)?;
            }

            Ok(())
        })?;

        // Drop the tables
        self.table.clear();

        Ok(())
    }
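
    // Used by `fork`: reserves the same PFN range in this address space and asks `backing` to
    // clone each source page (according to its attributes), mapping the clone at the same
    // virtual address.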
    fn clone_range(
        &mut self,
        source: &Self,
        pfn_range: Range<usize>,
        backing: &PP,
    ) -> Result<(), Error> {
        self.allocator
            .insert(pfn_range.start, pfn_range.len(), backing.clone())
            .unwrap();

        let start = pfn_range.start * L3_PAGE_SIZE;
        let end = pfn_range.end * L3_PAGE_SIZE;

        log::debug!("clone_range({:#x?})", start..end);

        for i in pfn_range {
            let address = i * L3_PAGE_SIZE;
            let offset = (address - start) as u64;
            let (src_page, attrs) = source.table.translate(address).unwrap();
            let dst_page = backing.clone_page(offset, src_page, attrs)?;
            unsafe {
                self.table.map_page(address, dst_page, attrs).unwrap();
            }
        }

        Ok(())
    }
}

impl<PP: PageProvider + RangeData, TA: TableAllocator> ProcessAddressSpace<PP, TA> {
    /// Constructs a new [ProcessAddressSpace]
    pub fn new() -> Result<Self, Error> {
        let table = ProcessAddressSpaceImpl::new()?;
        let allocator = VirtualMemoryAllocator::new(
            ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
            ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
        );
        Ok(Self {
            inner: IrqSafeSpinlock::new(Inner { table, allocator }),
        })
    }

    /// Performs a "fork" operation of the address space, cloning all the mappings into a new one
    pub fn fork(&self) -> Result<Self, Error> {
        let src_inner = self.inner.lock();
        let new_table = ProcessAddressSpaceImpl::new()?;
        let mut new_inner = Inner {
            allocator: VirtualMemoryAllocator::new(
                ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
                ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
            ),
            table: new_table,
        };

        log::debug!("fork address space!");

        for (range, backing) in src_inner.allocator.regions() {
            // If a range is present in the source allocator, there should be no
            // problem inserting it into the new one
            new_inner.clone_range(&src_inner, range, backing)?;
        }

        for (range, _) in new_inner.allocator.regions() {
            let start = range.start * L3_PAGE_SIZE;
            let end = range.end * L3_PAGE_SIZE;
            log::debug!("forked region: {:#x?}", start..end);
        }

        Ok(Self {
            inner: IrqSafeSpinlock::new(new_inner),
        })
    }

    /// Allocates a region of virtual memory within the address space and maps its pages to the
    /// ones returned by the backing's `get_page` function. The `_hint` parameter is currently
    /// ignored.
    pub fn allocate(
        &self,
        _hint: Option<usize>,
        size: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<usize, Error> {
        assert_eq!(size & (L3_PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.alloc_range(size / L3_PAGE_SIZE, backing, attributes)
    }

    /// Maps a region of memory in the address space
    pub fn map(
        &self,
        address: usize,
        size: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<(), Error> {
        assert_eq!(address & (L3_PAGE_SIZE - 1), 0);
        assert_eq!(size & (L3_PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.map_range(address, size / L3_PAGE_SIZE, backing, attributes)
    }

    /// Adds a single-page mapping to the address space
    pub fn map_single(
        &self,
        address: usize,
        backing: PP,
        attributes: MapAttributes,
    ) -> Result<PhysicalAddress, Error> {
        assert_eq!(address & (L3_PAGE_SIZE - 1), 0);

        self.inner.lock().map_single(address, backing, attributes)
    }

    /// Returns the [PhysicalAddress] associated with the given virtual `address`,
    /// if one is mapped
    pub fn translate(&self, address: usize) -> Result<PhysicalAddress, Error> {
        // Offset is handled at impl level
        self.inner.lock().table.translate(address).map(|e| e.0)
    }

    /// Removes a range of page mappings from the address space.
    ///
    /// See [ProcessAddressSpaceManager::unmap].
    ///
    /// # Safety
    ///
    /// The caller must ensure the process to which this address space belongs does not and
    /// will not access these pages.
    pub unsafe fn unmap(&self, address: usize, size: usize) -> Result<(), Error> {
        assert_eq!(address & (L3_PAGE_SIZE - 1), 0);
        assert_eq!(size & (L3_PAGE_SIZE - 1), 0);

        let mut lock = self.inner.lock();

        lock.unmap_range(address, size / L3_PAGE_SIZE)
    }

    /// Returns the physical address of this table, with ASID applied
    pub fn as_address_with_asid(&self) -> u64 {
        self.inner.lock().table.as_address_with_asid()
    }

    /// Removes all allocations and their associated mappings from the address space
    pub fn clear(&self) -> Result<(), Error> {
        let mut inner = self.inner.lock();
        unsafe { inner.clear() }
    }
}

impl<PP: PageProvider + RangeData, TA: TableAllocator> Drop for ProcessAddressSpace<PP, TA> {
    fn drop(&mut self) {
        self.clear().ok();
    }
}