
use core::{fmt, marker::PhantomData, mem::size_of, ptr::NonNull};

use yggdrasil_abi::error::Error;

pub mod address;
pub mod table;
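
/// Interface for a physical page allocator.
///
/// # Example
///
/// A minimal sketch of an implementation backed by a bump pointer. The memory range and
/// the 4 KiB page size are illustrative assumptions, not something this interface
/// prescribes:
///
/// ```ignore
/// use core::sync::atomic::{AtomicU64, Ordering};
///
/// // Hypothetical start of a free physical memory region
/// static NEXT_PAGE: AtomicU64 = AtomicU64::new(0x10_0000);
///
/// struct BumpAllocator;
///
/// impl PhysicalMemoryAllocator for BumpAllocator {
///     type Address = u64;
///
///     fn allocate_page() -> Result<u64, Error> {
///         Self::allocate_contiguous_pages(1)
///     }
///
///     fn allocate_contiguous_pages(count: usize) -> Result<u64, Error> {
///         // Hands out pages sequentially and never checks for exhaustion
///         Ok(NEXT_PAGE.fetch_add(0x1000 * count as u64, Ordering::Relaxed))
///     }
///
///     unsafe fn free_page(_page: u64) {
///         // A bump allocator cannot reclaim pages; a real implementation would
///         // return the frame to a bitmap or a free list
///     }
/// }
/// ```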
pub trait PhysicalMemoryAllocator {
    /// Address type used by the allocator
    type Address;

    /// Allocates a single physical page
    fn allocate_page() -> Result<Self::Address, Error>;
    /// Allocates a contiguous range of physical pages, returning the address of the
    /// first page in the range
    fn allocate_contiguous_pages(count: usize) -> Result<Self::Address, Error>;

    /// # Safety
    ///
    /// Preconditions:
    ///
    /// * The page must have been obtained through [PhysicalMemoryAllocator::allocate_page] first;
    /// * The caller must guarantee the page will not be used after this call.
    unsafe fn free_page(page: Self::Address);
}

/// Describes the caching mode of a device memory mapping
#[derive(Debug, Default, Clone, Copy)]
pub enum DeviceMemoryCaching {
    /// The mapping is not cached
    #[default]
    None,
    /// The mapping is cacheable
    Cacheable,
}

/// Describes the attributes of a device memory mapping
#[derive(Default, Debug, Clone, Copy)]
pub struct DeviceMemoryAttributes {
    /// Caching mode of the mapping
    pub caching: DeviceMemoryCaching,
}

/// Describes a single device memory mapping
#[derive(Debug)]
pub struct RawDeviceMemoryMapping<A: KernelTableManager> {
    /// Physical base address of the object
    pub physical_base: u64,
    /// Virtual address of the mapped object
    pub address: usize,
    /// Base address of the mapping start
    pub base_address: usize,
    /// Page size used for the mapping
    pub page_size: usize,
    /// Number of pages used to map the object
    pub page_count: usize,

    _manager: PhantomData<A>,
}
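
/// Interface to the kernel's virtual memory translation tables.
///
/// # Example
///
/// A minimal sketch of an implementation for a kernel that keeps all of physical memory
/// mapped at a fixed offset. `PHYS_OFFSET` and the 4 KiB page size are illustrative
/// assumptions:
///
/// ```ignore
/// // Hypothetical base of the kernel's direct physical memory map
/// const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;
///
/// #[derive(Debug)]
/// struct DirectMap;
///
/// impl KernelTableManager for DirectMap {
///     fn virtualize(phys: u64) -> usize {
///         phys as usize + PHYS_OFFSET
///     }
///
///     fn physicalize(virt: usize) -> u64 {
///         (virt - PHYS_OFFSET) as u64
///     }
///
///     unsafe fn map_device_pages(
///         base: u64,
///         count: usize,
///         _attrs: DeviceMemoryAttributes,
///     ) -> Result<RawDeviceMemoryMapping<Self>, Error> {
///         // With a full direct map, no new translation entries are needed
///         let address = Self::virtualize(base);
///         Ok(unsafe {
///             RawDeviceMemoryMapping::from_raw_parts(base, address, address, count, 0x1000)
///         })
///     }
///
///     unsafe fn unmap_device_pages(_mapping: &RawDeviceMemoryMapping<Self>) {
///         // Nothing to tear down for a direct map
///     }
/// }
/// ```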
pub trait KernelTableManager: Sized + fmt::Debug {
    /// Converts a physical address into a virtual address usable by the kernel
    fn virtualize(phys: u64) -> usize;
    /// Converts a kernel virtual address back into its physical counterpart
    fn physicalize(virt: usize) -> u64;

    /// # Safety
    ///
    /// The caller must ensure the `base..base + count` region is not aliased by any other
    /// code and points to a valid chunk of device memory.
    unsafe fn map_device_pages(
        base: u64,
        count: usize,
        attrs: DeviceMemoryAttributes,
    ) -> Result<RawDeviceMemoryMapping<Self>, Error>;

    /// # Safety
    ///
    /// Only meant to be called from "safer" wrappers like [RawDeviceMemoryMapping].
    unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>);

    /// Releases a "virtualized" physical address.
    ///
    /// # Safety
    ///
    /// The address may become invalid after this call.
    /// The caller must guarantee the address will no longer be used.
    #[allow(unused)]
    unsafe fn unmap_physical_address(virt: usize) {}
}

impl<A: KernelTableManager> RawDeviceMemoryMapping<A> {
    /// Maps a region of physical memory as device memory of the given size.
    ///
    /// # Safety
    ///
    /// The caller must ensure proper access synchronization, as well as the address' origin.
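    ///
    /// # Example
    ///
    /// An illustrative sketch of mapping a hypothetical MMIO register block; the physical
    /// address, the size and the `Manager` type are assumptions, not fixed by this API:
    ///
    /// ```ignore
    /// let mapping = unsafe {
    ///     RawDeviceMemoryMapping::<Manager>::map(
    ///         0x0900_0000,
    ///         0x1000,
    ///         DeviceMemoryAttributes::default(),
    ///     )?
    /// };
    /// // Reinterpret the mapped region as a register structure
    /// let regs: NonNull<[u32; 4]> = mapping.as_non_null();
    /// ```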
    #[inline]
    pub unsafe fn map(
        base: u64,
        size: usize,
        attrs: DeviceMemoryAttributes,
    ) -> Result<Self, Error> {
        A::map_device_pages(base, size, attrs)
    }

    /// Consumes the device mapping, leaking its address without deallocating the
    /// translation mapping itself.
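    ///
    /// A short illustrative use, assuming `mapping` was created through
    /// [RawDeviceMemoryMapping::map]:
    ///
    /// ```ignore
    /// let address = mapping.leak();
    /// // The translation remains active, but is no longer tracked by a guard object
    /// ```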
    pub fn leak(self) -> usize {
        let address = self.address;
        core::mem::forget(self);
        address
    }
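
    /// Decomposes the mapping into its raw fields without unmapping it, in the order
    /// accepted by [RawDeviceMemoryMapping::from_raw_parts].
    ///
    /// A sketch of a round-trip, assuming `mapping` comes from
    /// [RawDeviceMemoryMapping::map] and `Manager` is some [KernelTableManager]:
    ///
    /// ```ignore
    /// let (physical_base, address, base_address, page_count, page_size) =
    ///     mapping.into_raw_parts();
    /// // ... later, reassemble the guard so the mapping is released on drop
    /// let mapping = unsafe {
    ///     RawDeviceMemoryMapping::<Manager>::from_raw_parts(
    ///         physical_base,
    ///         address,
    ///         base_address,
    ///         page_count,
    ///         page_size,
    ///     )
    /// };
    /// ```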
    pub fn into_raw_parts(self) -> (u64, usize, usize, usize, usize) {
        let physical_base = self.physical_base;
        let address = self.address;
        let base_address = self.base_address;
        let page_count = self.page_count;
        let page_size = self.page_size;
        core::mem::forget(self);
        (physical_base, address, base_address, page_count, page_size)
    }

    /// # Safety
    ///
    /// Preconditions: all the fields must come from a single
    /// [RawDeviceMemoryMapping::into_raw_parts] call.
    pub unsafe fn from_raw_parts(
        physical_base: u64,
        address: usize,
        base_address: usize,
        page_count: usize,
        page_size: usize,
    ) -> Self {
        Self {
            physical_base,
            address,
            base_address,
            page_count,
            page_size,
            _manager: PhantomData,
        }
    }
/// "Casts" the mapping to a specific type T and returns a [NonNull] pointer to it
pub fn as_non_null<T>(&self) -> NonNull<T> {
if self.page_size * self.page_count < size_of::<T>() {
panic!();
}
unsafe { NonNull::new_unchecked(self.address as *mut T) }
}
}

impl<A: KernelTableManager> Drop for RawDeviceMemoryMapping<A> {
    fn drop(&mut self) {
        unsafe {
            A::unmap_device_pages(self);
        }
    }
}