refactor: move memory API outside of the main kernel
This commit is contained in:
parent 352c68e31e
commit 4d23cc5c74

@@ -6,3 +6,4 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
@@ -1,7 +1,21 @@
use yggdrasil_abi::error::Error;

use crate::mem::{address::PhysicalAddress, device::RawDeviceMemoryMapping};

extern "Rust" {
    pub fn __acquire_irq_guard() -> bool;
    pub fn __release_irq_guard(mask: bool);

    pub fn __suspend();
    pub fn __yield();
    pub fn __allocate_2m_page() -> u64;
    pub fn __allocate_page() -> u64;
    pub fn __free_page(page: u64);

    pub fn __virtualize(phys: u64) -> usize;
    pub fn __physicalize(virt: usize) -> u64;

    pub fn __map_device_pages(
        base: PhysicalAddress,
        count: usize,
    ) -> Result<RawDeviceMemoryMapping, Error>;
    pub fn __unmap_device_pages(mapping: &RawDeviceMemoryMapping);
}
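The `extern "Rust"` block above is the entire contract between `kernel-util` and the kernel proper: the symbols are declared here and resolved only at link time, which is what lets the memory API live outside the main kernel crate. As a usage sketch, `kernel-util` code would call them through small safe helpers; the wrapper below is hypothetical and not part of this commit:

```rust
/// Hypothetical safe wrapper around the linker-provided IRQ-guard symbols.
/// Not part of this commit; shown only to illustrate the intended call pattern.
pub fn with_irqs_masked<T>(f: impl FnOnce() -> T) -> T {
    // SAFETY: the kernel crate guarantees these symbols exist at link time.
    // __acquire_irq_guard masks IRQs and returns the previous mask state.
    let mask = unsafe { __acquire_irq_guard() };
    let result = f();
    // Restore the interrupt mask state saved above.
    unsafe { __release_irq_guard(mask) };
    result
}
```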
@@ -1,8 +1,11 @@
#![no_std]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_slice, step_trait, const_trait_impl, effects)]

extern crate alloc;

pub(crate) mod api;

pub mod mem;
pub mod sync;
pub mod util;

157 lib/kernel-util/src/mem/address.rs Normal file
@@ -0,0 +1,157 @@
use crate::api::{__physicalize, __virtualize};
use core::{
    fmt,
    iter::Step,
    mem::align_of,
    ops::{Add, Sub},
};

/// Wrapper type to represent a physical memory address
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalAddress(pub(crate) u64);

/// Interface for converting addresses from their raw values to more specific types
#[const_trait]
pub trait FromRaw<T> {
    /// Converts a raw value into the address wrapper type
    fn from_raw(value: T) -> Self;
}

/// Interface for converting wrapper types into their raw address representations
#[const_trait]
pub trait IntoRaw<T> {
    /// Converts a wrapper type value into its raw address
    fn into_raw(self) -> T;
}

/// Interface for obtaining physical addresses of values
pub trait AsPhysicalAddress {
    /// Returns the value's physical address.
    ///
    /// # Safety
    ///
    /// The caller must ensure the value has been constructed and obtained through proper means.
    unsafe fn as_physical_address(&self) -> PhysicalAddress;
}

impl PhysicalAddress {
    /// Physical address of zero
    pub const ZERO: Self = Self(0);

    /// Maximum representable physical address
    pub const MAX: Self = Self(u64::MAX);
    /// Minimum representable physical address
    pub const MIN: Self = Self(u64::MIN);

    /// Applies an offset to the address
    pub const fn add(self, offset: usize) -> Self {
        Self(self.0 + offset as u64)
    }

    /// Returns `true` if the address is zero
    #[inline(always)]
    pub const fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Returns `true` if the address is aligned to the alignment requirement of `T`
    #[inline]
    pub const fn is_aligned_for<T: Sized>(self) -> bool {
        self.0 as usize % align_of::<T>() == 0
    }

    /// Converts a previously virtualized physical address back into its physical form.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function only receives addresses obtained through
    /// [PhysicalAddress::virtualize_raw] or
    /// [super::pointer::PhysicalRef]/[super::pointer::PhysicalRefMut] facilities.
    pub unsafe fn from_virtualized(address: usize) -> Self {
        Self(__physicalize(address))
    }

    /// Converts the physical address to a virtual one
    pub fn virtualize_raw(self) -> usize {
        unsafe { __virtualize(self.0) }
    }
}

impl Add for PhysicalAddress {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        Self(self.0 + rhs.0)
    }
}

impl Sub for PhysicalAddress {
    type Output = usize;

    fn sub(self, rhs: Self) -> Self::Output {
        (self.0 - rhs.0) as usize
    }
}

// Conversions

impl const FromRaw<u64> for PhysicalAddress {
    fn from_raw(value: u64) -> Self {
        Self(value)
    }
}

impl const FromRaw<usize> for PhysicalAddress {
    fn from_raw(value: usize) -> Self {
        Self(value as u64)
    }
}

impl const IntoRaw<u64> for PhysicalAddress {
    fn into_raw(self) -> u64 {
        self.0
    }
}

impl const IntoRaw<usize> for PhysicalAddress {
    fn into_raw(self) -> usize {
        self.0 as usize
    }
}

impl From<PhysicalAddress> for u64 {
    fn from(addr: PhysicalAddress) -> u64 {
        addr.0
    }
}

impl From<PhysicalAddress> for usize {
    fn from(addr: PhysicalAddress) -> usize {
        addr.0 as usize
    }
}

// Ranges

impl Step for PhysicalAddress {
    fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
        todo!()
    }

    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        start.0.checked_add(count as u64).map(Self)
    }

    fn backward_checked(_start: Self, _count: usize) -> Option<Self> {
        todo!()
    }
}

// fmt

impl fmt::LowerHex for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::LowerHex::fmt(&self.0, f)
    }
}
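A short usage sketch of the relocated address API (illustrative, not part of the diff):

```rust
use kernel_util::mem::address::{FromRaw, IntoRaw, PhysicalAddress};

fn example() {
    // Raw <-> wrapper conversions.
    let phys = PhysicalAddress::from_raw(0x1000usize);
    let raw: u64 = phys.into_raw();
    assert_eq!(raw, 0x1000);

    // Round-trip through the kernel's physical-memory mapping; both
    // directions dispatch to the __virtualize/__physicalize symbols
    // the kernel provides at link time.
    let virt = phys.virtualize_raw();
    let back = unsafe { PhysicalAddress::from_virtualized(virt) };
    assert_eq!(back, phys);
}
```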
@ -1,19 +1,15 @@
|
||||
//! Facilities for mapping devices to virtual address space
|
||||
use core::{
|
||||
alloc::Layout,
|
||||
mem::size_of,
|
||||
ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
use abi::error::Error;
|
||||
use alloc::sync::Arc;
|
||||
use yggdrasil_abi::error::Error;
|
||||
|
||||
use crate::arch::{Architecture, ARCHITECTURE};
|
||||
use crate::api::{__map_device_pages, __unmap_device_pages};
|
||||
|
||||
use super::{
|
||||
address::{AsPhysicalAddress, FromRaw},
|
||||
PhysicalAddress,
|
||||
};
|
||||
use super::address::{AsPhysicalAddress, FromRaw, PhysicalAddress};
|
||||
|
||||
/// Describes a single device memory mapping
|
||||
#[derive(Debug)]
|
||||
@ -60,7 +56,7 @@ impl RawDeviceMemoryMapping {
|
||||
/// The caller must ensure proper access synchronization, as well as the address' origin.
|
||||
#[inline]
|
||||
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
|
||||
ARCHITECTURE.map_device_memory(base, size)
|
||||
__map_device_pages(base, size)
|
||||
}
|
||||
|
||||
/// Consumes the device mapping, leaking its address without deallocating the translation
|
||||
@ -75,7 +71,7 @@ impl RawDeviceMemoryMapping {
|
||||
impl Drop for RawDeviceMemoryMapping {
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
ARCHITECTURE.unmap_device_memory(self);
|
||||
__unmap_device_pages(self);
|
||||
}
|
||||
}
|
||||
}
|
3
lib/kernel-util/src/mem/mod.rs
Normal file
3
lib/kernel-util/src/mem/mod.rs
Normal file
@ -0,0 +1,3 @@
|
||||
pub mod address;
|
||||
pub mod device;
|
||||
pub mod table;
|
79
lib/kernel-util/src/mem/table.rs
Normal file
79
lib/kernel-util/src/mem/table.rs
Normal file
@ -0,0 +1,79 @@
|
||||
use super::address::PhysicalAddress;
|
||||
|
||||
/// Interface for a single level of address translation
|
||||
pub trait EntryLevel: Copy {
|
||||
/// The right shift needed to obtain an index of an entry at this level from an address
|
||||
const SHIFT: usize;
|
||||
/// The size of a page at this entry level
|
||||
const SIZE: usize = 1 << Self::SHIFT;
|
||||
}
|
||||
|
||||
#[const_trait]
|
||||
pub trait EntryLevelExt: Sized {
|
||||
fn page_index<T: EntryLevel>(self) -> usize;
|
||||
fn page_offset<T: EntryLevel>(self) -> usize;
|
||||
fn page_count<T: EntryLevel>(self) -> usize;
|
||||
fn page_align_up<T: EntryLevel>(self) -> Self;
|
||||
fn page_align_down<T: EntryLevel>(self) -> Self;
|
||||
fn is_page_aligned_for<T: EntryLevel>(self) -> bool;
|
||||
}
|
||||
|
||||
#[const_trait]
|
||||
trait AddressLike: Sized {
|
||||
fn into_usize(self) -> usize;
|
||||
fn from_usize(v: usize) -> Self;
|
||||
}
|
||||
|
||||
impl const AddressLike for usize {
|
||||
#[inline(always)]
|
||||
fn into_usize(self) -> usize {
|
||||
self
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_usize(v: usize) -> Self {
|
||||
v
|
||||
}
|
||||
}
|
||||
|
||||
impl const AddressLike for PhysicalAddress {
|
||||
fn from_usize(v: usize) -> Self {
|
||||
Self(v as _)
|
||||
}
|
||||
|
||||
fn into_usize(self) -> usize {
|
||||
self.0 as _
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ~const AddressLike> const EntryLevelExt for T {
|
||||
#[inline(always)]
|
||||
fn page_index<L: EntryLevel>(self) -> usize {
|
||||
(self.into_usize() >> L::SHIFT) & 0x1FF
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn page_offset<L: EntryLevel>(self) -> usize {
|
||||
self.into_usize() & (L::SIZE - 1)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn page_count<L: EntryLevel>(self) -> usize {
|
||||
(self.into_usize() + L::SIZE - 1) / L::SIZE
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn page_align_up<L: EntryLevel>(self) -> Self {
|
||||
Self::from_usize((self.into_usize() + L::SIZE - 1) & !(L::SIZE - 1))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn page_align_down<L: EntryLevel>(self) -> Self {
|
||||
Self::from_usize(self.into_usize() & !(L::SIZE - 1))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn is_page_aligned_for<L: EntryLevel>(self) -> bool {
|
||||
self.page_offset::<L>() == 0
|
||||
}
|
||||
}
|
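How the extension trait reads at call sites, assuming a 4 KiB level `L3` (`SHIFT = 12`) like the ones the architecture crates define; the level definition here is an assumption for illustration only:

```rust
use kernel_util::mem::table::{EntryLevel, EntryLevelExt};

// Assumed level definition; the real L3 comes from the architecture code.
#[derive(Clone, Copy)]
struct L3;

impl EntryLevel for L3 {
    const SHIFT: usize = 12; // 4 KiB pages
}

fn example() {
    let addr: usize = 0x3456;
    assert_eq!(addr.page_offset::<L3>(), 0x456);
    assert_eq!(addr.page_align_down::<L3>(), 0x3000);
    assert_eq!(addr.page_align_up::<L3>(), 0x4000);
    // Pages needed to hold 0x2001 bytes: ceil(0x2001 / 0x1000) = 3.
    assert_eq!(0x2001usize.page_count::<L3>(), 3);
}
```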
@@ -28,8 +28,11 @@ use device_api::{
    timer::MonotonicTimestampProviderDevice,
    ResetDevice,
};
use kernel_util::mem::{
    address::PhysicalAddress, device::RawDeviceMemoryMapping, table::EntryLevel,
};

use crate::mem::{device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, PhysicalAddress};
use crate::mem::phys::PhysicalMemoryRegion;

cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
@@ -45,23 +48,8 @@ cfg_if! {
    }
}

// External API for architecture specifics

#[no_mangle]
fn __acquire_irq_guard() -> bool {
    let mask = ArchitectureImpl::interrupt_mask();
    unsafe {
        ArchitectureImpl::set_interrupt_mask(true);
    }
    mask
}

#[no_mangle]
fn __release_irq_guard(mask: bool) {
    unsafe {
        ArchitectureImpl::set_interrupt_mask(mask);
    }
}
/// Architecture-specific lowest level of page mapping
pub type L3 = <ArchitectureImpl as Architecture>::L3;

// Architecture interfaces

@@ -85,6 +73,9 @@ pub trait Architecture {
    /// IRQ number type associated with the architecture
    type IrqNumber;

    /// Lowest page entry level, usually 4KiB pages
    type L3: EntryLevel;

    /// Starts up the application processors that may be present in the system.
    ///
    /// # Safety
@@ -120,10 +111,10 @@ pub trait Architecture {
    ) -> Result<(), Error>;

    /// Converts a physical address to a virtual one, so it can be accessed by the kernel
    fn virtualize(address: PhysicalAddress) -> Result<usize, Error>;
    fn virtualize(address: u64) -> usize;

    /// Converts a virtual address created by [Architecture::virtualize] back to its physical form
    fn physicalize(address: usize) -> Result<PhysicalAddress, Error>;
    fn physicalize(address: usize) -> u64;

    // Architecture intrinsics

@@ -233,3 +224,43 @@ pub trait Architecture {
        }
    }
}

// External API for architecture specifics

#[no_mangle]
fn __acquire_irq_guard() -> bool {
    let mask = ArchitectureImpl::interrupt_mask();
    unsafe {
        ArchitectureImpl::set_interrupt_mask(true);
    }
    mask
}

#[no_mangle]
fn __release_irq_guard(mask: bool) {
    unsafe {
        ArchitectureImpl::set_interrupt_mask(mask);
    }
}

#[no_mangle]
fn __virtualize(addr: u64) -> usize {
    ArchitectureImpl::virtualize(addr)
}

#[no_mangle]
fn __physicalize(addr: usize) -> u64 {
    ArchitectureImpl::physicalize(addr)
}

#[no_mangle]
fn __map_device_pages(
    base: PhysicalAddress,
    count: usize,
) -> Result<RawDeviceMemoryMapping, Error> {
    unsafe { ARCHITECTURE.map_device_memory(base, count) }
}
#[no_mangle]
fn __unmap_device_pages(mapping: &RawDeviceMemoryMapping) {
    unsafe { ARCHITECTURE.unmap_device_memory(mapping) }
}
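These `#[no_mangle]` definitions are the other half of the `extern "Rust"` declarations in `lib/kernel-util/src/api.rs`: the two crates agree on nothing but symbol names and signatures, so the dependency points from `kernel-util` back to the kernel only at link time. Reduced to a single symbol, the contract looks like this (both halves copied from the hunks in this commit):

```rust
// In lib/kernel-util/src/api.rs (consumer side, declaration only):
extern "Rust" {
    pub fn __virtualize(phys: u64) -> usize;
}

// In the kernel's arch module (provider side, definition):
#[no_mangle]
fn __virtualize(addr: u64) -> usize {
    ArchitectureImpl::virtualize(addr)
}
```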
@@ -15,7 +15,11 @@ use device_api::{
    interrupt::{InterruptHandler, IpiDeliveryTarget},
    Device,
};
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use kernel_util::{
    mem::address::{FromRaw, PhysicalAddress},
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};
use yggdrasil_abi::error::Error;

use crate::{
@@ -23,10 +27,7 @@ use crate::{
        x86_64::{smp::CPU_COUNT, IrqNumber, SHUTDOWN_FENCE},
        Architecture, CpuMessage, ARCHITECTURE,
    },
    mem::{
        address::FromRaw, heap::GLOBAL_HEAP, pointer::PhysicalRef, read_memory, write_memory,
        PhysicalAddress,
    },
    mem::{heap::GLOBAL_HEAP, pointer::PhysicalRef, read_memory, write_memory},
};

use super::intrinsics;
@@ -8,17 +8,20 @@ use device_api::{
    },
    Device,
};
use kernel_util::sync::IrqSafeSpinlock;
use kernel_util::{
    mem::{
        address::{FromRaw, PhysicalAddress},
        device::DeviceMemoryIo,
    },
    sync::IrqSafeSpinlock,
};
use tock_registers::{
    interfaces::{Readable, Writeable},
    register_structs,
    registers::{ReadWrite, WriteOnly},
};

use crate::{
    arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber},
    mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
};
use crate::arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber};

use super::{APIC_EXTERNAL_OFFSET, POPULATED_EXTERNAL_VECTORS};

@@ -10,7 +10,15 @@ use device_api::{
    },
    Device,
};
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use kernel_util::{
    mem::{
        address::{FromRaw, IntoRaw, PhysicalAddress},
        device::DeviceMemoryIo,
        table::EntryLevelExt,
    },
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};
use tock_registers::{
    interfaces::{ReadWriteable, Readable, Writeable},
    register_bitfields, register_structs,
@@ -24,11 +32,6 @@ use crate::{
        },
        CpuMessage,
    },
    mem::{
        address::{FromRaw, IntoRaw},
        device::DeviceMemoryIo,
        PhysicalAddress,
    },
    task::Cpu,
};

@@ -2,15 +2,9 @@
use core::{arch::global_asm, cell::UnsafeCell};

use abi::error::Error;
use kernel_util::mem::address::{AsPhysicalAddress, IntoRaw};

use crate::{
    arch::x86_64::mem::KERNEL_TABLES,
    mem::{
        address::{AsPhysicalAddress, IntoRaw},
        phys,
    },
    task::context::TaskContextImpl,
};
use crate::{arch::x86_64::mem::KERNEL_TABLES, mem::phys, task::context::TaskContextImpl};

struct StackBuilder {
    base: usize,
@@ -5,7 +5,14 @@ use core::{
};

use abi::error::Error;
use kernel_util::util::OneTimeInit;
use kernel_util::{
    mem::{
        address::{FromRaw, PhysicalAddress},
        device::RawDeviceMemoryMapping,
        table::EntryLevelExt,
    },
    util::OneTimeInit,
};
use memtables::FixedTables;
use static_assertions::{const_assert_eq, const_assert_ne};

@@ -14,12 +21,7 @@ pub mod table;

use crate::{
    arch::x86_64::{intrinsics, mem::table::PageAttributes, registers::CR3},
    mem::{
        address::{FromRaw, IntoRaw, KernelImageObject},
        device::RawDeviceMemoryMapping,
        table::EntryLevel,
        PhysicalAddress, KERNEL_VIRT_OFFSET,
    },
    mem::{address::KernelImageObject, table::EntryLevel, KERNEL_VIRT_OFFSET},
};

use self::table::{PageEntry, PageTable, L0, L1, L2, L3};
@@ -28,9 +30,10 @@ const CANONICAL_ADDRESS_MASK: usize = 0xFFFF000000000000;
const KERNEL_PHYS_BASE: usize = 0x400000;

// Mapped at compile time
const KERNEL_L0_INDEX: usize = L0::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_L1_INDEX: usize = L1::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_START_L2_INDEX: usize = L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_MAPPING_BASE: usize = KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE;
const KERNEL_L0_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L0>();
const KERNEL_L1_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L1>();
const KERNEL_START_L2_INDEX: usize = KERNEL_MAPPING_BASE.page_index::<L2>();

// Must not be zero, should be at 4MiB
const_assert_ne!(KERNEL_START_L2_INDEX, 0);
@@ -103,7 +106,7 @@ unsafe fn unmap_early_page(address: usize) {
        panic!("Tried to unmap invalid early mapping: {:#x}", address);
    }

    let l3i = L3::index(address - EARLY_MAPPING_OFFSET);
    let l3i = (address - EARLY_MAPPING_OFFSET).page_index::<L3>();

    assert!(EARLY_MAPPING_L3[l3i].is_present());
    EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;
@@ -169,14 +172,14 @@ pub(super) unsafe fn map_device_memory(
) -> Result<RawDeviceMemoryMapping, Error> {
    // debugln!("Map {}B @ {:#x}", size, base);
    let l3_aligned = base.page_align_down::<L3>();
    let l3_offset = L3::page_offset(base.into_raw());
    let page_count = (l3_offset + size + L3::SIZE - 1) / L3::SIZE;
    let l3_offset = base.page_offset::<L3>();
    let page_count = (l3_offset + size).page_count::<L3>();

    if page_count > 256 {
        // Large mapping, use L2 mapping instead
        let l2_aligned = base.page_align_down::<L2>();
        let l2_offset = L2::page_offset(base.into_raw());
        let page_count = (l2_offset + size + L2::SIZE - 1) / L2::SIZE;
        let l2_offset = base.page_offset::<L2>();
        let page_count = (l2_offset + size).page_count::<L2>();

        let base_address = map_device_memory_l2(l2_aligned, page_count)?;
        let address = base_address + l2_offset;
@@ -211,8 +214,8 @@ pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
        L3::SIZE => {
            for i in 0..map.page_count {
                let page = map.base_address + i * L3::SIZE;
                let l2i = L2::index(page);
                let l3i = L3::index(page);
                let l2i = page.page_index::<L2>();
                let l3i = page.page_index::<L3>();
                assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
                intrinsics::flush_tlb_entry(page);
@@ -224,7 +227,7 @@ pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
}

pub(super) unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {
    if L2::page_offset(page.into_raw()) != 0 {
    if !page.is_page_aligned_for::<L2>() {
        panic!("Attempted to map a misaligned 2MiB page");
    }
    assert!(index < 512);
@@ -1,15 +1,17 @@
//! x86-64-specific process address space management functions
use kernel_util::mem::{
    address::{AsPhysicalAddress, IntoRaw, PhysicalAddress},
    table::EntryLevelExt,
};
use yggdrasil_abi::error::Error;

use crate::{
    arch::x86_64::intrinsics,
    mem::{
        address::{AsPhysicalAddress, IntoRaw},
        phys,
        pointer::PhysicalRefMut,
        process::ProcessAddressSpaceManager,
        table::{EntryLevel, MapAttributes, NextPageTable},
        PhysicalAddress,
    },
};

@@ -77,10 +79,10 @@ impl ProcessAddressSpaceImpl {
        entry: PageEntry<L3>,
        overwrite: bool,
    ) -> Result<(), Error> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);
        let l0i = virt.page_index::<L0>();
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();

        let mut l1 = self.l0.get_mut_or_alloc(l0i)?;
        let mut l2 = l1.get_mut_or_alloc(l1i)?;
@@ -99,10 +101,10 @@ impl ProcessAddressSpaceImpl {
    }

    fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);
        let l0i = virt.page_index::<L0>();
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();

        // TODO somehow drop tables if they're known to be empty?
        let mut l1 = self.l0.get_mut(l0i).ok_or(Error::DoesNotExist)?;
@@ -120,10 +122,10 @@ impl ProcessAddressSpaceImpl {
    }

    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);
        let l0i = virt.page_index::<L0>();
        let l1i = virt.page_index::<L1>();
        let l2i = virt.page_index::<L2>();
        let l3i = virt.page_index::<L3>();

        let l1 = self.l0.get(l0i)?;
        let l2 = l1.get(l1i)?;
@@ -6,13 +6,12 @@ use core::{

use abi::error::Error;
use bitflags::bitflags;
use kernel_util::mem::address::{AsPhysicalAddress, FromRaw, PhysicalAddress};

use crate::mem::{
    address::{AsPhysicalAddress, FromRaw},
    phys,
    pointer::{PhysicalRef, PhysicalRefMut},
    table::{EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel},
    PhysicalAddress,
};

bitflags! {
@@ -67,19 +66,19 @@ impl NonTerminalEntryLevel for L2 {
    type NextLevel = L3;
}

impl const EntryLevel for L0 {
impl EntryLevel for L0 {
    const SHIFT: usize = 39;
}

impl const EntryLevel for L1 {
impl EntryLevel for L1 {
    const SHIFT: usize = 30;
}

impl const EntryLevel for L2 {
impl EntryLevel for L2 {
    const SHIFT: usize = 21;
}

impl const EntryLevel for L3 {
impl EntryLevel for L3 {
    const SHIFT: usize = 12;
}

@@ -11,7 +11,15 @@ use device_api::{
    Device,
};
use git_version::git_version;
use kernel_util::{sync::SpinFence, util::OneTimeInit};
use kernel_util::{
    mem::{
        address::{FromRaw, IntoRaw, PhysicalAddress},
        device::RawDeviceMemoryMapping,
        table::EntryLevelExt,
    },
    sync::SpinFence,
    util::OneTimeInit,
};
use yboot_proto::{v1::AvailableMemoryRegion, LoadProtocolV1};

mod acpi;
@@ -50,12 +58,9 @@ use crate::{
        Initrd, INITRD_DATA,
    },
    mem::{
        address::{FromRaw, IntoRaw},
        device::RawDeviceMemoryMapping,
        heap,
        phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
        table::EntryLevel,
        PhysicalAddress,
    },
};

@@ -117,6 +122,7 @@ pub static ARCHITECTURE: X86_64 = X86_64 {
impl Architecture for X86_64 {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
    type IrqNumber = IrqNumber;
    type L3 = mem::table::L3;

    unsafe fn start_application_processors(&self) {
        if let Some(acpi) = self.acpi.try_get() {
@@ -256,25 +262,23 @@ impl Architecture for X86_64 {
    }

    #[inline]
    fn virtualize(address: PhysicalAddress) -> Result<usize, Error> {
        let raw: usize = address.into_raw();
        if raw < *mem::MEMORY_LIMIT.get() {
            Ok(raw + RAM_MAPPING_OFFSET)
    fn virtualize(address: u64) -> usize {
        let address = address as usize;
        if address < *mem::MEMORY_LIMIT.get() {
            address + RAM_MAPPING_OFFSET
        } else {
            errorln!("Invalid physical address: {:#x}", address);
            Err(Error::InvalidMemoryOperation)
            panic!("Invalid physical address: {:#x}", address);
        }
    }

    #[inline]
    fn physicalize(address: usize) -> Result<PhysicalAddress, Error> {
    fn physicalize(address: usize) -> u64 {
        if address < RAM_MAPPING_OFFSET || address - RAM_MAPPING_OFFSET >= *mem::MEMORY_LIMIT.get()
        {
            errorln!("Not a virtualized physical address: {:#x}", address);
            return Err(Error::InvalidMemoryOperation);
            panic!("Not a virtualized physical address: {:#x}", address);
        }

        Ok(PhysicalAddress::from_raw(address - RAM_MAPPING_OFFSET))
        (address - RAM_MAPPING_OFFSET) as _
    }

    fn external_interrupt_controller(
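Note the semantic change in the hunk above: `virtualize`/`physicalize` now panic on an invalid address instead of returning `Err(Error::InvalidMemoryOperation)`, since the trait signatures lost their `Result`. Both directions remain a single constant offset into the linear RAM mapping; a minimal standalone sketch of that translation (the offset and limit values below are assumptions for illustration, not from this commit):

```rust
// Illustrative only: the real RAM_MAPPING_OFFSET and MEMORY_LIMIT come from
// the kernel's x86-64 memory module.
const RAM_MAPPING_OFFSET: usize = 0xFFFF_FF00_0000_0000; // assumed value
const MEMORY_LIMIT: usize = 64 << 30; // assumed 64 GiB of tracked RAM

fn virtualize(phys: u64) -> usize {
    let phys = phys as usize;
    assert!(phys < MEMORY_LIMIT, "Invalid physical address");
    phys + RAM_MAPPING_OFFSET
}

fn physicalize(virt: usize) -> u64 {
    assert!(
        virt >= RAM_MAPPING_OFFSET && virt - RAM_MAPPING_OFFSET < MEMORY_LIMIT,
        "Not a virtualized physical address"
    );
    (virt - RAM_MAPPING_OFFSET) as u64
}
```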
@@ -2,6 +2,7 @@
use core::sync::atomic::{AtomicUsize, Ordering};

use acpi_lib::platform::{ProcessorInfo, ProcessorState};
use kernel_util::mem::address::{AsPhysicalAddress, FromRaw, IntoRaw, PhysicalAddress};

use crate::{
    arch::{
@@ -15,12 +16,7 @@ use crate::{
        },
        Architecture, ArchitectureImpl,
    },
    mem::{
        address::{AsPhysicalAddress, FromRaw, IntoRaw},
        phys,
        pointer::PhysicalRefMut,
        PhysicalAddress,
    },
    mem::{phys, pointer::PhysicalRefMut},
    task::Cpu,
};

@@ -2,12 +2,13 @@

use abi::error::Error;
use device_api::interrupt::{MessageInterruptController, MsiHandler};

use crate::{
    device::bus::pci::PciBaseAddress,
    mem::{address::FromRaw, device::DeviceMemoryIoMut, PhysicalAddress},
use kernel_util::mem::{
    address::{FromRaw, PhysicalAddress},
    device::DeviceMemoryIoMut,
};

use crate::device::bus::pci::PciBaseAddress;

use super::{PciCapability, PciCapabilityId, PciConfigurationSpace};

/// MSI-X capability query
@@ -5,7 +5,10 @@ use acpi_lib::mcfg::McfgEntry;
use alloc::{boxed::Box, rc::Rc, vec::Vec};
use bitflags::bitflags;
use device_api::Device;
use kernel_util::sync::IrqSafeSpinlock;
use kernel_util::{
    mem::address::{FromRaw, PhysicalAddress},
    sync::IrqSafeSpinlock,
};
use yggdrasil_abi::error::Error;

pub mod capability;
@@ -15,10 +18,7 @@ pub use space::{
    ecam::PciEcam, PciConfigSpace, PciConfigurationSpace, PciLegacyConfigurationSpace,
};

use crate::{
    device::nvme,
    mem::{address::FromRaw, PhysicalAddress},
};
use crate::device::nvme;

bitflags! {
    /// Command register of the PCI configuration space
@@ -1,8 +1,7 @@
//! PCI Express ECAM interface
use kernel_util::mem::{address::PhysicalAddress, device::DeviceMemoryMapping};
use yggdrasil_abi::error::Error;

use crate::mem::{device::DeviceMemoryMapping, PhysicalAddress};

use super::{PciAddress, PciConfigurationSpace};

/// PCI Express Enhanced Configuration Access Mechanism
@@ -57,20 +56,5 @@ impl PciEcam {
        let this = Self::map(phys_addr)?;

        Ok(if this.is_valid() { Some(this) } else { None })

        // if phys_addr + 0xFFF < 0x100000000 {
        //     // Probe without allocating a mapping
        //     let raw = PciRawEcam {
        //         virt_addr: phys_addr.virtualize(),
        //     };

        //     if !raw.is_valid() {
        //         return Ok(None);
        //     }

        //     Self::map(phys_addr).map(Some)
        // } else {
        //     todo!()
        // }
    }
}

@@ -4,9 +4,10 @@ use core::ops::{Index, IndexMut};

use abi::error::Error;
use device_api::Device;
use kernel_util::sync::IrqSafeSpinlock;

use crate::mem::{device::RawDeviceMemoryMapping, PhysicalAddress};
use kernel_util::{
    mem::{address::PhysicalAddress, device::RawDeviceMemoryMapping},
    sync::IrqSafeSpinlock,
};

use super::{DisplayDevice, DisplayDimensions};

@@ -2,12 +2,10 @@

use core::fmt::{self, Write};

use kernel_util::mem::address::PhysicalAddress;
use tock_registers::{interfaces::Readable, register_structs, registers::ReadOnly, UIntLike};

use crate::{
    device::nvme::queue::PhysicalRegionPage,
    mem::{address::IntoRaw, PhysicalAddress},
};
use crate::device::nvme::queue::PhysicalRegionPage;

use super::queue::SubmissionQueueEntry;

@@ -4,7 +4,14 @@ use core::{mem::size_of, time::Duration};
use abi::error::Error;
use alloc::{collections::BTreeMap, vec::Vec};
use device_api::{interrupt::MsiHandler, Device};
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use kernel_util::{
    mem::{
        address::{AsPhysicalAddress, FromRaw, IntoRaw, PhysicalAddress},
        device::{DeviceMemoryIo, DeviceMemoryIoMut},
    },
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};
use tock_registers::{
    interfaces::{ReadWriteable, Readable, Writeable},
    register_bitfields, register_structs,
@@ -25,12 +32,7 @@ use crate::{
            queue::{CompletionQueueEntry, SubmissionQueueEntry},
        },
    },
    mem::{
        address::{AsPhysicalAddress, FromRaw, IntoRaw},
        device::{DeviceMemoryIo, DeviceMemoryIoMut},
        pointer::PhysicalRefMut,
        PhysicalAddress,
    },
    mem::pointer::PhysicalRefMut,
    task::runtime,
};

@@ -12,12 +12,18 @@ use alloc::{
};
use bytemuck::{Pod, Zeroable};
use futures_util::Future;
use kernel_util::sync::IrqSafeSpinlock;
use kernel_util::{
    mem::{
        address::{IntoRaw, PhysicalAddress},
        table::EntryLevelExt,
    },
    sync::IrqSafeSpinlock,
};
use static_assertions::const_assert;

use crate::{
    arch::x86_64::mem::table::L3,
    mem::{address::IntoRaw, phys, pointer::PhysicalRefMut, table::EntryLevel, PhysicalAddress},
    arch::L3,
    mem::{phys, pointer::PhysicalRefMut},
    task::runtime::QueueWaker,
};

@@ -257,11 +263,11 @@ impl<'a> QueuePair<'a> {
        let sq_size = capacity * size_of::<SubmissionQueueEntry>();
        let cq_size = capacity * size_of::<CompletionQueueEntry>();

        let page_count = L3::page_count(sq_size) + L3::page_count(cq_size);
        let page_count = sq_size.page_count::<L3>() + cq_size.page_count::<L3>();
        let base = phys::alloc_pages_contiguous(page_count)?;

        let sq_base = base;
        let cq_base = base.add(L3::align_up(sq_size));
        let cq_base = base.add(sq_size.page_align_up::<L3>());

        debugln!(
            "Allocated queue pair: sq={:x?}, cq={:x?} ({} pages)",
@@ -2,12 +2,12 @@

use core::ptr::NonNull;

use kernel_util::util::OneTimeInit;
use kernel_util::{mem::address::PhysicalAddress, util::OneTimeInit};
use memfs::block::{self, BlockAllocator};
use vfs::NodeRef;
use yggdrasil_abi::{error::Error, io::MountOptions};

use crate::mem::{phys, PhysicalAddress};
use crate::mem::phys;

pub mod devfs;
pub mod sysfs;

@@ -1,15 +1,9 @@
//! Address manipulation interfaces and utilities

use core::{
    fmt,
    iter::Step,
    mem::align_of,
    ops::{Add, Deref, DerefMut, Sub},
};
use kernel_util::mem::address::{AsPhysicalAddress, FromRaw, PhysicalAddress};

use crate::arch::{Architecture, ArchitectureImpl};

use super::{table::EntryLevel, KERNEL_VIRT_OFFSET};
use super::KERNEL_VIRT_OFFSET;
use core::ops::{Deref, DerefMut};

/// Wrapper type to represent an object residing within the kernel
#[repr(transparent)]
@@ -17,35 +11,6 @@ pub struct KernelImageObject<T> {
    inner: T,
}

/// Wrapper type to represent a physical memory address
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalAddress(u64);

/// Interface for converting addresses from their raw values to more specific types
#[const_trait]
pub trait FromRaw<T> {
    /// Converts a raw value into the address wrapper type
    fn from_raw(value: T) -> Self;
}

/// Interface for converting wrapper types into their raw address representations
#[const_trait]
pub trait IntoRaw<T> {
    /// Converts a wrapper type value into its raw address
    fn into_raw(self) -> T;
}

/// Interface for obtaining physical addresses of values
pub trait AsPhysicalAddress {
    /// Returns the value's physical address.
    ///
    /// # Safety
    ///
    /// The caller must ensure the value has been constructed and obtained through proper means.
    unsafe fn as_physical_address(&self) -> PhysicalAddress;
}

// KernelImageObject wrapper for objects inside the kernel

impl<T> KernelImageObject<T> {
@@ -78,146 +43,3 @@ impl<T> DerefMut for KernelImageObject<T> {
        &mut self.inner
    }
}

//

impl PhysicalAddress {
    /// Physical address of zero
    pub const ZERO: Self = Self(0);

    /// Maximum representable physical address
    pub const MAX: Self = Self(u64::MAX);
    /// Minumum representable physical address
    pub const MIN: Self = Self(u64::MIN);

    /// Applies an offset to the address
    pub const fn add(self, offset: usize) -> Self {
        Self(self.0 + offset as u64)
    }

    /// Returns `true` if the address is zero
    #[inline(always)]
    pub const fn is_zero(self) -> bool {
        self.0 == 0
    }

    /// Returns the offset this address has within a page of level `L`
    pub const fn page_offset<L: ~const EntryLevel>(self) -> usize {
        L::page_offset(self.0 as usize)
    }

    /// Aligns the address down to a boundary of a page of level `L`
    pub const fn page_align_down<L: ~const EntryLevel>(self) -> Self {
        Self(self.0 & !(L::SIZE as u64 - 1))
    }

    /// Aligns the address up to a boundary of a page of level `L`
    pub const fn page_align_up<L: ~const EntryLevel>(self) -> Self {
        Self((self.0 + L::SIZE as u64 - 1) & !(L::SIZE as u64 - 1))
    }

    /// Returns the page index this address has at level `L`
    pub const fn page_index<L: ~const EntryLevel>(self) -> usize {
        L::index(self.0 as usize)
    }

    /// Returns `true` if the address is aligned to a boundary of a page at level `L`
    #[inline]
    pub const fn is_aligned_for<T: Sized>(self) -> bool {
        self.0 as usize % align_of::<T>() == 0
    }

    /// Converts a previously virtualized physical address back into its physical form.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function only receives addresses obtained through
    /// [PhysicalAddress::virtualize_raw] or
    /// [super::pointer::PhysicalRef]/[super::pointer::PhysicalRefMut] facilities.
    pub unsafe fn from_virtualized(address: usize) -> Self {
        ArchitectureImpl::physicalize(address).unwrap()
    }

    /// Converts the physical address to a virtual one
    pub fn virtualize_raw(self) -> usize {
        ArchitectureImpl::virtualize(self).unwrap()
    }
}

impl Add for PhysicalAddress {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        Self(self.0 + rhs.0)
    }
}

impl Sub for PhysicalAddress {
    type Output = usize;

    fn sub(self, rhs: Self) -> Self::Output {
        (self.0 - rhs.0) as usize
    }
}

// Conversions

impl const FromRaw<u64> for PhysicalAddress {
    fn from_raw(value: u64) -> Self {
        Self(value)
    }
}

impl const FromRaw<usize> for PhysicalAddress {
    fn from_raw(value: usize) -> Self {
        Self(value as u64)
    }
}

impl const IntoRaw<u64> for PhysicalAddress {
    fn into_raw(self) -> u64 {
        self.0
    }
}

impl const IntoRaw<usize> for PhysicalAddress {
    fn into_raw(self) -> usize {
        self.0 as usize
    }
}

impl From<PhysicalAddress> for u64 {
    fn from(addr: PhysicalAddress) -> u64 {
        addr.0
    }
}

impl From<PhysicalAddress> for usize {
    fn from(addr: PhysicalAddress) -> usize {
        addr.0 as usize
    }
}

// Ranges

impl Step for PhysicalAddress {
    fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
        todo!()
    }

    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        start.0.checked_add(count as u64).map(Self)
    }

    fn backward_checked(_start: Self, _count: usize) -> Option<Self> {
        todo!()
    }
}

// fmt

impl fmt::LowerHex for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::LowerHex::fmt(&self.0, f)
    }
}

@@ -5,18 +5,18 @@ use core::{
    ptr::{null_mut, NonNull},
};

use kernel_util::sync::IrqSafeSpinlock;
use linked_list_allocator::Heap;
use spinning_top::Spinlock;

/// Kernel heap manager
pub struct KernelAllocator {
    inner: Spinlock<Heap>,
    inner: IrqSafeSpinlock<Heap>,
}

impl KernelAllocator {
    const fn empty() -> Self {
        Self {
            inner: Spinlock::new(Heap::empty()),
            inner: IrqSafeSpinlock::new(Heap::empty()),
        }
    }

@@ -7,20 +7,18 @@ use core::{
};

use abi::error::Error;
use kernel_util::mem::{address::PhysicalAddress, device::DeviceMemoryMapping};

use crate::arch::{Architecture, ArchitectureImpl};

pub mod address;
pub mod device;
pub mod heap;
pub mod phys;
pub mod pointer;
pub mod process;
pub mod table;

pub use address::PhysicalAddress;

use self::{device::DeviceMemoryMapping, process::ProcessAddressSpace};
use self::process::ProcessAddressSpace;

/// Offset applied to the physical kernel image when translating it into the virtual address space
pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;

@@ -1,11 +1,8 @@
//! Physical memory manager implementation
use abi::error::Error;
use kernel_util::mem::address::{FromRaw, IntoRaw, PhysicalAddress};

use crate::mem::{
    address::{FromRaw, IntoRaw},
    pointer::PhysicalRefMut,
    PhysicalAddress,
};
use crate::mem::pointer::PhysicalRefMut;

pub type BitmapWord = u64;

@@ -3,11 +3,15 @@
use core::ops::Range;

use abi::error::Error;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use kernel_util::{
    mem::address::{FromRaw, IntoRaw, PhysicalAddress},
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

use crate::{
    arch::{Architecture, ARCHITECTURE},
    mem::{address::IntoRaw, phys::reserved::is_reserved},
    mem::phys::reserved::is_reserved,
};

use self::{
@@ -15,8 +19,6 @@ use self::{
    reserved::reserve_region,
};

use super::{address::FromRaw, PhysicalAddress};

// 8 * 4096 bits per page, 1 page per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

@@ -5,7 +5,7 @@ use core::{
    ops::{Deref, DerefMut},
};

use super::{address::AsPhysicalAddress, PhysicalAddress};
use kernel_util::mem::address::{AsPhysicalAddress, PhysicalAddress};

/// Wrapper for immutably accessing a value at a physical address
#[repr(transparent)]

@@ -3,6 +3,7 @@ use core::ops::{Deref, DerefMut};

use abi::error::Error;
use bitflags::bitflags;
pub use kernel_util::mem::table::EntryLevel;

// TODO EXECUTABLE
bitflags! {
@@ -36,38 +37,6 @@ pub trait NextPageTable {
    fn get(&self, index: usize) -> Option<Self::TableRef>;
}

/// Interface for a single level of address translation
#[const_trait]
pub trait EntryLevel: Copy {
    /// The right shift needed to obtain an index of an entry at this level from an address
    const SHIFT: usize;
    /// The size of a page at this entry level
    const SIZE: usize = 1 << Self::SHIFT;

    /// Returns the index into a page table for a given address
    #[inline]
    fn index(addr: usize) -> usize {
        (addr >> Self::SHIFT) & 0x1FF
    }
    /// Returns the offset of an address from the page start at current level
    #[inline]
    fn page_offset(addr: usize) -> usize {
        addr & (Self::SIZE - 1)
    }

    /// Aligns the `addr` up to the level page boundary
    #[inline]
    fn align_up(addr: usize) -> usize {
        (addr + Self::SIZE - 1) & !(Self::SIZE - 1)
    }

    /// Returns the page count needed to fully contain a block of size `addr`
    #[inline]
    fn page_count(addr: usize) -> usize {
        (addr + Self::SIZE - 1) / Self::SIZE
    }
}

/// Tag trait to mark that the page table level may point to a next-level table
pub trait NonTerminalEntryLevel: EntryLevel {
    /// Tag type of the level this entry level may point to