x86-64/mm: better VMA allocator
commit 6cedfa7c4a
parent 03180a1561
@@ -15,6 +15,7 @@ memfs = { path = "lib/memfs" }
 device-api = { path = "lib/device-api", features = ["derive"] }
 kernel-util = { path = "lib/kernel-util" }
 memtables = { path = "lib/memtables" }
+vmalloc = { path = "lib/vmalloc" }
 
 atomic_enum = "0.2.0"
 bitflags = "2.3.3"
lib/vmalloc/Cargo.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
[package]
name = "vmalloc"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }

[dev-dependencies]
itertools = "0.11.0"
proptest = "1.2.0"
lib/vmalloc/src/allocator.rs (new file, 349 lines)
@@ -0,0 +1,349 @@
use core::cmp::Ordering;

use alloc::collections::{linked_list::CursorMut, LinkedList};
use yggdrasil_abi::error::Error;

use crate::VirtualMemoryRange;

#[derive(PartialEq, Clone, Debug, Copy)]
struct AllocatorNode {
    range: VirtualMemoryRange,
    used: bool,
}

impl PartialOrd for AllocatorNode {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.range.start_pfn.partial_cmp(&other.range.start_pfn)
    }
}

pub struct TreeAllocator {
    ranges: LinkedList<AllocatorNode>,
}

impl AllocatorNode {
    pub const fn free(start_pfn: usize, end_pfn: usize) -> Self {
        Self {
            range: VirtualMemoryRange { start_pfn, end_pfn },
            used: false,
        }
    }

    #[cfg(test)]
    pub const fn used(start_pfn: usize, end_pfn: usize) -> Self {
        Self {
            range: VirtualMemoryRange { start_pfn, end_pfn },
            used: true,
        }
    }

    #[inline]
    pub const fn pfn_count(&self) -> usize {
        self.range.end_pfn - self.range.start_pfn
    }
}

impl TreeAllocator {
    pub fn new(start_pfn: usize, end_pfn: usize) -> Self {
        let mut ranges = LinkedList::new();
        ranges.push_back(AllocatorNode::free(start_pfn, end_pfn));
        Self { ranges }
    }

    fn find_region_mut<F: Fn(&AllocatorNode) -> bool>(&mut self, f: F) -> CursorMut<AllocatorNode> {
        let mut cursor = self.ranges.cursor_front_mut();
        while let Some(range) = cursor.current() {
            if f(range) {
                break;
            }
            cursor.move_next();
        }
        cursor
    }

    fn coalesce_regions(&mut self) {
        let mut cursor = self.ranges.cursor_front_mut();

        loop {
            let Some(&mut next) = cursor.peek_next() else {
                break;
            };
            let current = cursor.current().unwrap();

            if current.used == next.used {
                debug_assert_eq!(current.range.end_pfn, next.range.start_pfn);
                current.range.end_pfn = next.range.end_pfn;

                cursor.move_next();
                cursor.remove_current();
                cursor.move_prev();
            } else {
                cursor.move_next();
            }
        }
    }

    fn set_range(
        &mut self,
        start_pfn: usize,
        pfn_count: usize,
        old_state: bool,
        new_state: bool,
    ) -> Result<(), Error> {
        let insert = VirtualMemoryRange {
            start_pfn,
            end_pfn: start_pfn + pfn_count,
        };
        let mut cursor = self.find_region_mut(|r| r.used == old_state && r.range.contains(&insert));
        let range = cursor.current().ok_or(Error::AlreadyExists)?;

        let start_pfn = range.range.start_pfn;
        let end_pfn = range.range.end_pfn;

        match (insert.start_pfn == start_pfn, insert.end_pfn == end_pfn) {
            // No split
            (true, true) => {
                range.used = new_state;
            }
            // Split start
            (true, false) => {
                range.used = new_state;
                range.range.end_pfn = insert.end_pfn;

                cursor.insert_after(AllocatorNode {
                    range: VirtualMemoryRange {
                        start_pfn: insert.end_pfn,
                        end_pfn,
                    },
                    used: old_state,
                });
            }
            // Split end
            (false, true) => {
                range.range.end_pfn = insert.start_pfn;

                cursor.insert_after(AllocatorNode {
                    range: VirtualMemoryRange {
                        start_pfn: insert.start_pfn,
                        end_pfn,
                    },
                    used: new_state,
                });
            }
            // Split in the middle
            (false, false) => {
                range.range = insert;
                range.used = new_state;

                cursor.insert_after(AllocatorNode {
                    range: VirtualMemoryRange {
                        start_pfn: insert.end_pfn,
                        end_pfn,
                    },
                    used: old_state,
                });
                cursor.insert_before(AllocatorNode {
                    range: VirtualMemoryRange {
                        start_pfn,
                        end_pfn: insert.start_pfn,
                    },
                    used: old_state,
                });
            }
        }

        self.coalesce_regions();

        Ok(())
    }

    pub fn insert(&mut self, start_pfn: usize, pfn_count: usize) -> Result<(), Error> {
        self.set_range(start_pfn, pfn_count, false, true)
    }

    pub fn free(&mut self, start_pfn: usize, pfn_count: usize) -> Result<(), Error> {
        self.set_range(start_pfn, pfn_count, true, false)
    }

    pub fn allocate(&mut self, pfn_count: usize) -> Option<usize> {
        let mut cursor = self.find_region_mut(|r| !r.used && r.pfn_count() >= pfn_count);
        let range = cursor.current()?;

        let start_pfn = range.range.start_pfn;
        let end_pfn = range.range.end_pfn;

        range.used = true;

        if range.pfn_count() > pfn_count {
            range.range.end_pfn = start_pfn + pfn_count;

            // Split the range
            cursor.insert_after(AllocatorNode::free(start_pfn + pfn_count, end_pfn));
        }

        self.coalesce_regions();

        Some(start_pfn)
    }

    pub fn ranges(&self) -> impl Iterator<Item = (bool, VirtualMemoryRange)> + '_ {
        self.ranges.iter().map(|r| (r.used, r.range))
    }
}

#[cfg(test)]
mod tests {
    use alloc::collections::LinkedList;

    use super::{AllocatorNode, TreeAllocator};

    extern crate std;

    #[test]
    fn deallocation() {
        let ranges = LinkedList::from_iter([
            AllocatorNode::free(0, 12),
            AllocatorNode::used(12, 24),
            AllocatorNode::free(24, 32),
            AllocatorNode::used(32, 64),
            AllocatorNode::free(64, 128),
        ]);
        let mut alloc = TreeAllocator { ranges };

        // No-split dealloc
        assert_eq!(alloc.free(12, 12), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::free(0, 32),
            AllocatorNode::used(32, 64),
            AllocatorNode::free(64, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split at the start dealloc
        assert_eq!(alloc.free(32, 8), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::free(0, 40),
            AllocatorNode::used(40, 64),
            AllocatorNode::free(64, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split at the end dealloc
        assert_eq!(alloc.free(56, 8), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::free(0, 40),
            AllocatorNode::used(40, 56),
            AllocatorNode::free(56, 128),
        ]);

        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split in the middle
        assert_eq!(alloc.free(42, 4), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::free(0, 40),
            AllocatorNode::used(40, 42),
            AllocatorNode::free(42, 46),
            AllocatorNode::used(46, 56),
            AllocatorNode::free(56, 128),
        ]);

        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Whole region free
        assert_eq!(alloc.free(40, 2), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::free(0, 46),
            AllocatorNode::used(46, 56),
            AllocatorNode::free(56, 128),
        ]);

        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        assert_eq!(alloc.free(46, 10), Ok(()));
        let expected = LinkedList::from_iter([AllocatorNode::free(0, 128)]);

        itertools::assert_equal(alloc.ranges.iter(), expected.iter());
    }

    #[test]
    fn allocation() {
        let ranges = LinkedList::from_iter([
            AllocatorNode::free(0, 12),
            AllocatorNode::used(12, 24),
            AllocatorNode::free(24, 32),
            AllocatorNode::used(32, 64),
        ]);
        let mut alloc = TreeAllocator { ranges };

        // Non-splitting allocation
        assert_eq!(alloc.allocate(12), Some(0));

        // Splitting allocation
        assert_eq!(alloc.allocate(4), Some(24));

        // Non-splitting allocation
        assert_eq!(alloc.allocate(4), Some(28));

        // Out of memory
        assert_eq!(alloc.allocate(1), None);

        let expected = LinkedList::from_iter([AllocatorNode::used(0, 64)]);

        itertools::assert_equal(alloc.ranges, expected);
    }

    #[test]
    fn insertion() {
        let ranges = LinkedList::from_iter([
            AllocatorNode::free(0, 12),
            AllocatorNode::used(12, 24),
            AllocatorNode::free(24, 32),
            AllocatorNode::used(32, 64),
            AllocatorNode::free(64, 128),
        ]);
        let mut alloc = TreeAllocator { ranges };

        // No split
        assert_eq!(alloc.insert(0, 12), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::used(0, 24),
            AllocatorNode::free(24, 32),
            AllocatorNode::used(32, 64),
            AllocatorNode::free(64, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split at the start
        assert_eq!(alloc.insert(24, 4), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::used(0, 28),
            AllocatorNode::free(28, 32),
            AllocatorNode::used(32, 64),
            AllocatorNode::free(64, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split at the end
        assert_eq!(alloc.insert(30, 2), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::used(0, 28),
            AllocatorNode::free(28, 30),
            AllocatorNode::used(30, 64),
            AllocatorNode::free(64, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());

        // Split in the middle
        assert_eq!(alloc.insert(72, 16), Ok(()));
        let expected = LinkedList::from_iter([
            AllocatorNode::used(0, 28),
            AllocatorNode::free(28, 30),
            AllocatorNode::used(30, 64),
            AllocatorNode::free(64, 72),
            AllocatorNode::used(72, 88),
            AllocatorNode::free(88, 128),
        ]);
        itertools::assert_equal(alloc.ranges.iter(), expected.iter());
    }
}
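The split/coalesce behaviour of TreeAllocator is easiest to see end to end. A minimal usage sketch (not part of the commit; the PFN range and the 0x1000 page size are assumptions for illustration):

    // Sketch: drives TreeAllocator the same way the tests above do.
    let mut alloc = TreeAllocator::new(0, 1024); // single free node covering PFNs 0..1024
    let base = alloc.allocate(16).unwrap();      // splits off a used node [0, 16)
    alloc.insert(512, 4).unwrap();               // reserves an explicit PFN range
    alloc.free(base, 16).unwrap();               // frees; coalesce_regions() merges the
                                                 // adjacent free nodes back together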
lib/vmalloc/src/lib.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
#![no_std]
#![feature(linked_list_cursors, let_chains, btree_extract_if)]

extern crate alloc;

use allocator::TreeAllocator;
use yggdrasil_abi::error::Error;

pub(crate) mod allocator;

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct VirtualMemoryRange {
    start_pfn: usize,
    end_pfn: usize,
}

pub struct VirtualMemoryAllocator {
    inner: TreeAllocator,
}

impl VirtualMemoryRange {
    pub fn start_pfn(&self) -> usize {
        self.start_pfn
    }

    pub fn end_pfn(&self) -> usize {
        self.end_pfn
    }

    pub fn pfn_count(&self) -> usize {
        self.end_pfn - self.start_pfn
    }

    pub fn contains(&self, other: &Self) -> bool {
        other.start_pfn >= self.start_pfn && other.end_pfn <= self.end_pfn
    }
}

impl VirtualMemoryAllocator {
    pub fn new(start_pfn: usize, end_pfn: usize) -> Self {
        Self {
            inner: TreeAllocator::new(start_pfn, end_pfn),
        }
    }

    pub fn allocate(&mut self, pfn_count: usize) -> Result<usize, Error> {
        self.inner.allocate(pfn_count).ok_or(Error::OutOfMemory)
    }

    pub fn free(&mut self, start_pfn: usize, pfn_count: usize) -> Result<(), Error> {
        self.inner.free(start_pfn, pfn_count)
    }

    pub fn insert(&mut self, start_pfn: usize, pfn_count: usize) -> Result<(), Error> {
        self.inner.insert(start_pfn, pfn_count)
    }

    pub fn ranges(&self) -> impl Iterator<Item = (bool, VirtualMemoryRange)> + '_ {
        self.inner.ranges()
    }
}

#[cfg(test)]
mod tests {}
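A sketch of how a caller is expected to drive the public wrapper (illustrative only; the bounds mirror the LOWER_LIMIT_PFN/UPPER_LIMIT_PFN constants from the new process.rs below, and the 0x1000 page size is an assumption):

    // Sketch: VirtualMemoryAllocator converts TreeAllocator's Option into Error.
    let mut vm = VirtualMemoryAllocator::new(8, (16 << 30) / 0x1000);
    let pfn = vm.allocate(4)?;   // Err(Error::OutOfMemory) once no free range fits
    let virt = pfn * 0x1000;     // PFN -> virtual address (assumed page size)
    vm.free(pfn, 4)?;            // fails unless the range is currently marked used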
@@ -27,10 +27,7 @@ use device_api::{
     ResetDevice,
 };
 
-use crate::mem::{
-    device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, table::KernelAddressSpace,
-    PhysicalAddress,
-};
+use crate::mem::{device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, PhysicalAddress};
 
 cfg_if! {
     if #[cfg(target_arch = "aarch64")] {
@@ -58,6 +55,7 @@ pub enum CpuMessage {
 }
 
 /// Interface for an architecture-specific facilities
+#[allow(unused)]
 pub trait Architecture {
     /// Address, to which "zero" address is mapped in the virtual address space
     const KERNEL_VIRT_OFFSET: usize;
@@ -1,7 +1,6 @@
 //! x86-64 implementation of ACPI management interfaces
 use core::{
     alloc::{AllocError, Allocator, GlobalAlloc, Layout},
-    mem::{align_of, size_of},
     ptr::NonNull,
     sync::atomic::Ordering,
     time::Duration,
@@ -89,17 +88,6 @@ impl acpi_system::Handler for AcpiHandlerImpl {
             PhysicalAddress::from_raw(address),
             length.try_into().unwrap(),
         )
-
-        // PhysicalPointer::into_raw(slice)
-
-        // if address + length < 0x100000000 {
-        //     core::slice::from_raw_parts(
-        //         (address as usize).virtualize() as *const u8,
-        //         length as usize,
-        //     )
-        // } else {
-        //     panic!("Unhandled address: {:#x}", address)
-        // }
     }
 
     fn io_read_u8(port: u16) -> u8 {
@@ -16,11 +16,7 @@ use tock_registers::{
 
 use crate::{
     arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber},
-    mem::{
-        address::FromRaw,
-        device::{DeviceMemoryIo, DeviceMemoryMapping},
-        PhysicalAddress,
-    },
+    mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
     sync::IrqSafeSpinlock,
 };
 
@@ -8,10 +8,7 @@ use yboot_proto::{
 };
 
 use crate::{
-    arch::{
-        x86_64::{registers::MSR_IA32_KERNEL_GS_BASE, smp::CPU_COUNT},
-        ArchitectureImpl,
-    },
+    arch::x86_64::{registers::MSR_IA32_KERNEL_GS_BASE, smp::CPU_COUNT},
     fs::devfs,
     kernel_main, kernel_secondary_main,
     mem::KERNEL_VIRT_OFFSET,
@@ -90,8 +90,7 @@ impl TaskContextImpl for TaskContext {
     fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
         const KERNEL_TASK_PAGES: usize = 32;
 
-        let stack_base =
-            unsafe { phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw() };
+        let stack_base = phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw();
 
         let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
 
@@ -123,7 +122,7 @@ impl TaskContextImpl for TaskContext {
     ) -> Result<Self, Error> {
         const USER_TASK_PAGES: usize = 8;
 
-        let stack_base = unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw() };
+        let stack_base = phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw();
 
         let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
 
@@ -7,12 +7,7 @@ use tock_registers::interfaces::Writeable;
 
 use crate::{
     arch::{
-        x86_64::{
-            cpuid::{self, PROCESSOR_FEATURES},
-            gdt,
-            registers::MSR_IA32_KERNEL_GS_BASE,
-            syscall,
-        },
+        x86_64::{cpuid, gdt, registers::MSR_IA32_KERNEL_GS_BASE, syscall},
         CpuMessage,
     },
     sync::IrqSafeSpinlock,
@@ -8,6 +8,7 @@ use kernel_util::util::OneTimeInit;
 use memtables::FixedTables;
 use static_assertions::{const_assert_eq, const_assert_ne};
 
+pub mod process;
 pub mod table;
 
 use crate::{
@@ -69,9 +70,6 @@ pub(super) const RAM_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK | (RAM_MAPPI
 pub(super) static MEMORY_LIMIT: OneTimeInit<usize> = OneTimeInit::new();
 pub(super) static mut RAM_MAPPING_L1: PageTable<L1> = PageTable::zeroed();
 
-// Global limits
-pub(super) const HEAP_SIZE_LIMIT: usize = L1::SIZE;
-
 // Early mappings
 unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usize, Error> {
     for l3i in 0..512 {
@@ -189,8 +187,6 @@ pub(super) unsafe fn map_device_memory(
             page_size: L2::SIZE,
         })
     } else {
-        let page_size = L3::SIZE;
-
         // Just map the pages directly
         let base_address = map_device_memory_l3(l3_aligned, page_count)?;
         let address = base_address + l3_offset;
src/arch/x86_64/mem/process.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
use yggdrasil_abi::error::Error;

use crate::{
    arch::x86_64::intrinsics,
    mem::{
        address::AsPhysicalAddress,
        phys,
        pointer::PhysicalRefMut,
        process::ProcessAddressSpaceManager,
        table::{EntryLevel, MapAttributes, NextPageTable},
        PhysicalAddress,
    },
};

use super::{
    clone_kernel_tables,
    table::{PageEntry, PageTable, L0, L1, L2, L3},
};

/// Represents a process or kernel address space. Because x86-64 does not have cool stuff like
/// TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space.
#[repr(C)]
pub struct ProcessAddressSpaceImpl {
    l0: PhysicalRefMut<'static, PageTable<L0>>,
}

impl ProcessAddressSpaceManager for ProcessAddressSpaceImpl {
    const PAGE_SIZE: usize = L3::SIZE;
    const LOWER_LIMIT_PFN: usize = 8;
    // 16GiB VM limit
    const UPPER_LIMIT_PFN: usize = (16 << 30) / Self::PAGE_SIZE;

    fn new() -> Result<Self, Error> {
        let mut l0 = unsafe { PhysicalRefMut::<'static, PageTable<L0>>::map(phys::alloc_page()?) };

        for i in 0..512 {
            l0[i] = PageEntry::INVALID;
        }

        clone_kernel_tables(&mut *l0);

        Ok(Self { l0 })
    }

    #[inline]
    unsafe fn map_page(
        &mut self,
        address: usize,
        physical: PhysicalAddress,
        flags: MapAttributes,
    ) -> Result<(), Error> {
        self.write_l3_entry(address, PageEntry::page(physical, flags.into()), false)
    }

    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
        self.pop_l3_entry(address)
    }

    #[inline]
    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
        self.read_l3_entry(address)
            .ok_or(Error::InvalidMemoryOperation)
    }
}

impl ProcessAddressSpaceImpl {
    // Write a single 4KiB entry
    fn write_l3_entry(
        &mut self,
        virt: usize,
        entry: PageEntry<L3>,
        overwrite: bool,
    ) -> Result<(), Error> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let mut l1 = self.l0.get_mut_or_alloc(l0i)?;
        let mut l2 = l1.get_mut_or_alloc(l1i)?;
        let mut l3 = l2.get_mut_or_alloc(l2i)?;

        if l3[l3i].is_present() && !overwrite {
            todo!();
        }

        l3[l3i] = entry;
        unsafe {
            intrinsics::flush_tlb_entry(virt);
        }

        Ok(())
    }

    fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        // TODO somehow drop tables if they're known to be empty?
        let mut l1 = self.l0.get_mut(l0i).ok_or(Error::DoesNotExist)?;
        let mut l2 = l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;

        let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;

        l3[l3i] = PageEntry::INVALID;
        unsafe {
            intrinsics::flush_tlb_entry(virt);
        }

        Ok(page)
    }

    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
        let l0i = L0::index(virt);
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l1 = self.l0.get(l0i)?;
        let l2 = l1.get(l1i)?;
        let l3 = l2.get(l2i)?;

        let page = l3[l3i].as_page()?;

        Some((page, l3[l3i].attributes().into()))
    }
}

impl AsPhysicalAddress for ProcessAddressSpaceImpl {
    unsafe fn as_physical_address(&self) -> PhysicalAddress {
        self.l0.as_physical_address()
    }
}
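The L0::index(virt) through L3::index(virt) helpers come from the EntryLevel trait and are not shown in this commit; for reference, on x86-64 with 4KiB pages and standard 4-level paging they would compute (sketch, assumed):

    // Assumed index math: each level selects 9 bits of the virtual address.
    fn l0_index(virt: usize) -> usize { (virt >> 39) & 0x1FF } // PML4 index
    fn l1_index(virt: usize) -> usize { (virt >> 30) & 0x1FF } // PDPT index
    fn l2_index(virt: usize) -> usize { (virt >> 21) & 0x1FF } // page directory index
    fn l3_index(virt: usize) -> usize { (virt >> 12) & 0x1FF } // page table index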
@@ -6,22 +6,14 @@ use core::{
 use abi::error::Error;
 use bitflags::bitflags;
 
-use crate::{
-    arch::x86_64::intrinsics,
-    mem::{
-        address::{AsPhysicalAddress, FromRaw},
-        phys,
-        pointer::{PhysicalRef, PhysicalRefMut},
-        table::{
-            EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel, VirtualMemoryManager,
-        },
-        PhysicalAddress,
-    },
-    sync::IrqSafeSpinlock,
-};
-
-use super::{clone_kernel_tables, KERNEL_TABLES};
+use crate::mem::{
+    address::{AsPhysicalAddress, FromRaw},
+    phys,
+    pointer::{PhysicalRef, PhysicalRefMut},
+    table::{EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel},
+    PhysicalAddress,
+};
 
 bitflags! {
     /// Describes how each page table entry is mapped
     pub struct PageAttributes: u64 {
@@ -39,15 +31,8 @@ bitflags! {
     }
 }
 
-/// Represents a process or kernel address space. Because x86-64 does not have cool stuff like
-/// TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space.
-#[repr(C)]
-pub struct AddressSpace {
-    inner: IrqSafeSpinlock<PhysicalRefMut<'static, PageTable<L0>>>,
-}
-
 /// Represents a single virtual address space mapping depending on its translation level
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 #[repr(transparent)]
 pub struct PageEntry<L: EntryLevel>(u64, PhantomData<L>);
 
@@ -59,16 +44,16 @@ pub struct PageTable<L: EntryLevel> {
 }
 
 /// Translation level 0 (PML4): Entry is 512GiB table
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct L0;
 /// Translation level 1 (PDPT): Entry is 1GiB table
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct L1;
 /// Translation level 2 (Page directory): Entry is 2MiB block/table
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct L2;
 /// Translation level 3 (Page table): Entry is 4KiB page
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct L3;
 
 impl NonTerminalEntryLevel for L0 {
@@ -193,6 +178,10 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
             None
         }
     }
+
+    pub fn is_block(self) -> bool {
+        self.0 & PageAttributes::BLOCK.bits() != 0
+    }
 }
 
 impl<L: EntryLevel> PageEntry<L> {
@@ -203,6 +192,10 @@ impl<L: EntryLevel> PageEntry<L> {
         Self(raw, PhantomData)
     }
 
+    pub fn attributes(&self) -> PageAttributes {
+        PageAttributes::from_bits_retain(self.0)
+    }
+
     /// Returns `true` if the entry contains a valid mapping to either a table or to a page/block
     pub fn is_present(&self) -> bool {
         self.0 & PageAttributes::PRESENT.bits() != 0
@@ -292,125 +285,17 @@ impl From<MapAttributes> for PageAttributes {
     }
 }
 
-impl VirtualMemoryManager for AddressSpace {
-    fn allocate(
-        &self,
-        hint: Option<usize>,
-        len: usize,
-        attrs: MapAttributes,
-    ) -> Result<usize, Error> {
-        if hint.is_some() {
-            todo!();
-        }
-
-        const TRY_ALLOC_START: usize = 0x100000000;
-        const TRY_ALLOC_END: usize = 0xF00000000;
-
-        'l0: for base in (TRY_ALLOC_START..TRY_ALLOC_END - len * 0x1000).step_by(0x1000) {
-            for i in 0..len {
-                if self.translate(base + i * 0x1000).is_some() {
-                    continue 'l0;
-                }
-            }
-
-            for i in 0..len {
-                let page = phys::alloc_page()?;
-                self.map_page(base + i * 0x1000, page, attrs)?;
-            }
-
-            return Ok(base);
-        }
-
-        Err(Error::OutOfMemory)
-    }
-
-    fn map_page(
-        &self,
-        virt: usize,
-        phys: PhysicalAddress,
-        attrs: MapAttributes,
-    ) -> Result<(), Error> {
-        self.write_entry(virt, PageEntry::page(phys, attrs.into()), true)
-    }
-
-    fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error> {
-        for page in (addr..addr + len).step_by(0x1000) {
-            let Some(phys) = self.translate(page) else {
-                todo!();
-            };
-
-            self.write_entry(page, PageEntry::INVALID, true)?;
-            unsafe {
-                phys::free_page(phys);
-            }
-        }
-
-        Ok(())
-    }
-}
-
-impl AddressSpace {
-    /// Allocates an empty address space with all entries marked as non-present
-    pub fn new_empty() -> Result<Self, Error> {
-        let mut l0 = unsafe { PhysicalRefMut::<'static, PageTable<L0>>::map(phys::alloc_page()?) };
-
-        for i in 0..512 {
-            unsafe {
-                l0[i] = PageEntry::INVALID;
-            }
-        }
-
-        clone_kernel_tables(&mut *l0);
-
-        Ok(Self {
-            inner: IrqSafeSpinlock::new(l0),
-        })
-    }
-
-    // TODO return page size and attributes
-    /// Returns the physical address to which the `virt` address is mapped
-    pub fn translate(&self, virt: usize) -> Option<PhysicalAddress> {
-        let l0 = self.inner.lock();
-
-        let l0i = L0::index(virt);
-        let l1i = L1::index(virt);
-        let l2i = L2::index(virt);
-        let l3i = L3::index(virt);
-
-        let l1 = l0.get(l0i)?;
-        let l2 = l1.get(l1i)?;
-        let l3 = l2.get(l2i)?;
-
-        l3[l3i].as_page()
-    }
-
-    // Write a single 4KiB entry
-    fn write_entry(&self, virt: usize, entry: PageEntry<L3>, overwrite: bool) -> Result<(), Error> {
-        let mut l0 = self.inner.lock();
-
-        let l0i = L0::index(virt);
-        let l1i = L1::index(virt);
-        let l2i = L2::index(virt);
-        let l3i = L3::index(virt);
-
-        let mut l1 = l0.get_mut_or_alloc(l0i)?;
-        let mut l2 = l1.get_mut_or_alloc(l1i)?;
-        let mut l3 = l2.get_mut_or_alloc(l2i)?;
-
-        if l3[l3i].is_present() && !overwrite {
-            todo!();
-        }
-
-        l3[l3i] = entry;
-        unsafe {
-            intrinsics::flush_tlb_entry(virt);
-        }
-
-        Ok(())
-    }
-
-    /// Returns the physical address of the root table
-    pub fn physical_address(&self) -> PhysicalAddress {
-        unsafe { self.inner.lock().as_physical_address() }
-    }
-}
+impl From<PageAttributes> for MapAttributes {
+    fn from(value: PageAttributes) -> Self {
+        let mut res = MapAttributes::empty();
+        if value.contains(PageAttributes::USER) {
+            res |= MapAttributes::USER_READ;
+            if value.contains(PageAttributes::WRITABLE) {
+                res |= MapAttributes::USER_WRITE;
+            }
+        }
+        // TODO ???
+        res |= MapAttributes::NON_GLOBAL;
+        res
+    }
+}
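The allocate removed above is the code path the new vmalloc crate replaces: it probed every page of every candidate window through translate, so each failed window cost a full page-table walk per page. A rough cost sketch (hypothetical helper, illustration only):

    // Worst case of the removed linear probe: window_pages candidate bases,
    // request_pages translate() calls each, 4 table levels per translate().
    fn removed_probe_cost(window_pages: usize, request_pages: usize) -> usize {
        window_pages * request_pages * 4
    }
    // The vmalloc range list instead visits one node per free/used region,
    // independent of how many pages each region spans.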
@@ -10,7 +10,6 @@ use device_api::{
 };
 use git_version::git_version;
 use kernel_util::util::OneTimeInit;
-use memtables::FixedTables;
 use yboot_proto::{v1::AvailableMemoryRegion, LoadProtocolV1};
 
 mod acpi;
@@ -64,7 +63,7 @@ use self::{
     mem::{
         init_fixed_tables,
         table::{PageAttributes, PageEntry, L1, L3},
-        EarlyMapping, KERNEL_TABLES, MEMORY_LIMIT, RAM_MAPPING_L1, RAM_MAPPING_OFFSET,
+        EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1, RAM_MAPPING_OFFSET,
     },
     peripherals::{i8253::I8253, ps2::PS2Controller, serial::ComPort},
     smp::CPU_COUNT,
@@ -166,7 +165,7 @@ impl Architecture for X86_64 {
 
     fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
         &self,
-        it: I,
+        _it: I,
         _memory_start: PhysicalAddress,
         memory_end: PhysicalAddress,
     ) -> Result<(), Error> {
@@ -237,8 +236,9 @@ impl Architecture for X86_64 {
 }
 
 impl X86_64 {
-    unsafe fn handle_ipi(&self, msg: CpuMessage) {
-        todo!()
+    unsafe fn handle_ipi(&self, _msg: CpuMessage) {
+        warnln!("Received an IPI");
+        loop {}
     }
 
     fn set_boot_data(&self, data: BootData) {
@@ -404,7 +404,7 @@ impl X86_64 {
 
         self.ioapic.init(IoApic::from_acpi(&apic_info)?);
 
-        // acpi::init_acpi(acpi).unwrap();
+        acpi::init_acpi(acpi).unwrap();
 
         if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
             for entry in mcfg.entries() {
@@ -9,7 +9,7 @@ use crate::{
         boot::__x86_64_ap_entry,
         intrinsics::flush_tlb_entry,
         mem::{
-            table::{PageAttributes, L1, L2},
+            table::{PageAttributes, PageEntry, PageTable, L1, L2},
             KERNEL_TABLES,
         },
     },
@@ -19,7 +19,6 @@ use crate::{
         address::{AsPhysicalAddress, FromRaw, IntoRaw},
         phys,
         pointer::PhysicalRefMut,
-        table::{PageEntry, PageTable},
         PhysicalAddress,
     },
     task::Cpu,
@@ -35,7 +34,6 @@ static AP_BOOTSTRAP_BIN: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/__x86
 const AP_STACK_PAGES: usize = 8;
 const AP_BOOTSTRAP_DATA: PhysicalAddress = PhysicalAddress::from_raw(0x6000usize);
 const AP_BOOTSTRAP_CODE: PhysicalAddress = PhysicalAddress::from_raw(0x7000usize);
-const AP_ADDRESS_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(0x100000usize);
 
 #[repr(C)]
 #[allow(dead_code)]
@@ -3,7 +3,7 @@
 use core::time::Duration;
 
 use abi::{error::Error, primitive_enum};
-use alloc::{boxed::Box, vec, vec::Vec};
+use alloc::{vec, vec::Vec};
 use bitflags::bitflags;
 use kernel_util::util::StaticVector;
 
@@ -7,7 +7,7 @@ use memfs::block::{self, BlockAllocator};
 use vfs::VnodeRef;
 use yggdrasil_abi::{error::Error, io::MountOptions};
 
-use crate::mem::{self, phys, PhysicalAddress};
+use crate::mem::{phys, PhysicalAddress};
 
 pub mod devfs;
 
@@ -1,14 +1,11 @@
 use core::{
     fmt,
     iter::Step,
-    marker::PhantomData,
     mem::align_of,
     ops::{Add, Deref, DerefMut, Sub},
 };
 
-use bytemuck::{Pod, Zeroable};
-
-use crate::arch::{Architecture, ArchitectureImpl, ARCHITECTURE};
+use crate::arch::{Architecture, ArchitectureImpl};
 
 use super::{pointer::PhysicalPointer, table::EntryLevel, KERNEL_VIRT_OFFSET};
 
@@ -185,16 +182,16 @@ impl From<PhysicalAddress> for usize {
 // Ranges
 
 impl Step for PhysicalAddress {
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
-        loop {}
+    fn steps_between(_start: &Self, _end: &Self) -> Option<usize> {
+        todo!()
     }
 
     fn forward_checked(start: Self, count: usize) -> Option<Self> {
         start.0.checked_add(count as u64).map(Self)
     }
 
-    fn backward_checked(start: Self, count: usize) -> Option<Self> {
-        loop {}
+    fn backward_checked(_start: Self, _count: usize) -> Option<Self> {
+        todo!()
     }
 }
 
@@ -35,7 +35,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
         match self.inner.lock().allocate_first_fit(layout) {
             Ok(v) => v.as_ptr(),
             Err(e) => {
-                // errorln!("Failed to allocate {:?}: {:?}", layout, e);
+                errorln!("Failed to allocate {:?}: {:?}", layout, e);
                 null_mut()
             }
         }
@@ -1,25 +1,9 @@
-// //! Memory management utilities and types
-// // use core::{alloc::Layout, mem::size_of};
-//
-// use core::{alloc::Layout, ffi::c_void, mem::size_of};
-//
-// use abi::error::Error;
-//
-// // use abi::error::Error;
-// //
-// use crate::arch::{Architecture, ArchitectureImpl /*, PlatformImpl*/};
-//
-// use self::table::AddressSpace;
-// //
-// // use self::table::AddressSpace;
-//
-// pub mod device;
+//! Memory management utilities and types
 
 use core::{
     alloc::Layout,
     ffi::c_void,
     mem::{align_of, size_of},
-    ops::Add,
 };
 
 use abi::error::Error;
@@ -31,11 +15,12 @@ pub mod device;
 pub mod heap;
 pub mod phys;
 pub mod pointer;
+pub mod process;
 pub mod table;
 
 pub use address::PhysicalAddress;
 
-use self::{device::DeviceMemoryMapping, table::AddressSpace};
+use self::{device::DeviceMemoryMapping, process::ProcessAddressSpace};
 
 pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
 
@@ -60,42 +45,7 @@ pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
         (address as *mut T).write_unaligned(value)
     }
 }
-// pub mod phys;
-//
-// /// Kernel's physical load address
-// // pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
-// /// Kernel's virtual memory mapping offset (i.e. kernel's virtual address is [KERNEL_PHYS_BASE] +
-// /// [KERNEL_VIRT_OFFSET])
-// pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
-//
-// /// Interface for converting between address spaces.
-// ///
-// /// # Safety
-// ///
-// /// An incorrect implementation can produce invalid address.
-// pub unsafe trait ConvertAddress {
-//     /// Convert the address into a virtual one
-//     ///
-//     /// # Panics
-//     ///
-//     /// Panics if the address is already a virtual one
-//     ///
-//     /// # Safety
-//     ///
-//     /// An incorrect implementation can produce invalid address.
-//     unsafe fn virtualize(self) -> Self;
-//     /// Convert the address into a physical one
-//     ///
-//     /// # Panics
-//     ///
-//     /// Panics if the address is already a physical one
-//     ///
-//     /// # Safety
-//     ///
-//     /// An incorrect implementation can produce invalid address.
-//     unsafe fn physicalize(self) -> Self;
-// }
-//
 
 /// Helper trait to allow cross-address space access to pointers
 pub trait ForeignPointer: Sized {
     /// Perform a volatile pointer write without dropping the old value.
@@ -111,7 +61,7 @@ pub trait ForeignPointer: Sized {
     /// # Safety
     ///
     /// As this function allows direct memory writes, it is inherently unsafe.
-    unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: Self);
+    unsafe fn write_foreign_volatile(self: *mut Self, space: &ProcessAddressSpace, value: Self);
 
     /// Performs pointer validation for given address space:
     ///
@@ -125,7 +75,7 @@ pub trait ForeignPointer: Sized {
     /// conversion, and thus is unsafe.
     unsafe fn validate_user_ptr<'a>(
         self: *const Self,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a Self, Error>;
 
     /// [ForeignPointer::validate_user_ptr], with extra "writability" check.
@@ -136,7 +86,7 @@ pub trait ForeignPointer: Sized {
     /// conversion, and thus is unsafe.
     unsafe fn validate_user_mut<'a>(
         self: *mut Self,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a mut Self, Error>;
 
     /// [ForeignPointer::validate_user_ptr], but for slices
@@ -148,7 +98,7 @@ pub trait ForeignPointer: Sized {
     unsafe fn validate_user_slice<'a>(
         self: *const Self,
         len: usize,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a [Self], Error>;
 
     /// [ForeignPointer::validate_user_slice], but for mutable slices
@@ -160,12 +110,12 @@ pub trait ForeignPointer: Sized {
     unsafe fn validate_user_slice_mut<'a>(
         self: *mut Self,
         len: usize,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a mut [Self], Error>;
 }
 
 impl<T> ForeignPointer for T {
-    unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: T) {
+    unsafe fn write_foreign_volatile(self: *mut Self, space: &ProcessAddressSpace, value: T) {
         // TODO check align
         let addr = self as usize;
         let start_page = addr & !0xFFF;
@@ -187,7 +137,7 @@ impl<T> ForeignPointer for T {
     unsafe fn validate_user_slice_mut<'a>(
         self: *mut Self,
         len: usize,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a mut [Self], Error> {
         let base = self as usize;
         let layout = Layout::array::<T>(len).unwrap();
@@ -201,7 +151,7 @@ impl<T> ForeignPointer for T {
     unsafe fn validate_user_slice<'a>(
         self: *const Self,
         len: usize,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a [Self], Error> {
         let base = self as usize;
         let layout = Layout::array::<T>(len).unwrap();
@@ -214,7 +164,7 @@ impl<T> ForeignPointer for T {
 
     unsafe fn validate_user_mut<'a>(
         self: *mut Self,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a mut Self, Error> {
         let addr = self as usize;
         let layout = Layout::new::<T>();
@@ -231,7 +181,7 @@ impl<T> ForeignPointer for T {
 
     unsafe fn validate_user_ptr<'a>(
         self: *const Self,
-        space: &AddressSpace,
+        space: &ProcessAddressSpace,
     ) -> Result<&'a Self, Error> {
         let addr = self as usize;
         let layout = Layout::new::<T>();
@@ -262,7 +212,7 @@ fn validate_user_align_size(addr: usize, layout: &Layout) -> Result<(), Error> {
 
 /// Validates access to given userspace memory region with given constraints
 pub fn validate_user_region(
-    space: &AddressSpace,
+    space: &ProcessAddressSpace,
     base: usize,
     len: usize,
     _need_write: bool,
@@ -276,7 +226,7 @@ pub fn validate_user_region(
 
     for page in (aligned_start..aligned_end).step_by(0x1000) {
         // TODO check writability
-        space.translate(page).ok_or(Error::InvalidArgument)?;
+        space.translate(page)?;
     }
 
     Ok(())
@@ -30,10 +30,8 @@ impl PhysicalMemoryManager {
         bitmap_phys_base: PhysicalAddress,
         page_count: usize,
     ) -> PhysicalMemoryManager {
-        // let bitmap_addr = bitmap_phys_base.virtualize();
         let bitmap_len = (page_count + (BITMAP_WORD_SIZE - 1)) / BITMAP_WORD_SIZE;
         let mut bitmap = PhysicalRefMut::<'static, u64>::map_slice(bitmap_phys_base, bitmap_len);
-        // let bitmap = core::slice::from_raw_parts_mut(bitmap_addr as *mut BitmapWord, bitmap_len);
 
         bitmap.fill(BitmapWord::MAX);
 
@@ -85,7 +83,7 @@ impl PhysicalMemoryManager {
 
         'l0: for i in (aligned_bit..self.page_count).step_by(512) {
             for j in 0..HUGE_PAGE_WORD_COUNT {
-                if self.bitmap[i / BITMAP_WORD_SIZE] != 0 {
+                if self.bitmap[i / BITMAP_WORD_SIZE + j] != 0 {
                    continue 'l0;
                }
            }
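The second hunk above is a genuine bug fix: the old condition indexed the same bitmap word on every iteration of j, so a 2MiB window whose first word was free could pass the check even when a later word was busy. A standalone illustration of the corrected scan (constants assumed: 64 pages tracked per word, 512-page huge pages):

    const BITMAP_WORD_SIZE: usize = 64;    // pages tracked per bitmap word (assumed)
    const HUGE_PAGE_WORD_COUNT: usize = 8; // 512 pages / 64 pages per word (assumed)

    fn huge_page_window_free(bitmap: &[u64], first_bit: usize) -> bool {
        // Every word covering the 512-page window must be all-zero (all pages free).
        (0..HUGE_PAGE_WORD_COUNT).all(|j| bitmap[first_bit / BITMAP_WORD_SIZE + j] == 0)
    }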
@ -1,4 +1,4 @@
|
|||||||
use core::{iter::StepBy, ops::Range};
|
use core::ops::Range;
|
||||||
|
|
||||||
use abi::error::Error;
|
use abi::error::Error;
|
||||||
use kernel_util::util::OneTimeInit;
|
use kernel_util::util::OneTimeInit;
|
||||||
@ -10,68 +10,18 @@ use crate::{
|
|||||||
};
|
};
|
||||||
|
|
||||||
use self::{
|
use self::{
|
||||||
manager::{PhysicalMemoryManager, BITMAP_PAGE_COUNT, BITMAP_WORD_SIZE, TRACKED_PAGE_LIMIT},
|
manager::{PhysicalMemoryManager, BITMAP_WORD_SIZE, TRACKED_PAGE_LIMIT},
|
||||||
reserved::reserve_region,
|
reserved::reserve_region,
|
||||||
};
|
};
|
||||||
 use super::{address::FromRaw, PhysicalAddress};

-// //! Physical memory management facilities
-// use core::{iter::StepBy, mem::size_of, ops::Range};
-//
-// use abi::error::Error;
-// use kernel_util::util::OneTimeInit;
-//
-// use crate::{
-//     debug::LogLevel,
-//     mem::{
-//         phys::reserved::{is_reserved, reserve_region},
-//         ConvertAddress, /*, KERNEL_PHYS_BASE */
-//     },
-//     sync::IrqSafeSpinlock,
-// };
-//
-// use self::manager::PhysicalMemoryManager;
-//
-// // Enumerating lots of pages is slow and I'm too lazy right now to write a better algorithm, so
-// // capping the page count helps
-// const PHYS_MEMORY_PAGE_CAP: usize = 65536;
-//

 // 8 * 4096 bits per page, 1 page per bit
 const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

 mod manager;
 pub mod reserved;
-//
-// /// Contains information about the physical memory usage
-// #[derive(Clone, Copy, Debug)]
-// pub struct PhysicalMemoryStats {
-//     /// Number of pages available for allocation
-//     pub available_pages: usize,
-//     /// Number of pages being used
-//     pub used_pages: usize,
-// }
-//
-// /// Represents the way in which the page is used (or not)
-// #[derive(PartialEq, Clone, Copy, Debug)]
-// #[repr(u32)]
-// pub enum PageUsage {
-//     /// Page is not available for allocation or use
-//     Reserved = 0,
-//     /// Regular page available for allocation
-//     Available,
-//     /// Page is used by some kernel facility
-//     Used,
-// }
-//
-// /// Page descriptor structure for the page management array
-// #[repr(C)]
-// pub struct Page {
-//     usage: PageUsage,
-//     refcount: u32,
-// }
-//

 /// Defines an usable memory region
 #[derive(Clone, Copy, Debug)]
 pub struct PhysicalMemoryRegion {
@@ -103,46 +53,7 @@ impl PhysicalMemoryRegion {
         }
     }
 }
-//
-// impl PhysicalMemoryStats {
-//     /// Handles "alloc" cases of the memory manager
-//     pub fn add_allocated_pages(&mut self, count: usize, _usage: PageUsage) {
-//         assert!(self.available_pages >= count);
-//         self.available_pages -= count;
-//         self.used_pages += count;
-//     }
-//
-//     /// Handles "free" cases of the memory manager
-//     pub fn add_freed_pages(&mut self, count: usize, _usage: PageUsage) {
-//         assert!(self.used_pages >= count);
-//         self.used_pages -= count;
-//         self.available_pages += count;
-//     }
-//
-//     /// Increases the available pages counter
-//     pub fn add_available_pages(&mut self, count: usize) {
-//         self.available_pages += count;
-//     }
-//
-//     /// Prints out the statistics into specified log level
-//     pub fn dump(&self, level: LogLevel) {
-//         log_print_raw!(level, "+++ Physical memory stats +++\n");
-//         log_print_raw!(
-//             level,
-//             "Available: {}K ({} pages)\n",
-//             self.available_pages * 4,
-//             self.available_pages
-//         );
-//         log_print_raw!(
-//             level,
-//             "Used: {}K ({} pages)\n",
-//             self.used_pages * 4,
-//             self.used_pages
-//         );
-//         log_print_raw!(level, "-----------------------------\n");
-//     }
-// }
-//

 /// Global physical memory manager
 pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
     OneTimeInit::new();
@@ -267,58 +178,7 @@ pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(

     Ok(())
 }
-//
-//     debugln!("Initializing physical memory manager");
-//     debugln!("Total tracked pages: {}", total_count);
-//
-//     // Reserve memory regions from which allocation is forbidden
-//     reserve_region("kernel", kernel_physical_memory_region());
-//
-//     let pages_array_base = find_contiguous_region(it.clone(), (pages_array_size + 0xFFF) / 0x1000)
-//         .ok_or(Error::OutOfMemory)?;
-//
-//     debugln!(
-//         "Placing page tracking at {:#x}",
-//         pages_array_base.virtualize()
-//     );
-//
-//     reserve_region(
-//         "pages",
-//         PhysicalMemoryRegion {
-//             base: pages_array_base,
-//             size: (pages_array_size + 0xFFF) & !0xFFF,
-//         },
-//     );
-//
-//     let mut manager =
-//         PhysicalMemoryManager::new(phys_start, pages_array_base.virtualize(), pages_array_size);
-//     let mut page_count = 0;
-//
-//     for region in it {
-//         if page_count >= PHYS_MEMORY_PAGE_CAP {
-//             break;
-//         }
-//
-//         for page in region.pages() {
-//             if is_reserved(page) {
-//                 continue;
-//             }
-//
-//             manager.add_available_page(page);
-//             page_count += 1;
-//
-//             if page_count >= PHYS_MEMORY_PAGE_CAP {
-//                 break;
-//             }
-//         }
-//     }
-//
-//     infoln!("{} available pages ({}KiB)", page_count, page_count * 4);
-//
-//     PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));
-//     Ok(())
-// }
-//

 fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
     extern "C" {
         static __kernel_phys_start: u8;
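A note on the sizing comment above: one 4096-byte bitmap page holds 8 * 4096 = 32768 bits, so at one bit per tracked page each bitmap page covers 32768 pages, i.e. 128 MiB of physical memory. A minimal sketch of the arithmetic, with BITMAP_PAGES as a made-up stand-in (the real TRACKED_PAGE_LIMIT is defined elsewhere in the tree):

    // Sketch only; BITMAP_PAGES is hypothetical, used to show how
    // TRACKED_PAGE_LIMIT and MEMORY_UPPER_LIMIT relate.
    const BITS_PER_BITMAP_PAGE: usize = 8 * 4096; // 32768 tracked pages per bitmap page
    const BITMAP_PAGES: usize = 4; // assumed value for illustration
    const TRACKED_PAGE_LIMIT: usize = BITMAP_PAGES * BITS_PER_BITMAP_PAGE;
    // Upper bound of trackable physical memory, in bytes (same expression
    // as MEMORY_UPPER_LIMIT above):
    const MEMORY_UPPER_LIMIT_BYTES: usize = TRACKED_PAGE_LIMIT * 4096;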
@@ -13,14 +13,7 @@ static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 8> = StaticVector
 /// # Safety
 ///
 /// Can only be called from initialization code **before** physical memory manager is initialized.
-pub unsafe fn reserve_region(reason: &str, region: PhysicalMemoryRegion) {
-    // debugln!(
-    //     "Reserve {:?} memory: {:#x}..{:#x}",
-    //     reason,
-    //     region.base,
-    //     region.end()
-    // );
-
+pub unsafe fn reserve_region(_reason: &str, region: PhysicalMemoryRegion) {
     RESERVED_MEMORY.push(region);
 }

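With the logging gone, reserve_region is a plain push into the static vector, and the safety contract is carried entirely by the doc comment. A hedged usage sketch, mirroring the "kernel" reservation that appears in the commented-out initialization code removed above:

    // Sketch: early-boot call order, before PHYSICAL_MEMORY is initialized.
    // Safety: nothing may have been allocated from these regions yet.
    unsafe {
        reserve_region("kernel", kernel_physical_memory_region());
    }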
@@ -1,5 +1,4 @@
 use core::{
-    alloc::Layout,
     fmt,
     mem::align_of,
     ops::{Deref, DerefMut},
232 src/mem/process.rs Normal file
@@ -0,0 +1,232 @@
+use abi::error::Error;
+use cfg_if::cfg_if;
+use vmalloc::VirtualMemoryAllocator;
+
+use crate::{arch::x86_64::mem::table::L3, mem::phys, sync::IrqSafeSpinlock};
+
+use super::{address::AsPhysicalAddress, table::MapAttributes, PhysicalAddress};
+
+cfg_if! {
+    if #[cfg(target_arch = "aarch64")] {
+        use crate::arch::aarch64::table::AddressSpace;
+    } else if #[cfg(target_arch = "x86_64")] {
+        use crate::arch::x86_64::mem::process::ProcessAddressSpaceImpl;
+    }
+}
+
+/// Interface for virtual memory address space management
+pub trait ProcessAddressSpaceManager: Sized + AsPhysicalAddress {
+    const PAGE_SIZE: usize;
+    const LOWER_LIMIT_PFN: usize;
+    const UPPER_LIMIT_PFN: usize;
+
+    fn new() -> Result<Self, Error>;
+
+    unsafe fn map_page(
+        &mut self,
+        address: usize,
+        physical: PhysicalAddress,
+        flags: MapAttributes,
+    ) -> Result<(), Error>;
+
+    unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error>;
+
+    fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;
+}
+
+struct Inner {
+    allocator: VirtualMemoryAllocator,
+    table: ProcessAddressSpaceImpl,
+}
+
+pub struct ProcessAddressSpace {
+    inner: IrqSafeSpinlock<Inner>,
+}
+
+impl Inner {
+    fn try_map_pages<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
+        &mut self,
+        address: usize,
+        page_count: usize,
+        get_page: F,
+        attributes: MapAttributes,
+    ) -> Result<(), (usize, Error)> {
+        for i in 0..page_count {
+            let virt = address + i * ProcessAddressSpaceImpl::PAGE_SIZE;
+            let phys = match get_page(virt) {
+                Ok(page) => page,
+                Err(err) => {
+                    return Err((i, err));
+                }
+            };
+
+            if let Err(err) = unsafe { self.table.map_page(virt, phys, attributes) } {
+                return Err((i, err));
+            }
+        }
+
+        Ok(())
+    }
+
+    fn map_range<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
+        &mut self,
+        address: usize,
+        page_count: usize,
+        get_page: F,
+        attributes: MapAttributes,
+    ) -> Result<(), Error> {
+        // If inserting fails, the range cannot be mapped
+        let start_pfn = address / ProcessAddressSpaceImpl::PAGE_SIZE;
+        self.allocator.insert(start_pfn, page_count)?;
+
+        if let Err(_e) = self.try_map_pages(address, page_count, get_page, attributes) {
+            // TODO rollback & remove the range
+            todo!();
+        };
+
+        Ok(())
+    }
+
+    fn alloc_range<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
+        &mut self,
+        page_count: usize,
+        get_page: F,
+        attributes: MapAttributes,
+    ) -> Result<usize, Error> {
+        let start_pfn = self.allocator.allocate(page_count)?;
+        let address = start_pfn * ProcessAddressSpaceImpl::PAGE_SIZE;
+
+        if let Err(_e) = self.try_map_pages(address, page_count, get_page, attributes) {
+            // TODO rollback
+            todo!();
+        };
+
+        Ok(address)
+    }
+
+    unsafe fn unmap_range<F: Fn(usize, PhysicalAddress)>(
+        &mut self,
+        start_address: usize,
+        page_count: usize,
+        free_page: F,
+    ) -> Result<(), Error> {
+        let start_pfn = start_address / ProcessAddressSpaceImpl::PAGE_SIZE;
+
+        // Deallocate the range first
+        self.allocator.free(start_pfn, page_count)?;
+
+        // Then unmap it from the table
+        for i in 0..page_count {
+            let virt = start_address + i * ProcessAddressSpaceImpl::PAGE_SIZE;
+            // This should not fail under normal circumstances
+            // TODO handle failures here?
+            let phys = self.table.unmap_page(virt).unwrap();
+
+            free_page(virt, phys);
+        }
+
+        Ok(())
+    }
+}
+
+impl ProcessAddressSpace {
+    pub fn new() -> Result<Self, Error> {
+        let table = ProcessAddressSpaceImpl::new()?;
+        let allocator = VirtualMemoryAllocator::new(
+            ProcessAddressSpaceImpl::LOWER_LIMIT_PFN,
+            ProcessAddressSpaceImpl::UPPER_LIMIT_PFN,
+        );
+        Ok(Self {
+            inner: IrqSafeSpinlock::new(Inner { table, allocator }),
+        })
+    }
+
+    pub fn allocate<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
+        &self,
+        _hint: Option<usize>,
+        size: usize,
+        get_page: F,
+        attributes: MapAttributes,
+    ) -> Result<usize, Error> {
+        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+
+        let mut lock = self.inner.lock();
+
+        lock.alloc_range(
+            size / ProcessAddressSpaceImpl::PAGE_SIZE,
+            get_page,
+            attributes,
+        )
+    }
+
+    pub fn map<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
+        &self,
+        address: usize,
+        size: usize,
+        get_page: F,
+        attributes: MapAttributes,
+    ) -> Result<(), Error> {
+        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+
+        let mut lock = self.inner.lock();
+
+        lock.map_range(
+            address,
+            size / ProcessAddressSpaceImpl::PAGE_SIZE,
+            get_page,
+            attributes,
+        )
+    }
+
+    pub fn map_single(
+        &self,
+        address: usize,
+        physical: PhysicalAddress,
+        attributes: MapAttributes,
+    ) -> Result<(), Error> {
+        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+        assert!(physical.is_aligned_for::<L3>());
+
+        self.inner
+            .lock()
+            .map_range(address, 1, |_| Ok(physical), attributes)
+    }
+
+    pub fn translate(&self, address: usize) -> Result<PhysicalAddress, Error> {
+        // Offset is handled at impl level
+        self.inner.lock().table.translate(address).map(|e| e.0)
+    }
+
+    pub unsafe fn unmap(&self, address: usize, size: usize) -> Result<(), Error> {
+        assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+        assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
+
+        let mut lock = self.inner.lock();
+
+        lock.unmap_range(
+            address,
+            size / ProcessAddressSpaceImpl::PAGE_SIZE,
+            |_, paddr| phys::free_page(paddr),
+        )
+    }
+
+    pub fn debug_dump(&self) {
+        let lock = self.inner.lock();
+
+        debugln!("Address space @ {:#x}", unsafe {
+            lock.table.as_physical_address()
+        });
+        for (used, range) in lock.allocator.ranges() {
+            let start = range.start_pfn() * ProcessAddressSpaceImpl::PAGE_SIZE;
+            let end = range.end_pfn() * ProcessAddressSpaceImpl::PAGE_SIZE;
+            debugln!("{:#x?}: {}", start..end, used);
+        }
+    }
+}
+
+impl AsPhysicalAddress for ProcessAddressSpace {
+    unsafe fn as_physical_address(&self) -> PhysicalAddress {
+        self.inner.lock().table.as_physical_address()
+    }
+}
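Taken together, ProcessAddressSpace pairs the vmalloc range allocator with the architecture's page tables behind one lock, so a VMA range and its mappings can no longer drift apart. A minimal usage sketch against the API above (error handling elided; the attribute set is just an example):

    // Sketch: allocate a 4-page anonymous region, then release it.
    let space = ProcessAddressSpace::new()?;
    let attrs = MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL;

    // The closure supplies backing pages; the allocator picks the address.
    let base = space.allocate(None, 4 * 0x1000, |_| phys::alloc_page(), attrs)?;

    // unmap both frees the VMA range and returns the pages via phys::free_page.
    unsafe { space.unmap(base, 4 * 0x1000)? };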
@@ -3,17 +3,6 @@ use core::ops::{Deref, DerefMut};

 use abi::error::Error;
 use bitflags::bitflags;
-use cfg_if::cfg_if;
-
-use super::PhysicalAddress;
-
-cfg_if! {
-    if #[cfg(target_arch = "aarch64")] {
-        pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
-    } else if #[cfg(target_arch = "x86_64")] {
-        pub use crate::arch::x86_64::mem::table::{AddressSpace, PageEntry, PageTable};
-    }
-}

 bitflags! {
     /// Describes how a page translation mapping should behave
@@ -28,40 +17,6 @@ bitflags! {
     }
 }
-
-/// Interface for virtual memory address space management
-pub trait VirtualMemoryManager {
-    /// Allocates a region of virtual memory inside the address space and maps it to physical
-    /// memory pages with given attributes
-    fn allocate(
-        &self,
-        hint: Option<usize>,
-        len: usize,
-        attrs: MapAttributes,
-    ) -> Result<usize, Error>;
-
-    /// Insert a single 4KiB-page translation mapping into the table
-    fn map_page(
-        &self,
-        virt: usize,
-        phys: PhysicalAddress,
-        attrs: MapAttributes,
-    ) -> Result<(), Error>;
-
-    /// Releases the virtual memory region from the address space and the pages it refers to
-    fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error>;
-}
-
-pub trait KernelAddressSpace {
-    type Mapping;
-
-    fn map_page(
-        &self,
-        virt: usize,
-        physical: PhysicalAddress,
-        attrs: MapAttributes,
-    ) -> Result<Self::Mapping, Error>;
-}

 /// Interface for non-terminal tables to retrieve the next level of address translation tables
 pub trait NextPageTable {
     /// Type for the next-level page table
@@ -10,9 +10,7 @@ use vfs::{FileRef, Read, Seek};
 use yggdrasil_abi::{error::Error, io::SeekFrom};

 use crate::mem::{
-    phys,
-    pointer::PhysicalRefMut,
-    table::{AddressSpace, MapAttributes, VirtualMemoryManager},
+    phys, pointer::PhysicalRefMut, process::ProcessAddressSpace, table::MapAttributes,
 };

 #[derive(Clone, Copy)]
@@ -59,24 +57,16 @@ fn from_parse_error(v: ParseError) -> Error {
 }

 fn load_bytes<F>(
-    space: &AddressSpace,
+    space: &ProcessAddressSpace,
     addr: usize,
     mut src: F,
     len: usize,
-    elf_attrs: u32,
 ) -> Result<(), Error>
 where
     F: FnMut(usize, PhysicalRefMut<'_, [u8]>) -> Result<(), Error>,
 {
     // TODO check for crazy addresses here
-
-    let attrs = match (elf_attrs & PF_W, elf_attrs & PF_X) {
-        (0, 0) => MapAttributes::USER_READ,
-        (_, 0) => MapAttributes::USER_WRITE | MapAttributes::USER_READ,
-        (0, _) => MapAttributes::USER_READ,
-        (_, _) => MapAttributes::USER_WRITE | MapAttributes::USER_READ,
-    } | MapAttributes::NON_GLOBAL;

     let dst_page_off = addr & 0xFFF;
     let dst_page_aligned = addr & !0xFFF;
     let mut off = 0usize;
@@ -89,22 +79,9 @@ where
         let virt_page = dst_page_aligned + page_idx * 0x1000;
         assert_eq!(virt_page & 0xFFF, 0);
-        if let Some(page) = space.translate(virt_page) {
-            // TODO Handle these cases
-            warnln!("Page {:#x} is already mapped to {:#x}", virt_page, page);
-            todo!();
-        }
-
-        let phys_page = phys::alloc_page()?;
-        space.map_page(virt_page, phys_page, attrs)?;
-        // debugln!("Map {:#x} -> {:#x}", virt_page, phys_page);
-
+        let phys_page = space.translate(virt_page)?;
         let dst_slice = unsafe { PhysicalRefMut::map_slice(phys_page.add(page_off), count) };
-        // let dst_slice = unsafe {
-        //     let addr = (phys_page + page_off).virtualize();
-
-        //     core::slice::from_raw_parts_mut(addr as *mut u8, count)
-        // };

         src(off, dst_slice)?;

@@ -116,17 +93,41 @@ where
 }

 /// Loads an ELF binary from `file` into the target address space
-pub fn load_elf_from_file(space: &AddressSpace, file: FileRef) -> Result<usize, Error> {
+pub fn load_elf_from_file(space: &ProcessAddressSpace, file: FileRef) -> Result<usize, Error> {
     let file = FileReader { file: &file };

     let elf = ElfStream::<AnyEndian, _>::open_stream(file).map_err(from_parse_error)?;

+    space.debug_dump();
+
     for phdr in elf.segments() {
         if phdr.p_type != PT_LOAD {
             continue;
         }

-        debugln!("LOAD {:#x}", phdr.p_vaddr);
+        debugln!("LOAD {:#x?}", phdr.p_vaddr..phdr.p_vaddr + phdr.p_memsz);
+
+        let attrs = match (phdr.p_flags & PF_W, phdr.p_flags & PF_X) {
+            (0, 0) => MapAttributes::USER_READ,
+            (_, 0) => MapAttributes::USER_WRITE | MapAttributes::USER_READ,
+            (0, _) => MapAttributes::USER_READ,
+            (_, _) => MapAttributes::USER_WRITE | MapAttributes::USER_READ,
+        } | MapAttributes::NON_GLOBAL;
+
+        if phdr.p_memsz > 0 {
+            // Map the range
+            let aligned_start = (phdr.p_vaddr as usize) & !0xFFF;
+            let aligned_end = ((phdr.p_vaddr + phdr.p_memsz) as usize + 0xFFF) & !0xFFF;
+
+            space.map(
+                aligned_start,
+                aligned_end - aligned_start,
+                |_| phys::alloc_page(),
+                attrs,
+            )?;
+        } else {
+            continue;
+        }

         if phdr.p_filesz > 0 {
             load_bytes(
@@ -138,7 +139,6 @@ pub fn load_elf_from_file(space: &AddressSpace, file: FileRef) -> Result<usize,
                     source.read_exact(dst.deref_mut())
                 },
                 phdr.p_filesz as usize,
-                phdr.p_flags,
             )?;
         }

@@ -154,7 +154,6 @@ pub fn load_elf_from_file(space: &AddressSpace, file: FileRef) -> Result<usize,
                     Ok(())
                 },
                 len,
-                phdr.p_flags,
             )?;
         }
     }
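The mapping step above uses the usual round-down/round-up pair, so a PT_LOAD segment gets every page it touches, even when it straddles page boundaries. A worked example with assumed values p_vaddr = 0x401234 and p_memsz = 0x2000:

    // Sketch: the PT_LOAD rounding from load_elf_from_file above.
    let p_vaddr = 0x401234usize; // assumed for illustration
    let p_memsz = 0x2000usize;   // assumed for illustration

    let aligned_start = p_vaddr & !0xFFF; // 0x401000
    let aligned_end = (p_vaddr + p_memsz + 0xFFF) & !0xFFF; // 0x404000
    assert_eq!(aligned_end - aligned_start, 3 * 0x1000); // three pages for a 0x2000 segment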
@@ -7,15 +7,14 @@ use vfs::FileRef;

 use crate::{
     mem::{
-        phys,
-        table::{AddressSpace, MapAttributes, VirtualMemoryManager},
+        address::AsPhysicalAddress, phys, process::ProcessAddressSpace, table::MapAttributes,
         ForeignPointer,
     },
     proc,
     task::{context::TaskContextImpl, process::Process, TaskContext},
 };

-fn setup_args(space: &AddressSpace, virt: usize, args: &[&str]) -> Result<(), Error> {
+fn setup_args(space: &ProcessAddressSpace, virt: usize, args: &[&str]) -> Result<(), Error> {
     // arg data len
     let args_size: usize = args.iter().map(|x| x.len()).sum();
     // 1 + arg ptr:len count
@@ -30,13 +29,11 @@ fn setup_args(space: &AddressSpace, virt: usize, args: &[&str]) -> Result<(), Er
     debugln!("arg data size = {}", args_size);

     let phys_page = phys::alloc_page()?;
-    // TODO check if this doesn't overwrite anything
-    space.map_page(
+    space.map_single(
         virt,
         phys_page,
         MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL,
     )?;

     let write = phys_page.virtualize_raw();

     let mut offset = args_ptr_size;
@@ -57,6 +54,7 @@ fn setup_args(space: &AddressSpace, virt: usize, args: &[&str]) -> Result<(), Er
     }

     // Place the argument data
+    // TODO rewrite using write_foreign
     unsafe {
         let arg_data_slice =
             core::slice::from_raw_parts_mut((write + args_ptr_size) as *mut u8, args_size);
@@ -72,7 +70,7 @@ fn setup_args(space: &AddressSpace, virt: usize, args: &[&str]) -> Result<(), Er

 fn setup_binary<S: Into<String>>(
     name: S,
-    space: AddressSpace,
+    space: ProcessAddressSpace,
     entry: usize,
     args: &[&str],
 ) -> Result<Arc<Process>, Error> {
@@ -82,14 +80,12 @@ fn setup_binary<S: Into<String>>(
     // 0x1000 of guard page
     let virt_args_base = virt_stack_base + (USER_STACK_PAGES + 1) * 0x1000;

-    for i in 0..USER_STACK_PAGES {
-        let phys = phys::alloc_page()?;
-        space.map_page(
-            virt_stack_base + i * 0x1000,
-            phys,
-            MapAttributes::USER_WRITE | MapAttributes::USER_READ | MapAttributes::NON_GLOBAL,
-        )?;
-    }
+    space.map(
+        virt_stack_base,
+        USER_STACK_PAGES * 0x1000,
+        |_| phys::alloc_page(),
+        MapAttributes::USER_WRITE | MapAttributes::USER_READ | MapAttributes::NON_GLOBAL,
+    )?;

     setup_args(&space, virt_args_base, args)?;

@@ -113,7 +109,12 @@ fn setup_binary<S: Into<String>>(
         }
     }

-    let context = TaskContext::user(entry, virt_args_base, space.physical_address(), user_sp)?;
+    let context = TaskContext::user(
+        entry,
+        virt_args_base,
+        unsafe { space.as_physical_address() },
+        user_sp,
+    )?;

     Ok(Process::new_with_context(name, Some(space), context))
 }
@@ -124,7 +125,7 @@ pub fn load_elf<S: Into<String>>(
     file: FileRef,
     args: &[&str],
 ) -> Result<Arc<Process>, Error> {
-    let space = AddressSpace::new_empty()?;
+    let space = ProcessAddressSpace::new()?;
     let elf_entry = proc::elf::load_elf_from_file(&space, file)?;

     setup_binary(name, space, elf_entry, args)
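The layout in setup_binary keeps one unmapped page between the stack and the argument page, so a stack overflow faults instead of silently corrupting the arguments. A quick sketch of that arithmetic with assumed values (USER_STACK_PAGES and the base address are placeholders here):

    // Sketch: layout produced by setup_binary; numbers are illustrative.
    const USER_STACK_PAGES: usize = 8; // assumed value
    let virt_stack_base = 0x1000_0000usize; // assumed base

    let stack_top = virt_stack_base + USER_STACK_PAGES * 0x1000; // guard page starts here, unmapped
    let virt_args_base = virt_stack_base + (USER_STACK_PAGES + 1) * 0x1000;
    assert_eq!(virt_args_base, stack_top + 0x1000); // args begin one page past the guard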
@@ -16,7 +16,7 @@ use yggdrasil_abi::{

 use crate::{
     block, fs,
-    mem::table::{MapAttributes, VirtualMemoryManager},
+    mem::{phys, table::MapAttributes},
     proc::{self, io::ProcessIo},
     sync::IrqSafeSpinlockGuard,
     task::{process::Process, runtime, ProcessId},
@@ -82,11 +82,14 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
                 todo!();
             }

-            space.allocate(
+            let res = space.allocate(
                 None,
-                len / 0x1000,
-                MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL,
-            )
+                len,
+                |_| phys::alloc_page(),
+                MapAttributes::USER_WRITE | MapAttributes::USER_READ | MapAttributes::NON_GLOBAL,
+            );
+
+            res
         }
         SyscallFunction::UnmapMemory => {
             let addr = args[0] as usize;
@@ -99,7 +102,9 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
                 todo!();
             }

-            space.deallocate(addr, len)?;
+            unsafe {
+                space.unmap(addr, len)?;
+            }

             Ok(0)
         }
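Note the unit change at the MapMemory call site above: the removed VirtualMemoryManager::allocate took a page count (len / 0x1000), while ProcessAddressSpace::allocate takes a byte length and divides internally, asserting page alignment. A tiny sketch of the equivalence, with a hypothetical request size:

    // Sketch: the same request expressed against the old and new APIs.
    let len = 4 * 0x1000usize; // hypothetical, page-aligned request
    let old_arg = len / 0x1000; // old API argument: page count
    let new_arg = len;          // new API argument: byte length
    assert_eq!(new_arg / 0x1000, old_arg);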
@@ -18,8 +18,7 @@ use kernel_util::util::OneTimeInit;
 use vfs::VnodeRef;

 use crate::{
-    arch::{Architecture, ArchitectureImpl},
-    mem::{table::AddressSpace, ForeignPointer},
+    mem::{process::ProcessAddressSpace, ForeignPointer},
     proc::io::ProcessIo,
     sync::{IrqGuard, IrqSafeSpinlock},
     task::context::TaskContextImpl,
@@ -72,7 +71,7 @@ pub struct Process {
     id: OneTimeInit<ProcessId>,
     state: AtomicProcessState,
     cpu_id: AtomicU32,
-    space: Option<AddressSpace>,
+    space: Option<ProcessAddressSpace>,
     inner: IrqSafeSpinlock<ProcessInner>,

     exit_waker: QueueWaker,
@@ -92,7 +91,7 @@ impl Process {
     /// Has side-effect of allocating a new PID for itself.
     pub fn new_with_context<S: Into<String>>(
         name: S,
-        space: Option<AddressSpace>,
+        space: Option<ProcessAddressSpace>,
         normal_context: TaskContext,
     ) -> Arc<Self> {
         let this = Arc::new(Self {
@@ -172,12 +171,12 @@ impl Process {
     }

     /// Returns the address space of the task
-    pub fn address_space(&self) -> &AddressSpace {
+    pub fn address_space(&self) -> &ProcessAddressSpace {
         self.space.as_ref().unwrap()
     }

     /// Returns the address space of the task, if one is set
-    pub fn get_address_space(&self) -> Option<&AddressSpace> {
+    pub fn get_address_space(&self) -> Option<&ProcessAddressSpace> {
         self.space.as_ref()
     }
