From b440a3c975a22d818f48818fcd2f4c918715833e Mon Sep 17 00:00:00 2001
From: Mark Poliakov
Date: Wed, 13 Oct 2021 10:20:52 +0300
Subject: [PATCH] feat: integrate physical mm

---
 etc/aarch64-qemu.ld                      |   1 +
 kernel/Cargo.toml                        |   2 +-
 kernel/src/arch/aarch64/mach_qemu/mod.rs |   7 ++
 kernel/src/mem/mod.rs                    |  18 ++++
 kernel/src/mem/phys/manager.rs           |  76 +++++++++++++++
 kernel/src/mem/phys/mod.rs               | 119 +++++++++++++++++++++++
 kernel/src/mem/phys/reserved.rs          |  66 +++++++++++++
 7 files changed, 288 insertions(+), 1 deletion(-)
 create mode 100644 kernel/src/mem/phys/manager.rs
 create mode 100644 kernel/src/mem/phys/mod.rs
 create mode 100644 kernel/src/mem/phys/reserved.rs

diff --git a/etc/aarch64-qemu.ld b/etc/aarch64-qemu.ld
index 0203fcf..a689809 100644
--- a/etc/aarch64-qemu.ld
+++ b/etc/aarch64-qemu.ld
@@ -41,4 +41,5 @@ SECTIONS {
     PROVIDE(__bss_end_phys = . - KERNEL_OFFSET);
 
     PROVIDE(__kernel_end = .);
+    PROVIDE(__kernel_end_phys = . - KERNEL_OFFSET);
 }
diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml
index 2085185..dd73764 100644
--- a/kernel/Cargo.toml
+++ b/kernel/Cargo.toml
@@ -10,8 +10,8 @@ name = "kernel"
 test = false
 
 [dependencies]
-cfg-if = "1.x.x"
 error = { path = "../error" }
+cfg-if = "1.x.x"
 tock-registers = "0.7.x"
 fdt-rs = { version = "0.x.x", default-features = false }
 
diff --git a/kernel/src/arch/aarch64/mach_qemu/mod.rs b/kernel/src/arch/aarch64/mach_qemu/mod.rs
index 869af9b..2858839 100644
--- a/kernel/src/arch/aarch64/mach_qemu/mod.rs
+++ b/kernel/src/arch/aarch64/mach_qemu/mod.rs
@@ -12,6 +12,7 @@ use crate::dev::{
     serial::{pl011::Pl011, SerialDevice},
     Device,
 };
+use crate::mem::phys;
 use error::Errno;
 
 pub use gic::IrqNumber;
@@ -23,11 +24,17 @@ const GICC_BASE: usize = 0x08010000;
 // TODO extract this from device tree
 const ECAM_BASE: usize = 0x4010000000;
 
+const PHYS_BASE: usize = 0x40000000;
+const PHYS_SIZE: usize = 0x10000000;
+
 #[allow(missing_docs)]
 pub fn init_board() -> Result<(), Errno> {
     unsafe {
+        // Enable UART early on
        UART0.enable()?;
 
+        phys::init_from_region(PHYS_BASE, PHYS_SIZE);
+
         GIC.enable()?;
 
         UART0.init_irqs()?;
diff --git a/kernel/src/mem/mod.rs b/kernel/src/mem/mod.rs
index d4f3cf0..21b6598 100644
--- a/kernel/src/mem/mod.rs
+++ b/kernel/src/mem/mod.rs
@@ -1,9 +1,27 @@
 //! Memory management and functions module
+#![allow(missing_docs)]
 
+pub mod phys;
 pub mod virt;
 
 /// Virtual offset applied to kernel address space
 pub const KERNEL_OFFSET: usize = 0xFFFFFF8000000000;
 
+/// Converts a physical address to its kernel-space virtual mapping
+pub fn virtualize(addr: usize) -> usize {
+    // TODO remove this function
+    addr + KERNEL_OFFSET
+}
+
+/// Returns the physical address of the end of the kernel image
+pub fn kernel_end_phys() -> usize {
+    extern "C" {
+        static __kernel_end_phys: u8;
+    }
+    unsafe { &__kernel_end_phys as *const _ as usize }
+}
+
+/// Size of a single memory page
+pub const PAGE_SIZE: usize = 4096;
 
 /// See memcpy(3p).
 ///
diff --git a/kernel/src/mem/phys/manager.rs b/kernel/src/mem/phys/manager.rs
new file mode 100644
index 0000000..3d34af5
--- /dev/null
+++ b/kernel/src/mem/phys/manager.rs
@@ -0,0 +1,76 @@
+use super::{PageInfo, PageUsage};
+use crate::mem::{virtualize, PAGE_SIZE};
+use crate::sync::IrqSafeNullLock;
+use core::mem;
+use error::Errno;
+
+pub unsafe trait Manager {
+    fn alloc_page(&mut self, pu: PageUsage) -> Result<usize, Errno>;
+    fn alloc_contiguous_pages(&mut self, pu: PageUsage, count: usize) -> Result<usize, Errno>;
+    fn free_page(&mut self, page: usize) -> Result<(), Errno>;
+    // TODO status()
+}
+pub struct SimpleManager {
+    pages: &'static mut [PageInfo],
+    base_index: usize,
+}
+impl SimpleManager {
+    pub(super) unsafe fn initialize(base: usize, at: usize, count: usize) -> Self {
+        let pages: &'static mut [PageInfo] =
+            core::slice::from_raw_parts_mut(virtualize(at) as *mut _, count);
+        // Initialize uninit pages
+        for index in 0..count {
+            mem::forget(mem::replace(
+                &mut pages[index],
+                PageInfo {
+                    refcount: 0,
+                    usage: PageUsage::Reserved,
+                },
+            ));
+        }
+        Self {
+            base_index: base / PAGE_SIZE,
+            pages,
+        }
+    }
+    pub(super) unsafe fn add_page(&mut self, addr: usize) {
+        let page = &mut self.pages[addr / PAGE_SIZE - self.base_index];
+        assert!(page.refcount == 0 && page.usage == PageUsage::Reserved);
+        page.usage = PageUsage::Available;
+    }
+}
+unsafe impl Manager for SimpleManager {
+    fn alloc_page(&mut self, pu: PageUsage) -> Result<usize, Errno> {
+        for index in 0..self.pages.len() {
+            let page = &mut self.pages[index];
+            if page.usage == PageUsage::Available {
+                page.usage = pu;
+                page.refcount = 1;
+                return Ok((self.base_index + index) * PAGE_SIZE);
+            }
+        }
+        Err(Errno::OutOfMemory)
+    }
+    fn alloc_contiguous_pages(&mut self, pu: PageUsage, count: usize) -> Result<usize, Errno> {
+        'l0: for i in 0..(self.pages.len() + 1).saturating_sub(count) {
+            for j in 0..count {
+                if self.pages[i + j].usage != PageUsage::Available {
+                    continue 'l0;
+                }
+            }
+            for j in 0..count {
+                let page = &mut self.pages[i + j];
+                assert!(page.usage == PageUsage::Available);
+                page.usage = pu;
+                page.refcount = 1;
+            }
+            return Ok((self.base_index + i) * PAGE_SIZE);
+        }
+        Err(Errno::OutOfMemory)
+    }
+    fn free_page(&mut self, _page: usize) -> Result<(), Errno> {
+        todo!()
+    }
+}
+
+pub(super) static MANAGER: IrqSafeNullLock<Option<SimpleManager>> = IrqSafeNullLock::new(None);
diff --git a/kernel/src/mem/phys/mod.rs b/kernel/src/mem/phys/mod.rs
new file mode 100644
index 0000000..633550b
--- /dev/null
+++ b/kernel/src/mem/phys/mod.rs
@@ -0,0 +1,119 @@
+use crate::mem::PAGE_SIZE;
+use core::mem::size_of;
+
+mod reserved;
+mod manager;
+
+use manager::{Manager, SimpleManager, MANAGER};
+pub use reserved::ReservedRegion;
+
+type ManagerImpl = SimpleManager;
+
+const MAX_PAGES: usize = 1024 * 1024;
+
+#[derive(PartialEq, Debug, Clone, Copy)]
+pub enum PageUsage {
+    Reserved,
+    Available,
+    Kernel
+}
+
+pub struct PageInfo {
+    refcount: usize,
+    usage: PageUsage
+}
+
+#[derive(Clone)]
+pub struct MemoryRegion {
+    pub start: usize,
+    pub end: usize,
+}
+
+#[repr(transparent)]
+#[derive(Clone)]
+pub struct SimpleMemoryIterator {
+    inner: Option<MemoryRegion>,
+}
+impl SimpleMemoryIterator {
+    pub const fn new(reg: MemoryRegion) -> Self {
+        Self { inner: Some(reg) }
+    }
+}
+impl Iterator for SimpleMemoryIterator {
+    type Item = MemoryRegion;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.take()
+    }
+}
+
+fn find_contiguous<T: Iterator<Item = MemoryRegion>>(
+    iter: T,
+    count: usize,
+) -> Option<usize> {
+    for region in iter {
+        let mut collected = 0;
+        let mut base_addr = None;
+        for addr in (region.start..region.end).step_by(PAGE_SIZE) {
+            if reserved::is_reserved(addr) {
+                collected = 0;
+                base_addr = None;
+                continue;
+            }
+            if base_addr.is_none() {
+                base_addr = Some(addr);
+            }
+            collected += 1;
+            if collected == count {
+                return base_addr;
+            }
+        }
+    }
+    None
+}
+
+pub unsafe fn init_from_iter<T: Iterator<Item = MemoryRegion> + Clone>(iter: T) {
+    let mut mem_base = usize::MAX;
+    for reg in iter.clone() {
+        if reg.start < mem_base {
+            mem_base = reg.start;
+        }
+    }
+    debugln!("Memory base is {:?}", mem_base);
+    // Step 1. Count available memory
+    let mut total_pages = 0usize;
+    for reg in iter.clone() {
+        total_pages += (reg.end - reg.start) / PAGE_SIZE;
+    }
+    // TODO maybe instead of size_of::<...> use Layout?
+    let need_pages = ((total_pages * size_of::<PageInfo>()) + 0xFFF) / 0x1000;
+    reserved::reserve_kernel();
+    // Step 2. Allocate memory for page array
+    let pages_base =
+        find_contiguous(iter.clone(), need_pages).expect("Failed to allocate memory for page info");
+    reserved::reserve_pages(pages_base, need_pages);
+    // Step 3. Initialize the memory manager with available pages
+    let mut manager = ManagerImpl::initialize(mem_base, pages_base, total_pages);
+    let mut usable_pages = 0usize;
+    'l0: for region in iter {
+        for addr in (region.start..region.end).step_by(PAGE_SIZE) {
+            if !reserved::is_reserved(addr) {
+                manager.add_page(addr);
+                usable_pages += 1;
+                if usable_pages == MAX_PAGES {
+                    break 'l0;
+                }
+            }
+        }
+    }
+    debug!("{}K of usable physical memory\n", usable_pages * 4);
+    *MANAGER.lock() = Some(manager);
+}
+
+pub unsafe fn init_from_region(base: usize, size: usize) {
+    let iter = SimpleMemoryIterator::new(MemoryRegion {
+        start: base,
+        end: base + size
+    });
+
+    init_from_iter(iter);
+}
diff --git a/kernel/src/mem/phys/reserved.rs b/kernel/src/mem/phys/reserved.rs
new file mode 100644
index 0000000..d95d40e
--- /dev/null
+++ b/kernel/src/mem/phys/reserved.rs
@@ -0,0 +1,66 @@
+use crate::mem::{kernel_end_phys, PAGE_SIZE};
+use core::mem::MaybeUninit;
+use core::ptr::null_mut;
+
+pub struct ReservedRegion {
+    pub start: usize,
+    pub end: usize,
+    next: *mut ReservedRegion,
+}
+pub struct ReservedRegionIterator {
+    ptr: *mut ReservedRegion,
+}
+impl Iterator for ReservedRegionIterator {
+    type Item = &'static mut ReservedRegion;
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(item) = unsafe { self.ptr.as_mut() } {
+            self.ptr = item.next;
+            Some(item)
+        } else {
+            None
+        }
+    }
+}
+impl ReservedRegion {
+    pub const fn new(start: usize, end: usize) -> ReservedRegion {
+        //assert!(start.is_paligned() && end.is_paligned());
+        ReservedRegion {
+            start,
+            end,
+            next: null_mut(),
+        }
+    }
+}
+
+static mut RESERVED_REGIONS_HEAD: *mut ReservedRegion = null_mut();
+static mut RESERVED_REGION_KERNEL: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
+static mut RESERVED_REGION_PAGES: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
+pub unsafe fn reserve(usage: &str, region: *mut ReservedRegion) {
+    debugln!("Reserving {:?} region: {:?}..{:?}", usage, (*region).start, (*region).end);
+    (*region).next = RESERVED_REGIONS_HEAD;
+    RESERVED_REGIONS_HEAD = region;
+}
+pub(super) unsafe fn reserve_kernel() {
+    RESERVED_REGION_KERNEL.write(ReservedRegion::new(
+        0,
+        kernel_end_phys(),
+    ));
+    reserve("kernel", RESERVED_REGION_KERNEL.as_mut_ptr());
+}
+pub(super) unsafe fn reserve_pages(base: usize, count: usize) {
+    RESERVED_REGION_PAGES.write(ReservedRegion::new(base, base + count * PAGE_SIZE));
+    reserve("pages", RESERVED_REGION_PAGES.as_mut_ptr());
+}
+pub fn is_reserved(page: usize) -> bool {
+    unsafe {
+        let mut iter = RESERVED_REGIONS_HEAD;
+        while !iter.is_null() {
+            let region = &*iter;
+            if page >= region.start && page < region.end {
+                return true;
+            }
+            iter = region.next;
+        }
+    }
+    false
+}
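
Usage note (not part of the patch above): once `phys::init_from_region()` has run during board init, physical pages are handed out through the `Manager` trait behind the `MANAGER` lock. Since this change keeps `MANAGER` as `pub(super)`, code outside `mem::phys` would need a small wrapper; the `alloc_kernel_page` helper below is a hypothetical sketch of such a wrapper, written as if it lived in kernel/src/mem/phys/mod.rs (it also assumes `use error::Errno;` at the top of that module):

    // Hypothetical wrapper, not part of this patch.
    // Locks the global physical memory manager and allocates a single 4 KiB
    // page marked as PageUsage::Kernel, returning its physical address.
    pub fn alloc_kernel_page() -> Result<usize, Errno> {
        MANAGER
            .lock()
            .as_mut()
            .expect("Physical memory manager is not initialized")
            .alloc_page(PageUsage::Kernel)
    }

A follow-up change could expose wrappers like this (and a matching `free_page`) so that page table and heap code never touch `MANAGER` directly.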