//! Physical memory management utilities
use core::ops::Range;

use abi::error::Error;
use kernel_util::{
    mem::address::{FromRaw, IntoRaw, PhysicalAddress},
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

use crate::{
    arch::{Architecture, ARCHITECTURE},
    mem::phys::reserved::is_reserved,
};

use self::{
    manager::{PhysicalMemoryManager, BITMAP_WORD_SIZE, TRACKED_PAGE_LIMIT},
    reserved::reserve_region,
};

// 8 * 4096 bits per page, 1 page per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

mod manager;
pub mod reserved;

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
    /// Start of the region
    pub base: PhysicalAddress,
    /// Length of the region
    pub size: usize,
}

impl PhysicalMemoryRegion {
    /// Returns the end address of the region
    pub const fn end(&self) -> PhysicalAddress {
        self.base.add(self.size)
    }

    /// Returns an address range covered by the region
    pub fn range(&self) -> Range<PhysicalAddress> {
        self.base..self.end()
    }

    /// Constrains the [PhysicalMemoryRegion] to the global memory limit set in the kernel,
    /// returning the clamped `(start, end)` pair, or [None] if the region lies entirely
    /// above the limit
    pub fn clamp(self) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let start = self.base.min(MEMORY_UPPER_LIMIT);
        let end = self.end().min(MEMORY_UPPER_LIMIT);

        if start < end {
            Some((start, end))
        } else {
            None
        }
    }
}

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
    OneTimeInit::new();

/// Allocates a single physical page from the global manager
pub fn alloc_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_page()
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
}

/// Allocates a single 2MiB page of physical memory from the global manager
pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_2m_page()
}

/// Deallocates a physical memory page.
///
/// # Safety
///
/// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: PhysicalAddress) {
    PHYSICAL_MEMORY.get().lock().free_page(addr)
}

/// Returns the smallest range covering all the clamped regions in the iterator,
/// or [None] if the iterator contains no usable memory
fn physical_memory_range<I: IntoIterator<Item = PhysicalMemoryRegion>>(
    it: I,
) -> Option<(PhysicalAddress, PhysicalAddress)> {
    let mut start = PhysicalAddress::MAX;
    let mut end = PhysicalAddress::MIN;

    for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        if reg_start < start {
            start = reg_start;
        }
        if reg_end > end {
            end = reg_end;
        }
    }

    if start == PhysicalAddress::MAX || end == PhysicalAddress::MIN {
        None
    } else {
        Some((start, end))
    }
}

/// Locates a contiguous run of `count` available pages within the memory region list
pub fn find_contiguous_region<I: IntoIterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<PhysicalAddress> {
    for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        let mut collected = 0;
        let mut base_addr = None;

        for addr in (reg_start..reg_end).step_by(0x1000) {
            // A reserved page breaks the current run; restart the search past it
            if is_reserved(addr) {
                collected = 0;
                base_addr = None;
                continue;
            }
            if base_addr.is_none() {
                base_addr = Some(addr);
            }
            collected += 1;

            if collected == count {
                return base_addr;
            }
        }
    }
    // TODO: handle the case where no suitable region exists
    todo!()
}
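// Usage sketch (illustrative only, not a doctest, since the global manager only
// exists inside a booted kernel): the expected boot-time flow is to hand the
// firmware-provided region list to `init_from_iter` below, after which the page
// allocation API above becomes usable. The region values here are hypothetical
// placeholders:
//
//     let regions = [PhysicalMemoryRegion {
//         base: PhysicalAddress::from_raw(0x100000),
//         size: 128 * 1024 * 1024,
//     }];
//     unsafe { init_from_iter(regions.into_iter()) }?;
//
//     let page = alloc_page()?;
//     // ... use the page ...
//     // SAFETY: `page` was just allocated by this manager and is page-aligned
//     unsafe { free_page(page) };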
/// Initializes the physical memory manager from the given iterator of available
/// memory regions.
///
/// 1. Finds a non-reserved range to place the page tracking bitmap.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function has not been called before and that the regions
/// are valid and actually available.
pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
    it: I,
) -> Result<(), Error> {
    // Map the physical memory
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();

    // Prevent the kernel image itself from being handed out as free pages
    reserve_region("kernel", kernel_physical_memory_region());

    ARCHITECTURE.map_physical_memory(it.clone(), phys_start, phys_end)?;

    // Compute the size of the page tracking bitmap, rounded up to whole pages
    let total_count = (phys_end - phys_start) / 0x1000;
    let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
    let page_bitmap_page_count = (page_bitmap_size + 0xFFF) / 0x1000;

    let page_bitmap_phys_base =
        find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();

    reserve_region(
        "page-bitmap",
        PhysicalMemoryRegion {
            base: page_bitmap_phys_base,
            size: page_bitmap_page_count * 0x1000,
        },
    );

    // TODO: handle a physical base address that is not aligned to a 32MiB boundary
    if IntoRaw::<usize>::into_raw(phys_start) & 0x1FFFFFF != 0 {
        todo!();
    }

    let mut manager =
        PhysicalMemoryManager::new(page_bitmap_phys_base, phys_start.into_raw(), total_count);
    let mut collected = 0;
    // Temporary cap: only the first 16384 pages (64MiB) are handed to the manager
    const MAX_MEMORY: usize = 16 * 1024;

    for (start, end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        for page in (start..end).step_by(0x1000) {
            if collected >= MAX_MEMORY {
                break;
            }
            if is_reserved(page) {
                continue;
            }
            manager.add_available_page(page);
            collected += 1;
        }
    }

    PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));
    Ok(())
}

fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
    extern "C" {
        static __kernel_phys_start: u8;
        static __kernel_size: u8;
    }

    // These symbols are provided by the linker script; only their addresses are
    // meaningful, not their contents
    let base = PhysicalAddress::from_raw(absolute_address!(__kernel_phys_start));
    let size = absolute_address!(__kernel_size);

    PhysicalMemoryRegion { base, size }
}
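// Worked example for `kernel_physical_memory_region` (hypothetical figures): if
// the linker script placed `__kernel_phys_start` at 0x200000 and defined
// `__kernel_size` as 0x180000, the reserved "kernel" region would span
// 0x200000..0x380000, i.e. 384 pages that `init_from_iter` skips when populating
// the manager.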