use core::ops::Range;

use kernel_arch::mem::PhysicalMemoryAllocator;
use libk_mm_interface::address::{FromRaw, IntoRaw, PhysicalAddress};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use yggdrasil_abi::{error::Error, system::SystemMemoryStats};

use crate::{
    phys::{
        manager::BITMAP_WORD_SIZE,
        reserved::{is_reserved, reserve_region},
    },
    L2_PAGE_SIZE, L3_PAGE_SIZE,
};

use self::manager::{PhysicalMemoryManager, TRACKED_PAGE_LIMIT};

mod manager;
pub mod reserved;

pub struct GlobalPhysicalAllocator;

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
    /// Start of the region
    pub base: PhysicalAddress,
    /// Length of the region
    pub size: usize,
}

// 8 * 4096 bits per page, 1 page per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
    OneTimeInit::new();

impl PhysicalMemoryRegion {
    /// Returns the end address of the region
    pub const fn end(&self) -> PhysicalAddress {
        self.base.add(self.size)
    }

    /// Returns an address range covered by the region
    pub fn range(&self) -> Range<PhysicalAddress> {
        self.base..self.end()
    }

    /// Constrains the [PhysicalMemoryRegion] to global memory limits set in the kernel
    pub fn clamp(self, limit: PhysicalAddress) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let start = self.base.min(limit);
        let end = self.end().min(limit);

        if start < end {
            Some((start, end))
        } else {
            None
        }
    }
}

impl PhysicalMemoryAllocator for GlobalPhysicalAllocator {
    type Address = PhysicalAddress;

    fn allocate_page() -> Result<Self::Address, Error> {
        alloc_page()
    }

    fn allocate_contiguous_pages(count: usize) -> Result<Self::Address, Error> {
        alloc_pages_contiguous(count)
    }

    unsafe fn free_page(page: Self::Address) {
        free_page(page)
    }
}

/// Allocates a single physical page from the global manager
pub fn alloc_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_page()
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
}

/// Allocates a single 2MiB page of physical memory from the global manager
pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_2m_page()
}

/// Returns physical memory stats
pub fn stats() -> SystemMemoryStats {
    PhysicalMemoryManager::stats()
}

/// Deallocates a physical memory page.
///
/// # Safety
///
/// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: PhysicalAddress) {
    PHYSICAL_MEMORY.get().lock().free_page(addr)
}

fn physical_memory_range<I: IntoIterator<Item = PhysicalMemoryRegion>>(
    it: I,
) -> Option<(PhysicalAddress, PhysicalAddress)> {
    let mut start = PhysicalAddress::MAX;
    let mut end = PhysicalAddress::MIN;

    for (reg_start, reg_end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        if reg_start < start {
            start = reg_start;
        }
        if reg_end > end {
            end = reg_end;
        }
    }

    if start == PhysicalAddress::MAX || end == PhysicalAddress::MIN {
        None
    } else {
        Some((start, end))
    }
}

/// Locates a contiguous region of available physical memory within the memory region list
pub fn find_contiguous_region<I: IntoIterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<PhysicalAddress> {
    for (reg_start, reg_end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        let mut collected = 0;
        let mut base_addr = None;

        for addr in (reg_start..reg_end).step_by(L3_PAGE_SIZE) {
            if is_reserved(addr) {
                // A reserved page breaks the run; restart the search from the next page
                collected = 0;
                base_addr = None;
                continue;
            }
            if base_addr.is_none() {
                base_addr = Some(addr);
            }
            collected += 1;

            if collected == count {
                return base_addr;
            }
        }
    }
    todo!()
}

/// Initializes physical memory manager from given available memory region iterator.
///
/// 1. Finds a non-reserved range to place the page tracking array.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function has not been called before and that the regions
/// are valid and actually available.
pub unsafe fn init_from_iter<
    I: Iterator<Item = PhysicalMemoryRegion> + Clone,
    Map: FnOnce(I, PhysicalAddress, PhysicalAddress) -> Result<(), Error>,
>(
    it: I,
    map_physical_memory: Map,
) -> Result<(), Error> {
    // Map the physical memory
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();

    reserve_region("kernel", kernel_physical_memory_region());

    map_physical_memory(it.clone(), phys_start, phys_end)?;

    let total_count = (phys_end - phys_start) / L3_PAGE_SIZE;
    let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / (BITMAP_WORD_SIZE / 8);
    let page_bitmap_page_count = (page_bitmap_size + L3_PAGE_SIZE - 1) / L3_PAGE_SIZE;

    let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();

    reserve_region(
        "page-bitmap",
        PhysicalMemoryRegion {
            base: page_bitmap_phys_base,
            size: page_bitmap_page_count * L3_PAGE_SIZE,
        },
    );

    // The mapped physical range is expected to start on an L2 (2MiB) boundary
    if IntoRaw::<usize>::into_raw(phys_start) & (L2_PAGE_SIZE - 1) != 0 {
        todo!();
    }

    let mut manager =
        PhysicalMemoryManager::new(page_bitmap_phys_base, phys_start.into_raw(), total_count);
    let mut collected = 0;
    // Upper limit on the number of pages added to the manager
    const MAX_MEMORY: usize = 64 * 1024;

    for (start, end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        for page in (start..end).step_by(L3_PAGE_SIZE) {
            if collected >= MAX_MEMORY {
                break;
            }
            if is_reserved(page) {
                continue;
            }
            manager.add_available_page(page);
            collected += 1;
        }
    }

    PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));

    Ok(())
}

fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
    use core::ptr::addr_of;

    extern "C" {
        static __kernel_start: u8;
        static __kernel_end: u8;
    }

    let start = unsafe { addr_of!(__kernel_start) };
    let end = unsafe { addr_of!(__kernel_end) };

    let base = PhysicalAddress::from_raw(start.addr() - kernel_arch::KERNEL_VIRT_OFFSET);
    let size = end.addr() - start.addr();

    PhysicalMemoryRegion { base, size }
}

#[no_mangle]
fn __allocate_page() -> Result<PhysicalAddress, Error> {
    alloc_page()
}

#[no_mangle]
fn __allocate_contiguous_pages(count: usize) -> Result<PhysicalAddress, Error> {
    alloc_pages_contiguous(count)
}

#[no_mangle]
unsafe fn __free_page(page: PhysicalAddress) {
    free_page(page)
}
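
// Usage sketch (illustrative only, not part of this module): a platform's early boot
// path is expected to collect its available-RAM regions as `PhysicalMemoryRegion`
// values and hand them to `init_from_iter` exactly once, together with an
// architecture-specific closure that maps the physical window. The names
// `platform_memory_map` and `map_physical_window` below are hypothetical, not real
// APIs of this crate:
//
//     let regions = platform_memory_map()
//         .iter()
//         .map(|e| PhysicalMemoryRegion { base: e.base, size: e.size });
//     unsafe {
//         init_from_iter(regions, |_regions, start, end| {
//             map_physical_window(start, end)
//         })?;
//     }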