use core::ops::Range;
use kernel_arch::absolute_address;
use libk_mm_interface::address::{FromRaw, IntoRaw, PhysicalAddress};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use yggdrasil_abi::{error::Error, system::SystemMemoryStats};

use crate::{
    phys::{
        manager::BITMAP_WORD_SIZE,
        reserved::{is_reserved, reserve_region},
    },
    L2_PAGE_SIZE, L3_PAGE_SIZE,
};

use self::manager::{PhysicalMemoryManager, TRACKED_PAGE_LIMIT};

mod manager;
pub mod reserved;

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
    /// Start of the region
    pub base: PhysicalAddress,
    /// Length of the region
    pub size: usize,
}

// 8 * 4096 bits per page, 1 page per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
    OneTimeInit::new();

impl PhysicalMemoryRegion {
    /// Returns the end address of the region
    pub const fn end(&self) -> PhysicalAddress {
        self.base.add(self.size)
    }

    /// Returns an address range covered by the region
    pub fn range(&self) -> Range<PhysicalAddress> {
        self.base..self.end()
    }

    /// Clamps the [PhysicalMemoryRegion] to `limit`, returning its `(start, end)` bounds, or
    /// `None` if the region lies entirely above `limit`
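    ///
    /// A small illustrative sketch (the addresses and sizes below are made up): a 64 KiB region
    /// based at `0x1000`, clamped to a limit of `0x5000`, keeps only the part below the limit,
    /// while a region entirely above the limit is dropped.
    ///
    /// ```ignore
    /// let region = PhysicalMemoryRegion {
    ///     base: PhysicalAddress::from_raw(0x1000usize),
    ///     size: 64 * 1024,
    /// };
    /// let limit = PhysicalAddress::from_raw(0x5000usize);
    ///
    /// // Starts below the limit: the clamped bounds are (0x1000, 0x5000)
    /// assert!(region.clamp(limit).is_some());
    ///
    /// // Lies entirely above the limit: nothing remains
    /// let high = PhysicalMemoryRegion {
    ///     base: PhysicalAddress::from_raw(0x10000usize),
    ///     size: 0x1000,
    /// };
    /// assert!(high.clamp(limit).is_none());
    /// ```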
    pub fn clamp(self, limit: PhysicalAddress) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let start = self.base.min(limit);
        let end = self.end().min(limit);

        if start < end {
            Some((start, end))
        } else {
            None
        }
    }
}

/// Allocates a single physical page from the global manager
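///
/// A minimal usage sketch (illustrative only; `PHYSICAL_MEMORY` must already have been set up
/// by [init_from_iter], and freeing must follow the [free_page] safety rules):
///
/// ```ignore
/// // Allocate one page, use it, then hand it back to the allocator
/// let page = alloc_page()?;
/// /* ... access the page through the kernel's physical memory mapping ... */
/// unsafe { free_page(page) };
/// ```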
pub fn alloc_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_page()
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
}

/// Allocates a single 2MiB page of physical memory from the global manager
pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_2m_page()
}

/// Returns physical memory stats
pub fn stats() -> SystemMemoryStats {
    PhysicalMemoryManager::stats()
}

/// Deallocates a physical memory page.
///
/// # Safety
///
/// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: PhysicalAddress) {
    PHYSICAL_MEMORY.get().lock().free_page(addr)
}

/// Computes the overall `(start, end)` span of the given regions, clamped to the tracked limit
fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
) -> Option<(PhysicalAddress, PhysicalAddress)> {
    let mut start = PhysicalAddress::MAX;
    let mut end = PhysicalAddress::MIN;

    for (reg_start, reg_end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        if reg_start < start {
            start = reg_start;
        }
        if reg_end > end {
            end = reg_end;
        }
    }

    if start == PhysicalAddress::MAX || end == PhysicalAddress::MIN {
        None
    } else {
        Some((start, end))
    }
}

/// Locates a contiguous region of available physical memory within the memory region list
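///
/// A hedged usage sketch (`regions` is a placeholder for a platform-provided memory map, not an
/// item defined in this module):
///
/// ```ignore
/// // Look for 16 contiguous, non-reserved pages (of L3_PAGE_SIZE bytes each)
/// let base = find_contiguous_region(regions.iter().copied(), 16)
///     .expect("no contiguous region large enough");
/// ```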
pub fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<PhysicalAddress> {
    for (reg_start, reg_end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        let mut collected = 0;
        let mut base_addr = None;

        for addr in (reg_start..reg_end).step_by(L3_PAGE_SIZE) {
            if is_reserved(addr) {
                collected = 0;
                base_addr = None;
                continue;
            }
            if base_addr.is_none() {
                base_addr = Some(addr);
            }
            collected += 1;

            if collected == count {
                return base_addr;
            }
        }
    }

    // No contiguous run of `count` non-reserved pages was found
    None
}

/// Initializes physical memory manager from given available memory region iterator.
///
/// 1. Finds a non-reserved range to place the page tracking array.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function has not been called before and that the regions
/// are valid and actually available.
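///
/// A rough usage sketch (the single region and the mapping closure below are illustrative
/// placeholders; real callers pass the platform's boot memory map and mapping routine):
///
/// ```ignore
/// let regions = [PhysicalMemoryRegion {
///     base: PhysicalAddress::from_raw(0x100000usize),
///     size: 64 * 1024 * 1024,
/// }];
///
/// unsafe {
///     init_from_iter(regions.into_iter(), |_regions, start, end| {
///         // Map [start, end) into the kernel's physical memory window here
///         Ok(())
///     })?;
/// }
/// ```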
pub unsafe fn init_from_iter<
    I: Iterator<Item = PhysicalMemoryRegion> + Clone,
    Map: FnOnce(I, PhysicalAddress, PhysicalAddress) -> Result<(), Error>,
>(
    it: I,
    map_physical_memory: Map,
) -> Result<(), Error> {
    // Map the physical memory
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();

    reserve_region("kernel", kernel_physical_memory_region());

    map_physical_memory(it.clone(), phys_start, phys_end)?;

    // One bit of the page bitmap tracks one L3 page
    let total_count = (phys_end - phys_start) / L3_PAGE_SIZE;
    let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / (BITMAP_WORD_SIZE / 8);
    let page_bitmap_page_count = (page_bitmap_size + L3_PAGE_SIZE - 1) / L3_PAGE_SIZE;

    let page_bitmap_phys_base =
        find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();

    reserve_region(
        "page-bitmap",
        PhysicalMemoryRegion {
            base: page_bitmap_phys_base,
            size: page_bitmap_page_count * L3_PAGE_SIZE,
        },
    );

    if IntoRaw::<usize>::into_raw(phys_start) & (L2_PAGE_SIZE - 1) != 0 {
        // Handling a physical memory base that is not L2-aligned is not implemented yet
        todo!();
    }

    let mut manager =
        PhysicalMemoryManager::new(page_bitmap_phys_base, phys_start.into_raw(), total_count);
    let mut collected = 0;
    // Cap on the number of pages added to the allocator during initialization
    const MAX_MEMORY: usize = 64 * 1024;

    for (start, end) in it.into_iter().filter_map(|r| r.clamp(MEMORY_UPPER_LIMIT)) {
        for page in (start..end).step_by(L3_PAGE_SIZE) {
            if collected >= MAX_MEMORY {
                break;
            }

            if is_reserved(page) {
                continue;
            }

            manager.add_available_page(page);
            collected += 1;
        }
    }

    PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));

    Ok(())
}

fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
extern "C" {
static __kernel_phys_start: u8;
2023-07-18 18:03:45 +03:00
static __kernel_size: u8;
}
let base = PhysicalAddress::from_raw(absolute_address!(__kernel_phys_start));
2023-07-18 18:03:45 +03:00
let size = absolute_address!(__kernel_size);
PhysicalMemoryRegion { base, size }
}

// `#[no_mangle]` wrappers around the physical allocator entry points
#[no_mangle]
fn __allocate_page() -> Result<PhysicalAddress, Error> {
    alloc_page()
}

#[no_mangle]
fn __allocate_contiguous_pages(count: usize) -> Result<PhysicalAddress, Error> {
    alloc_pages_contiguous(count)
}

#[no_mangle]
unsafe fn __free_page(page: PhysicalAddress) {
    free_page(page)
}