//! Physical memory management facilities
use core::{iter::StepBy, mem::size_of, ops::Range};
use abi::error::Error;
use kernel_util::util::OneTimeInit;
use crate::{
debug::LogLevel,
mem::{
phys::reserved::{is_reserved, reserve_region},
        ConvertAddress,
},
sync::IrqSafeSpinlock,
};
use self::manager::PhysicalMemoryManager;

// Enumerating a large number of pages is slow with the current naive
// algorithm, so the tracked page count is capped (65536 pages = 256 MiB
// of 4 KiB pages)
const PHYS_MEMORY_PAGE_CAP: usize = 65536;

pub mod manager;
pub mod reserved;

/// Contains information about physical memory usage
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryStats {
/// Number of pages available for allocation
pub available_pages: usize,
/// Number of pages being used
pub used_pages: usize,
}

/// Represents the usage state of a page (reserved, available or used)
#[derive(PartialEq, Clone, Copy, Debug)]
#[repr(u32)]
pub enum PageUsage {
/// Page is not available for allocation or use
Reserved = 0,
/// Regular page available for allocation
Available,
/// Page is used by some kernel facility
Used,
}

/// Page descriptor structure for the page management array
#[repr(C)]
pub struct Page {
usage: PageUsage,
refcount: u32,
}

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
/// Start of the region
pub base: usize,
/// Length of the region
pub size: usize,
}

impl PhysicalMemoryRegion {
/// Returns the end address of the region
pub const fn end(&self) -> usize {
self.base + self.size
    }

    /// Returns an address range covered by the region
pub fn range(&self) -> Range<usize> {
self.base..self.end()
    }

    /// Provides an iterator over the pages in the region
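    ///
    /// # Example
    ///
    /// A sketch of walking a region's 4 KiB page frames (addresses are
    /// illustrative):
    ///
    /// ```ignore
    /// let region = PhysicalMemoryRegion { base: 0x8000_0000, size: 0x3000 };
    /// let mut pages = region.pages();
    /// assert_eq!(pages.next(), Some(0x8000_0000));
    /// assert_eq!(pages.next(), Some(0x8000_1000));
    /// assert_eq!(pages.next(), Some(0x8000_2000));
    /// assert_eq!(pages.next(), None);
    /// ```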
pub fn pages(&self) -> StepBy<Range<usize>> {
self.range().step_by(0x1000)
}
}

impl PhysicalMemoryStats {
    /// Records `count` pages as allocated, moving them from the available to
    /// the used counter
    pub fn add_allocated_pages(&mut self, count: usize, _usage: PageUsage) {
        assert!(self.available_pages >= count);
        self.available_pages -= count;
        self.used_pages += count;
    }

    /// Records `count` pages as freed, moving them from the used to the
    /// available counter
    pub fn add_freed_pages(&mut self, count: usize, _usage: PageUsage) {
        assert!(self.used_pages >= count);
        self.used_pages -= count;
        self.available_pages += count;
    }

    /// Increases the available pages counter
pub fn add_available_pages(&mut self, count: usize) {
self.available_pages += count;
    }

    /// Prints the statistics at the specified log level
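    ///
    /// Example output (the values shown are illustrative):
    ///
    /// ```text
    /// +++ Physical memory stats +++
    /// Available: 260096K (65024 pages)
    /// Used: 2048K (512 pages)
    /// -----------------------------
    /// ```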
pub fn dump(&self, level: LogLevel) {
log_print_raw!(level, "+++ Physical memory stats +++\n");
log_print_raw!(
level,
"Available: {}K ({} pages)\n",
self.available_pages * 4,
self.available_pages
);
log_print_raw!(
level,
"Used: {}K ({} pages)\n",
self.used_pages * 4,
self.used_pages
);
log_print_raw!(level, "-----------------------------\n");
}
}

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
OneTimeInit::new();

/// Allocates a single physical page from the global manager
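///
/// # Example
///
/// A sketch of the intended allocate/free pairing (assumes the global
/// [`PHYSICAL_MEMORY`] manager has already been initialized):
///
/// ```ignore
/// let page = alloc_page(PageUsage::Used)?;
/// // ... use the page ...
/// unsafe { free_page(page) };
/// ```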
pub fn alloc_page(usage: PageUsage) -> Result<usize, Error> {
PHYSICAL_MEMORY.get().lock().alloc_page(usage)
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize, usage: PageUsage) -> Result<usize, Error> {
PHYSICAL_MEMORY
.get()
.lock()
.alloc_contiguous_pages(count, usage)
}

/// Deallocates a physical memory page.
///
/// # Safety
///
/// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: usize) {
PHYSICAL_MEMORY.get().lock().free_page(addr)
}
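
/// Computes the smallest `(start, end)` physical address range covering every
/// region yielded by the iterator, or `None` if the iterator is empty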
fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
it: I,
) -> Option<(usize, usize)> {
let mut start = usize::MAX;
let mut end = usize::MIN;
for reg in it {
if reg.base < start {
start = reg.base;
}
if reg.base + reg.size > end {
end = reg.base + reg.size;
}
}
if start == usize::MAX || end == usize::MIN {
None
} else {
Some((start, end))
}
}
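
/// Searches the given regions for a run of `count` contiguous, non-reserved
/// 4 KiB pages and returns the base address of the run, if one exists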
fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
it: I,
count: usize,
) -> Option<usize> {
for region in it {
let mut collected = 0;
let mut base_addr = None;
for addr in region.pages() {
if is_reserved(addr) {
collected = 0;
base_addr = None;
continue;
}
if base_addr.is_none() {
base_addr = Some(addr);
}
collected += 1;
if collected == count {
return base_addr;
}
}
}
    None
}

/// Initializes the physical memory manager from the given iterator of
/// available memory regions.
///
/// 1. Finds a non-reserved range to place the page tracking array.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function has not been called before and that the regions
/// are valid and actually available.
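///
/// # Example
///
/// A sketch of early-boot initialization from a single hypothetical region
/// (addresses are illustrative):
///
/// ```ignore
/// let regions = [PhysicalMemoryRegion {
///     base: 0x4000_0000,
///     size: 0x1000_0000,
/// }];
/// unsafe { init_from_iter(regions.into_iter())? };
/// ```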
pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
it: I,
) -> Result<(), Error> {
let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();
let total_count = (phys_end - phys_start) / 0x1000;
let pages_array_size = total_count * size_of::<Page>();
debugln!("Initializing physical memory manager");
debugln!("Total tracked pages: {}", total_count);
// Reserve memory regions from which allocation is forbidden
reserve_region("kernel", kernel_physical_memory_region());
let pages_array_base = find_contiguous_region(it.clone(), (pages_array_size + 0xFFF) / 0x1000)
.ok_or(Error::OutOfMemory)?;
debugln!(
"Placing page tracking at {:#x}",
pages_array_base.virtualize()
);
reserve_region(
"pages",
PhysicalMemoryRegion {
base: pages_array_base,
size: (pages_array_size + 0xFFF) & !0xFFF,
},
);
let mut manager =
PhysicalMemoryManager::new(phys_start, pages_array_base.virtualize(), pages_array_size);
let mut page_count = 0;
for region in it {
if page_count >= PHYS_MEMORY_PAGE_CAP {
break;
}
for page in region.pages() {
if is_reserved(page) {
continue;
}
manager.add_available_page(page);
page_count += 1;
if page_count >= PHYS_MEMORY_PAGE_CAP {
break;
}
}
}

    infoln!("{} available pages ({}KiB)", page_count, page_count * 4);

    PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));

    Ok(())
}
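
/// Returns the physical memory region occupied by the kernel image, as
/// described by the linker-provided `__kernel_phys_start` and `__kernel_size`
/// symbols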
fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
extern "C" {
static __kernel_phys_start: u8;
static __kernel_size: u8;
    }

    let base = absolute_address!(__kernel_phys_start);
let size = absolute_address!(__kernel_size);
PhysicalMemoryRegion { base, size }
}