//! Physical memory management facilities

use core::ops::Range;

use abi::error::Error;
use kernel_util::util::OneTimeInit;

use crate::{
    arch::{Architecture, ARCHITECTURE},
    mem::phys::reserved::is_reserved,
    sync::IrqSafeSpinlock,
};

use self::{
    manager::{PhysicalMemoryManager, BITMAP_WORD_SIZE, TRACKED_PAGE_LIMIT},
    reserved::reserve_region,
};

use super::{address::FromRaw, PhysicalAddress};
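
// Worked example of the bit-per-page arithmetic (follows from the 4096-byte page
// size, not a measured value): one 4096-byte bitmap page holds 8 * 4096 = 32768
// bits, so each page of bitmap tracks 32768 pages = 128 MiB of physical memory.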
// 8 * 4096 bits per bitmap page, one page tracked per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);

mod manager;
pub mod reserved;

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
    /// Start of the region
    pub base: PhysicalAddress,
    /// Length of the region
    pub size: usize,
}

impl PhysicalMemoryRegion {
    /// Returns the end address of the region
    pub const fn end(&self) -> PhysicalAddress {
        self.base.add(self.size)
    }

    /// Returns an address range covered by the region
    pub fn range(&self) -> Range<PhysicalAddress> {
        self.base..self.end()
    }
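
    /// Clamps the region against [MEMORY_UPPER_LIMIT], returning the clamped
    /// `(start, end)` pair, or [None] if the region lies entirely above the
    /// tracked limit.
    ///
    /// A minimal usage sketch; the addresses here are illustrative only and not
    /// compiled as a doctest in this kernel:
    ///
    /// ```ignore
    /// let region = PhysicalMemoryRegion {
    ///     base: PhysicalAddress::from_raw(0x100000),
    ///     size: 16 * 0x1000,
    /// };
    /// // A region fully below MEMORY_UPPER_LIMIT is returned unchanged
    /// let (start, end) = region.clamp().unwrap();
    /// ```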
    pub fn clamp(self) -> Option<(PhysicalAddress, PhysicalAddress)> {
        let start = self.base.min(MEMORY_UPPER_LIMIT);
        let end = self.end().min(MEMORY_UPPER_LIMIT);

        if start < end {
            Some((start, end))
        } else {
            None
        }
    }
}

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
    OneTimeInit::new();

/// Allocates a single physical page from the global manager
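///
/// A minimal sketch of intended use; not compiled as a doctest here, and it
/// assumes the manager has already been initialized via [init_from_iter]:
///
/// ```ignore
/// let page = alloc_page()?;
/// // ... use the page ...
/// unsafe { free_page(page) };
/// ```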
pub fn alloc_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_page()
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
}
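
/// Allocates a single 2MiB physical page from the global manager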
pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_2m_page()
}

/// Deallocates a physical memory page.
///
/// # Safety
///
/// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: PhysicalAddress) {
    PHYSICAL_MEMORY.get().lock().free_page(addr)
}
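
/// Computes the minimal physical address range covering all clamped regions
/// produced by the iterator, or [None] if no usable region exists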
fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
) -> Option<(PhysicalAddress, PhysicalAddress)> {
    let mut start = PhysicalAddress::MAX;
    let mut end = PhysicalAddress::MIN;

    for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        if reg_start < start {
            start = reg_start;
        }
        if reg_end > end {
            end = reg_end;
        }
    }

    if start == PhysicalAddress::MAX || end == PhysicalAddress::MIN {
        None
    } else {
        Some((start, end))
    }
}
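
/// Scans the clamped regions for `count` consecutive non-reserved 4KiB pages
/// and returns the base address of the first such run, or [None] if no run
/// is found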
fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<PhysicalAddress> {
    for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        let mut collected = 0;
        let mut base_addr = None;

        for addr in (reg_start..reg_end).step_by(0x1000) {
            if is_reserved(addr) {
                // A reserved page breaks the current run
                collected = 0;
                base_addr = None;
                continue;
            }
            if base_addr.is_none() {
                base_addr = Some(addr);
            }
            collected += 1;

            if collected == count {
                return base_addr;
            }
        }
    }

    // No region contains a long enough run of non-reserved pages
    None
}

/// Initializes the physical memory manager from the given iterator of
/// available memory regions:
///
/// 1. Finds a non-reserved contiguous range to hold the page tracking bitmap.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function is only called once and that the
/// regions are valid and actually available.
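///
/// A hypothetical bootstrap call; the region values are illustrative only and
/// not compiled as a doctest in this kernel:
///
/// ```ignore
/// let regions = [PhysicalMemoryRegion {
///     base: PhysicalAddress::from_raw(0x8000_0000),
///     size: 128 * 1024 * 1024,
/// }];
/// unsafe { init_from_iter(regions.into_iter()) }?;
/// ```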
pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
    it: I,
) -> Result<(), Error> {
    // Map the physical memory
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();

    ARCHITECTURE.map_physical_memory(it.clone(), phys_start, phys_end)?;

    let total_count = (phys_end - phys_start) / 0x1000;
    let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
    let page_bitmap_page_count = (page_bitmap_size + 0xFFF) / 0x1000;

    // Reserve memory regions from which allocation is forbidden
    reserve_region("kernel", kernel_physical_memory_region());

    let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count)
        .ok_or(Error::OutOfMemory)?;

    reserve_region(
        "page-bitmap",
        PhysicalMemoryRegion {
            base: page_bitmap_phys_base,
            size: page_bitmap_page_count * 0x1000,
        },
    );

    let mut manager = PhysicalMemoryManager::new(page_bitmap_phys_base, total_count);

    for (start, end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
        for page in (start..end).step_by(0x1000) {
            if is_reserved(page) {
                continue;
            }

            manager.add_available_page(page);
        }
    }

    PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));

    Ok(())
}
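
/// Returns the physical memory region occupied by the kernel image, derived
/// from linker-provided symbols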
fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
    extern "C" {
        static __kernel_phys_start: u8;
        static __kernel_size: u8;
    }

    let base = PhysicalAddress::from_raw(absolute_address!(__kernel_phys_start));
    let size = absolute_address!(__kernel_size);

    PhysicalMemoryRegion { base, size }
}