feat: integrate physical mm

Mark Poliakov 2021-10-13 10:20:52 +03:00
parent fdb4f4e8e9
commit b440a3c975
7 changed files with 288 additions and 1 deletion

kernel linker script

@ -41,4 +41,5 @@ SECTIONS {
PROVIDE(__bss_end_phys = . - KERNEL_OFFSET);
PROVIDE(__kernel_end = .);
PROVIDE(__kernel_end_phys = . - KERNEL_OFFSET);
}

kernel/Cargo.toml

@ -10,8 +10,8 @@ name = "kernel"
test = false
[dependencies]
cfg-if = "1.x.x"
error = { path = "../error" }
tock-registers = "0.7.x"
fdt-rs = { version = "0.x.x", default-features = false }

platform init (QEMU aarch64 "virt" board)

@ -12,6 +12,7 @@ use crate::dev::{
serial::{pl011::Pl011, SerialDevice},
Device,
};
use crate::mem::phys;
use error::Errno;
pub use gic::IrqNumber;
@ -23,11 +24,17 @@ const GICC_BASE: usize = 0x08010000;
// TODO extract this from device tree
const ECAM_BASE: usize = 0x4010000000;
const PHYS_BASE: usize = 0x40000000;
const PHYS_SIZE: usize = 0x10000000;
#[allow(missing_docs)]
pub fn init_board() -> Result<(), Errno> {
unsafe {
// Enable UART early on
UART0.enable()?;
phys::init_from_region(PHYS_BASE, PHYS_SIZE);
GIC.enable()?;
UART0.init_irqs()?;

kernel/src/mem/mod.rs

@ -1,9 +1,27 @@
//! Memory management and functions module
#![allow(missing_docs)]
pub mod phys;
pub mod virt;
/// Virtual offset applied to kernel address space
pub const KERNEL_OFFSET: usize = 0xFFFFFF8000000000;
/// Translates a physical address into the kernel's higher-half virtual mapping
pub fn virtualize(addr: usize) -> usize {
// TODO remove this function
addr + KERNEL_OFFSET
}
/// Returns the physical address of the end of the kernel image, as provided
/// by the linker script
pub fn kernel_end_phys() -> usize {
extern "C" {
static __kernel_end_phys: u8;
}
unsafe { &__kernel_end_phys as *const _ as usize }
}
/// Size of a single page of physical memory, in bytes
pub const PAGE_SIZE: usize = 4096;
/// See memcpy(3p).
///
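A minimal usage sketch of these helpers; the function below is hypothetical, and the address mirrors the board's PHYS_BASE:

// Hypothetical illustration (not part of this commit) of the
// higher-half mapping helpers.
fn example() {
    let phys = 0x4000_0000;          // PHYS_BASE on the QEMU "virt" board
    let virt = virtualize(phys);     // == 0xFFFF_FF80_4000_0000
    assert_eq!(virt, phys + KERNEL_OFFSET);
    // kernel_end_phys() resolves the __kernel_end_phys linker symbol
    assert!(kernel_end_phys() > phys);
}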

kernel/src/mem/phys/manager.rs (new file)

@ -0,0 +1,76 @@
use super::{PageInfo, PageUsage};
use crate::mem::{virtualize, PAGE_SIZE};
use crate::sync::IrqSafeNullLock;
use core::mem;
use error::Errno;
/// Interface for physical page-frame allocators.
///
/// Unsafe to implement: the allocator must hand out unique, page-aligned
/// physical addresses backed by real, usable memory.
pub unsafe trait Manager {
fn alloc_page(&mut self, pu: PageUsage) -> Result<usize, Errno>;
fn alloc_contiguous_pages(&mut self, pu: PageUsage, count: usize) -> Result<usize, Errno>;
fn free_page(&mut self, page: usize) -> Result<(), Errno>;
// TODO status()
}
pub struct SimpleManager {
pages: &'static mut [PageInfo],
base_index: usize,
}
impl SimpleManager {
pub(super) unsafe fn initialize(base: usize, at: usize, count: usize) -> Self {
let pages: &'static mut [PageInfo] =
core::slice::from_raw_parts_mut(virtualize(at) as *mut _, count);
// Initialize the uninit page array; mem::replace + mem::forget writes
// each entry without dropping the previous, uninitialized value
for index in 0..count {
mem::forget(mem::replace(
&mut pages[index],
PageInfo {
refcount: 0,
usage: PageUsage::Reserved,
},
));
}
Self {
base_index: base / PAGE_SIZE,
pages,
}
}
pub(super) unsafe fn add_page(&mut self, addr: usize) {
let page = &mut self.pages[addr / PAGE_SIZE - self.base_index];
assert!(page.refcount == 0 && page.usage == PageUsage::Reserved);
page.usage = PageUsage::Available;
}
}
unsafe impl Manager for SimpleManager {
fn alloc_page(&mut self, pu: PageUsage) -> Result<usize, Errno> {
for index in 0..self.pages.len() {
let page = &mut self.pages[index];
if page.usage == PageUsage::Available {
page.usage = pu;
page.refcount = 1;
return Ok((self.base_index + index) * PAGE_SIZE);
}
}
Err(Errno::OutOfMemory)
}
fn alloc_contiguous_pages(&mut self, pu: PageUsage, count: usize) -> Result<usize, Errno> {
// Scan only windows that fit entirely within the page array
'l0: for i in 0..(self.pages.len() + 1).saturating_sub(count) {
for j in 0..count {
if self.pages[i + j].usage != PageUsage::Available {
continue 'l0;
}
}
for j in 0..count {
let page = &mut self.pages[i + j];
assert!(page.usage == PageUsage::Available);
page.usage = pu;
page.refcount = 1;
}
return Ok((self.base_index + i) * PAGE_SIZE);
}
Err(Errno::OutOfMemory)
}
fn free_page(&mut self, _page: usize) -> Result<(), Errno> {
todo!()
}
}
pub(super) static MANAGER: IrqSafeNullLock<Option<SimpleManager>> = IrqSafeNullLock::new(None);
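For context, a minimal sketch of how callers could go through the MANAGER lock once it is populated; the free-function wrapper below is an assumption for illustration, not part of this commit:

// Hypothetical convenience wrapper: allocates a single physical page
// for the given usage through the global manager.
pub fn alloc_page(pu: PageUsage) -> Result<usize, Errno> {
    // Panics if no manager has been installed yet by phys::init_*()
    MANAGER.lock().as_mut().unwrap().alloc_page(pu)
}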

kernel/src/mem/phys/mod.rs (new file)

@ -0,0 +1,119 @@
use crate::mem::PAGE_SIZE;
use core::mem::size_of;
mod reserved;
mod manager;
use manager::{Manager, SimpleManager, MANAGER};
pub use reserved::ReservedRegion;
type ManagerImpl = SimpleManager;
// Upper bound on tracked pages: 1 Mi pages of 4 KiB each, i.e. 4 GiB of RAM
const MAX_PAGES: usize = 1024 * 1024;
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum PageUsage {
Reserved,
Available,
Kernel
}
pub struct PageInfo {
refcount: usize,
usage: PageUsage
}
#[derive(Clone)]
pub struct MemoryRegion {
pub start: usize,
pub end: usize,
}
#[repr(transparent)]
#[derive(Clone)]
pub struct SimpleMemoryIterator {
inner: Option<MemoryRegion>,
}
impl SimpleMemoryIterator {
pub const fn new(reg: MemoryRegion) -> Self {
Self { inner: Some(reg) }
}
}
impl Iterator for SimpleMemoryIterator {
type Item = MemoryRegion;
fn next(&mut self) -> Option<Self::Item> {
self.inner.take()
}
}
/// Searches the regions for `count` consecutive non-reserved pages and
/// returns the base address of the run, if one exists
fn find_contiguous<T: Iterator<Item = MemoryRegion>>(
iter: T,
count: usize,
) -> Option<usize> {
for region in iter {
let mut collected = 0;
let mut base_addr = None;
for addr in (region.start..region.end).step_by(PAGE_SIZE) {
if reserved::is_reserved(addr) {
collected = 0;
base_addr = None;
continue;
}
if base_addr.is_none() {
base_addr = Some(addr);
}
collected += 1;
if collected == count {
return base_addr;
}
}
}
None
}
/// Sets up the global physical page manager from an iterator of RAM regions.
///
/// Safety: must be called exactly once during early boot, with regions
/// describing real and otherwise unused memory.
pub unsafe fn init_from_iter<T: Iterator<Item = MemoryRegion> + Clone>(iter: T) {
let mut mem_base = usize::MAX;
for reg in iter.clone() {
if reg.start < mem_base {
mem_base = reg.start;
}
}
debugln!("Memory base is {:?}", mem_base);
// Step 1. Count available memory
let mut total_pages = 0usize;
for reg in iter.clone() {
total_pages += (reg.end - reg.start) / PAGE_SIZE;
}
// TODO maybe instead of size_of::<...> use Layout?
let need_pages = (total_pages * size_of::<PageInfo>() + PAGE_SIZE - 1) / PAGE_SIZE;
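// Worked example (assuming a 16-byte PageInfo on AArch64): the 256 MiB
// QEMU region holds 0x10000000 / 0x1000 = 65536 pages, so the array
// needs 65536 * 16 = 1 MiB of storage, i.e. need_pages = 256.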
reserved::reserve_kernel();
// Step 2. Allocate memory for page array
let pages_base =
find_contiguous(iter.clone(), need_pages).expect("Failed to allocate memory for page info");
reserved::reserve_pages(pages_base, need_pages);
// Step 3. Initialize the memory manager with available pages
let mut manager = ManagerImpl::initialize(mem_base, pages_base, total_pages);
let mut usable_pages = 0usize;
'l0: for region in iter {
for addr in (region.start..region.end).step_by(PAGE_SIZE) {
if !reserved::is_reserved(addr) {
manager.add_page(addr);
usable_pages += 1;
if usable_pages == MAX_PAGES {
break 'l0;
}
}
}
}
debug!("{}K of usable physical memory\n", usable_pages * 4);
*MANAGER.lock() = Some(manager);
}
/// Sets up the global physical page manager from a single contiguous region.
pub unsafe fn init_from_region(base: usize, size: usize) {
let iter = SimpleMemoryIterator::new(MemoryRegion {
start: base,
end: base + size
});
init_from_iter(iter);
}
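A hedged end-to-end sketch of bringing the allocator up and grabbing one page; the smoke_test function is hypothetical, and the region mirrors the board constants:

// Hypothetical smoke test (not part of this commit), written as if it
// lived in this module so MANAGER and the Manager trait are in scope.
unsafe fn smoke_test() -> Result<(), error::Errno> {
    // The same region the board code passes in: 256 MiB at 0x40000000
    init_from_region(0x4000_0000, 0x1000_0000);
    // Hand out a single page for kernel use
    let page = MANAGER.lock().as_mut().unwrap().alloc_page(PageUsage::Kernel)?;
    assert_eq!(page % PAGE_SIZE, 0);
    Ok(())
}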

kernel/src/mem/phys/reserved.rs (new file)

@ -0,0 +1,66 @@
use crate::mem::{kernel_end_phys, PAGE_SIZE};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
pub struct ReservedRegion {
pub start: usize,
pub end: usize,
next: *mut ReservedRegion,
}
pub struct ReservedRegionIterator {
ptr: *mut ReservedRegion,
}
impl Iterator for ReservedRegionIterator {
type Item = &'static mut ReservedRegion;
fn next(&mut self) -> Option<Self::Item> {
if let Some(item) = unsafe { self.ptr.as_mut() } {
self.ptr = item.next;
Some(item)
} else {
None
}
}
}
impl ReservedRegion {
pub const fn new(start: usize, end: usize) -> ReservedRegion {
//assert!(start.is_paligned() && end.is_paligned());
ReservedRegion {
start,
end,
next: null_mut(),
}
}
}
static mut RESERVED_REGIONS_HEAD: *mut ReservedRegion = null_mut();
static mut RESERVED_REGION_KERNEL: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
static mut RESERVED_REGION_PAGES: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
/// Prepends `region` to the global list of reserved ranges.
///
/// Safety: early-boot, single-threaded use only; `region` must point to
/// storage that lives for the rest of the kernel's lifetime.
pub unsafe fn reserve(usage: &str, region: *mut ReservedRegion) {
    debugln!(
        "Reserving {:?} region: {:#x}..{:#x}",
        usage,
        (*region).start,
        (*region).end
    );
(*region).next = RESERVED_REGIONS_HEAD;
RESERVED_REGIONS_HEAD = region;
}
pub(super) unsafe fn reserve_kernel() {
    // Reserve everything up to the end of the kernel image; this also
    // covers the ROM and MMIO ranges below the load address
    RESERVED_REGION_KERNEL.write(ReservedRegion::new(0, kernel_end_phys()));
reserve("kernel", RESERVED_REGION_KERNEL.as_mut_ptr());
}
pub(super) unsafe fn reserve_pages(base: usize, count: usize) {
RESERVED_REGION_PAGES.write(ReservedRegion::new(base, base + count * PAGE_SIZE));
reserve("pages", RESERVED_REGION_PAGES.as_mut_ptr());
}
/// Returns `true` if `page` lies within any reserved region
pub fn is_reserved(page: usize) -> bool {
unsafe {
let mut iter = RESERVED_REGIONS_HEAD;
while !iter.is_null() {
let region = &*iter;
if page >= region.start && page < region.end {
return true;
}
iter = region.next;
}
}
false
}
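Other early-boot components can exclude their own ranges the same way; a hedged sketch for, say, an initrd image (the static and function below are hypothetical):

static mut RESERVED_REGION_INITRD: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();

// Hypothetical: carve an initrd image out of the allocatable range. Must
// run before phys::init_from_iter() scans the regions; start and end are
// assumed page-aligned.
pub unsafe fn reserve_initrd(start: usize, end: usize) {
    RESERVED_REGION_INITRD.write(ReservedRegion::new(start, end));
    reserve("initrd", RESERVED_REGION_INITRD.as_mut_ptr());
}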