x86-64/mm: implement 2MiB RAM mapping

Mark Poliakov 2023-10-03 10:17:13 +03:00
parent 6cedfa7c4a
commit d5859d93a9
6 changed files with 83 additions and 16 deletions

View File

@ -134,3 +134,10 @@ pub unsafe fn outl(port: u16, value: u32) {
pub unsafe fn flush_tlb_entry(address: usize) {
    core::arch::asm!("invlpg ({0})", in(reg) address, options(att_syntax));
}
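
/// Writes back and invalidates the CPU caches (`wbinvd`)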
#[inline]
pub fn flush_cpu_cache() {
    unsafe {
        core::arch::asm!("wbinvd");
    }
}

View File

@ -242,6 +242,18 @@ pub struct EarlyMapping<'a, T: ?Sized> {
}

impl<'a, T: Sized> EarlyMapping<'a, T> {
    pub unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
        let layout = Layout::new::<T>();
        let aligned = physical.page_align_down::<L3>();
        let offset = physical.page_offset::<L3>();
        // Round up to the number of 4KiB (L3) pages covering the value plus its in-page offset
        let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;

        // Map the page-aligned base, then point into the mapping at the original offset
        let virt = map_early_pages(aligned, page_count)?;
        let value = &mut *((virt + offset) as *mut T);

        Ok(EarlyMapping { value, page_count })
    }

    pub unsafe fn map_slice(
        physical: PhysicalAddress,
        len: usize,

View File

@ -210,6 +210,10 @@ impl<L: EntryLevel> PageTable<L> {
        }
    }
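
    /// Reinterprets a raw 512-entry array as a page table
    ///
    /// # Safety
    ///
    /// The entries in `data` must form a valid page table for level `L`.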
    pub unsafe fn from_raw_slice_mut(data: &mut [PageEntry<L>; 512]) -> &mut Self {
        core::mem::transmute(data)
    }

    /// Allocates a new page table, filling it with non-present entries
    pub fn new_zeroed<'a>() -> Result<PhysicalRefMut<'a, Self>, Error> {
        let physical = phys::alloc_page()?;

View File

@ -1,5 +1,5 @@
// TODO fix all TODOs
use core::{mem::size_of, sync::atomic::Ordering};
use core::{mem::size_of, ops::DerefMut, sync::atomic::Ordering};
use abi::error::Error;
use acpi_lib::{mcfg::Mcfg, AcpiTables, InterruptModel};
@ -30,7 +30,11 @@ mod syscall;
use crate::{
    arch::x86_64::{
        intrinsics::{IoPort, IoPortAccess},
        mem::{map_heap_block, table::L2, HEAP_MAPPING_OFFSET},
        mem::{
            map_heap_block,
            table::{PageTable, L2},
            HEAP_MAPPING_OFFSET,
        },
    },
    debug::{self, LogLevel},
    device::{
@ -165,7 +169,7 @@ impl Architecture for X86_64 {
    fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
        &self,
        _it: I,
        it: I,
        _memory_start: PhysicalAddress,
        memory_end: PhysicalAddress,
    ) -> Result<(), Error> {
@ -195,11 +199,54 @@ impl Architecture for X86_64 {
                    );
                }
            }

            Ok(())
        } else {
            todo!();

            // Allocate the intermediate tables first
            let l2_tables_start = phys::find_contiguous_region(it, end_l1i)
                .expect("Could not allocate the memory for RAM mapping L2 tables");
            unsafe {
                reserve_region(
                    "ram-l2-tables",
                    PhysicalMemoryRegion {
                        base: l2_tables_start,
                        size: end_l1i * 0x1000,
                    },
                );
            }

            // Fill in the tables
            for l1i in 0..end_l1i {
                let l2_phys_addr = l2_tables_start.add(l1i * 0x1000);

                // TODO (minor) the slice is uninitialized, maybe find some way
                // to deal with that case nicely
                // Safety: ok, the mapping is done to the memory obtained from
                // find_contiguous_region()
                let mut l2_data =
                    unsafe { EarlyMapping::<[PageEntry<L2>; 512]>::map(l2_phys_addr)? };
                // Safety: ok, the slice comes from EarlyMapping of a page-aligned region
                let l2 = unsafe { PageTable::from_raw_slice_mut(l2_data.deref_mut()) };
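
                // Each L2 entry maps a 2MiB block: bits 30+ select the 1GiB L1
                // slot (l1i), bits 21..30 the 2MiB block within it (l2i). E.g.
                // l1i = 1, l2i = 3 yields physical 0x4060_0000.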
                for l2i in 0..512 {
                    // TODO NX
                    l2[l2i] = PageEntry::<L2>::block(
                        PhysicalAddress::from_raw((l1i << 30) | (l2i << 21)),
                        PageAttributes::WRITABLE,
                    );
                }

                // Point the L1 entry to the L2 table
                unsafe {
                    RAM_MAPPING_L1[l1i] =
                        PageEntry::<L1>::table(l2_phys_addr, PageAttributes::WRITABLE)
                };
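
                // Conservative: write the updated tables back to memory before
                // the new mapping is used (see flush_cpu_cache above)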
                intrinsics::flush_cpu_cache();
                // The EarlyMapping is then dropped
            }
        }

        Ok(())
    }

    #[inline]
@ -404,7 +451,7 @@ impl X86_64 {
        self.ioapic.init(IoApic::from_acpi(&apic_info)?);

        acpi::init_acpi(acpi).unwrap();
        // acpi::init_acpi(acpi).unwrap();

        if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
            for entry in mcfg.entries() {

View File

@ -103,7 +103,7 @@ fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
    }
}

fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
pub fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<PhysicalAddress> {
@ -144,14 +144,14 @@ pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
    // Map the physical memory
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();

    reserve_region("kernel", kernel_physical_memory_region());

    ARCHITECTURE.map_physical_memory(it.clone(), phys_start, phys_end)?;
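
    // Size the physical page bitmap; both divisions round up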
    let total_count = (phys_end - phys_start) / 0x1000;
    let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
    let page_bitmap_page_count = (page_bitmap_size + 0xFFF) / 0x1000;

    reserve_region("kernel", kernel_physical_memory_region());

    let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();

    reserve_region(

View File

@ -3,7 +3,7 @@ use core::time::Duration;
use yggdrasil_abi::error::Error;
use crate::arch::{Architecture, ARCHITECTURE};
// use crate::arch::{Architecture, ARCHITECTURE};
pub mod queue;
pub mod ring;
@ -26,12 +26,9 @@ impl<T, E, I: Iterator<Item = Result<T, E>>> ResultIterator<T, E> for I {
/// Performs a busy-loop sleep until the specified duration has passed
pub fn polling_sleep(duration: Duration) -> Result<(), Error> {
    let timer = ARCHITECTURE.monotonic_timer();
    let deadline = timer.monotonic_timestamp()? + duration;
    while timer.monotonic_timestamp()? < deadline {
    // TODO no non-IRQ mode timestamp provider
    for _ in 0..1000000 {
        core::hint::spin_loop();
    }

    Ok(())
}