diff --git a/src/arch/x86_64/intrinsics.rs b/src/arch/x86_64/intrinsics.rs
index 6c11490f..8fe7991b 100644
--- a/src/arch/x86_64/intrinsics.rs
+++ b/src/arch/x86_64/intrinsics.rs
@@ -134,3 +134,11 @@ pub unsafe fn outl(port: u16, value: u32) {
 pub unsafe fn flush_tlb_entry(address: usize) {
     core::arch::asm!("invlpg ({0})", in(reg) address, options(att_syntax));
 }
+
+/// Writes back and invalidates the CPU caches (`wbinvd` instruction).
+#[inline]
+pub fn flush_cpu_cache() {
+    unsafe {
+        core::arch::asm!("wbinvd");
+    }
+}
diff --git a/src/arch/x86_64/mem/mod.rs b/src/arch/x86_64/mem/mod.rs
index b52bf5ff..77ee41d2 100644
--- a/src/arch/x86_64/mem/mod.rs
+++ b/src/arch/x86_64/mem/mod.rs
@@ -242,6 +242,27 @@ pub struct EarlyMapping<'a, T: ?Sized> {
 }
 
 impl<'a, T: Sized> EarlyMapping<'a, T> {
+    /// Maps a `T` residing at `physical` into the early mapping region and returns a
+    /// mutable reference to it.
+    ///
+    /// # Safety
+    ///
+    /// `physical` must point to a valid, properly initialized `T` and must not alias
+    /// any other live mapping.
+    pub unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
+        let layout = Layout::new::<T>();
+        // `physical` may be unaligned: map whole pages starting from the page-aligned
+        // base, then re-apply the in-page offset to the returned virtual address.
+        let aligned = physical.page_align_down::<L3>();
+        let offset = physical.page_offset::<L3>();
+        let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
+
+        let virt = map_early_pages(aligned, page_count)?;
+        let value = &mut *((virt + offset) as *mut T);
+
+        Ok(EarlyMapping { value, page_count })
+    }
+
     pub unsafe fn map_slice(
         physical: PhysicalAddress,
         len: usize,
diff --git a/src/arch/x86_64/mem/table.rs b/src/arch/x86_64/mem/table.rs
index a70084f7..c8e1234d 100644
--- a/src/arch/x86_64/mem/table.rs
+++ b/src/arch/x86_64/mem/table.rs
@@ -210,6 +210,16 @@ impl<L: NonTerminalEntryLevel> PageTable<L> {
         }
     }
 
+    /// Reinterprets a raw array of 512 page entries as a [PageTable].
+    ///
+    /// # Safety
+    ///
+    /// `data` must be page-aligned and contain entries valid for this table level; the
+    /// caller is responsible for initializing the entries before the table is used.
+    pub unsafe fn from_raw_slice_mut(data: &mut [PageEntry<L>; 512]) -> &mut Self {
+        core::mem::transmute(data)
+    }
+
     /// Allocates a new page table, filling it with non-preset entries
     pub fn new_zeroed<'a>() -> Result<PhysicalRefMut<'a, Self>, Error> {
         let physical = phys::alloc_page()?;
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index 3a174a56..df67b8eb 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -1,5 +1,5 @@
 // TODO fix all TODOs
-use core::{mem::size_of, sync::atomic::Ordering};
+use core::{mem::size_of, ops::DerefMut, sync::atomic::Ordering};
 
 use abi::error::Error;
 use acpi_lib::{mcfg::Mcfg, AcpiTables, InterruptModel};
@@ -30,7 +30,11 @@ mod syscall;
 use crate::{
     arch::x86_64::{
         intrinsics::{IoPort, IoPortAccess},
-        mem::{map_heap_block, table::L2, HEAP_MAPPING_OFFSET},
+        mem::{
+            map_heap_block,
+            table::{PageTable, L2},
+            HEAP_MAPPING_OFFSET,
+        },
     },
     debug::{self, LogLevel},
     device::{
@@ -165,7 +169,7 @@ impl Architecture for X86_64 {
 
     fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
         &self,
-        _it: I,
+        it: I,
         _memory_start: PhysicalAddress,
         memory_end: PhysicalAddress,
     ) -> Result<(), Error> {
@@ -195,11 +199,54 @@
                     );
                 }
             }
-
-            Ok(())
         } else {
-            todo!();
+            // Allocate the intermediate tables first
+            let l2_tables_start = phys::find_contiguous_region(it, end_l1i)
+                .expect("Could not allocate the memory for RAM mapping L2 tables");
+
+            unsafe {
+                reserve_region(
+                    "ram-l2-tables",
+                    PhysicalMemoryRegion {
+                        base: l2_tables_start,
+                        size: end_l1i * 0x1000,
+                    },
+                );
+            }
+
+            // Fill in the tables
+            for l1i in 0..end_l1i {
+                let l2_phys_addr = l2_tables_start.add(l1i * 0x1000);
+
+                // TODO (minor) the slice is uninitialized, maybe find some way to deal with that
+                // case nicely
+                // Safety: ok, the mapping is done to the memory obtained from
+                // find_contiguous_region()
+                let mut l2_data =
+                    unsafe { EarlyMapping::<[PageEntry<L2>; 512]>::map(l2_phys_addr)? };
+                // Safety: ok, the slice comes from EarlyMapping of a page-aligned region
+                let l2 = unsafe { PageTable::from_raw_slice_mut(l2_data.deref_mut()) };
+
+                for l2i in 0..512 {
+                    // TODO NX
+                    l2[l2i] = PageEntry::<L2>::block(
+                        PhysicalAddress::from_raw((l1i << 30) | (l2i << 21)),
+                        PageAttributes::WRITABLE,
+                    );
+                }
+
+                // Point the L1 entry to the L2 table
+                unsafe {
+                    RAM_MAPPING_L1[l1i] =
+                        PageEntry::<L1>::table(l2_phys_addr, PageAttributes::WRITABLE)
+                };
+
+                intrinsics::flush_cpu_cache();
+                // The EarlyMapping is then dropped
+            }
         }
+
+        Ok(())
     }
 
     #[inline]
@@ -404,7 +451,7 @@ impl X86_64 {
 
         self.ioapic.init(IoApic::from_acpi(&apic_info)?);
 
-        acpi::init_acpi(acpi).unwrap();
+        // acpi::init_acpi(acpi).unwrap();
 
         if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
             for entry in mcfg.entries() {
diff --git a/src/mem/phys/mod.rs b/src/mem/phys/mod.rs
index 31aa664f..1b691cf1 100644
--- a/src/mem/phys/mod.rs
+++ b/src/mem/phys/mod.rs
@@ -103,7 +103,7 @@ fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
     }
 }
 
-fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
+pub fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
     it: I,
     count: usize,
 ) -> Option<PhysicalAddress> {
@@ -144,14 +144,14 @@ pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
     // Map the physical memory
     let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();
 
+    reserve_region("kernel", kernel_physical_memory_region());
+
     ARCHITECTURE.map_physical_memory(it.clone(), phys_start, phys_end)?;
 
     let total_count = (phys_end - phys_start) / 0x1000;
     let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
     let page_bitmap_page_count = (page_bitmap_size + 0xFFF) / 0x1000;
 
-    reserve_region("kernel", kernel_physical_memory_region());
-
     let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();
 
     reserve_region(
diff --git a/src/util/mod.rs b/src/util/mod.rs
index d69d94c1..20124369 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -3,7 +3,7 @@ use core::time::Duration;
 
 use yggdrasil_abi::error::Error;
 
-use crate::arch::{Architecture, ARCHITECTURE};
+// use crate::arch::{Architecture, ARCHITECTURE};
 
 pub mod queue;
 pub mod ring;
@@ -26,12 +26,9 @@ impl<T, I: Iterator<Item = Result<T, Error>>> ResultIterator for I {
 
 /// Performs a busy-loop sleep until the specified duration has passed
 pub fn polling_sleep(duration: Duration) -> Result<(), Error> {
-    let timer = ARCHITECTURE.monotonic_timer();
-    let deadline = timer.monotonic_timestamp()? + duration;
-
-    while timer.monotonic_timestamp()? < deadline {
+    // TODO no non-IRQ mode timestamp provider
+    for _ in 0..1000000 {
         core::hint::spin_loop();
     }
-
     Ok(())
 }