x86-64: make SMP work with new mm
commit 399e9531e7
parent 6949f8c44a
@ -27,7 +27,10 @@ use device_api::{
ResetDevice,
};

use crate::mem::{device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, PhysicalAddress};
use crate::mem::{
device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, table::KernelAddressSpace,
PhysicalAddress,
};

cfg_if! {
if #[cfg(target_arch = "aarch64")] {

@ -75,7 +78,7 @@ pub trait Architecture {
base: PhysicalAddress,
size: usize,
) -> Result<RawDeviceMemoryMapping, Error>;
unsafe fn unmap_device_memory(&self, map: RawDeviceMemoryMapping);
unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping);

fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
&self,

@ -1,6 +1,7 @@
//! x86-64 implementation of ACPI management interfaces
use core::{
alloc::{AllocError, Allocator, GlobalAlloc, Layout},
mem::{align_of, size_of},
ptr::NonNull,
sync::atomic::Ordering,
time::Duration,

@ -23,7 +24,10 @@ use crate::{
x86_64::{smp::CPU_COUNT, IrqNumber, SHUTDOWN_FENCE},
Architecture, CpuMessage, ARCHITECTURE,
},
mem::{address::FromRaw, heap::GLOBAL_HEAP, PhysicalAddress},
mem::{
address::FromRaw, heap::GLOBAL_HEAP, pointer::PhysicalRef, read_memory, write_memory,
PhysicalAddress,
},
sync::IrqSafeSpinlock,
util,
};

@ -78,10 +82,14 @@ unsafe impl Allocator for AcpiAllocator {
}

impl acpi_system::Handler for AcpiHandlerImpl {
unsafe fn map_slice(address: u64, length: u64) -> &'static [u8] {
let slice = PhysicalAddress::from_raw(address).virtualize_slice::<u8>(length as usize);
type MappedSlice = PhysicalRef<'static, [u8]>;

unsafe fn map_slice(address: u64, length: u64) -> Self::MappedSlice {
PhysicalRef::map_slice(
PhysicalAddress::from_raw(address),
length.try_into().unwrap(),
)

todo!();
// PhysicalPointer::into_raw(slice)

// if address + length < 0x100000000 {

@ -128,39 +136,47 @@ impl acpi_system::Handler for AcpiHandlerImpl {
}

fn mem_read_u8(address: u64) -> u8 {
todo!()
let value = unsafe { read_memory(PhysicalAddress::from_raw(address)) };
log::trace!("mem_read_u8 {:#x} -> {:#x}", address, value);
value
}

fn mem_read_u16(address: u64) -> u16 {
todo!()
let value = unsafe { read_memory(PhysicalAddress::from_raw(address)) };
log::trace!("mem_read_u16 {:#x} -> {:#x}", address, value);
value
}

fn mem_read_u32(address: u64) -> u32 {
todo!()
let value = unsafe { read_memory(PhysicalAddress::from_raw(address)) };
log::trace!("mem_read_u32 {:#x} -> {:#x}", address, value);
value
}

fn mem_read_u64(address: u64) -> u64 {
todo!()
let value = unsafe { read_memory(PhysicalAddress::from_raw(address)) };
log::trace!("mem_read_u64 {:#x} -> {:#x}", address, value);
value
}

fn mem_write_u8(address: u64, value: u8) {
log::trace!("mem_write_u8 {:#x}, {:#x}", address, value);
todo!()
unsafe { write_memory(PhysicalAddress::from_raw(address), value) }
}

fn mem_write_u16(address: u64, value: u16) {
log::trace!("mem_write_u16 {:#x}, {:#x}", address, value);
todo!()
unsafe { write_memory(PhysicalAddress::from_raw(address), value) }
}

fn mem_write_u32(address: u64, value: u32) {
log::trace!("mem_write_u32 {:#x}, {:#x}", address, value);
todo!()
unsafe { write_memory(PhysicalAddress::from_raw(address), value) }
}

fn mem_write_u64(address: u64, value: u64) {
log::trace!("mem_write_u64 {:#x}, {:#x}", address, value);
todo!()
unsafe { write_memory(PhysicalAddress::from_raw(address), value) }
}

fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> {

@ -14,7 +14,7 @@ use tock_registers::{

use crate::{
arch::{
x86_64::{registers::MSR_IA32_APIC_BASE, smp::CPU_COUNT},
x86_64::{mem::table::L3, registers::MSR_IA32_APIC_BASE, smp::CPU_COUNT},
CpuMessage,
},
mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},

@ -241,14 +241,14 @@ impl LocalApic {
/// # Safety
///
/// Unsafe: only meant to be called by the BSP during SMP init.
pub unsafe fn wakeup_cpu(&self, apic_id: u32, entry_vector: usize) {
pub unsafe fn wakeup_cpu(&self, apic_id: u32, entry_vector: PhysicalAddress) {
infoln!("Waking up apic{}, entry = {:#x}", apic_id, entry_vector);

while self.regs.ICR0.matches_all(ICR0::DeliveryStatus::SET) {
core::hint::spin_loop();
}

let entry_vector = entry_vector >> 12;
let entry_vector = entry_vector.page_index::<L3>();

// INIT assert
self.regs.ICR1.write(ICR1::PhysicalDestination.val(apic_id));
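
The wakeup_cpu change above replaces the raw `entry_vector >> 12` shift with `entry_vector.page_index::<L3>()`. A stand-alone sketch (not part of the commit) of why the two are equivalent for the 4 KiB L3 level: the SIPI start-up vector is simply the physical page number of the bootstrap code. `L3_SIZE` here is an assumed constant mirroring the kernel's `L3::SIZE`.

    // Minimal sketch, not kernel code.
    const L3_SIZE: u64 = 4096; // assumed to match the kernel's L3::SIZE

    fn page_index_l3(entry: u64) -> u64 {
        entry / L3_SIZE // same as `entry >> 12` for 4 KiB pages
    }

    fn main() {
        // AP_BOOTSTRAP_CODE lives at 0x7000, so the SIPI vector is 0x7.
        assert_eq!(page_index_l3(0x7000), 0x7);
        assert_eq!(page_index_l3(0x7000), 0x7000 >> 12);
    }
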
@ -46,10 +46,11 @@ ap_start_32:
mov eax, dword [0x6000 + 0x00]
mov cr3, eax

; Enable EFER.LME
; Enable EFER.LME + EFER.NXE
mov ecx, 0xC0000080
rdmsr
or eax, 1 << 8
or eax, 1 << 11
wrmsr

; Enable paging

@ -79,6 +80,7 @@ ap_start_64:

; Jump to kernel entry
mov rax, qword [0x6000 + 0x18]

jmp rax

align 4

@ -1,5 +1,5 @@
//! x86-64 boot and entry functions
use core::arch::global_asm;
use core::{arch::global_asm, sync::atomic::Ordering};

use tock_registers::interfaces::Writeable;
use yboot_proto::{

@ -8,9 +8,12 @@ use yboot_proto::{
};

use crate::{
arch::{x86_64::registers::MSR_IA32_KERNEL_GS_BASE, Architecture, ArchitectureImpl},
arch::{
x86_64::{registers::MSR_IA32_KERNEL_GS_BASE, smp::CPU_COUNT},
ArchitectureImpl,
},
fs::devfs,
kernel_main,
kernel_main, kernel_secondary_main,
mem::KERNEL_VIRT_OFFSET,
task::runtime,
};

@ -59,43 +62,6 @@ static YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 {
res_size: 0,
},
};
//
//
// unsafe extern "C" fn __x86_64_upper_entry() -> ! {
// }
//
// /// Application processor entry point
// pub extern "C" fn __x86_64_ap_entry() -> ! {
// let cpu_id = CPU_COUNT.load(Ordering::Acquire);
//
// MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
// unsafe {
// core::arch::asm!("swapgs");
// }
// MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
// unsafe {
// core::arch::asm!("swapgs");
// }
//
// // Still not initialized: GDT, IDT, CPU features, syscall, kernel_gs_base
// cpuid::feature_gate();
//
// infoln!("cpu{} initializing", cpu_id);
// unsafe {
// ARCHITECTURE.init_mmu(false);
// core::arch::asm!("wbinvd");
//
// // Cpu::init_local(LocalApic::new(), cpu_id as u32);
// // syscall::init_syscall();
// exception::init_exceptions(cpu_id);
//
// ARCHITECTURE.init_platform(cpu_id);
// }
//
// CPU_COUNT.fetch_add(1, Ordering::Release);
//
// kernel_secondary_main()
// }

unsafe fn init_dummy_cpu() {
// TODO this is incorrect

@ -110,10 +76,6 @@ unsafe fn init_dummy_cpu() {
core::arch::asm!("swapgs");
}

pub extern "C" fn __x86_64_ap_entry() -> ! {
loop {}
}

extern "C" fn __x86_64_upper_entry() -> ! {
// Safety: ok, CPU hasn't been initialized yet and it's the early kernel entry
unsafe {

@ -147,6 +109,31 @@ extern "C" fn __x86_64_upper_entry() -> ! {
kernel_main()
}

/// Application processor entry point
pub extern "C" fn __x86_64_ap_entry() -> ! {
let cpu_id = CPU_COUNT.load(Ordering::Acquire);

unsafe {
init_dummy_cpu();
}

// Still not initialized: GDT, IDT, CPU features, syscall, kernel_gs_base

infoln!("cpu{} initializing", cpu_id);

unsafe {
// Cpu::init_local(LocalApic::new(), cpu_id as u32);
// syscall::init_syscall();
exception::init_exceptions(cpu_id);

ARCHITECTURE.init_platform(cpu_id);
}

CPU_COUNT.fetch_add(1, Ordering::Release);

kernel_secondary_main()
}

global_asm!(
r#"
// {boot_data}
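
The new `__x86_64_ap_entry` above publishes the AP with a Release increment of `CPU_COUNT`, and the BSP (in the smp.rs hunks further down) spins on Acquire loads until the counter changes before it starts the next core. A stand-alone sketch of that handshake, using threads in place of CPUs; it is an illustration only, not the commit's code.

    // Sketch only: the CPU_COUNT Acquire/Release handshake.
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);

    fn main() {
        let seen = CPU_COUNT.load(Ordering::Acquire);

        // Plays the role of the AP entry point.
        let ap = thread::spawn(|| {
            // ... per-CPU initialization would happen here ...
            CPU_COUNT.fetch_add(1, Ordering::Release); // "I'm up"
        });

        // Plays the role of start_ap_core on the BSP.
        while CPU_COUNT.load(Ordering::Acquire) == seen {
            std::hint::spin_loop();
        }

        ap.join().unwrap();
        assert_eq!(CPU_COUNT.load(Ordering::Acquire), 2);
    }
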
@ -7,7 +7,12 @@ use tock_registers::interfaces::Writeable;

use crate::{
arch::{
x86_64::{gdt, registers::MSR_IA32_KERNEL_GS_BASE, syscall},
x86_64::{
cpuid::{self, PROCESSOR_FEATURES},
gdt,
registers::MSR_IA32_KERNEL_GS_BASE,
syscall,
},
CpuMessage,
},
sync::IrqSafeSpinlock,

@ -67,6 +72,9 @@ impl Cpu {
pub unsafe fn init_local(local_apic: LocalApic, id: u32) {
infoln!("Initialize CPU with id {}", id);

// Initialize CPU features
cpuid::enable_features();

let tss_address = gdt::init();

let this = Box::new(Cpu {

@ -2,6 +2,9 @@

use bitflags::bitflags;
use kernel_util::util::OneTimeInit;
use tock_registers::interfaces::ReadWriteable;

use super::registers::{CR4, XCR0};

bitflags! {
pub struct ProcessorFeatures: u64 {

@ -9,7 +12,27 @@ bitflags! {
}
}

unsafe fn cpuid(eax: u32, result: &mut [u32]) {
bitflags! {
pub struct EcxFeatures: u32 {
const XSAVE = 1 << 26;
const AVX = 1 << 28;
}
}

bitflags! {
pub struct EdxFeatures: u32 {
const FXSR = 1 << 24;
const PGE = 1 << 13;
}
}

bitflags! {
pub struct ExtEdxFeatures: u32 {
const PDPE1GB = 1 << 26;
}
}

unsafe fn raw_cpuid(eax: u32, result: &mut [u32]) {
core::arch::asm!(
r#"
push %rbx

@ -25,19 +48,65 @@ unsafe fn cpuid(eax: u32, result: &mut [u32]) {
);
}

fn cpuid_features() -> (EcxFeatures, EdxFeatures) {
let mut raw = [0; 3];

unsafe {
raw_cpuid(0x1, &mut raw);
}

(
EcxFeatures::from_bits_truncate(raw[2]),
EdxFeatures::from_bits_truncate(raw[1]),
)
}

fn cpuid_ext_features() -> ExtEdxFeatures {
let mut raw = [0; 3];

unsafe {
raw_cpuid(0x80000001, &mut raw);
}

ExtEdxFeatures::from_bits_truncate(raw[1])
}

pub static PROCESSOR_FEATURES: OneTimeInit<ProcessorFeatures> = OneTimeInit::new();

pub fn init_cpuid() {
let mut features = ProcessorFeatures::empty();
let mut data = [0; 3];

unsafe {
cpuid(0x80000001, &mut data);
}
let ext_edx = cpuid_ext_features();

if data[1] & (1 << 26) != 0 {
if ext_edx.contains(ExtEdxFeatures::PDPE1GB) {
features |= ProcessorFeatures::PDPE1GB;
}

PROCESSOR_FEATURES.init(features);
}

pub fn enable_features() {
let (ecx, edx) = cpuid_features();

if !ecx.contains(EcxFeatures::XSAVE) {
panic!("XSAVE feature is required");
}

if !edx.contains(EdxFeatures::FXSR) {
panic!("FXSR feature is required");
}

if !edx.contains(EdxFeatures::PGE) {
todo!("PGE feature (currently) is not optional");
}

CR4.modify(CR4::OSXSAVE::SET + CR4::OSFXSR::SET + CR4::PGE::SET);

// XXX? SSE is supported on all x86-64s
XCR0.modify(XCR0::X87::SET + XCR0::SSE::SET);

if ecx.contains(EcxFeatures::AVX) {
// Enable AVX
XCR0.modify(XCR0::AVX::SET);
}
}
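
For reference, a stand-alone sketch of the same leaf-0x1 bit decoding that `cpuid_features`/`enable_features` perform above, using the standard library's `__cpuid` intrinsic instead of the hand-written `raw_cpuid` assembly. The bit positions are the ones declared in `EcxFeatures`/`EdxFeatures`; this is an illustration, not the kernel's code, and it only builds for x86-64.

    // Sketch only: decode the CPUID leaf 0x1 bits used above.
    fn main() {
        use std::arch::x86_64::__cpuid;

        let leaf1 = unsafe { __cpuid(0x1) };
        let xsave = leaf1.ecx & (1 << 26) != 0; // EcxFeatures::XSAVE
        let avx = leaf1.ecx & (1 << 28) != 0; // EcxFeatures::AVX
        let fxsr = leaf1.edx & (1 << 24) != 0; // EdxFeatures::FXSR
        let pge = leaf1.edx & (1 << 13) != 0; // EdxFeatures::PGE

        println!("xsave={xsave} avx={avx} fxsr={fxsr} pge={pge}");
    }
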
@ -129,3 +129,8 @@ pub unsafe fn outw(port: u16, value: u16) {
pub unsafe fn outl(port: u16, value: u32) {
core::arch::asm!("outl %eax, %dx", in("dx") port, in("eax") value, options(att_syntax));
}

#[inline]
pub unsafe fn flush_tlb_entry(address: usize) {
core::arch::asm!("invlpg ({0})", in(reg) address, options(att_syntax));
}

@ -11,11 +11,11 @@ use static_assertions::{const_assert_eq, const_assert_ne};
pub mod table;

use crate::{
arch::x86_64::mem::table::PageAttributes,
arch::x86_64::{intrinsics, mem::table::PageAttributes},
mem::{
address::{FromRaw, IntoRaw, KernelImageObject},
device::RawDeviceMemoryMapping,
table::EntryLevel,
table::{EntryLevel, KernelAddressSpace, MapAttributes},
PhysicalAddress, KERNEL_VIRT_OFFSET,
},
};

@ -166,7 +166,7 @@ pub(super) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
) -> Result<RawDeviceMemoryMapping, Error> {
debugln!("Map {}B @ {:#x}", size, base);
// debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = L3::page_offset(base.into_raw());
let page_count = (l3_offset + size + L3::SIZE - 1) / L3::SIZE;

@ -202,8 +202,26 @@ pub(super) unsafe fn map_device_memory(
}
}

pub(super) unsafe fn unmap_device_memory(map: RawDeviceMemoryMapping) {
loop {}
pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
// debugln!(
// "Unmap {}B @ {:#x}",
// map.page_count * map.page_size,
// map.base_address
// );
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
let page = map.base_address + i * L3::SIZE;
let l2i = L2::index(page);
let l3i = L3::index(page);
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
intrinsics::flush_tlb_entry(page);
}
}
L2::SIZE => todo!(),
_ => unimplemented!(),
}
}

pub(super) unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {

@ -313,6 +331,8 @@ pub unsafe fn init_fixed_tables() {
KERNEL_TABLES.l0.data[RAM_MAPPING_L0I] =
(ram_mapping_l1_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();

// TODO ENABLE EFER.NXE

let cr3 = &KERNEL_TABLES.l0 as *const _ as usize - KERNEL_VIRT_OFFSET;
core::arch::asm!("wbinvd; mov {0}, %cr3", in(reg) cr3, options(att_syntax));
}

@ -7,6 +7,7 @@ use abi::error::Error;
use bitflags::bitflags;

use crate::{
arch::x86_64::intrinsics,
mem::{
address::{AsPhysicalAddress, FromRaw},
phys,

@ -401,9 +402,8 @@ impl AddressSpace {
}

l3[l3i] = entry;

unsafe {
core::arch::asm!("invlpg ({0})", in(reg) virt, options(att_syntax));
intrinsics::flush_tlb_entry(virt);
}

Ok(())

@ -1,7 +1,8 @@
// TODO fix all TODOs
use core::{mem::size_of, sync::atomic::Ordering};

use abi::error::Error;
use acpi_lib::{AcpiHandler, AcpiTable, AcpiTables, InterruptModel};
use acpi_lib::{mcfg::Mcfg, AcpiTables, InterruptModel};
use alloc::boxed::Box;
use device_api::{
input::KeyboardProducer, interrupt::ExternalInterruptController,

@ -9,6 +10,7 @@ use device_api::{
};
use git_version::git_version;
use kernel_util::util::OneTimeInit;
use memtables::FixedTables;
use yboot_proto::{v1::AvailableMemoryRegion, LoadProtocolV1};

mod acpi;

@ -34,6 +36,7 @@ use crate::{
debug::{self, LogLevel},
device::{
self,
bus::pci::PciBusManager,
display::{console, fb_console::FramebufferConsole, linear_fb::LinearFramebuffer},
tty::CombinedTerminal,
},

@ -61,7 +64,7 @@ use self::{
mem::{
init_fixed_tables,
table::{PageAttributes, PageEntry, L1, L3},
EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1, RAM_MAPPING_OFFSET,
EarlyMapping, KERNEL_TABLES, MEMORY_LIMIT, RAM_MAPPING_L1, RAM_MAPPING_OFFSET,
},
peripherals::{i8253::I8253, ps2::PS2Controller, serial::ComPort},
smp::CPU_COUNT,

@ -106,6 +109,20 @@ impl Architecture for X86_64 {
const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
type IrqNumber = IrqNumber;

unsafe fn start_application_processors(&self) {
if let Some(acpi) = self.acpi.try_get() {
let Some(pinfo) = acpi
.platform_info_in(AcpiAllocator)
.ok()
.and_then(|p| p.processor_info)
else {
return;
};

smp::start_ap_cores(&pinfo);
}
}

fn cpu_count() -> usize {
CPU_COUNT.load(Ordering::Acquire)
}

@ -143,7 +160,7 @@ impl Architecture for X86_64 {
}

#[inline]
unsafe fn unmap_device_memory(&self, map: RawDeviceMemoryMapping) {
unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping) {
mem::unmap_device_memory(map)
}

@ -347,9 +364,7 @@ impl X86_64 {
device::register_device(self.ioapic.get());
device::register_device(ps2);

// TODO setup PCI devices
} else {
loop {}
PciBusManager::setup_bus_devices().unwrap();
}
}

@ -373,15 +388,13 @@ impl X86_64 {

self.ioapic.init(IoApic::from_acpi(&apic_info).unwrap());

// TODO ACPI init
// acpi::init_acpi(acpi).unwrap();

// TODO MCFG
// if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
// for entry in mcfg.entries() {
// PciBusManager::add_segment_from_mcfg(entry).unwrap();
// }
// }
if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
for entry in mcfg.entries() {
PciBusManager::add_segment_from_mcfg(entry).unwrap();
}
}
}

unsafe fn init_framebuffer(&'static self) {

@ -1,19 +1,26 @@
//! x86-64 multiprocessing implementation
use core::{
mem::size_of,
sync::atomic::{AtomicUsize, Ordering},
};
use core::sync::atomic::{AtomicUsize, Ordering};

use acpi_lib::platform::{ProcessorInfo, ProcessorState};

use crate::{
arch::{
x86_64::{boot::__x86_64_ap_entry, mem::KERNEL_TABLES},
x86_64::{
boot::__x86_64_ap_entry,
intrinsics::flush_tlb_entry,
mem::{
table::{PageAttributes, L1, L2},
KERNEL_TABLES,
},
},
Architecture, ArchitectureImpl,
},
mem::{
address::{AsPhysicalAddress, IntoRaw},
address::{AsPhysicalAddress, FromRaw, IntoRaw},
phys,
pointer::PhysicalRefMut,
table::{PageEntry, PageTable},
PhysicalAddress,
},
task::Cpu,
};

@ -26,61 +33,26 @@ pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
static AP_BOOTSTRAP_BIN: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/__x86_64_ap_boot.bin"));

const AP_STACK_PAGES: usize = 8;
const AP_BOOTSTRAP_DATA: usize = 0x6000;
const AP_BOOTSTRAP_CODE: usize = 0x7000;
const AP_BOOTSTRAP_DATA: PhysicalAddress = PhysicalAddress::from_raw(0x6000usize);
const AP_BOOTSTRAP_CODE: PhysicalAddress = PhysicalAddress::from_raw(0x7000usize);
const AP_ADDRESS_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(0x100000usize);

#[repr(C)]
#[allow(dead_code)]
struct ApBootstrapData {
cr3: usize,
cr3: PhysicalAddress,
stack_base: usize,
stack_size: usize,
entry: usize,
}

unsafe fn load_ap_bootstrap_code() {
let src_ptr = AP_BOOTSTRAP_BIN.as_ptr();
let dst_ptr = AP_BOOTSTRAP_CODE as *mut u8;

let size = AP_BOOTSTRAP_BIN.len();

assert!(size != 0, "Empty bootstrap code");
assert!(
AP_BOOTSTRAP_CODE + size < 0x100000,
"Invalid bootstrap code placement: is not below 1MiB"
);

todo!();
// let src_slice = core::slice::from_raw_parts(src_ptr, size);
// let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size);

// dst_slice.copy_from_slice(src_slice);
}

unsafe fn load_ap_bootstrap_data(src: &ApBootstrapData) {
let src_ptr = src as *const _ as *const u8;
let dst_ptr = AP_BOOTSTRAP_DATA as *mut u8;
let size = size_of::<ApBootstrapData>();

assert!(
AP_BOOTSTRAP_DATA + size < 0x100000,
"Invalid bootstrap data placement: is not below 1MiB"
);

todo!()
// let src_slice = core::slice::from_raw_parts(src_ptr, size);
// let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size);

// dst_slice.copy_from_slice(src_slice);
// core::arch::asm!("wbinvd");
}

unsafe fn start_ap_core(apic_id: u32) {
assert!(ArchitectureImpl::interrupt_mask());

let bsp_cpu = Cpu::local();
let bsp_apic = bsp_cpu.local_apic();

let cr3 = KERNEL_TABLES.as_physical_address().into_raw();
let cr3 = KERNEL_TABLES.as_physical_address();
let stack_base = phys::alloc_pages_contiguous(AP_STACK_PAGES)
.unwrap()
.virtualize_raw();

@ -93,11 +65,13 @@ unsafe fn start_ap_core(apic_id: u32) {
entry: __x86_64_ap_entry as usize,
};

load_ap_bootstrap_data(&data);
let mut data_ref = PhysicalRefMut::<ApBootstrapData>::map(AP_BOOTSTRAP_DATA);
*data_ref = data;

let cpu_count = CPU_COUNT.load(Ordering::Acquire);

// Send an IPI to wake up the AP
core::arch::asm!("wbinvd");
bsp_apic.wakeup_cpu(apic_id, AP_BOOTSTRAP_CODE);

while cpu_count == CPU_COUNT.load(Ordering::Acquire) {

@ -119,11 +93,32 @@ pub unsafe fn start_ap_cores(info: &ProcessorInfo<AcpiAllocator>) {
return;
}

load_ap_bootstrap_code();
// Temporarily identity-map the lowest 2MiB
let mut identity_l1 = PageTable::<L1>::new_zeroed().unwrap();
let mut identity_l2 = PageTable::<L2>::new_zeroed().unwrap();

identity_l1[0] =
PageEntry::<L1>::table(identity_l2.as_physical_address(), PageAttributes::WRITABLE);
identity_l2[0] = PageEntry::<L2>::block(PhysicalAddress::ZERO, PageAttributes::WRITABLE);

assert_eq!(KERNEL_TABLES.l0.data[0], 0);
KERNEL_TABLES.l0.data[0] = IntoRaw::<u64>::into_raw(identity_l1.as_physical_address())
| (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();

// Load AP_BOOTSTRAP_CODE
let mut code_ref = PhysicalRefMut::map_slice(AP_BOOTSTRAP_CODE, AP_BOOTSTRAP_BIN.len());
code_ref.copy_from_slice(AP_BOOTSTRAP_BIN);

for ap in aps.iter() {
if ap.is_ap && ap.state == ProcessorState::WaitingForSipi {
start_ap_core(ap.local_apic_id);
}
}

// Remove the identity-map
identity_l2[0] = PageEntry::INVALID;
flush_tlb_entry(0);
KERNEL_TABLES.l0.data[0] = 0;

// TODO drop the tables
}
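
A stand-alone sketch of how the `#[repr(C)]` `ApBootstrapData` layout above lines up with the hard-coded offsets in the bootstrap assembly (`[0x6000 + 0x00]` for CR3, `[0x6000 + 0x18]` for the entry point). It assumes `PhysicalAddress` is a plain `u64` newtype, as the surrounding code suggests; this is an illustration, not the commit's code.

    // Sketch only: mirror of ApBootstrapData with PhysicalAddress replaced by u64.
    use core::mem::offset_of;

    #[repr(C)]
    struct ApBootstrapData {
        cr3: u64, // PhysicalAddress in the kernel (assumed 8 bytes wide)
        stack_base: usize,
        stack_size: usize,
        entry: usize,
    }

    fn main() {
        // Matches `mov eax, dword [0x6000 + 0x00]` / `mov cr3, eax` in ap_boot.S.
        assert_eq!(offset_of!(ApBootstrapData, cr3), 0x00);
        // Matches `mov rax, qword [0x6000 + 0x18]` / `jmp rax`.
        assert_eq!(offset_of!(ApBootstrapData, entry), 0x18);
    }
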
@ -13,7 +13,10 @@ pub use space::{
ecam::PciEcam, PciConfigSpace, PciConfigurationSpace, PciLegacyConfigurationSpace,
};

use crate::sync::IrqSafeSpinlock;
use crate::{
mem::{address::FromRaw, PhysicalAddress},
sync::IrqSafeSpinlock,
};

bitflags! {
/// Command register of the PCI configuration space

@ -77,7 +80,7 @@ pub struct PciBusSegment {
segment_number: u8,
bus_number_start: u8,
bus_number_end: u8,
ecam_phys_base: Option<usize>,
ecam_phys_base: Option<PhysicalAddress>,

devices: Vec<PciBusDevice>,
}

@ -192,7 +195,7 @@ impl PciBusManager {
segment_number: entry.pci_segment_group as u8,
bus_number_start: entry.bus_number_start,
bus_number_end: entry.bus_number_end,
ecam_phys_base: Some(entry.base_address as usize),
ecam_phys_base: Some(PhysicalAddress::from_raw(entry.base_address)),

devices: Vec::new(),
};

@ -1,7 +1,7 @@
//! PCI Express ECAM interface
use yggdrasil_abi::error::Error;

use crate::mem::{device::DeviceMemory, ConvertAddress};
use crate::mem::{device::DeviceMemoryMapping, PhysicalAddress};

use super::{PciAddress, PciConfigurationSpace};

@ -9,27 +9,13 @@ use super::{PciAddress, PciConfigurationSpace};
#[derive(Debug)]
#[repr(transparent)]
pub struct PciEcam {
mapping: DeviceMemory,
}

// Only used for probing
#[derive(Debug)]
#[repr(transparent)]
struct PciRawEcam {
virt_addr: usize,
}

impl PciConfigurationSpace for PciRawEcam {
fn read_u32(&self, offset: usize) -> u32 {
assert_eq!(offset & 3, 0);
unsafe { ((self.virt_addr + offset) as *const u32).read_volatile() }
}
mapping: DeviceMemoryMapping,
}

impl PciConfigurationSpace for PciEcam {
fn read_u32(&self, offset: usize) -> u32 {
assert_eq!(offset & 3, 0);
unsafe { ((self.mapping.base() + offset) as *const u32).read_volatile() }
unsafe { ((self.mapping.address() + offset) as *const u32).read_volatile() }
}
}

@ -41,11 +27,9 @@ impl PciEcam {
/// The `phys_addr` must be a valid ECAM address. The address must not alias any other mapped
/// regions. The address must be aligned to a 4KiB boundary and be valid for accesses within a
/// 4KiB-sized range.
pub unsafe fn map(phys_addr: usize) -> Result<Self, Error> {
// TODO check align
let mapping = DeviceMemory::map("pcie-ecam", phys_addr, 0x1000)?;

Ok(PciEcam { mapping })
pub unsafe fn map(phys_addr: PhysicalAddress) -> Result<Self, Error> {
let mapping = DeviceMemoryMapping::map(phys_addr, 0x1000)?;
Ok(Self { mapping })
}

/// Checks if the ECAM contains a valid device configuration space, mapping and returning a

@ -55,29 +39,33 @@ impl PciEcam {
///
/// See [PciEcam::map].
pub unsafe fn probe_raw_parts(
segment_phys_addr: usize,
segment_phys_addr: PhysicalAddress,
bus_offset: u8,
address: PciAddress,
) -> Result<Option<Self>, Error> {
let phys_addr = segment_phys_addr
+ ((address.bus - bus_offset) as usize * 256
let phys_addr = segment_phys_addr.add(
((address.bus - bus_offset) as usize * 256
+ address.device as usize * 8
+ address.function as usize)
* 0x1000;
* 0x1000,
);
let this = Self::map(phys_addr)?;

if phys_addr + 0xFFF < 0x100000000 {
// Probe without allocating a mapping
let raw = PciRawEcam {
virt_addr: phys_addr.virtualize(),
};
Ok(if this.is_valid() { Some(this) } else { None })

if !raw.is_valid() {
return Ok(None);
}
// if phys_addr + 0xFFF < 0x100000000 {
// // Probe without allocating a mapping
// let raw = PciRawEcam {
// virt_addr: phys_addr.virtualize(),
// };

Self::map(phys_addr).map(Some)
} else {
todo!()
}
// if !raw.is_valid() {
// return Ok(None);
// }

// Self::map(phys_addr).map(Some)
// } else {
// todo!()
// }
}
}
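
The `probe_raw_parts` rework above keeps the standard ECAM address arithmetic and only moves it behind `PhysicalAddress::add`. A stand-alone sketch of that offset computation (4 KiB of configuration space per function, 8 functions per device, 256 functions per bus); an illustration only, not the commit's code.

    // Sketch only: byte offset of a function's ECAM window within a segment.
    fn ecam_offset(bus: u8, bus_offset: u8, device: u8, function: u8) -> usize {
        ((bus - bus_offset) as usize * 256 + device as usize * 8 + function as usize) * 0x1000
    }

    fn main() {
        // Function 0 of device 0 on the segment's first bus starts at the base.
        assert_eq!(ecam_offset(0, 0, 0, 0), 0);
        // Bus 0, device 3, function 1 sits (3 * 8 + 1) pages into the segment.
        assert_eq!(ecam_offset(0, 0, 3, 1), 25 * 0x1000);
    }
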
@ -7,7 +7,7 @@ use crate::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
#[cfg(target_arch = "aarch64")]
pub mod devtree;

// pub mod bus;
pub mod bus;
pub mod display;
// pub mod power;
pub mod serial;

@ -2,9 +2,12 @@ use core::{
fmt,
iter::Step,
marker::PhantomData,
mem::align_of,
ops::{Add, Deref, DerefMut, Sub},
};

use bytemuck::{Pod, Zeroable};

use crate::arch::{Architecture, ArchitectureImpl, ARCHITECTURE};

use super::{pointer::PhysicalPointer, table::EntryLevel, KERNEL_VIRT_OFFSET};

@ -89,6 +92,15 @@ impl PhysicalAddress {
Self((self.0 + L::SIZE as u64 - 1) & !(L::SIZE as u64 - 1))
}

pub const fn page_index<L: ~const EntryLevel>(self) -> usize {
L::index(self.0 as usize)
}

#[inline]
pub const fn is_aligned_for<T: Sized>(self) -> bool {
self.0 as usize % align_of::<T>() == 0
}

pub unsafe fn from_virtualized(address: usize) -> Self {
ArchitectureImpl::physicalize(address).unwrap()
}

@ -97,12 +109,22 @@ impl PhysicalAddress {
ArchitectureImpl::virtualize(self).unwrap()
}

pub fn virtualize<T>(self) -> PhysicalPointer<T> {
loop {}
pub unsafe fn virtualize<T>(self) -> PhysicalPointer<T> {
if !self.is_aligned_for::<T>() {
todo!();
}

pub fn virtualize_slice<T>(self, len: usize) -> PhysicalPointer<[T]> {
loop {}
let base = self.virtualize_raw();
PhysicalPointer::from_raw(base as *mut T)
}

pub unsafe fn virtualize_slice<T: Sized>(self, len: usize) -> PhysicalPointer<[T]> {
if !self.is_aligned_for::<T>() {
todo!();
}

let base = self.virtualize_raw();
PhysicalPointer::from_raw_parts(base as *mut T, len)
}
}

@ -43,14 +43,24 @@ impl RawDeviceMemoryMapping {

impl Drop for RawDeviceMemoryMapping {
fn drop(&mut self) {
loop {}
unsafe {
ARCHITECTURE.unmap_device_memory(self);
}
}
}

impl DeviceMemoryMapping {
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
let inner = RawDeviceMemoryMapping::map(base, size)?;
loop {}
let address = inner.address;
Ok(Self {
inner: Arc::new(inner),
address,
})
}

pub fn address(&self) -> usize {
self.address
}
}

@ -15,7 +15,12 @@
//
// pub mod device;

use core::{alloc::Layout, ffi::c_void, mem::size_of, ops::Add};
use core::{
alloc::Layout,
ffi::c_void,
mem::{align_of, size_of},
ops::Add,
};

use abi::error::Error;

@ -30,10 +35,31 @@ pub mod table;

pub use address::PhysicalAddress;

use self::table::AddressSpace;
use self::{device::DeviceMemoryMapping, table::AddressSpace};

pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;

pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let address = io.address();

if address % align_of::<T>() == 0 {
(address as *const T).read_volatile()
} else {
(address as *const T).read_unaligned()
}
}

pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let address = io.address();

if address % align_of::<T>() == 0 {
(address as *mut T).write_volatile(value)
} else {
(address as *mut T).write_unaligned(value)
}
}
// pub mod phys;
//
// /// Kernel's physical load address
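
The `read_memory`/`write_memory` helpers above dispatch between volatile and unaligned accesses depending on whether the mapped address is aligned for `T`. A user-space sketch of the same dispatch on an ordinary buffer; it illustrates the pattern only and is not the kernel's code.

    // Sketch only: volatile access when aligned for T, unaligned access otherwise.
    use core::mem::align_of;

    unsafe fn read_at<T>(address: usize) -> T {
        if address % align_of::<T>() == 0 {
            (address as *const T).read_volatile()
        } else {
            (address as *const T).read_unaligned()
        }
    }

    fn main() {
        let bytes = [0u8; 16];
        // An offset into the buffer that is not guaranteed to be aligned for u32.
        let addr = bytes.as_ptr() as usize + 1;
        let value: u32 = unsafe { read_at(addr) };
        assert_eq!(value, 0);
    }
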
@ -5,7 +5,7 @@ use abi::error::Error;

use crate::mem::{
address::{FromRaw, IntoRaw},
pointer::{PhysicalRef, PhysicalRefMut},
pointer::PhysicalRefMut,
PhysicalAddress,
};

@ -76,7 +76,7 @@ impl PhysicalMemoryManager {
self.last_free_bit = 0;
self.alloc_page()
} else {
loop {}
Err(Error::OutOfMemory)
}
}

@ -102,7 +102,7 @@ impl PhysicalMemoryManager {
self.last_free_bit = 0;
self.alloc_2m_page()
} else {
loop {}
Err(Error::OutOfMemory)
}
}

@ -127,7 +127,7 @@ impl PhysicalMemoryManager {
self.last_free_bit = 0;
self.alloc_contiguous_pages(count)
} else {
loop {}
Err(Error::OutOfMemory)
}
}

@ -1,6 +1,7 @@
use core::{
alloc::Layout,
fmt,
mem::align_of,
ops::{Deref, DerefMut},
};

@ -31,8 +32,37 @@ impl<T: ?Sized> PhysicalPointer<T> {
}

impl<T: Sized> PhysicalPointer<T> {
#[inline(always)]
pub fn is_aligned(&self) -> bool {
self.pointer.addr() % align_of::<T>() == 0
}

#[inline(always)]
pub const unsafe fn from_raw(pointer: *mut T) -> PhysicalPointer<T> {
PhysicalPointer { pointer }
}

#[inline(always)]
pub unsafe fn from_raw_parts(base: *mut T, len: usize) -> PhysicalPointer<[T]> {
PhysicalPointer {
pointer: core::ptr::slice_from_raw_parts_mut(base, len),
}
}

pub unsafe fn write_unaligned(self, value: T) {
self.write_unaligned(value);
self.pointer.write_unaligned(value)
}

pub unsafe fn write_volatile(self, value: T) {
self.pointer.write_volatile(value)
}

pub unsafe fn read_unaligned(self) -> T {
self.pointer.read_unaligned()
}

pub unsafe fn read_volatile(self) -> T {
self.pointer.read_volatile()
}
}

@ -96,6 +126,11 @@ impl<'a, T: Sized> PhysicalRef<'a, T> {
let value = virtualize_raw(physical);
PhysicalRef { value }
}

pub unsafe fn map_slice(physical: PhysicalAddress, len: usize) -> PhysicalRef<'a, [T]> {
let value = virtualize_slice_raw(physical, len);
PhysicalRef { value }
}
}

impl<T: ?Sized> PhysicalRef<'_, T> {

@ -51,6 +51,17 @@ pub trait VirtualMemoryManager {
fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error>;
}

pub trait KernelAddressSpace {
type Mapping;

fn map_page(
&self,
virt: usize,
physical: PhysicalAddress,
attrs: MapAttributes,
) -> Result<Self::Mapping, Error>;
}

/// Interface for non-terminal tables to retrieve the next level of address translation tables
pub trait NextPageTable {
/// Type for the next-level page table

@ -97,7 +97,7 @@ where

let phys_page = phys::alloc_page()?;
space.map_page(virt_page, phys_page, attrs)?;
debugln!("Map {:#x} -> {:#x}", virt_page, phys_page);
// debugln!("Map {:#x} -> {:#x}", virt_page, phys_page);

let dst_slice = unsafe { PhysicalRefMut::map_slice(phys_page.add(page_off), count) };
// let dst_slice = unsafe {