mem: rework phys/virt mem management, get process working

Mark Poliakov 2023-09-13 18:21:45 +03:00
parent 3330beedfd
commit 6949f8c44a
39 changed files with 2264 additions and 1335 deletions

@ -14,12 +14,13 @@ vfs = { path = "lib/vfs" }
 memfs = { path = "lib/memfs" }
 device-api = { path = "lib/device-api", features = ["derive"] }
 kernel-util = { path = "lib/kernel-util" }
+memtables = { path = "lib/memtables" }
 atomic_enum = "0.2.0"
 bitflags = "2.3.3"
 linked_list_allocator = "0.10.5"
 spinning_top = "0.2.5"
-# static_assertions = "1.1.0"
+static_assertions = "1.1.0"
 tock-registers = "0.8.1"
 cfg-if = "1.0.0"
 git-version = "0.3.5"
@ -48,7 +49,7 @@ acpi-system = { git = "https://github.com/alnyan/acpi-system.git", version = "0.
 xhci_lib = { git = "https://github.com/rust-osdev/xhci.git", package = "xhci" }

 [features]
-default = []
+default = ["fb_console"]
 fb_console = []
 aarch64_qemu = []
 aarch64_orange_pi3 = []

lib/memtables/Cargo.toml (new file, 9 lines)

@ -0,0 +1,9 @@
[package]
name = "memtables"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bytemuck = { version = "1.14.0", features = ["derive"] }

lib/memtables/src/lib.rs (new file, 39 lines)

@ -0,0 +1,39 @@
#![no_std]
use bytemuck::{Pod, Zeroable};
pub const KERNEL_L3_COUNT: usize = 16;
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C, align(0x1000))]
pub struct RawTable {
pub data: [u64; 512],
}
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct FixedTables {
pub l0: RawTable,
pub kernel_l1: RawTable,
pub kernel_l2: RawTable,
pub kernel_l3s: [RawTable; KERNEL_L3_COUNT],
}
impl RawTable {
pub const fn zeroed() -> Self {
Self { data: [0; 512] }
}
}
impl FixedTables {
pub const fn zeroed() -> Self {
Self {
l0: RawTable::zeroed(),
kernel_l1: RawTable::zeroed(),
kernel_l2: RawTable::zeroed(),
kernel_l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
}
}
}
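The tables above are deliberately plain data: the memtables crate only reserves page-aligned, bytemuck-friendly storage, while all entry encoding stays in the kernel. A compile-only sketch of how a kernel might embed them and recover the physical address of the root table for loading into a translation base register; KERNEL_VIRT_OFFSET is an assumed link-time constant here, and the subtraction mirrors what init_fixed_tables does later in this commit:

use memtables::FixedTables;

// Assumed virtual base the kernel image is linked at; illustration only.
const KERNEL_VIRT_OFFSET: usize = 0xFFFF_FF80_0000_0000;

// Keep the boot tables in a dedicated, page-aligned section of the image.
#[link_section = ".data.tables"]
static KERNEL_TABLES: FixedTables = FixedTables::zeroed();

/// Physical address of the L0 table, assuming the image is loaded at
/// `virtual address - KERNEL_VIRT_OFFSET`.
fn l0_physical_address() -> usize {
    core::ptr::addr_of!(KERNEL_TABLES.l0) as usize - KERNEL_VIRT_OFFSET
}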

@ -27,6 +27,8 @@ use device_api::{
     ResetDevice,
 };

+use crate::mem::{device::RawDeviceMemoryMapping, phys::PhysicalMemoryRegion, PhysicalAddress};
+
 cfg_if! {
     if #[cfg(target_arch = "aarch64")] {
         pub mod aarch64;
@ -60,24 +62,31 @@ pub trait Architecture {
     /// IRQ number type associated with the architecture
     type IrqNumber;

-    /// Initializes the memory management unit and sets up virtual memory management.
-    /// `bsp` flag is provided to make sure mapping tables are only initialized once in a SMP
-    /// system.
-    ///
-    /// # Safety
-    ///
-    /// Unsafe to call if the MMU has already been initialized.
-    unsafe fn init_mmu(&self, bsp: bool);
-
     /// Starts up the application processors that may be present in the system.
     ///
     /// # Safety
     ///
     /// Only safe to call once during system init.
-    unsafe fn start_application_processors(&self);
+    unsafe fn start_application_processors(&self) {}

     /// Allocates a virtual mapping for the specified physical memory region
-    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error>;
+    unsafe fn map_device_memory(
+        &self,
+        base: PhysicalAddress,
+        size: usize,
+    ) -> Result<RawDeviceMemoryMapping, Error>;
+
+    unsafe fn unmap_device_memory(&self, map: RawDeviceMemoryMapping);
+
+    fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
+        &self,
+        it: I,
+        memory_start: PhysicalAddress,
+        memory_end: PhysicalAddress,
+    ) -> Result<(), Error>;
+
+    fn virtualize(address: PhysicalAddress) -> Result<usize, Error>;
+
+    fn physicalize(address: usize) -> Result<PhysicalAddress, Error>;

     // Architecture intrinsics
@ -102,36 +111,50 @@ pub trait Architecture {
     fn register_external_interrupt_controller(
         &self,
         intc: &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber>,
-    ) -> Result<(), Error>;
+    ) -> Result<(), Error> {
+        Err(Error::NotImplemented)
+    }

     /// Adds a local interrupt controller to the system
     fn register_local_interrupt_controller(
         &self,
         intc: &'static dyn LocalInterruptController<IpiMessage = CpuMessage>,
-    ) -> Result<(), Error>;
+    ) -> Result<(), Error> {
+        Err(Error::NotImplemented)
+    }

     /// Adds a monotonic timer to the system
     fn register_monotonic_timer(
         &self,
         timer: &'static dyn MonotonicTimestampProviderDevice,
-    ) -> Result<(), Error>;
+    ) -> Result<(), Error> {
+        Err(Error::NotImplemented)
+    }

     /// Adds a reset device to the system
-    fn register_reset_device(&self, reset: &'static dyn ResetDevice) -> Result<(), Error>;
+    fn register_reset_device(&self, reset: &'static dyn ResetDevice) -> Result<(), Error> {
+        Err(Error::NotImplemented)
+    }

     // TODO only supports 1 extintc per system
     /// Returns the primary external interrupt controller
     fn external_interrupt_controller(
         &'static self,
-    ) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber>;
+    ) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> {
+        unimplemented!()
+    }

     /// Returns the local interrupt controller
     fn local_interrupt_controller(
         &'static self,
-    ) -> &'static dyn LocalInterruptController<IpiMessage = CpuMessage>;
+    ) -> &'static dyn LocalInterruptController<IpiMessage = CpuMessage> {
+        unimplemented!()
+    }

     /// Returns the monotonic timer
-    fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice;
+    fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice {
+        unimplemented!()
+    }

     /// Sends a message to the requested set of CPUs through an interprocessor interrupt.
     ///
@ -143,7 +166,9 @@ pub trait Architecture {
     /// # Safety
     ///
     /// As the call may alter the flow of execution on CPUs, this function is unsafe.
-    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error>;
+    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error> {
+        Ok(())
+    }

     /// Performs a CPU reset.
     ///
@ -151,5 +176,7 @@ pub trait Architecture {
     ///
     /// The caller must ensure it is actually safe to reset, i.e. no critical processes will be
     /// aborted and no data will be lost.
-    unsafe fn reset(&self) -> !;
+    unsafe fn reset(&self) -> ! {
+        loop {}
+    }
 }
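The new virtualize/physicalize pair formalizes the linear "direct map" view of RAM that the x86-64 implementation later in this commit provides at RAM_MAPPING_OFFSET. A minimal sketch of that translation; both constants are assumed values for illustration (the real ones come from the architecture code and the detected memory limit):

// Assumed values for the sketch: direct-map window base and 16 GiB of RAM.
const RAM_MAPPING_OFFSET: usize = 0xFFFF_FF00_0000_0000;
const MEMORY_LIMIT: usize = 16 << 30;

fn virtualize(phys: usize) -> Option<usize> {
    // Any physical page below the limit is visible through the direct map.
    (phys < MEMORY_LIMIT).then(|| phys + RAM_MAPPING_OFFSET)
}

fn physicalize(virt: usize) -> Option<usize> {
    // Only addresses inside the direct-map window convert back.
    virt.checked_sub(RAM_MAPPING_OFFSET)
        .filter(|&phys| phys < MEMORY_LIMIT)
}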

@ -23,7 +23,7 @@ use crate::{
x86_64::{smp::CPU_COUNT, IrqNumber, SHUTDOWN_FENCE}, x86_64::{smp::CPU_COUNT, IrqNumber, SHUTDOWN_FENCE},
Architecture, CpuMessage, ARCHITECTURE, Architecture, CpuMessage, ARCHITECTURE,
}, },
mem::{heap::GLOBAL_HEAP, ConvertAddress}, mem::{address::FromRaw, heap::GLOBAL_HEAP, PhysicalAddress},
sync::IrqSafeSpinlock, sync::IrqSafeSpinlock,
util, util,
}; };
@ -79,14 +79,19 @@ unsafe impl Allocator for AcpiAllocator {
impl acpi_system::Handler for AcpiHandlerImpl { impl acpi_system::Handler for AcpiHandlerImpl {
unsafe fn map_slice(address: u64, length: u64) -> &'static [u8] { unsafe fn map_slice(address: u64, length: u64) -> &'static [u8] {
if address + length < 0x100000000 { let slice = PhysicalAddress::from_raw(address).virtualize_slice::<u8>(length as usize);
core::slice::from_raw_parts(
(address as usize).virtualize() as *const u8, todo!();
length as usize, // PhysicalPointer::into_raw(slice)
)
} else { // if address + length < 0x100000000 {
panic!("Unhandled address: {:#x}", address) // core::slice::from_raw_parts(
} // (address as usize).virtualize() as *const u8,
// length as usize,
// )
// } else {
// panic!("Unhandled address: {:#x}", address)
// }
} }
fn io_read_u8(port: u16) -> u8 { fn io_read_u8(port: u16) -> u8 {
@ -123,71 +128,39 @@ impl acpi_system::Handler for AcpiHandlerImpl {
} }
fn mem_read_u8(address: u64) -> u8 { fn mem_read_u8(address: u64) -> u8 {
let value = unsafe { (address as *const u8).virtualize().read_volatile() }; todo!()
log::trace!("mem_read_u8 {:#x} <- {:#x}", address, value);
value
} }
fn mem_read_u16(address: u64) -> u16 { fn mem_read_u16(address: u64) -> u16 {
let value = if address & 0x1 == 0 { todo!()
unsafe { (address as *const u16).virtualize().read_volatile() }
} else {
unsafe { (address as *const u16).virtualize().read_unaligned() }
};
log::trace!("mem_read_u16 {:#x} <- {:#x}", address, value);
value
} }
fn mem_read_u32(address: u64) -> u32 { fn mem_read_u32(address: u64) -> u32 {
let value = if address & 0x3 == 0 { todo!()
unsafe { (address as *const u32).virtualize().read_volatile() }
} else {
unsafe { (address as *const u32).virtualize().read_unaligned() }
};
log::trace!("mem_read_u32 {:#x} <- {:#x}", address, value);
value
} }
fn mem_read_u64(address: u64) -> u64 { fn mem_read_u64(address: u64) -> u64 {
let value = if address & 0x7 == 0 { todo!()
unsafe { (address as *const u64).virtualize().read_volatile() }
} else {
unsafe { (address as *const u64).virtualize().read_unaligned() }
};
log::trace!("mem_read_u64 {:#x} <- {:#x}", address, value);
value
} }
fn mem_write_u8(address: u64, value: u8) { fn mem_write_u8(address: u64, value: u8) {
log::trace!("mem_write_u8 {:#x}, {:#x}", address, value); log::trace!("mem_write_u8 {:#x}, {:#x}", address, value);
unsafe { (address as *mut u8).virtualize().write_volatile(value) }; todo!()
} }
fn mem_write_u16(address: u64, value: u16) { fn mem_write_u16(address: u64, value: u16) {
log::trace!("mem_write_u16 {:#x}, {:#x}", address, value); log::trace!("mem_write_u16 {:#x}, {:#x}", address, value);
if address & 0x1 == 0 { todo!()
unsafe { (address as *mut u16).virtualize().write_volatile(value) };
} else {
unsafe { (address as *mut u16).virtualize().write_unaligned(value) };
}
} }
fn mem_write_u32(address: u64, value: u32) { fn mem_write_u32(address: u64, value: u32) {
log::trace!("mem_write_u32 {:#x}, {:#x}", address, value); log::trace!("mem_write_u32 {:#x}, {:#x}", address, value);
if address & 0x3 == 0 { todo!()
unsafe { (address as *mut u32).virtualize().write_volatile(value) };
} else {
unsafe { (address as *mut u32).virtualize().write_unaligned(value) };
}
} }
fn mem_write_u64(address: u64, value: u64) { fn mem_write_u64(address: u64, value: u64) {
log::trace!("mem_write_u64 {:#x}, {:#x}", address, value); log::trace!("mem_write_u64 {:#x}, {:#x}", address, value);
if address & 0x7 == 0 { todo!()
unsafe { (address as *mut u64).virtualize().write_volatile(value) };
} else {
unsafe { (address as *mut u64).virtualize().write_unaligned(value) };
}
} }
fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> { fn install_interrupt_handler(irq: u32) -> Result<(), AcpiSystemError> {
@ -342,17 +315,15 @@ impl AcpiHandler for AcpiHandlerImpl {
physical_address: usize, physical_address: usize,
size: usize, size: usize,
) -> PhysicalMapping<Self, T> { ) -> PhysicalMapping<Self, T> {
if physical_address <= 0xFFFFFFFF { PhysicalMapping::new(
PhysicalMapping::new( physical_address,
physical_address, NonNull::new_unchecked(
NonNull::new_unchecked(physical_address.virtualize() as *mut T), PhysicalAddress::from_raw(physical_address).virtualize_raw() as *mut T
size, ),
size, size,
*self, size,
) *self,
} else { )
todo!()
}
} }
// Unmap nothing, these addresses are "virtualized" to high address space // Unmap nothing, these addresses are "virtualized" to high address space

@ -8,10 +8,19 @@ use device_api::{
}, },
Device, Device,
}; };
use tock_registers::{
interfaces::{Readable, Writeable},
register_structs,
registers::{ReadWrite, WriteOnly},
};
use crate::{ use crate::{
arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber}, arch::x86_64::{acpi::AcpiAllocator, apic::local::BSP_APIC_ID, IrqNumber},
mem::ConvertAddress, mem::{
address::FromRaw,
device::{DeviceMemoryIo, DeviceMemoryMapping},
PhysicalAddress,
},
sync::IrqSafeSpinlock, sync::IrqSafeSpinlock,
}; };
@ -38,12 +47,18 @@ struct IsaRedirection {
trigger: IrqTrigger, trigger: IrqTrigger,
} }
struct Regs { register_structs! {
base: usize, #[allow(non_snake_case)]
Regs {
(0x00 => Index: WriteOnly<u32>),
(0x04 => _0),
(0x10 => Data: ReadWrite<u32>),
(0x14 => @END),
}
} }
struct Inner { struct Inner {
regs: Regs, regs: DeviceMemoryIo<'static, Regs>,
max_gsi: u32, max_gsi: u32,
} }
@ -59,22 +74,14 @@ pub struct IoApic {
impl Regs { impl Regs {
#[inline] #[inline]
fn read(&self, reg: u32) -> u32 { fn read(&self, reg: u32) -> u32 {
let ptr = self.base as *mut u32; self.Index.set(reg);
self.Data.get()
unsafe {
ptr.write_volatile(reg & 0xFF);
ptr.add(4).read_volatile()
}
} }
#[inline] #[inline]
fn write(&self, reg: u32, value: u32) { fn write(&self, reg: u32, value: u32) {
let ptr = self.base as *mut u32; self.Index.set(reg);
self.Data.set(value);
unsafe {
ptr.write_volatile(reg & 0xFF);
ptr.add(4).write_volatile(value);
}
} }
} }
@ -269,8 +276,12 @@ impl IoApic {
} }
// TODO properly map this using DeviceMemory // TODO properly map this using DeviceMemory
let regs = Regs { // let regs = Regs {
base: unsafe { (ioapic.address as usize).virtualize() }, // base: unsafe { PhysicalAddress::from_raw(ioapic.address as u64).virtualize_raw() },
// };
// let mapping = unsafe { DeviceMemoryMapping::map(base, size) };
let regs = unsafe {
DeviceMemoryIo::<'_, Regs>::map(PhysicalAddress::from_raw(ioapic.address as u64))?
}; };
let max_gsi = (regs.read(REG_IOAPIC_VERSION) >> 16) & 0xFF; let max_gsi = (regs.read(REG_IOAPIC_VERSION) >> 16) & 0xFF;
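The rewritten Regs block encodes the I/O APIC's indirect access scheme: a register number is written to Index at offset 0x00 and the selected register is then read or written through Data at offset 0x10. A compile-only sketch of that pattern with tock-registers, mirroring the definition above (the struct name is hypothetical and no real MMIO base is assumed):

use tock_registers::{
    interfaces::{Readable, Writeable},
    register_structs,
    registers::{ReadWrite, WriteOnly},
};

register_structs! {
    #[allow(non_snake_case)]
    IoApicRegs {
        (0x00 => Index: WriteOnly<u32>),
        (0x04 => _0),
        (0x10 => Data: ReadWrite<u32>),
        (0x14 => @END),
    }
}

/// Reads an indirect register through the Index/Data window.
fn ioapic_read(regs: &IoApicRegs, reg: u32) -> u32 {
    regs.Index.set(reg);
    regs.Data.get()
}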

@ -17,7 +17,7 @@ use crate::{
         x86_64::{registers::MSR_IA32_APIC_BASE, smp::CPU_COUNT},
         CpuMessage,
     },
-    mem::ConvertAddress,
+    mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
     task::Cpu,
 };
@ -130,7 +130,7 @@ register_structs! {
 /// Per-processor local APIC interface
 pub struct LocalApic {
-    regs: &'static Regs,
+    regs: DeviceMemoryIo<'static, Regs>,
 }

 unsafe impl Send for LocalApic {}
@ -190,8 +190,7 @@ impl LocalApic {
     ///
     /// Only meant to be called once per processor during their init.
     pub unsafe fn new() -> Self {
-        let base = unsafe { Self::base().virtualize() };
-        let regs = unsafe { &*(base as *const Regs) };
+        let regs = DeviceMemoryIo::<Regs>::map(Self::base()).unwrap();

         let id = regs.Id.read(Id::ApicId);
@ -294,8 +293,8 @@ impl LocalApic {
     }

     #[inline]
-    fn base() -> usize {
-        MSR_IA32_APIC_BASE.read_base() as usize
+    fn base() -> PhysicalAddress {
+        PhysicalAddress::from_raw(MSR_IA32_APIC_BASE.read_base())
     }

     #[inline]

@ -1,5 +1,5 @@
//! x86-64 boot and entry functions //! x86-64 boot and entry functions
use core::{arch::global_asm, sync::atomic::Ordering}; use core::arch::global_asm;
use tock_registers::interfaces::Writeable; use tock_registers::interfaces::Writeable;
use yboot_proto::{ use yboot_proto::{
@ -8,26 +8,20 @@ use yboot_proto::{
}; };
use crate::{ use crate::{
arch::{ arch::{x86_64::registers::MSR_IA32_KERNEL_GS_BASE, Architecture, ArchitectureImpl},
x86_64::{cpuid, exception, registers::MSR_IA32_KERNEL_GS_BASE, BootData},
Architecture, ArchitectureImpl, ARCHITECTURE,
},
fs::devfs, fs::devfs,
kernel_main, kernel_secondary_main, kernel_main,
mem::{ mem::KERNEL_VIRT_OFFSET,
heap,
phys::{self, PageUsage},
ConvertAddress, KERNEL_VIRT_OFFSET,
},
task::runtime, task::runtime,
}; };
use super::smp::CPU_COUNT; use super::{cpuid::init_cpuid, exception, ARCHITECTURE};
// use super::ARCHITECTURE; pub enum BootData {
YBoot(&'static LoadProtocolV1),
}
const BOOT_STACK_SIZE: usize = 1024 * 1024; const BOOT_STACK_SIZE: usize = 1024 * 1024;
const HEAP_PAGES: usize = 512;
#[repr(C, align(0x20))] #[repr(C, align(0x20))]
struct BootStack { struct BootStack {
@ -41,7 +35,7 @@ static mut BSP_STACK: BootStack = BootStack {
#[used] #[used]
#[link_section = ".data.yboot"] #[link_section = ".data.yboot"]
static mut YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 { static YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 {
header: LoadProtocolHeader { header: LoadProtocolHeader {
kernel_magic: KERNEL_MAGIC, kernel_magic: KERNEL_MAGIC,
version: PROTOCOL_VERSION_1, version: PROTOCOL_VERSION_1,
@ -65,32 +59,80 @@ static mut YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 {
res_size: 0, res_size: 0,
}, },
}; };
//
//
// unsafe extern "C" fn __x86_64_upper_entry() -> ! {
// }
//
// /// Application processor entry point
// pub extern "C" fn __x86_64_ap_entry() -> ! {
// let cpu_id = CPU_COUNT.load(Ordering::Acquire);
//
// MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
// unsafe {
// core::arch::asm!("swapgs");
// }
// MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
// unsafe {
// core::arch::asm!("swapgs");
// }
//
// // Still not initialized: GDT, IDT, CPU features, syscall, kernel_gs_base
// cpuid::feature_gate();
//
// infoln!("cpu{} initializing", cpu_id);
// unsafe {
// ARCHITECTURE.init_mmu(false);
// core::arch::asm!("wbinvd");
//
// // Cpu::init_local(LocalApic::new(), cpu_id as u32);
// // syscall::init_syscall();
// exception::init_exceptions(cpu_id);
//
// ARCHITECTURE.init_platform(cpu_id);
// }
//
// CPU_COUNT.fetch_add(1, Ordering::Release);
//
// kernel_secondary_main()
// }
static UNINIT_CPU: usize = 0; unsafe fn init_dummy_cpu() {
// TODO this is incorrect
unsafe extern "C" fn __x86_64_upper_entry() -> ! { static UNINIT_CPU_INNER: usize = 0;
ArchitectureImpl::set_interrupt_mask(true); static UNINIT_CPU_PTR: &'static usize = &UNINIT_CPU_INNER;
// Point %gs to a dummy structure so that Cpu::get_local() works properly even before the CPU // Point %gs to a dummy structure so that Cpu::get_local() works properly even before the CPU
// data structure is initialized // data structure is initialized
MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64); MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU_PTR as *const _ as u64);
core::arch::asm!("swapgs"); core::arch::asm!("swapgs");
MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64); MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU_PTR as *const _ as u64);
core::arch::asm!("swapgs"); core::arch::asm!("swapgs");
}
ARCHITECTURE.init_mmu(true); pub extern "C" fn __x86_64_ap_entry() -> ! {
core::arch::asm!("wbinvd"); loop {}
}
extern "C" fn __x86_64_upper_entry() -> ! {
// Safety: ok, CPU hasn't been initialized yet and it's the early kernel entry
unsafe {
init_dummy_cpu();
}
ARCHITECTURE.set_boot_data(BootData::YBoot(&YBOOT_DATA)); ARCHITECTURE.set_boot_data(BootData::YBoot(&YBOOT_DATA));
ARCHITECTURE
.init_physical_memory()
.expect("Failed to initialize the physical memory manager");
let heap_base = phys::alloc_pages_contiguous(HEAP_PAGES, PageUsage::Used) // Gather available CPU features
.expect("Couldn't allocate memory for heap"); init_cpuid();
heap::init_heap(heap_base.virtualize(), HEAP_PAGES * 0x1000);
exception::init_exceptions(0); // Setup memory management: kernel virtual memory tables, physical page manager and heap
unsafe {
ARCHITECTURE.init_memory_management();
}
unsafe {
exception::init_exceptions(0);
}
// Initialize async executor queue // Initialize async executor queue
runtime::init_task_queue(); runtime::init_task_queue();
@ -98,52 +140,21 @@ unsafe extern "C" fn __x86_64_upper_entry() -> ! {
devfs::init(); devfs::init();
// Initializes: local CPU, platform devices (timers/serials/etc), debug output // Initializes: local CPU, platform devices (timers/serials/etc), debug output
ARCHITECTURE.init_platform(0); unsafe {
ARCHITECTURE.init_platform(0);
cpuid::feature_gate(); }
kernel_main() kernel_main()
} }
/// Application processor entry point
pub extern "C" fn __x86_64_ap_entry() -> ! {
let cpu_id = CPU_COUNT.load(Ordering::Acquire);
MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
unsafe {
core::arch::asm!("swapgs");
}
MSR_IA32_KERNEL_GS_BASE.set(&UNINIT_CPU as *const _ as u64);
unsafe {
core::arch::asm!("swapgs");
}
// Still not initialized: GDT, IDT, CPU features, syscall, kernel_gs_base
cpuid::feature_gate();
infoln!("cpu{} initializing", cpu_id);
unsafe {
ARCHITECTURE.init_mmu(false);
core::arch::asm!("wbinvd");
// Cpu::init_local(LocalApic::new(), cpu_id as u32);
// syscall::init_syscall();
exception::init_exceptions(cpu_id);
ARCHITECTURE.init_platform(cpu_id);
}
CPU_COUNT.fetch_add(1, Ordering::Release);
kernel_secondary_main()
}
global_asm!( global_asm!(
r#" r#"
// {boot_data}
.global __x86_64_entry .global __x86_64_entry
.section .text.entry .section .text.entry
__x86_64_entry: __x86_64_entry:
cli
mov ${yboot_loader_magic}, %edi mov ${yboot_loader_magic}, %edi
cmp %edi, %eax cmp %edi, %eax
je 2f je 2f
@ -166,6 +177,7 @@ __x86_64_entry:
yboot_loader_magic = const LOADER_MAGIC, yboot_loader_magic = const LOADER_MAGIC,
stack_size = const BOOT_STACK_SIZE, stack_size = const BOOT_STACK_SIZE,
stack_bottom = sym BSP_STACK, stack_bottom = sym BSP_STACK,
boot_data = sym YBOOT_DATA,
entry = sym __x86_64_upper_entry, entry = sym __x86_64_upper_entry,
options(att_syntax) options(att_syntax)
); );

@ -4,10 +4,10 @@ use core::{arch::global_asm, cell::UnsafeCell};
 use abi::error::Error;

 use crate::{
-    arch::x86_64::table::KERNEL_TABLES,
+    arch::x86_64::mem::KERNEL_TABLES,
     mem::{
-        phys::{self, PageUsage},
-        ConvertAddress,
+        address::{AsPhysicalAddress, IntoRaw},
+        phys, PhysicalAddress,
     },
     task::context::TaskContextImpl,
 };
@ -65,11 +65,11 @@ impl StackBuilder {
         self.sp
     }

-    fn init_common(&mut self, entry: usize, cr3: usize) {
+    fn init_common(&mut self, entry: usize, cr3: PhysicalAddress) {
         self.push(entry); // address for ret

         // End of common context
-        self.push(cr3); // %cr3
+        self.push(cr3.into_raw()); // %cr3
         self.push(0); // %rbp
         self.push(0); // %fs (TODO)
@ -89,9 +89,9 @@ impl TaskContextImpl for TaskContext {
     fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
         const KERNEL_TASK_PAGES: usize = 32;
-        let stack_base = unsafe {
-            phys::alloc_pages_contiguous(KERNEL_TASK_PAGES, PageUsage::Used)?.virtualize()
-        };
+
+        let stack_base =
+            unsafe { phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw() };

         let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
@ -100,7 +100,7 @@ impl TaskContextImpl for TaskContext {
         stack.push(arg);
         stack.init_common(__x86_64_task_enter_kernel as _, unsafe {
-            KERNEL_TABLES.physical_address()
+            KERNEL_TABLES.as_physical_address()
         });

         let sp = stack.build();
@ -115,10 +115,15 @@ impl TaskContextImpl for TaskContext {
         })
     }

-    fn user(entry: usize, arg: usize, cr3: usize, user_stack_sp: usize) -> Result<Self, Error> {
+    fn user(
+        entry: usize,
+        arg: usize,
+        cr3: PhysicalAddress,
+        user_stack_sp: usize,
+    ) -> Result<Self, Error> {
         const USER_TASK_PAGES: usize = 8;
-        let stack_base =
-            unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES, PageUsage::Used)?.virtualize() };
+
+        let stack_base = unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw() };

         let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
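init_common relies on the stack being pre-seeded so that the context-switch code can pop the saved registers and simply ret into the entry point. A small user-space sketch of the same push-down builder idea, using a heap buffer in place of the kernel's physical pages; the names and frame layout here are illustrative, not the kernel's exact context frame:

/// Minimal push-down stack builder (sketch).
struct StackBuilder {
    base: usize,
    sp: usize,
}

impl StackBuilder {
    fn new(base: usize, size: usize) -> Self {
        Self { base, sp: base + size }
    }

    fn push(&mut self, value: usize) {
        let word = core::mem::size_of::<usize>();
        assert!(self.sp - word >= self.base);
        self.sp -= word;
        // The builder owns this region, so the raw write is in-bounds and aligned.
        unsafe { (self.sp as *mut usize).write(value) };
    }

    fn build(self) -> usize {
        self.sp
    }
}

fn main() {
    // Stand-in for a freshly allocated, usize-aligned kernel stack page.
    let mut buf = vec![0usize; 512];
    let base = buf.as_mut_ptr() as usize;
    let size = buf.len() * core::mem::size_of::<usize>();

    let mut stack = StackBuilder::new(base, size);
    stack.push(0xdead_beef); // entry point: "address for ret"
    stack.push(0); // %rbp
    let sp = stack.build();
    assert_eq!(sp, base + size - 2 * core::mem::size_of::<usize>());
}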

@ -1,9 +1,13 @@
 //! x86-64 CPUID interface
-use tock_registers::interfaces::ReadWriteable;

-use crate::arch::x86_64::registers::CR4;
+use bitflags::bitflags;
+use kernel_util::util::OneTimeInit;

-use super::registers::XCR0;
+bitflags! {
+    pub struct ProcessorFeatures: u64 {
+        const PDPE1GB = 1 << 0;
+    }
+}

 unsafe fn cpuid(eax: u32, result: &mut [u32]) {
     core::arch::asm!(
@ -21,60 +25,19 @@ unsafe fn cpuid(eax: u32, result: &mut [u32]) {
     );
 }

-type RequiredBit = (u32, &'static str);
+pub static PROCESSOR_FEATURES: OneTimeInit<ProcessorFeatures> = OneTimeInit::new();

-const EAX1_ECX_REQUIRED_FEATURES: &[RequiredBit] = &[
-    (1 << 0, "SSE3"),
-    (1 << 19, "SSE4.1"),
-    (1 << 20, "SSE4.2"),
-    // (1 << 24, "TSC"),
-    (1 << 26, "XSAVE"),
-    (1 << 28, "AVX"),
-];
-
-const EAX1_EDX_REQUIRED_FEATURES: &[RequiredBit] = &[
-    (1 << 0, "FPU"),
-    (1 << 3, "PSE"),
-    (1 << 4, "TSC (%edx)"),
-    (1 << 5, "MSR"),
-    (1 << 6, "PAE"),
-    (1 << 9, "APIC"),
-    (1 << 13, "PGE"),
-    (1 << 23, "MMX"),
-    (1 << 24, "FXSR"),
-    (1 << 25, "SSE"),
-    (1 << 26, "SSE2"),
-];
-
-fn enable_cr4_features() {
-    // TODO maybe also include FSGSBASE here?
-    CR4.modify(CR4::OSXSAVE::SET + CR4::OSFXSR::SET + CR4::PGE::SET);
-}
-
-fn enable_xcr0_features() {
-    XCR0.modify(XCR0::X87::SET + XCR0::SSE::SET + XCR0::AVX::SET);
-}
-
-/// Checks for the features required by the kernel and enables them
-pub fn feature_gate() {
-    // TODO the compiler may have generated instructions from SSE/AVX sets by now, find some way to
-    // perform this as early as possible
+pub fn init_cpuid() {
+    let mut features = ProcessorFeatures::empty();
+
     let mut data = [0; 3];

     unsafe {
-        cpuid(1, &mut data);
+        cpuid(0x80000001, &mut data);
     }

-    for (bit, name) in EAX1_ECX_REQUIRED_FEATURES {
-        if data[2] & bit == 0 {
-            panic!("Required feature not supported: {}", name);
-        }
-    }
-
-    for (bit, name) in EAX1_EDX_REQUIRED_FEATURES {
-        if data[1] & bit == 0 {
-            panic!("Required feature not supported: {}", name);
-        }
-    }
+    if data[1] & (1 << 26) != 0 {
+        features |= ProcessorFeatures::PDPE1GB;
+    }

-    // Enable the SSE/AVX features
-    enable_cr4_features();
-    enable_xcr0_features();
+    PROCESSOR_FEATURES.init(features);
 }
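The probe above issues CPUID leaf 0x80000001 and tests EDX bit 26 (PDPE1GB, 1 GiB page support). For comparison, the same check can be written with the compiler's CPUID intrinsic; a small user-space sketch:

#[cfg(target_arch = "x86_64")]
fn has_1gib_pages() -> bool {
    // Leaf 0x80000001, EDX bit 26 = PDPE1GB, matching the kernel's check above.
    let r = unsafe { core::arch::x86_64::__cpuid(0x8000_0001) };
    r.edx & (1 << 26) != 0
}

#[cfg(not(target_arch = "x86_64"))]
fn has_1gib_pages() -> bool {
    false
}

fn main() {
    println!("1 GiB pages supported: {}", has_1gib_pages());
}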

src/arch/x86_64/mem/mod.rs (new file, 318 lines)

@ -0,0 +1,318 @@
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
};
use abi::error::Error;
use kernel_util::util::OneTimeInit;
use memtables::FixedTables;
use static_assertions::{const_assert_eq, const_assert_ne};
pub mod table;
use crate::{
arch::x86_64::mem::table::PageAttributes,
mem::{
address::{FromRaw, IntoRaw, KernelImageObject},
device::RawDeviceMemoryMapping,
table::EntryLevel,
PhysicalAddress, KERNEL_VIRT_OFFSET,
},
};
use self::table::{PageEntry, PageTable, L0, L1, L2, L3};
const CANONICAL_ADDRESS_MASK: usize = 0xFFFF000000000000;
const KERNEL_PHYS_BASE: usize = 0x400000;
// Mapped at compile time
const KERNEL_L0_INDEX: usize = L0::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_L1_INDEX: usize = L1::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_START_L2_INDEX: usize = L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
// Must not be zero, should be at 4MiB
const_assert_ne!(KERNEL_START_L2_INDEX, 0);
// From static mapping
const_assert_eq!(KERNEL_L0_INDEX, 511);
const_assert_eq!(KERNEL_L1_INDEX, 0);
// Mapped at boot
const EARLY_MAPPING_L2I: usize = KERNEL_START_L2_INDEX - 1;
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
// 2MiB for early mappings
const EARLY_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK
| (KERNEL_L0_INDEX * L0::SIZE)
| (KERNEL_L1_INDEX * L1::SIZE)
| (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for heap mapping
pub(super) const HEAP_MAPPING_OFFSET: usize =
CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (HEAP_MAPPING_L1I * L1::SIZE);
pub(super) static mut HEAP_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize =
CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (DEVICE_MAPPING_L1I * L1::SIZE);
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
[PageTable::zeroed(); DEVICE_MAPPING_L3_COUNT];
// 512GiB for whole RAM mapping
pub(super) const RAM_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK | (RAM_MAPPING_L0I * L0::SIZE);
pub(super) static MEMORY_LIMIT: OneTimeInit<usize> = OneTimeInit::new();
pub(super) static mut RAM_MAPPING_L1: PageTable<L1> = PageTable::zeroed();
// Global limits
pub(super) const HEAP_SIZE_LIMIT: usize = L1::SIZE;
// Early mappings
unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usize, Error> {
for l3i in 0..512 {
let mut taken = false;
for i in 0..count {
if EARLY_MAPPING_L3[i + l3i].is_present() {
taken = true;
break;
}
}
if taken {
continue;
}
for i in 0..count {
// TODO NX, NC
EARLY_MAPPING_L3[i + l3i] =
PageEntry::page(physical.add(i * L3::SIZE), PageAttributes::WRITABLE);
}
return Ok(EARLY_MAPPING_OFFSET + l3i * L3::SIZE);
}
loop {}
}
unsafe fn unmap_early_page(address: usize) {
if address < EARLY_MAPPING_OFFSET || address >= EARLY_MAPPING_OFFSET + L2::SIZE {
loop {}
}
let l3i = L3::index(address - EARLY_MAPPING_OFFSET);
assert!(EARLY_MAPPING_L3[l3i].is_present());
EARLY_MAPPING_L3[l3i] = PageEntry::INVALID;
}
// Device mappings
unsafe fn map_device_memory_l3(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
// TODO don't map pages if already mapped
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
continue 'l0;
}
}
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
// TODO NX, NC
DEVICE_MAPPING_L3S[l2i][l3i] =
PageEntry::page(base.add(j * L3::SIZE), PageAttributes::WRITABLE);
}
return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
}
loop {}
}
unsafe fn map_device_memory_l2(base: PhysicalAddress, count: usize) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
if DEVICE_MAPPING_L2[i + j].is_present() {
continue 'l0;
}
}
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::WRITABLE);
}
debugln!(
"map l2s: base={:#x}, count={} -> {:#x}",
base,
count,
DEVICE_MAPPING_OFFSET + i * L2::SIZE
);
return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
}
loop {}
}
pub(super) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
) -> Result<RawDeviceMemoryMapping, Error> {
debugln!("Map {}B @ {:#x}", size, base);
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = L3::page_offset(base.into_raw());
let page_count = (l3_offset + size + L3::SIZE - 1) / L3::SIZE;
if page_count > 256 {
// Large mapping, use L2 mapping instead
let l2_aligned = base.page_align_down::<L2>();
let l2_offset = L2::page_offset(base.into_raw());
let page_count = (l2_offset + size + L2::SIZE - 1) / L2::SIZE;
let base_address = map_device_memory_l2(l2_aligned, page_count)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping {
address,
base_address,
page_count,
page_size: L2::SIZE,
})
} else {
let page_size = L3::SIZE;
// Just map the pages directly
let base_address = map_device_memory_l3(l3_aligned, page_count)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping {
address,
base_address,
page_count,
page_size: L3::SIZE,
})
}
}
pub(super) unsafe fn unmap_device_memory(map: RawDeviceMemoryMapping) {
loop {}
}
pub(super) unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {
if L2::page_offset(page.into_raw()) != 0 {
loop {}
}
assert!(index < 512);
if HEAP_MAPPING_L2[index].is_present() {
loop {}
}
// TODO NX
HEAP_MAPPING_L2[index] = PageEntry::<L2>::block(page, PageAttributes::WRITABLE);
}
pub struct EarlyMapping<'a, T: ?Sized> {
value: &'a mut T,
page_count: usize,
}
impl<'a, T: Sized> EarlyMapping<'a, T> {
pub unsafe fn map_slice(
physical: PhysicalAddress,
len: usize,
) -> Result<EarlyMapping<'a, [T]>, Error> {
let layout = Layout::array::<T>(len).unwrap();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let virt = map_early_pages(aligned, page_count)?;
let value = core::slice::from_raw_parts_mut((virt + offset) as *mut T, len);
Ok(EarlyMapping { value, page_count })
}
}
impl<'a, T: ?Sized> Deref for EarlyMapping<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.value
}
}
impl<'a, T: ?Sized> DerefMut for EarlyMapping<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<'a, T: ?Sized> Drop for EarlyMapping<'a, T> {
fn drop(&mut self) {
let address = (self.value as *mut T).addr() & !(L3::SIZE - 1);
for i in 0..self.page_count {
let page = address + i * L3::SIZE;
unsafe {
unmap_early_page(page);
}
}
}
}
fn clone_kernel_tables(dst: &mut PageTable<L0>) {
unsafe {
dst[KERNEL_L0_INDEX] = PageEntry::from_raw(KERNEL_TABLES.l0.data[KERNEL_L0_INDEX]);
dst[RAM_MAPPING_L0I] = PageEntry::from_raw(KERNEL_TABLES.l0.data[RAM_MAPPING_L0I]);
}
}
/// Sets up the following memory map:
/// ...: KERNEL_TABLES.l0:
/// * 0xFFFFFF0000000000 .. 0xFFFFFFFF8000000000 : RAM_MAPPING_L1
/// * 0xFFFFFF8000000000 .. ... : KERNEL_TABLES.kernel_l1:
/// * 0xFFFFFF8000000000 .. 0xFFFFFF8040000000 : KERNEL_TABLES.kernel_l2
/// * 0xFFFFFF8000000000 .. 0xFFFFFF8000200000 : ---
/// * 0xFFFFFF8000200000 .. 0xFFFFFF8000400000 : EARLY_MAPPING_L3
/// * 0xFFFFFF8000400000 .. ... : KERNEL_TABLES.kernel_l3s
/// * 0xFFFFFF8040000000 .. 0xFFFFFF8080000000 : HEAP_MAPPING_L2
/// * 0xFFFFFF8080000000 .. 0xFFFFFF8100000000 : DEVICE_MAPPING_L2
/// * 0xFFFFFF8080000000 .. 0xFFFFFF8080800000 : DEVICE_MAPPING_L3S
/// * 0xFFFFFF8080800000 .. 0xFFFFFF8100000000 : ...
pub unsafe fn init_fixed_tables() {
let early_mapping_l3_phys = &EARLY_MAPPING_L3 as *const _ as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = &DEVICE_MAPPING_L2 as *const _ as usize - KERNEL_VIRT_OFFSET;
let heap_mapping_l2_phys = &HEAP_MAPPING_L2 as *const _ as usize - KERNEL_VIRT_OFFSET;
let ram_mapping_l1_phys = &RAM_MAPPING_L1 as *const _ as usize - KERNEL_VIRT_OFFSET;
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys = PhysicalAddress::from_raw(
&DEVICE_MAPPING_L3S[i] as *const _ as usize - KERNEL_VIRT_OFFSET,
);
DEVICE_MAPPING_L2[i] = PageEntry::table(device_mapping_l3_phys, PageAttributes::WRITABLE);
}
KERNEL_TABLES.kernel_l2.data[EARLY_MAPPING_L2I] = (early_mapping_l3_phys as u64)
| (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
KERNEL_TABLES.kernel_l1.data[HEAP_MAPPING_L1I] =
(heap_mapping_l2_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
KERNEL_TABLES.kernel_l1.data[DEVICE_MAPPING_L1I] = (device_mapping_l2_phys as u64)
| (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
KERNEL_TABLES.l0.data[RAM_MAPPING_L0I] =
(ram_mapping_l1_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
let cr3 = &KERNEL_TABLES.l0 as *const _ as usize - KERNEL_VIRT_OFFSET;
core::arch::asm!("wbinvd; mov {0}, %cr3", in(reg) cr3, options(att_syntax));
}
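As a quick sanity check on the layout described above, the fixed offsets follow directly from the index constants; a small runnable sketch reproducing the arithmetic (index and mask values taken from the constants earlier in this file):

fn main() {
    const CANONICAL_ADDRESS_MASK: usize = 0xFFFF_0000_0000_0000;
    const L0_SIZE: usize = 1 << 39; // 512 GiB per L0 entry
    const L1_SIZE: usize = 1 << 30; // 1 GiB per L1 entry

    const KERNEL_L0_INDEX: usize = 511;
    const KERNEL_L1_INDEX: usize = 0;
    const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
    const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
    const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;

    let kernel_base = CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0_SIZE);
    let heap_base = kernel_base | (HEAP_MAPPING_L1I * L1_SIZE);
    let device_base = kernel_base | (DEVICE_MAPPING_L1I * L1_SIZE);
    let ram_base = CANONICAL_ADDRESS_MASK | (RAM_MAPPING_L0I * L0_SIZE);

    assert_eq!(kernel_base, 0xFFFF_FF80_0000_0000);
    assert_eq!(heap_base, 0xFFFF_FF80_4000_0000);
    assert_eq!(device_base, 0xFFFF_FF80_8000_0000);
    assert_eq!(ram_base, 0xFFFF_FF00_0000_0000);
}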

@ -1,5 +1,3 @@
//! x86-64 virtual memory management implementation
use core::{ use core::{
marker::PhantomData, marker::PhantomData,
ops::{Index, IndexMut}, ops::{Index, IndexMut},
@ -8,21 +6,24 @@ use core::{
use abi::error::Error; use abi::error::Error;
use bitflags::bitflags; use bitflags::bitflags;
mod fixed; use crate::{
mem::{
pub use fixed::{init_fixed_tables, KERNEL_TABLES}; address::{AsPhysicalAddress, FromRaw},
phys,
use crate::mem::{ pointer::{PhysicalRef, PhysicalRefMut},
phys::{self, PageUsage}, table::{
table::{ EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel, VirtualMemoryManager,
EntryLevel, MapAttributes, NextPageTable, NonTerminalEntryLevel, VirtualMemoryManager, },
PhysicalAddress,
}, },
ConvertAddress, sync::IrqSafeSpinlock,
}; };
use super::{clone_kernel_tables, KERNEL_TABLES};
bitflags! { bitflags! {
/// Describes how each page table entry is mapped /// Describes how each page table entry is mapped
struct PageAttributes: u64 { pub struct PageAttributes: u64 {
/// When set, the mapping is considered valid and pointing somewhere /// When set, the mapping is considered valid and pointing somewhere
const PRESENT = 1 << 0; const PRESENT = 1 << 0;
/// For tables, allows writes to further translation levels, for pages/blocks, allows /// For tables, allows writes to further translation levels, for pages/blocks, allows
@ -41,7 +42,7 @@ bitflags! {
/// TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space. /// TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space.
#[repr(C)] #[repr(C)]
pub struct AddressSpace { pub struct AddressSpace {
l0: *mut PageTable<L0>, inner: IrqSafeSpinlock<PhysicalRefMut<'static, PageTable<L0>>>,
} }
/// Represents a single virtual address space mapping depending on its translation level /// Represents a single virtual address space mapping depending on its translation level
@ -80,6 +81,8 @@ impl NonTerminalEntryLevel for L2 {
} }
impl const EntryLevel for L0 { impl const EntryLevel for L0 {
const SIZE: usize = 1 << 39;
fn index(addr: usize) -> usize { fn index(addr: usize) -> usize {
(addr >> 39) & 0x1FF (addr >> 39) & 0x1FF
} }
@ -90,6 +93,8 @@ impl const EntryLevel for L0 {
} }
impl const EntryLevel for L1 { impl const EntryLevel for L1 {
const SIZE: usize = 1 << 30;
fn index(addr: usize) -> usize { fn index(addr: usize) -> usize {
(addr >> 30) & 0x1FF (addr >> 30) & 0x1FF
} }
@ -100,6 +105,8 @@ impl const EntryLevel for L1 {
} }
impl const EntryLevel for L2 { impl const EntryLevel for L2 {
const SIZE: usize = 1 << 21;
fn index(addr: usize) -> usize { fn index(addr: usize) -> usize {
(addr >> 21) & 0x1FF (addr >> 21) & 0x1FF
} }
@ -110,6 +117,8 @@ impl const EntryLevel for L2 {
} }
impl const EntryLevel for L3 { impl const EntryLevel for L3 {
const SIZE: usize = 1 << 12;
fn index(addr: usize) -> usize { fn index(addr: usize) -> usize {
(addr >> 12) & 0x1FF (addr >> 12) & 0x1FF
} }
@ -121,18 +130,18 @@ impl const EntryLevel for L3 {
impl PageEntry<L3> { impl PageEntry<L3> {
/// Constructs a mapping which points to a 4KiB page /// Constructs a mapping which points to a 4KiB page
fn page(phys: usize, attrs: PageAttributes) -> Self { pub fn page(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
Self( Self(
(phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::USER).bits(), u64::from(phys) | (attrs | PageAttributes::PRESENT | PageAttributes::USER).bits(),
PhantomData, PhantomData,
) )
} }
/// Returns the physical address of the page this entry refers to, returning None if it does /// Returns the physical address of the page this entry refers to, returning None if it does
/// not /// not
pub fn as_page(self) -> Option<usize> { pub fn as_page(self) -> Option<PhysicalAddress> {
if self.0 & PageAttributes::PRESENT.bits() != 0 { if self.0 & PageAttributes::PRESENT.bits() != 0 {
Some((self.0 & !0xFFF) as usize) Some(PhysicalAddress::from_raw(self.0 & !0xFFF))
} else { } else {
None None
} }
@ -141,11 +150,18 @@ impl PageEntry<L3> {
impl PageEntry<L2> { impl PageEntry<L2> {
/// Constructs a mapping which points to a 2MiB block /// Constructs a mapping which points to a 2MiB block
fn block(phys: usize, attrs: PageAttributes) -> Self { pub fn block(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
Self( Self(
(phys as u64) u64::from(phys) | (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK).bits(),
| (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK | PageAttributes::USER) PhantomData,
.bits(), )
}
}
impl PageEntry<L1> {
pub unsafe fn block(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
Self(
u64::from(phys) | (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK).bits(),
PhantomData, PhantomData,
) )
} }
@ -153,9 +169,9 @@ impl PageEntry<L2> {
impl<L: NonTerminalEntryLevel> PageEntry<L> { impl<L: NonTerminalEntryLevel> PageEntry<L> {
/// Constructs a mapping which points to a next-level table /// Constructs a mapping which points to a next-level table
fn table(phys: usize, attrs: PageAttributes) -> Self { pub fn table(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
Self( Self(
(phys as u64) u64::from(phys)
| (attrs | (attrs
| PageAttributes::PRESENT | PageAttributes::PRESENT
| PageAttributes::WRITABLE | PageAttributes::WRITABLE
@ -167,11 +183,11 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
/// Returns the physical address of the table this entry refers to, returning None if it /// Returns the physical address of the table this entry refers to, returning None if it
/// does not /// does not
pub fn as_table(self) -> Option<usize> { pub fn as_table(self) -> Option<PhysicalAddress> {
if self.0 & PageAttributes::PRESENT.bits() != 0 if self.0 & PageAttributes::PRESENT.bits() != 0
&& self.0 & PageAttributes::BLOCK.bits() == 0 && self.0 & PageAttributes::BLOCK.bits() == 0
{ {
Some((self.0 & !0xFFF) as usize) Some(PhysicalAddress::from_raw(self.0 & !0xFFF))
} else { } else {
None None
} }
@ -182,6 +198,10 @@ impl<L: EntryLevel> PageEntry<L> {
/// An entry that is not mapped /// An entry that is not mapped
pub const INVALID: Self = Self(0, PhantomData); pub const INVALID: Self = Self(0, PhantomData);
pub const unsafe fn from_raw(raw: u64) -> Self {
Self(raw, PhantomData)
}
/// Returns `true` if the entry contains a valid mapping to either a table or to a page/block /// Returns `true` if the entry contains a valid mapping to either a table or to a page/block
pub fn is_present(&self) -> bool { pub fn is_present(&self) -> bool {
self.0 & PageAttributes::PRESENT.bits() != 0 self.0 & PageAttributes::PRESENT.bits() != 0
@ -197,41 +217,49 @@ impl<L: EntryLevel> PageTable<L> {
} }
    /// Allocates a new page table, filling it with non-present entries
pub fn new_zeroed() -> Result<&'static mut Self, Error> { pub fn new_zeroed<'a>() -> Result<PhysicalRefMut<'a, Self>, Error> {
let page = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() }; let physical = phys::alloc_page()?;
let table = unsafe { &mut *(page as *mut Self) }; let mut table = unsafe { PhysicalRefMut::<'a, Self>::map(physical) };
for i in 0..512 { for i in 0..512 {
table[i] = PageEntry::INVALID; table[i] = PageEntry::INVALID;
} }
Ok(table) Ok(table)
} }
/// Returns the physical address of this table // /// Returns the physical address of this table
pub fn physical_address(&self) -> usize { // pub fn physical_address(&self) -> usize {
unsafe { (self.data.as_ptr() as usize).physicalize() } // unsafe { (self.data.as_ptr() as usize).physicalize() }
} // }
} }
impl<L: NonTerminalEntryLevel> NextPageTable for PageTable<L> { impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>; type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, Self::NextLevel>;
type TableRefMut = PhysicalRefMut<'static, Self::NextLevel>;
fn get_mut(&mut self, index: usize) -> Option<&'static mut Self::NextLevel> { fn get(&self, index: usize) -> Option<Self::TableRef> {
let entry = self[index]; self[index]
entry
.as_table() .as_table()
.map(|addr| unsafe { &mut *(addr.virtualize() as *mut Self::NextLevel) }) .map(|addr| unsafe { PhysicalRef::map(addr) })
} }
fn get_mut_or_alloc(&mut self, index: usize) -> Result<&'static mut Self::NextLevel, Error> { fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
self[index]
.as_table()
.map(|addr| unsafe { PhysicalRefMut::map(addr) })
}
fn get_mut_or_alloc(&mut self, index: usize) -> Result<Self::TableRefMut, Error> {
let entry = self[index]; let entry = self[index];
if let Some(table) = entry.as_table() { if let Some(table) = entry.as_table() {
Ok(unsafe { &mut *(table.virtualize() as *mut Self::NextLevel) }) Ok(unsafe { PhysicalRefMut::map(table) })
} else { } else {
let table = PageTable::new_zeroed()?; let table = PageTable::new_zeroed()?;
self[index] = PageEntry::<L>::table( self[index] = PageEntry::<L>::table(
table.physical_address(), unsafe { table.as_physical_address() },
PageAttributes::WRITABLE | PageAttributes::USER, PageAttributes::WRITABLE | PageAttributes::USER,
); );
Ok(table) Ok(table)
@ -285,7 +313,7 @@ impl VirtualMemoryManager for AddressSpace {
} }
for i in 0..len { for i in 0..len {
let page = phys::alloc_page(PageUsage::Used)?; let page = phys::alloc_page()?;
self.map_page(base + i * 0x1000, page, attrs)?; self.map_page(base + i * 0x1000, page, attrs)?;
} }
@ -295,7 +323,12 @@ impl VirtualMemoryManager for AddressSpace {
Err(Error::OutOfMemory) Err(Error::OutOfMemory)
} }
fn map_page(&self, virt: usize, phys: usize, attrs: MapAttributes) -> Result<(), Error> { fn map_page(
&self,
virt: usize,
phys: PhysicalAddress,
attrs: MapAttributes,
) -> Result<(), Error> {
self.write_entry(virt, PageEntry::page(phys, attrs.into()), true) self.write_entry(virt, PageEntry::page(phys, attrs.into()), true)
} }
@ -318,49 +351,50 @@ impl VirtualMemoryManager for AddressSpace {
impl AddressSpace { impl AddressSpace {
/// Allocates an empty address space with all entries marked as non-present /// Allocates an empty address space with all entries marked as non-present
pub fn new_empty() -> Result<Self, Error> { pub fn new_empty() -> Result<Self, Error> {
let l0 = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() as *mut PageTable<L0> }; let mut l0 = unsafe { PhysicalRefMut::<'static, PageTable<L0>>::map(phys::alloc_page()?) };
for i in 0..512 { for i in 0..512 {
unsafe { unsafe {
(*l0)[i] = PageEntry::INVALID; l0[i] = PageEntry::INVALID;
} }
} }
unsafe {
KERNEL_TABLES.clone_into(&mut *l0);
}
Ok(Self { l0 }) clone_kernel_tables(&mut *l0);
}
unsafe fn as_mut(&self) -> &'static mut PageTable<L0> { Ok(Self {
self.l0.as_mut().unwrap() inner: IrqSafeSpinlock::new(l0),
})
} }
// TODO return page size and attributes // TODO return page size and attributes
/// Returns the physical address to which the `virt` address is mapped /// Returns the physical address to which the `virt` address is mapped
pub fn translate(&self, virt: usize) -> Option<usize> { pub fn translate(&self, virt: usize) -> Option<PhysicalAddress> {
let l0 = self.inner.lock();
let l0i = L0::index(virt); let l0i = L0::index(virt);
let l1i = L1::index(virt); let l1i = L1::index(virt);
let l2i = L2::index(virt); let l2i = L2::index(virt);
let l3i = L3::index(virt); let l3i = L3::index(virt);
let l1 = unsafe { self.as_mut().get_mut(l0i) }?; let l1 = l0.get(l0i)?;
let l2 = l1.get_mut(l1i)?; let l2 = l1.get(l1i)?;
let l3 = l2.get_mut(l2i)?; let l3 = l2.get(l2i)?;
l3[l3i].as_page() l3[l3i].as_page()
} }
// Write a single 4KiB entry // Write a single 4KiB entry
fn write_entry(&self, virt: usize, entry: PageEntry<L3>, overwrite: bool) -> Result<(), Error> { fn write_entry(&self, virt: usize, entry: PageEntry<L3>, overwrite: bool) -> Result<(), Error> {
let mut l0 = self.inner.lock();
let l0i = L0::index(virt); let l0i = L0::index(virt);
let l1i = L1::index(virt); let l1i = L1::index(virt);
let l2i = L2::index(virt); let l2i = L2::index(virt);
let l3i = L3::index(virt); let l3i = L3::index(virt);
let l1 = unsafe { self.as_mut().get_mut_or_alloc(l0i) }?; let mut l1 = l0.get_mut_or_alloc(l0i)?;
let l2 = l1.get_mut_or_alloc(l1i)?; let mut l2 = l1.get_mut_or_alloc(l1i)?;
let l3 = l2.get_mut_or_alloc(l2i)?; let mut l3 = l2.get_mut_or_alloc(l2i)?;
if l3[l3i].is_present() && !overwrite { if l3[l3i].is_present() && !overwrite {
todo!(); todo!();
@ -376,10 +410,7 @@ impl AddressSpace {
} }
/// Returns the physical address of the root table /// Returns the physical address of the root table
pub fn physical_address(&self) -> usize { pub fn physical_address(&self) -> PhysicalAddress {
unsafe { (self.l0 as usize).physicalize() } unsafe { self.inner.lock().as_physical_address() }
} }
} }
unsafe impl Send for AddressSpace {}
unsafe impl Sync for AddressSpace {}
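For reference, translate() and write_entry() above derive their four table indices from fixed bit fields of the canonical virtual address. A runnable sketch of the decomposition; the shift and mask values match the EntryLevel impls in this file:

/// Splits a 48-bit virtual address into (L0, L1, L2, L3) indices plus the page offset.
fn split(virt: usize) -> (usize, usize, usize, usize, usize) {
    let l0i = (virt >> 39) & 0x1FF;
    let l1i = (virt >> 30) & 0x1FF;
    let l2i = (virt >> 21) & 0x1FF;
    let l3i = (virt >> 12) & 0x1FF;
    (l0i, l1i, l2i, l3i, virt & 0xFFF)
}

fn main() {
    // Example: a typical user-space address near the top of the lower half.
    let parts = split(0x0000_7FFF_FFFF_E123);
    assert_eq!(parts, (0xFF, 0x1FF, 0x1FF, 0x1FE, 0x123));
}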

@ -1,176 +1,113 @@
//! x86-64 architecture and platform implementation use core::{mem::size_of, sync::atomic::Ordering};
use core::sync::atomic::Ordering;
use abi::error::Error; use abi::error::Error;
use acpi_lib::{mcfg::Mcfg, AcpiTables, InterruptModel}; use acpi_lib::{AcpiHandler, AcpiTable, AcpiTables, InterruptModel};
use alloc::boxed::Box; use alloc::boxed::Box;
use cpu::Cpu;
use device_api::{ use device_api::{
input::KeyboardProducer, input::KeyboardProducer, interrupt::ExternalInterruptController,
interrupt::{ExternalInterruptController, IpiDeliveryTarget, LocalInterruptController}, timer::MonotonicTimestampProviderDevice, Device,
timer::MonotonicTimestampProviderDevice,
Device,
}; };
use git_version::git_version; use git_version::git_version;
use kernel_util::util::OneTimeInit; use kernel_util::util::OneTimeInit;
use yboot_proto::{AvailableRegion, IterableMemoryMap}; use yboot_proto::{v1::AvailableMemoryRegion, LoadProtocolV1};
mod acpi;
mod apic;
mod boot;
pub mod context;
pub mod cpu;
mod cpuid;
mod exception;
mod gdt;
mod intrinsics;
pub mod mem;
mod peripherals;
mod registers;
mod smp;
mod syscall;
use crate::{ use crate::{
arch::x86_64::{ arch::x86_64::{
apic::local::LocalApic, intrinsics::{IoPort, IoPortAccess},
peripherals::serial::ComPort, mem::{map_heap_block, table::L2, HEAP_MAPPING_OFFSET},
table::{init_fixed_tables, KERNEL_TABLES},
}, },
debug::{self, LogLevel}, debug::{self, LogLevel},
device::{ device::{
self, self,
bus::pci::PciBusManager,
display::{console, fb_console::FramebufferConsole, linear_fb::LinearFramebuffer}, display::{console, fb_console::FramebufferConsole, linear_fb::LinearFramebuffer},
tty::combined::CombinedTerminal, tty::CombinedTerminal,
}, },
fs::{ fs::{
devfs::{self, CharDeviceType}, devfs::{self, CharDeviceType},
Initrd, INITRD_DATA, Initrd, INITRD_DATA,
}, },
mem::{ mem::{
address::{FromRaw, IntoRaw},
device::RawDeviceMemoryMapping,
heap,
phys::{self, reserved::reserve_region, PhysicalMemoryRegion}, phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
ConvertAddress, table::EntryLevel,
PhysicalAddress,
}, },
panic,
sync::SpinFence, sync::SpinFence,
CPU_INIT_FENCE,
}; };
use self::{ use self::{
acpi::{AcpiAllocator, AcpiHandlerImpl}, acpi::{AcpiAllocator, AcpiHandlerImpl},
apic::ioapic::IoApic, apic::{ioapic::IoApic, local::LocalApic},
intrinsics::{IoPort, IoPortAccess}, boot::BootData,
peripherals::{i8253::I8253, ps2::PS2Controller}, cpu::Cpu,
cpuid::{ProcessorFeatures, PROCESSOR_FEATURES},
mem::{
init_fixed_tables,
table::{PageAttributes, PageEntry, L1, L3},
EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1, RAM_MAPPING_OFFSET,
},
peripherals::{i8253::I8253, ps2::PS2Controller, serial::ComPort},
smp::CPU_COUNT, smp::CPU_COUNT,
}; };
use super::{Architecture, CpuMessage}; use super::{Architecture, CpuMessage};
#[macro_use] #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub mod intrinsics;
pub mod acpi;
pub mod apic;
pub mod boot;
pub mod context;
pub mod cpu;
pub mod cpuid;
pub mod exception;
pub mod gdt;
pub mod peripherals;
pub mod registers;
pub mod smp;
pub mod syscall;
pub mod table;
/// x86-64 interrupt number wrapper
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum IrqNumber { pub enum IrqNumber {
/// Legacy (ISA) interrupt. Can have value in range 0..16.
Isa(u8), Isa(u8),
/// Global system interrupt. Means an external interrupt for I/O APIC.
Gsi(u8), Gsi(u8),
} }
/// Helper trait to provide abstract access to available memory regions
pub trait AbstractAvailableRegion {
/// Returns page-aligned physical start address of the region
fn start_address(&self) -> usize;
/// Returns the page count (rounded down) of this region
fn page_count(&self) -> usize;
}
/// Helper trait to provide abstract access to memory maps
pub trait AbstractMemoryMap<'a>: 'a {
/// Available memory region type contained within this memory map
type AvailableRegion: AbstractAvailableRegion;
/// Iterator type returned by [Self::iter]
type Iter: Iterator<Item = &'a Self::AvailableRegion> + Clone;
/// Returns the physical memory range which contains this memory map
fn reserved_range(&self) -> PhysicalMemoryRegion;
/// Returns an iterator over the available memory regions
fn iter(&self) -> Self::Iter;
}
impl<T: AvailableRegion> AbstractAvailableRegion for T {
fn start_address(&self) -> usize {
<T as AvailableRegion>::start_address(self)
}
fn page_count(&self) -> usize {
<T as AvailableRegion>::page_count(self)
}
}
impl<'a, T: IterableMemoryMap<'a> + 'a> AbstractMemoryMap<'a> for T {
type AvailableRegion = T::Entry;
type Iter = T::Iter;
fn reserved_range(&self) -> PhysicalMemoryRegion {
PhysicalMemoryRegion {
base: self.data_physical_base(),
size: (self.data_size() + 0xFFF) & !0xFFF,
}
}
fn iter(&self) -> Self::Iter {
<T as IterableMemoryMap>::iter_with_offset(self, X86_64::KERNEL_VIRT_OFFSET)
}
}
/// Describes which kind of bootloader data was provided to the kernel
pub enum BootData {
/// [yboot_proto::LoadProtocolV1]
YBoot(&'static yboot_proto::LoadProtocolV1),
}
/// x86-64 architecture + platform implementation
pub struct X86_64 { pub struct X86_64 {
boot_data: OneTimeInit<BootData>, boot_data: OneTimeInit<BootData>,
acpi: OneTimeInit<AcpiTables<AcpiHandlerImpl>>, acpi: OneTimeInit<AcpiTables<AcpiHandlerImpl>>,
// Display
framebuffer: OneTimeInit<LinearFramebuffer>, framebuffer: OneTimeInit<LinearFramebuffer>,
fb_console: OneTimeInit<FramebufferConsole>, fbconsole: OneTimeInit<FramebufferConsole>,
combined_terminal: OneTimeInit<CombinedTerminal>, tty: OneTimeInit<CombinedTerminal>,
ioapic: OneTimeInit<IoApic>, ioapic: OneTimeInit<IoApic>,
timer: OneTimeInit<I8253>, timer: OneTimeInit<I8253>,
} }
/// x86-64 architecture implementation static SHUTDOWN_FENCE: SpinFence = SpinFence::new();
pub static ARCHITECTURE: X86_64 = X86_64 { pub static ARCHITECTURE: X86_64 = X86_64 {
boot_data: OneTimeInit::new(), boot_data: OneTimeInit::new(),
acpi: OneTimeInit::new(), acpi: OneTimeInit::new(),
framebuffer: OneTimeInit::new(), framebuffer: OneTimeInit::new(),
fb_console: OneTimeInit::new(), fbconsole: OneTimeInit::new(),
combined_terminal: OneTimeInit::new(), tty: OneTimeInit::new(),
// Devices
ioapic: OneTimeInit::new(), ioapic: OneTimeInit::new(),
timer: OneTimeInit::new(), timer: OneTimeInit::new(),
}; };
static SHUTDOWN_FENCE: SpinFence = SpinFence::new();
impl Architecture for X86_64 { impl Architecture for X86_64 {
const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000; const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
type IrqNumber = IrqNumber; type IrqNumber = IrqNumber;
unsafe fn init_mmu(&self, bsp: bool) { fn cpu_count() -> usize {
if bsp { CPU_COUNT.load(Ordering::Acquire)
init_fixed_tables();
}
let cr3 = KERNEL_TABLES.physical_address();
core::arch::asm!("wbinvd; mov {0}, %cr3", in(reg) cr3, options(att_syntax));
} }
unsafe fn set_interrupt_mask(mask: bool) { unsafe fn set_interrupt_mask(mask: bool) {
@ -196,91 +133,76 @@ impl Architecture for X86_64 {
} }
} }
// CPU management #[inline]
unsafe fn reset(&self) -> ! { unsafe fn map_device_memory(
Self::set_interrupt_mask(true); &self,
loop { base: PhysicalAddress,
Self::wait_for_interrupt(); size: usize,
) -> Result<RawDeviceMemoryMapping, Error> {
mem::map_device_memory(base, size)
}
#[inline]
unsafe fn unmap_device_memory(&self, map: RawDeviceMemoryMapping) {
mem::unmap_device_memory(map)
}
fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
&self,
it: I,
_memory_start: PhysicalAddress,
memory_end: PhysicalAddress,
) -> Result<(), Error> {
let end_l1i = (IntoRaw::<usize>::into_raw(memory_end) + (1 << 30) - 1) >> 30;
if end_l1i > 512 {
loop {}
}
MEMORY_LIMIT.init(memory_end.into_raw());
// Check if 1GiB pages are supported
if PROCESSOR_FEATURES
.get()
.contains(ProcessorFeatures::PDPE1GB)
{
// Just map gigabytes of RAM
for l1i in 0..end_l1i {
// TODO NX
unsafe {
RAM_MAPPING_L1[l1i] = PageEntry::<L1>::block(
PhysicalAddress::from_raw(l1i << 30),
PageAttributes::WRITABLE,
);
}
}
Ok(())
} else {
loop {}
} }
} }
unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error> { #[inline]
if !CPU_INIT_FENCE.try_wait_all(1) { fn virtualize(address: PhysicalAddress) -> Result<usize, Error> {
// Don't send an IPI: SMP not initialized yet let raw: usize = address.into_raw();
return Ok(()); if raw < *mem::MEMORY_LIMIT.get() {
} Ok(raw + RAM_MAPPING_OFFSET)
} else {
let Some(local_apic) = Cpu::get_local().map(|cpu| cpu.local_apic()) else { errorln!("Invalid physical address: {:#x}", address);
panic!("Local APIC has not been initialized yet"); Err(Error::InvalidMemoryOperation)
};
local_apic.send_ipi(target, msg)
}
unsafe fn start_application_processors(&self) {
if let Some(acpi) = self.acpi.try_get() {
let Some(pinfo) = acpi
.platform_info_in(AcpiAllocator)
.ok()
.and_then(|p| p.processor_info)
else {
return;
};
smp::start_ap_cores(&pinfo);
} }
} }
fn cpu_count() -> usize { #[inline]
CPU_COUNT.load(Ordering::Acquire) fn physicalize(address: usize) -> Result<PhysicalAddress, Error> {
} if address < RAM_MAPPING_OFFSET || address - RAM_MAPPING_OFFSET >= *mem::MEMORY_LIMIT.get()
{
errorln!("Not a virtualized physical address: {:#x}", address);
return Err(Error::InvalidMemoryOperation);
}
// Memory Ok(PhysicalAddress::from_raw(address - RAM_MAPPING_OFFSET))
fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error> {
unsafe { KERNEL_TABLES.map_device_pages(phys, count) }
}
// Devices
fn register_monotonic_timer(
&self,
_timer: &'static dyn device_api::timer::MonotonicTimestampProviderDevice,
) -> Result<(), Error> {
todo!()
}
fn register_local_interrupt_controller(
&self,
_intc: &'static dyn device_api::interrupt::LocalInterruptController<
IpiMessage = CpuMessage,
>,
) -> Result<(), Error> {
todo!()
}
fn register_external_interrupt_controller(
&self,
_intc: &'static dyn device_api::interrupt::ExternalInterruptController<
IrqNumber = Self::IrqNumber,
>,
) -> Result<(), Error> {
todo!()
}
fn register_reset_device(
&self,
_reset: &'static dyn device_api::ResetDevice,
) -> Result<(), Error> {
todo!()
}
fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice {
self.timer.get()
}
fn local_interrupt_controller(
&'static self,
) -> &'static dyn LocalInterruptController<IpiMessage = CpuMessage> {
todo!()
} }
fn external_interrupt_controller( fn external_interrupt_controller(
@@ -288,35 +210,220 @@ impl Architecture for X86_64 {
) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> { ) -> &'static dyn ExternalInterruptController<IrqNumber = Self::IrqNumber> {
self.ioapic.get() self.ioapic.get()
} }
fn monotonic_timer(&'static self) -> &'static dyn MonotonicTimestampProviderDevice {
self.timer.get()
}
} }
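
The `virtualize`/`physicalize` pair above treats all tracked RAM as one linear window at `RAM_MAPPING_OFFSET`. A minimal hosted sketch of that arithmetic follows; both constants are placeholders, not the kernel's actual values:

```rust
// Sketch only: the offset and limit below are illustrative placeholders.
const RAM_MAPPING_OFFSET: usize = 0xFFFF_FF00_0000_0000;
const MEMORY_LIMIT: usize = 4 << 30; // pretend 4 GiB of tracked RAM

fn virtualize(phys: usize) -> Option<usize> {
    // All of RAM is mapped 1:1 at a fixed virtual offset,
    // so conversion is a bounds check plus an addition.
    (phys < MEMORY_LIMIT).then(|| phys + RAM_MAPPING_OFFSET)
}

fn physicalize(virt: usize) -> Option<usize> {
    // The reverse direction must land inside the mapped window.
    if virt < RAM_MAPPING_OFFSET || virt - RAM_MAPPING_OFFSET >= MEMORY_LIMIT {
        return None;
    }
    Some(virt - RAM_MAPPING_OFFSET)
}

fn main() {
    let phys = 0x1000;
    let virt = virtualize(phys).unwrap();
    assert_eq!(physicalize(virt), Some(phys));
}
```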
impl X86_64 { impl X86_64 {
fn set_boot_data(&self, boot_data: BootData) { unsafe fn handle_ipi(&self, msg: CpuMessage) {
match &boot_data { loop {}
}
fn set_boot_data(&self, data: BootData) {
match data {
BootData::YBoot(data) => { BootData::YBoot(data) => {
// Setup initrd // Reserve the memory map
Self::init_initrd( unsafe {
data.initrd_address as usize, reserve_region(
(data.initrd_address + data.initrd_size) as usize, "mmap",
PhysicalMemoryRegion {
base: PhysicalAddress::from_raw(data.memory_map.address),
size: data.memory_map.len as usize * size_of::<AvailableMemoryRegion>(),
},
);
}
// Reserve initrd, if not NULL
if data.initrd_address != 0 && data.initrd_size != 0 {
let aligned_start = data.initrd_address & !0xFFF;
let aligned_end = (data.initrd_address + data.initrd_size + 0xFFF) & !0xFFF;
unsafe {
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: PhysicalAddress::from_raw(aligned_start),
size: (aligned_end - aligned_start) as usize,
},
);
}
}
}
}
self.boot_data.init(data);
}
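
The initrd reservation above rounds the physical range outward to 4 KiB page boundaries before handing it to `reserve_region`. The same rounding as a standalone sketch, with illustrative values:

```rust
// Round a physical range outward to 4 KiB page boundaries (sketch).
const PAGE_MASK: u64 = 0xFFF;

fn page_align_range(start: u64, size: u64) -> (u64, u64) {
    let aligned_start = start & !PAGE_MASK;                    // round start down
    let aligned_end = (start + size + PAGE_MASK) & !PAGE_MASK; // round end up
    (aligned_start, aligned_end - aligned_start)
}

fn main() {
    // e.g. an initrd loaded at 0x1234_5678 spanning 3000 bytes
    let (base, len) = page_align_range(0x1234_5678, 3000);
    assert_eq!(base, 0x1234_5000);
    assert_eq!(len, 0x2000); // the image touches two 4 KiB pages
}
```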
unsafe fn init_physical_memory_from_yboot(data: &LoadProtocolV1) {
let mmap = EarlyMapping::<AvailableMemoryRegion>::map_slice(
PhysicalAddress::from_raw(data.memory_map.address),
data.memory_map.len as usize,
)
.unwrap();
phys::init_from_iter(mmap.as_ref().iter().map(|reg| PhysicalMemoryRegion {
base: PhysicalAddress::from_raw(reg.start_address),
size: reg.page_count as usize * 0x1000,
}));
}
unsafe fn init_memory_management(&self) {
const HEAP_PAGES: usize = 16;
init_fixed_tables();
// Reserve lower 4MiB just in case
reserve_region(
"lowmem",
PhysicalMemoryRegion {
base: PhysicalAddress::ZERO,
size: 4 * 1024 * 1024,
},
);
match self.boot_data.get() {
&BootData::YBoot(data) => Self::init_physical_memory_from_yboot(data),
}
// Setup heap
for i in 0..HEAP_PAGES {
// Allocate in 2MiB chunks
let l2_page = phys::alloc_2m_page().unwrap();
map_heap_block(i, l2_page);
}
heap::init_heap(HEAP_MAPPING_OFFSET, HEAP_PAGES * L2::SIZE);
}
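
`init_memory_management` maps the kernel heap as `HEAP_PAGES` 2 MiB blocks, which works out to a 32 MiB heap; a quick arithmetic sketch, assuming `L2::SIZE` is the 2 MiB large-page size the allocation comment above refers to:

```rust
fn main() {
    const HEAP_PAGES: usize = 16;            // number of 2 MiB blocks, as above
    const L2_SIZE: usize = 2 * 1024 * 1024;  // one 2 MiB large page
    let heap_size = HEAP_PAGES * L2_SIZE;
    assert_eq!(heap_size, 32 * 1024 * 1024); // 32 MiB of kernel heap
    println!("kernel heap: {} MiB", heap_size >> 20);
}
```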
unsafe fn init_platform(&'static self, cpu_id: usize) {
Cpu::init_local(LocalApic::new(), cpu_id as _);
if cpu_id == 0 {
match self.boot_data.get() {
&BootData::YBoot(data) => {
let start = PhysicalAddress::from_raw(data.initrd_address);
Self::init_initrd(start, start.add(data.initrd_size as usize));
}
}
self.init_acpi_from_boot_data();
Self::disable_8259();
self.timer.init(I8253::new());
let com1_3 = Box::leak(Box::new(ComPort::new(0x3F8, 0x3E8, IrqNumber::Isa(4))));
debug::add_sink(com1_3.port_a(), LogLevel::Debug);
self.init_framebuffer();
debug::init();
let ps2 = Box::leak(Box::new(PS2Controller::new(
IrqNumber::Isa(1),
IrqNumber::Isa(12),
0x64,
0x60,
)));
ps2.init().unwrap();
infoln!(
"Yggdrasil v{} ({})",
env!("CARGO_PKG_VERSION"),
git_version!()
);
if let Some(acpi) = self.acpi.try_get() {
self.init_platform_from_acpi(acpi);
}
self.timer.get().init_irq().unwrap();
ps2.connect(self.tty.get());
ps2.init_irq().unwrap();
device::register_device(self.ioapic.get());
device::register_device(ps2);
// TODO setup PCI devices
} else {
loop {}
}
}
unsafe fn init_acpi_from_boot_data(&self) {
match self.boot_data.get() {
&BootData::YBoot(data) => self.init_acpi_from_rsdp(data.rsdp_address as usize),
}
}
unsafe fn init_acpi_from_rsdp(&self, rsdp: usize) {
let acpi_tables = AcpiTables::from_rsdp(AcpiHandlerImpl, rsdp).unwrap();
self.acpi.init(acpi_tables);
}
unsafe fn init_platform_from_acpi(&self, acpi: &'static AcpiTables<AcpiHandlerImpl>) {
let platform_info = acpi.platform_info_in(AcpiAllocator).unwrap();
let InterruptModel::Apic(apic_info) = platform_info.interrupt_model else {
panic!("The processor does not support APIC");
};
self.ioapic.init(IoApic::from_acpi(&apic_info).unwrap());
// TODO ACPI init
// acpi::init_acpi(acpi).unwrap();
// TODO MCFG
// if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
// for entry in mcfg.entries() {
// PciBusManager::add_segment_from_mcfg(entry).unwrap();
// }
// }
}
unsafe fn init_framebuffer(&'static self) {
match self.boot_data.get() {
&BootData::YBoot(data) => {
let info = &data.opt_framebuffer;
self.framebuffer.init(
LinearFramebuffer::from_physical_bits(
PhysicalAddress::from_raw(info.res_address),
info.res_size as usize,
info.res_stride as usize,
info.res_width,
info.res_height,
)
.unwrap(),
); );
} }
} }
self.boot_data.init(boot_data); self.fbconsole
.init(FramebufferConsole::from_framebuffer(self.framebuffer.get(), None).unwrap());
debug::add_sink(self.fbconsole.get(), LogLevel::Info);
self.tty.init(CombinedTerminal::new(self.fbconsole.get()));
devfs::add_char_device(self.tty.get(), CharDeviceType::TtyRegular).unwrap();
console::add_console_autoflush(self.fbconsole.get());
} }
fn init_initrd(initrd_start: usize, initrd_end: usize) { fn init_initrd(initrd_start: PhysicalAddress, initrd_end: PhysicalAddress) {
if initrd_start == 0 || initrd_end <= initrd_start { if initrd_start.is_zero() || initrd_end <= initrd_start {
infoln!("No initrd loaded"); infoln!("No initrd loaded");
return; return;
} }
let start_aligned = initrd_start & !0xFFF; let start_aligned = initrd_start.page_align_down::<L3>();
let end_aligned = initrd_end & !0xFFF; let end_aligned = initrd_end.page_align_up::<L3>();
let data = unsafe { let data = unsafe {
core::slice::from_raw_parts( core::slice::from_raw_parts(
initrd_start.virtualize() as *const _, start_aligned.virtualize_raw() as *const u8,
initrd_end - initrd_start, initrd_end - initrd_start,
) )
}; };
@@ -330,167 +437,6 @@ impl X86_64 {
INITRD_DATA.init(initrd); INITRD_DATA.init(initrd);
} }
fn init_acpi_from_boot_data(&self) {
match *self.boot_data.get() {
BootData::YBoot(data) => {
self.init_acpi_from_rsdp(data.rsdp_address as usize);
}
}
}
fn init_acpi_from_rsdp(&self, address: usize) {
let acpi_tables = unsafe { AcpiTables::from_rsdp(AcpiHandlerImpl, address).unwrap() };
self.acpi.init(acpi_tables);
}
unsafe fn init_physical_memory(&self) -> Result<(), Error> {
// Reserve the lower 8MiB of memory
reserve_region(
"lower-memory",
PhysicalMemoryRegion {
base: 0,
size: 8 << 21,
},
);
// Reserve initrd
if let Some(initrd) = INITRD_DATA.try_get() {
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: initrd.phys_page_start,
size: initrd.phys_page_len,
},
);
}
match *self.boot_data.get() {
BootData::YBoot(data) => {
let memory_map = &data.memory_map;
reserve_region("memory-map", memory_map.reserved_range());
phys::init_from_iter(IterableMemoryMap::iter(memory_map).map(|r| {
PhysicalMemoryRegion {
base: AbstractAvailableRegion::start_address(r),
size: AbstractAvailableRegion::page_count(r) * 0x1000,
}
}))
.expect("Failed to initialize the physical memory manager");
}
}
Ok(())
}
unsafe fn init_platform_from_acpi(&self, acpi: &'static AcpiTables<AcpiHandlerImpl>) {
let platform_info = acpi.platform_info_in(AcpiAllocator).unwrap();
let InterruptModel::Apic(apic_info) = platform_info.interrupt_model else {
panic!("Processor does not have an APIC");
};
self.ioapic.init(IoApic::from_acpi(&apic_info).unwrap());
acpi::init_acpi(acpi).unwrap();
// Enumerate PCIe root devices
// TODO can there be multiple MCFGs?
if let Ok(mcfg) = acpi.find_table::<Mcfg>() {
for entry in mcfg.entries() {
PciBusManager::add_segment_from_mcfg(entry).unwrap();
}
}
}
unsafe fn init_framebuffer(&'static self) {
match *self.boot_data.get() {
BootData::YBoot(data) => {
let fb = &data.opt_framebuffer;
self.framebuffer.init(
LinearFramebuffer::from_physical_bits(
fb.res_address as _,
fb.res_size as _,
fb.res_stride as _,
fb.res_width as _,
fb.res_height as _,
)
.unwrap(),
);
}
}
self.fb_console
.init(FramebufferConsole::from_framebuffer(self.framebuffer.get(), None).unwrap());
debug::add_sink(self.fb_console.get(), LogLevel::Info);
// Add a terminal to the devfs
// TODO this is ugly
let combined_terminal = CombinedTerminal::new(self.fb_console.get());
self.combined_terminal.init(combined_terminal);
devfs::add_char_device(self.combined_terminal.get(), CharDeviceType::TtyRegular).unwrap();
console::add_console_autoflush(self.fb_console.get());
}
unsafe fn init_platform(&'static self, cpu_id: usize) {
Cpu::init_local(LocalApic::new(), cpu_id as _);
if cpu_id == 0 {
self.init_acpi_from_boot_data();
Self::disable_8259();
self.timer.init(I8253::new());
// Initialize debug output as soon as possible
let com1_3 = Box::leak(Box::new(ComPort::new(0x3F8, 0x3E8, IrqNumber::Isa(4))));
debug::add_sink(com1_3.port_a(), LogLevel::Debug);
// devfs::add_char_device(com1_3.port_a(), CharDeviceType::TtySerial).unwrap();
self.init_framebuffer();
debug::init();
let ps2 = Box::leak(Box::new(PS2Controller::new(
IrqNumber::Isa(1),
IrqNumber::Isa(12),
0x64,
0x60,
)));
ps2.init().unwrap();
// Print some stuff now that the output is initialized
infoln!(
"Yggdrasil v{} ({})",
env!("CARGO_PKG_VERSION"),
git_version!()
);
infoln!("Initializing x86_64 platform");
if let Some(acpi) = self.acpi.try_get() {
self.init_platform_from_acpi(acpi);
}
// Enable IRQs for the devices
self.timer.get().init_irq().unwrap();
ps2.connect(self.combined_terminal.get());
ps2.init_irq().unwrap();
device::register_device(self.ioapic.get());
// device::register_device(self.timer.get());
device::register_device(ps2);
// Initialize devices from PCI bus
PciBusManager::setup_bus_devices().unwrap();
infoln!("Device list:");
for device in device::manager_lock().devices() {
infoln!("* {}", device.display_name());
}
}
}
unsafe fn disable_8259() { unsafe fn disable_8259() {
infoln!("Disabling i8259 PIC"); infoln!("Disabling i8259 PIC");
// TODO should I make a module for 8259 if I don't even use it? // TODO should I make a module for 8259 if I don't even use it?
@@ -512,21 +458,4 @@ impl X86_64 {
pic_master_cmd.write(0x20); pic_master_cmd.write(0x20);
pic_slave_cmd.write(0x20); pic_slave_cmd.write(0x20);
} }
unsafe fn handle_ipi(&self, msg: CpuMessage) {
match msg {
CpuMessage::Panic => panic::panic_secondary(),
CpuMessage::Shutdown => {
Self::set_interrupt_mask(true);
let id = Cpu::local_id();
infoln!("cpu{} shutdown", id);
SHUTDOWN_FENCE.signal();
loop {
Self::wait_for_interrupt();
}
}
}
}
} }

View File

@@ -7,15 +7,18 @@ use core::{
use acpi_lib::platform::{ProcessorInfo, ProcessorState}; use acpi_lib::platform::{ProcessorInfo, ProcessorState};
use crate::{ use crate::{
arch::{x86_64::boot::__x86_64_ap_entry, Architecture, ArchitectureImpl}, arch::{
x86_64::{boot::__x86_64_ap_entry, mem::KERNEL_TABLES},
Architecture, ArchitectureImpl,
},
mem::{ mem::{
phys::{self, PageUsage}, address::{AsPhysicalAddress, IntoRaw},
ConvertAddress, phys,
}, },
task::Cpu, task::Cpu,
}; };
use super::{acpi::AcpiAllocator, table::KERNEL_TABLES}; use super::acpi::AcpiAllocator;
/// The number of CPUs present in the system /// The number of CPUs present in the system
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1); pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
@@ -46,10 +49,11 @@ unsafe fn load_ap_bootstrap_code() {
"Invalid bootstrap code placement: is not below 1MiB" "Invalid bootstrap code placement: is not below 1MiB"
); );
let src_slice = core::slice::from_raw_parts(src_ptr, size); todo!();
let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size); // let src_slice = core::slice::from_raw_parts(src_ptr, size);
// let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size);
dst_slice.copy_from_slice(src_slice); // dst_slice.copy_from_slice(src_slice);
} }
unsafe fn load_ap_bootstrap_data(src: &ApBootstrapData) { unsafe fn load_ap_bootstrap_data(src: &ApBootstrapData) {
@@ -62,11 +66,12 @@ unsafe fn load_ap_bootstrap_data(src: &ApBootstrapData) {
"Invalid bootstrap data placement: is not below 1MiB" "Invalid bootstrap data placement: is not below 1MiB"
); );
let src_slice = core::slice::from_raw_parts(src_ptr, size); todo!()
let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size); // let src_slice = core::slice::from_raw_parts(src_ptr, size);
// let dst_slice = core::slice::from_raw_parts_mut(dst_ptr.virtualize(), size);
dst_slice.copy_from_slice(src_slice); // dst_slice.copy_from_slice(src_slice);
core::arch::asm!("wbinvd"); // core::arch::asm!("wbinvd");
} }
unsafe fn start_ap_core(apic_id: u32) { unsafe fn start_ap_core(apic_id: u32) {
@@ -75,10 +80,10 @@ unsafe fn start_ap_core(apic_id: u32) {
let bsp_cpu = Cpu::local(); let bsp_cpu = Cpu::local();
let bsp_apic = bsp_cpu.local_apic(); let bsp_apic = bsp_cpu.local_apic();
let cr3 = KERNEL_TABLES.physical_address(); let cr3 = KERNEL_TABLES.as_physical_address().into_raw();
let stack_base = phys::alloc_pages_contiguous(AP_STACK_PAGES, PageUsage::Used) let stack_base = phys::alloc_pages_contiguous(AP_STACK_PAGES)
.unwrap() .unwrap()
.virtualize(); .virtualize_raw();
let stack_size = AP_STACK_PAGES * 0x1000; let stack_size = AP_STACK_PAGES * 0x1000;
let data = ApBootstrapData { let data = ApBootstrapData {

View File

@@ -1,161 +0,0 @@
use abi::error::Error;
use crate::{
arch::x86_64::table::{PageAttributes, PageEntry, PageTable, L0, L1, L2, L3},
mem::KERNEL_VIRT_OFFSET,
};
// Means the lower 4 GiB are mapped
const KERNEL_PD_COUNT: usize = 4;
// Leave 1GiB gap just for fool safety
const DEVICE_MAPPING_L1I: usize = KERNEL_PD_COUNT + 1;
const DEVICE_VIRT_OFFSET: usize = (DEVICE_MAPPING_L1I << 30) + KERNEL_VIRT_OFFSET;
/// Fixed tables for x86-64. Provide device mappings and static kernel mapping.
pub struct FixedTables {
// Common
l0: PageTable<L0>,
l1: PageTable<L1>,
// Kernel mapping
kernel_l2: [PageTable<L2>; KERNEL_PD_COUNT],
// Device mapping
// 511 entries
device_l2: PageTable<L2>,
// 512 entries
device_l3: PageTable<L3>,
device_l2i: usize,
device_l3i: usize,
}
impl FixedTables {
/// Constructs a set of empty translation tables
pub const fn zeroed() -> Self {
Self {
// Global
l0: PageTable::zeroed(),
// Higher-half common
l1: PageTable::zeroed(),
// Kernel
kernel_l2: [PageTable::zeroed(); KERNEL_PD_COUNT],
// Device
device_l2: PageTable::zeroed(),
device_l3: PageTable::zeroed(),
device_l2i: 1,
device_l3i: 0,
}
}
/// Maps a specified count of physical memory pages to the device virtual address space
pub fn map_device_pages(&mut self, phys: usize, count: usize) -> Result<usize, Error> {
if count > 512 * 512 {
panic!("Unsupported device memory mapping size");
} else if count > 512 {
let count = (count + 511) / 512;
// 2MiB mappings
if self.device_l2i + count > 512 {
return Err(Error::OutOfMemory);
}
let virt = DEVICE_VIRT_OFFSET + (self.device_l2i << 21);
for i in 0..count {
self.device_l2[self.device_l2i + i] =
PageEntry::block(phys, PageAttributes::WRITABLE);
}
self.device_l2i += count;
Ok(virt)
} else {
// 4KiB mappings
// Check if a mapping to that address already exists
if self.device_l3i >= count {
for i in 0..self.device_l3i {
let mut matches = true;
for j in 0..count {
let page = phys + j * 0x1000;
let existing = self.device_l3[i].as_page().unwrap();
if page != existing {
matches = false;
break;
}
}
if matches {
let virt = DEVICE_VIRT_OFFSET + (i << 12);
return Ok(virt);
}
}
}
if self.device_l3i + count > 512 {
return Err(Error::OutOfMemory);
}
let virt = DEVICE_VIRT_OFFSET + (self.device_l3i << 12);
for i in 0..count {
self.device_l3[self.device_l3i + i] =
PageEntry::page(phys + i * 0x1000, PageAttributes::WRITABLE);
}
self.device_l3i += count;
Ok(virt)
}
}
/// Returns the physical address of the fixed PML4
pub fn physical_address(&self) -> usize {
self.l0.physical_address()
}
pub fn clone_into(&self, target: &mut PageTable<L0>) {
target[511] = self.l0[511];
}
}
/// Instance of fixed translation tables
pub static mut KERNEL_TABLES: FixedTables = FixedTables::zeroed();
/// Initializes the fixed translation tables.
///
/// # Safety
///
/// Only meant to be called by BSP during early init.
pub unsafe fn init_fixed_tables() {
// Kernel L2
for i in 0..512 * KERNEL_PD_COUNT {
let table_index = i / 512;
let table_offset = i % 512;
KERNEL_TABLES.kernel_l2[table_index][table_offset] =
PageEntry::block(i << 21, PageAttributes::WRITABLE);
}
// Device L2
let addr = KERNEL_TABLES.device_l3.physical_address();
KERNEL_TABLES.device_l2[0] = PageEntry::table(addr, PageAttributes::empty());
// Higher-half L1
// Map kernel nGiB
for i in 0..KERNEL_PD_COUNT {
let addr = KERNEL_TABLES.kernel_l2[i].physical_address();
KERNEL_TABLES.l1[i] = PageEntry::table(addr, PageAttributes::empty());
}
// Map device tables
let addr = KERNEL_TABLES.device_l2.physical_address();
KERNEL_TABLES.l1[DEVICE_MAPPING_L1I] = PageEntry::table(addr, PageAttributes::empty());
// Global L0
let addr = KERNEL_TABLES.l1.physical_address();
// Keep the lower mapping for AP bootstrapping
KERNEL_TABLES.l0[0] = PageEntry::table(addr, PageAttributes::empty());
KERNEL_TABLES.l0[511] = PageEntry::table(addr, PageAttributes::empty());
}

View File

@@ -52,7 +52,7 @@ macro_rules! log_print_raw { macro_rules! log_print_raw {
macro_rules! log_print { macro_rules! log_print {
($level:expr, $($args:tt)+) => { ($level:expr, $($args:tt)+) => {
log_print_raw!($level, "cpu{}:{}:{}: {}", $crate::task::Cpu::local_id(), file!(), line!(), format_args!($($args)+)) log_print_raw!($level, "cpu{}:{}:{}: {}", /* $crate::task::Cpu::local_id() */ 0, file!(), line!(), format_args!($($args)+))
}; };
} }

View File

@@ -1,20 +1,13 @@
//! Console device interfaces //! Console device interfaces
use core::{mem::size_of, time::Duration};
use core::time::Duration;
use abi::{error::Error, primitive_enum}; use abi::{error::Error, primitive_enum};
use alloc::vec::Vec; use alloc::{boxed::Box, vec, vec::Vec};
use bitflags::bitflags; use bitflags::bitflags;
use kernel_util::util::StaticVector; use kernel_util::util::StaticVector;
use crate::{ use crate::{debug::DebugSink, sync::IrqSafeSpinlock, task::runtime};
debug::DebugSink,
mem::{
phys::{self, PageUsage},
ConvertAddress,
},
sync::IrqSafeSpinlock,
task::runtime,
};
const CONSOLE_ROW_LEN: usize = 80; const CONSOLE_ROW_LEN: usize = 80;
const MAX_CSI_ARGS: usize = 8; const MAX_CSI_ARGS: usize = 8;
@@ -93,7 +86,7 @@ pub struct ConsoleRow {
/// Buffer that contains text rows of the console with their attributes + tracks dirty rows which /// Buffer that contains text rows of the console with their attributes + tracks dirty rows which
/// need to be flushed to the display /// need to be flushed to the display
pub struct ConsoleBuffer { pub struct ConsoleBuffer {
rows: &'static mut [ConsoleRow], rows: Vec<ConsoleRow>,
height: u32, height: u32,
} }
@@ -253,48 +246,27 @@ impl ConsoleRow {
impl ConsoleBuffer { impl ConsoleBuffer {
/// Constructs a fixed-size console buffer /// Constructs a fixed-size console buffer
pub fn new(height: u32) -> Result<Self, Error> { pub fn new(height: u32) -> Result<Self, Error> {
let size = size_of::<ConsoleRow>() * (height as usize); // let size = size_of::<ConsoleRow>() * (height as usize);
let page_count = (size + 0xFFF) / 0x1000; let mut rows = vec![ConsoleRow::zeroed(); height as usize];
let pages = phys::alloc_pages_contiguous(page_count, PageUsage::Used)?;
let rows = unsafe {
core::slice::from_raw_parts_mut(pages.virtualize() as *mut ConsoleRow, height as usize)
};
for row in rows.iter_mut() { for row in rows.iter_mut() {
row.clear(DEFAULT_BG_COLOR); row.clear(DEFAULT_BG_COLOR);
} }
Ok(Self { rows, height }) Ok(Self { rows, height })
} // let size = size_of::<ConsoleRow>() * (height as usize);
// let page_count = (size + 0xFFF) / 0x1000;
// let pages = phys::alloc_pages_contiguous(page_count, PageUsage::Used)?;
/// Reallocates the internal buffer with a new size // let rows = unsafe {
pub fn reallocate(&mut self, new_height: u32) -> Result<(), Error> { // core::slice::from_raw_parts_mut(pages.virtualize() as *mut ConsoleRow, height as usize)
// TODO suppress debugging output here // };
if new_height <= self.height {
// Keep using the old buffer
return Ok(());
}
let size = size_of::<ConsoleRow>() * (new_height as usize); // for row in rows.iter_mut() {
let page_count = (size + 0xFFF) / 0x1000; // row.clear(DEFAULT_BG_COLOR);
let pages = phys::alloc_pages_contiguous(page_count, PageUsage::Used)?; // }
let data = unsafe { // Ok(Self { rows, height })
core::slice::from_raw_parts_mut(
pages.virtualize() as *mut ConsoleRow,
new_height as usize,
)
};
// Copy rows from the old buffer
data[0..self.height as usize].copy_from_slice(self.rows);
data[self.height as usize..].fill(ConsoleRow::zeroed());
self.rows = data;
self.height = new_height;
Ok(())
} }
#[inline(never)] #[inline(never)]

View File

@@ -5,7 +5,10 @@ use core::ops::{Index, IndexMut};
use abi::error::Error; use abi::error::Error;
use device_api::Device; use device_api::Device;
use crate::{mem::device::DeviceMemory, sync::IrqSafeSpinlock}; use crate::{
mem::{device::RawDeviceMemoryMapping, PhysicalAddress},
sync::IrqSafeSpinlock,
};
use super::{DisplayDevice, DisplayDimensions}; use super::{DisplayDevice, DisplayDimensions};
@@ -34,18 +37,17 @@ impl LinearFramebuffer {
/// ///
/// Unsafe: the caller must ensure the validity of all the arguments. /// Unsafe: the caller must ensure the validity of all the arguments.
pub unsafe fn from_physical_bits( pub unsafe fn from_physical_bits(
base: usize, base: PhysicalAddress,
size: usize, size: usize,
stride: usize, stride: usize,
width: u32, width: u32,
height: u32, height: u32,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
// TODO this may get Dropped later let base = unsafe { RawDeviceMemoryMapping::map(base, size) }?.leak();
let mmio = unsafe { DeviceMemory::map("framebuffer", base, size) }?;
let inner = Inner { let inner = Inner {
dimensions: DisplayDimensions { width, height }, dimensions: DisplayDimensions { width, height },
base: mmio.base(), base,
stride, stride,
}; };

View File

@@ -7,9 +7,9 @@ use crate::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
#[cfg(target_arch = "aarch64")] #[cfg(target_arch = "aarch64")]
pub mod devtree; pub mod devtree;
pub mod bus; // pub mod bus;
pub mod display; pub mod display;
pub mod power; // pub mod power;
pub mod serial; pub mod serial;
pub mod timer; pub mod timer;
pub mod tty; pub mod tty;

View File

@@ -7,18 +7,14 @@ use memfs::block::{self, BlockAllocator};
use vfs::VnodeRef; use vfs::VnodeRef;
use yggdrasil_abi::{error::Error, io::MountOptions}; use yggdrasil_abi::{error::Error, io::MountOptions};
use crate::mem::{ use crate::mem::{self, phys, PhysicalAddress};
self,
phys::{self, PageUsage},
ConvertAddress,
};
pub mod devfs; pub mod devfs;
/// Describes in-memory filesystem image used as initial root /// Describes in-memory filesystem image used as initial root
pub struct Initrd { pub struct Initrd {
/// Page-aligned start address of the initrd /// Page-aligned start address of the initrd
pub phys_page_start: usize, pub phys_page_start: PhysicalAddress,
/// Page-aligned length /// Page-aligned length
pub phys_page_len: usize, pub phys_page_len: usize,
/// Safe reference to the initrd data slice /// Safe reference to the initrd data slice
@@ -35,14 +31,14 @@ unsafe impl BlockAllocator for FileBlockAllocator {
fn alloc() -> Result<NonNull<u8>, Error> { fn alloc() -> Result<NonNull<u8>, Error> {
// TODO make this a static assertion // TODO make this a static assertion
assert_eq!(block::SIZE, 4096); assert_eq!(block::SIZE, 4096);
let page = phys::alloc_page(PageUsage::Used)?; let page = phys::alloc_page()?;
Ok(unsafe { NonNull::new_unchecked(page.virtualize() as *mut _) }) Ok(unsafe { NonNull::new_unchecked(page.virtualize_raw() as *mut _) })
} }
unsafe fn dealloc(block: NonNull<u8>) { unsafe fn dealloc(block: NonNull<u8>) {
let page = block.as_ptr() as usize; let page = block.as_ptr() as usize;
assert!(page > mem::KERNEL_VIRT_OFFSET); let physical = PhysicalAddress::from_virtualized(page);
phys::free_page(page.physicalize()); phys::free_page(physical);
} }
} }

View File

@@ -1,5 +1,6 @@
//! osdev-x kernel crate //! osdev-x kernel crate
#![feature( #![feature(
step_trait,
decl_macro, decl_macro,
naked_functions, naked_functions,
asm_const, asm_const,
@@ -13,7 +14,8 @@
linked_list_cursors, linked_list_cursors,
rustc_private, rustc_private,
allocator_api, allocator_api,
async_fn_in_trait async_fn_in_trait,
strict_provenance
)] )]
#![allow(clippy::new_without_default, clippy::fn_to_numeric_cast)] #![allow(clippy::new_without_default, clippy::fn_to_numeric_cast)]
// #![warn(missing_docs)] // #![warn(missing_docs)]
@@ -21,11 +23,12 @@
#![no_std] #![no_std]
#![no_main] #![no_main]
use sync::SpinFence; use arch::Architecture;
use crate::{ use crate::{
arch::{Architecture, ArchitectureImpl, ARCHITECTURE}, arch::{ArchitectureImpl, ARCHITECTURE},
mem::heap, mem::heap,
sync::SpinFence,
task::{spawn_kernel_closure, Cpu}, task::{spawn_kernel_closure, Cpu},
}; };

src/mem/address.rs Normal file
View File

@@ -0,0 +1,185 @@
use core::{
fmt,
iter::Step,
marker::PhantomData,
ops::{Add, Deref, DerefMut, Sub},
};
use crate::arch::{Architecture, ArchitectureImpl, ARCHITECTURE};
use super::{pointer::PhysicalPointer, table::EntryLevel, KERNEL_VIRT_OFFSET};
#[repr(transparent)]
pub struct KernelImageObject<T> {
inner: T,
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalAddress(u64);
#[const_trait]
pub trait FromRaw<T> {
fn from_raw(value: T) -> Self;
}
#[const_trait]
pub trait IntoRaw<T> {
fn into_raw(self) -> T;
}
pub trait AsPhysicalAddress {
unsafe fn as_physical_address(&self) -> PhysicalAddress;
}
// KernelImageObject wrapper for objects inside the kernel
impl<T> KernelImageObject<T> {
pub const unsafe fn new(inner: T) -> Self {
Self { inner }
}
}
impl<T> AsPhysicalAddress for KernelImageObject<T> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
PhysicalAddress::from_raw(&self.inner as *const _ as usize - KERNEL_VIRT_OFFSET)
}
}
impl<T> Deref for KernelImageObject<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> DerefMut for KernelImageObject<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
//
impl PhysicalAddress {
pub const ZERO: Self = Self(0);
pub const MAX: Self = Self(u64::MAX);
pub const MIN: Self = Self(u64::MIN);
pub const fn add(self, offset: usize) -> Self {
Self(self.0 + offset as u64)
}
#[inline(always)]
pub const fn is_zero(self) -> bool {
self.0 == 0
}
pub const fn page_offset<L: ~const EntryLevel>(self) -> usize {
L::page_offset(self.0 as usize)
}
pub const fn page_align_down<L: ~const EntryLevel>(self) -> Self {
Self(self.0 & !(L::SIZE as u64 - 1))
}
pub const fn page_align_up<L: ~const EntryLevel>(self) -> Self {
Self((self.0 + L::SIZE as u64 - 1) & !(L::SIZE as u64 - 1))
}
pub unsafe fn from_virtualized(address: usize) -> Self {
ArchitectureImpl::physicalize(address).unwrap()
}
pub fn virtualize_raw(self) -> usize {
ArchitectureImpl::virtualize(self).unwrap()
}
pub fn virtualize<T>(self) -> PhysicalPointer<T> {
loop {}
}
pub fn virtualize_slice<T>(self, len: usize) -> PhysicalPointer<[T]> {
loop {}
}
}
impl Add for PhysicalAddress {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self(self.0 + rhs.0)
}
}
impl Sub for PhysicalAddress {
type Output = usize;
fn sub(self, rhs: Self) -> Self::Output {
(self.0 - rhs.0) as usize
}
}
// Conversions
impl const FromRaw<u64> for PhysicalAddress {
fn from_raw(value: u64) -> Self {
Self(value)
}
}
impl const FromRaw<usize> for PhysicalAddress {
fn from_raw(value: usize) -> Self {
Self(value as u64)
}
}
impl const IntoRaw<u64> for PhysicalAddress {
fn into_raw(self) -> u64 {
self.0
}
}
impl const IntoRaw<usize> for PhysicalAddress {
fn into_raw(self) -> usize {
self.0 as usize
}
}
impl From<PhysicalAddress> for u64 {
fn from(addr: PhysicalAddress) -> u64 {
addr.0
}
}
impl From<PhysicalAddress> for usize {
fn from(addr: PhysicalAddress) -> usize {
addr.0 as usize
}
}
// Ranges
impl Step for PhysicalAddress {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
loop {}
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.0.checked_add(count as u64).map(Self)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
loop {}
}
}
// fmt
impl fmt::LowerHex for PhysicalAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::LowerHex::fmt(&self.0, f)
}
}
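
A usage sketch for the `PhysicalAddress` conversions above. The type is re-declared here in simplified form (no `const` traits, 4 KiB alignment hard-coded) purely to keep the example self-contained:

```rust
// Standalone sketch mirroring the PhysicalAddress conversions above.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct PhysicalAddress(u64);

impl PhysicalAddress {
    fn from_raw(value: u64) -> Self { Self(value) }
    fn into_raw(self) -> u64 { self.0 }
    fn add(self, offset: usize) -> Self { Self(self.0 + offset as u64) }
    // 4 KiB (L3) alignment helpers, as in the kernel type
    fn page_align_down(self) -> Self { Self(self.0 & !0xFFF) }
    fn page_align_up(self) -> Self { Self((self.0 + 0xFFF) & !0xFFF) }
}

fn main() {
    let addr = PhysicalAddress::from_raw(0x1234_5678u64);
    assert_eq!(addr.page_align_down().into_raw(), 0x1234_5000);
    assert_eq!(addr.page_align_up().into_raw(), 0x1234_6000);
    assert_eq!(addr.add(0x1000).into_raw(), 0x1234_6678);
}
```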

View File

@@ -1,85 +1,80 @@
//! Facilities for mapping devices to virtual address space //! Facilities for mapping devices to virtual address space
use core::{marker::PhantomData, mem::size_of, ops::Deref}; use core::{marker::PhantomData, mem::size_of, ops::Deref, sync::atomic::AtomicUsize};
use abi::error::Error; use abi::error::Error;
use alloc::sync::Arc;
use crate::arch::{Architecture, ARCHITECTURE}; use crate::arch::{Architecture, ARCHITECTURE};
/// Generic MMIO access mapping use super::PhysicalAddress;
#[derive(Debug)]
pub struct RawDeviceMemoryMapping {
pub address: usize,
pub base_address: usize,
pub page_size: usize,
pub page_count: usize,
}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
#[allow(unused)] pub struct DeviceMemoryMapping {
pub struct DeviceMemory { inner: Arc<RawDeviceMemoryMapping>,
name: &'static str, address: usize,
base: usize,
size: usize,
} }
/// MMIO wrapper for `T` #[derive(Clone, Debug)]
pub struct DeviceMemoryIo<T> { pub struct DeviceMemoryIo<'a, T: ?Sized> {
mmio: DeviceMemory, inner: Arc<RawDeviceMemoryMapping>,
_pd: PhantomData<T>, value: &'a T,
} }
impl DeviceMemory { impl RawDeviceMemoryMapping {
/// Maps the device to some virtual memory address and constructs a wrapper for that range.
///
/// # Safety
///
/// The caller is responsible for making sure the (phys, size) range is valid and actually
/// points to some device's MMIO. The caller must also make sure no aliasing for that range is
/// possible.
pub unsafe fn map(name: &'static str, phys: usize, size: usize) -> Result<Self, Error> {
let aligned_base = phys & !0xFFF;
let base_offset = phys & 0xFFF;
let aligned_size = (size + 0xFFF) & !0xFFF;
let base = ARCHITECTURE.map_device_pages(aligned_base, aligned_size / 0x1000)?;
let base = base + base_offset;
debugln!("Mapped {}@{:#x} to {:#x}", name, phys, base);
Ok(Self { name, base, size })
}
/// Returns the base address of this mapping
#[inline] #[inline]
pub fn base(&self) -> usize { pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
self.base ARCHITECTURE.map_device_memory(base, size)
}
pub fn leak(self) -> usize {
let address = self.address;
core::mem::forget(self);
address
} }
} }
impl<T> DeviceMemoryIo<T> { impl Drop for RawDeviceMemoryMapping {
/// Maps the `T` struct at `phys` to some virtual memory address and provides a [Deref]able fn drop(&mut self) {
/// wrapper to it. loop {}
///
/// # Safety
///
/// The caller is responsible for making sure the `phys` address points to a MMIO region which
/// is at least `size_of::<T>()` and no aliasing for that region is possible.
pub unsafe fn map(name: &'static str, phys: usize) -> Result<Self, Error> {
DeviceMemory::map(name, phys, size_of::<T>()).map(|t| Self::new(t))
}
/// Constructs a device MMIO wrapper from given [DeviceMemory] mapping.
///
/// # Safety
///
/// The caller must ensure `mmio` actually points to a device of type `T`.
pub unsafe fn new(mmio: DeviceMemory) -> Self {
assert!(mmio.size >= size_of::<T>());
// TODO check align
Self {
mmio,
_pd: PhantomData,
}
} }
} }
impl<T> Deref for DeviceMemoryIo<T> { impl DeviceMemoryMapping {
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
let inner = RawDeviceMemoryMapping::map(base, size)?;
loop {}
}
}
impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
pub unsafe fn from_raw(raw: DeviceMemoryMapping) -> DeviceMemoryIo<'a, T> {
// TODO
loop {}
}
pub unsafe fn map(base: PhysicalAddress) -> Result<DeviceMemoryIo<'a, T>, Error> {
let inner = RawDeviceMemoryMapping::map(base, size_of::<T>())?;
let value = &*(inner.address as *const T);
Ok(DeviceMemoryIo {
inner: Arc::new(inner),
value,
})
}
}
impl<'a, T: ?Sized> Deref for DeviceMemoryIo<'a, T> {
type Target = T; type Target = T;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
unsafe { &*(self.mmio.base as *const T) } self.value
} }
} }
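
To show the intent behind `DeviceMemoryIo<T>` (a typed view over a mapped MMIO window), here is a hosted simulation that overlays a made-up `#[repr(C)]` register block on a plain buffer instead of real device memory; the register layout is purely hypothetical:

```rust
use core::ptr;

// Hypothetical register layout; a real driver would define its own.
#[allow(dead_code)]
#[repr(C)]
struct UartRegs {
    data: u32,
    status: u32,
}

fn main() {
    // Stand-in for a mapped MMIO window: just a local buffer here.
    let backing: [u32; 2] = [0xAB, 0x1];
    let regs = backing.as_ptr() as *const UartRegs;

    // MMIO-style access goes through volatile reads of individual fields.
    let status = unsafe { ptr::read_volatile(ptr::addr_of!((*regs).status)) };
    assert_eq!(status, 0x1);
}
```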

View File

@@ -35,7 +35,7 @@ unsafe impl GlobalAlloc for KernelAllocator {
match self.inner.lock().allocate_first_fit(layout) { match self.inner.lock().allocate_first_fit(layout) {
Ok(v) => v.as_ptr(), Ok(v) => v.as_ptr(),
Err(e) => { Err(e) => {
errorln!("Failed to allocate {:?}: {:?}", layout, e); // errorln!("Failed to allocate {:?}: {:?}", layout, e);
null_mut() null_mut()
} }
} }

View File

@@ -1,57 +1,75 @@
//! Memory management utilities and types // //! Memory management utilities and types
// use core::{alloc::Layout, mem::size_of}; // // use core::{alloc::Layout, mem::size_of};
//
// use core::{alloc::Layout, ffi::c_void, mem::size_of};
//
// use abi::error::Error;
//
// // use abi::error::Error;
// //
// use crate::arch::{Architecture, ArchitectureImpl /*, PlatformImpl*/};
//
// use self::table::AddressSpace;
// //
// // use self::table::AddressSpace;
//
// pub mod device;
use core::{alloc::Layout, ffi::c_void, mem::size_of}; use core::{alloc::Layout, ffi::c_void, mem::size_of, ops::Add};
use abi::error::Error; use abi::error::Error;
// use abi::error::Error; use crate::arch::{Architecture, ArchitectureImpl};
//
use crate::arch::{Architecture, ArchitectureImpl /*, PlatformImpl*/};
use self::table::AddressSpace;
//
// use self::table::AddressSpace;
pub mod address;
pub mod device; pub mod device;
pub mod heap; pub mod heap;
pub mod phys; pub mod phys;
pub mod pointer;
pub mod table; pub mod table;
/// Kernel's physical load address pub use address::PhysicalAddress;
// pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
/// Kernel's virtual memory mapping offset (i.e. kernel's virtual address is [KERNEL_PHYS_BASE] + use self::table::AddressSpace;
/// [KERNEL_VIRT_OFFSET])
pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET; pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
/// Interface for converting between address spaces. // pub mod phys;
/// //
/// # Safety // /// Kernel's physical load address
/// // // pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
/// An incorrect implementation can produce invalid address. // /// Kernel's virtual memory mapping offset (i.e. kernel's virtual address is [KERNEL_PHYS_BASE] +
pub unsafe trait ConvertAddress { // /// [KERNEL_VIRT_OFFSET])
/// Convert the address into a virtual one // pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
/// //
/// # Panics // /// Interface for converting between address spaces.
/// // ///
/// Panics if the address is already a virtual one // /// # Safety
/// // ///
/// # Safety // /// An incorrect implementation can produce invalid address.
/// // pub unsafe trait ConvertAddress {
/// An incorrect implementation can produce invalid address. // /// Convert the address into a virtual one
unsafe fn virtualize(self) -> Self; // ///
/// Convert the address into a physical one // /// # Panics
/// // ///
/// # Panics // /// Panics if the address is already a virtual one
/// // ///
/// Panics if the address is already a physical one // /// # Safety
/// // ///
/// # Safety // /// An incorrect implementation can produce invalid address.
/// // unsafe fn virtualize(self) -> Self;
/// An incorrect implementation can produce invalid address. // /// Convert the address into a physical one
unsafe fn physicalize(self) -> Self; // ///
} // /// # Panics
// ///
// /// Panics if the address is already a physical one
// ///
// /// # Safety
// ///
// /// An incorrect implementation can produce invalid address.
// unsafe fn physicalize(self) -> Self;
// }
//
/// Helper trait to allow cross-address space access to pointers /// Helper trait to allow cross-address space access to pointers
pub trait ForeignPointer: Sized { pub trait ForeignPointer: Sized {
/// Perform a volatile pointer write without dropping the old value. /// Perform a volatile pointer write without dropping the old value.
@@ -120,52 +138,6 @@ pub trait ForeignPointer: Sized {
) -> Result<&'a mut [Self], Error>; ) -> Result<&'a mut [Self], Error>;
} }
unsafe impl ConvertAddress for usize {
#[inline(always)]
unsafe fn virtualize(self) -> Self {
#[cfg(debug_assertions)]
if self > KERNEL_VIRT_OFFSET {
todo!();
}
self + KERNEL_VIRT_OFFSET
}
#[inline(always)]
unsafe fn physicalize(self) -> Self {
#[cfg(debug_assertions)]
if self < KERNEL_VIRT_OFFSET {
todo!();
}
self - KERNEL_VIRT_OFFSET
}
}
unsafe impl<T> ConvertAddress for *mut T {
#[inline(always)]
unsafe fn virtualize(self) -> Self {
(self as usize).virtualize() as Self
}
#[inline(always)]
unsafe fn physicalize(self) -> Self {
(self as usize).physicalize() as Self
}
}
unsafe impl<T> ConvertAddress for *const T {
#[inline(always)]
unsafe fn virtualize(self) -> Self {
(self as usize).virtualize() as Self
}
#[inline(always)]
unsafe fn physicalize(self) -> Self {
(self as usize).physicalize() as Self
}
}
impl<T> ForeignPointer for T { impl<T> ForeignPointer for T {
unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: T) { unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: T) {
// TODO check align // TODO check align
@@ -182,7 +154,7 @@ impl<T> ForeignPointer for T {
.translate(start_page) .translate(start_page)
.expect("Address is not mapped in the target address space"); .expect("Address is not mapped in the target address space");
let virt_ptr = (phys_page + page_offset).virtualize() as *mut T; let virt_ptr = phys_page.add(page_offset).virtualize_raw() as *mut T;
virt_ptr.write_volatile(value); virt_ptr.write_volatile(value);
} }
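
The foreign write above splits the target address into a page base (translated through the other address space) and an in-page offset. The split itself is plain mask arithmetic:

```rust
const PAGE_SIZE: usize = 0x1000;

// Split a virtual address into (page base, offset within page).
fn split_page(addr: usize) -> (usize, usize) {
    (addr & !(PAGE_SIZE - 1), addr & (PAGE_SIZE - 1))
}

fn main() {
    let (page, offset) = split_page(0x7FFF_1234_5678);
    assert_eq!(page, 0x7FFF_1234_5000);
    assert_eq!(offset, 0x678);
    // The kernel then translates `page` through the target AddressSpace and
    // writes through the physical window at `translated + offset`.
}
```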

View File

@@ -3,89 +3,132 @@ use core::mem::size_of; use core::mem::size_of;
use abi::error::Error; use abi::error::Error;
use super::{Page, PageUsage, PhysicalMemoryStats}; use crate::mem::{
address::{FromRaw, IntoRaw},
pointer::{PhysicalRef, PhysicalRefMut},
PhysicalAddress,
};
pub type BitmapWord = u64;
pub(super) const BITMAP_WORD_SIZE: usize = size_of::<BitmapWord>() * 8;
pub(super) const BITMAP_PAGE_COUNT: usize = 256;
const HUGE_PAGE_WORD_COUNT: usize = 512 / BITMAP_WORD_SIZE;
pub(super) const TRACKED_PAGE_LIMIT: usize = (BITMAP_PAGE_COUNT * 4096) * 8;
/// Physical memory management interface /// Physical memory management interface
pub struct PhysicalMemoryManager { pub struct PhysicalMemoryManager {
pages: &'static mut [Page], bitmap: PhysicalRefMut<'static, [u64]>,
offset: usize, last_free_bit: usize,
stats: PhysicalMemoryStats, page_count: usize,
} }
impl PhysicalMemoryManager { impl PhysicalMemoryManager {
/// Constructs a [PhysicalMemoryManager] with page tracking array placed at given pub unsafe fn new(
/// `base`..`base+size` range. Physical addresses allocated are offset by the given value. bitmap_phys_base: PhysicalAddress,
/// page_count: usize,
/// # Safety ) -> PhysicalMemoryManager {
/// // let bitmap_addr = bitmap_phys_base.virtualize();
/// Addresses are not checked. The caller is responsible for making sure (base, size) ranges do let bitmap_len = (page_count + (BITMAP_WORD_SIZE - 1)) / BITMAP_WORD_SIZE;
/// not alias/overlap, they're accessible through virtual memory and that the offset is a let mut bitmap = PhysicalRefMut::<'static, u64>::map_slice(bitmap_phys_base, bitmap_len);
/// meaningful value. // let bitmap = core::slice::from_raw_parts_mut(bitmap_addr as *mut BitmapWord, bitmap_len);
pub unsafe fn new(offset: usize, base: usize, size: usize) -> PhysicalMemoryManager {
// TODO check alignment
let page_count = size / size_of::<Page>();
let pages = core::slice::from_raw_parts_mut(base as *mut _, page_count);
for page in pages.iter_mut() { bitmap.fill(BitmapWord::MAX);
*page = Page {
usage: PageUsage::Reserved,
refcount: 0,
};
}
PhysicalMemoryManager { Self {
pages, bitmap,
offset, page_count,
stats: PhysicalMemoryStats { last_free_bit: 0,
available_pages: 0,
used_pages: 0,
},
} }
} }
#[inline]
fn mark_alloc(&mut self, index: usize) {
self.bitmap[index / BITMAP_WORD_SIZE] |= 1 << (index & (BITMAP_WORD_SIZE - 1));
}
#[inline]
fn mark_free(&mut self, index: usize) {
self.bitmap[index / BITMAP_WORD_SIZE] &= !(1 << (index & (BITMAP_WORD_SIZE - 1)));
}
#[inline(always)]
fn is_alloc(&self, index: usize) -> bool {
self.bitmap[index / BITMAP_WORD_SIZE] & (1 << (index & (BITMAP_WORD_SIZE - 1))) != 0
}
/// Allocates a single page, marking it as used with `usage` /// Allocates a single page, marking it as used with `usage`
pub fn alloc_page(&mut self, usage: PageUsage) -> Result<usize, Error> { pub fn alloc_page(&mut self) -> Result<PhysicalAddress, Error> {
assert_ne!(usage, PageUsage::Available); for i in self.last_free_bit..self.page_count {
assert_ne!(usage, PageUsage::Reserved); if self.is_alloc(i) {
continue;
for index in 0..self.pages.len() {
if self.pages[index].usage == PageUsage::Available {
self.pages[index].usage = PageUsage::Used;
self.stats.add_allocated_pages(1, usage);
return Ok(index * 4096 + self.offset);
} }
self.last_free_bit = i + 1;
self.mark_alloc(i);
return Ok(PhysicalAddress::from_raw(i * 0x1000));
} }
Err(Error::OutOfMemory) if self.last_free_bit != 0 {
self.last_free_bit = 0;
self.alloc_page()
} else {
loop {}
}
} }
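
`alloc_page` scans from a saved cursor and retries once from the start of the bitmap if the tail is full. The same wrap-around search in miniature:

```rust
// Find the first free entry at or after `cursor`, wrapping around once.
fn find_free(used: &[bool], cursor: usize) -> Option<usize> {
    (cursor..used.len())
        .chain(0..cursor)
        .find(|&i| !used[i])
}

fn main() {
    let used = [true, false, true, true];
    // Starting past the only free slot still finds it after wrapping.
    assert_eq!(find_free(&used, 2), Some(1));
    assert_eq!(find_free(&[true; 4], 0), None);
}
```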
/// Allocates a contiguous range of physical pages, marking it as used with `usage` pub fn alloc_2m_page(&mut self) -> Result<PhysicalAddress, Error> {
pub fn alloc_contiguous_pages( let aligned_bit = self.last_free_bit & !511;
&mut self,
count: usize,
usage: PageUsage,
) -> Result<usize, Error> {
assert_ne!(usage, PageUsage::Available);
assert_ne!(usage, PageUsage::Reserved);
assert_ne!(count, 0);
'l0: for i in 0..self.pages.len() { 'l0: for i in (aligned_bit..self.page_count).step_by(512) {
for j in 0..count { for j in 0..HUGE_PAGE_WORD_COUNT {
if self.pages[i + j].usage != PageUsage::Available { if self.bitmap[i / BITMAP_WORD_SIZE + j] != 0 {
continue 'l0; continue 'l0;
} }
} }
for j in 0..count {
let page = &mut self.pages[i + j]; for j in 0..HUGE_PAGE_WORD_COUNT {
assert!(page.usage == PageUsage::Available); self.bitmap[i / BITMAP_WORD_SIZE + j] = BitmapWord::MAX;
page.usage = usage;
page.refcount = 1;
} }
self.stats.add_allocated_pages(count, usage); self.last_free_bit = i + 512;
return Ok(self.offset + i * 0x1000);
return Ok(PhysicalAddress::from_raw(i * 0x1000));
} }
Err(Error::OutOfMemory) if self.last_free_bit != 0 {
self.last_free_bit = 0;
self.alloc_2m_page()
} else {
loop {}
}
}
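
A 2 MiB allocation spans 512 contiguous 4 KiB pages, i.e. exactly eight 64-bit bitmap words starting at a 512-page-aligned bit, which is what the `& !511` and `HUGE_PAGE_WORD_COUNT` above encode:

```rust
fn main() {
    const PAGES_PER_2M: usize = 512;  // 2 MiB / 4 KiB
    const BITMAP_WORD_SIZE: usize = 64;
    const HUGE_PAGE_WORD_COUNT: usize = PAGES_PER_2M / BITMAP_WORD_SIZE;
    assert_eq!(HUGE_PAGE_WORD_COUNT, 8);

    // The search starts at the previous cursor rounded down to a 512-page boundary.
    let last_free_bit = 1000usize;
    let aligned_bit = last_free_bit & !(PAGES_PER_2M - 1);
    assert_eq!(aligned_bit, 512);
}
```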
/// Allocates a contiguous range of physical pages
pub fn alloc_contiguous_pages(&mut self, count: usize) -> Result<PhysicalAddress, Error> {
'l0: for i in self.last_free_bit..self.page_count {
for j in 0..count {
if self.is_alloc(i + j) {
continue 'l0;
}
}
for j in 0..count {
self.mark_alloc(i + j);
}
self.last_free_bit = i + count;
return Ok(PhysicalAddress::from_raw(i * 0x1000));
}
if self.last_free_bit != 0 {
self.last_free_bit = 0;
self.alloc_contiguous_pages(count)
} else {
loop {}
}
} }
/// Deallocates a physical memory page. /// Deallocates a physical memory page.
@@ -93,15 +136,11 @@ impl PhysicalMemoryManager {
/// # Safety /// # Safety
/// ///
/// `addr` must be a page-aligned physical address previously allocated by this implementation. /// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(&mut self, addr: usize) { pub unsafe fn free_page(&mut self, page: PhysicalAddress) {
assert!(addr > self.offset); let index = IntoRaw::<usize>::into_raw(page) / 0x1000;
let index = (addr - self.offset) / 0x1000;
let page = &mut self.pages[index];
assert_eq!(page.usage, PageUsage::Used);
self.stats.add_freed_pages(1, page.usage); assert!(self.is_alloc(index));
self.mark_free(index);
page.usage = PageUsage::Available;
} }
/// Marks a previously reserved page as available. /// Marks a previously reserved page as available.
@@ -109,19 +148,10 @@ impl PhysicalMemoryManager {
/// # Panics /// # Panics
/// ///
/// Will panic if the address does not point to a valid, reserved (and unallocated) page. /// Will panic if the address does not point to a valid, reserved (and unallocated) page.
pub fn add_available_page(&mut self, addr: usize) { pub fn add_available_page(&mut self, page: PhysicalAddress) {
assert!(addr >= self.offset); let index = IntoRaw::<usize>::into_raw(page) / 0x1000;
let index = (addr - self.offset) / 4096;
assert_eq!(self.pages[index].usage, PageUsage::Reserved); assert!(self.is_alloc(index));
assert_eq!(self.pages[index].refcount, 0); self.mark_free(index);
self.stats.add_available_pages(1);
self.pages[index].usage = PageUsage::Available;
}
/// Returns a reference to physical memory stats info
pub fn stats(&self) -> &PhysicalMemoryStats {
&self.stats
} }
} }

View File

@@ -1,135 +1,164 @@
//! Physical memory management facilities use core::{iter::StepBy, ops::Range};
use core::{iter::StepBy, mem::size_of, ops::Range};
use abi::error::Error; use abi::error::Error;
use kernel_util::util::OneTimeInit; use kernel_util::util::OneTimeInit;
use crate::{ use crate::{
debug::LogLevel, arch::{Architecture, ARCHITECTURE},
mem::{ mem::phys::reserved::is_reserved,
phys::reserved::{is_reserved, reserve_region},
ConvertAddress, /*, KERNEL_PHYS_BASE */
},
sync::IrqSafeSpinlock, sync::IrqSafeSpinlock,
}; };
use self::manager::PhysicalMemoryManager; use self::{
manager::{PhysicalMemoryManager, BITMAP_PAGE_COUNT, BITMAP_WORD_SIZE, TRACKED_PAGE_LIMIT},
reserved::reserve_region,
};
// Enumerating lots of pages is slow and I'm too lazy right now to write a better algorithm, so use super::{address::FromRaw, PhysicalAddress};
// capping the page count helps
const PHYS_MEMORY_PAGE_CAP: usize = 65536;
pub mod manager; // //! Physical memory management facilities
// use core::{iter::StepBy, mem::size_of, ops::Range};
//
// use abi::error::Error;
// use kernel_util::util::OneTimeInit;
//
// use crate::{
// debug::LogLevel,
// mem::{
// phys::reserved::{is_reserved, reserve_region},
// ConvertAddress, /*, KERNEL_PHYS_BASE */
// },
// sync::IrqSafeSpinlock,
// };
//
// use self::manager::PhysicalMemoryManager;
//
// // Enumerating lots of pages is slow and I'm too lazy right now to write a better algorithm, so
// // capping the page count helps
// const PHYS_MEMORY_PAGE_CAP: usize = 65536;
//
// 8 * 4096 bits per page, 1 page per bit
const MEMORY_UPPER_LIMIT: PhysicalAddress = PhysicalAddress::from_raw(TRACKED_PAGE_LIMIT * 4096);
mod manager;
pub mod reserved; pub mod reserved;
//
/// Contains information about the physical memory usage // /// Contains information about the physical memory usage
#[derive(Clone, Copy, Debug)] // #[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryStats { // pub struct PhysicalMemoryStats {
/// Number of pages available for allocation // /// Number of pages available for allocation
pub available_pages: usize, // pub available_pages: usize,
/// Number of pages being used // /// Number of pages being used
pub used_pages: usize, // pub used_pages: usize,
} // }
//
/// Represents the way in which the page is used (or not) // /// Represents the way in which the page is used (or not)
#[derive(PartialEq, Clone, Copy, Debug)] // #[derive(PartialEq, Clone, Copy, Debug)]
#[repr(u32)] // #[repr(u32)]
pub enum PageUsage { // pub enum PageUsage {
/// Page is not available for allocation or use // /// Page is not available for allocation or use
Reserved = 0, // Reserved = 0,
/// Regular page available for allocation // /// Regular page available for allocation
Available, // Available,
/// Page is used by some kernel facility // /// Page is used by some kernel facility
Used, // Used,
} // }
//
/// Page descriptor structure for the page management array // /// Page descriptor structure for the page management array
#[repr(C)] // #[repr(C)]
pub struct Page { // pub struct Page {
usage: PageUsage, // usage: PageUsage,
refcount: u32, // refcount: u32,
} // }
//
/// Defines a usable memory region /// Defines a usable memory region
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion { pub struct PhysicalMemoryRegion {
/// Start of the region /// Start of the region
pub base: usize, pub base: PhysicalAddress,
/// Length of the region /// Length of the region
pub size: usize, pub size: usize,
} }
impl PhysicalMemoryRegion { impl PhysicalMemoryRegion {
/// Returns the end address of the region /// Returns the end address of the region
pub const fn end(&self) -> usize { pub const fn end(&self) -> PhysicalAddress {
self.base + self.size self.base.add(self.size)
} }
/// Returns an address range covered by the region /// Returns an address range covered by the region
pub fn range(&self) -> Range<usize> { pub fn range(&self) -> Range<PhysicalAddress> {
self.base..self.end() self.base..self.end()
} }
/// Provides an iterator over the pages in the region pub fn clamp(self) -> Option<(PhysicalAddress, PhysicalAddress)> {
pub fn pages(&self) -> StepBy<Range<usize>> { let start = self.base.min(MEMORY_UPPER_LIMIT);
self.range().step_by(0x1000) let end = self.end().min(MEMORY_UPPER_LIMIT);
if start < end {
Some((start, end))
} else {
None
}
} }
} }
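
A small sketch of what `clamp` above does to regions relative to the tracked-memory ceiling; the limit value here is a placeholder, not `MEMORY_UPPER_LIMIT` itself:

```rust
// Sketch: clamp a (base, size) region against an upper tracking limit.
fn clamp(base: u64, size: u64, limit: u64) -> Option<(u64, u64)> {
    let start = base.min(limit);
    let end = (base + size).min(limit);
    (start < end).then(|| (start, end))
}

fn main() {
    let limit = 8 << 30; // e.g. 8 GiB of trackable memory
    // Region fully below the limit: unchanged.
    assert_eq!(clamp(0x10_0000, 0x10_0000, limit), Some((0x10_0000, 0x20_0000)));
    // Region straddling the limit: truncated at the ceiling.
    assert_eq!(clamp(limit - 0x1000, 0x10_0000, limit), Some((limit - 0x1000, limit)));
    // Region entirely above the limit: dropped.
    assert_eq!(clamp(limit + 0x1000, 0x1000, limit), None);
}
```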
//
impl PhysicalMemoryStats { // impl PhysicalMemoryStats {
/// Handles "alloc" cases of the memory manager // /// Handles "alloc" cases of the memory manager
pub fn add_allocated_pages(&mut self, count: usize, _usage: PageUsage) { // pub fn add_allocated_pages(&mut self, count: usize, _usage: PageUsage) {
assert!(self.available_pages >= count); // assert!(self.available_pages >= count);
self.available_pages -= count; // self.available_pages -= count;
self.used_pages += count; // self.used_pages += count;
} // }
//
/// Handles "free" cases of the memory manager // /// Handles "free" cases of the memory manager
pub fn add_freed_pages(&mut self, count: usize, _usage: PageUsage) { // pub fn add_freed_pages(&mut self, count: usize, _usage: PageUsage) {
assert!(self.used_pages >= count); // assert!(self.used_pages >= count);
self.used_pages -= count; // self.used_pages -= count;
self.available_pages += count; // self.available_pages += count;
} // }
//
/// Increases the available pages counter // /// Increases the available pages counter
pub fn add_available_pages(&mut self, count: usize) { // pub fn add_available_pages(&mut self, count: usize) {
self.available_pages += count; // self.available_pages += count;
} // }
//
/// Prints out the statistics into specified log level // /// Prints out the statistics into specified log level
pub fn dump(&self, level: LogLevel) { // pub fn dump(&self, level: LogLevel) {
log_print_raw!(level, "+++ Physical memory stats +++\n"); // log_print_raw!(level, "+++ Physical memory stats +++\n");
log_print_raw!( // log_print_raw!(
level, // level,
"Available: {}K ({} pages)\n", // "Available: {}K ({} pages)\n",
self.available_pages * 4, // self.available_pages * 4,
self.available_pages // self.available_pages
); // );
log_print_raw!( // log_print_raw!(
level, // level,
"Used: {}K ({} pages)\n", // "Used: {}K ({} pages)\n",
self.used_pages * 4, // self.used_pages * 4,
self.used_pages // self.used_pages
); // );
log_print_raw!(level, "-----------------------------\n"); // log_print_raw!(level, "-----------------------------\n");
} // }
} // }
//
/// Global physical memory manager /// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> = pub static PHYSICAL_MEMORY: OneTimeInit<IrqSafeSpinlock<PhysicalMemoryManager>> =
OneTimeInit::new(); OneTimeInit::new();
/// Allocates a single physical page from the global manager /// Allocates a single physical page from the global manager
pub fn alloc_page(usage: PageUsage) -> Result<usize, Error> { pub fn alloc_page() -> Result<PhysicalAddress, Error> {
PHYSICAL_MEMORY.get().lock().alloc_page(usage) PHYSICAL_MEMORY.get().lock().alloc_page()
} }
/// Allocates a contiguous range of physical pages from the global manager /// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize, usage: PageUsage) -> Result<usize, Error> { pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
PHYSICAL_MEMORY PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
.get() }
.lock()
.alloc_contiguous_pages(count, usage) pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
PHYSICAL_MEMORY.get().lock().alloc_2m_page()
} }
/// Deallocates a physical memory page. /// Deallocates a physical memory page.
@@ -137,26 +166,26 @@ pub fn alloc_pages_contiguous(count: usize, usage: PageUsage) -> Result<usize, E
/// # Safety /// # Safety
/// ///
/// `addr` must be a page-aligned physical address previously allocated by this implementation. /// `addr` must be a page-aligned physical address previously allocated by this implementation.
pub unsafe fn free_page(addr: usize) { pub unsafe fn free_page(addr: PhysicalAddress) {
PHYSICAL_MEMORY.get().lock().free_page(addr) PHYSICAL_MEMORY.get().lock().free_page(addr)
} }
fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>( fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
it: I, it: I,
) -> Option<(usize, usize)> { ) -> Option<(PhysicalAddress, PhysicalAddress)> {
let mut start = usize::MAX; let mut start = PhysicalAddress::MAX;
let mut end = usize::MIN; let mut end = PhysicalAddress::MIN;
for reg in it { for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
if reg.base < start { if reg_start < start {
start = reg.base; start = reg_start;
} }
if reg.base + reg.size > end { if reg_end > end {
end = reg.base + reg.size; end = reg_end;
} }
} }
if start == usize::MAX || end == usize::MIN { if start == PhysicalAddress::MAX || end == PhysicalAddress::MIN {
None None
} else { } else {
Some((start, end)) Some((start, end))
@@ -166,12 +195,12 @@ fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>( fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
it: I, it: I,
count: usize, count: usize,
) -> Option<usize> { ) -> Option<PhysicalAddress> {
for region in it { for (reg_start, reg_end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
let mut collected = 0; let mut collected = 0;
let mut base_addr = None; let mut base_addr = None;
for addr in region.pages() { for addr in (reg_start..reg_end).step_by(0x1000) {
if is_reserved(addr) { if is_reserved(addr) {
collected = 0; collected = 0;
base_addr = None; base_addr = None;
@@ -188,7 +217,7 @@ fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
} }
todo!() todo!()
} }
//
/// Initializes physical memory manager from given available memory region iterator. /// Initializes physical memory manager from given available memory region iterator.
/// ///
/// 1. Finds a non-reserved range to place the page tracking array. /// 1. Finds a non-reserved range to place the page tracking array.
@@ -201,68 +230,102 @@ fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>( pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
it: I, it: I,
) -> Result<(), Error> { ) -> Result<(), Error> {
// Map the physical memory
let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap(); let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();
ARCHITECTURE.map_physical_memory(it.clone(), phys_start, phys_end)?;
let total_count = (phys_end - phys_start) / 0x1000; let total_count = (phys_end - phys_start) / 0x1000;
let pages_array_size = total_count * size_of::<Page>(); let page_bitmap_size = (total_count + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
let page_bitmap_page_count = (page_bitmap_size + 0xFFF) / 0x1000;
debugln!("Initializing physical memory manager");
debugln!("Total tracked pages: {}", total_count);
// Reserve memory regions from which allocation is forbidden
reserve_region("kernel", kernel_physical_memory_region()); reserve_region("kernel", kernel_physical_memory_region());
let pages_array_base = find_contiguous_region(it.clone(), (pages_array_size + 0xFFF) / 0x1000) let page_bitmap_phys_base = find_contiguous_region(it.clone(), page_bitmap_page_count).unwrap();
.ok_or(Error::OutOfMemory)?;
debugln!(
"Placing page tracking at {:#x}",
pages_array_base.virtualize()
);
reserve_region( reserve_region(
"pages", "page-bitmap",
PhysicalMemoryRegion { PhysicalMemoryRegion {
base: pages_array_base, base: page_bitmap_phys_base,
size: (pages_array_size + 0xFFF) & !0xFFF, size: page_bitmap_page_count * 0x1000,
}, },
); );
let mut manager = let mut manager = PhysicalMemoryManager::new(page_bitmap_phys_base, total_count);
PhysicalMemoryManager::new(phys_start, pages_array_base.virtualize(), pages_array_size);
let mut page_count = 0;
for region in it { for (start, end) in it.into_iter().filter_map(PhysicalMemoryRegion::clamp) {
if page_count >= PHYS_MEMORY_PAGE_CAP { for page in (start..end).step_by(0x1000) {
break;
}
for page in region.pages() {
if is_reserved(page) { if is_reserved(page) {
continue; continue;
} }
manager.add_available_page(page); manager.add_available_page(page);
page_count += 1;
if page_count >= PHYS_MEMORY_PAGE_CAP {
break;
}
} }
} }
infoln!("{} available pages ({}KiB)", page_count, page_count * 4);
PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager)); PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));
Ok(()) Ok(())
} }
//
// debugln!("Initializing physical memory manager");
// debugln!("Total tracked pages: {}", total_count);
//
// // Reserve memory regions from which allocation is forbidden
// reserve_region("kernel", kernel_physical_memory_region());
//
// let pages_array_base = find_contiguous_region(it.clone(), (pages_array_size + 0xFFF) / 0x1000)
// .ok_or(Error::OutOfMemory)?;
//
// debugln!(
// "Placing page tracking at {:#x}",
// pages_array_base.virtualize()
// );
//
// reserve_region(
// "pages",
// PhysicalMemoryRegion {
// base: pages_array_base,
// size: (pages_array_size + 0xFFF) & !0xFFF,
// },
// );
//
// let mut manager =
// PhysicalMemoryManager::new(phys_start, pages_array_base.virtualize(), pages_array_size);
// let mut page_count = 0;
//
// for region in it {
// if page_count >= PHYS_MEMORY_PAGE_CAP {
// break;
// }
//
// for page in region.pages() {
// if is_reserved(page) {
// continue;
// }
//
// manager.add_available_page(page);
// page_count += 1;
//
// if page_count >= PHYS_MEMORY_PAGE_CAP {
// break;
// }
// }
// }
//
// infoln!("{} available pages ({}KiB)", page_count, page_count * 4);
//
// PHYSICAL_MEMORY.init(IrqSafeSpinlock::new(manager));
// Ok(())
// }
//
fn kernel_physical_memory_region() -> PhysicalMemoryRegion { fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
extern "C" { extern "C" {
static __kernel_phys_start: u8; static __kernel_phys_start: u8;
static __kernel_size: u8; static __kernel_size: u8;
} }
let base = absolute_address!(__kernel_phys_start); let base = PhysicalAddress::from_raw(absolute_address!(__kernel_phys_start));
let size = absolute_address!(__kernel_size); let size = absolute_address!(__kernel_size);
PhysicalMemoryRegion { base, size } PhysicalMemoryRegion { base, size }

View File

@@ -2,6 +2,8 @@
use kernel_util::util::StaticVector; use kernel_util::util::StaticVector;
use crate::mem::PhysicalAddress;
use super::PhysicalMemoryRegion; use super::PhysicalMemoryRegion;
static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 8> = StaticVector::new(); static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 8> = StaticVector::new();
@@ -12,18 +14,18 @@ static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 8> = StaticVector
/// ///
/// Can only be called from initialization code **before** physical memory manager is initialized. /// Can only be called from initialization code **before** physical memory manager is initialized.
pub unsafe fn reserve_region(reason: &str, region: PhysicalMemoryRegion) { pub unsafe fn reserve_region(reason: &str, region: PhysicalMemoryRegion) {
debugln!( // debugln!(
"Reserve {:?} memory: {:#x}..{:#x}", // "Reserve {:?} memory: {:#x}..{:#x}",
reason, // reason,
region.base, // region.base,
region.end() // region.end()
); // );
RESERVED_MEMORY.push(region); RESERVED_MEMORY.push(region);
} }
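Usage sketch (not part of the commit): reserving a firmware-provided range before the physical memory manager is initialized. The initrd region and the helper name are purely illustrative.

// Hypothetical example: keep a bootloader-provided initrd out of the allocator.
// Must run before the physical memory manager is initialized.
unsafe fn reserve_initrd(initrd_base: PhysicalAddress, initrd_size: usize) {
    reserve_region(
        "initrd",
        PhysicalMemoryRegion {
            base: initrd_base,
            size: initrd_size,
        },
    );
}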
/// Returns `true` if `addr` refers to any reserved memory region /// Returns `true` if `addr` refers to any reserved memory region
pub fn is_reserved(addr: usize) -> bool { pub fn is_reserved(addr: PhysicalAddress) -> bool {
for region in unsafe { RESERVED_MEMORY.iter() } { for region in unsafe { RESERVED_MEMORY.iter() } {
if region.range().contains(&addr) { if region.range().contains(&addr) {
return true; return true;

132 src/mem/pointer.rs Normal file
View File

@@ -0,0 +1,132 @@
use core::{
alloc::Layout,
fmt,
ops::{Deref, DerefMut},
};
use super::{address::AsPhysicalAddress, PhysicalAddress};
#[derive(Clone, Copy, PartialEq, PartialOrd, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalPointer<T: ?Sized> {
pointer: *mut T,
}
#[repr(transparent)]
pub struct PhysicalRef<'a, T: ?Sized> {
value: &'a T,
}
#[repr(transparent)]
pub struct PhysicalRefMut<'a, T: ?Sized> {
value: &'a mut T,
}
// PhysicalPointer<T> wrapper for direct access to any memory location
impl<T: ?Sized> PhysicalPointer<T> {
pub fn into_address(self) -> usize {
self.pointer.addr()
}
}
impl<T: Sized> PhysicalPointer<T> {
pub unsafe fn write_unaligned(self, value: T) {
// Forward the unaligned write to the underlying raw pointer
self.pointer.write_unaligned(value);
}
}
impl<T: ?Sized> AsPhysicalAddress for PhysicalPointer<T> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
todo!()
}
}
// PhysicalRefMut<T> wrapper for safe mutable access to physical addresses
impl<'a, T: Sized> PhysicalRefMut<'a, T> {
pub unsafe fn map(physical: PhysicalAddress) -> PhysicalRefMut<'a, T> {
let value = virtualize_raw(physical);
PhysicalRefMut { value }
}
pub unsafe fn map_slice(physical: PhysicalAddress, len: usize) -> PhysicalRefMut<'a, [T]> {
let value = virtualize_slice_raw(physical, len);
PhysicalRefMut { value }
}
}
impl<T: ?Sized> PhysicalRefMut<'_, T> {
#[inline]
pub fn as_address(&self) -> usize {
(self.value as *const T).addr()
}
}
impl<T: ?Sized> AsPhysicalAddress for PhysicalRefMut<'_, T> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
PhysicalAddress::from_virtualized(self.as_address())
}
}
impl<T: ?Sized> Deref for PhysicalRefMut<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.value
}
}
impl<T: ?Sized> DerefMut for PhysicalRefMut<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}
impl<T: ?Sized> fmt::Pointer for PhysicalRefMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&self.value, f)
}
}
// PhysicalRef<T>: same as PhysicalRefMut<T>, except immutable
impl<'a, T: Sized> PhysicalRef<'a, T> {
pub unsafe fn map(physical: PhysicalAddress) -> PhysicalRef<'a, T> {
let value = virtualize_raw(physical);
PhysicalRef { value }
}
}
impl<T: ?Sized> PhysicalRef<'_, T> {
#[inline]
pub fn as_address(&self) -> usize {
(self.value as *const T).addr()
}
}
impl<T: ?Sized> AsPhysicalAddress for PhysicalRef<'_, T> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
PhysicalAddress::from_virtualized(self.as_address())
}
}
impl<T: ?Sized> Deref for PhysicalRef<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.value
}
}
unsafe fn virtualize_raw<'a, T: Sized>(physical: PhysicalAddress) -> &'a mut T {
// TODO check align
let address = physical.virtualize_raw();
&mut *(address as *mut T)
}
unsafe fn virtualize_slice_raw<'a, T: Sized>(physical: PhysicalAddress, len: usize) -> &'a mut [T] {
// TODO check align
let address = physical.virtualize_raw();
core::slice::from_raw_parts_mut(address as *mut T, len)
}
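Usage sketch (not part of the commit): the typical pattern for these wrappers, mirroring how proc/elf.rs below maps and fills freshly allocated pages. The helper name is hypothetical.

use crate::mem::{pointer::PhysicalRefMut, PhysicalAddress};

// Zero a 4KiB physical page through its direct-mapped alias.
unsafe fn zero_page(page: PhysicalAddress) {
    let mut bytes: PhysicalRefMut<'_, [u8]> = PhysicalRefMut::map_slice(page, 0x1000);
    bytes.fill(0);
}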

View File

@@ -1,13 +1,17 @@
//! Virtual memory table interface //! Virtual memory table interface
use core::ops::{Deref, DerefMut};
use abi::error::Error; use abi::error::Error;
use bitflags::bitflags; use bitflags::bitflags;
use cfg_if::cfg_if; use cfg_if::cfg_if;
use super::PhysicalAddress;
cfg_if! { cfg_if! {
if #[cfg(target_arch = "aarch64")] { if #[cfg(target_arch = "aarch64")] {
pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable}; pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
} else if #[cfg(target_arch = "x86_64")] { } else if #[cfg(target_arch = "x86_64")] {
pub use crate::arch::x86_64::table::{AddressSpace, PageEntry, PageTable}; pub use crate::arch::x86_64::mem::table::{AddressSpace, PageEntry, PageTable};
} }
} }
@@ -36,7 +40,12 @@ pub trait VirtualMemoryManager {
) -> Result<usize, Error>; ) -> Result<usize, Error>;
/// Insert a single 4KiB-page translation mapping into the table /// Insert a single 4KiB-page translation mapping into the table
fn map_page(&self, virt: usize, phys: usize, attrs: MapAttributes) -> Result<(), Error>; fn map_page(
&self,
virt: usize,
phys: PhysicalAddress,
attrs: MapAttributes,
) -> Result<(), Error>;
/// Releases the virtual memory region from the address space and the pages it refers to /// Releases the virtual memory region from the address space and the pages it refers to
fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error>; fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error>;
@@ -46,17 +55,23 @@ pub trait VirtualMemoryManager {
pub trait NextPageTable { pub trait NextPageTable {
/// Type for the next-level page table /// Type for the next-level page table
type NextLevel; type NextLevel;
type TableRef: Deref<Target = Self::NextLevel>;
type TableRefMut: DerefMut<Target = Self::NextLevel>;
/// Tries looking up a next-level table at given index, allocating and mapping one if it is not /// Tries looking up a next-level table at given index, allocating and mapping one if it is not
/// present there /// present there
fn get_mut_or_alloc(&mut self, index: usize) -> Result<&'static mut Self::NextLevel, Error>; fn get_mut_or_alloc(&mut self, index: usize) -> Result<Self::TableRefMut, Error>;
/// Returns a mutable reference to a next-level table at `index`, if present /// Returns a mutable reference to a next-level table at `index`, if present
fn get_mut(&mut self, index: usize) -> Option<&'static mut Self::NextLevel>; fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut>;
fn get(&self, index: usize) -> Option<Self::TableRef>;
} }
/// Interface for a single level of address translation /// Interface for a single level of address translation
#[const_trait] #[const_trait]
pub trait EntryLevel: Copy { pub trait EntryLevel: Copy {
const SIZE: usize;
/// Returns the index into a page table for a given address /// Returns the index into a page table for a given address
fn index(addr: usize) -> usize; fn index(addr: usize) -> usize;
/// Returns the offset of an address from the page start at current level /// Returns the offset of an address from the page start at current level
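Sketch (not part of the commit) of what a leaf-level EntryLevel implementation might look like. The type name L3 and the offset method name are assumptions; the real per-architecture implementations are not shown in this hunk.

#[derive(Clone, Copy)]
struct L3;

impl const EntryLevel for L3 {
    // A leaf entry covers one 4KiB page
    const SIZE: usize = 0x1000;

    // 512 entries per table: bits 12..21 select the slot
    fn index(addr: usize) -> usize {
        (addr >> 12) & 0x1FF
    }

    // Offset of `addr` within its 4KiB page (method name is an assumption)
    fn page_offset(addr: usize) -> usize {
        addr & 0xFFF
    }
}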

View File

@@ -1,4 +1,6 @@
//! ELF binary format support //! ELF binary format support
use core::ops::DerefMut;
use elf::{ use elf::{
abi::{PF_W, PF_X, PT_LOAD}, abi::{PF_W, PF_X, PT_LOAD},
endian::AnyEndian, endian::AnyEndian,
@@ -8,9 +10,9 @@ use vfs::{FileRef, Read, Seek};
use yggdrasil_abi::{error::Error, io::SeekFrom}; use yggdrasil_abi::{error::Error, io::SeekFrom};
use crate::mem::{ use crate::mem::{
phys::{self, PageUsage}, phys,
pointer::PhysicalRefMut,
table::{AddressSpace, MapAttributes, VirtualMemoryManager}, table::{AddressSpace, MapAttributes, VirtualMemoryManager},
ConvertAddress,
}; };
#[derive(Clone, Copy)] #[derive(Clone, Copy)]
@@ -64,7 +66,7 @@ fn load_bytes<F>(
elf_attrs: u32, elf_attrs: u32,
) -> Result<(), Error> ) -> Result<(), Error>
where where
F: FnMut(usize, &mut [u8]) -> Result<(), Error>, F: FnMut(usize, PhysicalRefMut<'_, [u8]>) -> Result<(), Error>,
{ {
// TODO check for crazy addresses here // TODO check for crazy addresses here
@@ -87,19 +89,22 @@ where
let virt_page = dst_page_aligned + page_idx * 0x1000; let virt_page = dst_page_aligned + page_idx * 0x1000;
assert_eq!(virt_page & 0xFFF, 0); assert_eq!(virt_page & 0xFFF, 0);
if space.translate(virt_page).is_some() { if let Some(page) = space.translate(virt_page) {
// Handle these cases // TODO Handle these cases
warnln!("Page {:#x} is already mapped to {:#x}", virt_page, page);
todo!(); todo!();
} }
let phys_page = phys::alloc_page(PageUsage::Used)?; let phys_page = phys::alloc_page()?;
space.map_page(virt_page, phys_page, attrs)?; space.map_page(virt_page, phys_page, attrs)?;
debugln!("Map {:#x} -> {:#x}", virt_page, phys_page);
let dst_slice = unsafe { let dst_slice = unsafe { PhysicalRefMut::map_slice(phys_page.add(page_off), count) };
let addr = (phys_page + page_off).virtualize(); // let dst_slice = unsafe {
// let addr = (phys_page + page_off).virtualize();
core::slice::from_raw_parts_mut(addr as *mut u8, count) // core::slice::from_raw_parts_mut(addr as *mut u8, count)
}; // };
src(off, dst_slice)?; src(off, dst_slice)?;
@@ -127,10 +132,10 @@ pub fn load_elf_from_file(space: &AddressSpace, file: FileRef) -> Result<usize,
load_bytes( load_bytes(
space, space,
phdr.p_vaddr as usize, phdr.p_vaddr as usize,
|off, dst| { |off, mut dst| {
let mut source = file.file.borrow_mut(); let mut source = file.file.borrow_mut();
source.seek(SeekFrom::Start(phdr.p_offset + off as u64))?; source.seek(SeekFrom::Start(phdr.p_offset + off as u64))?;
source.read_exact(dst) source.read_exact(dst.deref_mut())
}, },
phdr.p_filesz as usize, phdr.p_filesz as usize,
phdr.p_flags, phdr.p_flags,
@@ -144,7 +149,7 @@ pub fn load_elf_from_file(space: &AddressSpace, file: FileRef) -> Result<usize,
load_bytes( load_bytes(
space, space,
addr, addr,
|_, dst| { |_, mut dst| {
dst.fill(0); dst.fill(0);
Ok(()) Ok(())
}, },

View File

@@ -7,9 +7,9 @@ use vfs::FileRef;
use crate::{ use crate::{
mem::{ mem::{
phys::{self, PageUsage}, phys,
table::{AddressSpace, MapAttributes, VirtualMemoryManager}, table::{AddressSpace, MapAttributes, VirtualMemoryManager},
ConvertAddress, ForeignPointer, ForeignPointer,
}, },
proc, proc,
task::{context::TaskContextImpl, process::Process, TaskContext}, task::{context::TaskContextImpl, process::Process, TaskContext},
@@ -29,15 +29,15 @@ fn setup_args(space: &AddressSpace, virt: usize, args: &[&str]) -> Result<(), Er
debugln!("arg data size = {}", args_size); debugln!("arg data size = {}", args_size);
let phys_page = phys::alloc_page(PageUsage::Used)?; let phys_page = phys::alloc_page()?;
// TODO check if this doesn't overwrite anything // TODO check if this doesn't overwrite anything
space.map_page( space.map_page(
virt, virt,
phys_page, phys_page,
MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL, // PageAttributes::AP_BOTH_READWRITE | PageAttributes::NON_GLOBAL, MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL,
)?; )?;
let write = unsafe { phys_page.virtualize() }; let write = phys_page.virtualize_raw();
let mut offset = args_ptr_size; let mut offset = args_ptr_size;
@@ -83,7 +83,7 @@ fn setup_binary<S: Into<String>>(
let virt_args_base = virt_stack_base + (USER_STACK_PAGES + 1) * 0x1000; let virt_args_base = virt_stack_base + (USER_STACK_PAGES + 1) * 0x1000;
for i in 0..USER_STACK_PAGES { for i in 0..USER_STACK_PAGES {
let phys = phys::alloc_page(PageUsage::Used)?; let phys = phys::alloc_page()?;
space.map_page( space.map_page(
virt_stack_base + i * 0x1000, virt_stack_base + i * 0x1000,
phys, phys,

View File

@@ -4,7 +4,7 @@ use abi::{arch::SavedFrame, error::Error, process::ExitCode};
use alloc::boxed::Box; use alloc::boxed::Box;
use cfg_if::cfg_if; use cfg_if::cfg_if;
use crate::task::process::Process; use crate::{mem::PhysicalAddress, task::process::Process};
cfg_if! { cfg_if! {
if #[cfg(target_arch = "aarch64")] { if #[cfg(target_arch = "aarch64")] {
@@ -56,7 +56,12 @@ pub trait TaskContextImpl: Sized {
/// Constructs a user thread context. The caller is responsible for allocating the userspace /// Constructs a user thread context. The caller is responsible for allocating the userspace
/// stack and setting up a valid address space for the context. /// stack and setting up a valid address space for the context.
fn user(entry: usize, arg: usize, cr3: usize, user_stack_sp: usize) -> Result<Self, Error>; fn user(
entry: usize,
arg: usize,
cr3: PhysicalAddress,
user_stack_sp: usize,
) -> Result<Self, Error>;
/// Performs an entry into a context. /// Performs an entry into a context.
/// ///

View File

@@ -82,7 +82,7 @@ pub struct Process {
} }
/// Guard type that provides [Process] operations only available for current processes /// Guard type that provides [Process] operations only available for current processes
pub struct CurrentProcess(Arc<Process>); pub struct CurrentProcess(Arc<Process>, IrqGuard);
impl Process { impl Process {
/// Creates a process from raw architecture-specific [TaskContext]. /// Creates a process from raw architecture-specific [TaskContext].
@@ -447,11 +447,8 @@ impl CurrentProcess {
/// # Safety /// # Safety
/// ///
/// Only meant to be called from [Process::current] or [CpuQueue::current_process]. /// Only meant to be called from [Process::current] or [CpuQueue::current_process].
pub unsafe fn new(inner: Arc<Process>) -> Self { pub unsafe fn new(inner: Arc<Process>, guard: IrqGuard) -> Self {
// XXX Self(inner, guard)
// assert_eq!(DAIF.read(DAIF::I), 1);
assert!(ArchitectureImpl::interrupt_mask());
Self(inner)
} }
/// Configures signal entry information for the process /// Configures signal entry information for the process

View File

@@ -3,7 +3,11 @@ use alloc::sync::Arc;
use crossbeam_queue::ArrayQueue; use crossbeam_queue::ArrayQueue;
use kernel_util::util::OneTimeInit; use kernel_util::util::OneTimeInit;
use crate::{sync::IrqGuard, task::process::Process}; use crate::{
arch::{Architecture, ArchitectureImpl},
sync::IrqGuard,
task::process::Process,
};
use super::task::Task; use super::task::Task;
@@ -41,6 +45,7 @@ impl TaskQueue {
pub fn dequeue(&self) -> Result<Arc<Task>, Error> { pub fn dequeue(&self) -> Result<Arc<Task>, Error> {
let process = Process::current(); let process = Process::current();
assert!(ArchitectureImpl::interrupt_mask());
loop { loop {
if let Some(task) = self.task_queue.pop() { if let Some(task) = self.task_queue.pop() {
return Ok(task); return Ok(task);

View File

@@ -8,7 +8,7 @@ use kernel_util::util::OneTimeInit;
use crate::{ use crate::{
// arch::aarch64::{context::TaskContext, cpu::Cpu}, // arch::aarch64::{context::TaskContext, cpu::Cpu},
arch::{Architecture, ArchitectureImpl}, arch::{Architecture, ArchitectureImpl},
sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard}, sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard},
}; };
use super::{ use super::{
@@ -276,11 +276,12 @@ impl CpuQueue {
/// will remain valid until the end of the interrupt or until [CpuQueue::yield_cpu] /// will remain valid until the end of the interrupt or until [CpuQueue::yield_cpu]
/// is called. /// is called.
pub fn current_process(&self) -> Option<CurrentProcess> { pub fn current_process(&self) -> Option<CurrentProcess> {
let guard = IrqGuard::acquire();
self.inner self.inner
.lock() .lock()
.current .current
.clone() .clone()
.map(|p| unsafe { CurrentProcess::new(p) }) .map(|p| unsafe { CurrentProcess::new(p, guard) })
} }
/// Returns a queue for given CPU index /// Returns a queue for given CPU index

View File

@@ -0,0 +1,15 @@
[package]
name = "gentables"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
memtables = { path = "../../lib/memtables" }
bytemuck = "1.14.0"
elf = "0.7.2"
thiserror = "1.0.48"
clap = { version = "4.4.2", features = ["derive"] }
bitflags = "2.4.0"

186 tools/gentables/src/main.rs Normal file
View File

@@ -0,0 +1,186 @@
#![feature(offset_of)]
use std::{
fs::OpenOptions,
io::{Read, Seek, SeekFrom, Write},
ops::Range,
path::{Path, PathBuf},
process::ExitCode,
};
use clap::Parser;
use elf::{
abi::{EM_X86_64, PT_LOAD},
endian::AnyEndian,
ElfStream,
};
use memtables::FixedTables;
use thiserror::Error;
use crate::x86_64::X8664Builder;
mod x86_64;
#[derive(Error, Debug)]
pub enum GenError {
#[error("I/O error: {0}")]
IoError(#[from] std::io::Error),
#[error("ELF parse error: {0}")]
ElfParseError(#[from] elf::ParseError),
#[error("Image's architecture is not supported")]
UnsupportedArchitecture,
#[error("Could not determine the kernel image address range (possibly incorrect segments?)")]
NoKernelImageRange,
#[error("Kernel image is too large: {0:#x?} ({1}B). Maximum size: {2}B")]
KernelTooLarge(Range<u64>, u64, u64),
#[error("Kernel image is missing a required symbol: {0:?}")]
MissingSymbol(&'static str),
#[error("Kernel image is missing a required section: {0:?}")]
MissingSection(&'static str),
#[error("Incorrect tables section placement: {0:#x}")]
IncorrectTablesPlacement(u64),
}
#[derive(Parser)]
struct Args {
image: PathBuf,
}
pub struct GenData {
pub kernel_start: u64,
pub kernel_end: u64,
pub table_offset: u64,
pub table_physical_address: u64,
pub kernel_virt_offset: u64,
}
fn kernel_image_range<F: Read + Seek>(
elf: &mut ElfStream<AnyEndian, F>,
kernel_virt_offset: u64,
) -> Result<(u64, u64), GenError> {
let mut start = u64::MAX;
let mut end = u64::MIN;
for segment in elf.segments() {
if segment.p_type != PT_LOAD || segment.p_vaddr != segment.p_paddr + kernel_virt_offset {
continue;
}
let aligned_start = segment.p_vaddr & !0xFFF;
let aligned_end = (segment.p_vaddr + segment.p_memsz + 0xFFF) & !0xFFF;
if aligned_end > end {
end = aligned_end;
}
if aligned_start < start {
start = aligned_start;
}
}
if start < end {
Ok((start, end))
} else {
Err(GenError::NoKernelImageRange)
}
}
fn kernel_virt_offset<F: Read + Seek>(elf: &mut ElfStream<AnyEndian, F>) -> Result<u64, GenError> {
let (symtab, symstrtab) = elf
.symbol_table()?
.ok_or_else(|| GenError::MissingSection(".symtab"))?;
for sym in symtab {
let name = symstrtab.get(sym.st_name as _)?;
if name == "KERNEL_VIRT_OFFSET" {
// TODO symbol checks
return Ok(sym.st_value);
}
}
Err(GenError::MissingSymbol("KERNEL_VIRT_OFFSET"))
}
fn find_tables<F: Read + Seek>(elf: &mut ElfStream<AnyEndian, F>) -> Result<(u64, u64), GenError> {
let (shdrs, strtab) = elf.section_headers_with_strtab()?;
let strtab = strtab.ok_or_else(|| GenError::MissingSection(".strtab"))?;
for shdr in shdrs {
let name = strtab.get(shdr.sh_name as _)?;
if name == ".data.tables" {
// TODO section checks
return Ok((shdr.sh_offset, shdr.sh_addr));
}
}
Err(GenError::MissingSection(".data.tables"))
}
fn build_tables<F: Read + Seek>(file: F) -> Result<(FixedTables, u64), GenError> {
let mut elf = ElfStream::<AnyEndian, F>::open_stream(file)?;
let kernel_virt_offset = kernel_virt_offset(&mut elf)?;
let (kernel_start, kernel_end) = kernel_image_range(&mut elf, kernel_virt_offset)?;
let (table_offset, table_virt_addr) = find_tables(&mut elf)?;
let table_physical_address = table_virt_addr
.checked_sub(kernel_virt_offset)
.ok_or_else(|| GenError::IncorrectTablesPlacement(table_virt_addr))?;
println!("Kernel image range: {:#x?}", kernel_start..kernel_end);
println!("KERNEL_VIRT_OFFSET = {:#x}", kernel_virt_offset);
match elf.ehdr.e_machine {
EM_X86_64 => X8664Builder::new(
elf,
GenData {
kernel_virt_offset,
kernel_start,
kernel_end,
table_offset,
table_physical_address,
},
)?
.build(),
_ => todo!(),
}
}
fn write_tables<F: Write + Seek>(
mut file: F,
offset: u64,
tables: FixedTables,
) -> Result<(), GenError> {
let bytes = bytemuck::bytes_of(&tables);
file.seek(SeekFrom::Start(offset))?;
file.write_all(bytes)?;
Ok(())
}
fn gentables<P: AsRef<Path>>(image: P) -> Result<(), GenError> {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.truncate(false)
.open(image)?;
let (tables, file_offset) = build_tables(&mut file)?;
write_tables(file, file_offset, tables)?;
Ok(())
}
fn main() -> ExitCode {
let args = Args::parse();
match gentables(&args.image) {
Ok(()) => ExitCode::SUCCESS,
Err(err) => {
eprintln!("{}: {}", args.image.display(), err);
ExitCode::FAILURE
}
}
}

View File

@@ -0,0 +1,189 @@
use core::fmt;
use std::{
io::{Read, Seek},
mem::offset_of,
};
use bitflags::bitflags;
use bytemuck::Zeroable;
use elf::{
abi::{PF_W, PF_X, PT_LOAD},
endian::AnyEndian,
ElfStream,
};
use memtables::{FixedTables, KERNEL_L3_COUNT};
use crate::{GenData, GenError};
bitflags! {
#[derive(Clone, Copy)]
struct PageFlags: u64 {
const PRESENT = 1 << 0;
const WRITABLE = 1 << 1;
const NX = 1 << 63;
}
}
pub struct X8664Builder<F: Seek + Read> {
elf: ElfStream<AnyEndian, F>,
data: GenData,
tables: FixedTables,
l0i: usize,
l1i: usize,
start_l2i: usize,
end_l2i: usize,
}
impl PageFlags {
fn from_elf(flags: u32) -> Self {
let mut out = Self::empty();
if flags & PF_W != 0 {
out |= Self::WRITABLE;
}
if flags & PF_X == 0 {
out |= Self::NX;
}
out
}
}
impl fmt::Display for PageFlags {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"r{}{}",
if self.contains(Self::WRITABLE) {
'w'
} else {
'-'
},
if self.contains(Self::NX) { '-' } else { 'x' }
)
}
}
impl<F: Seek + Read> X8664Builder<F> {
pub fn new(elf: ElfStream<AnyEndian, F>, data: GenData) -> Result<Self, GenError> {
let l2_aligned_start = data.kernel_start & !0x1FFFFF;
let l2_aligned_end = (data.kernel_end + 0x1FFFFF) & !0x1FFFFF;
if l2_aligned_end <= l2_aligned_start {
todo!();
}
if (l2_aligned_end - l2_aligned_start) as usize >= KERNEL_L3_COUNT * 0x200000 {
return Err(GenError::KernelTooLarge(
l2_aligned_start..l2_aligned_end,
l2_aligned_end - l2_aligned_start,
(KERNEL_L3_COUNT * 0x200000) as u64,
));
}
let l0i = (data.kernel_start >> 39) as usize & 0x1FF;
let l1i = (data.kernel_start >> 30) as usize & 0x1FF;
let start_l2i = (l2_aligned_start >> 21) as usize & 0x1FF;
let end_l2i = (l2_aligned_end >> 21) as usize & 0x1FF;
Ok(Self {
elf,
data,
tables: FixedTables::zeroed(),
l0i,
l1i,
start_l2i,
end_l2i,
})
}
pub fn build(mut self) -> Result<(FixedTables, u64), GenError> {
// L0 -> L1
let l1_physical_address =
self.data.table_physical_address + offset_of!(FixedTables, kernel_l1) as u64;
self.tables.l0.data[self.l0i] =
l1_physical_address | (PageFlags::PRESENT | PageFlags::WRITABLE).bits();
// L1 -> L2
let l2_physical_address =
self.data.table_physical_address + offset_of!(FixedTables, kernel_l2) as u64;
self.tables.kernel_l1.data[self.l1i] =
l2_physical_address | (PageFlags::PRESENT | PageFlags::WRITABLE).bits();
// L2 -> L3s
for i in 0..KERNEL_L3_COUNT {
let l3_physical_address = self.data.table_physical_address
+ (offset_of!(FixedTables, kernel_l3s) + 0x1000 * i) as u64;
self.tables.kernel_l2.data[i + self.start_l2i] =
l3_physical_address | (PageFlags::PRESENT | PageFlags::WRITABLE).bits();
}
for (i, segment) in self.elf.segments().into_iter().enumerate() {
if segment.p_type != PT_LOAD
|| segment.p_vaddr != segment.p_paddr + self.data.kernel_virt_offset
{
continue;
}
let aligned_virt_start = segment.p_vaddr & !0xFFF;
let aligned_virt_end = (segment.p_vaddr + segment.p_memsz + 0xFFF) & !0xFFF;
let aligned_phys_start = segment.p_paddr & !0xFFF;
let count = (aligned_virt_end - aligned_virt_start) / 0x1000;
let flags = PageFlags::from_elf(segment.p_flags);
println!(
"{}: {:#x?} -> {:#x} {}",
i,
aligned_virt_start..aligned_virt_end,
aligned_phys_start,
flags
);
Self::map_segment(
self.start_l2i,
&mut self.tables,
aligned_virt_start,
aligned_phys_start,
count as usize,
flags,
)?;
}
Ok((self.tables, self.data.table_offset))
}
fn map_segment(
l2i_offset: usize,
tables: &mut FixedTables,
vaddr: u64,
paddr: u64,
count: usize,
flags: PageFlags,
) -> Result<(), GenError> {
for index in 0..count {
let address = vaddr + index as u64 * 0x1000;
let page = paddr + index as u64 * 0x1000;
let entry = page | (PageFlags::PRESENT | flags).bits();
// Index into kernel_l3s, relative to the first kernel L2 slot
let l2i = ((address >> 21) as usize & 0x1FF) - l2i_offset;
let l3i = (address >> 12) as usize & 0x1FF;
let l3 = &mut tables.kernel_l3s[l2i];
if l3.data[l3i] != 0 {
if l3.data[l3i] != entry {
todo!();
} else {
continue;
}
}
l3.data[l3i] = entry;
}
Ok(())
}
}
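Sanity-check sketch (not part of the commit): the index decomposition used by the builder above, written out as a standalone function.

// Decompose a virtual address into x86_64 four-level table indices,
// mirroring the shifts used by X8664Builder.
fn table_indices(vaddr: u64) -> (usize, usize, usize, usize) {
    let l0i = (vaddr >> 39) as usize & 0x1FF; // 512GiB per L0 entry
    let l1i = (vaddr >> 30) as usize & 0x1FF; // 1GiB per L1 entry
    let l2i = (vaddr >> 21) as usize & 0x1FF; // 2MiB per L2 entry
    let l3i = (vaddr >> 12) as usize & 0x1FF; // 4KiB per page
    (l0i, l1i, l2i, l3i)
}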