diff --git a/Cargo.toml b/Cargo.toml
index 7c4e8789..e1067719 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,18 +7,17 @@ edition = "2021"
 
 [dependencies]
 yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
-vfs = { path = "lib/vfs" }
-memfs = { path = "lib/memfs" }
+# vfs = { path = "lib/vfs" }
+# memfs = { path = "lib/memfs" }
 
-aarch64-cpu = "9.3.1"
-atomic_enum = "0.2.0"
+# atomic_enum = "0.2.0"
 bitflags = "2.3.3"
-fdt-rs = { version = "0.4.3", default-features = false }
-linked_list_allocator = "0.10.5"
-spinning_top = "0.2.5"
-static_assertions = "1.1.0"
-tock-registers = "0.8.1"
+# linked_list_allocator = "0.10.5"
+# spinning_top = "0.2.5"
+# static_assertions = "1.1.0"
+# tock-registers = "0.8.1"
 cfg-if = "1.0.0"
+embedded-graphics = "0.8.0"
 
 [dependencies.elf]
 version = "0.7.2"
@@ -26,5 +25,10 @@ git = "https://git.alnyan.me/yggdrasil/yggdrasil-elf.git"
 default-features = false
 features = ["no_std_stream"]
 
+[target.'cfg(target_arch = "aarch64")'.dependencies]
+fdt-rs = { version = "0.4.3", default-features = false }
+aarch64-cpu = "9.3.1"
+
 [target.'cfg(target_arch = "x86_64")'.dependencies]
+bitmap-font = { version = "0.3.0" }
 yboot-proto = { git = "https://git.alnyan.me/yggdrasil/yboot-proto.git" }
diff --git a/src/arch/aarch64/table.rs b/src/arch/aarch64/table.rs
index 66ffd73e..57dcc78a 100644
--- a/src/arch/aarch64/table.rs
+++ b/src/arch/aarch64/table.rs
@@ -41,12 +41,6 @@ pub struct L2;
 #[derive(Clone, Copy)]
 pub struct L3;
 
-/// Tag trait to mark that the page table level may point to a next-level table
-pub trait NonTerminalEntryLevel: EntryLevel {
-    /// Tag type of the level this entry level may point to
-    type NextLevel: EntryLevel;
-}
-
 impl NonTerminalEntryLevel for L1 {
     type NextLevel = L2;
 }
diff --git a/src/arch/mod.rs b/src/arch/mod.rs
index 6496b936..3775b552 100644
--- a/src/arch/mod.rs
+++ b/src/arch/mod.rs
@@ -1,5 +1,6 @@
 //! Provides architecture/platform-specific implementation details
 
+use abi::error::Error;
 use cfg_if::cfg_if;
 
 cfg_if! {
@@ -11,50 +12,53 @@ cfg_if! {
         use abi::error::Error;
         use cfg_if::cfg_if;
 
-        /// Describes messages sent from some CPU to others
-        #[derive(Clone, Copy, PartialEq, Debug)]
-        #[repr(u64)]
-        pub enum CpuMessage {
-            /// Indicates that the sender CPU entered kernel panic and wants other CPUs to follow
-            Panic,
-        }
-
-        /// Interface for an architecture-specific facilities
-        pub trait Architecture {
-            /// Address, to which "zero" address is mapped in the virtual address space
-            const KERNEL_VIRT_OFFSET: usize;
-
-            /// Initializes the memory management unit and sets up virtual memory management.
-            /// `bsp` flag is provided to make sure mapping tables are only initialized once in a SMP
-            /// system.
-            ///
-            /// # Safety
-            ///
-            /// Unsafe to call if the MMU has already been initialized.
-            unsafe fn init_mmu(&self, bsp: bool);
-
-            /// Allocates a virtual mapping for the specified physical memory region
-            fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error>;
-
-            // Architecture intrinsics
-
-            /// Suspends CPU until an interrupt is received
-            fn wait_for_interrupt();
-
-            /// Sets the local CPU's interrupt mask.
-            ///
-            /// # Safety
-            ///
-            /// Enabling interrupts may lead to unexpected behavior unless the context explicitly expects
-            /// them.
-            unsafe fn set_interrupt_mask(mask: bool);
-
-            /// Returns the local CPU's interrupt mask
-            fn interrupt_mask() -> bool;
-        }
     } else if #[cfg(target_arch = "x86_64")] {
         pub mod x86_64;
+
+        pub use x86_64::{X86_64 as ArchitectureImpl, ARCHITECTURE};
     } else {
         compile_error!("Architecture is not supported");
     }
 }
+
+/// Describes messages sent from some CPU to others
+#[derive(Clone, Copy, PartialEq, Debug)]
+#[repr(u64)]
+pub enum CpuMessage {
+    /// Indicates that the sender CPU entered kernel panic and wants other CPUs to follow
+    Panic,
+}
+
+/// Interface for architecture-specific facilities
+pub trait Architecture {
+    /// Address to which the "zero" address is mapped in the virtual address space
+    const KERNEL_VIRT_OFFSET: usize;
+
+    /// Initializes the memory management unit and sets up virtual memory management.
+    /// The `bsp` flag is provided to make sure mapping tables are only initialized once in an
+    /// SMP system.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe to call if the MMU has already been initialized.
+    unsafe fn init_mmu(&self, bsp: bool);
+
+    /// Allocates a virtual mapping for the specified physical memory region
+    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error>;
+
+    // Architecture intrinsics
+
+    /// Suspends the CPU until an interrupt is received
+    fn wait_for_interrupt();
+
+    /// Sets the local CPU's interrupt mask.
+    ///
+    /// # Safety
+    ///
+    /// Enabling interrupts may lead to unexpected behavior unless the context explicitly expects
+    /// them.
+    unsafe fn set_interrupt_mask(mask: bool);
+
+    /// Returns the local CPU's interrupt mask
+    fn interrupt_mask() -> bool;
+}
diff --git a/src/arch/x86_64/boot/mod.rs b/src/arch/x86_64/boot/mod.rs
new file mode 100644
index 00000000..520924dc
--- /dev/null
+++ b/src/arch/x86_64/boot/mod.rs
@@ -0,0 +1,292 @@
+use core::arch::global_asm;
+
+use abi::error::Error;
+use bitmap_font::TextStyle;
+use embedded_graphics::{
+    pixelcolor::BinaryColor,
+    prelude::{DrawTarget, OriginDimensions, Point},
+    text::Text,
+    Drawable, Pixel,
+};
+use yboot_proto::{
+    v1::FramebufferOption, LoadProtocolHeader, LoadProtocolV1, KERNEL_MAGIC, LOADER_MAGIC,
+    PROTOCOL_VERSION_1,
+};
+
+use crate::{
+    arch::Architecture,
+    debug::{self, DebugSink},
+    mem::device::DeviceMemory,
+    sync::IrqSafeSpinlock,
+    util::OneTimeInit,
+};
+
+use super::ARCHITECTURE;
+
+pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
+const BOOT_STACK_SIZE: usize = 65536;
+
+#[repr(C, align(0x20))]
+struct BootStack {
+    data: [u8; BOOT_STACK_SIZE],
+}
+
+#[link_section = ".bss"]
+static mut BSP_STACK: BootStack = BootStack {
+    data: [0; BOOT_STACK_SIZE],
+};
+
+#[used]
+#[link_section = ".data.yboot"]
+static mut YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 {
+    header: LoadProtocolHeader {
+        kernel_magic: KERNEL_MAGIC,
+        version: PROTOCOL_VERSION_1,
+    },
+    kernel_virt_offset: KERNEL_VIRT_OFFSET as _,
+
+    opt_framebuffer: FramebufferOption {
+        req_width: 640,
+        req_height: 480,
+
+        res_width: 0,
+        res_height: 0,
+        res_stride: 0,
+        res_address: 0,
+        res_size: 0,
+    },
+};
+
+struct LinearFramebufferInner {
+    mmio: DeviceMemory,
+    base: usize,
+    stride: usize,
+    width: usize,
+    height: usize,
+}
+
+pub struct LinearFramebuffer {
+    inner: IrqSafeSpinlock<LinearFramebufferInner>,
+}
+
+struct Position {
+    row: u32,
+    col: u32,
+}
+
+pub struct FramebufferConsole {
+    framebuffer: &'static LinearFramebuffer,
+    position: IrqSafeSpinlock<Position>,
+    char_height: usize,
+    char_width: usize,
+    width_chars: usize,
+    height_chars: usize,
+}
+
+impl OriginDimensions for LinearFramebufferInner {
+    fn size(&self) -> embedded_graphics::prelude::Size {
+        embedded_graphics::prelude::Size::new(self.width as _, self.height as _)
+    }
+}
+
+impl DrawTarget for LinearFramebufferInner {
+    type Color = BinaryColor;
+    type Error = ();
+
+    fn draw_iter<I>(&mut self, pixels: I) -> Result<(), Self::Error>
+    where
+        I: IntoIterator<Item = Pixel<Self::Color>>,
+    {
+        for Pixel(coord, color) in pixels {
+            let x = coord.x as usize;
+            let y = coord.y as usize;
+            let addr = self.base + y * self.stride + x * 4;
+            let ptr = addr as *mut u32;
+            unsafe {
+                if color.is_on() {
+                    ptr.write_volatile(0xFFFFFFFF);
+                } else {
+                    ptr.write_volatile(0);
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl DebugSink for FramebufferConsole {
+    fn putc(&self, c: u8) -> Result<(), Error> {
+        let mut pos = self.position.lock();
+
+        self.framebuffer.draw_glyph(
+            self.char_width * pos.col as usize,
+            self.char_height * pos.row as usize,
+            c,
+        );
+
+        if c == b'\n' {
+            pos.row += 1;
+            pos.col = 0;
+        } else {
+            pos.col += 1;
+        }
+
+        if pos.col == self.width_chars as u32 {
+            pos.row += 1;
+            pos.col = 0;
+        }
+        if pos.row == self.height_chars as u32 {
+            pos.row = self.height_chars as u32 - 1;
+        }
+
+        Ok(())
+    }
+}
+
+impl LinearFramebuffer {
+    pub fn from_yboot(fb: &FramebufferOption) -> Result<Self, Error> {
+        let mmio =
+            unsafe { DeviceMemory::map("framebuffer", fb.res_address as _, fb.res_size as _) }?;
+        let inner = LinearFramebufferInner {
+            base: mmio.base(),
+            mmio,
+            stride: fb.res_stride as _,
+            width: fb.res_width as _,
+            height: fb.res_height as _,
+        };
+
+        Ok(Self {
+            inner: IrqSafeSpinlock::new(inner),
+        })
+    }
+
+    pub fn draw_glyph(&self, x: usize, y: usize, c: u8) {
+        let mut inner = self.inner.lock();
+        let font = &bitmap_font::tamzen::FONT_6x12;
+
+        let text_data = [c];
+        let text_str = unsafe { core::str::from_utf8_unchecked(&text_data) };
+        let text = Text::new(
+            text_str,
+            Point::new(x as _, y as _),
+            TextStyle::new(font, BinaryColor::On),
+        );
+
+        text.draw(&mut *inner).ok();
+    }
+}
+
+impl FramebufferConsole {
+    pub fn new(framebuffer: &'static LinearFramebuffer) -> Self {
+        let char_width = 6;
+        let char_height = 12;
+        let (w, h) = {
+            let inner = framebuffer.inner.lock();
+            (inner.width, inner.height)
+        };
+
+        Self {
+            framebuffer,
+            position: IrqSafeSpinlock::new(Position { row: 0, col: 0 }),
+            width_chars: w / char_width,
+            height_chars: h / char_height,
+            char_width,
+            char_height,
+        }
+    }
+}
+
+static DISPLAY: OneTimeInit<LinearFramebuffer> = OneTimeInit::new();
+static CONSOLE: OneTimeInit<FramebufferConsole> = OneTimeInit::new();
+
+extern "C" fn __x86_64_upper_entry() -> ! {
+    unsafe {
+        ARCHITECTURE.init_mmu(true);
+        core::arch::asm!("wbinvd");
+    }
+
+    let fb = unsafe { &YBOOT_DATA.opt_framebuffer };
+
+    DISPLAY.init(LinearFramebuffer::from_yboot(fb).unwrap());
+    CONSOLE.init(FramebufferConsole::new(DISPLAY.get()));
+
+    debug::init_with_sink(CONSOLE.get());
+
+    for i in 0..10 {
+        debugln!("Test {}", i);
+    }
+
+    loop {
+        unsafe {
+            core::arch::asm!("cli; hlt");
+        }
+    }
+
+    // if let Ok(fb_mmio) = unsafe { DeviceMemory::map("framebuffer", fb.res_address as _, 0x1000) } {
+    //     unsafe {
+    //         core::arch::asm!("mov %cr3, %rax; mov %rax, %cr3", options(att_syntax));
+    //     }
+    //     let addr = 0xffffff8140000000usize;
+    //     let slice = unsafe { core::slice::from_raw_parts_mut(addr as *mut u32, 1024) };
+    //     slice.fill(0xFFFF0000);
+    //     loop {}
+    //     // for y in 0..2 {
+    //     //     let y_val = (y * 255) / fb.res_height;
+
+    //     //     let addr = fb_mmio.base() + y as usize * fb.res_stride as usize;
+    //     //     let row =
+    //     //         unsafe { core::slice::from_raw_parts_mut(addr as *mut u32, fb.res_width as _) };
+
+    //     //     let v = 0xFF000000 | (y_val << 16) | (y_val << 8) | y_val;
+
+    //     //     row.fill(v);
+    //     // }
+    // }
+
+    // unsafe {
+    //     core::arch::asm!(
+    //         r#"
+    //         mov $0x3F8, %dx
+    //         mov $'@', %al
+
+    //         out %al, %dx
+    //         "#,
+    //         options(att_syntax)
+    //     );
+    // }
+
+    loop {}
+}
+
+global_asm!(
+    r#"
+.global __x86_64_entry
+
+.section .text.entry
+__x86_64_entry:
+    mov ${yboot_loader_magic}, %edi
+    cmp %edi, %eax
+    je 2f
+
+    // (Currently) unsupported bootloader
+1:
+    cli
+    hlt
+    jmp 1b
+
+2:
+    // yboot entry method
+    movabsq ${stack_bottom} + {stack_size}, %rax
+    movabsq ${entry}, %rcx
+    mov %rax, %rsp
+    jmp *%rcx
+
+.section .text
+"#,
+    yboot_loader_magic = const LOADER_MAGIC,
+    stack_size = const BOOT_STACK_SIZE,
+    stack_bottom = sym BSP_STACK,
+    entry = sym __x86_64_upper_entry,
+    options(att_syntax)
+);
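A host-runnable sketch of the two pieces of arithmetic above, outside the patch: `draw_iter` addresses a 32-bit pixel at byte offset `y * stride + x * 4`, and `FramebufferConsole::new` divides the screen into a character grid using the 6x12 Tamzen glyph size. The geometry values below are illustrative, not taken from the loader.

fn main() {
    let (width, height) = (640usize, 480usize);
    let stride = width * 4; // bytes per scanline; real hardware may pad this

    // Stand-in for the MMIO aperture: a plain in-memory buffer
    let mut fb = vec![0u32; (stride / 4) * height];

    let (x, y) = (10usize, 20usize);
    let byte_offset = y * stride + x * 4;
    fb[byte_offset / 4] = 0xFFFF_FFFF; // the BinaryColor::On branch

    assert_eq!(fb[y * (stride / 4) + x], 0xFFFF_FFFF);

    // Character grid, as computed by FramebufferConsole::new
    let (char_width, char_height) = (6usize, 12usize);
    assert_eq!(width / char_width, 106); // width_chars
    assert_eq!(height / char_height, 40); // height_chars
}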
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index b0a34b30..ce756c1a 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -1,73 +1,41 @@
-use core::arch::global_asm;
+use abi::error::Error;
 
-use yboot_proto::{
-    v1::FramebufferOption, LoadProtocolHeader, LoadProtocolV1, KERNEL_MAGIC, LOADER_MAGIC,
-    PROTOCOL_VERSION_1,
-};
+use crate::arch::x86_64::table::{init_fixed_tables, KERNEL_TABLES};
 
-pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
-const BOOT_STACK_SIZE: usize = 65536;
+use super::Architecture;
 
-#[repr(C, align(0x20))]
-struct BootStack {
-    data: [u8; BOOT_STACK_SIZE],
+pub mod boot;
+pub mod table;
+
+pub struct X86_64;
+
+impl Architecture for X86_64 {
+    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
+
+    unsafe fn init_mmu(&self, bsp: bool) {
+        if bsp {
+            init_fixed_tables();
+        }
+
+        let cr3 = KERNEL_TABLES.physical_address();
+        core::arch::asm!("wbinvd; mov {0}, %cr3", in(reg) cr3, options(att_syntax));
+    }
+
+    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error> {
+        unsafe { KERNEL_TABLES.map_device_pages(phys, count) }
+    }
+
+    fn wait_for_interrupt() {
+        todo!()
+    }
+
+    unsafe fn set_interrupt_mask(mask: bool) {
+        todo!()
+    }
+
+    fn interrupt_mask() -> bool {
+        todo!()
+    }
 }
 
-#[link_section = ".bss"]
-static mut BSP_STACK: BootStack = BootStack {
-    data: [0; BOOT_STACK_SIZE],
-};
-
-#[used]
-#[link_section = ".data.yboot"]
-static mut YBOOT_DATA: LoadProtocolV1 = LoadProtocolV1 {
-    header: LoadProtocolHeader {
-        kernel_magic: KERNEL_MAGIC,
-        version: PROTOCOL_VERSION_1,
-    },
-    kernel_virt_offset: KERNEL_VIRT_OFFSET as _,
-
-    opt_framebuffer: FramebufferOption {
-        req_width: 1024,
-        req_height: 768,
-
-        res_width: 0,
-        res_height: 0,
-        res_stride: 0,
-        res_address: 0,
-    },
-};
-
-extern "C" fn __x86_64_upper_entry() -> ! {
-    loop {}
-}
-
-global_asm!(
-    r#"
-.global __x86_64_entry
-
-.section .text.entry
-__x86_64_entry:
-    mov ${yboot_loader_magic}, %edi
-    cmp %edi, %eax
-    je 2f
-
-    // (Currently) unsupported bootloader
-1:
-    cli
-    hlt
-    jmp 1b
-
-2:
-    // yboot entry method
-    movabsq ${stack_bottom} + {stack_size}, %rax
-    movabsq ${entry}, %rcx
-    mov %rax, %rsp
-    jmp *%rcx
-"#,
-    yboot_loader_magic = const LOADER_MAGIC,
-    stack_size = const BOOT_STACK_SIZE,
-    stack_bottom = sym BSP_STACK,
-    entry = sym __x86_64_upper_entry,
-    options(att_syntax)
-);
+pub static ARCHITECTURE: X86_64 = X86_64;
diff --git a/src/arch/x86_64/table/fixed.rs b/src/arch/x86_64/table/fixed.rs
new file mode 100644
index 00000000..cdae45f4
--- /dev/null
+++ b/src/arch/x86_64/table/fixed.rs
@@ -0,0 +1,109 @@
+use abi::error::Error;
+
+use crate::{
+    arch::x86_64::table::{PageAttributes, PageEntry, PageTable, L0, L1, L2, L3},
+    mem::KERNEL_VIRT_OFFSET,
+};
+
+// Means the lower 4GiB are mapped
+const KERNEL_PD_COUNT: usize = 4;
+// Leave a 1GiB gap for safety
+const DEVICE_MAPPING_L1I: usize = KERNEL_PD_COUNT + 1;
+const DEVICE_VIRT_OFFSET: usize = (DEVICE_MAPPING_L1I << 30) + KERNEL_VIRT_OFFSET;
+
+pub struct FixedTables {
+    // Common
+    l0: PageTable<L0>,
+    l1: PageTable<L1>,
+
+    // Kernel mapping
+    kernel_l2: [PageTable<L2>; KERNEL_PD_COUNT],
+    // Device mapping
+    // 511 entries
+    device_l2: PageTable<L2>,
+    // 512 entries
+    device_l3: PageTable<L3>,
+
+    device_l3i: usize,
+}
+
+impl FixedTables {
+    pub const fn zeroed() -> Self {
+        Self {
+            // Global
+            l0: PageTable::zeroed(),
+
+            // Higher-half common
+            l1: PageTable::zeroed(),
+
+            // Kernel
+            kernel_l2: [PageTable::zeroed(); KERNEL_PD_COUNT],
+
+            // Device
+            device_l2: PageTable::zeroed(),
+            device_l3: PageTable::zeroed(),
+
+            device_l3i: 0,
+        }
+    }
+
+    pub fn map_device_pages(&mut self, phys: usize, count: usize) -> Result<usize, Error> {
+        if count > 512 * 512 {
+            panic!("Unsupported device memory mapping size");
+        } else if count > 512 {
+            // 2MiB mappings
+            todo!();
+        } else {
+            // 4KiB mappings
+            if self.device_l3i + count > 512 {
+                return Err(Error::OutOfMemory);
+            }
+
+            let virt = DEVICE_VIRT_OFFSET + (self.device_l3i << 12);
+            for i in 0..count {
+                self.device_l3[self.device_l3i + i] =
+                    PageEntry::page(phys + i * 0x1000, PageAttributes::WRITABLE);
+            }
+            self.device_l3i += count;
+
+            Ok(virt)
+        }
+    }
+
+    pub fn physical_address(&self) -> usize {
+        self.l0.physical_address()
+    }
+}
+
+pub static mut KERNEL_TABLES: FixedTables = FixedTables::zeroed();
+
+pub unsafe fn init_fixed_tables() {
+    // Kernel L2
+    for i in 0..512 * KERNEL_PD_COUNT {
+        let table_index = i / 512;
+        let table_offset = i % 512;
+
+        KERNEL_TABLES.kernel_l2[table_index][table_offset] =
+            PageEntry::block(i << 21, PageAttributes::WRITABLE);
+    }
+
+    // Device L2
+    let addr = KERNEL_TABLES.device_l3.physical_address();
+    KERNEL_TABLES.device_l2[0] = PageEntry::table(addr, PageAttributes::empty());
+
+    // Higher-half L1
+    // Map kernel nGiB
+    for i in 0..KERNEL_PD_COUNT {
+        let addr = KERNEL_TABLES.kernel_l2[i].physical_address();
+        KERNEL_TABLES.l1[i] = PageEntry::table(addr, PageAttributes::empty());
+    }
+
+    // Map device tables
+    let addr = KERNEL_TABLES.device_l2.physical_address();
+    KERNEL_TABLES.l1[DEVICE_MAPPING_L1I] = PageEntry::table(addr, PageAttributes::empty());
+
+    // Global L0
+    let addr = KERNEL_TABLES.l1.physical_address();
+    // No lower mapping anymore
+    KERNEL_TABLES.l0[511] = PageEntry::table(addr, PageAttributes::empty());
+}
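The device-window constants above can be sanity-checked on the host. With KERNEL_PD_COUNT = 4 and a one-slot gap, device mappings start at L1 index 5, that is, 5GiB above the higher-half base, which is exactly the 0xffffff8140000000 address the commented-out experiment in boot/mod.rs pokes. A standalone check:

fn main() {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
    const KERNEL_PD_COUNT: usize = 4;
    const DEVICE_MAPPING_L1I: usize = KERNEL_PD_COUNT + 1;
    const DEVICE_VIRT_OFFSET: usize = (DEVICE_MAPPING_L1I << 30) + KERNEL_VIRT_OFFSET;

    assert_eq!(DEVICE_VIRT_OFFSET, 0xFFFFFF8140000000);

    // map_device_pages hands out 4KiB pages linearly from device_l3:
    // with 3 entries already used, the next mapping lands 3 pages in
    let device_l3i = 3usize;
    assert_eq!(DEVICE_VIRT_OFFSET + (device_l3i << 12), 0xFFFFFF8140003000);
}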
diff --git a/src/arch/x86_64/table/mod.rs b/src/arch/x86_64/table/mod.rs
new file mode 100644
index 00000000..672d1289
--- /dev/null
+++ b/src/arch/x86_64/table/mod.rs
@@ -0,0 +1,158 @@
+use core::{
+    marker::PhantomData,
+    ops::{Index, IndexMut},
+};
+
+use bitflags::bitflags;
+
+mod fixed;
+
+pub use fixed::{init_fixed_tables, KERNEL_TABLES};
+
+use crate::mem::{
+    table::{EntryLevel, NonTerminalEntryLevel},
+    ConvertAddress,
+};
+
+bitflags! {
+    pub struct PageAttributes: u64 {
+        const PRESENT = 1 << 0;
+        const WRITABLE = 1 << 1;
+        const BLOCK = 1 << 7;
+    }
+}
+
+#[derive(Clone, Copy)]
+#[repr(transparent)]
+pub struct PageEntry<L: EntryLevel>(u64, PhantomData<L>);
+
+#[derive(Clone, Copy)]
+#[repr(C, align(0x1000))]
+pub struct PageTable<L: EntryLevel> {
+    data: [PageEntry<L>; 512],
+}
+
+// L0: PML4, each entry covers a 512GiB range
+#[derive(Clone, Copy)]
+pub struct L0;
+// L1: PDPT, 1GiB page
+#[derive(Clone, Copy)]
+pub struct L1;
+// L2: Page directory, 2MiB page
+#[derive(Clone, Copy)]
+pub struct L2;
+// L3: Page table, 4KiB page
+#[derive(Clone, Copy)]
+pub struct L3;
+
+impl NonTerminalEntryLevel for L0 {
+    type NextLevel = L1;
+}
+impl NonTerminalEntryLevel for L1 {
+    type NextLevel = L2;
+}
+impl NonTerminalEntryLevel for L2 {
+    type NextLevel = L3;
+}
+
+// #[repr(C)]
+// pub struct AddressSpace {
+//     l0: *mut PageTable<L0>,
+// }
+
+impl const EntryLevel for L0 {
+    fn index(addr: usize) -> usize {
+        todo!()
+    }
+
+    fn page_offset(addr: usize) -> usize {
+        todo!()
+    }
+}
+
+impl const EntryLevel for L1 {
+    fn index(addr: usize) -> usize {
+        (addr >> 30) & 0x1FF
+    }
+
+    fn page_offset(addr: usize) -> usize {
+        addr & 0x3FFFFFFF
+    }
+}
+
+impl const EntryLevel for L2 {
+    fn index(addr: usize) -> usize {
+        (addr >> 21) & 0x1FF
+    }
+
+    fn page_offset(addr: usize) -> usize {
+        addr & 0x1FFFFF
+    }
+}
+
+impl const EntryLevel for L3 {
+    fn index(addr: usize) -> usize {
+        (addr >> 12) & 0x1FF
+    }
+
+    fn page_offset(addr: usize) -> usize {
+        addr & 0xFFF
+    }
+}
+
+impl PageEntry<L3> {
+    pub fn page(phys: usize, attrs: PageAttributes) -> Self {
+        Self(
+            (phys as u64) | (attrs | PageAttributes::PRESENT).bits(),
+            PhantomData,
+        )
+    }
+}
+
+impl PageEntry<L2> {
+    pub fn block(phys: usize, attrs: PageAttributes) -> Self {
+        Self(
+            (phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK).bits(),
+            PhantomData,
+        )
+    }
+}
+
+impl<L: NonTerminalEntryLevel> PageEntry<L> {
+    pub fn table(phys: usize, attrs: PageAttributes) -> Self {
+        Self(
+            (phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::WRITABLE).bits(),
+            PhantomData,
+        )
+    }
+}
+
+impl<L: EntryLevel> PageEntry<L> {
+    pub const INVALID: Self = Self(0, PhantomData);
+}
+
+impl<L: EntryLevel> PageTable<L> {
+    pub const fn zeroed() -> Self {
+        Self {
+            data: [PageEntry::INVALID; 512],
+        }
+    }
+
+    pub fn physical_address(&self) -> usize {
+        unsafe { (self.data.as_ptr() as usize).physicalize() }
+    }
+}
+
+impl<L: EntryLevel> Index<usize> for PageTable<L> {
+    type Output = PageEntry<L>;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.data[index]
+    }
+}
+
+impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.data[index]
+    }
+}
diff --git a/src/arch/x86_64/tables.rs b/src/arch/x86_64/tables.rs
deleted file mode 100644
index e69de29b..00000000
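The EntryLevel impls above are the standard x86_64 4-level split: 9 bits of index per level, 12 bits of page offset (L0::index is still todo!(), but it would be bits 39..48). A host-checkable walk of one device-window address through the fixed tables:

fn main() {
    // DEVICE_VIRT_OFFSET + 3 pages + byte offset 0x123
    let addr = 0xFFFFFF8140003123usize;

    assert_eq!((addr >> 39) & 0x1FF, 511); // L0: the higher-half slot init_fixed_tables fills
    assert_eq!((addr >> 30) & 0x1FF, 5);   // L1: DEVICE_MAPPING_L1I
    assert_eq!((addr >> 21) & 0x1FF, 0);   // L2: device_l2[0] -> device_l3
    assert_eq!((addr >> 12) & 0x1FF, 3);   // L3: fourth 4KiB page in the window
    assert_eq!(addr & 0xFFF, 0x123);       // L3::page_offset
}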
diff --git a/src/debug.rs b/src/debug.rs
index 876b908e..df707292 100644
--- a/src/debug.rs
+++ b/src/debug.rs
@@ -1,12 +1,16 @@
 //! Utilities for debug information logging
 use core::fmt::{self, Arguments};
 
-use crate::{
-    arch::PLATFORM,
-    device::{platform::Platform, serial::SerialDevice},
-    sync::IrqSafeSpinlock,
-    util::OneTimeInit,
-};
+use abi::error::Error;
+
+use crate::{sync::IrqSafeSpinlock, util::OneTimeInit};
+
+// use crate::{
+//     arch::PLATFORM,
+//     device::{platform::Platform, serial::SerialDevice},
+//     sync::IrqSafeSpinlock,
+//     util::OneTimeInit,
+// };
 
 /// Defines the severity of the message
 #[derive(Clone, Copy)]
@@ -23,8 +27,16 @@ pub enum LogLevel {
     Fatal,
 }
 
+pub trait DebugSink {
+    fn putc(&self, c: u8) -> Result<(), Error>;
+
+    fn supports_color(&self) -> bool {
+        false
+    }
+}
+
 struct DebugPrinter {
-    sink: &'static dyn SerialDevice,
+    sink: &'static dyn DebugSink,
 }
 
 macro_rules! log_print_raw {
@@ -35,7 +47,7 @@ macro_rules! log_print_raw {
 
 macro_rules! log_print {
     ($level:expr, $($args:tt)+) => {
-        log_print_raw!($level, "cpu{}:{}:{}: {}", $crate::arch::aarch64::cpu::Cpu::local_id(), file!(), line!(), format_args!($($args)+))
+        log_print_raw!($level, "{}:{}: {}", file!(), line!(), format_args!($($args)+))
     };
 }
 
@@ -92,7 +104,7 @@ impl LogLevel {
 impl fmt::Write for DebugPrinter {
     fn write_str(&mut self, s: &str) -> fmt::Result {
         for c in s.bytes() {
-            self.sink.send(c).ok();
+            self.sink.putc(c).ok();
         }
 
         Ok(())
@@ -126,14 +138,18 @@ pub fn hex_dump(level: LogLevel, addr_offset: usize, data: &[u8]) {
 ///
 /// Will panic if called more than once.
 pub fn init() {
-    DEBUG_PRINTER.init(IrqSafeSpinlock::new(DebugPrinter {
-        sink: PLATFORM.primary_serial().unwrap(),
-    }));
-    unsafe {
-        vfs::init_debug_hook(&move |args| {
-            debug_internal(args, LogLevel::Debug);
-        });
-    }
+    // DEBUG_PRINTER.init(IrqSafeSpinlock::new(DebugPrinter {
+    //     sink: PLATFORM.primary_serial().unwrap(),
+    // }));
+    // unsafe {
+    //     vfs::init_debug_hook(&move |args| {
+    //         debug_internal(args, LogLevel::Debug);
+    //     });
+    // }
+}
+
+pub fn init_with_sink(sink: &'static dyn DebugSink) {
+    DEBUG_PRINTER.init(IrqSafeSpinlock::new(DebugPrinter { sink }));
 }
 
 #[doc(hidden)]
@@ -143,8 +159,8 @@ pub fn debug_internal(args: Arguments, level: LogLevel) {
     if DEBUG_PRINTER.is_initialized() {
         let mut printer = DEBUG_PRINTER.get().lock();
 
-        printer.write_str(level.log_prefix()).ok();
+        // printer.write_str(level.log_prefix()).ok();
         printer.write_fmt(args).ok();
-        printer.write_str(level.log_suffix()).ok();
+        // printer.write_str(level.log_suffix()).ok();
     }
 }
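What implementing the new DebugSink trait looks like, as a standalone sketch: the trait and error type are stubbed here so the example compiles on its own (the real ones live in src/debug.rs and yggdrasil-abi), and BufferSink is a hypothetical sink that collects bytes into memory the way the framebuffer console renders them to the screen.

#[derive(Debug)]
struct Error; // stand-in for abi::error::Error

trait DebugSink {
    fn putc(&self, c: u8) -> Result<(), Error>;
    fn supports_color(&self) -> bool {
        false
    }
}

// Hypothetical sink collecting output into a growable buffer
struct BufferSink {
    data: std::sync::Mutex<Vec<u8>>,
}

impl DebugSink for BufferSink {
    fn putc(&self, c: u8) -> Result<(), Error> {
        self.data.lock().map_err(|_| Error)?.push(c);
        Ok(())
    }
}

fn main() {
    let sink = BufferSink { data: Default::default() };
    for b in b"hello" {
        sink.putc(*b).unwrap();
    }
    assert_eq!(sink.data.lock().unwrap().as_slice(), b"hello");
}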
diff --git a/src/main.rs b/src/main.rs
index ab5246ba..3d739565 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -15,7 +15,7 @@
 #![no_std]
 #![no_main]
 
-// extern crate yggdrasil_abi as abi;
+extern crate yggdrasil_abi as abi;
 //
 // use abi::{
 //     error::Error,
@@ -29,8 +29,8 @@
 //
 // extern crate alloc;
 //
-// #[macro_use]
-// pub mod debug;
+#[macro_use]
+pub mod debug;
 #[macro_use]
 pub mod arch;
 
@@ -41,13 +41,13 @@ fn panic_handler(_pi: &core::panic::PanicInfo) -> ! {
 //
 // pub mod device;
 // pub mod fs;
-// pub mod mem;
+pub mod mem;
 // pub mod panic;
 // pub mod proc;
-// pub mod sync;
+pub mod sync;
 // pub mod syscall;
 // pub mod task;
-// pub mod util;
+pub mod util;
 //
 // fn setup_root() -> Result<VnodeRef, Error> {
 //     let initrd_data = INITRD_DATA.get();
diff --git a/src/mem/device.rs b/src/mem/device.rs
index 898ddda8..d689a6eb 100644
--- a/src/mem/device.rs
+++ b/src/mem/device.rs
@@ -29,14 +29,20 @@ impl DeviceMemory {
     /// points to some device's MMIO. The caller must also make sure no aliasing for that range is
     /// possible.
     pub unsafe fn map(name: &'static str, phys: usize, size: usize) -> Result<Self, Error> {
-        if size > 0x1000 {
-            todo!("Device memory mappings larger than 4K");
-        }
+        let aligned_base = phys & !0xFFF;
+        let base_offset = phys & 0xFFF;
+        let aligned_size = (size + base_offset + 0xFFF) & !0xFFF;
 
-        let base = ARCHITECTURE.map_device_pages(phys, 1)?;
+        let base = ARCHITECTURE.map_device_pages(aligned_base, aligned_size / 0x1000)?;
+        let base = base + base_offset;
 
         Ok(Self { name, base, size })
     }
+
+    #[inline]
+    pub fn base(&self) -> usize {
+        self.base
+    }
 }
 
 impl<T> DeviceMemoryIo<T> {
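The page-rounding in DeviceMemory::map, checked on the host with made-up MMIO regions. Note that the sub-page offset has to be included before rounding the size up (as done above), otherwise a region that straddles one extra page boundary gets one page too few:

fn main() {
    // Mirrors the map() arithmetic
    fn round(phys: usize, size: usize) -> (usize, usize, usize) {
        let aligned_base = phys & !0xFFF;
        let base_offset = phys & 0xFFF;
        let aligned_size = (size + base_offset + 0xFFF) & !0xFFF;
        (aligned_base, base_offset, aligned_size)
    }

    // A device at 0xFEBC_1010, 0x2004 bytes long: three 4KiB pages
    assert_eq!(round(0xFEBC_1010, 0x2004), (0xFEBC_1000, 0x010, 0x3000));

    // 0x20 bytes starting 0x10 before a page boundary: two pages, not one
    assert_eq!(round(0xFEBC_1FF0, 0x20), (0xFEBC_1000, 0xFF0, 0x2000));
}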
diff --git a/src/mem/mod.rs b/src/mem/mod.rs
index 725d7d37..5c8f2c7a 100644
--- a/src/mem/mod.rs
+++ b/src/mem/mod.rs
@@ -1,25 +1,27 @@
 //! Memory management utilities and types
 use core::{alloc::Layout, mem::size_of};
 
-use abi::error::Error;
-
-use crate::{
-    arch::{Architecture, ArchitectureImpl, PlatformImpl},
-    device::platform::Platform,
-};
-
-use self::table::AddressSpace;
+// use abi::error::Error;
+//
+// use crate::{
+//     arch::{Architecture, ArchitectureImpl, PlatformImpl},
+//     device::platform::Platform,
+// };
+//
+// use self::table::AddressSpace;
 
 pub mod device;
-pub mod heap;
-pub mod phys;
+// pub mod heap;
+// pub mod phys;
 pub mod table;
 
 /// Kernel's physical load address
-pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
+// pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
 
 /// Kernel's virtual memory mapping offset (i.e. kernel's virtual address is [KERNEL_PHYS_BASE] +
 /// [KERNEL_VIRT_OFFSET])
-pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
+// pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
+// TODO fix this
+pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
 
 /// Interface for converting between address spaces.
 ///
@@ -49,74 +51,74 @@
     unsafe fn physicalize(self) -> Self;
 }
 
-/// Helper trait to allow cross-address space access to pointers
-pub trait ForeignPointer: Sized {
-    /// Perform a volatile pointer write without dropping the old value.
-    ///
-    /// # Panics
-    ///
-    /// The function panics if any of the following conditions is met:
-    ///
-    /// * The address of the pointer is not mapped in the `space`.
-    /// * The pointer is not writable.
-    /// * The pointer is misaligned.
-    ///
-    /// # Safety
-    ///
-    /// As this function allows direct memory writes, it is inherently unsafe.
-    unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: Self);
-
-    /// Performs pointer validation for given address space:
-    ///
-    /// * Checks if the pointer has proper alignment for the type.
-    /// * Checks if the pointer is mapped in the address space.
-    /// * Checks if the pointer is above the userspace memory boundary.
-    ///
-    /// # Safety
-    ///
-    /// Even though this function does the necessary checks, it is still a raw pointer to reference
-    /// conversion, and thus is unsafe.
-    unsafe fn validate_user_ptr<'a>(
-        self: *const Self,
-        space: &AddressSpace,
-    ) -> Result<&'a Self, Error>;
-
-    /// [ForeignPointer::validate_user_ptr], with extra "writability" check.
-    ///
-    /// # Safety
-    ///
-    /// Even though this function does the necessary checks, it is still a raw pointer to reference
-    /// conversion, and thus is unsafe.
-    unsafe fn validate_user_mut<'a>(
-        self: *mut Self,
-        space: &AddressSpace,
-    ) -> Result<&'a mut Self, Error>;
-
-    /// [ForeignPointer::validate_user_ptr], but for slices
-    ///
-    /// # Safety
-    ///
-    /// Even though this function does the necessary checks, it is still a raw pointer to reference
-    /// conversion, and thus is unsafe.
-    unsafe fn validate_user_slice<'a>(
-        self: *const Self,
-        len: usize,
-        space: &AddressSpace,
-    ) -> Result<&'a [Self], Error>;
-
-    /// [ForeignPointer::validate_user_slice], but for mutable slices
-    ///
-    /// # Safety
-    ///
-    /// Even though this function does the necessary checks, it is still a raw pointer to reference
-    /// conversion, and thus is unsafe.
-    unsafe fn validate_user_slice_mut<'a>(
-        self: *mut Self,
-        len: usize,
-        space: &AddressSpace,
-    ) -> Result<&'a mut [Self], Error>;
-}
-
+// /// Helper trait to allow cross-address space access to pointers
+// pub trait ForeignPointer: Sized {
+//     /// Perform a volatile pointer write without dropping the old value.
+//     ///
+//     /// # Panics
+//     ///
+//     /// The function panics if any of the following conditions is met:
+//     ///
+//     /// * The address of the pointer is not mapped in the `space`.
+//     /// * The pointer is not writable.
+//     /// * The pointer is misaligned.
+//     ///
+//     /// # Safety
+//     ///
+//     /// As this function allows direct memory writes, it is inherently unsafe.
+//     unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: Self);
+//
+//     /// Performs pointer validation for given address space:
+//     ///
+//     /// * Checks if the pointer has proper alignment for the type.
+//     /// * Checks if the pointer is mapped in the address space.
+//     /// * Checks if the pointer is above the userspace memory boundary.
+//     ///
+//     /// # Safety
+//     ///
+//     /// Even though this function does the necessary checks, it is still a raw pointer to reference
+//     /// conversion, and thus is unsafe.
+//     unsafe fn validate_user_ptr<'a>(
+//         self: *const Self,
+//         space: &AddressSpace,
+//     ) -> Result<&'a Self, Error>;
+//
+//     /// [ForeignPointer::validate_user_ptr], with extra "writability" check.
+//     ///
+//     /// # Safety
+//     ///
+//     /// Even though this function does the necessary checks, it is still a raw pointer to reference
+//     /// conversion, and thus is unsafe.
+//     unsafe fn validate_user_mut<'a>(
+//         self: *mut Self,
+//         space: &AddressSpace,
+//     ) -> Result<&'a mut Self, Error>;
+//
+//     /// [ForeignPointer::validate_user_ptr], but for slices
+//     ///
+//     /// # Safety
+//     ///
+//     /// Even though this function does the necessary checks, it is still a raw pointer to reference
+//     /// conversion, and thus is unsafe.
+//     unsafe fn validate_user_slice<'a>(
+//         self: *const Self,
+//         len: usize,
+//         space: &AddressSpace,
+//     ) -> Result<&'a [Self], Error>;
+//
+//     /// [ForeignPointer::validate_user_slice], but for mutable slices
+//     ///
+//     /// # Safety
+//     ///
+//     /// Even though this function does the necessary checks, it is still a raw pointer to reference
+//     /// conversion, and thus is unsafe.
+//     unsafe fn validate_user_slice_mut<'a>(
+//         self: *mut Self,
+//         len: usize,
+//         space: &AddressSpace,
+//     ) -> Result<&'a mut [Self], Error>;
+// }
+//
 unsafe impl ConvertAddress for usize {
     #[inline(always)]
     unsafe fn virtualize(self) -> Self {
@@ -163,120 +165,120 @@ unsafe impl<T> ConvertAddress for *const T {
     }
 }
 
-impl<T> ForeignPointer for T {
-    unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: T) {
-        // TODO check align
-        let addr = self as usize;
-        let start_page = addr & !0xFFF;
-        let end_page = (addr + size_of::<T>() - 1) & !0xFFF;
-        let page_offset = addr & 0xFFF;
-
-        if start_page != end_page {
-            todo!("Foreign pointer write crossed a page boundary");
-        }
-
-        let phys_page = space
-            .translate(start_page)
-            .expect("Address is not mapped in the target address space");
-
-        let virt_ptr = (phys_page + page_offset).virtualize() as *mut T;
-        virt_ptr.write_volatile(value);
-    }
-
-    unsafe fn validate_user_slice_mut<'a>(
-        self: *mut Self,
-        len: usize,
-        space: &AddressSpace,
-    ) -> Result<&'a mut [Self], Error> {
-        let base = self as usize;
-        let layout = Layout::array::<T>(len).unwrap();
-
-        validate_user_align_size(base, &layout)?;
-        validate_user_region(space, base, layout.size(), true)?;
-
-        Ok(core::slice::from_raw_parts_mut(self, len))
-    }
-
-    unsafe fn validate_user_slice<'a>(
-        self: *const Self,
-        len: usize,
-        space: &AddressSpace,
-    ) -> Result<&'a [Self], Error> {
-        let base = self as usize;
-        let layout = Layout::array::<T>(len).unwrap();
-
-        validate_user_align_size(base, &layout)?;
-        validate_user_region(space, base, layout.size(), false)?;
-
-        Ok(core::slice::from_raw_parts(self, len))
-    }
-
-    unsafe fn validate_user_mut<'a>(
-        self: *mut Self,
-        space: &AddressSpace,
-    ) -> Result<&'a mut Self, Error> {
-        let addr = self as usize;
-        let layout = Layout::new::<T>();
-
-        // Common validation
-        validate_user_align_size(addr, &layout)?;
-
-        // Validate that the pages covered by this address are mapped as writable by the process
-        // TODO for CoW this may differ
-        validate_user_region(space, addr, layout.size(), true)?;
-
-        Ok(&mut *self)
-    }
-
-    unsafe fn validate_user_ptr<'a>(
-        self: *const Self,
-        space: &AddressSpace,
-    ) -> Result<&'a Self, Error> {
-        let addr = self as usize;
-        let layout = Layout::new::<T>();
-
-        // Common validation
-        validate_user_align_size(addr, &layout)?;
-        validate_user_region(space, addr, layout.size(), false)?;
-
-        Ok(&*self)
-    }
-}
-
-fn validate_user_align_size(addr: usize, layout: &Layout) -> Result<(), Error> {
-    // Explicitly disallow NULL
-    if addr == 0 {
-        return Err(Error::InvalidArgument);
-    }
-    // Validate alignment
-    if addr % layout.align() != 0 {
-        return Err(Error::InvalidArgument);
-    }
-    if addr + layout.size() > KERNEL_VIRT_OFFSET {
-        todo!();
-    }
-
-    Ok(())
-}
-
-/// Validates access to given userspace memory region with given constraints
-pub fn validate_user_region(
-    space: &AddressSpace,
-    base: usize,
-    len: usize,
-    _need_write: bool,
-) -> Result<(), Error> {
-    if base + len > crate::mem::KERNEL_VIRT_OFFSET {
-        panic!("Invalid argument");
-    }
-
-    let aligned_start = base & !0xFFF;
-    let aligned_end = (base + len + 0xFFF) & !0xFFF;
-
-    for page in (aligned_start..aligned_end).step_by(0x1000) {
-        // TODO check writability
-        space.translate(page).ok_or(Error::InvalidArgument)?;
-    }
-
-    Ok(())
-}
+// impl<T> ForeignPointer for T {
+//     unsafe fn write_foreign_volatile(self: *mut Self, space: &AddressSpace, value: T) {
+//         // TODO check align
+//         let addr = self as usize;
+//         let start_page = addr & !0xFFF;
+//         let end_page = (addr + size_of::<T>() - 1) & !0xFFF;
+//         let page_offset = addr & 0xFFF;
+//
+//         if start_page != end_page {
+//             todo!("Foreign pointer write crossed a page boundary");
+//         }
+//
+//         let phys_page = space
+//             .translate(start_page)
+//             .expect("Address is not mapped in the target address space");
+//
+//         let virt_ptr = (phys_page + page_offset).virtualize() as *mut T;
+//         virt_ptr.write_volatile(value);
+//     }
+//
+//     unsafe fn validate_user_slice_mut<'a>(
+//         self: *mut Self,
+//         len: usize,
+//         space: &AddressSpace,
+//     ) -> Result<&'a mut [Self], Error> {
+//         let base = self as usize;
+//         let layout = Layout::array::<T>(len).unwrap();
+//
+//         validate_user_align_size(base, &layout)?;
+//         validate_user_region(space, base, layout.size(), true)?;
+//
+//         Ok(core::slice::from_raw_parts_mut(self, len))
+//     }
+//
+//     unsafe fn validate_user_slice<'a>(
+//         self: *const Self,
+//         len: usize,
+//         space: &AddressSpace,
+//     ) -> Result<&'a [Self], Error> {
+//         let base = self as usize;
+//         let layout = Layout::array::<T>(len).unwrap();
+//
+//         validate_user_align_size(base, &layout)?;
+//         validate_user_region(space, base, layout.size(), false)?;
+//
+//         Ok(core::slice::from_raw_parts(self, len))
+//     }
+//
+//     unsafe fn validate_user_mut<'a>(
+//         self: *mut Self,
+//         space: &AddressSpace,
+//     ) -> Result<&'a mut Self, Error> {
+//         let addr = self as usize;
+//         let layout = Layout::new::<T>();
+//
+//         // Common validation
+//         validate_user_align_size(addr, &layout)?;
+//
+//         // Validate that the pages covered by this address are mapped as writable by the process
+//         // TODO for CoW this may differ
+//         validate_user_region(space, addr, layout.size(), true)?;
+//
+//         Ok(&mut *self)
+//     }
+//
+//     unsafe fn validate_user_ptr<'a>(
+//         self: *const Self,
+//         space: &AddressSpace,
+//     ) -> Result<&'a Self, Error> {
+//         let addr = self as usize;
+//         let layout = Layout::new::<T>();
+//
+//         // Common validation
+//         validate_user_align_size(addr, &layout)?;
+//         validate_user_region(space, addr, layout.size(), false)?;
+//
+//         Ok(&*self)
+//     }
+// }
+//
+// fn validate_user_align_size(addr: usize, layout: &Layout) -> Result<(), Error> {
+//     // Explicitly disallow NULL
+//     if addr == 0 {
+//         return Err(Error::InvalidArgument);
+//     }
+//     // Validate alignment
+//     if addr % layout.align() != 0 {
+//         return Err(Error::InvalidArgument);
+//     }
+//     if addr + layout.size() > KERNEL_VIRT_OFFSET {
+//         todo!();
+//     }
+//
+//     Ok(())
+// }
+//
+// /// Validates access to given userspace memory region with given constraints
+// pub fn validate_user_region(
+//     space: &AddressSpace,
+//     base: usize,
+//     len: usize,
+//     _need_write: bool,
+// ) -> Result<(), Error> {
+//     if base + len > crate::mem::KERNEL_VIRT_OFFSET {
+//         panic!("Invalid argument");
+//     }
+//
+//     let aligned_start = base & !0xFFF;
+//     let aligned_end = (base + len + 0xFFF) & !0xFFF;
+//
+//     for page in (aligned_start..aligned_end).step_by(0x1000) {
+//         // TODO check writability
+//         space.translate(page).ok_or(Error::InvalidArgument)?;
+//     }
+//
+//     Ok(())
+// }
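The ConvertAddress impl bodies are elided by the hunk context above; going by the doc comment on KERNEL_VIRT_OFFSET, the conversion is a fixed-offset translation. A minimal sketch of that contract, assuming virtualize and physicalize simply add and subtract the offset:

fn main() {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;

    let phys = 0x140000000usize;
    let virt = phys + KERNEL_VIRT_OFFSET; // virtualize
    assert_eq!(virt, 0xFFFFFF8140000000);
    assert_eq!(virt - KERNEL_VIRT_OFFSET, phys); // physicalize round-trips
}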
diff --git a/src/mem/table.rs b/src/mem/table.rs
index 0456bca3..317b4262 100644
--- a/src/mem/table.rs
+++ b/src/mem/table.rs
@@ -1,7 +1,14 @@
 //! Virtual memory table interface
 use abi::error::Error;
+use cfg_if::cfg_if;
 
-pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
+cfg_if! {
+    if #[cfg(target_arch = "aarch64")] {
+        pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
+    } else if #[cfg(target_arch = "x86_64")] {
+        pub use crate::arch::x86_64::table::{PageAttributes};
+    }
+}
 
 /// Interface for virtual memory address space management
 pub trait VirtualMemoryManager {
@@ -38,3 +45,9 @@ pub trait EntryLevel: Copy {
     /// Returns the offset of an address from the page start at current level
     fn page_offset(addr: usize) -> usize;
 }
+
+/// Tag trait to mark that the page table level may point to a next-level table
+pub trait NonTerminalEntryLevel: EntryLevel {
+    /// Tag type of the level this entry level may point to
+    type NextLevel: EntryLevel;
+}
diff --git a/src/sync.rs b/src/sync.rs
index 042388f8..baf080c9 100644
--- a/src/sync.rs
+++ b/src/sync.rs
@@ -5,9 +5,6 @@ use core::{
     sync::atomic::{AtomicBool, AtomicUsize, Ordering},
 };
 
-use aarch64_cpu::registers::DAIF;
-use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
-
 /// Simple spinloop-based fence guaranteeing that the execution resumes only after its condition is
 /// met.
 pub struct SpinFence {
@@ -132,15 +129,16 @@ impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> {
 impl IrqGuard {
     /// Saves the current IRQ state and masks them
    pub fn acquire() -> Self {
-        let this = Self(DAIF.get());
-        DAIF.modify(DAIF::I::SET);
-        this
+        Self(0)
+        // let this = Self(DAIF.get());
+        // DAIF.modify(DAIF::I::SET);
+        // this
    }
 }
 
 impl Drop for IrqGuard {
     fn drop(&mut self) {
-        DAIF.set(self.0);
+        // DAIF.set(self.0);
     }
 }
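IrqGuard is stubbed out above because DAIF is aarch64-only; an x86_64 version would presumably save RFLAGS.IF and execute cli, restoring the saved state on drop. The RAII pattern itself, host-runnable with an atomic standing in for the CPU's interrupt-enable state:

use std::sync::atomic::{AtomicBool, Ordering};

static INTERRUPTS_ENABLED: AtomicBool = AtomicBool::new(true); // stand-in CPU state

struct IrqGuard(bool);

impl IrqGuard {
    fn acquire() -> Self {
        // Save the previous state, then mask
        Self(INTERRUPTS_ENABLED.swap(false, Ordering::SeqCst))
    }
}

impl Drop for IrqGuard {
    fn drop(&mut self) {
        // Restore whatever was saved, like DAIF.set(self.0) on aarch64
        INTERRUPTS_ENABLED.store(self.0, Ordering::SeqCst);
    }
}

fn main() {
    {
        let _guard = IrqGuard::acquire();
        assert!(!INTERRUPTS_ENABLED.load(Ordering::SeqCst)); // masked inside the scope
    }
    assert!(INTERRUPTS_ENABLED.load(Ordering::SeqCst)); // restored on drop
}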