feature: initial support for x86_64
Parent: fcbe412732
Commit: ec5b5bc31b

Makefile (17 lines changed)
@@ -29,7 +29,14 @@ endif

QEMU_OPTS=-s
ifeq ($(ARCH),x86_64)
$(error TODO)
MACH=none
QEMU_OPTS+=-cdrom $(O)/image.iso \
	-M q35,accel=kvm \
	-cpu host \
	-enable-kvm \
	-m 512 \
	-serial mon:stdio \
	-net none
else
ifeq ($(MACH),qemu)
QEMU_OPTS+=-kernel $(O)/kernel.bin \
@@ -65,10 +72,16 @@ endif

.PHONY: address error etc kernel src

all: kernel initrd
all: kernel

kernel:
	cd kernel && cargo build $(CARGO_BUILD_OPTS)
ifeq ($(ARCH),x86_64)
	mkdir -p $(O)/image/boot/grub
	cp etc/x86_64-none.grub $(O)/image/boot/grub/grub.cfg
	cp $(O)/kernel $(O)/image/boot/kernel
	grub-mkrescue -o $(O)/image.iso $(O)/image
endif
ifeq ($(ARCH),aarch64)
	$(LLVM_BASE)/llvm-strip -o $(O)/kernel.strip $(O)/kernel
	$(LLVM_BASE)/llvm-size $(O)/kernel.strip
@@ -1,3 +1,3 @@
menuentry "OS" {
	multiboot2 /boot/kernel.elf
	multiboot2 /boot/kernel
}
@@ -6,7 +6,8 @@ SECTIONS {
    . = 0x400000 + KERNEL_OFFSET;

    .text : AT(. - KERNEL_OFFSET) {
        KEEP(*(.multiboot))
        KEEP(*(.text._multiboot))
        *(.text._entry)
        *(.text*)
    }
@@ -15,13 +15,13 @@ memfs = { path = "../fs/memfs" }
libsys = { path = "../libsys" }
cfg-if = "1.x.x"
tock-registers = "0.7.x"
fdt-rs = { version = "0.x.x", default-features = false }
bitflags = "^1.3.0"
kernel-macros = { path = "macros" }
fs-macros = { path = "../fs/macros" }

[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = { version = "6.x.x" }
fdt-rs = { version = "0.x.x", default-features = false }

[features]
default = ["aggressive_syscall"]
kernel/src/arch/aarch64/table.rs (new file, 422 lines)
@@ -0,0 +1,422 @@
//
// #[no_mangle]
// static mut KERNEL_TTBR1: FixedTableGroup = FixedTableGroup::empty();

/// Transparent wrapper structure representing a single
/// translation table entry
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);

/// Structure describing a single level of translation mappings
#[repr(C, align(0x1000))]
pub struct Table {
    entries: [Entry; 512],
}

/// Wrapper for top-most level of address translation tables
#[repr(transparent)]
pub struct Space(Table);

bitflags! {
    /// Attributes attached to each translation [Entry]
    pub struct MapAttributes: u64 {
        // TODO use 2 lower bits to determine mapping size?
        /// nG bit -- determines whether a TLB entry associated with this mapping
        /// applies only to current ASID or all ASIDs.
        const NOT_GLOBAL = 1 << 11;
        /// AF bit -- must be set by software, otherwise Access Error exception is
        /// generated when the page is accessed
        const ACCESS = 1 << 10;
        /// The memory region is outer-shareable
        const SH_OUTER = 2 << 8;
        /// This page is used for device-MMIO mapping and uses MAIR attribute #1
        const DEVICE = 1 << 2;

        /// Pages marked with this bit are Copy-on-Write
        const EX_COW = 1 << 55;

        /// UXN bit -- if set, page may not be used for instruction fetching from EL0
        const UXN = 1 << 54;
        /// PXN bit -- if set, page may not be used for instruction fetching from EL1
        const PXN = 1 << 53;

        // AP field
        // Default behavior is: read-write for EL1, no access for EL0
        /// If set, the page referred to by this entry is read-only for both EL0/EL1
        const AP_BOTH_READONLY = 3 << 6;
        /// If set, the page referred to by this entry is read-write for both EL0/EL1
        const AP_BOTH_READWRITE = 1 << 6;
    }
}

impl Table {
    /// Returns next-level translation table reference for `index`, if one is present.
    /// If `index` represents a `Block`-type mapping, will return an error.
    /// If `index` does not map to any translation table, will try to allocate, init and
    /// map a new one, returning it after doing so.
    pub fn next_level_table_or_alloc(&mut self, index: usize) -> Result<&'static mut Table, Errno> {
        let entry = self[index];
        if entry.is_present() {
            if !entry.is_table() {
                return Err(Errno::InvalidArgument);
            }

            Ok(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
        } else {
            let phys = phys::alloc_page(PageUsage::Paging)?;
            let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
            self[index] = Entry::table(phys, MapAttributes::empty());
            res.entries.fill(Entry::invalid());
            Ok(res)
        }
    }

    /// Returns next-level translation table reference for `index`, if one is present.
    /// Same as [next_level_table_or_alloc], but returns `None` if no table is mapped.
    pub fn next_level_table(&mut self, index: usize) -> Option<&'static mut Table> {
        let entry = self[index];
        if entry.is_present() {
            if !entry.is_table() {
                panic!("Entry is not a table: idx={}", index);
            }

            Some(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
        } else {
            None
        }
    }

    /// Constructs and fills a [Table] with non-present mappings
    pub const fn empty() -> Table {
        Table {
            entries: [Entry::invalid(); 512],
        }
    }
}

impl Index<usize> for Table {
    type Output = Entry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for Table {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Entry {
    const PRESENT: u64 = 1 << 0;
    const TABLE: u64 = 1 << 1;
    const PHYS_MASK: u64 = 0x0000FFFFFFFFF000;

    /// Constructs a single non-present mapping
    pub const fn invalid() -> Self {
        Self(0)
    }

    /// Constructs a `Block`-type memory mapping
    pub const fn block(phys: usize, attrs: MapAttributes) -> Self {
        Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT)
    }

    /// Constructs a `Table` or `Page`-type mapping depending on translation level
    /// this entry is used at
    pub const fn table(phys: usize, attrs: MapAttributes) -> Self {
        Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT | Self::TABLE)
    }

    /// Returns `true` if this entry is not invalid
    pub const fn is_present(self) -> bool {
        self.0 & Self::PRESENT != 0
    }

    /// Returns `true` if this entry is a `Table` or `Page`-type mapping
    pub const fn is_table(self) -> bool {
        self.0 & Self::TABLE != 0
    }

    /// Returns the target address of this translation entry.
    ///
    /// # Safety
    ///
    /// Does not check if the entry is actually valid.
    pub const unsafe fn address_unchecked(self) -> usize {
        (self.0 & Self::PHYS_MASK) as usize
    }

    unsafe fn set_address(&mut self, address: usize) {
        self.0 &= !Self::PHYS_MASK;
        self.0 |= (address as u64) & Self::PHYS_MASK;
    }

    unsafe fn fork_flags(self) -> MapAttributes {
        MapAttributes::from_bits_unchecked(self.0 & !Self::PHYS_MASK)
    }

    fn set_cow(&mut self) {
        self.0 |= (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
    }

    fn clear_cow(&mut self) {
        self.0 &= !(MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
        self.0 |= MapAttributes::AP_BOTH_READWRITE.bits();
    }

    #[inline]
    fn is_cow(self) -> bool {
        let attrs = (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
        self.0 & attrs == attrs
    }
}

impl Space {
    /// Creates a new virtual address space and fills it with [Entry::invalid()]
    /// mappings. Does physical memory page allocation.
    pub fn alloc_empty() -> Result<&'static mut Self, Errno> {
        let phys = phys::alloc_page(PageUsage::Paging)?;
        let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
        res.0.entries.fill(Entry::invalid());
        Ok(res)
    }

    /// Inserts a single `virt` -> `phys` translation entry to this address space.
    ///
    /// TODO: only works with 4K-sized pages at this moment.
    pub fn map(&mut self, virt: usize, phys: usize, flags: MapAttributes) -> Result<(), Errno> {
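        // With a 4 KiB granule and 512-entry tables, bits [38:30], [29:21] and
        // [20:12] of the virtual address select the L0, L1 and L2 table indices.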
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table_or_alloc(l0i)?;
        let l2_table = l1_table.next_level_table_or_alloc(l1i)?;

        if l2_table[l2i].is_present() {
            Err(Errno::AlreadyExists)
        } else {
            l2_table[l2i] = Entry::table(phys, flags | MapAttributes::ACCESS);
            #[cfg(feature = "verbose")]
            debugln!("{:#p} Map {:#x} -> {:#x}, {:?}", self, virt, phys, flags);
            Ok(())
        }
    }

    /// Translates a virtual address into a corresponding physical one.
    ///
    /// Only works for 4K pages atm.
    // TODO extract attributes
    pub fn translate(&mut self, virt: usize) -> Result<usize, Errno> {
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];
        if entry.is_present() {
            Ok(unsafe { entry.address_unchecked() })
        } else {
            Err(Errno::DoesNotExist)
        }
    }

    /// Attempts to resolve a page fault at `virt` address by copying the
    /// underlying Copy-on-Write mapping (if any is present)
    pub fn try_cow_copy(&mut self, virt: usize) -> Result<(), Errno> {
        let virt = virt & !0xFFF;
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];

        if !entry.is_present() {
            warnln!("Entry is not present: {:#x}", virt);
            return Err(Errno::DoesNotExist);
        }

        let src_phys = unsafe { entry.address_unchecked() };
        if !entry.is_cow() {
            warnln!(
                "Entry is not marked as CoW: {:#x}, points to {:#x}",
                virt,
                src_phys
            );
            return Err(Errno::DoesNotExist);
        }

        let dst_phys = unsafe { phys::copy_cow_page(src_phys)? };
        unsafe {
            l2_table[l2i].set_address(dst_phys);
        }
        l2_table[l2i].clear_cow();

        Ok(())
    }

    /// Allocates a contiguous region from the address space and maps
    /// physical pages to it
    pub fn allocate(
        &mut self,
        start: usize,
        end: usize,
        len: usize,
        flags: MapAttributes,
        usage: PageUsage,
    ) -> Result<usize, Errno> {
        'l0: for page in (start..end).step_by(0x1000) {
            for i in 0..len {
                if self.translate(page + i * 0x1000).is_ok() {
                    continue 'l0;
                }
            }

            for i in 0..len {
                let phys = phys::alloc_page(usage).unwrap();
                self.map(page + i * 0x1000, phys, flags).unwrap();
            }
            return Ok(page);
        }
        Err(Errno::OutOfMemory)
    }

    /// Removes a single 4K page mapping from the table and
    /// releases the underlying physical memory
    pub fn unmap_single(&mut self, page: usize) -> Result<(), Errno> {
        let l0i = page >> 30;
        let l1i = (page >> 21) & 0x1FF;
        let l2i = (page >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];

        if !entry.is_present() {
            return Err(Errno::DoesNotExist);
        }

        let phys = unsafe { entry.address_unchecked() };
        unsafe {
            phys::free_page(phys)?;
        }
        l2_table[l2i] = Entry::invalid();

        unsafe {
            asm!("tlbi vaae1, {}", in(reg) page);
        }

        // TODO release paging structure memory

        Ok(())
    }

    /// Releases a range of virtual pages and their corresponding physical pages
    pub fn free(&mut self, start: usize, len: usize) -> Result<(), Errno> {
        for i in 0..len {
            self.unmap_single(start + i * 0x1000)?;
        }
        Ok(())
    }

    /// Performs a copy of the address space, cloning data owned by it
    pub fn fork(&mut self) -> Result<&'static mut Self, Errno> {
        let res = Self::alloc_empty()?;
        for l0i in 0..512 {
            if let Some(l1_table) = self.0.next_level_table(l0i) {
                for l1i in 0..512 {
                    if let Some(l2_table) = l1_table.next_level_table(l1i) {
                        for l2i in 0..512 {
                            let entry = l2_table[l2i];

                            if !entry.is_present() {
                                continue;
                            }

                            assert!(entry.is_table());
                            let src_phys = unsafe { entry.address_unchecked() };
                            let virt_addr = (l0i << 30) | (l1i << 21) | (l2i << 12);
                            let dst_phys = unsafe { phys::fork_page(src_phys)? };

                            let mut flags = unsafe { entry.fork_flags() };
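                            // Writable pages are downgraded to read-only + CoW in both
                            // the parent and the child; the data is only copied later,
                            // by try_cow_copy(), when either side faults on a write.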
                            if dst_phys != src_phys {
                                todo!();
                                // res.map(virt_addr, dst_phys, flags)?;
                            } else {
                                let writable = flags & MapAttributes::AP_BOTH_READONLY
                                    == MapAttributes::AP_BOTH_READWRITE;

                                if writable {
                                    flags |=
                                        MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
                                    l2_table[l2i].set_cow();

                                    unsafe {
                                        asm!("tlbi vaae1, {}", in(reg) virt_addr);
                                    }
                                }

                                res.map(virt_addr, dst_phys, flags)?;
                            }
                        }
                    }
                }
            }
        }
        Ok(res)
    }

    /// Releases all the mappings from the address space. Frees all
    /// memory pages referenced by this space as well as those used for
    /// its paging tables.
    ///
    /// # Safety
    ///
    /// Unsafe: may invalidate currently active address space
    pub unsafe fn release(space: &mut Self) {
        for l0i in 0..512 {
            let l0_entry = space.0[l0i];
            if !l0_entry.is_present() {
                continue;
            }

            assert!(l0_entry.is_table());
            let l1_table = &mut *(mem::virtualize(l0_entry.address_unchecked()) as *mut Table);

            for l1i in 0..512 {
                let l1_entry = l1_table[l1i];
                if !l1_entry.is_present() {
                    continue;
                }
                assert!(l1_entry.is_table());
                let l2_table = &mut *(mem::virtualize(l1_entry.address_unchecked()) as *mut Table);

                for l2i in 0..512 {
                    let entry = l2_table[l2i];
                    if !entry.is_present() {
                        continue;
                    }

                    assert!(entry.is_table());
                    phys::free_page(entry.address_unchecked()).unwrap();
                }
                phys::free_page(l1_entry.address_unchecked()).unwrap();
            }
            phys::free_page(l0_entry.address_unchecked()).unwrap();
        }
        memset(space as *mut Space as *mut u8, 0, 4096);
    }

    /// Returns the physical address of this structure
    pub fn address_phys(&mut self) -> usize {
        (self as *mut _ as usize) - mem::KERNEL_OFFSET
    }
}
@@ -15,6 +15,11 @@ cfg_if! {

        pub use aarch64 as platform;
        pub use aarch64::machine;
    } else if #[cfg(target_arch = "x86_64")] {
        pub mod x86_64;

        pub use x86_64 as platform;
        pub use x86_64 as machine;
    }
}
kernel/src/arch/x86_64/boot/entry.S (new file, 42 lines)
@@ -0,0 +1,42 @@
.section .text._multiboot
.set MAGIC, 0xE85250D6
.set ARCH, 0x0
.set HDRLEN, 16
.set CHKSUM, (-(MAGIC + ARCH + HDRLEN)) & 0xFFFFFFFF

.long MAGIC
.long ARCH
.long HDRLEN
.long CHKSUM
.short 0
.long 8

.section .text._entry
.global _entry
_entry:
.code32
    cli
    lea (multiboot_registers - KERNEL_OFFSET), %edi
    mov %eax, 0(%edi)
    mov %ebx, 4(%edi)

    // Setup paging tables
    lea (_entry_upper - KERNEL_OFFSET), %ebx
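    // %ebx carries the physical address of _entry_upper; __x86_64_enter_upper
    // jumps back through it (with KERNEL_OFFSET added) once long mode is active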
    jmp __x86_64_enter_upper

.code64
_entry_upper:
    lea bsp_stack_top(%rip), %rax
    mov %rax, %rsp

    mov multiboot_registers(%rip), %edi
    mov (4 + multiboot_registers)(%rip), %esi
    call __x86_64_bsp_main

.section .bss
.align 16
bsp_stack_bottom:
    .skip 65536
bsp_stack_top:
multiboot_registers:
    .skip 8
kernel/src/arch/x86_64/boot/macros.S (new file, 7 lines)
@@ -0,0 +1,7 @@
.set KERNEL_OFFSET, 0xFFFFFF8000000000

.set PTE_PRESENT, 1 << 0
.set PTE_WRITABLE, 1 << 1
.set PTE_USERSPACE, 1 << 2
.set PTE_BLOCK, 1 << 7
kernel/src/arch/x86_64/boot/mod.rs (new file, 17 lines)
@@ -0,0 +1,17 @@
use crate::arch::x86_64::{gdt, idt};

#[no_mangle]
extern "C" fn __x86_64_bsp_main(mb_checksum: u32, mb_info_ptr: u32) -> ! {
    // TODO enable FP support for kernel/user
    // Setup a proper GDT
    unsafe {
        gdt::init();
        idt::init(|_| {});
    }

    loop {}
}

global_asm!(include_str!("macros.S"), options(att_syntax));
global_asm!(include_str!("entry.S"), options(att_syntax));
global_asm!(include_str!("upper.S"), options(att_syntax));
kernel/src/arch/x86_64/boot/upper.S (new file, 102 lines)
@@ -0,0 +1,102 @@
.code32

.section .text._entry
__x86_64_enter_upper:
    // Setup paging table
    mov $(PTE_PRESENT | PTE_BLOCK | PTE_WRITABLE | PTE_USERSPACE), %eax
    // Fill PD0: 0..1GiB
    lea (KERNEL_PD0 - KERNEL_OFFSET), %edi
    mov $512, %ecx
1:
    dec %ecx

    mov %ecx, %edx
    shl $21, %edx
    or %eax, %edx

    mov %edx, (%edi, %ecx, 8)

    test %ecx, %ecx
    jnz 1b

    // Fill PD1: 1GiB..2GiB
    lea (KERNEL_PD1 - KERNEL_OFFSET), %edi
    mov $512, %ecx
1:
    dec %ecx

    mov %ecx, %edx
    add $512, %edx
    shl $21, %edx
    or %eax, %edx

    mov %edx, (%edi, %ecx, 8)

    test %ecx, %ecx
    jnz 1b

    // Setup PDPT
    mov $(PTE_PRESENT | PTE_WRITABLE | PTE_USERSPACE), %eax
    lea (KERNEL_PDPT - KERNEL_OFFSET), %edi

    lea (KERNEL_PD0 - KERNEL_OFFSET), %esi
    or %eax, %esi
    mov %esi, (%edi)

    lea (KERNEL_PD1 - KERNEL_OFFSET), %esi
    or %eax, %esi
    mov %esi, 8(%edi)

    // Setup PML4
    lea (KERNEL_PML4 - KERNEL_OFFSET), %edi
    lea (KERNEL_PDPT - KERNEL_OFFSET), %esi
    or %eax, %esi
    mov %esi, (%edi)
    mov %esi, 4088(%edi)
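    // 4088 = 511 * 8: the same PDPT is installed both at PML4 slot 0 (identity
    // mapping used during the transition) and slot 511, which covers KERNEL_OFFSET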
    // Enable PAE/PSE
    mov %cr4, %eax
    or $((1 << 5) | (1 << 4)), %eax
    mov %eax, %cr4

    // Enable EFER.LME
    mov $0xC0000080, %ecx
    rdmsr
    or $(1 << 8), %eax
    wrmsr

    // Set CR3
    mov %edi, %cr3

    // Enable paging
    mov %cr0, %eax
    or $(1 << 31), %eax
    mov %eax, %cr0

    lgdt (gdtr64 - KERNEL_OFFSET)
    ljmp $0x08, $(1f - KERNEL_OFFSET)
1:
    mov $0x10, %ax
    mov %ax, %ds
    mov %ax, %es
    mov %ax, %fs
    mov %ax, %gs
    mov %ax, %ss

.code64
    mov $KERNEL_OFFSET, %rax
    add %rax, %rbx
    jmp *%rbx

.section .rodata
.code32
.align 16
gdt64:
    .quad 0
    .quad 0x00209A0000000000
    .quad 0x0000920000000000
gdt_end64:
.align 16
gdtr64:
    .short gdt_end64 - gdt64 - 1
    .long gdt64 - KERNEL_OFFSET
kernel/src/arch/x86_64/exception.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
use crate::debug::Level;

#[derive(Debug)]
struct ExceptionFrame {
    r15: u64,
    r14: u64,
    r13: u64,
    r12: u64,
    r11: u64,
    r10: u64,
    r9: u64,
    r8: u64,
    rdi: u64,
    rsi: u64,
    rbp: u64,
    rbx: u64,
    rdx: u64,
    rcx: u64,
    rax: u64,

    err_no: u64,
    err_code: u64,
    rip: u64,
    cs: u64,
    rflags: u64,
    rsp: u64,
    ss: u64,
}
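// On a page fault (#PF, vector 14) the CPU reports the faulting virtual address in CR2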
fn pfault_read_cr2() -> u64 {
    let mut res;
    unsafe {
        asm!("mov %cr2, {}", out(reg) res, options(att_syntax));
    }
    res
}

fn pfault_access_type(code: u64) -> &'static str {
    if code & (1 << 4) != 0 {
        "INSTRUCTION FETCH"
    } else if code & (1 << 1) != 0 {
        "WRITE"
    } else {
        "READ"
    }
}

fn pfault_dump(level: Level, frame: &ExceptionFrame, cr2: u64) {
    println!(level, "\x1B[41;1mPage fault:");
    println!(level, " Illegal {} at {:#018x}\x1B[0m", pfault_access_type(frame.err_code), cr2);
}

#[no_mangle]
extern "C" fn __x86_64_exception_handler(frame: &mut ExceptionFrame) {
    if frame.err_no == 14 {
        // TODO userspace page faults
        let cr2 = pfault_read_cr2();
        pfault_dump(Level::Error, frame, cr2);
    }

    errorln!(
        "Exception occurred: err_no={}, err_code={:#x}",
        frame.err_no,
        frame.err_code,
    );
    errorln!("cs:rip = {:02x}:{:#x}", frame.cs, frame.rip);
    errorln!("ss:rsp = {:02x}:{:#x}", frame.ss, frame.rsp);

    panic!("Unhandled exception");
}
kernel/src/arch/x86_64/gdt.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
use core::mem::size_of_val;

#[repr(packed)]
struct Entry {
    limit_lo: u16,
    base_lo: u16,
    base_mi: u8,
    access: u8,
    flags: u8,
    base_hi: u8,
}

#[repr(packed)]
struct Tss {
    __res0: u32,
    rsp0: u64,
    rsp1: u64,
    rsp2: u64,
    __res1: u32,
    ist1: u64,
    ist2: u64,
    ist3: u64,
    ist4: u64,
    ist5: u64,
    ist6: u64,
    ist7: u64,
    __res2: u64,
    __res3: u16,
    iopb_base: u16,
}

#[repr(packed)]
struct Pointer {
    size: u16,
    offset: usize,
}

impl Entry {
    const FLAG_LONG: u8 = 1 << 5;
    const ACC_PRESENT: u8 = 1 << 7;
    const ACC_SYSTEM: u8 = 1 << 4;
    const ACC_EXECUTE: u8 = 1 << 3;
    const ACC_WRITE: u8 = 1 << 1;
    const ACC_RING3: u8 = 3 << 5;
    const ACC_ACCESS: u8 = 1 << 0;

    const fn new(base: u32, limit: u32, flags: u8, access: u8) -> Self {
        Self {
            base_lo: (base & 0xFFFF) as u16,
            base_mi: ((base >> 16) & 0xFF) as u8,
            base_hi: ((base >> 24) & 0xFF) as u8,
            access,
            flags: (flags & 0xF0) | (((limit >> 16) & 0xF) as u8),
            limit_lo: (limit & 0xFFFF) as u16,
        }
    }

    const fn null() -> Self {
        Self {
            base_lo: 0,
            base_mi: 0,
            base_hi: 0,
            access: 0,
            flags: 0,
            limit_lo: 0,
        }
    }
}

impl Tss {
    const fn new() -> Self {
        Self {
            __res0: 0,
            rsp0: 0,
            rsp1: 0,
            rsp2: 0,
            __res1: 0,
            ist1: 0,
            ist2: 0,
            ist3: 0,
            ist4: 0,
            ist5: 0,
            ist6: 0,
            ist7: 0,
            __res2: 0,
            __res3: 0,
            iopb_base: 0,
        }
    }
}

const SIZE: usize = 7;
static mut TSS: Tss = Tss::new();
static mut GDT: [Entry; SIZE] = [
    Entry::null(),
    Entry::new(
        0,
        0,
        Entry::FLAG_LONG,
        Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_EXECUTE,
    ),
    Entry::new(
        0,
        0,
        0,
        Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_WRITE,
    ),
    Entry::new(
        0,
        0,
        0,
        Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_WRITE,
    ),
    Entry::new(
        0,
        0,
        Entry::FLAG_LONG,
        Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_EXECUTE,
    ),
    Entry::null(),
    Entry::null(),
];

pub unsafe fn init() {
    let tss_addr = &TSS as *const _ as usize;
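    // In long mode the TSS descriptor is 16 bytes wide, so it occupies two GDT
    // slots: entry 5 holds the low half, entry 6 receives the upper 32 bits of
    // the TSS base address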
    GDT[5] = Entry::new(
        (tss_addr & 0xFFFFFFFF) as u32,
        size_of_val(&TSS) as u32 - 1,
        Entry::FLAG_LONG,
        Entry::ACC_ACCESS | Entry::ACC_PRESENT | Entry::ACC_EXECUTE,
    );
    core::ptr::write(&mut GDT[6] as *mut _ as *mut u64, (tss_addr >> 32) as u64);

    let gdtr = Pointer {
        size: size_of_val(&GDT) as u16 - 1,
        offset: &GDT as *const _ as usize,
    };
    asm!(r#"
        lgdt ({})

        mov $0x28, %ax
        ltr %ax
    "#, in(reg) &gdtr, options(att_syntax));
}
kernel/src/arch/x86_64/idt.S (new file, 110 lines)
@@ -0,0 +1,110 @@
.macro isr_nerr, n
exc_isr_\n:
    cli
    pushq $0
    pushq $\n
    jmp __x86_64_isr_common
.endm

// ISR for exception with an error code
.macro isr_yerr, n
exc_isr_\n:
    cli
    pushq $\n
    jmp __x86_64_isr_common
.endm

.section .text
__x86_64_isr_common:
    push %rax
    push %rcx
    push %rdx
    push %rbx
    push %rbp
    push %rsi
    push %rdi
    push %r8
    push %r9
    push %r10
    push %r11
    push %r12
    push %r13
    push %r14
    push %r15

    mov %rsp, %rdi
    call __x86_64_exception_handler

1:
    cli
    hlt
    jmp 1b

isr_nerr 0
isr_nerr 1
isr_nerr 2
isr_nerr 3
isr_nerr 4
isr_nerr 5
isr_nerr 6
isr_nerr 7
isr_yerr 8
isr_nerr 9
isr_yerr 10
isr_yerr 11
isr_yerr 12
isr_yerr 13
isr_yerr 14
isr_nerr 15
isr_nerr 16
isr_yerr 17
isr_nerr 18
isr_nerr 19
isr_nerr 20
isr_nerr 21
isr_nerr 22
isr_nerr 23
isr_nerr 24
isr_nerr 25
isr_nerr 26
isr_nerr 27
isr_nerr 28
isr_nerr 29
isr_yerr 30
isr_nerr 31

.section .rodata
.global __x86_64_exception_vectors
__x86_64_exception_vectors:
    .quad exc_isr_0
    .quad exc_isr_1
    .quad exc_isr_2
    .quad exc_isr_3
    .quad exc_isr_4
    .quad exc_isr_5
    .quad exc_isr_6
    .quad exc_isr_7
    .quad exc_isr_8
    .quad exc_isr_9
    .quad exc_isr_10
    .quad exc_isr_11
    .quad exc_isr_12
    .quad exc_isr_13
    .quad exc_isr_14
    .quad exc_isr_15
    .quad exc_isr_16
    .quad exc_isr_17
    .quad exc_isr_18
    .quad exc_isr_19
    .quad exc_isr_20
    .quad exc_isr_21
    .quad exc_isr_22
    .quad exc_isr_23
    .quad exc_isr_24
    .quad exc_isr_25
    .quad exc_isr_26
    .quad exc_isr_27
    .quad exc_isr_28
    .quad exc_isr_29
    .quad exc_isr_30
    .quad exc_isr_31
kernel/src/arch/x86_64/idt.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
use core::mem::size_of_val;

#[derive(Clone, Copy)]
#[repr(packed)]
pub struct Entry {
    base_lo: u16,
    selector: u16,
    __res0: u8,
    flags: u8,
    base_hi: u16,
    base_ex: u32,
    __res1: u32
}

#[repr(packed)]
struct Pointer {
    limit: u16,
    offset: usize
}

const SIZE: usize = 256;

impl Entry {
    const PRESENT: u8 = 1 << 7;
    const INT32: u8 = 0xE;

    pub const fn new(base: usize, selector: u16, flags: u8) -> Self {
        Self {
            base_lo: (base & 0xFFFF) as u16,
            base_hi: ((base >> 16) & 0xFFFF) as u16,
            base_ex: (base >> 32) as u32,
            selector,
            flags,
            __res0: 0,
            __res1: 0
        }
    }

    const fn empty() -> Self {
        Self {
            base_lo: 0,
            base_hi: 0,
            base_ex: 0,
            selector: 0,
            flags: 0,
            __res0: 0,
            __res1: 0
        }
    }
}

static mut IDT: [Entry; SIZE] = [Entry::empty(); SIZE];

pub unsafe fn init<F: FnOnce(&mut [Entry; SIZE]) -> ()>(f: F) {
    extern "C" {
        static __x86_64_exception_vectors: [usize; 32];
    }
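    // Install the 32 CPU exception vectors defined in idt.S; 0x08 is the kernel
    // code segment selector set up in gdt.rs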
    for (i, &entry) in __x86_64_exception_vectors.iter().enumerate() {
        IDT[i] = Entry::new(entry, 0x08, Entry::PRESENT | Entry::INT32);
    }

    let idtr = Pointer {
        limit: size_of_val(&IDT) as u16 - 1,
        offset: &IDT as *const _ as usize
    };
    asm!("lidt ({})", in(reg) &idtr, options(att_syntax));
}

global_asm!(include_str!("idt.S"), options(att_syntax));
kernel/src/arch/x86_64/io.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
use core::marker::PhantomData;

pub struct PortIo<T> {
    port: u16,
    _pd: PhantomData<T>
}

impl<T> PortIo<T> {
    pub const unsafe fn new(port: u16) -> Self {
        Self { port, _pd: PhantomData }
    }
}

impl PortIo<u8> {
    pub fn read(&self) -> u8 {
        let mut res: u8;
        unsafe {
            asm!("inb %dx, %al", in("dx") self.port, out("al") res, options(att_syntax));
        }
        res
    }

    pub fn write(&self, value: u8) {
        unsafe {
            asm!("outb %al, %dx", in("dx") self.port, in("al") value, options(att_syntax));
        }
    }
}
kernel/src/arch/x86_64/mod.rs (new file, 39 lines)
@@ -0,0 +1,39 @@
use crate::dev::serial::SerialDevice;

mod uart;
use uart::Uart;
mod io;
pub(self) use io::PortIo;

pub mod boot;
pub mod table;
pub(self) mod gdt;
pub(self) mod idt;
pub(self) mod exception;

/// Masks IRQs and returns previous IRQ mask state
///
/// # Safety
///
/// Unsafe: disables IRQ handling temporarily
#[inline(always)]
pub unsafe fn irq_mask_save() -> u64 {
    loop {}
}

/// Restores IRQ mask state
///
/// # Safety
///
/// Unsafe: modifies interrupt behavior. Must only be used in
/// conjunction with [irq_mask_save]
#[inline(always)]
pub unsafe fn irq_restore(state: u64) {
    loop {}
}

pub fn console() -> &'static impl SerialDevice {
    &COM1
}

static COM1: Uart = unsafe { Uart::new(0x3F8) };
kernel/src/arch/x86_64/table.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
use crate::mem::virt::AddressSpace;
use libsys::error::Errno;

#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);

#[repr(C, align(0x1000))]
pub struct Table {
    entries: [Entry; 512],
}

#[repr(transparent)]
pub struct Space(Table);

// Upper mappings
#[no_mangle]
static KERNEL_PDPT: Table = Table::empty();
#[no_mangle]
static KERNEL_PD0: Table = Table::empty();
#[no_mangle]
static KERNEL_PD1: Table = Table::empty();

#[no_mangle]
static KERNEL_PML4: Space = Space::empty();

impl Entry {
    const fn invalid() -> Self {
        Self(0)
    }
}

impl Table {
    const fn empty() -> Self {
        Self {
            entries: [Entry::invalid(); 512]
        }
    }
}

impl Space {
    const fn empty() -> Self {
        Self(Table::empty())
    }
}

impl AddressSpace for Space {
    type Entry = Entry;

    fn read_last_level_entry(&self, virt: usize) -> Result<Entry, Errno> {
        todo!();
    }

    fn write_last_level_entry(
        &mut self,
        virt: usize,
        entry: Entry,
        map_intermediate: bool,
    ) -> Result<(), Errno> {
        todo!();
    }
}

pub fn enable() -> Result<(), Errno> {
    loop {}
}
kernel/src/arch/x86_64/uart.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
use crate::dev::{Device, serial::SerialDevice};
use crate::arch::x86_64::PortIo;
use libsys::error::Errno;

pub(super) struct Uart {
    dr: PortIo<u8>
}

impl Device for Uart {
    fn name(&self) -> &'static str {
        "x86 COM-port"
    }

    unsafe fn enable(&self) -> Result<(), Errno> {
        Ok(())
    }
}

impl SerialDevice for Uart {
    fn send(&self, byte: u8) -> Result<(), Errno> {
        self.dr.write(byte);
        Ok(())
    }

    fn recv(&self, _blocking: bool) -> Result<u8, Errno> {
        todo!()
    }
}

impl Uart {
    pub const unsafe fn new(base: u16) -> Self {
        Self {
            dr: PortIo::new(base)
        }
    }
}
@@ -3,16 +3,16 @@
use libsys::error::Errno;

// Device classes
pub mod fdt;
pub mod gpio;
pub mod irq;
pub mod pci;
pub mod rtc;
pub mod sd;
// pub mod fdt;
// pub mod gpio;
// pub mod irq;
// pub mod pci;
// pub mod rtc;
// pub mod sd;
pub mod serial;
pub mod timer;
pub mod pseudo;
pub mod tty;
// pub mod timer;
// pub mod pseudo;
// pub mod tty;

/// Generic device trait
pub trait Device {
@@ -11,8 +11,9 @@
    const_panic,
    panic_info_message,
    alloc_error_handler,
    linked_list_cursors,
    const_btree_new,
    // linked_list_cursors,
    // const_btree_new,
    core_intrinsics,
    const_generics_defaults,
)]
#![no_std]
@@ -25,29 +26,29 @@ extern crate kernel_macros;
extern crate cfg_if;
#[macro_use]
extern crate bitflags;
extern crate alloc;
// extern crate alloc;

#[macro_use]
pub mod debug;

//
pub mod arch;
pub mod config;
pub mod dev;
pub mod fs;
pub mod init;
// pub mod fs;
// pub mod init;
pub mod mem;
pub mod proc;
// pub mod proc;
pub mod sync;
pub mod syscall;
// pub mod syscall;
pub mod util;

#[panic_handler]
fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
    unsafe {
        asm!("msr daifset, #2");
    }
    // unsafe {
    //     asm!("msr daifset, #2");
    // }

    errorln!("Panic: {:?}", pi);
    // errorln!("Panic: {:?}", pi);
    // TODO
    loop {}
}
@@ -59,7 +59,7 @@ static HEAP: InitOnce<IrqSafeSpinLock<Heap>> = InitOnce::new();
pub unsafe fn init(base: usize, size: usize) {
    let heap = Heap { base, size, ptr: 0 };

    infoln!("Kernel heap: {:#x}..{:#x}", base, base + size);
    // infoln!("Kernel heap: {:#x}..{:#x}", base, base + size);

    HEAP.init(IrqSafeSpinLock::new(heap));
}
@@ -209,7 +209,7 @@ pub unsafe fn init_from_iter<T: Iterator<Item = MemoryRegion> + Clone>(iter: T)
            mem_base = reg.start;
        }
    }
    infoln!("Memory base is {:#x}", mem_base);
    // infoln!("Memory base is {:#x}", mem_base);
    // Step 1. Count available memory
    let mut total_pages = 0usize;
    for reg in iter.clone() {
@@ -238,7 +238,7 @@ pub unsafe fn init_from_iter<T: Iterator<Item = MemoryRegion> + Clone>(iter: T)
            }
        }
    }
    infoln!("{}K of usable physical memory", usable_pages * 4);
    // infoln!("{}K of usable physical memory", usable_pages * 4);
    *MANAGER.lock() = Some(manager);
}
@@ -47,12 +47,12 @@ static mut RESERVED_REGION_PAGES: MaybeUninit<ReservedRegion> = MaybeUninit::uni
///
/// Unsafe: `region` is passed as a raw pointer.
pub unsafe fn reserve(usage: &str, region: *mut ReservedRegion) {
    infoln!(
        "Reserving {:?} region: {:#x}..{:#x}",
        usage,
        (*region).start,
        (*region).end
    );
    // infoln!(
    //     "Reserving {:?} region: {:#x}..{:#x}",
    //     usage,
    //     (*region).start,
    //     (*region).end
    // );
    (*region).next = RESERVED_REGIONS_HEAD;
    RESERVED_REGIONS_HEAD = region;
}
@@ -2,18 +2,18 @@

use core::marker::PhantomData;
use core::ops::Deref;
use cortex_a::asm::barrier::{self, dsb, isb};
use cortex_a::registers::TTBR0_EL1;
// use cortex_a::asm::barrier::{self, dsb, isb};
// use cortex_a::registers::TTBR0_EL1;
use libsys::error::Errno;
use tock_registers::interfaces::Writeable;

// use tock_registers::interfaces::Writeable;
//
pub mod table;
pub use table::{Entry, MapAttributes, Space, Table};
pub mod fixed;
pub use fixed::FixedTableGroup;
pub use table::{AddressSpace, Space};

#[no_mangle]
static mut KERNEL_TTBR1: FixedTableGroup = FixedTableGroup::empty();
// pub use table::{Entry, MapAttributes, Space, Table};
// pub mod fixed;
// pub use fixed::FixedTableGroup;
use crate::arch::platform::table as plat_table;

/// Structure representing a region of memory used for MMIO/device access
// TODO: this shouldn't be trivially-cloneable and should instead incorporate
@@ -45,15 +45,16 @@ impl DeviceMemory {
    ///
    /// See [FixedTableGroup::map_region]
    pub fn map(name: &'static str, phys: usize, count: usize) -> Result<Self, Errno> {
        let base = unsafe { KERNEL_TTBR1.map_region(phys, count) }?;
        debugln!(
            "Mapping {:#x}..{:#x} -> {:#x} for {:?}",
            base,
            base + count * 0x1000,
            phys,
            name
        );
        Ok(Self { name, base, count })
        todo!();
        // let base = unsafe { KERNEL_TTBR1.map_region(phys, count) }?;
        // debugln!(
        //     "Mapping {:#x}..{:#x} -> {:#x} for {:?}",
        //     base,
        //     base + count * 0x1000,
        //     phys,
        //     name
        // );
        // Ok(Self { name, base, count })
    }
}

@@ -90,16 +91,18 @@ impl<T> Deref for DeviceMemoryIo<T> {
/// Sets up device mapping tables and disable lower-half
/// identity-mapped translation
pub fn enable() -> Result<(), Errno> {
    unsafe {
        KERNEL_TTBR1.init_device_map();

        dsb(barrier::ISH);
        isb(barrier::SY);
    }

    // Disable lower-half translation
    TTBR0_EL1.set(0);
    //TCR_EL1.modify(TCR_EL1::EPD0::SET);

    Ok(())
    plat_table::enable()
}
// unsafe {
//     KERNEL_TTBR1.init_device_map();
//
//     dsb(barrier::ISH);
//     isb(barrier::SY);
// }
//
// // Disable lower-half translation
// TTBR0_EL1.set(0);
// //TCR_EL1.modify(TCR_EL1::EPD0::SET);
//
// Ok(())
// }
@@ -7,421 +7,18 @@ use crate::mem::{
use core::ops::{Index, IndexMut};
use libsys::{error::Errno, mem::memset};

/// Transparent wrapper structure representing a single
/// translation table entry
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);
use crate::arch::platform::table;

/// Structure describing a single level of translation mappings
#[repr(C, align(0x1000))]
pub struct Table {
    entries: [Entry; 512],
}
pub use table::{Space, Table};

/// Wrapper for top-most level of address translation tables
#[repr(transparent)]
pub struct Space(Table);
pub trait AddressSpace {
    type Entry;

bitflags! {
    /// Attributes attached to each translation [Entry]
    pub struct MapAttributes: u64 {
        // TODO use 2 lower bits to determine mapping size?
        /// nG bit -- determines whether a TLB entry associated with this mapping
        /// applies only to current ASID or all ASIDs.
        const NOT_GLOBAL = 1 << 11;
        /// AF bit -- must be set by software, otherwise Access Error exception is
        /// generated when the page is accessed
        const ACCESS = 1 << 10;
        /// The memory region is outer-shareable
        const SH_OUTER = 2 << 8;
        /// This page is used for device-MMIO mapping and uses MAIR attribute #1
        const DEVICE = 1 << 2;

        /// Pages marked with this bit are Copy-on-Write
        const EX_COW = 1 << 55;

        /// UXN bit -- if set, page may not be used for instruction fetching from EL0
        const UXN = 1 << 54;
        /// PXN bit -- if set, page may not be used for instruction fetching from EL1
        const PXN = 1 << 53;

        // AP field
        // Default behavior is: read-write for EL1, no access for EL0
        /// If set, the page referred to by this entry is read-only for both EL0/EL1
        const AP_BOTH_READONLY = 3 << 6;
        /// If set, the page referred to by this entry is read-write for both EL0/EL1
        const AP_BOTH_READWRITE = 1 << 6;
    }
}

impl Table {
    /// Returns next-level translation table reference for `index`, if one is present.
    /// If `index` represents a `Block`-type mapping, will return an error.
    /// If `index` does not map to any translation table, will try to allocate, init and
    /// map a new one, returning it after doing so.
    pub fn next_level_table_or_alloc(&mut self, index: usize) -> Result<&'static mut Table, Errno> {
        let entry = self[index];
        if entry.is_present() {
            if !entry.is_table() {
                return Err(Errno::InvalidArgument);
            }

            Ok(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
        } else {
            let phys = phys::alloc_page(PageUsage::Paging)?;
            let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
            self[index] = Entry::table(phys, MapAttributes::empty());
            res.entries.fill(Entry::invalid());
            Ok(res)
        }
    }

    /// Returns next-level translation table reference for `index`, if one is present.
    /// Same as [next_level_table_or_alloc], but returns `None` if no table is mapped.
    pub fn next_level_table(&mut self, index: usize) -> Option<&'static mut Table> {
        let entry = self[index];
        if entry.is_present() {
            if !entry.is_table() {
                panic!("Entry is not a table: idx={}", index);
            }

            Some(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
        } else {
            None
        }
    }

    /// Constructs and fills a [Table] with non-present mappings
    pub const fn empty() -> Table {
        Table {
            entries: [Entry::invalid(); 512],
        }
    }
}

impl Index<usize> for Table {
    type Output = Entry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for Table {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Entry {
    const PRESENT: u64 = 1 << 0;
    const TABLE: u64 = 1 << 1;
    const PHYS_MASK: u64 = 0x0000FFFFFFFFF000;

    /// Constructs a single non-present mapping
    pub const fn invalid() -> Self {
        Self(0)
    }

    /// Constructs a `Block`-type memory mapping
    pub const fn block(phys: usize, attrs: MapAttributes) -> Self {
        Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT)
    }

    /// Constructs a `Table` or `Page`-type mapping depending on translation level
    /// this entry is used at
    pub const fn table(phys: usize, attrs: MapAttributes) -> Self {
        Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT | Self::TABLE)
    }

    /// Returns `true` if this entry is not invalid
    pub const fn is_present(self) -> bool {
        self.0 & Self::PRESENT != 0
    }

    /// Returns `true` if this entry is a `Table` or `Page`-type mapping
    pub const fn is_table(self) -> bool {
        self.0 & Self::TABLE != 0
    }

    /// Returns the target address of this translation entry.
    ///
    /// # Safety
    ///
    /// Does not check if the entry is actually valid.
    pub const unsafe fn address_unchecked(self) -> usize {
        (self.0 & Self::PHYS_MASK) as usize
    }

    unsafe fn set_address(&mut self, address: usize) {
        self.0 &= !Self::PHYS_MASK;
        self.0 |= (address as u64) & Self::PHYS_MASK;
    }

    unsafe fn fork_flags(self) -> MapAttributes {
        MapAttributes::from_bits_unchecked(self.0 & !Self::PHYS_MASK)
    }

    fn set_cow(&mut self) {
        self.0 |= (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
    }

    fn clear_cow(&mut self) {
        self.0 &= !(MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
        self.0 |= MapAttributes::AP_BOTH_READWRITE.bits();
    }

    #[inline]
    fn is_cow(self) -> bool {
        let attrs = (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
        self.0 & attrs == attrs
    }
}

impl Space {
    /// Creates a new virtual address space and fills it with [Entry::invalid()]
    /// mappings. Does physical memory page allocation.
    pub fn alloc_empty() -> Result<&'static mut Self, Errno> {
        let phys = phys::alloc_page(PageUsage::Paging)?;
        let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
        res.0.entries.fill(Entry::invalid());
        Ok(res)
    }

    /// Inserts a single `virt` -> `phys` translation entry to this address space.
    ///
    /// TODO: only works with 4K-sized pages at this moment.
    pub fn map(&mut self, virt: usize, phys: usize, flags: MapAttributes) -> Result<(), Errno> {
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table_or_alloc(l0i)?;
        let l2_table = l1_table.next_level_table_or_alloc(l1i)?;

        if l2_table[l2i].is_present() {
            Err(Errno::AlreadyExists)
        } else {
            l2_table[l2i] = Entry::table(phys, flags | MapAttributes::ACCESS);
            #[cfg(feature = "verbose")]
            debugln!("{:#p} Map {:#x} -> {:#x}, {:?}", self, virt, phys, flags);
            Ok(())
        }
    }

    /// Translates a virtual address into a corresponding physical one.
    ///
    /// Only works for 4K pages atm.
    // TODO extract attributes
    pub fn translate(&mut self, virt: usize) -> Result<usize, Errno> {
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];
        if entry.is_present() {
            Ok(unsafe { entry.address_unchecked() })
        } else {
            Err(Errno::DoesNotExist)
        }
    }

    /// Attempts to resolve a page fault at `virt` address by copying the
    /// underlying Copy-on-Write mapping (if any is present)
    pub fn try_cow_copy(&mut self, virt: usize) -> Result<(), Errno> {
        let virt = virt & !0xFFF;
        let l0i = virt >> 30;
        let l1i = (virt >> 21) & 0x1FF;
        let l2i = (virt >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];

        if !entry.is_present() {
            warnln!("Entry is not present: {:#x}", virt);
            return Err(Errno::DoesNotExist);
        }

        let src_phys = unsafe { entry.address_unchecked() };
        if !entry.is_cow() {
            warnln!(
                "Entry is not marked as CoW: {:#x}, points to {:#x}",
                virt,
                src_phys
            );
            return Err(Errno::DoesNotExist);
        }

        let dst_phys = unsafe { phys::copy_cow_page(src_phys)? };
        unsafe {
            l2_table[l2i].set_address(dst_phys);
        }
        l2_table[l2i].clear_cow();

        Ok(())
    }

    /// Allocates a contiguous region from the address space and maps
    /// physical pages to it
    pub fn allocate(
    fn read_last_level_entry(&self, virt: usize) -> Result<Self::Entry, Errno>;
    fn write_last_level_entry(
        &mut self,
        start: usize,
        end: usize,
        len: usize,
        flags: MapAttributes,
        usage: PageUsage,
    ) -> Result<usize, Errno> {
        'l0: for page in (start..end).step_by(0x1000) {
            for i in 0..len {
                if self.translate(page + i * 0x1000).is_ok() {
                    continue 'l0;
                }
            }

            for i in 0..len {
                let phys = phys::alloc_page(usage).unwrap();
                self.map(page + i * 0x1000, phys, flags).unwrap();
            }
            return Ok(page);
        }
        Err(Errno::OutOfMemory)
    }

    /// Removes a single 4K page mapping from the table and
    /// releases the underlying physical memory
    pub fn unmap_single(&mut self, page: usize) -> Result<(), Errno> {
        let l0i = page >> 30;
        let l1i = (page >> 21) & 0x1FF;
        let l2i = (page >> 12) & 0x1FF;

        let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
        let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;

        let entry = l2_table[l2i];

        if !entry.is_present() {
            return Err(Errno::DoesNotExist);
        }

        let phys = unsafe { entry.address_unchecked() };
        unsafe {
            phys::free_page(phys)?;
        }
        l2_table[l2i] = Entry::invalid();

        unsafe {
            asm!("tlbi vaae1, {}", in(reg) page);
        }

        // TODO release paging structure memory

        Ok(())
    }

    /// Releases a range of virtual pages and their corresponding physical pages
    pub fn free(&mut self, start: usize, len: usize) -> Result<(), Errno> {
        for i in 0..len {
            self.unmap_single(start + i * 0x1000)?;
        }
        Ok(())
    }

    /// Performs a copy of the address space, cloning data owned by it
    pub fn fork(&mut self) -> Result<&'static mut Self, Errno> {
        let res = Self::alloc_empty()?;
        for l0i in 0..512 {
            if let Some(l1_table) = self.0.next_level_table(l0i) {
                for l1i in 0..512 {
                    if let Some(l2_table) = l1_table.next_level_table(l1i) {
                        for l2i in 0..512 {
                            let entry = l2_table[l2i];

                            if !entry.is_present() {
                                continue;
                            }

                            assert!(entry.is_table());
                            let src_phys = unsafe { entry.address_unchecked() };
                            let virt_addr = (l0i << 30) | (l1i << 21) | (l2i << 12);
                            let dst_phys = unsafe { phys::fork_page(src_phys)? };

                            let mut flags = unsafe { entry.fork_flags() };
                            if dst_phys != src_phys {
                                todo!();
                                // res.map(virt_addr, dst_phys, flags)?;
                            } else {
                                let writable = flags & MapAttributes::AP_BOTH_READONLY
                                    == MapAttributes::AP_BOTH_READWRITE;

                                if writable {
                                    flags |=
                                        MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
                                    l2_table[l2i].set_cow();

                                    unsafe {
                                        asm!("tlbi vaae1, {}", in(reg) virt_addr);
                                    }
                                }

                                res.map(virt_addr, dst_phys, flags)?;
                            }
                        }
                    }
                }
            }
        }
        Ok(res)
    }

    /// Releases all the mappings from the address space. Frees all
    /// memory pages referenced by this space as well as those used for
    /// its paging tables.
    ///
    /// # Safety
    ///
    /// Unsafe: may invalidate currently active address space
    pub unsafe fn release(space: &mut Self) {
        for l0i in 0..512 {
            let l0_entry = space.0[l0i];
            if !l0_entry.is_present() {
                continue;
            }

            assert!(l0_entry.is_table());
            let l1_table = &mut *(mem::virtualize(l0_entry.address_unchecked()) as *mut Table);

            for l1i in 0..512 {
                let l1_entry = l1_table[l1i];
                if !l1_entry.is_present() {
                    continue;
                }
                assert!(l1_entry.is_table());
                let l2_table = &mut *(mem::virtualize(l1_entry.address_unchecked()) as *mut Table);

                for l2i in 0..512 {
                    let entry = l2_table[l2i];
                    if !entry.is_present() {
                        continue;
                    }

                    assert!(entry.is_table());
                    phys::free_page(entry.address_unchecked()).unwrap();
                }
                phys::free_page(l1_entry.address_unchecked()).unwrap();
            }
            phys::free_page(l0_entry.address_unchecked()).unwrap();
        }
        memset(space as *mut Space as *mut u8, 0, 4096);
    }

    /// Returns the physical address of this structure
    pub fn address_phys(&mut self) -> usize {
        (self as *mut _ as usize) - mem::KERNEL_OFFSET
    }
        virt: usize,
        entry: Self::Entry,
        map_intermediate: bool,
    ) -> Result<(), Errno>;
}
@@ -38,7 +38,7 @@ impl<T> IrqSafeSpinLock<T> {
    #[inline(always)]
    unsafe fn force_release(&self) {
        self.state.store(false, Ordering::Release);
        cortex_a::asm::sev();
        // cortex_a::asm::sev();
    }

    /// Returns [IrqSafeSpinLockGuard] for this lock
@@ -47,7 +47,7 @@ impl<T> IrqSafeSpinLock<T> {
        let irq_state = unsafe { irq_mask_save() };

        while self.try_lock().is_err() {
            cortex_a::asm::wfe();
            // cortex_a::asm::wfe();
        }

        IrqSafeSpinLockGuard {