Compare commits

...

7 Commits

61 changed files with 3714 additions and 1316 deletions

Cargo.lock generated

@ -97,6 +97,7 @@ dependencies = [
"kernel-macros",
"libsys",
"memfs",
"multiboot2",
"tock-registers",
"vfs",
]
@ -162,6 +163,14 @@ dependencies = [
"autocfg",
]
[[package]]
name = "multiboot2"
version = "0.12.2"
source = "git+https://github.com/alnyan/multiboot2?branch=expose-extra-traits-for-iters#7e86b55fa5ab82e54978021f8022068a1591166b"
dependencies = [
"bitflags",
]
[[package]]
name = "num-derive"
version = "0.3.3"


@ -29,7 +29,12 @@ endif
QEMU_OPTS=-s
ifeq ($(ARCH),x86_64)
$(error TODO)
MACH=none
QEMU_OPTS+=-cdrom $(O)/image.iso \
-M q35 \
-m 512 \
-serial mon:stdio \
-net none
else
ifeq ($(MACH),qemu)
QEMU_OPTS+=-kernel $(O)/kernel.bin \
@ -65,7 +70,7 @@ endif
.PHONY: address error etc kernel src
all: kernel initrd
all: image
kernel:
cd kernel && cargo build $(CARGO_BUILD_OPTS)
@ -87,6 +92,16 @@ ifeq ($(MACH),orangepi3)
$(O)/uImage
endif
image: kernel initrd
ifeq ($(ARCH),x86_64)
mkdir -p $(O)/image/boot/grub
cp etc/x86_64-none.grub $(O)/image/boot/grub/grub.cfg
cp $(O)/kernel $(O)/image/boot/kernel
cp $(O)/initrd.img $(O)/image/boot/initrd.img
grub-mkrescue -o $(O)/image.iso $(O)/image
endif
initrd:
cd user && cargo build \
--target=../etc/$(ARCH)-osdev5.json \
@ -99,7 +114,6 @@ initrd:
touch $(O)/rootfs/sys/.do_not_remove
cp target/$(ARCH)-osdev5/$(PROFILE)/init $(O)/rootfs/init
cp target/$(ARCH)-osdev5/$(PROFILE)/shell $(O)/rootfs/bin
cp target/$(ARCH)-osdev5/$(PROFILE)/fuzzy $(O)/rootfs/bin
cp target/$(ARCH)-osdev5/$(PROFILE)/ls $(O)/rootfs/bin
cp target/$(ARCH)-osdev5/$(PROFILE)/cat $(O)/rootfs/bin
cp target/$(ARCH)-osdev5/$(PROFILE)/hexd $(O)/rootfs/bin

etc/default8x16.psfu Normal file (binary file not shown)


@ -1,3 +1,4 @@
menuentry "OS" {
multiboot2 /boot/kernel.elf
multiboot2 /boot/kernel
module2 /boot/initrd.img
}


@ -6,7 +6,8 @@ SECTIONS {
. = 0x400000 + KERNEL_OFFSET;
.text : AT(. - KERNEL_OFFSET) {
KEEP(*(.multiboot))
KEEP(*(.text._multiboot))
*(.text._entry)
*(.text*)
}
@ -21,5 +22,8 @@ SECTIONS {
.bss : AT(. - KERNEL_OFFSET) {
*(COMMON)
*(.bss*)
. = ALIGN(4K);
}
PROVIDE(__kernel_end = .);
}

etc/x86_64-osdev5.json Normal file

@ -0,0 +1,21 @@
{
"arch": "x86_64",
"cpu": "x86-64",
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
"disable-redzone": true,
"executables": true,
"panic-strategy": "abort",
"linker": "rust-lld",
"linker-flavor": "ld.lld",
"llvm-target": "x86_64-unknown-linux-gnu",
"max-atomic-width": 64,
"target-pointer-width": "64",
"os": "none",
"pre-link-args": {
"ld.lld": [ "-Tetc/x86_64-osdev5.ld" ]
}
}

etc/x86_64-osdev5.ld Normal file

@ -0,0 +1,32 @@
ENTRY(_start);
PHDRS {
text PT_LOAD ;
rodata PT_LOAD ;
data PT_LOAD ;
}
SECTIONS {
. = 0x400000;
.text : {
*(.text._start)
*(.text*)
*(.eh_frame*)
} :text
. = ALIGN(0x1000);
.rodata : {
*(.rodata*)
} :rodata
. = ALIGN(0x1000);
.data : {
*(.data*)
} :data
.bss : {
*(COMMON)
*(.bss*)
} :data
}


@ -15,13 +15,16 @@ memfs = { path = "../fs/memfs" }
libsys = { path = "../libsys" }
cfg-if = "1.x.x"
tock-registers = "0.7.x"
fdt-rs = { version = "0.x.x", default-features = false }
bitflags = "^1.3.0"
kernel-macros = { path = "macros" }
fs-macros = { path = "../fs/macros" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
multiboot2 = { git = "https://github.com/alnyan/multiboot2", branch = "expose-extra-traits-for-iters" }
[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = { version = "6.x.x" }
fdt-rs = { version = "0.x.x", default-features = false }
[features]
default = ["aggressive_syscall"]


@ -0,0 +1,422 @@
//
// #[no_mangle]
// static mut KERNEL_TTBR1: FixedTableGroup = FixedTableGroup::empty();
/// Transparent wrapper structure representing a single
/// translation table entry
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);
/// Structure describing a single level of translation mappings
#[repr(C, align(0x1000))]
pub struct Table {
entries: [Entry; 512],
}
/// Wrapper for top-most level of address translation tables
#[repr(transparent)]
pub struct Space(Table);
bitflags! {
/// Attributes attached to each translation [Entry]
pub struct MapAttributes: u64 {
// TODO use 2 lower bits to determine mapping size?
/// nG bit -- determines whether a TLB entry associated with this mapping
/// applies only to current ASID or all ASIDs.
const NOT_GLOBAL = 1 << 11;
/// AF bit -- must be set by software, otherwise Access Error exception is
/// generated when the page is accessed
const ACCESS = 1 << 10;
/// The memory region is outer-shareable
const SH_OUTER = 2 << 8;
/// This page is used for device-MMIO mapping and uses MAIR attribute #1
const DEVICE = 1 << 2;
/// Pages marked with this bit are Copy-on-Write
const EX_COW = 1 << 55;
/// UXN bit -- if set, page may not be used for instruction fetching from EL0
const UXN = 1 << 54;
/// PXN bit -- if set, page may not be used for instruction fetching from EL1
const PXN = 1 << 53;
// AP field
// Default behavior is: read-write for EL1, no access for EL0
/// If set, the page referred to by this entry is read-only for both EL0/EL1
const AP_BOTH_READONLY = 3 << 6;
/// If set, the page referred to by this entry is read-write for both EL0/EL1
const AP_BOTH_READWRITE = 1 << 6;
}
}
impl Table {
/// Returns next-level translation table reference for `index`, if one is present.
/// If `index` represents a `Block`-type mapping, will return an error.
/// If `index` does not map to any translation table, will try to allocate, init and
/// map a new one, returning it after doing so.
pub fn next_level_table_or_alloc(&mut self, index: usize) -> Result<&'static mut Table, Errno> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
return Err(Errno::InvalidArgument);
}
Ok(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
} else {
let phys = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
self[index] = Entry::table(phys, MapAttributes::empty());
res.entries.fill(Entry::invalid());
Ok(res)
}
}
/// Returns next-level translation table reference for `index`, if one is present.
/// Same as [next_level_table_or_alloc], but returns `None` if no table is mapped.
pub fn next_level_table(&mut self, index: usize) -> Option<&'static mut Table> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
panic!("Entry is not a table: idx={}", index);
}
Some(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
} else {
None
}
}
/// Constructs and fills a [Table] with non-present mappings
pub const fn empty() -> Table {
Table {
entries: [Entry::invalid(); 512],
}
}
}
impl Index<usize> for Table {
type Output = Entry;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl IndexMut<usize> for Table {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
impl Entry {
const PRESENT: u64 = 1 << 0;
const TABLE: u64 = 1 << 1;
const PHYS_MASK: u64 = 0x0000FFFFFFFFF000;
/// Constructs a single non-present mapping
pub const fn invalid() -> Self {
Self(0)
}
/// Constructs a `Block`-type memory mapping
pub const fn block(phys: usize, attrs: MapAttributes) -> Self {
Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT)
}
/// Constructs a `Table` or `Page`-type mapping depending on translation level
/// this entry is used at
pub const fn table(phys: usize, attrs: MapAttributes) -> Self {
Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT | Self::TABLE)
}
/// Returns `true` if this entry is not invalid
pub const fn is_present(self) -> bool {
self.0 & Self::PRESENT != 0
}
/// Returns `true` if this entry is a `Table` or `Page`-type mapping
pub const fn is_table(self) -> bool {
self.0 & Self::TABLE != 0
}
/// Returns the target address of this translation entry.
///
/// # Safety
///
/// Does not check if the entry is actually valid.
pub const unsafe fn address_unchecked(self) -> usize {
(self.0 & Self::PHYS_MASK) as usize
}
unsafe fn set_address(&mut self, address: usize) {
self.0 &= !Self::PHYS_MASK;
self.0 |= (address as u64) & Self::PHYS_MASK;
}
unsafe fn fork_flags(self) -> MapAttributes {
MapAttributes::from_bits_unchecked(self.0 & !Self::PHYS_MASK)
}
fn set_cow(&mut self) {
self.0 |= (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
}
fn clear_cow(&mut self) {
self.0 &= !(MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
self.0 |= MapAttributes::AP_BOTH_READWRITE.bits();
}
#[inline]
fn is_cow(self) -> bool {
let attrs = (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
self.0 & attrs == attrs
}
}
impl Space {
/// Creates a new virtual address space and fills it with [Entry::invalid()]
/// mappings. Does physical memory page allocation.
pub fn alloc_empty() -> Result<&'static mut Self, Errno> {
let phys = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
res.0.entries.fill(Entry::invalid());
Ok(res)
}
/// Inserts a single `virt` -> `phys` translation entry to this address space.
///
/// TODO: only works with 4K-sized pages at this moment.
pub fn map(&mut self, virt: usize, phys: usize, flags: MapAttributes) -> Result<(), Errno> {
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table_or_alloc(l0i)?;
let l2_table = l1_table.next_level_table_or_alloc(l1i)?;
if l2_table[l2i].is_present() {
Err(Errno::AlreadyExists)
} else {
l2_table[l2i] = Entry::table(phys, flags | MapAttributes::ACCESS);
#[cfg(feature = "verbose")]
debugln!("{:#p} Map {:#x} -> {:#x}, {:?}", self, virt, phys, flags);
Ok(())
}
}
/// Translates a virtual address into a corresponding physical one.
///
/// Only works for 4K pages atm.
// TODO extract attributes
pub fn translate(&mut self, virt: usize) -> Result<usize, Errno> {
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if entry.is_present() {
Ok(unsafe { entry.address_unchecked() })
} else {
Err(Errno::DoesNotExist)
}
}
/// Attempts to resolve a page fault at `virt` address by copying the
/// underlying Copy-on-Write mapping (if any is present)
pub fn try_cow_copy(&mut self, virt: usize) -> Result<(), Errno> {
let virt = virt & !0xFFF;
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if !entry.is_present() {
warnln!("Entry is not present: {:#x}", virt);
return Err(Errno::DoesNotExist);
}
let src_phys = unsafe { entry.address_unchecked() };
if !entry.is_cow() {
warnln!(
"Entry is not marked as CoW: {:#x}, points to {:#x}",
virt,
src_phys
);
return Err(Errno::DoesNotExist);
}
let dst_phys = unsafe { phys::copy_cow_page(src_phys)? };
unsafe {
l2_table[l2i].set_address(dst_phys);
}
l2_table[l2i].clear_cow();
Ok(())
}
/// Allocates a contiguous region from the address space and maps
/// physical pages to it
pub fn allocate(
&mut self,
start: usize,
end: usize,
len: usize,
flags: MapAttributes,
usage: PageUsage,
) -> Result<usize, Errno> {
'l0: for page in (start..end).step_by(0x1000) {
for i in 0..len {
if self.translate(page + i * 0x1000).is_ok() {
continue 'l0;
}
}
for i in 0..len {
let phys = phys::alloc_page(usage).unwrap();
self.map(page + i * 0x1000, phys, flags).unwrap();
}
return Ok(page);
}
Err(Errno::OutOfMemory)
}
/// Removes a single 4K page mapping from the table and
/// releases the underlying physical memory
pub fn unmap_single(&mut self, page: usize) -> Result<(), Errno> {
let l0i = page >> 30;
let l1i = (page >> 21) & 0x1FF;
let l2i = (page >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if !entry.is_present() {
return Err(Errno::DoesNotExist);
}
let phys = unsafe { entry.address_unchecked() };
unsafe {
phys::free_page(phys)?;
}
l2_table[l2i] = Entry::invalid();
unsafe {
asm!("tlbi vaae1, {}", in(reg) page);
}
// TODO release paging structure memory
Ok(())
}
/// Releases a range of virtual pages and their corresponding physical pages
pub fn free(&mut self, start: usize, len: usize) -> Result<(), Errno> {
for i in 0..len {
self.unmap_single(start + i * 0x1000)?;
}
Ok(())
}
/// Performs a copy of the address space, cloning data owned by it
pub fn fork(&mut self) -> Result<&'static mut Self, Errno> {
let res = Self::alloc_empty()?;
for l0i in 0..512 {
if let Some(l1_table) = self.0.next_level_table(l0i) {
for l1i in 0..512 {
if let Some(l2_table) = l1_table.next_level_table(l1i) {
for l2i in 0..512 {
let entry = l2_table[l2i];
if !entry.is_present() {
continue;
}
assert!(entry.is_table());
let src_phys = unsafe { entry.address_unchecked() };
let virt_addr = (l0i << 30) | (l1i << 21) | (l2i << 12);
let dst_phys = unsafe { phys::fork_page(src_phys)? };
let mut flags = unsafe { entry.fork_flags() };
if dst_phys != src_phys {
todo!();
// res.map(virt_addr, dst_phys, flags)?;
} else {
let writable = flags & MapAttributes::AP_BOTH_READONLY
== MapAttributes::AP_BOTH_READWRITE;
if writable {
flags |=
MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
l2_table[l2i].set_cow();
unsafe {
asm!("tlbi vaae1, {}", in(reg) virt_addr);
}
}
res.map(virt_addr, dst_phys, flags)?;
}
}
}
}
}
}
Ok(res)
}
/// Releases all the mappings from the address space. Frees all
/// memory pages referenced by this space as well as those used for
/// its paging tables.
///
/// # Safety
///
/// Unsafe: may invalidate currently active address space
pub unsafe fn release(space: &mut Self) {
for l0i in 0..512 {
let l0_entry = space.0[l0i];
if !l0_entry.is_present() {
continue;
}
assert!(l0_entry.is_table());
let l1_table = &mut *(mem::virtualize(l0_entry.address_unchecked()) as *mut Table);
for l1i in 0..512 {
let l1_entry = l1_table[l1i];
if !l1_entry.is_present() {
continue;
}
assert!(l1_entry.is_table());
let l2_table = &mut *(mem::virtualize(l1_entry.address_unchecked()) as *mut Table);
for l2i in 0..512 {
let entry = l2_table[l2i];
if !entry.is_present() {
continue;
}
assert!(entry.is_table());
phys::free_page(entry.address_unchecked()).unwrap();
}
phys::free_page(l1_entry.address_unchecked()).unwrap();
}
phys::free_page(l0_entry.address_unchecked()).unwrap();
}
memset(space as *mut Space as *mut u8, 0, 4096);
}
/// Returns the physical address of this structure
pub fn address_phys(&mut self) -> usize {
(self as *mut _ as usize) - mem::KERNEL_OFFSET
}
}
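
map(), translate(), try_cow_copy() and unmap_single() above all split a virtual address with the same three shifts before walking the tables. A minimal sketch of that decomposition with one worked value; the helper and the test are illustrative, not part of the change:

/// Illustrative only: mirrors the l0i/l1i/l2i shifts used by the methods above.
const fn split_va(virt: usize) -> (usize, usize, usize, usize) {
    let l0i = (virt >> 30) & 0x1FF; // each L0 entry covers 1 GiB
    let l1i = (virt >> 21) & 0x1FF; // each L1 entry covers 2 MiB
    let l2i = (virt >> 12) & 0x1FF; // each L2 entry covers one 4 KiB page
    (l0i, l1i, l2i, virt & 0xFFF)   // remaining 12 bits are the page offset
}

#[test]
fn va_split() {
    // 0x4000_1000 lands in L0 slot 1, L1 slot 0, L2 slot 1, offset 0
    assert_eq!(split_va(0x4000_1000), (1, 0, 1, 0));
}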


@ -15,6 +15,11 @@ cfg_if! {
pub use aarch64 as platform;
pub use aarch64::machine;
} else if #[cfg(target_arch = "x86_64")] {
pub mod x86_64;
pub use x86_64 as platform;
pub use x86_64 as machine;
}
}


@ -0,0 +1,53 @@
.section .text._multiboot
.set MAGIC, 0xE85250D6
.set ARCH, 0x0
.set HDRLEN, 16
.set CHKSUM, (-(MAGIC + ARCH + HDRLEN)) & 0xFFFFFFFF
.long MAGIC
.long ARCH
.long HDRLEN
.long CHKSUM
.short 5
.short 0
.long 20
.long 800
.long 600
.long 32
.short 0
.long 8
.section .text._entry
.global _entry
_entry:
.code32
cli
lea (multiboot_registers - KERNEL_OFFSET), %edi
mov %eax, 0(%edi)
mov %ebx, 4(%edi)
// Setup paging tables
lea (_entry_upper - KERNEL_OFFSET), %ebx
jmp __x86_64_enter_upper
.code64
_entry_upper:
movabsq $1f, %rax
jmp *%rax
1:
lea bsp_stack_top(%rip), %rax
mov %rax, %rsp
mov multiboot_registers(%rip), %edi
mov (4 + multiboot_registers)(%rip), %esi
call __x86_64_bsp_main
.section .bss
.align 16
bsp_stack_bottom:
.skip 65536
bsp_stack_top:
multiboot_registers:
.skip 8
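
The CHKSUM field in the Multiboot2 header above is chosen so that the four header words wrap to zero when summed, which is the invariant a Multiboot2 loader verifies before running the image. A small sketch of that check, with the values copied from the header:

// Illustrative check of the Multiboot2 header checksum emitted above.
const MAGIC: u32 = 0xE852_50D6;
const ARCH: u32 = 0;
const HDRLEN: u32 = 16;
// Same as (-(MAGIC + ARCH + HDRLEN)) & 0xFFFFFFFF, written with wrapping ops.
const CHKSUM: u32 = 0u32.wrapping_sub(MAGIC).wrapping_sub(ARCH).wrapping_sub(HDRLEN);

fn main() {
    // The loader requires magic + arch + header_length + checksum == 0 (mod 2^32).
    assert_eq!(MAGIC.wrapping_add(ARCH).wrapping_add(HDRLEN).wrapping_add(CHKSUM), 0);
}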


@ -0,0 +1,7 @@
.set KERNEL_OFFSET, 0xFFFFFF8000000000
.set PTE_PRESENT, 1 << 0
.set PTE_WRITABLE, 1 << 1
.set PTE_USERSPACE, 1 << 2
.set PTE_BLOCK, 1 << 7


@ -0,0 +1,114 @@
use crate::arch::x86_64::{
self, gdt, idt, intc,
reg::{CR0, CR4},
syscall,
};
use crate::config::{ConfigKey, CONFIG};
use crate::debug;
use crate::dev::{display::FramebufferInfo, pseudo, Device};
use crate::font;
use crate::fs::{devfs::{self, CharDeviceType}, sysfs};
use crate::mem::{
self, heap,
phys::{self, MemoryRegion, PageUsage, ReservedRegion},
virt,
};
use crate::proc;
use core::arch::{asm, global_asm};
use core::mem::MaybeUninit;
use multiboot2::{BootInformation, MemoryArea};
use tock_registers::interfaces::ReadWriteable;
static mut RESERVED_REGION_MB2: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
#[no_mangle]
extern "C" fn __x86_64_bsp_main(mb_checksum: u32, mb_info_ptr: u32) -> ! {
CR4.modify(CR4::OSXMMEXCPT::SET + CR4::OSFXSR::SET);
CR0.modify(CR0::EM::CLEAR + CR0::MP::SET);
unsafe {
// Setup a proper GDT
gdt::init();
idt::init(intc::map_isr_entries);
}
virt::enable().expect("Failed to initialize virtual memory");
let mb_info = unsafe {
multiboot2::load_with_offset(mb_info_ptr as usize, mem::KERNEL_OFFSET)
.expect("Failed to load multiboot info structure")
};
unsafe {
let mb_info_page = (mb_info_ptr & !0xFFF) as usize;
RESERVED_REGION_MB2.write(ReservedRegion::new(
mb_info_page,
mb_info_page + ((mb_info.total_size() + 0xFFF) & !0xFFF),
));
phys::reserve("multiboot2", RESERVED_REGION_MB2.as_mut_ptr());
phys::init_from_iter(
mb_info
.memory_map_tag()
.unwrap()
.memory_areas()
.map(|entry| MemoryRegion {
start: ((entry.start_address() + 0xFFF) & !0xFFF) as usize,
end: (entry.end_address() & !0xFFF) as usize,
}),
);
}
// Setup a heap
unsafe {
let heap_base_phys = phys::alloc_contiguous_pages(PageUsage::KernelHeap, 4096)
.expect("Failed to allocate memory for heap");
let heap_base_virt = mem::virtualize(heap_base_phys);
heap::init(heap_base_virt, 16 * 1024 * 1024);
}
let initrd_info = mb_info.module_tags().next().unwrap();
{
let mut cfg = CONFIG.lock();
cfg.set_usize(ConfigKey::InitrdBase, initrd_info.start_address() as usize);
cfg.set_usize(ConfigKey::InitrdSize, initrd_info.module_size() as usize);
}
// Setup hardware
unsafe {
x86_64::INTC.enable().ok();
}
let fb_info = mb_info.framebuffer_tag().unwrap();
let virt = mem::virtualize(fb_info.address as usize);
debugln!(
"Framebuffer base: phys={:#x}, virt={:#x}",
fb_info.address,
virt
);
x86_64::DISPLAY.set_framebuffer(FramebufferInfo {
width: fb_info.width as usize,
height: fb_info.height as usize,
phys_base: fb_info.address as usize,
virt_base: virt,
});
font::init();
debug::set_display(&x86_64::DISPLAY);
syscall::init();
devfs::init();
sysfs::init();
devfs::add_char_device(&x86_64::COM1, CharDeviceType::TtySerial).unwrap();
devfs::add_named_char_device(&pseudo::ZERO, "zero").unwrap();
devfs::add_named_char_device(&pseudo::RANDOM, "random").unwrap();
unsafe {
proc::enter();
}
}
global_asm!(include_str!("macros.S"), options(att_syntax));
global_asm!(include_str!("entry.S"), options(att_syntax));
global_asm!(include_str!("upper.S"), options(att_syntax));


@ -0,0 +1,94 @@
.code32
.section .text._entry
__x86_64_enter_upper:
mov $(PTE_PRESENT | PTE_WRITABLE | PTE_USERSPACE), %edx
// Setup PML4
lea (KERNEL_FIXED - KERNEL_OFFSET), %edi
lea (KERNEL_FIXED + 4096 - KERNEL_OFFSET), %esi
mov %edx, %eax
or %esi, %eax
// pml4[0] = %eax
mov %eax, (%edi)
// pml4[511] = %eax
mov %eax, 4088(%edi)
// Setup PDPT
mov %esi, %edi
lea (KERNEL_FIXED + 8192 - KERNEL_OFFSET), %esi
xor %ecx, %ecx
1:
// %eax = &table[%ecx] | attrs
mov %esi, %eax
or %edx, %eax
mov %eax, (%edi, %ecx, 8)
add $4096, %esi
inc %ecx
cmp $16, %ecx
jne 1b
// Setup PDs
lea (KERNEL_FIXED + 8192 - KERNEL_OFFSET), %edi
mov $(PTE_PRESENT | PTE_BLOCK | PTE_WRITABLE), %edx
mov $(512 * 16), %ecx
1:
dec %ecx
// %eax = attrs | (i << 21)
mov %ecx, %eax
shl $21, %eax
or %edx, %eax
mov %eax, (%edi, %ecx, 8)
test %ecx, %ecx
jnz 1b
// Enable PAE/PSE
mov %cr4, %eax
or $((1 << 5) | (1 << 4)), %eax
mov %eax, %cr4
// Enable EFER.LME
mov $0xC0000080, %ecx
rdmsr
or $(1 << 8), %eax
wrmsr
// Set CR3
lea (KERNEL_FIXED - KERNEL_OFFSET), %edi
mov %edi, %cr3
// Enable paging
mov %cr0, %eax
or $(1 << 31), %eax
mov %eax, %cr0
lgdt (gdtr64 - KERNEL_OFFSET)
ljmp $0x08, $(1f - KERNEL_OFFSET)
1:
mov $0x10, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
.code64
mov $KERNEL_OFFSET, %rax
add %rax, %rbx
jmp *%rbx
.section .rodata
.code32
.align 16
gdt64:
.quad 0
.quad 0x00209A0000000000
.quad 0x0000920000000000
gdt_end64:
.align 16
gdtr64:
.short gdt_end64 - gdt64 - 1
.long gdt64 - KERNEL_OFFSET
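
The loop above fills sixteen page directories with 2 MiB block entries, and the same PDPT is installed in both PML4 slot 0 and slot 511, so the mapped range is visible identity-mapped and again at KERNEL_OFFSET. A quick sketch of how much address space that covers; the constants are read off the code above, the helper itself is illustrative:

// Illustrative arithmetic for the early boot mapping built above.
const PD_COUNT: usize = 16;                // PDPT entries 0..16
const ENTRIES_PER_PD: usize = 512;         // one page directory
const BLOCK_SIZE: usize = 2 * 1024 * 1024; // 2 MiB PTE_BLOCK mappings

fn main() {
    let mapped = PD_COUNT * ENTRIES_PER_PD * BLOCK_SIZE;
    // 16 GiB, reachable at both 0x0 and KERNEL_OFFSET (0xFFFFFF8000000000)
    assert_eq!(mapped, 16 << 30);
}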


@ -0,0 +1,78 @@
.section .text
.global __x86_64_ctx_switch
.global __x86_64_ctx_switch_to
.global __x86_64_ctx_enter_kernel
.global __x86_64_ctx_enter_from_fork
__x86_64_ctx_enter_user:
pop %rcx
pop %rdi
pop %rdi
pop %rdx
push $0x1B
push %rcx
push $0x200
push $0x23
push %rdx
iretq
__x86_64_ctx_enter_kernel:
pop %rdi
pop %rdx
mov %rsp, %rcx
push $0x10
push %rcx
push $0x200
push $0x08
push %rdx
iretq
__x86_64_ctx_enter_from_fork:
jmp .
__x86_64_ctx_switch:
// %rsi -- src ctx ptr
// %rdi -- dst ctx ptr
push %r15
push %r14
push %r13
push %r12
push %rbx
push %rbp
mov %cr3, %rax
push %rax
// TODO save gs_base
mov (4 + TSS)(%rip), %rax
push %rax
mov %rsp, (%rsi)
__x86_64_ctx_switch_to:
mov (%rdi), %rsp
pop %rbp
pop %rbx
pop %r12
pop %r13
pop %r14
pop %r15
pop %rax
test %rax, %rax
jz 1f
mov %rax, %cr3
1:
pop %rax
mov %rax, (4 + TSS)(%rip)
// TODO set gs_base = rax
ret


@ -0,0 +1,151 @@
use crate::mem::{
self,
phys::{self, PageUsage},
};
use core::mem::size_of;
use core::arch::global_asm;
struct Stack {
bp: usize,
sp: usize,
}
/// Structure representing thread context
#[repr(C)]
pub struct Context {
/// Thread's kernel stack pointer
pub k_sp: usize, // 0x00
stack_base: usize,
stack_page_count: usize,
}
impl Context {
/// Constructs a new kernel-space thread context
pub fn kernel(entry: usize, arg: usize) -> Self {
let mut stack = Stack::new(8);
stack.push(entry);
stack.push(arg);
stack.setup_common(__x86_64_ctx_enter_kernel as usize, 0, 0);
Self {
k_sp: stack.sp,
stack_base: stack.bp,
stack_page_count: 8,
}
}
/// Constructs a new user-space thread context
pub fn user(entry: usize, arg: usize, cr3: usize, ustack: usize) -> Self {
let cr3 = cr3 & 0xFFFFFFFF;
let mut stack = Stack::new(8);
let stack_top = stack.sp;
stack.push(entry);
stack.push(arg);
stack.push(0);
stack.push(ustack);
stack.setup_common(__x86_64_ctx_enter_user as usize, cr3, stack_top);
Self {
k_sp: stack.sp,
stack_base: stack.bp,
stack_page_count: 8,
}
}
/// Constructs an uninitialized thread context
pub fn empty() -> Self {
let stack = Stack::new(8);
Self {
k_sp: stack.sp,
stack_base: stack.bp,
stack_page_count: 8
}
}
/// Sets up a context for signal entry
///
/// # Safety
///
/// Unsafe: may clobber an already active context
pub unsafe fn setup_signal_entry(&mut self, entry: usize, arg: usize, cr3: usize, ustack: usize) {
todo!()
}
/// Performs initial thread entry
///
/// # Safety
///
/// Unsafe: does not check if any context has already been activated
/// before, so must only be called once.
pub unsafe extern "C" fn enter(&mut self) -> ! {
__x86_64_ctx_switch_to(self);
panic!("This code should not run");
}
/// Performs context switch from `self` to `to`.
///
/// # Safety
///
/// Unsafe: does not check if `self` is actually an active context.
pub unsafe extern "C" fn switch(&mut self, to: &mut Context) {
__x86_64_ctx_switch(to, self);
}
}
impl Stack {
pub fn new(page_count: usize) -> Stack {
let phys = phys::alloc_contiguous_pages(PageUsage::Kernel, page_count).unwrap();
let bp = mem::virtualize(phys);
Stack {
bp,
sp: bp + page_count * mem::PAGE_SIZE,
}
}
pub unsafe fn from_base_size(bp: usize, page_count: usize) -> Stack {
Stack {
bp,
sp: bp + page_count * mem::PAGE_SIZE
}
}
pub fn setup_common(&mut self, entry: usize, cr3: usize, tss_rsp0: usize) {
self.push(entry); // return address
self.push(tss_rsp0); // gs_base
self.push(cr3);
self.push(0); // r15
self.push(0); // r14
self.push(0); // r13
self.push(0); // r12
self.push(0); // rbx
self.push(0); // rbp
}
pub fn push(&mut self, value: usize) {
if self.bp == self.sp {
panic!("Stack overflow");
}
self.sp -= size_of::<usize>();
unsafe {
*(self.sp as *mut usize) = value;
}
}
}
extern "C" {
fn __x86_64_ctx_enter_from_fork();
fn __x86_64_ctx_enter_kernel();
fn __x86_64_ctx_enter_user();
fn __x86_64_ctx_switch(dst: *mut Context, src: *mut Context);
fn __x86_64_ctx_switch_to(dst: *mut Context);
}
global_asm!(include_str!("context.S"), options(att_syntax));


@ -0,0 +1,86 @@
use crate::arch::x86_64;
use crate::debug::Level;
use crate::dev::irq::{IntController, IrqContext};
use core::arch::{asm, global_asm};
#[derive(Debug)]
struct ExceptionFrame {
r15: u64,
r14: u64,
r13: u64,
r12: u64,
r11: u64,
r10: u64,
r9: u64,
r8: u64,
rdi: u64,
rsi: u64,
rbp: u64,
rbx: u64,
rdx: u64,
rcx: u64,
rax: u64,
err_no: u64,
err_code: u64,
rip: u64,
cs: u64,
rflags: u64,
rsp: u64,
ss: u64,
}
fn pfault_read_cr2() -> u64 {
let mut res;
unsafe {
asm!("mov %cr2, {}", out(reg) res, options(att_syntax));
}
res
}
fn pfault_access_type(code: u64) -> &'static str {
if code & (1 << 4) != 0 {
"INSTRUCTION FETCH"
} else if code & (1 << 1) != 0 {
"WRITE"
} else {
"READ"
}
}
fn pfault_dump(level: Level, frame: &ExceptionFrame, cr2: u64) {
println!(level, "\x1B[41;1mPage fault:");
println!(
level,
" Illegal {} at {:#018x}\x1B[0m",
pfault_access_type(frame.err_code),
cr2
);
}
#[no_mangle]
extern "C" fn __x86_64_exception_handler(frame: &mut ExceptionFrame) {
if frame.err_no == 14 {
// TODO userspace page faults
let cr2 = pfault_read_cr2();
pfault_dump(Level::Error, frame, cr2);
}
errorln!(
"Exception occurred: err_no={}, err_code={:#x}",
frame.err_no,
frame.err_code,
);
errorln!("cs:rip = {:02x}:{:#x}", frame.cs, frame.rip);
errorln!("ss:rsp = {:02x}:{:#x}", frame.ss, frame.rsp);
panic!("Unhandled exception");
}
#[no_mangle]
extern "C" fn __x86_64_irq_handler(frame: &mut ExceptionFrame) {
unsafe {
let ic = IrqContext::new(frame.err_no as usize);
x86_64::intc().handle_pending_irqs(&ic);
}
}


@ -0,0 +1,147 @@
use core::mem::size_of_val;
use core::arch::asm;
#[repr(packed)]
struct Entry {
limit_lo: u16,
base_lo: u16,
base_mi: u8,
access: u8,
flags: u8,
base_hi: u8,
}
#[repr(packed)]
struct Tss {
__res0: u32,
rsp0: u64,
rsp1: u64,
rsp2: u64,
__res1: u32,
ist1: u64,
ist2: u64,
ist3: u64,
ist4: u64,
ist5: u64,
ist6: u64,
ist7: u64,
__res2: u64,
__res3: u16,
iopb_base: u16,
}
#[repr(packed)]
struct Pointer {
size: u16,
offset: usize,
}
impl Entry {
const FLAG_LONG: u8 = 1 << 5;
const ACC_PRESENT: u8 = 1 << 7;
const ACC_SYSTEM: u8 = 1 << 4;
const ACC_EXECUTE: u8 = 1 << 3;
const ACC_WRITE: u8 = 1 << 1;
const ACC_RING3: u8 = 3 << 5;
const ACC_ACCESS: u8 = 1 << 0;
const fn new(base: u32, limit: u32, flags: u8, access: u8) -> Self {
Self {
base_lo: (base & 0xFFFF) as u16,
base_mi: ((base >> 16) & 0xFF) as u8,
base_hi: ((base >> 24) & 0xFF) as u8,
access,
flags: (flags & 0xF0) | (((limit >> 16) & 0xF) as u8),
limit_lo: (limit & 0xFFFF) as u16,
}
}
const fn null() -> Self {
Self {
base_lo: 0,
base_mi: 0,
base_hi: 0,
access: 0,
flags: 0,
limit_lo: 0,
}
}
}
impl Tss {
const fn new() -> Self {
Self {
__res0: 0,
rsp0: 0,
rsp1: 0,
rsp2: 0,
__res1: 0,
ist1: 0,
ist2: 0,
ist3: 0,
ist4: 0,
ist5: 0,
ist6: 0,
ist7: 0,
__res2: 0,
__res3: 0,
iopb_base: 0,
}
}
}
const SIZE: usize = 7;
#[no_mangle]
static mut TSS: Tss = Tss::new();
static mut GDT: [Entry; SIZE] = [
Entry::null(),
Entry::new(
0,
0,
Entry::FLAG_LONG,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_EXECUTE,
),
Entry::new(
0,
0,
0,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_WRITE,
),
Entry::new(
0,
0,
0,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_WRITE,
),
Entry::new(
0,
0,
Entry::FLAG_LONG,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_EXECUTE,
),
Entry::null(),
Entry::null(),
];
pub unsafe fn init() {
let tss_addr = &TSS as *const _ as usize;
GDT[5] = Entry::new(
(tss_addr & 0xFFFFFFFF) as u32,
size_of_val(&TSS) as u32 - 1,
Entry::FLAG_LONG,
Entry::ACC_ACCESS | Entry::ACC_PRESENT | Entry::ACC_EXECUTE,
);
core::ptr::write(&mut GDT[6] as *mut _ as *mut u64, (tss_addr >> 32) as u64);
let gdtr = Pointer {
size: size_of_val(&GDT) as u16 - 1,
offset: &GDT as *const _ as usize,
};
asm!(r#"
lgdt ({})
mov $0x28, %ax
ltr %ax
"#, in(reg) &gdtr, options(att_syntax));
}
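
In long mode the TSS descriptor is 16 bytes, which is why init() fills GDT[5] with the low half and writes the upper 32 bits of the base into GDT[6] before handing ltr the selector 0x28 (index 5, RPL 0). A small sketch of the selector arithmetic; the helper is illustrative:

// Illustrative: how the selector constants relate to the GDT layout above.
const GDT_ENTRY_SIZE: usize = 8;
const TSS_INDEX: usize = 5; // GDT[5] and GDT[6] together hold the 16-byte TSS descriptor

fn selector(index: usize, rpl: u16) -> u16 {
    ((index * GDT_ENTRY_SIZE) as u16) | rpl
}

fn main() {
    assert_eq!(selector(TSS_INDEX, 0), 0x28); // matches `mov $0x28, %ax; ltr %ax`
    // The user selectors pushed in context.S are indices 3 (data) and 4 (code) with RPL 3.
    assert_eq!(selector(3, 3), 0x1B);
    assert_eq!(selector(4, 3), 0x23);
}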


@ -0,0 +1,110 @@
.macro isr_nerr, n
exc_isr_\n:
cli
pushq $0
pushq $\n
jmp __x86_64_isr_common
.endm
// ISR for exception with an error code
.macro isr_yerr, n
exc_isr_\n:
cli
pushq $\n
jmp __x86_64_isr_common
.endm
.section .text
__x86_64_isr_common:
push %rax
push %rcx
push %rdx
push %rbx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
mov %rsp, %rdi
call __x86_64_exception_handler
1:
cli
hlt
jmp 1b
isr_nerr 0
isr_nerr 1
isr_nerr 2
isr_nerr 3
isr_nerr 4
isr_nerr 5
isr_nerr 6
isr_nerr 7
isr_yerr 8
isr_nerr 9
isr_yerr 10
isr_yerr 11
isr_yerr 12
isr_yerr 13
isr_yerr 14
isr_nerr 15
isr_nerr 16
isr_yerr 17
isr_nerr 18
isr_nerr 19
isr_nerr 20
isr_nerr 21
isr_nerr 22
isr_nerr 23
isr_nerr 24
isr_nerr 25
isr_nerr 26
isr_nerr 27
isr_nerr 28
isr_nerr 29
isr_yerr 30
isr_nerr 31
.section .rodata
.global __x86_64_exception_vectors
__x86_64_exception_vectors:
.quad exc_isr_0
.quad exc_isr_1
.quad exc_isr_2
.quad exc_isr_3
.quad exc_isr_4
.quad exc_isr_5
.quad exc_isr_6
.quad exc_isr_7
.quad exc_isr_8
.quad exc_isr_9
.quad exc_isr_10
.quad exc_isr_11
.quad exc_isr_12
.quad exc_isr_13
.quad exc_isr_14
.quad exc_isr_15
.quad exc_isr_16
.quad exc_isr_17
.quad exc_isr_18
.quad exc_isr_19
.quad exc_isr_20
.quad exc_isr_21
.quad exc_isr_22
.quad exc_isr_23
.quad exc_isr_24
.quad exc_isr_25
.quad exc_isr_26
.quad exc_isr_27
.quad exc_isr_28
.quad exc_isr_29
.quad exc_isr_30
.quad exc_isr_31


@ -0,0 +1,73 @@
use core::arch::{asm, global_asm};
use core::mem::size_of_val;
#[derive(Clone, Copy)]
#[repr(packed)]
pub struct Entry {
base_lo: u16,
selector: u16,
__res0: u8,
flags: u8,
base_hi: u16,
base_ex: u32,
__res1: u32,
}
#[repr(packed)]
struct Pointer {
limit: u16,
offset: usize,
}
pub const SIZE: usize = 256;
impl Entry {
pub const PRESENT: u8 = 1 << 7;
pub const INT32: u8 = 0xE;
pub const fn new(base: usize, selector: u16, flags: u8) -> Self {
Self {
base_lo: (base & 0xFFFF) as u16,
base_hi: ((base >> 16) & 0xFFFF) as u16,
base_ex: (base >> 32) as u32,
selector,
flags,
__res0: 0,
__res1: 0,
}
}
const fn empty() -> Self {
Self {
base_lo: 0,
base_hi: 0,
base_ex: 0,
selector: 0,
flags: 0,
__res0: 0,
__res1: 0,
}
}
}
static mut IDT: [Entry; SIZE] = [Entry::empty(); SIZE];
pub unsafe fn init<F: FnOnce(&mut [Entry; SIZE]) -> ()>(f: F) {
extern "C" {
static __x86_64_exception_vectors: [usize; 32];
}
for (i, &entry) in __x86_64_exception_vectors.iter().enumerate() {
IDT[i] = Entry::new(entry, 0x08, Entry::PRESENT | Entry::INT32);
}
f(&mut IDT);
let idtr = Pointer {
limit: size_of_val(&IDT) as u16 - 1,
offset: &IDT as *const _ as usize,
};
asm!("lidt ({})", in(reg) &idtr, options(att_syntax));
}
global_asm!(include_str!("idt.S"), options(att_syntax));


@ -0,0 +1,145 @@
use crate::arch::x86_64::{
idt::{Entry as IdtEntry, SIZE as IDT_SIZE},
PortIo,
};
use crate::dev::{
irq::{IntController, IntSource, IrqContext},
Device,
};
use crate::sync::IrqSafeSpinLock;
use libsys::error::Errno;
use core::arch::global_asm;
const ICW1_INIT: u8 = 0x10;
const ICW1_ICW4: u8 = 0x01;
const ICW4_8086: u8 = 0x01;
pub(super) struct I8259 {
cmd_a: PortIo<u8>,
cmd_b: PortIo<u8>,
data_a: PortIo<u8>,
data_b: PortIo<u8>,
table: IrqSafeSpinLock<[Option<&'static (dyn IntSource + Sync)>; 15]>,
}
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
pub struct IrqNumber(u32);
impl IrqNumber {
pub const MAX: u32 = 16;
pub const fn new(u: u32) -> Self {
if u > Self::MAX {
panic!();
}
Self(u)
}
}
impl Device for I8259 {
fn name(&self) -> &'static str {
"i8259-compatible IRQ controller"
}
unsafe fn enable(&self) -> Result<(), Errno> {
self.cmd_a.write(ICW1_INIT | ICW1_ICW4);
self.cmd_b.write(ICW1_INIT | ICW1_ICW4);
self.data_a.write(32);
self.data_b.write(32 + 8);
self.data_a.write(4);
self.data_b.write(2);
self.data_a.write(ICW4_8086);
self.data_b.write(ICW4_8086);
self.data_a.write(0xFE);
self.data_b.write(0xFF);
Ok(())
}
}
impl IntController for I8259 {
type IrqNumber = IrqNumber;
fn register_handler(
&self,
irq: Self::IrqNumber,
handler: &'static (dyn IntSource + Sync),
) -> Result<(), Errno> {
if irq.0 == 0 {
return Err(Errno::InvalidArgument);
}
let index = (irq.0 - 1) as usize;
let mut lock = self.table.lock();
if lock[index].is_some() {
return Err(Errno::AlreadyExists);
}
lock[index] = Some(handler);
Ok(())
}
fn enable_irq(&self, irq: Self::IrqNumber) -> Result<(), Errno> {
let port = if irq.0 < 8 {
&self.data_a
} else {
&self.data_b
};
let mask = port.read() & !(1 << (irq.0 & 0x7));
port.write(mask);
Ok(())
}
fn handle_pending_irqs<'irq_context>(&'irq_context self, ic: &IrqContext<'irq_context>) {
let irq_number = ic.token();
assert!(irq_number > 0);
if irq_number > 8 {
self.cmd_b.write(0x20);
}
self.cmd_a.write(0x20);
{
let table = self.table.lock();
match table[irq_number - 1] {
None => panic!("No handler registered for irq{}", irq_number),
Some(handler) => {
drop(table);
handler.handle_irq().expect("irq handler failed")
}
}
}
}
}
impl I8259 {
pub const fn new() -> Self {
unsafe {
Self {
cmd_a: PortIo::new(0x20),
data_a: PortIo::new(0x21),
cmd_b: PortIo::new(0xA0),
data_b: PortIo::new(0xA1),
table: IrqSafeSpinLock::new([None; 15]),
}
}
}
}
pub fn map_isr_entries(entries: &mut [IdtEntry; IDT_SIZE]) {
extern "C" {
static __x86_64_irq_vectors: [usize; 16];
}
for (i, &entry) in unsafe { __x86_64_irq_vectors.iter().enumerate() } {
entries[i + 32] = IdtEntry::new(entry, 0x08, IdtEntry::PRESENT | IdtEntry::INT32);
}
}
global_asm!(include_str!("irq_vectors.S"), options(att_syntax));


@ -0,0 +1,16 @@
use core::arch::asm;
#[inline(always)]
pub unsafe fn rdmsr(a: u32) -> u64 {
let mut eax: u32;
let mut edx: u32;
asm!("rdmsr", in("ecx") a, out("eax") eax, out("edx") edx);
(eax as u64) | ((edx as u64) << 32)
}
#[inline(always)]
pub unsafe fn wrmsr(a: u32, b: u64) {
let eax = b as u32;
let edx = (b >> 32) as u32;
asm!("wrmsr", in("ecx") a, in("eax") eax, in("edx") edx);
}
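
A usage sketch for the wrappers above: reading IA32_EFER (MSR 0xC0000080, the register programmed in upper.S) and testing its long-mode-enable bit. The helper is hypothetical, must run in ring 0, and assumes the module path used elsewhere in this change:

// Illustrative ring-0 use of rdmsr; not part of the change itself.
use crate::arch::x86_64::intrin::rdmsr;

const IA32_EFER: u32 = 0xC000_0080;
const EFER_LME: u64 = 1 << 8; // long mode enable, set by upper.S before paging is turned on

unsafe fn long_mode_enabled() -> bool {
    rdmsr(IA32_EFER) & EFER_LME != 0
}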


@ -0,0 +1,29 @@
use core::marker::PhantomData;
use core::arch::asm;
pub struct PortIo<T> {
port: u16,
_pd: PhantomData<T>
}
impl<T> PortIo<T> {
pub const unsafe fn new(port: u16) -> Self {
Self { port, _pd: PhantomData }
}
}
impl PortIo<u8> {
pub fn read(&self) -> u8 {
let mut res: u8;
unsafe {
asm!("inb %dx, %al", in("dx") self.port, out("al") res, options(att_syntax));
}
res
}
pub fn write(&self, value: u8) {
unsafe {
asm!("outb %al, %dx", in("dx") self.port, in("al") value, options(att_syntax));
}
}
}
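
A usage sketch for PortIo: the same pattern uart.rs uses for the COM1 data register at port 0x3F8. The helper below is illustrative:

// Illustrative: write one byte to the COM1 data register through PortIo.
use crate::arch::x86_64::PortIo;

fn com1_send(byte: u8) {
    // Safety: 0x3F8 is the COM1 base port on PC-compatible hardware.
    let dr: PortIo<u8> = unsafe { PortIo::new(0x3F8) };
    dr.write(byte);
}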


@ -0,0 +1,115 @@
.macro irq_entry no
__x86_64_irq_\no:
cli
pushq $0
pushq $\no
push %rax
push %rcx
push %rdx
push %rbx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
mov %rsp, %rdi
call __x86_64_irq_handler
jmp .
.endm
.section .text
__x86_64_irq_0:
cli
push %rax
push %rcx
push %rdx
push %rbx
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
mov $0x3F8, %dx
mov $'T', %al
outb %al, %dx
mov $0x20, %al
mov $0x20, %dx
outb %al, %dx
call sched_yield
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rbx
pop %rdx
pop %rcx
pop %rax
iretq
irq_entry 1
irq_entry 2
irq_entry 3
irq_entry 4
irq_entry 5
irq_entry 6
irq_entry 7
irq_entry 8
irq_entry 9
irq_entry 10
irq_entry 11
irq_entry 12
irq_entry 13
irq_entry 14
irq_entry 15
.section .rodata
.global __x86_64_irq_vectors
__x86_64_irq_vectors:
.quad __x86_64_irq_0
.quad __x86_64_irq_1
.quad __x86_64_irq_2
.quad __x86_64_irq_3
.quad __x86_64_irq_4
.quad __x86_64_irq_5
.quad __x86_64_irq_6
.quad __x86_64_irq_7
.quad __x86_64_irq_8
.quad __x86_64_irq_9
.quad __x86_64_irq_10
.quad __x86_64_irq_11
.quad __x86_64_irq_12
.quad __x86_64_irq_13
.quad __x86_64_irq_14
.quad __x86_64_irq_15


@ -0,0 +1,59 @@
use crate::dev::{serial::SerialDevice, display::StaticFramebuffer, irq::IntController};
use core::arch::asm;
mod uart;
use uart::Uart;
mod intc;
use intc::I8259;
mod io;
pub(self) use io::PortIo;
pub mod boot;
pub mod table;
pub mod context;
pub mod intrin;
pub mod reg;
pub(self) mod syscall;
pub(self) mod gdt;
pub(self) mod idt;
pub(self) mod exception;
pub use syscall::SyscallFrame as ForkFrame;
/// Masks IRQs and returns previous IRQ mask state
///
/// # Safety
///
/// Unsafe: disables IRQ handling temporarily
#[inline(always)]
pub unsafe fn irq_mask_save() -> u64 {
let mut res;
asm!("pushf; cli; pop {}", out(reg) res, options(att_syntax));
res
}
/// Restores IRQ mask state
///
/// # Safety
///
/// Unsafe: modifies interrupt behavior. Must only be used in
/// conjunction with [irq_mask_save]
#[inline(always)]
pub unsafe fn irq_restore(state: u64) {
if state & (1 << 9) != 0 {
asm!("sti");
}
}
pub fn intc() -> &'static impl IntController {
&INTC
}
pub fn console() -> &'static impl SerialDevice {
&COM1
}
static COM1: Uart = unsafe { Uart::new(0x3F8) };
static INTC: I8259 = I8259::new();
pub(self) static DISPLAY: StaticFramebuffer = StaticFramebuffer::uninit();


@ -0,0 +1,135 @@
macro_rules! wrap_msr {
($struct_name:ident, $name:ident, $address:expr, $fields:tt) => {
register_bitfields! {
u64,
pub $name $fields
}
pub struct $struct_name;
impl Readable for $struct_name {
type T = u64;
type R = $name::Register;
#[inline(always)]
fn get(&self) -> Self::T {
unsafe {
rdmsr($address)
}
}
}
impl Writeable for $struct_name {
type T = u64;
type R = $name::Register;
#[inline(always)]
fn set(&self, value: Self::T) {
unsafe {
wrmsr($address, value);
}
}
}
pub const $name: $struct_name = $struct_name;
}
}
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
};
use core::arch::asm;
use crate::arch::x86_64::intrin::{rdmsr, wrmsr};
// CRn registers
register_bitfields! {
u64,
pub CR4 [
OSFXSR OFFSET(9) NUMBITS(1) [],
OSXMMEXCPT OFFSET(10) NUMBITS(1) []
]
}
register_bitfields! {
u64,
pub CR0 [
EM OFFSET(2) NUMBITS(1) [],
MP OFFSET(1) NUMBITS(1) []
]
}
pub struct Cr4;
pub struct Cr0;
impl Readable for Cr4 {
type T = u64;
type R = CR4::Register;
#[inline(always)]
fn get(&self) -> Self::T {
let mut res: u64;
unsafe {
asm!("mov %cr4, {}", out(reg) res, options(att_syntax))
}
res
}
}
impl Writeable for Cr4 {
type T = u64;
type R = CR4::Register;
#[inline(always)]
fn set(&self, value: Self::T) {
unsafe {
asm!("mov {}, %cr4", in(reg) value, options(att_syntax));
}
}
}
impl Readable for Cr0 {
type T = u64;
type R = CR0::Register;
#[inline(always)]
fn get(&self) -> Self::T {
let mut res: u64;
unsafe {
asm!("mov %cr0, {}", out(reg) res, options(att_syntax))
}
res
}
}
impl Writeable for Cr0 {
type T = u64;
type R = CR0::Register;
#[inline(always)]
fn set(&self, value: Self::T) {
unsafe {
asm!("mov {}, %cr0", in(reg) value, options(att_syntax));
}
}
}
pub const CR4: Cr4 = Cr4;
pub const CR0: Cr0 = Cr0;
wrap_msr!(MsrIa32Efer, MSR_IA32_EFER, 0xC0000080, [
SCE OFFSET(0) NUMBITS(1) [],
LME OFFSET(8) NUMBITS(1) [],
LMA OFFSET(10) NUMBITS(1) [],
NXE OFFSET(11) NUMBITS(1) []
]);
wrap_msr!(MsrIa32Lstar, MSR_IA32_LSTAR, 0xC0000082, [
VALUE OFFSET(0) NUMBITS(64) []
]);
wrap_msr!(MsrIa32Star, MSR_IA32_STAR, 0xC0000081, [
SYSCALL_CS_SS OFFSET(32) NUMBITS(8) [],
SYSRET_CS_SS OFFSET(48) NUMBITS(8) []
]);
wrap_msr!(MsrIa32Sfmask, MSR_IA32_SFMASK, 0xC0000084, [
IF OFFSET(9) NUMBITS(1) []
]);


@ -0,0 +1,13 @@
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
};
use crate::arch::x86_64::intrin::{rdmsr, wrmsr};
register_bitfields! {
u64,
pub MSR_IA32_EFER [
]
}
wrap_msr!(MSR_IA32_EFER, 0xC0000080);


@ -0,0 +1,60 @@
.section .text
.global __x86_64_syscall_entry
__x86_64_syscall_entry:
// Syscalls only happen from user space, so
// relying on TSS.RSP0 is safe here I guess
mov %rsp, scratch(%rip)
mov (4 + TSS)(%rip), %rsp
// Now on kernel stack
// Push the whole state
push %rcx // saved %rip
push %r11 // saved %rflags
mov scratch(%rip), %r11
push %r11 // saved %rsp
push %r12
push %r13
push %r14
push %r15
push %rbp
push %rbx
push %rax
push %r9
push %r8
push %r10
push %rdx
push %rsi
push %rdi
mov %rsp, %rdi
call __x86_64_syscall
pop %rdi
pop %rsi
pop %rdx
pop %r10
pop %r8
pop %r9
pop %rax
pop %rbx
pop %rbp
pop %r15
pop %r14
pop %r13
pop %r12
pop %rdi
pop %r11
pop %rcx
mov %rdi, %rsp
sysretq
.section .bss
scratch:
.skip 8


@ -0,0 +1,53 @@
use crate::arch::x86_64::reg::{MSR_IA32_EFER, MSR_IA32_LSTAR, MSR_IA32_SFMASK, MSR_IA32_STAR};
use core::arch::global_asm;
use tock_registers::interfaces::{ReadWriteable, Writeable};
use libsys::abi::SystemCall;
use crate::syscall;
#[derive(Clone, Debug)]
pub struct SyscallFrame {
x: [usize; 13],
saved_rsp: usize,
saved_rflags: usize,
saved_rip: usize,
}
pub(super) fn init() {
extern "C" {
fn __x86_64_syscall_entry();
}
MSR_IA32_SFMASK.write(MSR_IA32_SFMASK::IF::SET);
MSR_IA32_LSTAR.set(__x86_64_syscall_entry as u64);
MSR_IA32_STAR
.write(MSR_IA32_STAR::SYSRET_CS_SS.val(0x1B - 8) + MSR_IA32_STAR::SYSCALL_CS_SS.val(0x08));
MSR_IA32_EFER.modify(MSR_IA32_EFER::SCE::SET);
}
#[no_mangle]
extern "C" fn __x86_64_syscall(frame: &mut SyscallFrame) {
let num = SystemCall::from_repr(frame.x[6]);
if num.is_none() {
todo!();
}
let num = num.unwrap();
if num == SystemCall::Fork {
match unsafe { syscall::sys_fork(frame) } {
Ok(pid) => frame.x[6] = u32::from(pid) as usize,
Err(err) => {
frame.x[6] = err.to_negative_isize() as usize;
}
}
return;
}
match syscall::syscall(num, &frame.x[..6]) {
Ok(val) => frame.x[6] = val,
Err(err) => {
frame.x[6] = err.to_negative_isize() as usize;
}
}
}
global_asm!(include_str!("syscall.S"), options(att_syntax));


@ -0,0 +1,286 @@
use crate::mem::{
self,
virt::{AddressSpace, table::{MapAttributes, Entry as AbstractEntry}},
phys::{self, PageUsage}
};
use core::ops::{Index, IndexMut};
use libsys::error::Errno;
use core::arch::asm;
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);
#[derive(Clone, Copy)]
#[repr(C, align(0x1000))]
pub struct Table {
entries: [Entry; 512]
}
#[repr(C, align(0x1000))]
pub struct FixedTableGroup {
pml4: Table,
pdpt: Table,
pd: [Table; 16]
}
#[repr(transparent)]
pub struct Space(Table);
bitflags! {
/// Attributes attached to each translation [Entry]
pub struct RawAttributes: u64 {
const PRESENT = 1 << 0;
const WRITE = 1 << 1;
const USER = 1 << 2;
const BLOCK = 1 << 7;
const GLOBAL = 1 << 8;
}
}
// Upper mappings
#[no_mangle]
static mut KERNEL_FIXED: FixedTableGroup = FixedTableGroup {
pml4: Table::empty(),
pdpt: Table::empty(),
pd: [Table::empty(); 16]
};
impl TryFrom<MapAttributes> for RawAttributes {
type Error = Errno;
fn try_from(i: MapAttributes) -> Result<Self, Errno> {
let mut res = RawAttributes::empty();
if i.contains(MapAttributes::USER_READ) {
res |= RawAttributes::USER;
}
if i.contains(MapAttributes::USER_WRITE) || i.contains(MapAttributes::KERNEL_WRITE) {
res |= RawAttributes::WRITE;
}
Ok(res)
}
}
impl AbstractEntry for Entry {
fn from_parts(phys: usize, attrs: MapAttributes) -> Self {
let attrs = RawAttributes::try_from(attrs).unwrap();
Self((phys as u64) | attrs.bits() | 1)
}
fn is_present(self) -> bool {
self.0 & (1 << 0) != 0
}
fn is_table(self) -> bool {
self.0 & (1 << 7) == 0
}
fn target(self) -> usize {
(self.0 & !0xFFF) as usize
}
}
impl Entry {
const fn invalid() -> Self {
Self(0)
}
}
impl Table {
const fn empty() -> Self {
Self {
entries: [Entry::invalid(); 512]
}
}
/// Returns next-level translation table reference for `index`, if one is present.
/// If `index` represents a `Block`-type mapping, will return an error.
/// If `index` does not map to any translation table, will try to allocate, init and
/// map a new one, returning it after doing so.
pub fn next_level_table_or_alloc(&mut self, index: usize) -> Result<&'static mut Table, Errno> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
return Err(Errno::InvalidArgument);
}
Ok(unsafe { &mut *(mem::virtualize(entry.target()) as *mut _) })
} else {
let phys = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
self[index] = Entry::from_parts(phys, MapAttributes::USER_WRITE | MapAttributes::USER_READ | MapAttributes::NOT_GLOBAL);
res.entries.fill(Entry::invalid());
Ok(res)
}
}
/// Returns next-level translation table reference for `index`, if one is present.
/// Same as [next_level_table_or_alloc], but returns `None` if no table is mapped.
pub fn next_level_table(&mut self, index: usize) -> Option<&'static mut Table> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
panic!("Entry is not a table: idx={}", index);
}
Some(unsafe { &mut *(mem::virtualize(entry.target()) as *mut _) })
} else {
None
}
}
}
impl Space {
const fn empty() -> Self {
Self(Table::empty())
}
}
impl AddressSpace for Space {
type Entry = Entry;
fn alloc_empty() -> Result<&'static mut Self, Errno> {
let pdpt_phys = unsafe {
&KERNEL_FIXED.pdpt as *const _ as usize - mem::KERNEL_OFFSET
};
let page = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(page) as *mut Self) };
res.0.entries[..511].fill(Entry::invalid());
res.0.entries[511] = Entry::from_parts(pdpt_phys, MapAttributes::SHARE_OUTER | MapAttributes::KERNEL_EXEC | MapAttributes::KERNEL_WRITE | MapAttributes::NOT_GLOBAL);
Ok(res)
}
fn release(space: &mut Self) {
todo!()
}
fn address_phys(&mut self) -> usize {
todo!();
}
fn read_last_level_entry(&mut self, virt: usize) -> Result<Entry, Errno> {
let l0i = virt >> 39;
let l1i = (virt >> 30) & 0x1FF;
let l2i = (virt >> 21) & 0x1FF;
let l3i = (virt >> 12) & 0x1FF;
let l0_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l1_table = l0_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l2i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l3i];
if entry.is_present() {
Ok(entry)
} else {
Err(Errno::DoesNotExist)
}
}
fn write_last_level_entry(
&mut self,
virt: usize,
entry: Entry,
map_intermediate: bool,
) -> Result<(), Errno> {
let l0i = virt >> 39;
let l1i = (virt >> 30) & 0x1FF;
let l2i = (virt >> 21) & 0x1FF;
let l3i = (virt >> 12) & 0x1FF;
let l0_table = self.0.next_level_table_or_alloc(l0i)?;
let l1_table = l0_table.next_level_table_or_alloc(l1i)?;
let l2_table = l1_table.next_level_table_or_alloc(l2i)?;
if l2_table[l3i].is_present() {
warnln!("Entry already exists for address: virt={:#x}, prev={:#x}, new={:#x}", virt, l2_table[l3i].target(), entry.target());
Err(Errno::AlreadyExists)
} else {
l2_table[l3i] = entry;
unsafe {
core::arch::asm!("invlpg ({})", in(reg) virt, options(att_syntax));
}
#[cfg(feature = "verbose")]
debugln!("{:#p} Map {:#x} -> {:#x}", self, virt, entry.target());
Ok(())
}
}
/// Performs a copy of the address space, cloning data owned by it
fn fork(&mut self) -> Result<&'static mut Self, Errno> {
let res = Self::alloc_empty()?;
let pdpt0 = self.0.next_level_table(0).unwrap();
for pdpti in 0..512 {
if let Some(pd) = pdpt0.next_level_table(pdpti) {
for pdi in 0..512 {
if let Some(pt) = pd.next_level_table(pdi) {
for pti in 0..512 {
let entry = pt[pti];
if !entry.is_present() {
continue;
}
assert!(entry.is_table());
todo!();
// let src_phys = unsafe { entry.address_unchecked() };
// let virt_addr = (l0i << 30) | (l1i << 21) | (l2i << 12);
// let dst_phys = unsafe { phys::fork_page(src_phys)? };
// let mut flags = unsafe { entry.fork_flags() };
// if dst_phys != src_phys {
// todo!();
// // res.map(virt_addr, dst_phys, flags)?;
// } else {
// let writable = flags & MapAttributes::AP_BOTH_READONLY
// == MapAttributes::AP_BOTH_READWRITE;
// if writable {
// flags |=
// MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
// l2_table[l2i].set_cow();
// unsafe {
// asm!("tlbi vaae1, {}", in(reg) virt_addr);
// }
// }
// res.map(virt_addr, dst_phys, flags)?;
// }
}
}
}
}
}
Ok(res)
}
}
impl Index<usize> for Table {
type Output = Entry;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl IndexMut<usize> for Table {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
pub fn enable() -> Result<(), Errno> {
unsafe {
// Remove the lower mapping
KERNEL_FIXED.pml4.entries[0] = Entry::invalid();
// Flush the TLB by reloading cr3
asm!("mov %cr3, %rax; mov %rax, %cr3", options(att_syntax));
}
Ok(())
}
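
alloc_empty above only has to populate PML4 entry 511 with the shared kernel PDPT, because every address at or above KERNEL_OFFSET selects that slot, so all address spaces see the same kernel mappings. A sketch of the index calculation; KERNEL_OFFSET is reproduced from macros.S and the helper is illustrative:

// Illustrative: why Space::alloc_empty fills only PML4 entry 511 with kernel tables.
const KERNEL_OFFSET: usize = 0xFFFF_FF80_0000_0000;

const fn pml4_index(virt: usize) -> usize {
    (virt >> 39) & 0x1FF
}

fn main() {
    // Every kernel-space address walks through the shared entry 511.
    assert_eq!(pml4_index(KERNEL_OFFSET), 511);
    // User addresses below the canonical hole stay in entries 0..256.
    assert_eq!(pml4_index(0x0000_7FFF_FFFF_F000), 255);
}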


@ -0,0 +1,50 @@
use crate::arch::x86_64::PortIo;
use libsys::error::Errno;
use crate::dev::{
tty::{CharRing, TtyDevice},
irq::{IntController, IntSource},
serial::SerialDevice,
Device,
};
#[derive(TtyCharDevice)]
pub(super) struct Uart {
dr: PortIo<u8>,
ring: CharRing<16>
}
impl Device for Uart {
fn name(&self) -> &'static str {
"x86 COM-port"
}
unsafe fn enable(&self) -> Result<(), Errno> {
Ok(())
}
}
impl TtyDevice<16> for Uart {
fn ring(&self) -> &CharRing<16> {
&self.ring
}
}
impl SerialDevice for Uart {
fn send(&self, byte: u8) -> Result<(), Errno> {
self.dr.write(byte);
Ok(())
}
fn recv(&self, _blocking: bool) -> Result<u8, Errno> {
todo!()
}
}
impl Uart {
pub const unsafe fn new(base: u16) -> Self {
Self {
dr: PortIo::new(base),
ring: CharRing::new()
}
}
}


@ -11,12 +11,68 @@
//! * [warnln!]
//! * [errorln!]
use crate::dev::serial::SerialDevice;
use libsys::{debug::TraceLevel, error::Errno};
use crate::dev::{
display::{Display, FramebufferInfo},
serial::SerialDevice,
};
use crate::font;
use crate::sync::IrqSafeSpinLock;
use core::convert::TryFrom;
use core::fmt;
use libsys::{debug::TraceLevel, error::Errno, mem::memcpy};
pub static LEVEL: Level = Level::Debug;
static COLOR_MAP: [u32; 16] = [
0x000000, 0x0000AA, 0x00AA00, 0x00AAAA, 0xAA0000, 0xAA00AA, 0xAA5500, 0xAAAAAA, 0x555555,
0x5555FF, 0x55FF55, 0x55FFFF, 0xFF5555, 0xFF55FF, 0xFFFF55, 0xFFFFFF,
];
static ATTR_MAP: [usize; 10] = [0, 4, 2, 6, 1, 5, 3, 7, 7, 7];
static DISPLAY: IrqSafeSpinLock<FramebufferOutput> = IrqSafeSpinLock::new(FramebufferOutput {
display: None,
col: 0,
row: 0,
fg: 0xBBBBBB,
bg: 0x000000,
esc: EscapeState::None,
esc_argv: [0; 8],
esc_argc: 0,
});
enum EscapeState {
None,
Esc,
Data,
}
struct FramebufferOutput {
display: Option<&'static dyn Display>,
row: usize,
col: usize,
fg: u32,
bg: u32,
esc: EscapeState,
esc_argv: [usize; 8],
esc_argc: usize,
}
impl fmt::Write for FramebufferOutput {
fn write_str(&mut self, s: &str) -> fmt::Result {
if self.display.is_none() {
return Ok(());
}
let fb = self.display.unwrap().framebuffer().unwrap();
for ch in s.chars() {
self.putc(&fb, ch);
}
Ok(())
}
}
pub fn set_display(disp: &'static dyn Display) {
DISPLAY.lock().display = Some(disp);
}
/// Kernel logging levels
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
@ -42,7 +98,7 @@ impl TryFrom<u32> for Level {
2 => Ok(Level::Info),
3 => Ok(Level::Warn),
4 => Ok(Level::Error),
_ => Err(Errno::InvalidArgument)
_ => Err(Errno::InvalidArgument),
}
}
}
@ -137,6 +193,10 @@ pub fn _debug(level: Level, args: fmt::Arguments) {
use crate::arch::machine;
use fmt::Write;
if level > Level::Debug {
DISPLAY.lock().write_fmt(args).ok();
}
if level >= LEVEL {
SerialOutput {
inner: machine::console(),
@ -145,3 +205,101 @@ pub fn _debug(level: Level, args: fmt::Arguments) {
.ok();
}
}
impl FramebufferOutput {
const CW: usize = 8;
const CH: usize = 12;
pub fn set_char(&mut self, fb: &FramebufferInfo, x: usize, y: usize, ch: char) {
if (x + 1) * Self::CW >= fb.width || (y + 1) * Self::CH >= fb.height {
return;
}
font::get().draw(fb, x * Self::CW, y * Self::CH, ch, self.fg, self.bg);
}
pub fn scroll(&mut self, fb: &FramebufferInfo) {
let stride = 4 * Self::CH * fb.width;
let h = fb.height / Self::CH - 1;
if self.row == h {
for y in 0..(h - 1) {
unsafe {
memcpy(
(fb.virt_base + stride * y) as *mut u8,
(fb.virt_base + (y + 1) * stride) as *const u8,
stride,
);
}
}
self.row = h - 1;
}
}
pub fn putc(&mut self, fb: &FramebufferInfo, ch: char) {
match self.esc {
EscapeState::None => {
match ch {
'\x1B' => {
self.esc = EscapeState::Esc;
self.esc_argv.fill(0);
self.esc_argc = 0;
}
' '..='\x7E' => {
self.set_char(fb, self.col, self.row, ch);
// Advance the cursor
self.col += 1;
if (self.col + 1) * Self::CW >= fb.width {
self.col = 0;
self.row += 1;
}
}
'\n' => {
self.col = 0;
self.row += 1;
}
_ => {}
}
}
EscapeState::Esc => match ch {
'[' => {
self.esc = EscapeState::Data;
}
_ => {
self.esc = EscapeState::None;
}
},
EscapeState::Data => {
match ch {
'0'..='9' => {
self.esc_argv[self.esc_argc] *= 10;
self.esc_argv[self.esc_argc] += (ch as u8 - b'0') as usize;
}
';' => {
self.esc_argc += 1;
}
_ => {
self.esc_argc += 1;
self.esc = EscapeState::None;
}
}
match ch {
'm' => {
for i in 0..self.esc_argc {
let item = self.esc_argv[i];
if item / 10 == 4 {
self.bg = COLOR_MAP[ATTR_MAP[(item % 10) as usize]];
}
if item / 10 == 3 {
self.fg = COLOR_MAP[ATTR_MAP[(item % 10) as usize]];
}
}
}
_ => {}
}
}
};
self.scroll(fb);
}
}
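
The escape-state machine above only acts on the SGR 'm' command: a parameter in the 30s selects the foreground and one in the 40s the background, translated through ATTR_MAP (ANSI colour order) into COLOR_MAP (VGA palette order). Worked through for the \x1B[41;1m sequence that pfault_dump emits; the standalone snippet repeats the two tables so it runs on its own:

// Illustrative: how one SGR parameter is resolved by FramebufferOutput::putc.
static COLOR_MAP: [u32; 16] = [
    0x000000, 0x0000AA, 0x00AA00, 0x00AAAA, 0xAA0000, 0xAA00AA, 0xAA5500, 0xAAAAAA,
    0x555555, 0x5555FF, 0x55FF55, 0x55FFFF, 0xFF5555, 0xFF55FF, 0xFFFF55, 0xFFFFFF,
];
static ATTR_MAP: [usize; 10] = [0, 4, 2, 6, 1, 5, 3, 7, 7, 7];

fn main() {
    let item: usize = 41; // first parameter of "\x1B[41;1m" in pfault_dump
    assert_eq!(item / 10, 4); // 40s select the background
    assert_eq!(COLOR_MAP[ATTR_MAP[item % 10]], 0x00AA0000); // ANSI red maps to VGA red
}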

kernel/src/dev/display.rs Normal file

@ -0,0 +1,58 @@
use crate::dev::Device;
use libsys::error::Errno;
use crate::util::InitOnce;
pub struct FramebufferInfo {
pub width: usize,
pub height: usize,
pub phys_base: usize,
pub virt_base: usize
}
pub trait Display: Device {
fn set_mode(&self, mode: DisplayMode) -> Result<(), Errno>;
fn framebuffer<'a>(&'a self) -> Result<&'a FramebufferInfo, Errno>;
}
pub struct DisplayMode {
width: u16,
height: u16,
}
pub struct StaticFramebuffer {
framebuffer: InitOnce<FramebufferInfo>
}
impl Device for StaticFramebuffer {
fn name(&self) -> &'static str {
"Generic framebuffer device"
}
unsafe fn enable(&self) -> Result<(), Errno> {
Ok(())
}
}
impl Display for StaticFramebuffer {
fn set_mode(&self, mode: DisplayMode) -> Result<(), Errno> {
Err(Errno::InvalidOperation)
}
fn framebuffer(&self) -> Result<&FramebufferInfo, Errno> {
if let Some(fb) = self.framebuffer.as_ref_option() {
Ok(fb)
} else {
Err(Errno::InvalidOperation)
}
}
}
impl StaticFramebuffer {
pub const fn uninit() -> Self {
Self { framebuffer: InitOnce::new() }
}
pub fn set_framebuffer(&self, framebuffer: FramebufferInfo) {
self.framebuffer.init(framebuffer);
}
}


@ -5,6 +5,7 @@ use libsys::error::Errno;
/// Token to indicate the local core is running in IRQ context
pub struct IrqContext<'irq_context> {
token: usize,
_0: PhantomData<&'irq_context ()>,
}
@ -45,7 +46,11 @@ impl<'q> IrqContext<'q> {
///
/// Only allowed to be constructed in top-level IRQ handlers
#[inline(always)]
pub unsafe fn new() -> Self {
Self { _0: PhantomData }
pub unsafe fn new(token: usize) -> Self {
Self { token, _0: PhantomData }
}
pub const fn token(&self) -> usize {
self.token
}
}


@ -3,12 +3,13 @@
use libsys::error::Errno;
// Device classes
pub mod fdt;
pub mod gpio;
// pub mod fdt;
// pub mod gpio;
pub mod irq;
pub mod pci;
pub mod rtc;
pub mod sd;
pub mod display;
// pub mod pci;
// pub mod rtc;
// pub mod sd;
pub mod serial;
pub mod timer;
pub mod pseudo;


@ -1,8 +1,5 @@
use crate::arch::machine::{self, IrqNumber};
use crate::dev::{
irq::{IntController, IntSource},
serial::SerialDevice,
tty::{CharRing, TtyDevice},
Device,
};
use crate::mem::virt::DeviceMemoryIo;


@ -126,7 +126,8 @@ pub trait TtyDevice<const N: usize>: SerialDevice {
// TODO send to pgid
let proc = Process::get(pgid);
if let Some(proc) = proc {
proc.set_signal(Signal::Interrupt);
// TODO
// proc.set_signal(Signal::Interrupt);
}
}
return;

kernel/src/font.rs Normal file

@ -0,0 +1,49 @@
use crate::util::InitOnce;
use libsys::mem::read_le32;
use crate::dev::display::FramebufferInfo;
static FONT_DATA: &[u8] = include_bytes!("../../etc/default8x16.psfu");
static FONT: InitOnce<Font> = InitOnce::new();
pub struct Font {
char_width: usize,
char_height: usize,
bytes_per_glyph: usize,
data: &'static [u8],
}
impl Font {
pub fn draw(&self, fb: &FramebufferInfo, bx: usize, by: usize, ch: char, fg: u32, bg: u32) {
if ch >= ' ' && ch < '\x7B' {
let char_data = &self.data[ch as usize * self.bytes_per_glyph..];
for iy in 0..self.char_height {
for ix in 0..self.char_width {
let cx = self.char_width - ix - 1;
let ptr = fb.virt_base + (ix + bx + (iy + by) * fb.width) * 4;
let value = if char_data[iy + (cx) / 8] & (1 << (cx & 0x7)) != 0 {
fg
} else {
bg
};
unsafe { core::ptr::write_volatile(ptr as *mut u32, value) }
}
}
}
}
}
pub fn init() {
assert_eq!(read_le32(&FONT_DATA[..]), 0x864ab572);
FONT.init(Font {
char_width: read_le32(&FONT_DATA[28..]) as usize,
char_height: read_le32(&FONT_DATA[24..]) as usize,
bytes_per_glyph: read_le32(&FONT_DATA[20..]) as usize,
data: &FONT_DATA[32..]
});
}
pub fn get() -> &'static Font {
FONT.get()
}
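
init() above reads its fields straight out of the PSF2 header: the magic at offset 0, bytes per glyph at 20, glyph height at 24 and width at 28, with the glyph bitmaps starting at byte 32. A reference sketch of that header layout; the struct follows the PSF2 format and is not code from this change:

// Illustrative PSF2 header layout matching the offsets used by font::init().
#[repr(C)]
struct Psf2Header {
    magic: u32,           // offset 0: 0x864AB572
    version: u32,         // offset 4
    header_size: u32,     // offset 8: normally 32, where glyph data begins
    flags: u32,           // offset 12
    glyph_count: u32,     // offset 16
    bytes_per_glyph: u32, // offset 20
    height: u32,          // offset 24
    width: u32,           // offset 28
}

const _: () = assert!(core::mem::size_of::<Psf2Header>() == 32);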


@ -162,8 +162,8 @@ pub fn init() {
use crate::dev::timer::TimestampSource;
let mut writer = BufferWriter::new(buf);
let time = machine::local_timer().timestamp()?;
write!(&mut writer, "{} {}\n", time.as_secs(), time.subsec_nanos()).map_err(|_| Errno::InvalidArgument)?;
// let time = machine::local_timer().timestamp()?;
// write!(&mut writer, "{} {}\n", time.as_secs(), time.subsec_nanos()).map_err(|_| Errno::InvalidArgument)?;
Ok(writer.count())
});
}

View File

@ -13,6 +13,7 @@
alloc_error_handler,
linked_list_cursors,
const_btree_new,
core_intrinsics,
const_generics_defaults,
)]
#![no_std]
@ -29,11 +30,12 @@ extern crate alloc;
#[macro_use]
pub mod debug;
//
pub mod arch;
pub mod config;
pub mod dev;
pub mod fs;
pub mod font;
pub mod init;
pub mod mem;
pub mod proc;
@ -43,9 +45,9 @@ pub mod util;
#[panic_handler]
fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
unsafe {
asm!("msr daifset, #2");
}
// unsafe {
// asm!("msr daifset, #2");
// }
errorln!("Panic: {:?}", pi);
// TODO

View File

@ -59,7 +59,7 @@ static HEAP: InitOnce<IrqSafeSpinLock<Heap>> = InitOnce::new();
pub unsafe fn init(base: usize, size: usize) {
let heap = Heap { base, size, ptr: 0 };
infoln!("Kernel heap: {:#x}..{:#x}", base, base + size);
// infoln!("Kernel heap: {:#x}..{:#x}", base, base + size);
HEAP.init(IrqSafeSpinLock::new(heap));
}

View File

@ -9,7 +9,7 @@ mod manager;
mod reserved;
use manager::{Manager, SimpleManager, MANAGER};
pub use reserved::ReservedRegion;
pub use reserved::{ReservedRegion, reserve};
type ManagerImpl = SimpleManager;
@ -213,7 +213,10 @@ pub unsafe fn init_from_iter<T: Iterator<Item = MemoryRegion> + Clone>(iter: T)
// Step 1. Count available memory
let mut total_pages = 0usize;
for reg in iter.clone() {
total_pages += (reg.end - reg.start) / PAGE_SIZE;
let upper = (reg.end - mem_base) / PAGE_SIZE;
if upper > total_pages {
total_pages = upper;
}
}
// TODO maybe instead of size_of::<...> use Layout?
let need_pages = ((total_pages * size_of::<PageInfo>()) + 0xFFF) / 0x1000;
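The rewritten loop tracks the highest page index above `mem_base` instead of summing region sizes, presumably so the `PageInfo` array spans any holes between regions; a small self-contained illustration of the difference:
// Stand-alone illustration with made-up regions (mem_base = 0, 4 KiB pages).
let regions = [(0x0usize, 0x10_0000usize), (0x4000_0000, 0x4010_0000)];
let mut total_pages = 0usize;
for &(_start, end) in &regions {
    let upper = end / 0x1000;       // page index counted from mem_base
    if upper > total_pages {
        total_pages = upper;        // keep the highest index, not the sum
    }
}
// Summing the regions would give 0x200 pages; spanning them needs 0x40100.
assert_eq!(total_pages, 0x40100);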

View File

@ -2,18 +2,18 @@
use core::marker::PhantomData;
use core::ops::Deref;
use cortex_a::asm::barrier::{self, dsb, isb};
use cortex_a::registers::TTBR0_EL1;
// use cortex_a::asm::barrier::{self, dsb, isb};
// use cortex_a::registers::TTBR0_EL1;
use libsys::error::Errno;
use tock_registers::interfaces::Writeable;
// use tock_registers::interfaces::Writeable;
//
pub mod table;
pub use table::{Entry, MapAttributes, Space, Table};
pub mod fixed;
pub use fixed::FixedTableGroup;
pub use table::{AddressSpace, Space};
#[no_mangle]
static mut KERNEL_TTBR1: FixedTableGroup = FixedTableGroup::empty();
// pub use table::{Entry, MapAttributes, Space, Table};
// pub mod fixed;
// pub use fixed::FixedTableGroup;
use crate::arch::platform::table as plat_table;
/// Structure representing a region of memory used for MMIO/device access
// TODO: this shouldn't be trivially-cloneable and should instead incorporate
@ -45,15 +45,16 @@ impl DeviceMemory {
///
/// See [FixedTableGroup::map_region]
pub fn map(name: &'static str, phys: usize, count: usize) -> Result<Self, Errno> {
let base = unsafe { KERNEL_TTBR1.map_region(phys, count) }?;
debugln!(
"Mapping {:#x}..{:#x} -> {:#x} for {:?}",
base,
base + count * 0x1000,
phys,
name
);
Ok(Self { name, base, count })
todo!();
// let base = unsafe { KERNEL_TTBR1.map_region(phys, count) }?;
// debugln!(
// "Mapping {:#x}..{:#x} -> {:#x} for {:?}",
// base,
// base + count * 0x1000,
// phys,
// name
// );
// Ok(Self { name, base, count })
}
}
@ -90,16 +91,18 @@ impl<T> Deref for DeviceMemoryIo<T> {
/// Sets up device mapping tables and disables lower-half
/// identity-mapped translation
pub fn enable() -> Result<(), Errno> {
unsafe {
KERNEL_TTBR1.init_device_map();
dsb(barrier::ISH);
isb(barrier::SY);
}
// Disable lower-half translation
TTBR0_EL1.set(0);
//TCR_EL1.modify(TCR_EL1::EPD0::SET);
Ok(())
plat_table::enable()
}
// unsafe {
// KERNEL_TTBR1.init_device_map();
//
// dsb(barrier::ISH);
// isb(barrier::SY);
// }
//
// // Disable lower-half translation
// TTBR0_EL1.set(0);
// //TCR_EL1.modify(TCR_EL1::EPD0::SET);
//
// Ok(())
// }

View File

@ -7,269 +7,62 @@ use crate::mem::{
use core::ops::{Index, IndexMut};
use libsys::{error::Errno, mem::memset};
/// Transparent wrapper structure representing a single
/// translation table entry
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Entry(u64);
use crate::arch::platform::table;
/// Structure describing a single level of translation mappings
#[repr(C, align(0x1000))]
pub struct Table {
entries: [Entry; 512],
}
/// Wrapper for top-most level of address translation tables
#[repr(transparent)]
pub struct Space(Table);
pub use table::{Space, Table};
bitflags! {
/// Attributes attached to each translation [Entry]
pub struct MapAttributes: u64 {
// TODO use 2 lower bits to determine mapping size?
/// nG bit -- determines whether a TLB entry associated with this mapping
/// applies only to current ASID or all ASIDs.
const NOT_GLOBAL = 1 << 11;
/// AF bit -- must be set by software, otherwise Access Error exception is
/// generated when the page is accessed
const ACCESS = 1 << 10;
/// The memory region is outer-shareable
const SH_OUTER = 2 << 8;
/// This page is used for device-MMIO mapping and uses MAIR attribute #1
const DEVICE = 1 << 2;
const USER_READ = 1 << 1;
const USER_WRITE = 1 << 2;
const USER_EXEC = 1 << 3;
const KERNEL_WRITE = 1 << 4;
const KERNEL_EXEC = 1 << 5;
/// Pages marked with this bit are Copy-on-Write
const EX_COW = 1 << 55;
const SHARE_OUTER = 1 << 6;
const SHARE_INNER = 2 << 6;
/// UXN bit -- if set, page may not be used for instruction fetching from EL0
const UXN = 1 << 54;
/// PXN bit -- if set, page may not be used for instruction fetching from EL1
const PXN = 1 << 53;
// AP field
// Default behavior is: read-write for EL1, no access for EL0
/// If set, the page referred to by this entry is read-only for both EL0/EL1
const AP_BOTH_READONLY = 3 << 6;
/// If set, the page referred to by this entry is read-write for both EL0/EL1
const AP_BOTH_READWRITE = 1 << 6;
const NOT_GLOBAL = 1 << 8;
}
}
impl Table {
/// Returns next-level translation table reference for `index`, if one is present.
/// If `index` represents a `Block`-type mapping, will return an error.
/// If `index` does not map to any translation table, will try to allocate, init and
/// map a new one, returning it after doing so.
pub fn next_level_table_or_alloc(&mut self, index: usize) -> Result<&'static mut Table, Errno> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
return Err(Errno::InvalidArgument);
}
Ok(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
} else {
let phys = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
self[index] = Entry::table(phys, MapAttributes::empty());
res.entries.fill(Entry::invalid());
Ok(res)
}
}
/// Returns next-level translation table reference for `index`, if one is present.
/// Same as [next_level_table_or_alloc], but returns `None` if no table is mapped.
pub fn next_level_table(&mut self, index: usize) -> Option<&'static mut Table> {
let entry = self[index];
if entry.is_present() {
if !entry.is_table() {
panic!("Entry is not a table: idx={}", index);
}
Some(unsafe { &mut *(mem::virtualize(entry.address_unchecked()) as *mut _) })
} else {
None
}
}
/// Constructs and fills a [Table] with non-present mappings
pub const fn empty() -> Table {
Table {
entries: [Entry::invalid(); 512],
}
}
pub trait Entry: Clone + Copy {
fn from_parts(phys: usize, attrs: MapAttributes) -> Self;
fn target(self) -> usize;
fn is_present(self) -> bool;
fn is_table(self) -> bool;
}
impl Index<usize> for Table {
type Output = Entry;
pub trait AddressSpace {
type Entry: Entry;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
fn alloc_empty() -> Result<&'static mut Self, Errno>;
fn fork(&mut self) -> Result<&'static mut Self, Errno>;
fn release(space: &mut Self);
fn address_phys(&mut self) -> usize;
fn read_last_level_entry(&mut self, virt: usize) -> Result<Self::Entry, Errno>;
fn write_last_level_entry(
&mut self,
virt: usize,
entry: Self::Entry,
map_intermediate: bool,
) -> Result<(), Errno>;
impl IndexMut<usize> for Table {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
impl Entry {
const PRESENT: u64 = 1 << 0;
const TABLE: u64 = 1 << 1;
const PHYS_MASK: u64 = 0x0000FFFFFFFFF000;
/// Constructs a single non-present mapping
pub const fn invalid() -> Self {
Self(0)
#[inline(always)]
fn map(&mut self, virt: usize, phys: usize, attrs: MapAttributes) -> Result<(), Errno> {
let entry = Entry::from_parts(phys, attrs);
self.write_last_level_entry(virt, entry, true).map(|_| ())
}
/// Constructs a `Block`-type memory mapping
pub const fn block(phys: usize, attrs: MapAttributes) -> Self {
Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT)
}
/// Constructs a `Table` or `Page`-type mapping depending on translation level
/// this entry is used at
pub const fn table(phys: usize, attrs: MapAttributes) -> Self {
Self((phys as u64 & Self::PHYS_MASK) | attrs.bits() | Self::PRESENT | Self::TABLE)
}
/// Returns `true` if this entry is not invalid
pub const fn is_present(self) -> bool {
self.0 & Self::PRESENT != 0
}
/// Returns `true` if this entry is a `Table` or `Page`-type mapping
pub const fn is_table(self) -> bool {
self.0 & Self::TABLE != 0
}
/// Returns the target address of this translation entry.
///
/// # Safety
///
/// Does not check if the entry is actually valid.
pub const unsafe fn address_unchecked(self) -> usize {
(self.0 & Self::PHYS_MASK) as usize
}
unsafe fn set_address(&mut self, address: usize) {
self.0 &= !Self::PHYS_MASK;
self.0 |= (address as u64) & Self::PHYS_MASK;
}
unsafe fn fork_flags(self) -> MapAttributes {
MapAttributes::from_bits_unchecked(self.0 & !Self::PHYS_MASK)
}
fn set_cow(&mut self) {
self.0 |= (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
}
fn clear_cow(&mut self) {
self.0 &= !(MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
self.0 |= MapAttributes::AP_BOTH_READWRITE.bits();
}
#[inline]
fn is_cow(self) -> bool {
let attrs = (MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW).bits();
self.0 & attrs == attrs
}
}
impl Space {
/// Creates a new virtual address space and fills it with [Entry::invalid()]
/// mappings. Does physical memory page allocation.
pub fn alloc_empty() -> Result<&'static mut Self, Errno> {
let phys = phys::alloc_page(PageUsage::Paging)?;
let res = unsafe { &mut *(mem::virtualize(phys) as *mut Self) };
res.0.entries.fill(Entry::invalid());
Ok(res)
}
/// Inserts a single `virt` -> `phys` translation entry to this address space.
///
/// TODO: only works with 4K-sized pages at this moment.
pub fn map(&mut self, virt: usize, phys: usize, flags: MapAttributes) -> Result<(), Errno> {
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table_or_alloc(l0i)?;
let l2_table = l1_table.next_level_table_or_alloc(l1i)?;
if l2_table[l2i].is_present() {
Err(Errno::AlreadyExists)
} else {
l2_table[l2i] = Entry::table(phys, flags | MapAttributes::ACCESS);
#[cfg(feature = "verbose")]
debugln!("{:#p} Map {:#x} -> {:#x}, {:?}", self, virt, phys, flags);
Ok(())
}
}
/// Translates a virtual address into a corresponding physical one.
///
/// Only works for 4K pages atm.
// TODO extract attributes
pub fn translate(&mut self, virt: usize) -> Result<usize, Errno> {
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if entry.is_present() {
Ok(unsafe { entry.address_unchecked() })
} else {
Err(Errno::DoesNotExist)
}
}
/// Attempts to resolve a page fault at `virt` address by copying the
/// underlying Copy-on-Write mapping (if any is present)
pub fn try_cow_copy(&mut self, virt: usize) -> Result<(), Errno> {
let virt = virt & !0xFFF;
let l0i = virt >> 30;
let l1i = (virt >> 21) & 0x1FF;
let l2i = (virt >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if !entry.is_present() {
warnln!("Entry is not present: {:#x}", virt);
return Err(Errno::DoesNotExist);
}
let src_phys = unsafe { entry.address_unchecked() };
if !entry.is_cow() {
warnln!(
"Entry is not marked as CoW: {:#x}, points to {:#x}",
virt,
src_phys
);
return Err(Errno::DoesNotExist);
}
let dst_phys = unsafe { phys::copy_cow_page(src_phys)? };
unsafe {
l2_table[l2i].set_address(dst_phys);
}
l2_table[l2i].clear_cow();
Ok(())
#[inline(always)]
fn translate(&mut self, virt: usize) -> Result<usize, Errno> {
self.read_last_level_entry(virt).map(Entry::target)
}
/// Allocates a contiguous region from the address space and maps
/// physical pages to it
pub fn allocate(
fn allocate(
&mut self,
start: usize,
end: usize,
@ -292,136 +85,4 @@ impl Space {
}
Err(Errno::OutOfMemory)
}
/// Removes a single 4K page mapping from the table and
/// releases the underlying physical memory
pub fn unmap_single(&mut self, page: usize) -> Result<(), Errno> {
let l0i = page >> 30;
let l1i = (page >> 21) & 0x1FF;
let l2i = (page >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if !entry.is_present() {
return Err(Errno::DoesNotExist);
}
let phys = unsafe { entry.address_unchecked() };
unsafe {
phys::free_page(phys)?;
}
l2_table[l2i] = Entry::invalid();
unsafe {
asm!("tlbi vaae1, {}", in(reg) page);
}
// TODO release paging structure memory
Ok(())
}
/// Releases a range of virtual pages and their corresponding physical pages
pub fn free(&mut self, start: usize, len: usize) -> Result<(), Errno> {
for i in 0..len {
self.unmap_single(start + i * 0x1000)?;
}
Ok(())
}
/// Performs a copy of the address space, cloning data owned by it
pub fn fork(&mut self) -> Result<&'static mut Self, Errno> {
let res = Self::alloc_empty()?;
for l0i in 0..512 {
if let Some(l1_table) = self.0.next_level_table(l0i) {
for l1i in 0..512 {
if let Some(l2_table) = l1_table.next_level_table(l1i) {
for l2i in 0..512 {
let entry = l2_table[l2i];
if !entry.is_present() {
continue;
}
assert!(entry.is_table());
let src_phys = unsafe { entry.address_unchecked() };
let virt_addr = (l0i << 30) | (l1i << 21) | (l2i << 12);
let dst_phys = unsafe { phys::fork_page(src_phys)? };
let mut flags = unsafe { entry.fork_flags() };
if dst_phys != src_phys {
todo!();
// res.map(virt_addr, dst_phys, flags)?;
} else {
let writable = flags & MapAttributes::AP_BOTH_READONLY
== MapAttributes::AP_BOTH_READWRITE;
if writable {
flags |=
MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
l2_table[l2i].set_cow();
unsafe {
asm!("tlbi vaae1, {}", in(reg) virt_addr);
}
}
res.map(virt_addr, dst_phys, flags)?;
}
}
}
}
}
}
Ok(res)
}
/// Releases all the mappings from the address space. Frees all
/// memory pages referenced by this space as well as those used for
/// its paging tables.
///
/// # Safety
///
/// Unsafe: may invalidate currently active address space
pub unsafe fn release(space: &mut Self) {
for l0i in 0..512 {
let l0_entry = space.0[l0i];
if !l0_entry.is_present() {
continue;
}
assert!(l0_entry.is_table());
let l1_table = &mut *(mem::virtualize(l0_entry.address_unchecked()) as *mut Table);
for l1i in 0..512 {
let l1_entry = l1_table[l1i];
if !l1_entry.is_present() {
continue;
}
assert!(l1_entry.is_table());
let l2_table = &mut *(mem::virtualize(l1_entry.address_unchecked()) as *mut Table);
for l2i in 0..512 {
let entry = l2_table[l2i];
if !entry.is_present() {
continue;
}
assert!(entry.is_table());
phys::free_page(entry.address_unchecked()).unwrap();
}
phys::free_page(l1_entry.address_unchecked()).unwrap();
}
phys::free_page(l0_entry.address_unchecked()).unwrap();
}
memset(space as *mut Space as *mut u8, 0, 4096);
}
/// Returns the physical address of this structure
pub fn address_phys(&mut self) -> usize {
(self as *mut _ as usize) - mem::KERNEL_OFFSET
}
}
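To make the new split concrete: the architecture side now supplies the entry encoding, while the generic `map`/`translate` defaults only go through `from_parts` and `target`. A rough sketch of what an implementation could look like (the ExampleEntry name and bit positions are purely illustrative, not the real x86_64 code):
// Hypothetical sketch of the per-arch side of the new trait split.
#[derive(Clone, Copy)]
#[repr(transparent)]
struct ExampleEntry(u64);
impl Entry for ExampleEntry {
    fn from_parts(phys: usize, attrs: MapAttributes) -> Self {
        // A real implementation would translate the generic MapAttributes
        // into the architecture's bit layout; this sketch only records
        // "present" plus an illustrative writable bit.
        let mut raw = (phys as u64 & 0x000F_FFFF_FFFF_F000) | 1;
        if attrs.contains(MapAttributes::USER_WRITE) {
            raw |= 1 << 2;
        }
        Self(raw)
    }
    fn target(self) -> usize {
        (self.0 & 0x000F_FFFF_FFFF_F000) as usize
    }
    fn is_present(self) -> bool {
        self.0 & 1 != 0
    }
    fn is_table(self) -> bool {
        self.0 & (1 << 1) != 0
    }
}
With such an implementation in place, the blanket `map` and `translate` methods on `AddressSpace` work unchanged, since they only rely on `from_parts` and `target`.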

View File

@ -2,7 +2,7 @@
use crate::mem::{
self,
phys::{self, PageUsage},
virt::{MapAttributes, Space},
virt::{Space, table::{AddressSpace, MapAttributes}},
};
use core::mem::{size_of, MaybeUninit};
use libsys::{
@ -66,10 +66,10 @@ struct Phdr<E: Elf> {
}
fn map_flags(elf_flags: usize) -> MapAttributes {
let mut dst_flags = MapAttributes::NOT_GLOBAL | MapAttributes::SH_OUTER;
let mut dst_flags = MapAttributes::SHARE_OUTER | MapAttributes::NOT_GLOBAL;
if elf_flags & (1 << 0) /* PF_X */ == 0 {
dst_flags |= MapAttributes::UXN | MapAttributes::PXN;
if elf_flags & (1 << 0) /* PF_X */ != 0 {
dst_flags |= MapAttributes::USER_EXEC | MapAttributes::KERNEL_EXEC;
}
match (elf_flags & (3 << 1)) >> 1 {
@ -78,9 +78,9 @@ fn map_flags(elf_flags: usize) -> MapAttributes {
// Write-only: not sure if such mapping should exist at all
1 => todo!(),
// Read-only
2 => dst_flags |= MapAttributes::AP_BOTH_READONLY,
2 => dst_flags |= MapAttributes::USER_READ,
// Read+Write
3 => dst_flags |= MapAttributes::AP_BOTH_READWRITE,
3 => dst_flags |= MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::KERNEL_WRITE,
_ => unreachable!(),
};
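As a worked example of the new flag translation (using the standard ELF values PF_X = 1, PF_W = 2, PF_R = 4), a read-execute segment maps as follows:
// Worked example, not part of the diff: p_flags = PF_R | PF_X = 0x5
let attrs = map_flags(0x5);
// base attributes are always applied
assert!(attrs.contains(MapAttributes::SHARE_OUTER | MapAttributes::NOT_GLOBAL));
// PF_X grants execute for both user and kernel
assert!(attrs.contains(MapAttributes::USER_EXEC | MapAttributes::KERNEL_EXEC));
// PF_R alone maps read-only
assert!(attrs.contains(MapAttributes::USER_READ));
assert!(!attrs.contains(MapAttributes::USER_WRITE));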

View File

@ -27,6 +27,11 @@ pub fn switch() {
SCHED.switch(false);
}
#[no_mangle]
extern "C" fn sched_yield() {
SCHED.switch(false);
}
pub(self) static PROCESSES: IrqSafeSpinLock<BTreeMap<Pid, ProcessRef>> =
IrqSafeSpinLock::new(BTreeMap::new());

View File

@ -1,14 +1,14 @@
//! Process data and control
use crate::arch::aarch64::exception::ExceptionFrame;
use crate::arch::platform::ForkFrame;
use crate::mem::{
self,
phys::{self, PageUsage},
virt::{MapAttributes, Space},
virt::{table::{MapAttributes, AddressSpace}, Space},
};
use crate::proc::{
wait::Wait, Context, ProcessIo, Thread, ThreadRef, ThreadState, PROCESSES, SCHED, Tid,
};
use crate::sync::{IrqSafeSpinLock};
use crate::sync::{IrqSafeSpinLock, IrqSafeSpinLockGuard};
use alloc::{rc::Rc, vec::Vec};
use core::sync::atomic::{AtomicU32, Ordering};
use libsys::{
@ -18,6 +18,7 @@ use libsys::{
signal::Signal,
ProgramArgs,
};
use core::arch::asm;
/// Wrapper type for a process struct reference
pub type ProcessRef = Rc<Process>;
@ -54,7 +55,7 @@ pub struct Process {
impl Process {
const USTACK_VIRT_TOP: usize = 0x100000000;
const USTACK_PAGES: usize = 4;
const USTACK_PAGES: usize = 8;
/// Returns the process ID
#[inline]
@ -155,191 +156,200 @@ impl Process {
None
}
/// Handles all pending signals (when returning from aborted syscall)
pub fn handle_pending_signals(&self) {
let mut lock = self.inner.lock();
let ttbr0 = lock.space.as_mut().unwrap().address_phys() | ((lock.id.asid() as usize) << 48);
let main_thread = Thread::get(lock.threads[0]).unwrap();
drop(lock);
loop {
let state = self.signal_state.load(Ordering::Acquire);
if let Some(signal) = Self::find1(state).map(|e| Signal::try_from(e as u32).unwrap()) {
self.signal_state.fetch_and(!(1 << (signal as u32)), Ordering::Release);
main_thread.clone().enter_signal(signal, ttbr0);
} else {
break;
}
}
fn space_phys(lock: &mut IrqSafeSpinLockGuard<ProcessInner>) -> usize {
lock.space.as_mut().unwrap().address_phys() | ((lock.id.asid() as usize) << 48)
}
/// Sets a pending signal for a process
pub fn set_signal(&self, signal: Signal) {
let mut lock = self.inner.lock();
let ttbr0 = lock.space.as_mut().unwrap().address_phys() | ((lock.id.asid() as usize) << 48);
let main_thread = Thread::get(lock.threads[0]).unwrap();
drop(lock);
// /// Handles all pending signals (when returning from aborted syscall)
// pub fn handle_pending_signals(&self) {
// let mut lock = self.inner.lock();
// let table = Self::space_phys(&lock);
// let main_thread = Thread::get(lock.threads[0]).unwrap();
// drop(lock);
// TODO check that `signal` is not a fault signal
// it is illegal to call this function with
// fault signals
// loop {
// let state = self.signal_state.load(Ordering::Acquire);
// if let Some(signal) = Self::find1(state).map(|e| Signal::try_from(e as u32).unwrap()) {
// self.signal_state.fetch_and(!(1 << (signal as u32)), Ordering::Release);
// main_thread.clone().enter_signal(signal, table);
// } else {
// break;
// }
// }
// }
match main_thread.state() {
ThreadState::Running => {
main_thread.enter_signal(signal, ttbr0);
}
ThreadState::Waiting => {
self.signal_state.fetch_or(1 << (signal as u32), Ordering::Release);
main_thread.interrupt_wait(true);
}
ThreadState::Ready => {
main_thread.clone().setup_signal(signal, ttbr0);
main_thread.interrupt_wait(false);
}
ThreadState::Finished => {
// TODO report error back
todo!()
}
}
}
// /// Sets a pending signal for a process
// pub fn set_signal(&self, signal: Signal) {
// let mut lock = self.inner.lock();
// let table = Self::space_phys(&lock);
// let main_thread = Thread::get(lock.threads[0]).unwrap();
// drop(lock);
/// Immediately delivers a signal to requested thread
pub fn enter_fault_signal(&self, thread: ThreadRef, signal: Signal) {
let mut lock = self.inner.lock();
let ttbr0 = lock.space.as_mut().unwrap().address_phys() | ((lock.id.asid() as usize) << 48);
drop(lock);
thread.enter_signal(signal, ttbr0);
}
// // TODO check that `signal` is not a fault signal
// // it is illegal to call this function with
// // fault signals
/// Creates a new thread in the process
pub fn new_user_thread(&self, entry: usize, stack: usize, arg: usize) -> Result<Tid, Errno> {
let mut lock = self.inner.lock();
// match main_thread.state() {
// ThreadState::Running => {
// main_thread.enter_signal(signal, table);
// }
// ThreadState::Waiting => {
// self.signal_state.fetch_or(1 << (signal as u32), Ordering::Release);
// main_thread.interrupt_wait(true);
// }
// ThreadState::Ready => {
// main_thread.clone().setup_signal(signal, table);
// main_thread.interrupt_wait(false);
// }
// ThreadState::Finished => {
// // TODO report error back
// todo!()
// }
// }
// }
let space_phys = lock.space.as_mut().unwrap().address_phys();
let ttbr0 = space_phys | ((lock.id.asid() as usize) << 48);
// /// Immediately delivers a signal to requested thread
// pub fn enter_fault_signal(&self, thread: ThreadRef, signal: Signal) {
// let mut lock = self.inner.lock();
// let table = Self::space_phys(&lock);
// drop(lock);
// thread.enter_signal(signal, table);
// }
let thread = Thread::new_user(lock.id, entry, stack, arg, ttbr0)?;
let tid = thread.id();
lock.threads.push(tid);
SCHED.enqueue(tid);
// /// Creates a new thread in the process
// pub fn new_user_thread(&self, entry: usize, stack: usize, arg: usize) -> Result<Tid, Errno> {
// let mut lock = self.inner.lock();
Ok(tid)
}
// let table = Self::space_phys(&lock);
// let thread = Thread::new_user(lock.id, entry, stack, arg, table)?;
// let tid = thread.id();
// lock.threads.push(tid);
// SCHED.enqueue(tid);
// Ok(tid)
// }
/// Creates a "fork" of the process, cloning its address space and
/// resources
pub fn fork(&self, frame: &mut ExceptionFrame) -> Result<Pid, Errno> {
pub fn fork(&self, frame: &mut ForkFrame) -> Result<Pid, Errno> {
todo!();
let src_io = self.io.lock();
let mut src_inner = self.inner.lock();
let dst_id = new_user_pid();
let dst_space = src_inner.space.as_mut().unwrap().fork()?;
let dst_space_phys = (dst_space as *mut _ as usize) - mem::KERNEL_OFFSET;
let dst_ttbr0 = dst_space_phys | ((dst_id.asid() as usize) << 48);
let mut threads = Vec::new();
let tid = Thread::fork(Some(dst_id), frame, dst_ttbr0)?.id();
threads.push(tid);
todo!()
let dst = Rc::new(Self {
exit_wait: Wait::new("process_exit"),
io: IrqSafeSpinLock::new(src_io.fork()?),
signal_state: AtomicU32::new(0),
inner: IrqSafeSpinLock::new(ProcessInner {
threads,
exit: None,
space: Some(dst_space),
state: ProcessState::Active,
id: dst_id,
pgid: src_inner.pgid,
ppid: Some(src_inner.id),
sid: src_inner.sid,
}),
});
// let dst_space_phys = (dst_space as *mut _ as usize) - mem::KERNEL_OFFSET;
// let dst_ttbr0 = dst_space_phys | ((dst_id.asid() as usize) << 48);
debugln!("Process {:?} forked into {:?}", src_inner.id, dst_id);
assert!(PROCESSES.lock().insert(dst_id, dst).is_none());
// let mut threads = Vec::new();
// let tid = Thread::fork(Some(dst_id), frame, dst_ttbr0)?.id();
// threads.push(tid);
SCHED.enqueue(tid);
// let dst = Rc::new(Self {
// exit_wait: Wait::new("process_exit"),
// io: IrqSafeSpinLock::new(src_io.fork()?),
// signal_state: AtomicU32::new(0),
// inner: IrqSafeSpinLock::new(ProcessInner {
// threads,
// exit: None,
// space: Some(dst_space),
// state: ProcessState::Active,
// id: dst_id,
// pgid: src_inner.pgid,
// ppid: Some(src_inner.id),
// sid: src_inner.sid,
// }),
// });
Ok(dst_id)
// debugln!("Process {:?} forked into {:?}", src_inner.id, dst_id);
// assert!(PROCESSES.lock().insert(dst_id, dst).is_none());
// SCHED.enqueue(tid);
// Ok(dst_id)
}
/// Terminates a process.
pub fn exit(self: ProcessRef, status: ExitCode) {
let thread = Thread::current();
let mut lock = self.inner.lock();
let is_running = thread.owner_id().map(|e| e == lock.id).unwrap_or(false);
infoln!("Process {:?} is exiting: {:?}", lock.id, status);
assert!(lock.exit.is_none());
lock.exit = Some(status);
lock.state = ProcessState::Finished;
for &tid in lock.threads.iter() {
let thread = Thread::get(tid).unwrap();
if thread.state() == ThreadState::Waiting {
todo!()
}
thread.terminate(status);
SCHED.dequeue(tid);
}
// let thread = Thread::current();
// let mut lock = self.inner.lock();
// let is_running = thread.owner_id().map(|e| e == lock.id).unwrap_or(false);
if let Some(space) = lock.space.take() {
unsafe {
Space::release(space);
Process::invalidate_asid((lock.id.asid() as usize) << 48);
}
}
// infoln!("Process {:?} is exiting: {:?}", lock.id, status);
// assert!(lock.exit.is_none());
// lock.exit = Some(status);
// lock.state = ProcessState::Finished;
// TODO when exiting from signal handler interrupting an IO operation
// deadlock is achieved
self.io.lock().handle_exit();
// for &tid in lock.threads.iter() {
// let thread = Thread::get(tid).unwrap();
// if thread.state() == ThreadState::Waiting {
// todo!()
// }
// thread.terminate(status);
// SCHED.dequeue(tid);
// }
drop(lock);
// if let Some(space) = lock.space.take() {
// unsafe {
// Space::release(space);
// Process::invalidate_asid((lock.id.asid() as usize) << 48);
// }
// }
self.exit_wait.wakeup_all();
// // TODO when exiting from signal handler interrupting an IO operation
// // deadlock is achieved
// self.io.lock().handle_exit();
if is_running {
SCHED.switch(true);
panic!("This code should never run");
}
// drop(lock);
// self.exit_wait.wakeup_all();
// if is_running {
// SCHED.switch(true);
// panic!("This code should never run");
// }
}
/// Terminates a thread of the process. If the thread is the only
/// one remaining, the process itself is exited (see [Process::exit])
pub fn exit_thread(thread: ThreadRef, status: ExitCode) {
let switch = {
let switch = thread.state() == ThreadState::Running;
let process = thread.owner().unwrap();
let mut lock = process.inner.lock();
let tid = thread.id();
todo!()
if lock.threads.len() == 1 {
// TODO call Process::exit instead?
drop(lock);
process.exit(status);
return;
}
// let switch = {
// let switch = thread.state() == ThreadState::Running;
// let process = thread.owner().unwrap();
// let mut lock = process.inner.lock();
// let tid = thread.id();
lock.threads.retain(|&e| e != tid);
// if lock.threads.len() == 1 {
// // TODO call Process::exit instead?
// drop(lock);
// process.exit(status);
// return;
// }
thread.terminate(status);
SCHED.dequeue(tid);
debugln!("Thread {:?} terminated", tid);
// lock.threads.retain(|&e| e != tid);
switch
};
// thread.terminate(status);
// SCHED.dequeue(tid);
// debugln!("Thread {:?} terminated", tid);
if switch {
// TODO retain thread ID in process "finished" list and
// drop it when process finishes
SCHED.switch(true);
panic!("This code should not run");
} else {
// Can drop this thread: it's not running
todo!();
}
// switch
// };
// if switch {
// // TODO retain thread ID in process "finished" list and
// // drop it when process finishes
// SCHED.switch(true);
// panic!("This code should not run");
// } else {
// // Can drop this thread: it's not running
// todo!();
// }
}
fn collect(&self) -> Option<ExitCode> {
@ -381,11 +391,9 @@ impl Process {
phys
} else {
let page = phys::alloc_page(PageUsage::UserPrivate)?;
let flags = MapAttributes::SH_OUTER
let flags = MapAttributes::SHARE_OUTER
| MapAttributes::NOT_GLOBAL
| MapAttributes::UXN
| MapAttributes::PXN
| MapAttributes::AP_BOTH_READONLY;
| MapAttributes::USER_READ;
space.map(page_virt, page, flags)?;
page
};
@ -405,11 +413,9 @@ impl Process {
phys
} else {
let page = phys::alloc_page(PageUsage::UserPrivate)?;
let flags = MapAttributes::SH_OUTER
let flags = MapAttributes::SHARE_OUTER
| MapAttributes::NOT_GLOBAL
| MapAttributes::UXN
| MapAttributes::PXN
| MapAttributes::AP_BOTH_READONLY;
| MapAttributes::USER_READ;
space.map(page_virt, page, flags)?;
page
};
@ -464,16 +470,16 @@ impl Process {
(self.id().asid() as usize) << 48
}
pub fn invalidate_tlb(&self) {
Process::invalidate_asid(self.asid());
}
// pub fn invalidate_tlb(&self) {
// Process::invalidate_asid(self.asid());
// }
#[inline]
pub fn invalidate_asid(asid: usize) {
unsafe {
asm!("tlbi aside1, {}", in(reg) asid);
}
}
// #[inline]
// pub fn invalidate_asid(asid: usize) {
// unsafe {
// asm!("tlbi aside1, {}", in(reg) asid);
// }
// }
/// Loads a new program into current process address space
pub fn execve<F: FnOnce(&mut Space) -> Result<usize, Errno>>(
@ -482,7 +488,8 @@ impl Process {
) -> Result<(), Errno> {
unsafe {
// Run with interrupts disabled
asm!("msr daifset, #2");
// asm!("msr daifset, #2");
asm!("cli");
}
let proc = Process::current();
@ -519,11 +526,11 @@ impl Process {
let ustack_virt_bottom = Self::USTACK_VIRT_TOP - Self::USTACK_PAGES * mem::PAGE_SIZE;
for i in 0..Self::USTACK_PAGES {
let page = phys::alloc_page(PageUsage::UserPrivate).unwrap();
let flags = MapAttributes::SH_OUTER
let flags = MapAttributes::SHARE_OUTER
| MapAttributes::NOT_GLOBAL
| MapAttributes::UXN
| MapAttributes::PXN
| MapAttributes::AP_BOTH_READWRITE;
| MapAttributes::USER_WRITE
| MapAttributes::USER_READ
| MapAttributes::KERNEL_WRITE;
new_space
.map(ustack_virt_bottom + i * mem::PAGE_SIZE, page, flags)
.unwrap();
@ -539,7 +546,7 @@ impl Process {
// TODO drop old context
let ctx = thread.ctx.get();
let asid = (process_lock.id.asid() as usize) << 48;
Process::invalidate_asid(asid);
// Process::invalidate_asid(asid);
ctx.write(Context::user(
entry,

View File

@ -4,6 +4,7 @@ use crate::sync::IrqSafeSpinLock;
use crate::util::InitOnce;
use libsys::proc::Tid;
use alloc::{collections::VecDeque, rc::Rc};
use core::arch::asm;
struct SchedulerInner {
queue: VecDeque<Tid>,
@ -69,7 +70,8 @@ impl Scheduler {
THREADS.lock().get(&id).unwrap().clone()
};
asm!("msr daifset, #2");
asm!("cli");
// asm!("msr daifset, #2");
Thread::enter(thread)
}
@ -121,7 +123,8 @@ impl Scheduler {
if !Rc::ptr_eq(&from, &to) {
unsafe {
asm!("msr daifset, #2");
asm!("cli");
// asm!("msr daifset, #2");
Thread::switch(from, to, discard);
}
}
@ -150,7 +153,7 @@ pub fn is_ready() -> bool {
#[inline(never)]
extern "C" fn idle_fn(_a: usize) -> ! {
loop {
cortex_a::asm::wfi();
// cortex_a::asm::wfi();
}
}

View File

@ -1,6 +1,6 @@
//! Facilities for controlling threads - smallest units of
//! execution in the operating system
use crate::arch::aarch64::exception::ExceptionFrame;
// use crate::arch::aarch64::exception::ExceptionFrame;
use crate::proc::{
wait::{Wait, WaitStatus},
Process, ProcessRef, SCHED, THREADS,
@ -143,34 +143,34 @@ impl Thread {
Ok(res)
}
/// Creates a fork thread cloning `frame` context
pub fn fork(
owner: Option<Pid>,
frame: &ExceptionFrame,
ttbr0: usize,
) -> Result<ThreadRef, Errno> {
let id = new_tid();
// /// Creates a fork thread cloning `frame` context
// pub fn fork(
// owner: Option<Pid>,
// frame: &ExceptionFrame,
// ttbr0: usize,
// ) -> Result<ThreadRef, Errno> {
// let id = new_tid();
let res = Rc::new(Self {
ctx: UnsafeCell::new(Context::fork(frame, ttbr0)),
signal_ctx: UnsafeCell::new(Context::empty()),
signal_pending: AtomicU32::new(0),
exit_wait: Wait::new("thread_exit"),
exit_status: InitOnce::new(),
inner: IrqSafeSpinLock::new(ThreadInner {
signal_entry: 0,
signal_stack: 0,
id,
owner,
pending_wait: None,
wait_status: WaitStatus::Done,
state: State::Ready,
}),
});
debugln!("Forked new user thread: {:?}", id);
assert!(THREADS.lock().insert(id, res.clone()).is_none());
Ok(res)
}
// let res = Rc::new(Self {
// ctx: UnsafeCell::new(Context::fork(frame, ttbr0)),
// signal_ctx: UnsafeCell::new(Context::empty()),
// signal_pending: AtomicU32::new(0),
// exit_wait: Wait::new("thread_exit"),
// exit_status: InitOnce::new(),
// inner: IrqSafeSpinLock::new(ThreadInner {
// signal_entry: 0,
// signal_stack: 0,
// id,
// owner,
// pending_wait: None,
// wait_status: WaitStatus::Done,
// state: State::Ready,
// }),
// });
// debugln!("Forked new user thread: {:?}", id);
// assert!(THREADS.lock().insert(id, res.clone()).is_none());
// Ok(res)
// }
/// Returns the thread ID
#[inline]

View File

@ -39,35 +39,37 @@ pub static WAIT_SELECT: Wait = Wait::new("select");
/// Checks for any timed out wait channels and interrupts them
pub fn tick() {
let time = machine::local_timer().timestamp().unwrap();
let mut list = TICK_LIST.lock();
let mut cursor = list.cursor_front_mut();
todo!();
// let time = machine::local_timer().timestamp().unwrap();
// let mut list = TICK_LIST.lock();
// let mut cursor = list.cursor_front_mut();
while let Some(item) = cursor.current() {
if time > item.deadline {
let tid = item.tid;
cursor.remove_current();
SCHED.enqueue(tid);
} else {
cursor.move_next();
}
}
// while let Some(item) = cursor.current() {
// if time > item.deadline {
// let tid = item.tid;
// cursor.remove_current();
// SCHED.enqueue(tid);
// } else {
// cursor.move_next();
// }
// }
}
/// Suspends current process for given duration
pub fn sleep(timeout: Duration, remaining: &mut Duration) -> Result<(), Errno> {
// Dummy wait descriptor which will never receive notifications
static SLEEP_NOTIFY: Wait = Wait::new("sleep");
let deadline = machine::local_timer().timestamp()? + timeout;
match SLEEP_NOTIFY.wait(Some(deadline)) {
Err(Errno::Interrupt) => {
*remaining = deadline - machine::local_timer().timestamp()?;
Err(Errno::Interrupt)
}
Err(Errno::TimedOut) => Ok(()),
Ok(_) => panic!("Impossible result"),
res => res,
}
todo!()
// // Dummy wait descriptor which will never receive notifications
// static SLEEP_NOTIFY: Wait = Wait::new("sleep");
// let deadline = machine::local_timer().timestamp()? + timeout;
// match SLEEP_NOTIFY.wait(Some(deadline)) {
// Err(Errno::Interrupt) => {
// *remaining = deadline - machine::local_timer().timestamp()?;
// Err(Errno::Interrupt)
// }
// Err(Errno::TimedOut) => Ok(()),
// Ok(_) => panic!("Impossible result"),
// res => res,
// }
}
/// Suspends current process until some file descriptor
@ -78,49 +80,51 @@ pub fn select(
mut wfds: Option<&mut FdSet>,
timeout: Option<Duration>,
) -> Result<usize, Errno> {
if wfds.is_none() && rfds.is_none() {
todo!();
}
let read = rfds.as_deref().map(FdSet::clone);
let write = wfds.as_deref().map(FdSet::clone);
if let Some(rfds) = &mut rfds {
rfds.reset();
}
if let Some(wfds) = &mut wfds {
wfds.reset();
}
let deadline = timeout.map(|v| v + machine::local_timer().timestamp().unwrap());
let proc = thread.owner().unwrap();
let mut io = proc.io.lock();
// if wfds.is_none() && rfds.is_none() {
// todo!();
// }
// let read = rfds.as_deref().map(FdSet::clone);
// let write = wfds.as_deref().map(FdSet::clone);
// if let Some(rfds) = &mut rfds {
// rfds.reset();
// }
// if let Some(wfds) = &mut wfds {
// wfds.reset();
// }
loop {
if let Some(read) = &read {
for fd in read.iter() {
let file = io.file(fd)?;
if file.borrow().is_ready(false)? {
rfds.as_mut().unwrap().set(fd);
return Ok(1);
}
}
}
if let Some(write) = &write {
for fd in write.iter() {
let file = io.file(fd)?;
if file.borrow().is_ready(true)? {
wfds.as_mut().unwrap().set(fd);
return Ok(1);
}
}
}
// let deadline = timeout.map(|v| v + machine::local_timer().timestamp().unwrap());
// let proc = thread.owner().unwrap();
// let mut io = proc.io.lock();
// Suspend
match WAIT_SELECT.wait(deadline) {
Err(Errno::TimedOut) => return Ok(0),
Err(e) => return Err(e),
Ok(_) => {}
}
}
// loop {
// if let Some(read) = &read {
// for fd in read.iter() {
// let file = io.file(fd)?;
// if file.borrow().is_ready(false)? {
// rfds.as_mut().unwrap().set(fd);
// return Ok(1);
// }
// }
// }
// if let Some(write) = &write {
// for fd in write.iter() {
// let file = io.file(fd)?;
// if file.borrow().is_ready(true)? {
// wfds.as_mut().unwrap().set(fd);
// return Ok(1);
// }
// }
// }
// // Suspend
// match WAIT_SELECT.wait(deadline) {
// Err(Errno::TimedOut) => return Ok(0),
// Err(e) => return Err(e),
// Ok(_) => {}
// }
// }
}
impl Wait {
@ -134,61 +138,65 @@ impl Wait {
/// Interrupts a wait pending on the channel
pub fn abort(&self, tid: Tid, enqueue: bool) {
let mut queue = self.queue.lock();
let mut tick_lock = TICK_LIST.lock();
let mut cursor = tick_lock.cursor_front_mut();
while let Some(item) = cursor.current() {
if tid == item.tid {
cursor.remove_current();
break;
} else {
cursor.move_next();
}
}
todo!();
let mut cursor = queue.cursor_front_mut();
while let Some(item) = cursor.current() {
if tid == *item {
cursor.remove_current();
let thread = Thread::get(tid).unwrap();
thread.set_wait_status(WaitStatus::Interrupted);
if enqueue {
SCHED.enqueue(tid);
}
break;
} else {
cursor.move_next();
}
}
// let mut queue = self.queue.lock();
// let mut tick_lock = TICK_LIST.lock();
// let mut cursor = tick_lock.cursor_front_mut();
// while let Some(item) = cursor.current() {
// if tid == item.tid {
// cursor.remove_current();
// break;
// } else {
// cursor.move_next();
// }
// }
// let mut cursor = queue.cursor_front_mut();
// while let Some(item) = cursor.current() {
// if tid == *item {
// cursor.remove_current();
// let thread = Thread::get(tid).unwrap();
// thread.set_wait_status(WaitStatus::Interrupted);
// if enqueue {
// SCHED.enqueue(tid);
// }
// break;
// } else {
// cursor.move_next();
// }
// }
}
fn wakeup_some(&self, mut limit: usize) -> usize {
// No IRQs will arrive now == safe to manipulate tick list
let mut queue = self.queue.lock();
let mut count = 0;
while limit != 0 && !queue.is_empty() {
let tid = queue.pop_front();
if let Some(tid) = tid {
let mut tick_lock = TICK_LIST.lock();
let mut cursor = tick_lock.cursor_front_mut();
while let Some(item) = cursor.current() {
if tid == item.tid {
cursor.remove_current();
break;
} else {
cursor.move_next();
}
}
drop(tick_lock);
todo!();
Thread::get(tid).unwrap().set_wait_status(WaitStatus::Done);
SCHED.enqueue(tid);
}
// // No IRQs will arrive now == safe to manipulate tick list
// let mut queue = self.queue.lock();
// let mut count = 0;
// while limit != 0 && !queue.is_empty() {
// let tid = queue.pop_front();
// if let Some(tid) = tid {
// let mut tick_lock = TICK_LIST.lock();
// let mut cursor = tick_lock.cursor_front_mut();
// while let Some(item) = cursor.current() {
// if tid == item.tid {
// cursor.remove_current();
// break;
// } else {
// cursor.move_next();
// }
// }
// drop(tick_lock);
limit -= 1;
count += 1;
}
count
// Thread::get(tid).unwrap().set_wait_status(WaitStatus::Done);
// SCHED.enqueue(tid);
// }
// limit -= 1;
// count += 1;
// }
// count
}
/// Notifies all processes waiting for this event
@ -204,51 +212,53 @@ impl Wait {
/// Suspends current process until event is signalled or
/// (optional) deadline is reached
pub fn wait(&self, deadline: Option<Duration>) -> Result<(), Errno> {
let thread = Thread::current();
//let deadline = timeout.map(|t| machine::local_timer().timestamp().unwrap() + t);
let mut queue_lock = self.queue.lock();
todo!();
queue_lock.push_back(thread.id());
thread.setup_wait(self);
// let thread = Thread::current();
// //let deadline = timeout.map(|t| machine::local_timer().timestamp().unwrap() + t);
// let mut queue_lock = self.queue.lock();
if let Some(deadline) = deadline {
TICK_LIST.lock().push_back(Timeout {
tid: thread.id(),
deadline,
});
}
// queue_lock.push_back(thread.id());
// thread.setup_wait(self);
loop {
match thread.wait_status() {
WaitStatus::Pending => {}
WaitStatus::Done => {
return Ok(());
}
WaitStatus::Interrupted => {
return Err(Errno::Interrupt);
}
};
// if let Some(deadline) = deadline {
// TICK_LIST.lock().push_back(Timeout {
// tid: thread.id(),
// deadline,
// });
// }
drop(queue_lock);
thread.enter_wait();
queue_lock = self.queue.lock();
// loop {
// match thread.wait_status() {
// WaitStatus::Pending => {}
// WaitStatus::Done => {
// return Ok(());
// }
// WaitStatus::Interrupted => {
// return Err(Errno::Interrupt);
// }
// };
if let Some(deadline) = deadline {
if machine::local_timer().timestamp()? > deadline {
let mut cursor = queue_lock.cursor_front_mut();
// drop(queue_lock);
// thread.enter_wait();
// queue_lock = self.queue.lock();
while let Some(&mut item) = cursor.current() {
if thread.id() == item {
cursor.remove_current();
break;
} else {
cursor.move_next();
}
}
// if let Some(deadline) = deadline {
// if machine::local_timer().timestamp()? > deadline {
// let mut cursor = queue_lock.cursor_front_mut();
return Err(Errno::TimedOut);
}
}
}
// while let Some(&mut item) = cursor.current() {
// if thread.id() == item {
// cursor.remove_current();
// break;
// } else {
// cursor.move_next();
// }
// }
// return Err(Errno::TimedOut);
// }
// }
// }
}
}

View File

@ -38,7 +38,7 @@ impl<T> IrqSafeSpinLock<T> {
#[inline(always)]
unsafe fn force_release(&self) {
self.state.store(false, Ordering::Release);
cortex_a::asm::sev();
// cortex_a::asm::sev();
}
/// Returns [IrqSafeSpinLockGuard] for this lock
@ -47,7 +47,7 @@ impl<T> IrqSafeSpinLock<T> {
let irq_state = unsafe { irq_mask_save() };
while self.try_lock().is_err() {
cortex_a::asm::wfe();
// cortex_a::asm::wfe();
}
IrqSafeSpinLockGuard {

View File

@ -12,19 +12,22 @@ macro_rules! invalid_memory {
warnln!($($args)+);
#[cfg(feature = "aggressive_syscall")]
{
use libsys::signal::Signal;
use crate::proc::Thread;
todo!()
// use libsys::signal::Signal;
// use crate::proc::Thread;
let thread = Thread::current();
let proc = thread.owner().unwrap();
proc.enter_fault_signal(thread, Signal::SegmentationFault);
// let thread = Thread::current();
// let proc = thread.owner().unwrap();
// proc.enter_fault_signal(thread, Signal::SegmentationFault);
}
return Err(Errno::InvalidArgument);
}
}
#[inline(always)]
fn is_el0_accessible(virt: usize, write: bool) -> bool {
cfg_if! {
if #[cfg(target_arch = "aarch64")] {
#[inline(always)]
fn is_el0_accessible(virt: usize, write: bool) -> bool {
let mut res: usize;
unsafe {
if write {
@ -34,6 +37,14 @@ fn is_el0_accessible(virt: usize, write: bool) -> bool {
}
}
res & 1 == 0
}
} else {
#[inline(always)]
fn is_el0_accessible(virt: usize, write: bool) -> bool {
// TODO implement this
true
}
}
}
/// Checks given argument and interprets it as a `T` reference
@ -129,9 +140,10 @@ pub fn validate_ptr(base: usize, len: usize, write: bool) -> Result<(), Errno> {
// It's possible a CoW page hasn't yet been cloned when trying
// a write access
let res = if write {
todo!();
process.manipulate_space(|space| {
space.try_cow_copy(i * mem::PAGE_SIZE)?;
Process::invalidate_asid(asid);
// space.try_cow_copy(i * mem::PAGE_SIZE)?;
// Process::invalidate_asid(asid);
Ok(())
})
} else {

View File

@ -1,14 +1,18 @@
//! System call implementation
use crate::arch::{machine, platform::exception::ExceptionFrame};
// use crate::arch::{machine, platform::exception::ExceptionFrame};
use crate::arch::platform::ForkFrame;
use crate::debug::Level;
use crate::dev::timer::TimestampSource;
// use crate::dev::timer::TimestampSource;
use crate::fs::create_filesystem;
use crate::mem::{phys::PageUsage, virt::MapAttributes};
use crate::mem::{
phys::PageUsage,
virt::table::{AddressSpace, MapAttributes},
};
use crate::proc::{self, elf, wait, Process, ProcessIo, Thread};
use core::mem::size_of;
// use core::mem::size_of;
use core::ops::DerefMut;
use core::time::Duration;
// use core::time::Duration;
use libsys::{
abi::SystemCall,
debug::TraceLevel,
@ -33,7 +37,7 @@ pub mod arg;
///
/// Unsafe: accepts and clones process states. Only legal to call
/// from exception handlers.
pub unsafe fn sys_fork(regs: &mut ExceptionFrame) -> Result<Pid, Errno> {
pub unsafe fn sys_fork(regs: &mut ForkFrame) -> Result<Pid, Errno> {
Process::current().fork(regs)
}
@ -75,132 +79,132 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
io.file(fd)?.borrow_mut().write(buf)
}
SystemCall::Open => {
let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
let path = arg::str_ref(args[1], args[2])?;
let mode = FileMode::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
let opts = OpenFlags::from_bits(args[4] as u32).ok_or(Errno::InvalidArgument)?;
let proc = Process::current();
let mut io = proc.io.lock();
let at = if let Some(fd) = at_fd {
io.file(fd)?.borrow().node()
} else {
None
};
let file = io.ioctx().open(at, path, mode, opts)?;
Ok(u32::from(io.place_file(file)?) as usize)
}
SystemCall::Close => {
let proc = Process::current();
let mut io = proc.io.lock();
let fd = FileDescriptor::from(args[0] as u32);
io.close_file(fd)?;
Ok(0)
}
SystemCall::FileStatus => {
let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
let filename = arg::str_ref(args[1], args[2])?;
let buf = arg::struct_mut::<Stat>(args[3])?;
let flags = args[4] as u32;
let proc = Process::current();
let mut io = proc.io.lock();
let stat =
find_at_node(&mut io, at_fd, filename, flags & AT_EMPTY_PATH != 0)?.stat()?;
*buf = stat;
Ok(0)
}
SystemCall::Ioctl => {
let fd = FileDescriptor::from(args[0] as u32);
let cmd = IoctlCmd::try_from(args[1] as u32)?;
let proc = Process::current();
let mut io = proc.io.lock();
let node = io.file(fd)?.borrow().node().ok_or(Errno::InvalidFile)?;
node.ioctl(cmd, args[2], args[3])
}
SystemCall::Select => {
let rfds = arg::option_struct_mut::<FdSet>(args[0])?;
let wfds = arg::option_struct_mut::<FdSet>(args[1])?;
let timeout = if args[2] == 0 {
None
} else {
Some(Duration::from_nanos(args[2] as u64))
};
wait::select(Thread::current(), rfds, wfds, timeout)
}
SystemCall::Access => {
let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
let path = arg::str_ref(args[1], args[2])?;
let mode = AccessMode::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
let flags = args[4] as u32;
let proc = Process::current();
let mut io = proc.io.lock();
find_at_node(&mut io, at_fd, path, flags & AT_EMPTY_PATH != 0)?
.check_access(io.ioctx(), mode)?;
Ok(0)
}
SystemCall::ReadDirectory => {
let proc = Process::current();
let fd = FileDescriptor::from(args[0] as u32);
let mut io = proc.io.lock();
let buf = arg::struct_buf_mut::<DirectoryEntry>(args[1], args[2])?;
io.file(fd)?.borrow_mut().readdir(buf)
}
SystemCall::GetUserId => {
let proc = Process::current();
let uid = proc.io.lock().uid();
Ok(u32::from(uid) as usize)
}
SystemCall::GetGroupId => {
let proc = Process::current();
let gid = proc.io.lock().gid();
Ok(u32::from(gid) as usize)
}
SystemCall::DuplicateFd => {
let src = FileDescriptor::from(args[0] as u32);
let dst = FileDescriptor::from_i32(args[1] as i32)?;
let proc = Process::current();
let mut io = proc.io.lock();
let res = io.duplicate_file(src, dst)?;
Ok(u32::from(res) as usize)
}
SystemCall::SetUserId => {
let uid = UserId::from(args[0] as u32);
let proc = Process::current();
proc.io.lock().set_uid(uid)?;
Ok(0)
}
SystemCall::SetGroupId => {
let gid = GroupId::from(args[0] as u32);
let proc = Process::current();
proc.io.lock().set_gid(gid)?;
Ok(0)
}
SystemCall::SetCurrentDirectory => {
let path = arg::str_ref(args[0], args[1])?;
let proc = Process::current();
proc.io.lock().ioctx().chdir(path)?;
Ok(0)
}
SystemCall::GetCurrentDirectory => {
todo!()
}
SystemCall::Seek => {
todo!()
}
// SystemCall::Open => {
// let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
// let path = arg::str_ref(args[1], args[2])?;
// let mode = FileMode::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
// let opts = OpenFlags::from_bits(args[4] as u32).ok_or(Errno::InvalidArgument)?;
//
// let proc = Process::current();
// let mut io = proc.io.lock();
//
// let at = if let Some(fd) = at_fd {
// io.file(fd)?.borrow().node()
// } else {
// None
// };
//
// let file = io.ioctx().open(at, path, mode, opts)?;
// Ok(u32::from(io.place_file(file)?) as usize)
// }
// SystemCall::Close => {
// let proc = Process::current();
// let mut io = proc.io.lock();
// let fd = FileDescriptor::from(args[0] as u32);
//
// io.close_file(fd)?;
// Ok(0)
// }
// SystemCall::FileStatus => {
// let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
// let filename = arg::str_ref(args[1], args[2])?;
// let buf = arg::struct_mut::<Stat>(args[3])?;
// let flags = args[4] as u32;
//
// let proc = Process::current();
// let mut io = proc.io.lock();
// let stat =
// find_at_node(&mut io, at_fd, filename, flags & AT_EMPTY_PATH != 0)?.stat()?;
// *buf = stat;
// Ok(0)
// }
// SystemCall::Ioctl => {
// let fd = FileDescriptor::from(args[0] as u32);
// let cmd = IoctlCmd::try_from(args[1] as u32)?;
//
// let proc = Process::current();
// let mut io = proc.io.lock();
//
// let node = io.file(fd)?.borrow().node().ok_or(Errno::InvalidFile)?;
// node.ioctl(cmd, args[2], args[3])
// }
// SystemCall::Select => {
// let rfds = arg::option_struct_mut::<FdSet>(args[0])?;
// let wfds = arg::option_struct_mut::<FdSet>(args[1])?;
// let timeout = if args[2] == 0 {
// None
// } else {
// Some(Duration::from_nanos(args[2] as u64))
// };
//
// wait::select(Thread::current(), rfds, wfds, timeout)
// }
// SystemCall::Access => {
// let at_fd = FileDescriptor::from_i32(args[0] as i32)?;
// let path = arg::str_ref(args[1], args[2])?;
// let mode = AccessMode::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
// let flags = args[4] as u32;
//
// let proc = Process::current();
// let mut io = proc.io.lock();
//
// find_at_node(&mut io, at_fd, path, flags & AT_EMPTY_PATH != 0)?
// .check_access(io.ioctx(), mode)?;
// Ok(0)
// }
// SystemCall::ReadDirectory => {
// let proc = Process::current();
// let fd = FileDescriptor::from(args[0] as u32);
// let mut io = proc.io.lock();
// let buf = arg::struct_buf_mut::<DirectoryEntry>(args[1], args[2])?;
//
// io.file(fd)?.borrow_mut().readdir(buf)
// }
// SystemCall::GetUserId => {
// let proc = Process::current();
// let uid = proc.io.lock().uid();
// Ok(u32::from(uid) as usize)
// }
// SystemCall::GetGroupId => {
// let proc = Process::current();
// let gid = proc.io.lock().gid();
// Ok(u32::from(gid) as usize)
// }
// SystemCall::DuplicateFd => {
// let src = FileDescriptor::from(args[0] as u32);
// let dst = FileDescriptor::from_i32(args[1] as i32)?;
//
// let proc = Process::current();
// let mut io = proc.io.lock();
//
// let res = io.duplicate_file(src, dst)?;
//
// Ok(u32::from(res) as usize)
// }
// SystemCall::SetUserId => {
// let uid = UserId::from(args[0] as u32);
// let proc = Process::current();
// proc.io.lock().set_uid(uid)?;
// Ok(0)
// }
// SystemCall::SetGroupId => {
// let gid = GroupId::from(args[0] as u32);
// let proc = Process::current();
// proc.io.lock().set_gid(gid)?;
// Ok(0)
// }
// SystemCall::SetCurrentDirectory => {
// let path = arg::str_ref(args[0], args[1])?;
// let proc = Process::current();
// proc.io.lock().ioctx().chdir(path)?;
// Ok(0)
// }
// SystemCall::GetCurrentDirectory => {
// todo!()
// }
// SystemCall::Seek => {
// todo!()
// }
SystemCall::MapMemory => {
let len = args[1];
if len == 0 || (len & 0xFFF) != 0 {
@ -210,7 +214,7 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
let _flags = MemoryAccess::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
let mut attrs =
MapAttributes::NOT_GLOBAL | MapAttributes::SH_OUTER | MapAttributes::PXN;
MapAttributes::NOT_GLOBAL | MapAttributes::SHARE_OUTER | MapAttributes::USER_READ;
if !acc.contains(MemoryAccess::READ) {
return Err(Errno::NotImplemented);
}
@ -218,12 +222,12 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
if acc.contains(MemoryAccess::EXEC) {
return Err(Errno::PermissionDenied);
}
attrs |= MapAttributes::AP_BOTH_READWRITE;
attrs |= MapAttributes::USER_WRITE | MapAttributes::KERNEL_WRITE;
} else {
attrs |= MapAttributes::AP_BOTH_READONLY;
attrs |= MapAttributes::USER_READ;
}
if !acc.contains(MemoryAccess::EXEC) {
attrs |= MapAttributes::UXN;
attrs |= MapAttributes::USER_EXEC;
}
// TODO don't ignore flags
@ -235,186 +239,186 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
space.allocate(0x100000000, 0xF00000000, len / 4096, attrs, usage)
})
}
SystemCall::UnmapMemory => {
let addr = args[0];
let len = args[1];
if addr == 0 || len == 0 || addr & 0xFFF != 0 || len & 0xFFF != 0 {
return Err(Errno::InvalidArgument);
}
let proc = Process::current();
proc.manipulate_space(move |space| space.free(addr, len / 4096))?;
Ok(0)
}
// Process
SystemCall::Clone => {
let entry = args[0];
let stack = args[1];
let arg = args[2];
Process::current()
.new_user_thread(entry, stack, arg)
.map(|e| u32::from(e) as usize)
}
SystemCall::Exec => {
let filename = arg::str_ref(args[0], args[1])?;
let argv = arg::struct_buf_ref::<&str>(args[2], args[3])?;
// Validate each argument as well
for item in argv.iter() {
arg::validate_ptr(item.as_ptr() as usize, item.len(), false)?;
}
let node = {
let proc = Process::current();
let mut io = proc.io.lock();
// TODO argv, envp array passing ABI?
let node = io.ioctx().find(None, filename, true)?;
drop(io);
node
};
let file = node.open(OpenFlags::O_RDONLY)?;
Process::execve(move |space| elf::load_elf(space, file), argv).unwrap();
panic!();
}
SystemCall::Exit => {
let status = ExitCode::from(args[0] as i32);
let flags = args[1];
if flags & (1 << 0) != 0 {
Process::exit_thread(Thread::current(), status);
} else {
Process::current().exit(status);
}
unreachable!();
}
SystemCall::WaitPid => {
// TODO special "pid" values
let pid = Pid::try_from(args[0] as u32)?;
let status = arg::struct_mut::<i32>(args[1])?;
match Process::waitpid(pid) {
Ok(exit) => {
*status = i32::from(exit);
Ok(0)
}
e => e.map(|e| i32::from(e) as usize),
}
}
SystemCall::WaitTid => {
let tid = Tid::from(args[0] as u32);
match Thread::waittid(tid) {
Ok(_) => Ok(0),
_ => todo!(),
}
}
SystemCall::GetPid => Ok(u32::from(Process::current().id()) as usize),
SystemCall::GetTid => Ok(u32::from(Thread::current().id()) as usize),
SystemCall::Sleep => {
let rem_buf = arg::option_buf_ref(args[1], size_of::<u64>() * 2)?;
let mut rem = Duration::new(0, 0);
let res = wait::sleep(Duration::from_nanos(args[0] as u64), &mut rem);
if res == Err(Errno::Interrupt) {
warnln!("Sleep interrupted, {:?} remaining", rem);
if rem_buf.is_some() {
todo!()
}
}
res.map(|_| 0)
}
SystemCall::SetSignalEntry => {
Thread::current().set_signal_entry(args[0], args[1]);
Ok(0)
}
SystemCall::SignalReturn => {
Thread::current().return_from_signal();
unreachable!();
}
SystemCall::SendSignal => {
let target = SignalDestination::from(args[0] as isize);
let signal = Signal::try_from(args[1] as u32)?;
match target {
SignalDestination::This => Process::current().set_signal(signal),
SignalDestination::Process(pid) => Process::get(pid)
.ok_or(Errno::DoesNotExist)?
.set_signal(signal),
_ => todo!(),
};
Ok(0)
}
SystemCall::Yield => {
proc::switch();
Ok(0)
}
SystemCall::GetSid => {
// TODO handle kernel processes here?
let pid = Pid::to_option(args[0] as u32);
let current = Process::current();
let proc = if let Some(pid) = pid {
let proc = Process::get(pid).ok_or(Errno::DoesNotExist)?;
if proc.sid() != current.sid() {
return Err(Errno::PermissionDenied);
}
proc
} else {
current
};
Ok(u32::from(proc.sid()) as usize)
}
SystemCall::GetPgid => {
// TODO handle kernel processes here?
let pid = Pid::to_option(args[0] as u32);
let current = Process::current();
let proc = if let Some(pid) = pid {
Process::get(pid).ok_or(Errno::DoesNotExist)?
} else {
current
};
Ok(u32::from(proc.pgid()) as usize)
}
SystemCall::GetPpid => Ok(u32::from(Process::current().ppid().unwrap()) as usize),
SystemCall::SetSid => {
let proc = Process::current();
let mut io = proc.io.lock();
if let Some(_ctty) = io.ctty() {
todo!();
}
let id = proc.id();
proc.set_sid(id);
Ok(u32::from(id) as usize)
}
SystemCall::SetPgid => {
let pid = Pid::to_option(args[0] as u32);
let pgid = Pid::to_option(args[1] as u32);
let current = Process::current();
let proc = if let Some(_pid) = pid {
todo!()
} else {
current
};
if let Some(_pgid) = pgid {
todo!();
} else {
proc.set_pgid(proc.id());
}
Ok(u32::from(proc.pgid()) as usize)
}
// System
SystemCall::GetCpuTime => {
let time = machine::local_timer().timestamp()?;
Ok(time.as_nanos() as usize)
}
// SystemCall::UnmapMemory => {
// let addr = args[0];
// let len = args[1];
//
// if addr == 0 || len == 0 || addr & 0xFFF != 0 || len & 0xFFF != 0 {
// return Err(Errno::InvalidArgument);
// }
//
// let proc = Process::current();
// proc.manipulate_space(move |space| space.free(addr, len / 4096))?;
// Ok(0)
// }
//
// // Process
// SystemCall::Clone => {
// let entry = args[0];
// let stack = args[1];
// let arg = args[2];
//
// Process::current()
// .new_user_thread(entry, stack, arg)
// .map(|e| u32::from(e) as usize)
// }
// SystemCall::Exec => {
// let filename = arg::str_ref(args[0], args[1])?;
// let argv = arg::struct_buf_ref::<&str>(args[2], args[3])?;
// // Validate each argument as well
// for item in argv.iter() {
// arg::validate_ptr(item.as_ptr() as usize, item.len(), false)?;
// }
// let node = {
// let proc = Process::current();
// let mut io = proc.io.lock();
// // TODO argv, envp array passing ABI?
// let node = io.ioctx().find(None, filename, true)?;
// drop(io);
// node
// };
// let file = node.open(OpenFlags::O_RDONLY)?;
// Process::execve(move |space| elf::load_elf(space, file), argv).unwrap();
// panic!();
// }
// SystemCall::Exit => {
// let status = ExitCode::from(args[0] as i32);
// let flags = args[1];
//
// if flags & (1 << 0) != 0 {
// Process::exit_thread(Thread::current(), status);
// } else {
// Process::current().exit(status);
// }
//
// unreachable!();
// }
// SystemCall::WaitPid => {
// // TODO special "pid" values
// let pid = Pid::try_from(args[0] as u32)?;
// let status = arg::struct_mut::<i32>(args[1])?;
//
// match Process::waitpid(pid) {
// Ok(exit) => {
// *status = i32::from(exit);
// Ok(0)
// }
// e => e.map(|e| i32::from(e) as usize),
// }
// }
// SystemCall::WaitTid => {
// let tid = Tid::from(args[0] as u32);
//
// match Thread::waittid(tid) {
// Ok(_) => Ok(0),
// _ => todo!(),
// }
// }
// SystemCall::GetPid => Ok(u32::from(Process::current().id()) as usize),
// SystemCall::GetTid => Ok(u32::from(Thread::current().id()) as usize),
// SystemCall::Sleep => {
// let rem_buf = arg::option_buf_ref(args[1], size_of::<u64>() * 2)?;
// let mut rem = Duration::new(0, 0);
// let res = wait::sleep(Duration::from_nanos(args[0] as u64), &mut rem);
// if res == Err(Errno::Interrupt) {
// warnln!("Sleep interrupted, {:?} remaining", rem);
// if rem_buf.is_some() {
// todo!()
// }
// }
// res.map(|_| 0)
// }
// SystemCall::SetSignalEntry => {
// Thread::current().set_signal_entry(args[0], args[1]);
// Ok(0)
// }
// SystemCall::SignalReturn => {
// Thread::current().return_from_signal();
// unreachable!();
// }
// SystemCall::SendSignal => {
// let target = SignalDestination::from(args[0] as isize);
// let signal = Signal::try_from(args[1] as u32)?;
//
// match target {
// SignalDestination::This => Process::current().set_signal(signal),
// SignalDestination::Process(pid) => Process::get(pid)
// .ok_or(Errno::DoesNotExist)?
// .set_signal(signal),
// _ => todo!(),
// };
// Ok(0)
// }
// SystemCall::Yield => {
// proc::switch();
// Ok(0)
// }
// SystemCall::GetSid => {
// // TODO handle kernel processes here?
// let pid = Pid::to_option(args[0] as u32);
// let current = Process::current();
// let proc = if let Some(pid) = pid {
// let proc = Process::get(pid).ok_or(Errno::DoesNotExist)?;
// if proc.sid() != current.sid() {
// return Err(Errno::PermissionDenied);
// }
// proc
// } else {
// current
// };
//
// Ok(u32::from(proc.sid()) as usize)
// }
// SystemCall::GetPgid => {
// // TODO handle kernel processes here?
// let pid = Pid::to_option(args[0] as u32);
// let current = Process::current();
// let proc = if let Some(pid) = pid {
// Process::get(pid).ok_or(Errno::DoesNotExist)?
// } else {
// current
// };
//
// Ok(u32::from(proc.pgid()) as usize)
// }
// SystemCall::GetPpid => Ok(u32::from(Process::current().ppid().unwrap()) as usize),
// SystemCall::SetSid => {
// let proc = Process::current();
// let mut io = proc.io.lock();
//
// if let Some(_ctty) = io.ctty() {
// todo!();
// }
//
// let id = proc.id();
// proc.set_sid(id);
// Ok(u32::from(id) as usize)
// }
// SystemCall::SetPgid => {
// let pid = Pid::to_option(args[0] as u32);
// let pgid = Pid::to_option(args[1] as u32);
//
// let current = Process::current();
// let proc = if let Some(_pid) = pid {
// todo!()
// } else {
// current
// };
//
// if let Some(_pgid) = pgid {
// todo!();
// } else {
// proc.set_pgid(proc.id());
// }
//
// Ok(u32::from(proc.pgid()) as usize)
// }
//
// // System
// SystemCall::GetCpuTime => {
// let time = machine::local_timer().timestamp()?;
// Ok(time.as_nanos() as usize)
// }
SystemCall::Mount => {
let target = arg::str_ref(args[0], args[1])?;
let options = arg::struct_ref::<MountOptions>(args[2])?;
@ -430,7 +434,7 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
target_node.mount(root)?;
Ok(0)
}
},
// Debugging
SystemCall::DebugTrace => {
@ -444,8 +448,9 @@ fn _syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
Ok(args[1])
}
// Handled elsewhere
SystemCall::Fork => unreachable!(),
// // Handled elsewhere
// SystemCall::Fork => unreachable!(),
_ => panic!("Unimplemented: {:?}", num),
}
}
@ -455,7 +460,7 @@ pub fn syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
let process = thread.owner().unwrap();
let result = _syscall(num, args);
if !thread.is_handling_signal() {
process.handle_pending_signals();
// process.handle_pending_signals();
}
result
}

View File

@ -20,6 +20,14 @@ impl<T> InitOnce<T> {
}
}
pub fn as_ref_option(&self) -> Option<&T> {
if self.is_initialized() {
Some(self.get())
} else {
None
}
}
/// Returns `true` if this [InitOnce<T>] can be used
#[inline(always)]
pub fn is_initialized(&self) -> bool {
@ -29,7 +37,7 @@ impl<T> InitOnce<T> {
/// Returns the initialized value. Will panic if the value has not
/// yet been initialized.
#[allow(clippy::mut_from_ref)]
pub fn get(&self) -> &mut T {
pub fn get(&self) -> &T {
assert!(self.is_initialized(), "Access to uninitialized InitOnce<T>");
unsafe { (*self.inner.get()).assume_init_mut() }
}
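// A minimal usage sketch for the accessors above (hypothetical call site; `FOO`
// stands for some already-declared `InitOnce<T>` static, only the methods shown
// here are assumed to exist):
// fn example_read() {
//     // `as_ref_option()` is the non-panicking path; `get()` would assert that
//     // initialization has already happened.
//     if let Some(value) = FOO.as_ref_option() {
//         let _ = value;
//     }
// }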

View File

@ -11,49 +11,62 @@ use crate::{
},
};
use core::time::Duration;
use core::arch::asm;
// Syscall ABI used by the macro below (mirrors the common x86_64 convention):
//   rax - syscall number on entry, return value on exit
//   rdi, rsi, rdx, r10, r8, r9 - arguments 1 through 6
//   rcx, r11 - clobbered by the `syscall` instruction itself
// TODO move this to libusr
#[cfg(target_arch = "x86_64")]
macro_rules! syscall {
($num:expr) => {{
let mut res: usize;
asm!("svc #0", out("x0") res, in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res,
             // `syscall` clobbers rcx and r11, so tell the compiler
             lateout("rcx") _, lateout("r11") _,
             options(nostack));
res
}};
($num:expr, $a0:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res,
in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res, in("rdi") $a0,
             lateout("rcx") _, lateout("r11") _,
             options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1,
in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res, in("rdi") $a0, in("rsi") $a1,
             lateout("rcx") _, lateout("r11") _,
             options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res, in("rdi") $a0, in("rsi") $a1,
in("rdx") $a2, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x3") $a3, in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res, in("rdi") $a0, in("rsi") $a1,
in("rdx") $a2, in("r10") $a3, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x3") $a3, in("x4") $a4, in("x8") $num.repr(), options(nostack));
let mut res: usize = $num.repr();
asm!("syscall",
inout("rax") res, in("rdi") $a0, in("rsi") $a1,
in("rdx") $a2, in("r10") $a3, in("r8") $a4, options(nostack));
res
}};
}
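// A minimal usage sketch for the macro above. Hypothetical call site: it only
// assumes the `SystemCall::GetPid` and `SystemCall::GetTid` variants handled by
// the kernel dispatcher and the `repr()` accessor the macro already relies on.
// fn example_usage() -> (usize, usize) {
//     let pid = unsafe { syscall!(SystemCall::GetPid) };
//     let tid = unsafe { syscall!(SystemCall::GetTid) };
//     (pid, tid)
// }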

View File

@ -40,7 +40,8 @@ extern "C" fn _start(arg: &'static ProgramArgs) -> ! {
fn panic_handler(pi: &PanicInfo) -> ! {
// TODO unwind to send panic argument back to parent thread
// TODO print to stdout/stderr (if available)
let thread = thread::current();
trace!(TraceLevel::Error, "{:?} panicked: {:?}", thread, pi);
sys::sys_exit(ExitCode::from(-1));
// let thread = thread::current();
// trace!(TraceLevel::Error, "{:?} panicked: {:?}", thread, pi);
loop {}
// sys::sys_exit(ExitCode::from(-1));
}

View File

@ -60,16 +60,16 @@ impl<T> JoinHandle<T> {
}
unsafe fn init_common(signal_stack_pointer: *mut u8) {
let tid = u32::from(sys_ex_gettid()) as u64;
asm!("msr tpidr_el0, {:x}", in(reg) tid);
// let tid = u32::from(sys_ex_gettid()) as u64;
// asm!("msr tpidr_el0, {:x}", in(reg) tid);
// thread::current() should be valid at this point
sys_ex_signal(
signal::signal_handler as usize,
signal_stack_pointer as usize,
)
.unwrap();
// sys_ex_signal(
// signal::signal_handler as usize,
// signal_stack_pointer as usize,
// )
// .unwrap();
}
pub(crate) unsafe fn init_main() {
@ -82,11 +82,12 @@ pub(crate) unsafe fn init_main() {
}
pub fn current() -> Thread {
let mut id: u64;
unsafe {
asm!("mrs {:x}, tpidr_el0", out(reg) id);
}
Thread { id: Tid::from(id as u32) }
todo!()
// let mut id: u64;
// unsafe {
// asm!("mrs {:x}, tpidr_el0", out(reg) id);
// }
// Thread { id: Tid::from(id as u32) }
}
pub fn spawn<F, T>(f: F) -> JoinHandle<T>

View File

@ -13,9 +13,9 @@ path = "src/init/main.rs"
name = "shell"
path = "src/bin/shell.rs"
[[bin]]
name = "fuzzy"
path = "src/bin/fuzzy.rs"
# [[bin]]
# name = "fuzzy"
# path = "src/bin/fuzzy.rs"
[[bin]]
name = "ls"

View File

@ -1,139 +1,141 @@
#![feature(asm)]
// #![feature(asm)]
#![no_std]
#![no_main]
#![allow(unused_macros)]
#![allow(dead_code)]
//
// #![allow(unused_macros)]
// #![allow(dead_code)]
//
#[macro_use]
extern crate libusr;
use libusr::sys::{abi::SystemCall, stat::Stat};
static mut STATE: u64 = 0;
macro_rules! syscall {
($num:expr) => {{
let mut res: usize;
asm!("svc #0", out("x0") res, in("x8") $num, options(nostack));
res
}};
($num:expr, $a0:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res,
in("x8") $num, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1,
in("x8") $num, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x8") $num, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x3") $a3, in("x8") $num, options(nostack));
res
}};
($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr) => {{
let mut res: usize = $a0;
asm!("svc #0",
inout("x0") res, in("x1") $a1, in("x2") $a2,
in("x3") $a3, in("x4") $a4, in("x8") $num, options(nostack));
res
}};
}
/// Integer/size argument
macro_rules! argn {
($a:expr) => {
$a as usize
};
}
/// Pointer/base argument
macro_rules! argp {
($a:expr) => {
$a as usize
};
}
fn random_set_seed(seed: u64) {
unsafe { STATE = seed; }
}
fn random_u64() -> u64 {
let mut x = unsafe { STATE };
x ^= x << 13;
x ^= x >> 7;
x ^= x << 17;
unsafe {
STATE = x;
}
x
}
fn random_ascii_char() -> u8 {
((random_u64() % (0x7F - 0x20)) as u8) + 0x20
}
fn random_str_range(buf: &mut [u8], min: usize, max: usize) -> &str {
let max = core::cmp::min(buf.len(), max);
assert!(max > min);
let len = ((random_u64() as usize) % (max - min)) + min;
for c in buf[..len].iter_mut() {
*c = random_ascii_char();
}
core::str::from_utf8(&buf[..len]).unwrap()
}
fn random_str(buf: &mut [u8]) -> &str {
random_str_range(buf, 0, buf.len())
}
fn random_bytes(buf: &mut [u8]) {
for byte in buf.iter_mut() {
*byte = (random_u64() & 0xFF) as u8;
}
}
//
// use libusr::sys::{abi::SystemCall, stat::Stat};
//
// static mut STATE: u64 = 0;
//
// macro_rules! syscall {
// ($num:expr) => {{
// let mut res: usize;
// asm!("svc #0", out("x0") res, in("x8") $num, options(nostack));
// res
// }};
// ($num:expr, $a0:expr) => {{
// let mut res: usize = $a0;
// asm!("svc #0",
// inout("x0") res,
// in("x8") $num, options(nostack));
// res
// }};
// ($num:expr, $a0:expr, $a1:expr) => {{
// let mut res: usize = $a0;
// asm!("svc #0",
// inout("x0") res, in("x1") $a1,
// in("x8") $num, options(nostack));
// res
// }};
// ($num:expr, $a0:expr, $a1:expr, $a2:expr) => {{
// let mut res: usize = $a0;
// asm!("svc #0",
// inout("x0") res, in("x1") $a1, in("x2") $a2,
// in("x8") $num, options(nostack));
// res
// }};
// ($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr) => {{
// let mut res: usize = $a0;
// asm!("svc #0",
// inout("x0") res, in("x1") $a1, in("x2") $a2,
// in("x3") $a3, in("x8") $num, options(nostack));
// res
// }};
// ($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr) => {{
// let mut res: usize = $a0;
// asm!("svc #0",
// inout("x0") res, in("x1") $a1, in("x2") $a2,
// in("x3") $a3, in("x4") $a4, in("x8") $num, options(nostack));
// res
// }};
// }
//
// /// Integer/size argument
// macro_rules! argn {
// ($a:expr) => {
// $a as usize
// };
// }
// /// Pointer/base argument
// macro_rules! argp {
// ($a:expr) => {
// $a as usize
// };
// }
//
// fn random_set_seed(seed: u64) {
// unsafe { STATE = seed; }
// }
//
// fn random_u64() -> u64 {
// let mut x = unsafe { STATE };
// x ^= x << 13;
// x ^= x >> 7;
// x ^= x << 17;
// unsafe {
// STATE = x;
// }
// x
// }
//
// fn random_ascii_char() -> u8 {
// ((random_u64() % (0x7F - 0x20)) as u8) + 0x20
// }
//
// fn random_str_range(buf: &mut [u8], min: usize, max: usize) -> &str {
// let max = core::cmp::min(buf.len(), max);
// assert!(max > min);
// let len = ((random_u64() as usize) % (max - min)) + min;
// for c in buf[..len].iter_mut() {
// *c = random_ascii_char();
// }
// core::str::from_utf8(&buf[..len]).unwrap()
// }
//
// fn random_str(buf: &mut [u8]) -> &str {
// random_str_range(buf, 0, buf.len())
// }
//
// fn random_bytes(buf: &mut [u8]) {
// for byte in buf.iter_mut() {
// *byte = (random_u64() & 0xFF) as u8;
// }
// }
//
#[no_mangle]
fn main() -> i32 {
let seed = libusr::sys::sys_ex_getcputime().unwrap().as_nanos() as u64 / 13;
println!("Using seed: {:#x}", seed);
random_set_seed(seed);
let mut buf = [0; 256];
// Test sys_ex_getcputime()
let mut prev_time = libusr::sys::sys_ex_getcputime().unwrap().as_nanos();
for _ in 0..1000 {
let t = libusr::sys::sys_ex_getcputime().unwrap().as_nanos();
assert!(t >= prev_time);
prev_time = t;
}
// Test non-utf8 input fed into syscalls expecting strings
// let old_signal = signal::set_handler(Signal::InvalidSystemCall, SignalHandler::Ignore);
for _ in 0..10000 {
random_bytes(&mut buf);
let mut stat = Stat::default();
unsafe {
syscall!(SystemCall::FileStatus.repr(), (-2i32) as usize, buf.as_mut_ptr() as usize, buf.len(), (&mut stat) as *mut _ as usize);
}
}
// signal::set_handler(Signal::InvalidSystemCall, old_signal);
0
loop {}
}
// let seed = libusr::sys::sys_ex_getcputime().unwrap().as_nanos() as u64 / 13;
// println!("Using seed: {:#x}", seed);
// random_set_seed(seed);
//
// let mut buf = [0; 256];
//
// // Test sys_ex_getcputime()
// let mut prev_time = libusr::sys::sys_ex_getcputime().unwrap().as_nanos();
// for _ in 0..1000 {
// let t = libusr::sys::sys_ex_getcputime().unwrap().as_nanos();
// assert!(t >= prev_time);
// prev_time = t;
// }
//
// // Test non-utf8 input fed into syscalls expecting strings
// // let old_signal = signal::set_handler(Signal::InvalidSystemCall, SignalHandler::Ignore);
// for _ in 0..10000 {
// random_bytes(&mut buf);
// let mut stat = Stat::default();
//
// unsafe {
// syscall!(SystemCall::FileStatus.repr(), (-2i32) as usize, buf.as_mut_ptr() as usize, buf.len(), (&mut stat) as *mut _ as usize);
// }
// }
// // signal::set_handler(Signal::InvalidSystemCall, old_signal);
//
// 0
// }

View File

@ -9,6 +9,9 @@ use libusr::sys::{stat::MountOptions, sys_execve, sys_fork, sys_mount, sys_waitp
#[no_mangle]
fn main() -> i32 {
println!("Test");
trace_debug!("test!");
sys_mount(
"/dev",
&MountOptions {
@ -26,18 +29,23 @@ fn main() -> i32 {
)
.expect("Failed to mount sysfs");
if let Some(pid) = unsafe { sys_fork().unwrap() } {
let mut status = 0;
sys_waitpid(pid, &mut status).unwrap();
println!("Process {:?} exited with status {}", pid, status);
let pid = unsafe { sys_fork().unwrap() };
loop {
unsafe {
asm!("nop");
}
}
} else {
sys_execve("/sbin/login", &["/sbin/login", "/dev/ttyS0"]).unwrap();
unreachable!();
}
trace_debug!("fork returned {:?}", pid);
loop {}
// loop {}
// if let Some(pid) = unsafe { sys_fork().unwrap() } {
// let mut status = 0;
// sys_waitpid(pid, &mut status).unwrap();
// println!("Process {:?} exited with status {}", pid, status);
// loop {
// }
// } else {
// sys_execve("/sbin/login", &["/sbin/login", "/dev/ttyS0"]).unwrap();
// unreachable!();
// }
}