x86-64: ring0/ring3 tasks + syscall

Mark Poliakov 2023-08-01 18:05:10 +03:00
parent 3a795f0229
commit a9b9c71e47
20 changed files with 1226 additions and 318 deletions


@ -10,7 +10,7 @@ yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
# vfs = { path = "lib/vfs" }
# memfs = { path = "lib/memfs" }
# atomic_enum = "0.2.0"
atomic_enum = "0.2.0"
bitflags = "2.3.3"
linked_list_allocator = "0.10.5"
spinning_top = "0.2.5"


@ -73,7 +73,7 @@ pub extern "C" fn __x86_64_apic_irq_handler(vector: u32, _frame: *mut ExceptionF
cpu.local_apic().clear_interrupt();
if vector == APIC_TIMER_VECTOR {
// TODO
unsafe { Cpu::local().queue().yield_cpu() }
} else if (APIC_EXTERNAL_OFFSET..APIC_EXTERNAL_OFFSET + MAX_EXTERNAL_VECTORS).contains(&vector)
{
PLATFORM.ioapic.handle_irq(vector - APIC_EXTERNAL_OFFSET);


@ -9,7 +9,7 @@ use yboot_proto::{
use crate::{
arch::{
x86_64::{apic::local::LocalApic, cpu::Cpu, exception, gdt},
x86_64::{apic::local::LocalApic, cpu::Cpu, cpuid, exception},
Architecture, ArchitectureImpl, PLATFORM,
},
debug,
@ -19,6 +19,7 @@ use crate::{
phys::{self, PageUsage},
ConvertAddress, KERNEL_VIRT_OFFSET,
},
task,
};
use super::ARCHITECTURE;
@ -73,6 +74,7 @@ unsafe extern "C" fn __x86_64_upper_entry() -> ! {
debug::init();
infoln!("Yggdrasil kernel git {} starting", git_version!());
cpuid::feature_gate();
if YBOOT_DATA.memory_map.address > 0xFFFFFFFF {
errorln!("Unhandled case: memory map is above 4GiB");
@ -86,9 +88,6 @@ unsafe extern "C" fn __x86_64_upper_entry() -> ! {
PLATFORM.init_rsdp(YBOOT_DATA.rsdp_address as _);
}
gdt::init();
exception::init_exceptions(0);
// Setup physical memory allocation
ArchitectureImpl::init_physical_memory(&YBOOT_DATA.memory_map);
@ -99,13 +98,13 @@ unsafe extern "C" fn __x86_64_upper_entry() -> ! {
// Also initializes local APIC
Cpu::init_local(LocalApic::new(), 0);
exception::init_exceptions(0);
PLATFORM.init(true).unwrap();
ArchitectureImpl::set_interrupt_mask(false);
loop {
ArchitectureImpl::wait_for_interrupt();
}
task::init().expect("Failed to initialize the scheduler");
task::enter()
}
global_asm!(

src/arch/x86_64/context.S (new file, 120 lines)

@ -0,0 +1,120 @@
.macro SAVE_TASK_STATE
sub ${context_size}, %rsp
mov %rbx, 0(%rsp)
mov %r12, 8(%rsp)
mov %r13, 16(%rsp)
mov %r14, 24(%rsp)
mov %r15, 32(%rsp)
// TODO save %fs
mov %rbp, 48(%rsp)
mov %cr3, %rbx
mov %rbx, 56(%rsp)
.endm
.macro LOAD_TASK_STATE
mov 56(%rsp), %rbx
mov %rbx, %cr3
mov 0(%rsp), %rbx
mov 8(%rsp), %r12
mov 16(%rsp), %r13
mov 24(%rsp), %r14
mov 32(%rsp), %r15
// TODO
// mov 40(%rsp), %fs
mov 48(%rsp), %rbp
add ${context_size}, %rsp
.endm
.global __x86_64_task_enter_user
.global __x86_64_task_enter_kernel
.global __x86_64_enter_task
.global __x86_64_switch_task
.section .text
__x86_64_task_enter_user:
// User stack pointer
popq %rcx
// Argument
popq %rdi
// Entry address
popq %rax
// SS:RSP
pushq $0x1B
pushq %rcx
// RFLAGS
pushq $0x200
// CS:RIP
pushq $0x23
pushq %rax
iretq
__x86_64_task_enter_kernel:
// Argument
popq %rdi
// Entry address
popq %rax
// Enable IRQ in RFLAGS
pushfq
popq %rdx
or $(1 << 9), %rdx
mov %rsp, %rcx
// SS:RSP
pushq $0x10
pushq %rcx
// RFLAGS
pushq %rdx
// CS:RIP
pushq $0x08
pushq %rax
iretq
// Arguments: %rdi - destination (to) context ptr, %rsi - source (from) context ptr
__x86_64_switch_task:
SAVE_TASK_STATE
mov %rsp, 0(%rsi)
// Fetch the new task's TSS.RSP0 value
mov 8(%rdi), %rax
// Switch to the new task's kernel stack
mov 0(%rdi), %rdi
mov %rdi, %rsp
// Store the fetched value into TSS.RSP0 (TSS address is held at %gs:8)
mov %gs:(8), %rdi
mov %rax, 4(%rdi)
LOAD_TASK_STATE
ret
// %rdi - to struct ptr
__x86_64_enter_task:
// Fetch the task's TSS.RSP0 value
mov 8(%rdi), %rax
// Switch to the task's kernel stack
mov 0(%rdi), %rdi
mov %rdi, %rsp
// Store the fetched value into TSS.RSP0 (TSS address is held at %gs:8)
mov %gs:(8), %rdi
mov %rax, 4(%rdi)
LOAD_TASK_STATE
ret
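For cross-reference with context.rs below, here is a sketch of the 8-slot frame that SAVE_TASK_STATE writes and LOAD_TASK_STATE consumes. The struct and its field names are illustrative only, not part of the commit; the offsets match COMMON_CONTEXT_SIZE = 8 * 8 in context.rs.

#[repr(C)]
struct SavedContext {
    rbx: u64, // 0(%rsp)
    r12: u64, // 8(%rsp)
    r13: u64, // 16(%rsp)
    r14: u64, // 24(%rsp)
    r15: u64, // 32(%rsp)
    fs: u64,  // 40(%rsp), reserved; saving %fs is still a TODO
    rbp: u64, // 48(%rsp)
    cr3: u64, // 56(%rsp)
}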

src/arch/x86_64/context.rs (new file, 190 lines)

@ -0,0 +1,190 @@
//! x86-64 task context setup and switching
use core::{arch::global_asm, cell::UnsafeCell};
use abi::error::Error;
use alloc::boxed::Box;
use crate::{
arch::x86_64::table::KERNEL_TABLES,
mem::{
phys::{self, PageUsage},
ConvertAddress,
},
};
struct StackBuilder {
base: usize,
sp: usize,
}
#[repr(C, align(0x10))]
struct Inner {
// 0x00
sp: usize,
// 0x08
tss_rsp0: usize,
}
/// x86-64 implementation of a task context
#[allow(dead_code)]
pub struct TaskContext {
inner: UnsafeCell<Inner>,
stack_base: usize,
stack_size: usize,
}
unsafe impl Sync for TaskContext {}
// 8 saved registers; the return address popped by `ret` is not included in the size
const COMMON_CONTEXT_SIZE: usize = 8 * 8;
impl StackBuilder {
fn new(base: usize, size: usize) -> Self {
Self {
base,
sp: base + size,
}
}
fn push(&mut self, value: usize) {
if self.sp == self.base {
panic!();
}
self.sp -= 8;
unsafe {
(self.sp as *mut usize).write_volatile(value);
}
}
fn _skip(&mut self, count: usize) {
self.sp -= count * 8;
if self.sp < self.base {
panic!();
}
}
fn build(self) -> usize {
self.sp
}
fn init_common(&mut self, entry: usize, cr3: usize) {
self.push(entry); // address for ret
// End of common context
self.push(cr3); // %cr3
self.push(0); // %rbp
self.push(0); // %fs (TODO)
self.push(0); // %r15
self.push(0); // %r14
self.push(0); // %r13
self.push(0); // %r12
self.push(0); // %rbx
}
}
impl TaskContext {
/// Constructs a kernel-space task context
pub fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 4;
let stack_base = unsafe {
phys::alloc_pages_contiguous(KERNEL_TASK_PAGES, PageUsage::Used)?.virtualize()
};
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
// Entry and argument
stack.push(entry as _);
stack.push(arg);
stack.init_common(__x86_64_task_enter_kernel as _, unsafe {
KERNEL_TABLES.physical_address()
});
let sp = stack.build();
infoln!("Kernel stack {:#x}", sp);
// TODO stack is leaked
Ok(Self {
inner: UnsafeCell::new(Inner { sp, tss_rsp0: 0 }),
stack_base,
stack_size: KERNEL_TASK_PAGES * 0x1000,
})
}
/// Constructs a user thread context. The caller is responsible for allocating the userspace
/// stack and setting up a valid address space for the context.
pub fn user(entry: usize, arg: usize, cr3: usize, user_stack_sp: usize) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 8;
let stack_base =
unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES, PageUsage::Used)?.virtualize() };
let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
stack.push(entry as _);
stack.push(arg);
stack.push(user_stack_sp);
stack.init_common(__x86_64_task_enter_user as _, cr3);
let sp = stack.build();
let rsp0 = stack_base + USER_TASK_PAGES * 0x1000;
debugln!("TSS.RSP0 = {:#x}", rsp0);
Ok(Self {
inner: UnsafeCell::new(Inner { sp, tss_rsp0: rsp0 }),
stack_base,
stack_size: USER_TASK_PAGES * 0x1000,
})
}
/// Constructs a safe wrapper process to execute a kernel-space closure
pub fn kernel_closure<F: FnOnce() + Send + 'static>(f: F) -> Result<Self, Error> {
extern "C" fn closure_wrapper<F: FnOnce() + Send + 'static>(closure_addr: usize) -> ! {
let closure = unsafe { Box::from_raw(closure_addr as *mut F) };
closure();
todo!("Process termination");
}
let closure = Box::new(f);
debugln!("closure: {:p}", closure);
Self::kernel(closure_wrapper::<F>, Box::into_raw(closure) as usize)
}
/// Performs an entry into a context.
///
/// # Safety
///
/// Only meant to be called from the scheduler code.
pub unsafe fn enter(&self) -> ! {
__x86_64_enter_task(self.inner.get())
}
/// Performs a context switch between two contexts.
///
/// # Safety
///
/// Only meant to be called from the scheduler code.
pub unsafe fn switch(&self, from: &Self) {
let to = self.inner.get();
let from = from.inner.get();
if to != from {
__x86_64_switch_task(to, from)
}
}
}
extern "C" {
fn __x86_64_task_enter_kernel();
fn __x86_64_task_enter_user();
fn __x86_64_enter_task(to: *mut Inner) -> !;
fn __x86_64_switch_task(to: *mut Inner, from: *mut Inner);
}
global_asm!(
include_str!("context.S"),
context_size = const COMMON_CONTEXT_SIZE,
options(att_syntax)
);
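A minimal usage sketch for the API above; the function and names here are hypothetical (the real spawn path lives in task/mod.rs further down):

fn spawn_demo() -> ! {
    // kernel_closure() boxes the closure and leaks the pointer into the new
    // task's stack as the entry argument; closure_wrapper() re-boxes it and
    // calls it on first entry.
    let ctx = TaskContext::kernel_closure(|| {
        debugln!("hello from a kernel task");
    })
    .expect("Failed to construct a task context");
    // Normally the context is owned by a Process and entered by the
    // scheduler; entering it directly transfers control and never returns.
    unsafe { ctx.enter() }
}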


@ -4,7 +4,11 @@ use core::ptr::null_mut;
use alloc::boxed::Box;
use tock_registers::interfaces::Writeable;
use crate::arch::x86_64::registers::MSR_IA32_KERNEL_GS_BASE;
use crate::{
arch::x86_64::{gdt, registers::MSR_IA32_KERNEL_GS_BASE},
task::sched::CpuQueue,
util::OneTimeInit,
};
use super::apic::local::LocalApic;
@ -13,9 +17,15 @@ use super::apic::local::LocalApic;
pub struct Cpu {
// 0x00
this: *mut Cpu,
// 0x08, used in assembly
tss_address: usize,
// 0x10, used in assembly for temporary RSP storage
tmp_address: usize,
// 0x08
id: u32,
local_apic: LocalApic,
id: u32,
queue: OneTimeInit<&'static CpuQueue>,
}
impl Cpu {
@ -25,10 +35,16 @@ impl Cpu {
///
/// Only meant to be called once per each CPU during their init.
pub unsafe fn init_local(local_apic: LocalApic, id: u32) {
let tss_address = gdt::init(id);
let this = Box::new(Cpu {
this: null_mut(),
local_apic,
tss_address,
tmp_address: 0,
id,
local_apic,
queue: OneTimeInit::new(),
});
let this = Box::into_raw(this);
(*this).this = this;
@ -44,6 +60,11 @@ impl Cpu {
Self::get_local().unwrap()
}
/// Returns the system ID of the CPU
pub fn local_id() -> u32 {
Self::local().id()
}
/// Returns this CPU's local data structure or None if it hasn't been set up yet
#[inline(always)]
pub fn get_local<'a>() -> Option<&'a Self> {
@ -63,4 +84,14 @@ impl Cpu {
pub fn local_apic(&self) -> &LocalApic {
&self.local_apic
}
/// Returns the CPU's execution queue
pub fn queue(&self) -> &'static CpuQueue {
self.queue.get()
}
/// Sets up the local CPU's execution queue
pub fn init_queue(&self, queue: &'static CpuQueue) {
self.queue.init(queue);
}
}

src/arch/x86_64/cpuid.rs (new file, 80 lines)

@ -0,0 +1,80 @@
//! x86-64 CPUID interface
use tock_registers::interfaces::ReadWriteable;
use crate::arch::x86_64::registers::CR4;
use super::registers::XCR0;
unsafe fn cpuid(eax: u32, result: &mut [u32]) {
core::arch::asm!(
r#"
push %rbx
cpuid
mov %ebx, {0:e}
pop %rbx
"#,
out(reg) result[0],
out("edx") result[1],
out("ecx") result[2],
in("eax") eax,
options(att_syntax)
);
}
type RequiredBit = (u32, &'static str);
const EAX1_ECX_REQUIRED_FEATURES: &[RequiredBit] = &[
(1 << 0, "SSE3"),
(1 << 19, "SSE4.1"),
(1 << 20, "SSE4.2"),
(1 << 24, "TSC"),
(1 << 26, "XSAVE"),
(1 << 28, "AVX"),
];
const EAX1_EDX_REQUIRED_FEATURES: &[RequiredBit] = &[
(1 << 0, "FPU"),
(1 << 3, "PSE"),
(1 << 4, "TSC (%edx)"),
(1 << 5, "MSR"),
(1 << 6, "PAE"),
(1 << 9, "APIC"),
(1 << 13, "PGE"),
(1 << 23, "MMX"),
(1 << 24, "FXSR"),
(1 << 25, "SSE"),
(1 << 26, "SSE2"),
];
fn enable_cr4_features() {
// TODO maybe also include FSGSBASE here?
CR4.modify(CR4::OSXSAVE::SET + CR4::OSFXSR::SET + CR4::PGE::SET);
}
fn enable_xcr0_features() {
XCR0.modify(XCR0::X87::SET + XCR0::SSE::SET + XCR0::AVX::SET);
}
/// Checks for the features required by the kernel and enables them
pub fn feature_gate() {
// TODO the compiler may have generated instructions from SSE/AVX sets by now, find some way to
// perform this as early as possible
let mut data = [0; 3];
unsafe {
cpuid(1, &mut data);
}
for (bit, name) in EAX1_ECX_REQUIRED_FEATURES {
if data[2] & bit == 0 {
panic!("Required feature not supported: {}", name);
}
}
for (bit, name) in EAX1_EDX_REQUIRED_FEATURES {
if data[1] & bit == 0 {
panic!("Required feature not supported: {}", name);
}
}
// Enable the SSE/AVX features
enable_cr4_features();
enable_xcr0_features();
}
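A worked example of the bit test feature_gate performs, assuming CPUID leaf 1; per the asm helper above, result[0] receives %ebx, result[1] %edx, and result[2] %ecx:

fn check_xsave() {
    let mut data = [0u32; 3];
    unsafe { cpuid(1, &mut data) };
    // Bit 26 of %ecx reports XSAVE support (see EAX1_ECX_REQUIRED_FEATURES)
    if data[2] & (1 << 26) == 0 {
        panic!("Required feature not supported: XSAVE");
    }
}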


@ -1,15 +1,20 @@
//! x86-64 exception and interrupt handling
use core::{arch::global_asm, mem::size_of_val};
use tock_registers::interfaces::{ReadWriteable, Writeable};
use crate::arch::{
x86_64::apic::{self, __x86_64_apic_irq_handler},
x86_64::{
apic::{self, __x86_64_apic_irq_handler},
registers::{MSR_IA32_EFER, MSR_IA32_LSTAR, MSR_IA32_SFMASK, MSR_IA32_STAR},
},
Architecture, ArchitectureImpl,
};
/// Set of registers saved when taking an exception/interrupt
/// Set of registers saved by most of the exception/syscall/irq handlers
#[derive(Debug)]
#[repr(C)]
pub struct ExceptionFrame {
pub struct CommonFrame {
rax: usize,
rcx: usize,
rdx: usize,
@ -25,8 +30,26 @@ pub struct ExceptionFrame {
r13: usize,
r14: usize,
r15: usize,
_0: usize,
}
/// Set of registers saved when taking an exception/interrupt
#[derive(Debug)]
#[repr(C)]
pub struct ExceptionFrame {
c: CommonFrame,
exc_number: usize,
exc_code: usize,
rip: usize,
cs: usize,
}
/// Set of registers saved when entering the kernel through a syscall instruction
#[derive(Debug)]
#[repr(C)]
pub struct SyscallFrame {
user_sp: usize,
c: CommonFrame,
}
/// Exception table entry
@ -88,6 +111,7 @@ static mut IDT: [Entry; SIZE] = [Entry::NULL; SIZE];
extern "C" fn __x86_64_exception_handler(frame: *mut ExceptionFrame) {
let frame = unsafe { &*frame };
errorln!("Exception {}", frame.exc_number);
errorln!("At {:#x}:{:#x}", frame.cs, frame.rip);
if frame.exc_number == 14 {
let cr2: usize;
@ -102,6 +126,11 @@ extern "C" fn __x86_64_exception_handler(frame: *mut ExceptionFrame) {
}
}
extern "C" fn __x86_64_syscall_handler(frame: *mut SyscallFrame) {
let frame = unsafe { &*frame };
debugln!("Syscall {}", frame.c.rax);
}
/// Initializes the interrupt descriptor table for the given CPU.
///
/// # Safety
@ -110,6 +139,7 @@ extern "C" fn __x86_64_exception_handler(frame: *mut ExceptionFrame) {
pub unsafe fn init_exceptions(_cpu_index: usize) {
extern "C" {
static __x86_64_exception_vectors: [usize; 32];
fn __x86_64_syscall_vector();
}
for (i, &entry) in __x86_64_exception_vectors.iter().enumerate() {
@ -124,11 +154,24 @@ pub unsafe fn init_exceptions(_cpu_index: usize) {
};
core::arch::asm!("wbinvd; lidt ({0})", in(reg) &idtr, options(att_syntax));
// Initialize syscall vector
MSR_IA32_LSTAR.set(__x86_64_syscall_vector as u64);
MSR_IA32_SFMASK.write(MSR_IA32_SFMASK::IF::Masked);
MSR_IA32_STAR.write(
// On sysret, CS = val + 16 (0x23), SS = val + 8 (0x1B)
MSR_IA32_STAR::SYSRET_CS_SS.val(0x1B - 8) +
// On syscall, CS = val (0x08), SS = val + 8 (0x10)
MSR_IA32_STAR::SYSCALL_CS_SS.val(0x08),
);
MSR_IA32_EFER.modify(MSR_IA32_EFER::SCE::Enable);
}
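A worked check of the STAR selector arithmetic above, against the GDT layout from gdt.rs (0x08 kernel CS, 0x10 kernel SS, 0x18|3 = 0x1B user SS, 0x20|3 = 0x23 user CS); the const names are illustrative, not from the commit:

// On syscall: CS = STAR[47:32], SS = CS + 8
const SYSCALL_BASE: u64 = 0x08;
// On 64-bit sysret: CS = STAR[63:48] + 16, SS = STAR[63:48] + 8
const SYSRET_BASE: u64 = 0x1B - 8; // = 0x13
const _: () = {
    assert!(SYSCALL_BASE + 8 == 0x10); // kernel SS
    assert!(SYSRET_BASE + 16 == 0x23); // user CS
    assert!(SYSRET_BASE + 8 == 0x1B); // user SS
};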
global_asm!(
include_str!("vectors.S"),
exception_handler = sym __x86_64_exception_handler,
apic_irq_handler = sym __x86_64_apic_irq_handler,
syscall_handler = sym __x86_64_syscall_handler,
options(att_syntax)
);


@ -1,6 +1,6 @@
//! x86-64 Global Descriptor Table interface
// TODO TSS
use core::mem::size_of_val;
use core::mem::{size_of, size_of_val};
#[allow(dead_code)]
#[repr(packed)]
@ -13,6 +13,26 @@ struct Entry {
base_hi: u8,
}
#[allow(dead_code)]
#[repr(packed)]
struct Tss {
_0: u32,
rsp0: u64,
rsp1: u64,
rsp2: u64,
_1: u32,
ist1: u64,
ist2: u64,
ist3: u64,
ist4: u64,
ist5: u64,
ist6: u64,
ist7: u64,
_2: u64,
_3: u16,
iopb_base: u16,
}
#[allow(dead_code)]
#[repr(packed)]
struct Pointer {
@ -20,6 +40,26 @@ struct Pointer {
offset: usize,
}
impl Tss {
const NULL: Self = Self {
_0: 0,
rsp0: 0,
rsp1: 0,
rsp2: 0,
_1: 0,
ist1: 0,
ist2: 0,
ist3: 0,
ist4: 0,
ist5: 0,
ist6: 0,
ist7: 0,
_2: 0,
_3: 0,
iopb_base: size_of::<Tss>() as u16,
};
}
impl Entry {
const FLAG_LONG: u8 = 1 << 5;
const ACC_PRESENT: u8 = 1 << 7;
@ -53,9 +93,12 @@ impl Entry {
}
}
const SIZE: usize = 3;
const SIZE: usize = 7;
// TODO per-CPU
#[no_mangle]
static mut TSS: Tss = Tss::NULL;
static mut GDT: [Entry; SIZE] = [
// 0x00, NULL
Entry::NULL,
@ -73,6 +116,23 @@ static mut GDT: [Entry; SIZE] = [
0,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_WRITE,
),
// 0x18 (0x1B), Ring3 DS64
Entry::new(
0,
0,
0,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_WRITE,
),
// 0x20 (0x23), Ring3 CS64
Entry::new(
0,
0,
Entry::FLAG_LONG,
Entry::ACC_PRESENT | Entry::ACC_SYSTEM | Entry::ACC_RING3 | Entry::ACC_EXECUTE,
),
// 0x28, TSS
Entry::NULL,
Entry::NULL,
];
/// Initializes the global descriptor table.
@ -80,7 +140,18 @@ static mut GDT: [Entry; SIZE] = [
/// # Safety
///
/// Only meant to be called by the CPUs during their early init.
pub unsafe fn init() {
pub unsafe fn init(_id: u32) -> usize {
let tss_addr = &TSS as *const _ as usize;
GDT[5] = Entry::new(
tss_addr as u32,
size_of_val(&TSS) as u32 - 1,
Entry::FLAG_LONG,
Entry::ACC_ACCESS | Entry::ACC_PRESENT | Entry::ACC_EXECUTE,
);
let tss_upper = &mut GDT[6] as *mut _ as *mut u64;
tss_upper.write_unaligned((tss_addr >> 32) as u64);
let gdtr = Pointer {
limit: size_of_val(&GDT) as u16 - 1,
offset: &GDT as *const _ as usize,
@ -113,9 +184,14 @@ pub unsafe fn init() {
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
mov $0x28, %ax
ltr %ax
"#,
in(reg) &gdtr,
out("rax") _,
options(att_syntax)
);
tss_addr
}


@ -34,7 +34,9 @@ pub mod intrinsics;
pub mod apic;
pub mod boot;
pub mod context;
pub mod cpu;
pub mod cpuid;
pub mod exception;
pub mod gdt;
pub mod peripherals;
@ -160,7 +162,7 @@ impl Architecture for X86_64 {
fn interrupt_mask() -> bool {
let mut flags: u64;
unsafe {
core::arch::asm!("pushfd; pop {0}", out(reg) flags, options(att_syntax));
core::arch::asm!("pushfq; pop {0}", out(reg) flags, options(att_syntax));
}
// If IF is zero, interrupts are disabled (masked)
flags & (1 << 9) == 0


@ -98,5 +98,206 @@ mod msr_ia32_apic_base {
pub const MSR_IA32_APIC_BASE: Reg = Reg;
}
mod msr_ia32_sfmask {
use tock_registers::register_bitfields;
register_bitfields! {
u64,
#[allow(missing_docs)]
pub MSR_IA32_SFMASK [
IF OFFSET(9) NUMBITS(1) [
Masked = 1,
Unmasked = 0
]
]
}
const ADDR: u32 = 0xC0000084;
pub struct Reg;
msr_impl_read!(Reg, ADDR, MSR_IA32_SFMASK::Register);
msr_impl_write!(Reg, ADDR, MSR_IA32_SFMASK::Register);
/// IA32_SFMASK model-specific register
pub const MSR_IA32_SFMASK: Reg = Reg;
}
mod msr_ia32_star {
use tock_registers::register_bitfields;
register_bitfields! {
u64,
#[allow(missing_docs)]
pub MSR_IA32_STAR [
SYSCALL_CS_SS OFFSET(32) NUMBITS(16) [],
SYSRET_CS_SS OFFSET(48) NUMBITS(16) [],
]
}
const ADDR: u32 = 0xC0000081;
pub struct Reg;
msr_impl_read!(Reg, ADDR, MSR_IA32_STAR::Register);
msr_impl_write!(Reg, ADDR, MSR_IA32_STAR::Register);
/// IA32_STAR model-specific register
pub const MSR_IA32_STAR: Reg = Reg;
}
mod msr_ia32_lstar {
const ADDR: u32 = 0xC0000082;
pub struct Reg;
msr_impl_read!(Reg, ADDR);
msr_impl_write!(Reg, ADDR);
/// IA32_LSTAR model-specific register
pub const MSR_IA32_LSTAR: Reg = Reg;
}
mod msr_ia32_efer {
use tock_registers::register_bitfields;
register_bitfields! {
u64,
#[allow(missing_docs)]
pub MSR_IA32_EFER [
// If set, support for SYSCALL/SYSRET instructions is enabled
SCE OFFSET(0) NUMBITS(1) [
Enable = 1,
Disable = 0
]
]
}
const ADDR: u32 = 0xC0000080;
pub struct Reg;
msr_impl_read!(Reg, ADDR, MSR_IA32_EFER::Register);
msr_impl_write!(Reg, ADDR, MSR_IA32_EFER::Register);
/// IA32_EFER Extended Feature Enable model-specific Register
pub const MSR_IA32_EFER: Reg = Reg;
}
mod cr4 {
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
};
register_bitfields! {
u64,
#[allow(missing_docs)]
pub CR4 [
/// If set, XSAVE and extended processor states are enabled
OSXSAVE OFFSET(18) NUMBITS(1) [],
/// Indicates OS support for FXSAVE and FXRSTOR instructions
OSFXSR OFFSET(9) NUMBITS(1) [],
/// If set, "page global" attribute is enabled
PGE OFFSET(8) NUMBITS(1) [],
]
}
pub struct Reg;
impl Readable for Reg {
type T = u64;
type R = CR4::Register;
fn get(&self) -> Self::T {
let value: u64;
unsafe {
core::arch::asm!("mov %cr4, {0}", out(reg) value, options(att_syntax));
}
value
}
}
impl Writeable for Reg {
type T = u64;
type R = CR4::Register;
fn set(&self, value: Self::T) {
unsafe {
core::arch::asm!("mov {0}, %cr4", in(reg) value, options(att_syntax));
}
}
}
/// x86-64 control register 4
pub const CR4: Reg = Reg;
}
mod xcr0 {
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
};
register_bitfields! {
u64,
#[allow(missing_docs)]
pub XCR0 [
/// If set, x87 FPU/MMX is enabled
X87 OFFSET(0) NUMBITS(1) [],
/// If set, XSAVE support for MXCSR and XMM registers is enabled
SSE OFFSET(1) NUMBITS(1) [],
/// If set, AVX is enabled and XSAVE supports YMM upper halves
AVX OFFSET(2) NUMBITS(1) [],
]
}
pub struct Reg;
impl Readable for Reg {
type T = u64;
type R = XCR0::Register;
fn get(&self) -> Self::T {
let eax: u32;
let edx: u32;
unsafe {
core::arch::asm!(
"xgetbv",
in("ecx") 0,
out("eax") eax,
out("edx") edx,
options(att_syntax)
);
}
((edx as u64) << 32) | (eax as u64)
}
}
impl Writeable for Reg {
type T = u64;
type R = XCR0::Register;
fn set(&self, value: Self::T) {
let eax = value as u32;
let edx = (value >> 32) as u32;
unsafe {
core::arch::asm!(
"xsetbv",
in("ecx") 0,
in("eax") eax,
in("edx") edx,
options(att_syntax)
);
}
}
}
/// Extended control register for SSE/AVX/FPU configuration
pub const XCR0: Reg = Reg;
}
pub use cr4::CR4;
pub use msr_ia32_apic_base::MSR_IA32_APIC_BASE;
pub use msr_ia32_efer::MSR_IA32_EFER;
pub use msr_ia32_kernel_gs_base::MSR_IA32_KERNEL_GS_BASE;
pub use msr_ia32_lstar::MSR_IA32_LSTAR;
pub use msr_ia32_sfmask::MSR_IA32_SFMASK;
pub use msr_ia32_star::MSR_IA32_STAR;
pub use xcr0::XCR0;


@ -91,6 +91,10 @@ impl FixedTables {
pub fn physical_address(&self) -> usize {
self.l0.physical_address()
}
pub fn clone_into(&self, target: &mut PageTable<L0>) {
target[511] = self.l0[511];
}
}
/// Instance of fixed translation tables


@ -5,6 +5,7 @@ use core::{
ops::{Index, IndexMut},
};
use abi::error::Error;
use bitflags::bitflags;
mod fixed;
@ -12,6 +13,7 @@ mod fixed;
pub use fixed::{init_fixed_tables, KERNEL_TABLES};
use crate::mem::{
phys::{self, PageUsage},
table::{EntryLevel, NonTerminalEntryLevel},
ConvertAddress,
};
@ -27,9 +29,19 @@ bitflags! {
/// When set for L2 entries, the mapping specifies a 2MiB page instead of a page table
/// reference
const BLOCK = 1 << 7;
/// For tables, allows user access to further translation levels, for pages/blocks, allows
/// user access to the region covered by the entry
const USER = 1 << 2;
}
}
/// Represents a process or kernel address space. Because x86-64 does not have cool stuff like
/// TTBR0 and TTBR1, all address spaces are initially cloned from the kernel space.
#[repr(C)]
pub struct AddressSpace {
l0: *mut PageTable<L0>,
}
/// Represents a single virtual address space mapping depending on its translation level
#[derive(Clone, Copy)]
#[repr(transparent)]
@ -109,7 +121,7 @@ impl PageEntry<L3> {
/// Constructs a mapping which points to a 4KiB page
pub fn page(phys: usize, attrs: PageAttributes) -> Self {
Self(
(phys as u64) | (attrs | PageAttributes::PRESENT).bits(),
(phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::USER).bits(),
PhantomData,
)
}
@ -119,7 +131,9 @@ impl PageEntry<L2> {
/// Constructs a mapping which points to a 2MiB block
pub fn block(phys: usize, attrs: PageAttributes) -> Self {
Self(
(phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK).bits(),
(phys as u64)
| (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK | PageAttributes::USER)
.bits(),
PhantomData,
)
}
@ -129,7 +143,12 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
/// Constructs a mapping which points to a next-level table
pub fn table(phys: usize, attrs: PageAttributes) -> Self {
Self(
(phys as u64) | (attrs | PageAttributes::PRESENT | PageAttributes::WRITABLE).bits(),
(phys as u64)
| (attrs
| PageAttributes::PRESENT
| PageAttributes::WRITABLE
| PageAttributes::USER)
.bits(),
PhantomData,
)
}
@ -167,3 +186,26 @@ impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
&mut self.data[index]
}
}
impl AddressSpace {
/// Allocates an empty address space with all entries marked as non-present
pub fn new_empty() -> Result<Self, Error> {
let l0 = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() as *mut PageTable<L0> };
for i in 0..512 {
unsafe {
(*l0)[i] = PageEntry::INVALID;
}
}
unsafe {
KERNEL_TABLES.clone_into(&mut *l0);
}
Ok(Self { l0 })
}
/// Returns the physical address of the root table
pub fn physical_address(&self) -> usize {
unsafe { (self.l0 as usize).physicalize() }
}
}
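A usage sketch matching spawn_user_in_kernel further down (the wrapper function is hypothetical): a fresh address space shares the kernel half through clone_into(), and the physical address of its root table is what ends up in %cr3 on a context switch:

fn make_user_space() -> Result<usize, Error> {
    let space = AddressSpace::new_empty()?;
    // The root table's physical address is passed as the cr3 argument to
    // TaskContext::user(); LOAD_TASK_STATE moves it into %cr3 on switch.
    Ok(space.physical_address())
}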


@ -1,39 +1,5 @@
.macro ISR_NERR, n
__x86_64_exc_\n:
cli
pushq $0
pushq $\n
jmp __x86_64_exc_common
.endm
.macro ISR_YERR, n
__x86_64_exc_\n:
cli
pushq $\n
jmp __x86_64_exc_common
.endm
.macro apic_vector, n
__x86_64_apic_irq_\n:
cli
// Push dummy error codes
// pushq $0
// pushq $0
EXC_SAVE_STATE
mov $\n, %rdi
mov %rsp, %rsi
call {apic_irq_handler}
EXC_RESTORE_STATE
// Remove dummy error codes
// add $16, %rsp
iretq
.endm
// 16 general-purpose registers
// vi: ft=asm :
// 15 general-purpose registers
.set PT_REGS_SIZE, 15 * 8
.macro EXC_SAVE_STATE
@ -77,8 +43,49 @@ __x86_64_apic_irq_\n:
addq $PT_REGS_SIZE, %rsp
.endm
.macro ISR_NERR, n
__x86_64_exc_\n:
cli
pushq $0
pushq $\n
jmp __x86_64_exc_common
.endm
.macro ISR_YERR, n
__x86_64_exc_\n:
cli
pushq $\n
jmp __x86_64_exc_common
.endm
.macro apic_vector, n
__x86_64_apic_irq_\n:
cli
// Push dummy error codes
// pushq $0
// pushq $0
EXC_SAVE_STATE
mov $0x10, %ax
mov %ax, %ss
mov $\n, %rdi
mov %rsp, %rsi
call {apic_irq_handler}
EXC_RESTORE_STATE
// Remove dummy error codes
// add $16, %rsp
iretq
.endm
.global __x86_64_exception_vectors
.global __x86_64_apic_vectors
.global __x86_64_syscall_vector
.section .text
__x86_64_exc_common:
@ -142,6 +149,38 @@ apic_vector 13
apic_vector 14
apic_vector 15
__x86_64_syscall_vector:
// On entry:
// %rcx - userspace %rip
// %r11 - rflags
// Store user RSP
// TODO: eliminate magic %gs-relative addresses
mov %rsp, %gs:(16)
// Load the task's RSP0 from TSS
mov %gs:(8), %rsp
mov 4(%rsp), %rsp
// Store the state
EXC_SAVE_STATE
// Store user stack pointer
mov %gs:(16), %rax
pushq %rax
mov %rsp, %rdi
call {syscall_handler}
// Restore user stack pointer
popq %rax
mov %rax, %gs:(16)
// Restore the state
EXC_RESTORE_STATE
// %rcx and %r11 now contain the expected values
// Restore user RSP
mov %gs:(16), %rsp
sysretq
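// Annotation (not part of the commit): the %gs-relative offsets above match
// the Cpu struct layout in cpu.rs:
//   %gs:(0)  - Cpu.this
//   %gs:(8)  - Cpu.tss_address; TSS.RSP0 lives at byte offset 4 in the TSS
//   %gs:(16) - Cpu.tmp_address, scratch slot for the saved user %rsp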
.section .rodata
.global __x86_64_exception_vectors


@ -10,7 +10,7 @@
const_mut_refs,
let_chains
)]
#![allow(clippy::new_without_default)]
#![allow(clippy::new_without_default, clippy::fn_to_numeric_cast)]
#![warn(missing_docs)]
#![no_std]
#![no_main]
@ -41,7 +41,7 @@ pub mod panic;
// pub mod proc;
pub mod sync;
// pub mod syscall;
// pub mod task;
pub mod task;
pub mod util;
//
// fn setup_root() -> Result<VnodeRef, Error> {


@ -6,7 +6,7 @@ cfg_if! {
if #[cfg(target_arch = "aarch64")] {
pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
} else if #[cfg(target_arch = "x86_64")] {
pub use crate::arch::x86_64::table::{PageAttributes};
pub use crate::arch::x86_64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};
}
}


@ -5,6 +5,8 @@ use core::{
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};
use crate::arch::{Architecture, ArchitectureImpl};
/// Simple spinloop-based fence guaranteeing that the execution resumes only after its condition is
/// met.
pub struct SpinFence {
@ -13,7 +15,7 @@ pub struct SpinFence {
/// Token type used to prevent IRQs from firing during some critical section. Normal IRQ operation
/// (if enabled before) is resumed when [IrqGuard]'s lifetime is over.
pub struct IrqGuard(u64);
pub struct IrqGuard(bool);
struct SpinlockInner<T> {
value: UnsafeCell<T>,
@ -129,16 +131,19 @@ impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> {
impl IrqGuard {
/// Saves the current IRQ state and masks them
pub fn acquire() -> Self {
Self(0)
// let this = Self(DAIF.get());
// DAIF.modify(DAIF::I::SET);
// this
let mask = ArchitectureImpl::interrupt_mask();
unsafe {
ArchitectureImpl::set_interrupt_mask(true);
}
Self(mask)
}
}
impl Drop for IrqGuard {
fn drop(&mut self) {
// DAIF.set(self.0);
unsafe {
ArchitectureImpl::set_interrupt_mask(self.0);
}
}
}
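A short usage sketch (hypothetical call site): the guard saves the current mask state, masks IRQs, and restores the saved state when dropped:

fn with_irqs_masked() {
    let _guard = IrqGuard::acquire();
    // IRQs are masked here; safe to enter an IRQ-sensitive critical section
} // previous interrupt mask restored when _guard drops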


@ -1,14 +1,27 @@
//! Multitasking and process/thread management interfaces
use core::sync::atomic::Ordering;
use aarch64_cpu::registers::MPIDR_EL1;
#![allow(dead_code)]
use abi::error::Error;
use alloc::{rc::Rc, vec::Vec};
use tock_registers::interfaces::Readable;
use cfg_if::cfg_if;
cfg_if! {
if #[cfg(target_arch = "aarch64")] {
pub use crate::arch::aarch64::{context::TaskContext, cpu::Cpu};
} else if #[cfg(target_arch = "x86_64")] {
pub use crate::arch::x86_64::{context::TaskContext, cpu::Cpu};
}
}
use crate::{
arch::aarch64::{context::TaskContext, cpu::Cpu, smp::CPU_COUNT},
kernel_main,
mem::{
phys::{self, PageUsage},
table::AddressSpace,
ConvertAddress,
},
//arch::aarch64::{context::TaskContext, cpu::Cpu, smp::CPU_COUNT},
// kernel_main,
sync::{IrqSafeSpinlock, SpinFence},
task::sched::CpuQueue,
};
@ -69,15 +82,69 @@ pub fn spawn_kernel_closure<F: Fn() + Send + 'static>(f: F) -> Result<(), Error>
Ok(())
}
unsafe fn spawn_user_in_kernel(entry: usize, arg: usize) -> Result<(), Error> {
let stack = phys::alloc_pages_contiguous(4, PageUsage::Used)?;
let stack_pointer = stack + 4 * 0x1000;
let space = AddressSpace::new_empty()?;
let user_ctx = TaskContext::user(
entry,
arg,
space.physical_address(),
stack_pointer.virtualize(),
)?;
let proc = Process::new_with_context(Some(space), user_ctx);
proc.enqueue_somewhere();
Ok(())
}
extern "C" fn f1(arg: usize) -> ! {
loop {
unsafe {
core::arch::asm!("syscall", in("rax") arg, options(att_syntax), clobber_abi("C"));
}
for _ in 0..100000 {
unsafe {
core::arch::asm!("nop");
}
}
}
}
/// Sets up CPU queues and gives them some processes to run
pub fn init() -> Result<(), Error> {
let cpu_count = CPU_COUNT.load(Ordering::Acquire);
// XXX
// let cpu_count = CPU_COUNT.load(Ordering::Acquire);
let cpu_count = 1;
// Create a queue for each CPU
sched::init_queues(Vec::from_iter((0..cpu_count).map(|_| CpuQueue::new())));
// Spawn kernel main task
spawn_kernel_closure(kernel_main)?;
unsafe {
spawn_user_in_kernel(f1 as usize, 0).unwrap();
}
unsafe {
spawn_user_in_kernel(f1 as usize, 1).unwrap();
}
// spawn_kernel_closure(move || loop {
// debugln!("A");
// for _ in 0..100000 {
// core::hint::spin_loop();
// }
// })?;
// spawn_kernel_closure(move || loop {
// debugln!("B");
// for _ in 0..100000 {
// core::hint::spin_loop();
// }
// })?;
Ok(())
}
@ -94,7 +161,7 @@ pub fn init() -> Result<(), Error> {
pub unsafe fn enter() -> ! {
static AP_CAN_ENTER: SpinFence = SpinFence::new();
let cpu_id = MPIDR_EL1.get() & 0xF;
let cpu_id = Cpu::local_id();
if cpu_id != 0 {
// Wait until BSP allows us to enter


@ -1,32 +1,26 @@
//! Process data structures
use core::{
mem::size_of,
ops::Deref,
sync::atomic::{AtomicU32, Ordering},
};
use aarch64_cpu::registers::DAIF;
use abi::{
error::Error,
process::{ExitCode, Signal, SignalEntryData},
};
use alloc::{collections::VecDeque, rc::Rc};
// use aarch64_cpu::registers::DAIF;
use alloc::rc::Rc;
use atomic_enum::atomic_enum;
use tock_registers::interfaces::Readable;
use vfs::VnodeRef;
use crate::{
arch::aarch64::{context::TaskContext, cpu::Cpu, exception::ExceptionFrame},
mem::{table::AddressSpace, ForeignPointer},
proc::{
io::ProcessIo,
wait::{Wait, WaitStatus, PROCESS_EXIT_WAIT},
},
mem::table::AddressSpace,
// arch::aarch64::{context::TaskContext, cpu::Cpu, exception::ExceptionFrame},
// mem::{table::AddressSpace, ForeignPointer},
// proc::{
// io::ProcessIo,
// wait::{Wait, WaitStatus, PROCESS_EXIT_WAIT},
// },
sync::{IrqGuard, IrqSafeSpinlock},
util::OneTimeInit,
};
use super::{sched::CpuQueue, ProcessId, PROCESSES};
use super::{sched::CpuQueue, Cpu, ProcessId, TaskContext, PROCESSES};
/// Represents the states a process can be at some point in time
#[atomic_enum]
@ -50,16 +44,17 @@ pub struct SignalEntry {
}
struct ProcessInner {
pending_wait: Option<&'static Wait>,
wait_status: WaitStatus,
exit_status: i32,
// XXX
// pending_wait: Option<&'static Wait>,
// wait_status: WaitStatus,
// exit_status: i32,
session_id: ProcessId,
group_id: ProcessId,
session_terminal: Option<VnodeRef>,
// session_id: ProcessId,
// group_id: ProcessId,
// session_terminal: Option<VnodeRef>,
signal_entry: Option<SignalEntry>,
signal_stack: VecDeque<Signal>,
// signal_entry: Option<SignalEntry>,
// signal_stack: VecDeque<Signal>,
}
/// Process data and state structure
@ -71,11 +66,9 @@ pub struct Process {
state: AtomicProcessState,
cpu_id: AtomicU32,
space: Option<AddressSpace>,
inner: IrqSafeSpinlock<ProcessInner>,
/// I/O state of the task
pub io: IrqSafeSpinlock<ProcessIo>,
// /// I/O state of the task
// pub io: IrqSafeSpinlock<ProcessIo>,
}
/// Guard type that provides [Process] operations only available for current processes
@ -97,29 +90,30 @@ impl Process {
space,
inner: IrqSafeSpinlock::new(ProcessInner {
pending_wait: None,
wait_status: WaitStatus::Done,
exit_status: 0,
// XXX
// pending_wait: None,
// wait_status: WaitStatus::Done,
// exit_status: 0,
session_id: 0,
group_id: 0,
session_terminal: None,
// session_id: 0,
// group_id: 0,
// session_terminal: None,
signal_entry: None,
signal_stack: VecDeque::new(),
// signal_entry: None,
// signal_stack: VecDeque::new(),
}),
io: IrqSafeSpinlock::new(ProcessIo::new()),
// XXX
// io: IrqSafeSpinlock::new(ProcessIo::new()),
});
let id = unsafe { PROCESSES.lock().push(this.clone()) };
this.id.init(id);
{
let mut inner = this.inner.lock();
inner.session_id = id;
inner.group_id = id;
}
// {
// let mut inner = this.inner.lock();
// inner.session_id = id;
// inner.group_id = id;
// }
this
}
@ -168,35 +162,36 @@ impl Process {
self.space.as_ref()
}
/// Replaces the task's session terminal device with another one
pub fn set_session_terminal(&self, terminal: VnodeRef) {
self.inner.lock().session_terminal.replace(terminal);
}
// XXX
// /// Replaces the task's session terminal device with another one
// pub fn set_session_terminal(&self, terminal: VnodeRef) {
// self.inner.lock().session_terminal.replace(terminal);
// }
/// Removes the task's current terminal
pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.take()
}
// /// Removes the task's current terminal
// pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
// self.inner.lock().session_terminal.take()
// }
/// Returns the current terminal of the task
pub fn session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.clone()
}
// /// Returns the current terminal of the task
// pub fn session_terminal(&self) -> Option<VnodeRef> {
// self.inner.lock().session_terminal.clone()
// }
/// Sets the session ID of the task
pub fn set_session_id(&self, sid: ProcessId) {
self.inner.lock().session_id = sid;
}
// /// Sets the session ID of the task
// pub fn set_session_id(&self, sid: ProcessId) {
// self.inner.lock().session_id = sid;
// }
/// Sets the process group ID of the task
pub fn set_group_id(&self, gid: ProcessId) {
self.inner.lock().group_id = gid;
}
// /// Sets the process group ID of the task
// pub fn set_group_id(&self, gid: ProcessId) {
// self.inner.lock().group_id = gid;
// }
/// Returns the process group ID of the task
pub fn group_id(&self) -> ProcessId {
self.inner.lock().group_id
}
// /// Returns the process group ID of the task
// pub fn group_id(&self) -> ProcessId {
// self.inner.lock().group_id
// }
/// Selects a suitable CPU queue and submits the process for execution.
///
@ -249,43 +244,44 @@ impl Process {
ProcessState::Suspended => (),
ProcessState::Terminated => panic!("Process is terminated"),
ProcessState::Running => {
let cpu_id = self.cpu_id.load(Ordering::Acquire);
let local_cpu_id = Cpu::local_id();
let queue = Cpu::local().queue();
todo!();
// let cpu_id = self.cpu_id.load(Ordering::Acquire);
// let local_cpu_id = Cpu::local_id();
// let queue = Cpu::local().queue();
if cpu_id == local_cpu_id {
// Suspending a process running on local CPU
unsafe { queue.yield_cpu() }
} else {
todo!();
}
// if cpu_id == local_cpu_id {
// // Suspending a process running on local CPU
// unsafe { queue.yield_cpu() }
// } else {
// todo!();
// }
}
}
}
/// Returns current wait status of the task
pub fn wait_status(&self) -> WaitStatus {
self.inner.lock().wait_status
}
// /// Returns current wait status of the task
// pub fn wait_status(&self) -> WaitStatus {
// self.inner.lock().wait_status
// }
/// Updates the wait status for the task.
///
/// # Safety
///
/// This function is only meant to be called on waiting tasks, otherwise atomicity is not
/// guaranteed.
pub unsafe fn set_wait_status(&self, status: WaitStatus) {
self.inner.lock().wait_status = status;
}
// /// Updates the wait status for the task.
// ///
// /// # Safety
// ///
// /// This function is only meant to be called on waiting tasks, otherwise atomicity is not
// /// guaranteed.
// pub unsafe fn set_wait_status(&self, status: WaitStatus) {
// self.inner.lock().wait_status = status;
// }
/// Returns an exit code if the process exited, [None] if it didn't
pub fn get_exit_status(&self) -> Option<ExitCode> {
if self.state() == ProcessState::Terminated {
Some(ExitCode::from(self.inner.lock().exit_status))
} else {
None
}
}
// /// Returns an exit code if the process exited, [None] if it didn't
// pub fn get_exit_status(&self) -> Option<ExitCode> {
// if self.state() == ProcessState::Terminated {
// Some(ExitCode::from(self.inner.lock().exit_status))
// } else {
// None
// }
// }
/// Returns the [Process] currently executing on local CPU, None if idling.
pub fn get_current() -> Option<CurrentProcess> {
@ -304,71 +300,71 @@ impl Process {
Self::get_current().unwrap()
}
/// Handles the cleanup of an exited process
pub fn handle_exit(&self) {
// Queue lock is still held
assert_eq!(self.state(), ProcessState::Terminated);
// /// Handles the cleanup of an exited process
// pub fn handle_exit(&self) {
// // Queue lock is still held
// assert_eq!(self.state(), ProcessState::Terminated);
// TODO cancel Wait if a process was killed while suspended?
{
let inner = self.inner.lock();
let exit_status = ExitCode::from(inner.exit_status);
debugln!(
"Handling exit of #{} with status {:?}",
self.id(),
exit_status
);
// // TODO cancel Wait if a process was killed while suspended?
// {
// let inner = self.inner.lock();
// let exit_status = ExitCode::from(inner.exit_status);
// debugln!(
// "Handling exit of #{} with status {:?}",
// self.id(),
// exit_status
// );
// TODO cleanup address space
// if let Some(space) = self.get_address_space() {
// }
// // TODO cleanup address space
// // if let Some(space) = self.get_address_space() {
// // }
self.io.lock().handle_exit();
}
// self.io.lock().handle_exit();
// }
// Notify any waiters we're done
PROCESS_EXIT_WAIT.wakeup_all();
}
// // Notify any waiters we're done
// PROCESS_EXIT_WAIT.wakeup_all();
// }
/// Raises an asynchronous signal for the target process
pub fn try_set_signal(self: &Rc<Self>, signal: Signal) -> Result<(), Error> {
{
let mut inner = self.inner.lock();
inner.wait_status = WaitStatus::Interrupted;
inner.signal_stack.push_back(signal);
}
// /// Raises an asynchronous signal for the target process
// pub fn try_set_signal(self: &Rc<Self>, signal: Signal) -> Result<(), Error> {
// {
// let mut inner = self.inner.lock();
// inner.wait_status = WaitStatus::Interrupted;
// inner.signal_stack.push_back(signal);
// }
if self.state() == ProcessState::Suspended {
self.clone().enqueue_somewhere();
}
// if self.state() == ProcessState::Suspended {
// self.clone().enqueue_somewhere();
// }
Ok(())
}
// Ok(())
// }
/// Inherits the data from a parent process. Meant to be called from SpawnProcess handler.
pub fn inherit(&self, parent: &Rc<Process>) -> Result<(), Error> {
let mut our_inner = self.inner.lock();
let their_inner = parent.inner.lock();
// /// Inherits the data from a parent process. Meant to be called from SpawnProcess handler.
// pub fn inherit(&self, parent: &Rc<Process>) -> Result<(), Error> {
// let mut our_inner = self.inner.lock();
// let their_inner = parent.inner.lock();
our_inner.session_id = their_inner.session_id;
our_inner.group_id = their_inner.group_id;
our_inner.session_terminal = their_inner.session_terminal.clone();
// our_inner.session_id = their_inner.session_id;
// our_inner.group_id = their_inner.group_id;
// our_inner.session_terminal = their_inner.session_terminal.clone();
Ok(())
}
// Ok(())
// }
/// Raises a signal for the specified process group
pub fn signal_group(group_id: ProcessId, signal: Signal) {
let processes = PROCESSES.lock();
for (_, proc) in processes.data.iter() {
let inner = proc.inner.lock();
if proc.state() != ProcessState::Terminated && inner.group_id == group_id {
debugln!("Deliver group signal to {}: {:?}", proc.id(), signal);
drop(inner);
proc.try_set_signal(signal).unwrap();
}
}
}
// /// Raises a signal for the specified process group
// pub fn signal_group(group_id: ProcessId, signal: Signal) {
// let processes = PROCESSES.lock();
// for (_, proc) in processes.data.iter() {
// let inner = proc.inner.lock();
// if proc.state() != ProcessState::Terminated && inner.group_id == group_id {
// debugln!("Deliver group signal to {}: {:?}", proc.id(), signal);
// drop(inner);
// proc.try_set_signal(signal).unwrap();
// }
// }
// }
}
impl Drop for Process {
@ -384,98 +380,99 @@ impl CurrentProcess {
///
/// Only meant to be called from [Process::current] or [CpuQueue::current_process].
pub unsafe fn new(inner: Rc<Process>) -> Self {
assert_eq!(DAIF.read(DAIF::I), 1);
// XXX
// assert_eq!(DAIF.read(DAIF::I), 1);
Self(inner)
}
/// Sets up a pending wait for the process.
///
/// # Safety
///
/// This function is only meant to be called in no-IRQ context and when caller can guarantee
/// the task won't get scheduled to a CPU in such state.
pub unsafe fn setup_wait(&self, wait: &'static Wait) {
let mut inner = self.inner.lock();
inner.pending_wait.replace(wait);
inner.wait_status = WaitStatus::Pending;
}
// /// Sets up a pending wait for the process.
// ///
// /// # Safety
// ///
// /// This function is only meant to be called in no-IRQ context and when caller can guarantee
// /// the task won't get scheduled to a CPU in such state.
// pub unsafe fn setup_wait(&self, wait: &'static Wait) {
// let mut inner = self.inner.lock();
// inner.pending_wait.replace(wait);
// inner.wait_status = WaitStatus::Pending;
// }
/// Sets up a return frame to handle a pending signal, if any is present in the task's queue.
///
/// # Safety
///
/// This function is only meant to be called right before returning from an userspace
/// exception handler.
pub unsafe fn handle_signal(&self, frame: &mut ExceptionFrame) {
let mut inner = self.inner.lock();
if let Some(signal) = inner.signal_stack.pop_front() {
let Some(entry) = inner.signal_entry.clone() else {
todo!();
};
// /// Sets up a return frame to handle a pending signal, if any is present in the task's queue.
// ///
// /// # Safety
// ///
// /// This function is only meant to be called right before returning from an userspace
// /// exception handler.
// pub unsafe fn handle_signal(&self, frame: &mut ExceptionFrame) {
// let mut inner = self.inner.lock();
// if let Some(signal) = inner.signal_stack.pop_front() {
// let Some(entry) = inner.signal_entry.clone() else {
// todo!();
// };
debugln!(
"Enter signal handler from: pc={:#x}, sp={:#x}",
frame.elr_el1,
frame.sp_el0
);
// debugln!(
// "Enter signal handler from: pc={:#x}, sp={:#x}",
// frame.elr_el1,
// frame.sp_el0
// );
// TODO check if really in a syscall, lol
let syscall_return = -(u32::from(Error::Interrupted) as isize);
frame.r[0] = syscall_return as u64;
// // TODO check if really in a syscall, lol
// let syscall_return = -(u32::from(Error::Interrupted) as isize);
// frame.r[0] = syscall_return as u64;
// Setup signal frame
let usp = (entry.stack - size_of::<SignalEntryData>()) & !0xF;
let frame_ptr = usp as *mut SignalEntryData;
// // Setup signal frame
// let usp = (entry.stack - size_of::<SignalEntryData>()) & !0xF;
// let frame_ptr = usp as *mut SignalEntryData;
let saved_frame = frame.to_saved_frame();
frame_ptr.write_foreign_volatile(
self.address_space(),
SignalEntryData {
signal,
frame: saved_frame,
},
);
// let saved_frame = frame.to_saved_frame();
// frame_ptr.write_foreign_volatile(
// self.address_space(),
// SignalEntryData {
// signal,
// frame: saved_frame,
// },
// );
// Setup return to signal handler
debugln!(
"Syscall entry @ pc={:#x}, sp={:#x} (top = {:#x})",
entry.entry,
usp,
entry.stack
);
frame.sp_el0 = usp as _;
frame.elr_el1 = entry.entry as _;
// // Setup return to signal handler
// debugln!(
// "Syscall entry @ pc={:#x}, sp={:#x} (top = {:#x})",
// entry.entry,
// usp,
// entry.stack
// );
// frame.sp_el0 = usp as _;
// frame.elr_el1 = entry.entry as _;
// Pass the frame pointer as an argument to signal handler entry
frame.r[0] = usp as _;
}
}
// // Pass the frame pointer as an argument to signal handler entry
// frame.r[0] = usp as _;
// }
// }
/// Configures signal entry information for the process
pub fn set_signal_entry(&self, entry: usize, stack: usize) {
let mut inner = self.inner.lock();
inner.signal_entry.replace(SignalEntry { entry, stack });
}
// /// Configures signal entry information for the process
// pub fn set_signal_entry(&self, entry: usize, stack: usize) {
// let mut inner = self.inner.lock();
// inner.signal_entry.replace(SignalEntry { entry, stack });
// }
/// Terminate the current process
pub fn exit(&self, status: ExitCode) {
self.inner.lock().exit_status = status.into();
let current_state = self.state.swap(ProcessState::Terminated, Ordering::SeqCst);
assert_eq!(current_state, ProcessState::Running);
infoln!("Process {} exited with code {:?}", self.id(), status);
// /// Terminate the current process
// pub fn exit(&self, status: ExitCode) {
// self.inner.lock().exit_status = status.into();
// let current_state = self.state.swap(ProcessState::Terminated, Ordering::SeqCst);
// assert_eq!(current_state, ProcessState::Running);
// infoln!("Process {} exited with code {:?}", self.id(), status);
match current_state {
ProcessState::Suspended => {
todo!();
}
ProcessState::Ready => todo!(),
ProcessState::Running => {
self.handle_exit();
unsafe { Cpu::local().queue().yield_cpu() }
}
ProcessState::Terminated => todo!(),
}
}
// match current_state {
// ProcessState::Suspended => {
// todo!();
// }
// ProcessState::Ready => todo!(),
// ProcessState::Running => {
// self.handle_exit();
// unsafe { Cpu::local().queue().yield_cpu() }
// }
// ProcessState::Terminated => todo!(),
// }
// }
}
impl Deref for CurrentProcess {


@ -1,18 +1,18 @@
//! Per-CPU queue implementation
use aarch64_cpu::registers::CNTPCT_EL0;
// use aarch64_cpu::registers::CNTPCT_EL0;
use alloc::{collections::VecDeque, rc::Rc, vec::Vec};
use tock_registers::interfaces::Readable;
use cfg_if::cfg_if;
use crate::{
arch::aarch64::{context::TaskContext, cpu::Cpu},
// arch::aarch64::{context::TaskContext, cpu::Cpu},
sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard},
util::OneTimeInit,
};
use super::{
process::{CurrentProcess, Process, ProcessState},
ProcessId,
Cpu, ProcessId, TaskContext,
};
/// Per-CPU statistics
@ -49,7 +49,19 @@ static QUEUES: OneTimeInit<Vec<CpuQueue>> = OneTimeInit::new();
#[naked]
extern "C" fn __idle(_x: usize) -> ! {
unsafe {
core::arch::asm!("1: nop; b 1b", options(noreturn));
cfg_if! {
if #[cfg(target_arch = "aarch64")] {
core::arch::asm!("1: nop; b 1b", options(noreturn));
} else if #[cfg(target_arch = "x86_64")] {
core::arch::asm!(r#"
1:
nop
jmp 1b
"#, options(noreturn, att_syntax));
} else {
core::arch::asm!("", options(noreturn));
}
}
}
}
@ -113,8 +125,8 @@ impl CpuQueue {
/// Only meant to be called from [crate::task::enter()] function.
pub unsafe fn enter(&self) -> ! {
// Start from idle thread to avoid having a Rc stuck here without getting dropped
let t = CNTPCT_EL0.get();
self.lock().stats.measure_time = t;
// let t = CNTPCT_EL0.get();
// self.lock().stats.measure_time = t;
self.idle.enter()
}
@ -127,9 +139,9 @@ impl CpuQueue {
pub unsafe fn yield_cpu(&self) {
let mut inner = self.inner.lock();
let t = CNTPCT_EL0.get();
let delta = t - inner.stats.measure_time;
inner.stats.measure_time = t;
// let t = CNTPCT_EL0.get();
// let delta = t - inner.stats.measure_time;
// inner.stats.measure_time = t;
let current = inner.current.clone();
@ -139,9 +151,9 @@ impl CpuQueue {
}
inner.queue.push_back(current.clone());
inner.stats.cpu_time += delta;
// inner.stats.cpu_time += delta;
} else {
inner.stats.idle_time += delta;
// inner.stats.idle_time += delta;
}
let next = inner.next_ready_task();