//! Process data and control

use crate::arch::aarch64::exception::ExceptionFrame;
use crate::mem::{
    self,
    phys::{self, PageUsage},
    virt::{MapAttributes, Space},
};
use crate::proc::{wait::Wait, ProcessIo, PROCESSES, SCHED};
use crate::sync::IrqSafeSpinLock;
use alloc::rc::Rc;
use core::cell::UnsafeCell;
use core::fmt;
use core::sync::atomic::{AtomicU32, Ordering};
use libsys::{error::Errno, signal::Signal};

pub use crate::arch::platform::context::{self, Context};

/// Wrapper type for a process struct reference
pub type ProcessRef = Rc<Process>;

/// Wrapper type for process exit code
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug)]
#[repr(transparent)]
pub struct ExitCode(i32);

/// Wrapper type for process ID
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq)]
#[repr(transparent)]
pub struct Pid(u32);

/// Possible process states
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum State {
    /// Process is ready to be executed and/or is scheduled for it
    Ready,
    /// Process is currently running or is in a system call/interrupt handler
    Running,
    /// Process has finished execution and is waiting to be reaped
    Finished,
    /// Process is waiting for some external event
    Waiting,
}

struct ProcessInner {
    space: Option<&'static mut Space>,
    state: State,
    id: Pid,
    wait_flag: bool,
    exit: Option<ExitCode>,
    signal_entry: usize,
    signal_stack: usize,
}

/// Structure describing an operating system process
#[allow(dead_code)]
pub struct Process {
    ctx: UnsafeCell<Context>,
    signal_ctx: UnsafeCell<Context>,
    inner: IrqSafeSpinLock<ProcessInner>,
    exit_wait: Wait,
    signal_state: AtomicU32,
    signal_pending: AtomicU32,
    /// Process I/O context
    pub io: IrqSafeSpinLock<ProcessIo>,
}
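
// A process carries two execution contexts: `ctx` for normal execution and
// `signal_ctx` for running a signal handler; `current_context()` selects
// between them based on `signal_pending`. The remaining mutable state is
// kept behind the IRQ-safe `inner` spinlock.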

impl From<i32> for ExitCode {
    fn from(f: i32) -> Self {
        Self(f)
    }
}

impl From<()> for ExitCode {
    fn from(_: ()) -> Self {
        Self(0)
    }
}

impl From<ExitCode> for i32 {
    fn from(f: ExitCode) -> Self {
        f.0
    }
}

impl Pid {
    /// The kernel idle process always has kernel PID index zero
    pub const IDLE: Self = Self(Self::KERNEL_BIT);

    const KERNEL_BIT: u32 = 1 << 31;
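
    // PID encoding: bit 31 (KERNEL_BIT) marks kernel processes, the low bits
    // carry the numeric index. User PIDs are kept below 256 so that the index
    // can double as an 8-bit address space ID (see `asid()` below).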

    /// Constructs an instance of a user-space PID
    pub const fn user(id: u32) -> Self {
        assert!(id < 256, "PID is too high");
        Self(id)
    }

    /// Allocates a new kernel-space PID
    pub fn new_kernel() -> Self {
        static LAST: AtomicU32 = AtomicU32::new(0);
        let id = LAST.fetch_add(1, Ordering::Relaxed);
        assert!(id & Self::KERNEL_BIT == 0, "Out of kernel PIDs");
        Self(id | Self::KERNEL_BIT)
    }

    /// Allocates a new user-space PID.
    ///
    /// The first user PID is #1.
    pub fn new_user() -> Self {
        static LAST: AtomicU32 = AtomicU32::new(1);
        let id = LAST.fetch_add(1, Ordering::Relaxed);
        assert!(id < 256, "Out of user PIDs");
        Self(id)
    }

    /// Returns `true` if this PID belongs to a kernel process
    pub fn is_kernel(self) -> bool {
        self.0 & Self::KERNEL_BIT != 0
    }

    /// Returns the address space ID (ASID) of a user-space process.
    ///
    /// Panics if called on a kernel process PID.
    pub fn asid(self) -> u8 {
        assert!(!self.is_kernel());
        self.0 as u8
    }
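
    // The ASID is placed into bits 48.. of the TTBR0_EL1 value and of the
    // `tlbi aside1` operand whenever an address space is activated or torn
    // down; see `enter_signal`, `fork`, `exit` and `execve` below.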

    /// Returns the raw numeric value of this PID
    pub const fn value(self) -> u32 {
        self.0
    }

    /// Constructs a [Pid] from a raw [u32] value
    ///
    /// # Safety
    ///
    /// Unsafe: does not check that `num` is a valid PID value
    pub const unsafe fn from_raw(num: u32) -> Self {
        Self(num)
    }
}
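
// A `Pid` displays with a K/U prefix and its index without the kernel bit,
// e.g. `Pid::user(1)` renders as "Pid(#U1)" and `Pid::IDLE` as "Pid(#K0)".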

impl fmt::Display for Pid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Pid(#{}{})",
            if self.is_kernel() { "K" } else { "U" },
            self.0 & !Self::KERNEL_BIT
        )
    }
}

impl Process {
    const USTACK_VIRT_TOP: usize = 0x100000000;
    const USTACK_PAGES: usize = 4;

    /// Returns currently executing process
    pub fn current() -> ProcessRef {
        SCHED.current_process()
    }

    /// Returns process (if any) to which `pid` refers
    pub fn get(pid: Pid) -> Option<ProcessRef> {
        PROCESSES.lock().get(&pid).cloned()
    }

    /// Sets a pending signal for a process
    pub fn set_signal(&self, signal: Signal) {
        let lock = self.inner.lock();

        match lock.state {
            State::Running => {
                drop(lock);
                self.enter_signal(signal);
            }
            State::Waiting => {
                // TODO abort whatever the process is waiting for
                todo!()
            }
            State::Ready => {
                todo!()
            }
            State::Finished => {
                // TODO report error back
                todo!()
            }
        }
    }

    /// Switches current thread back from signal handler
    pub fn return_from_signal(&self) {
        if self.signal_pending.load(Ordering::Acquire) == 0 {
            panic!("TODO handle cases when returning from no signal");
        }
        self.signal_pending.store(0, Ordering::Release);

        let src_ctx = self.signal_ctx.get();
        let dst_ctx = self.ctx.get();

        assert_eq!(self.inner.lock().state, State::Running);

        unsafe {
            (&mut *src_ctx).switch(&mut *dst_ctx);
        }
    }

    /// Switches current thread to a signal handler
    pub fn enter_signal(&self, signal: Signal) {
        if self
            .signal_pending
            .compare_exchange_weak(0, signal as u32, Ordering::SeqCst, Ordering::Relaxed)
            .is_err()
        {
            panic!("Already handling a signal (maybe handle this case)");
        }

        let mut lock = self.inner.lock();
        let signal_ctx = unsafe { &mut *self.signal_ctx.get() };

        let dst_id = lock.id;
        let dst_space_phys = lock.space.as_mut().unwrap().address_phys();
        let dst_ttbr0 = dst_space_phys | ((dst_id.asid() as usize) << 48);

        debugln!(
            "Signal entry: pc={:#x}, sp={:#x}, ttbr0={:#x}",
            lock.signal_entry,
            lock.signal_stack,
            dst_ttbr0
        );
        assert_eq!(lock.state, State::Running);

        unsafe {
            signal_ctx.setup_signal_entry(
                lock.signal_entry,
                signal as usize,
                dst_ttbr0,
                lock.signal_stack,
            );
        }
        let src_ctx = self.ctx.get();
        drop(lock);

        unsafe {
            (&mut *src_ctx).switch(signal_ctx);
        }
    }

    /// Sets up values needed for signal entry
    pub fn setup_signal_context(&self, entry: usize, stack: usize) {
        let mut lock = self.inner.lock();
        lock.signal_entry = entry;
        lock.signal_stack = stack;
    }
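
    // Signal delivery flow: userspace first registers a handler entry point
    // and a dedicated stack via `setup_signal_context`. When `set_signal`
    // finds the process running, `enter_signal` records the signal in
    // `signal_pending`, prepares `signal_ctx` to enter the handler at
    // `signal_entry` on `signal_stack`, and switches to it. The handler
    // eventually triggers `return_from_signal`, which clears the pending
    // flag and switches back to the interrupted context.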

    /// Schedules an initial thread for execution
    ///
    /// # Safety
    ///
    /// Unsafe: only allowed to be called once; repeated calls
    /// will result in undefined behavior
    pub unsafe fn enter(proc: ProcessRef) -> ! {
        // FIXME use some global lock to guarantee atomicity of thread entry?
        proc.inner.lock().state = State::Running;
        proc.current_context().enter()
    }

    /// Executes a function allowing mutation of the process address space
    #[inline]
    pub fn manipulate_space<F: FnOnce(&mut Space) -> Result<(), Errno>>(
        &self,
        f: F,
    ) -> Result<(), Errno> {
        f(self.inner.lock().space.as_mut().unwrap())
    }

    #[allow(clippy::mut_from_ref)]
    fn current_context(&self) -> &mut Context {
        if self.signal_pending.load(Ordering::Acquire) != 0 {
            unsafe { &mut *self.signal_ctx.get() }
        } else {
            unsafe { &mut *self.ctx.get() }
        }
    }
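
    // While a signal is pending, `current_context()` yields the signal
    // handler context instead of the normal one, so scheduler switches keep
    // running the handler until `return_from_signal()` clears the flag.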

    /// Switches execution from the `src` thread to the `dst` thread
    ///
    /// # Safety
    ///
    /// Unsafe:
    ///
    /// * Does not ensure src and dst threads are not the same thread
    /// * Does not ensure src is actually the current context
    pub unsafe fn switch(src: ProcessRef, dst: ProcessRef, discard: bool) {
        {
            let mut src_lock = src.inner.lock();
            let mut dst_lock = dst.inner.lock();

            if !discard {
                assert_eq!(src_lock.state, State::Running);
                src_lock.state = State::Ready;
            }
            assert!(dst_lock.state == State::Ready || dst_lock.state == State::Waiting);
            dst_lock.state = State::Running;
        }

        let src_ctx = src.current_context();
        let dst_ctx = dst.current_context();

        (&mut *src_ctx).switch(&mut *dst_ctx);
    }
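
    // Note: in `switch()` above, both `inner` locks are released at the end
    // of the inner scope, before the low-level context switch is performed.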

    /// Suspends current process with a "waiting" status
    pub fn enter_wait(&self) {
        let drop = {
            let mut lock = self.inner.lock();
            let drop = lock.state == State::Running;
            lock.state = State::Waiting;
            SCHED.dequeue(lock.id);
            drop
        };
        if drop {
            SCHED.switch(true);
        }
    }

    /// Changes process wait condition status
    pub fn set_wait_flag(&self, v: bool) {
        self.inner.lock().wait_flag = v;
    }

    /// Returns `true` if process wait condition has not been reached
    pub fn wait_flag(&self) -> bool {
        self.inner.lock().wait_flag
    }

    /// Returns the process ID
    pub fn id(&self) -> Pid {
        self.inner.lock().id
    }

    /// Creates a new kernel process
    pub fn new_kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<ProcessRef, Errno> {
        let id = Pid::new_kernel();
        let res = Rc::new(Self {
            ctx: UnsafeCell::new(Context::kernel(entry as usize, arg)),
            signal_ctx: UnsafeCell::new(Context::empty()),
            io: IrqSafeSpinLock::new(ProcessIo::new()),
            exit_wait: Wait::new(),
            signal_state: AtomicU32::new(0),
            signal_pending: AtomicU32::new(0),
            inner: IrqSafeSpinLock::new(ProcessInner {
                signal_entry: 0,
                signal_stack: 0,
                id,
                exit: None,
                space: None,
                wait_flag: false,
                state: State::Ready,
            }),
        });
        debugln!("New kernel process: {}", id);
        assert!(PROCESSES.lock().insert(id, res.clone()).is_none());
        Ok(res)
    }
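
    // Usage sketch, assuming a kernel-thread entry point `kthread_main`
    // (hypothetical) that never returns:
    //
    //     let proc = Process::new_kernel(kthread_main, 0)?;
    //     SCHED.enqueue(proc.id());
    //
    // `new_kernel()` only registers the process in PROCESSES; the caller
    // decides when to make it runnable.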

    /// Creates a "fork" of the process, cloning its address space and
    /// resources
    pub fn fork(&self, frame: &mut ExceptionFrame) -> Result<Pid, Errno> {
        let src_io = self.io.lock();
        let mut src_inner = self.inner.lock();

        let dst_id = Pid::new_user();
        let dst_space = src_inner.space.as_mut().unwrap().fork()?;
        let dst_space_phys = (dst_space as *mut _ as usize) - mem::KERNEL_OFFSET;
        let dst_ttbr0 = dst_space_phys | ((dst_id.asid() as usize) << 48);

        let dst = Rc::new(Self {
            ctx: UnsafeCell::new(Context::fork(frame, dst_ttbr0)),
            signal_ctx: UnsafeCell::new(Context::empty()),
            io: IrqSafeSpinLock::new(src_io.fork()?),
            exit_wait: Wait::new(),
            signal_state: AtomicU32::new(0),
            signal_pending: AtomicU32::new(0),
            inner: IrqSafeSpinLock::new(ProcessInner {
                signal_entry: 0,
                signal_stack: 0,
                id: dst_id,
                exit: None,
                space: Some(dst_space),
                state: State::Ready,
                wait_flag: false,
            }),
        });

        debugln!("Process {} forked into {}", src_inner.id, dst_id);
        assert!(PROCESSES.lock().insert(dst_id, dst).is_none());
        SCHED.enqueue(dst_id);

        Ok(dst_id)
    }
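
    // In `fork()` above, `dst_ttbr0` combines the physical address of the
    // child's translation table with the child's ASID shifted into bits 48..,
    // matching the ASID field position of the AArch64 TTBR0_EL1 register.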

    /// Terminates a process.
    pub fn exit<I: Into<ExitCode>>(&self, status: I) {
        let status = status.into();
        let drop = {
            let mut lock = self.inner.lock();
            let drop = lock.state == State::Running;
            infoln!("Process {} is exiting: {:?}", lock.id, status);
            assert!(lock.exit.is_none());
            lock.exit = Some(status);
            lock.state = State::Finished;

            if let Some(space) = lock.space.take() {
                unsafe {
                    Space::release(space);
                    asm!("tlbi aside1, {}", in(reg) ((lock.id.asid() as usize) << 48));
                }
            }

            self.io.lock().handle_exit();

            SCHED.dequeue(lock.id);
            drop
        };
        self.exit_wait.wakeup_all();
        if drop {
            SCHED.switch(true);
            panic!("This code should never run");
        }
    }
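
    // The `tlbi aside1` in `exit()` invalidates all TLB entries tagged with
    // the exiting process' ASID (passed in bits 48.. of the operand), so no
    // stale translations outlive the released address space.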

    fn collect(&self) -> Option<ExitCode> {
        let lock = self.inner.lock();
        if lock.state == State::Finished {
            lock.exit
        } else {
            None
        }
    }

    /// Waits for a process to finish and reaps it
    pub fn waitpid(pid: Pid) -> Result<ExitCode, Errno> {
        loop {
            let proc = PROCESSES
                .lock()
                .get(&pid)
                .cloned()
                .ok_or(Errno::DoesNotExist)?;

            if let Some(r) = proc.collect() {
                // TODO drop the process struct itself
                PROCESSES.lock().remove(&proc.id());
                debugln!("pid {} has {} refs", proc.id(), Rc::strong_count(&proc));
                return Ok(r);
            }

            proc.exit_wait.wait(None)?;
        }
    }
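
    // `waitpid()` loops: if the target has not finished yet, the caller
    // blocks on the target's `exit_wait` queue and re-checks after being
    // woken by `wakeup_all()` in `exit()`.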

    /// Loads a new program into current process address space
    pub fn execve<F: FnOnce(&mut Space) -> Result<usize, Errno>>(
        loader: F,
        arg: usize,
    ) -> Result<(), Errno> {
        unsafe {
            // Run with interrupts disabled
            asm!("msr daifset, #2");
        }

        let proc = SCHED.current_process();
        let mut lock = proc.inner.lock();
        if lock.id.is_kernel() {
            let mut proc_lock = PROCESSES.lock();
            let old_pid = lock.id;
            assert!(
                proc_lock.remove(&old_pid).is_some(),
                "Failed to downgrade kernel process (remove kernel pid)"
            );
            lock.id = Pid::new_user();
            debugln!(
                "Process downgrades from kernel to user: {} -> {}",
                old_pid,
                lock.id
            );
            assert!(proc_lock.insert(lock.id, proc.clone()).is_none());
            unsafe {
                SCHED.hack_current_pid(lock.id);
            }
        } else {
            // Invalidate user ASID
            let input = (lock.id.asid() as usize) << 48;
            unsafe {
                asm!("tlbi aside1, {}", in(reg) input);
            }
        }

        proc.io.lock().handle_cloexec();

        let new_space = Space::alloc_empty()?;
        let new_space_phys = (new_space as *mut _ as usize) - mem::KERNEL_OFFSET;

        let ustack_virt_bottom = Self::USTACK_VIRT_TOP - Self::USTACK_PAGES * mem::PAGE_SIZE;
        for i in 0..Self::USTACK_PAGES {
            let page = phys::alloc_page(PageUsage::UserPrivate).unwrap();
            let flags = MapAttributes::SH_OUTER
                | MapAttributes::NOT_GLOBAL
                | MapAttributes::UXN
                | MapAttributes::PXN
                | MapAttributes::AP_BOTH_READWRITE;
            new_space
                .map(ustack_virt_bottom + i * mem::PAGE_SIZE, page, flags)
                .unwrap();
        }

        let entry = loader(new_space)?;

        debugln!("Will now enter at {:#x}", entry);
        // TODO drop old address space
        lock.space = Some(new_space);

        unsafe {
            // TODO drop old context
            let ctx = proc.ctx.get();

            ctx.write(Context::user(
                entry,
                arg,
                new_space_phys | ((lock.id.asid() as usize) << 48),
                Self::USTACK_VIRT_TOP,
            ));

            assert_eq!(lock.state, State::Running);

            drop(lock);

            (*ctx).enter();
        }
    }
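
    // `execve()` runs with IRQs masked: a kernel process is first downgraded
    // to a fresh user PID (otherwise the old ASID's TLB entries are flushed),
    // close-on-exec descriptors are handled, a new address space is built
    // with a USTACK_PAGES-page user stack just below USTACK_VIRT_TOP, the
    // `loader` callback populates it, and finally the process context is
    // rewritten and entered.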
}

impl Drop for Process {
    fn drop(&mut self) {
        debugln!("Dropping process {}", self.id());
    }
}