proc,dev: split processes into threads, small fb fixes

This commit is contained in:
Mark Poliakov 2023-11-21 14:16:18 +02:00
parent 4e1560f38b
commit da62b5b533
31 changed files with 1576 additions and 941 deletions

View File

@ -2,7 +2,7 @@ use core::{cell::RefCell, marker::PhantomData};
use alloc::boxed::Box;
use vfs::{Vnode, VnodeImpl, VnodeKind, VnodeRef, DIR_POSITION_FROM_CACHE};
use vfs::{CreateInfo, Vnode, VnodeImpl, VnodeKind, VnodeRef, DIR_POSITION_FROM_CACHE};
use yggdrasil_abi::{
error::Error,
io::{FileAttr, FileMode, FileType, OpenOptions},
@ -11,16 +11,27 @@ use yggdrasil_abi::{
use crate::{block::BlockAllocator, bvec::BVec, file::FileNode};
pub(crate) struct DirectoryNode<A: BlockAllocator> {
uid: u32,
gid: u32,
mode: FileMode,
_pd: PhantomData<A>,
}
impl<A: BlockAllocator> VnodeImpl for DirectoryNode<A> {
fn create(&self, at: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error> {
let child = Vnode::new(name, kind);
match kind {
VnodeKind::Directory => child.set_data(Box::new(Self { _pd: PhantomData })),
fn create(&self, at: &VnodeRef, info: &CreateInfo) -> Result<VnodeRef, Error> {
let child = Vnode::new(info.name, info.kind);
match info.kind {
VnodeKind::Directory => child.set_data(Box::new(Self {
uid: info.uid,
gid: info.gid,
mode: info.mode,
_pd: PhantomData,
})),
VnodeKind::Regular => child.set_data(Box::new(FileNode {
data: RefCell::new(BVec::<A>::new()),
uid: info.uid,
gid: info.gid,
mode: info.mode,
})),
_ => todo!(),
}
@ -28,7 +39,7 @@ impl<A: BlockAllocator> VnodeImpl for DirectoryNode<A> {
Ok(child)
}
fn open(&self, _node: &VnodeRef, _opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
fn open(&self, _node: &VnodeRef, _opts: OpenOptions) -> Result<u64, Error> {
Ok(DIR_POSITION_FROM_CACHE)
}
@ -43,14 +54,19 @@ impl<A: BlockAllocator> VnodeImpl for DirectoryNode<A> {
fn metadata(&self, _node: &VnodeRef) -> Result<FileAttr, Error> {
Ok(FileAttr {
size: 0,
mode: FileMode::default_dir(),
mode: self.mode,
ty: FileType::Directory,
})
}
}
impl<A: BlockAllocator> DirectoryNode<A> {
pub fn new() -> Self {
Self { _pd: PhantomData }
pub fn new(uid: u32, gid: u32, mode: FileMode) -> Self {
Self {
uid,
gid,
mode,
_pd: PhantomData,
}
}
}

View File

@ -10,10 +10,13 @@ use crate::{block::BlockAllocator, bvec::BVec};
pub(crate) struct FileNode<A: BlockAllocator> {
pub(crate) data: RefCell<BVec<'static, A>>,
pub(crate) uid: u32,
pub(crate) gid: u32,
pub(crate) mode: FileMode,
}
impl<A: BlockAllocator> VnodeImpl for FileNode<A> {
fn open(&self, _node: &VnodeRef, opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
fn open(&self, _node: &VnodeRef, opts: OpenOptions) -> Result<u64, Error> {
if opts.contains(OpenOptions::APPEND) {
Ok(self.data.borrow().size() as u64)
} else {
@ -40,7 +43,7 @@ impl<A: BlockAllocator> VnodeImpl for FileNode<A> {
fn metadata(&self, _node: &VnodeRef) -> Result<FileAttr, Error> {
Ok(FileAttr {
size: self.data.borrow().size() as u64,
mode: FileMode::default_file(),
mode: self.mode,
ty: FileType::File,
})
}

View File

@ -17,8 +17,8 @@ use core::{
use alloc::{boxed::Box, rc::Rc};
use block::BlockAllocator;
use vfs::{BlockDevice, Filesystem, Vnode, VnodeKind, VnodeRef};
use yggdrasil_abi::{error::Error, path};
use vfs::{BlockDevice, CreateInfo, Filesystem, Vnode, VnodeKind, VnodeRef};
use yggdrasil_abi::{error::Error, io::FileMode, path};
use crate::{bvec::BVec, dir::DirectoryNode, file::FileNode, tar::TarIterator};
@ -58,6 +58,9 @@ mod dir;
mod file;
mod tar;
const DEFAULT_FILE_MODE: FileMode = FileMode::new(0o644);
const DEFAULT_DIR_MODE: FileMode = FileMode::new(0o755);
/// In-memory read/write filesystem
pub struct MemoryFilesystem<A: BlockAllocator> {
root: RefCell<Option<VnodeRef>>,
@ -101,6 +104,10 @@ impl<A: BlockAllocator> MemoryFilesystem<A> {
}
let node = self.create_node_initial(element, kind);
// TODO require .tar's to have all the directories present to extract their metadata?
if kind == VnodeKind::Directory {
node.set_data(Box::new(DirectoryNode::<A>::new(0, 0, DEFAULT_DIR_MODE)));
}
at.add_child(node.clone());
node
@ -122,11 +129,13 @@ impl<A: BlockAllocator> MemoryFilesystem<A> {
let node = Vnode::new(name, kind);
node.set_fs(self.clone());
match kind {
VnodeKind::Directory => node.set_data(Box::new(DirectoryNode::<A>::new())),
VnodeKind::Regular => {}
_ => todo!(),
}
// match kind {
// VnodeKind::Directory => node.set_data(Box::new(DirectoryNode::<A>::new(
// info.uid, info.gid, info.mode,
// ))),
// VnodeKind::Regular => {}
// _ => todo!(),
// }
node
}
@ -134,7 +143,7 @@ impl<A: BlockAllocator> MemoryFilesystem<A> {
fn from_slice_internal(self: &Rc<Self>, tar_data: &'static [u8]) -> Result<VnodeRef, Error> {
let root = Vnode::new("", VnodeKind::Directory);
root.set_fs(self.clone());
root.set_data(Box::new(DirectoryNode::<A>::new()));
root.set_data(Box::new(DirectoryNode::<A>::new(0, 0, DEFAULT_DIR_MODE)));
// 1. Create paths in tar
for item in TarIterator::new(tar_data) {
@ -155,14 +164,23 @@ impl<A: BlockAllocator> MemoryFilesystem<A> {
let Ok((hdr, data)) = item else {
panic!("Unreachable");
};
if hdr.node_kind() == VnodeKind::Regular {
let data = data.unwrap();
let path = hdr.name.as_str()?.trim_matches('/');
let node = self.make_path(&root, path, VnodeKind::Directory, false)?;
let path = hdr.name.as_str()?.trim_matches('/');
let node = self.make_path(&root, path, VnodeKind::Directory, false)?;
assert_eq!(node.kind(), hdr.node_kind());
if hdr.node_kind() == VnodeKind::Regular {
let uid = usize::from(&hdr.uid).try_into().unwrap();
let gid = usize::from(&hdr.gid).try_into().unwrap();
let mode = convert_mode(usize::from(&hdr.mode))?;
let data = data.unwrap();
let bvec = BVec::<A>::try_from(data)?;
assert_eq!(bvec.size(), data.len());
node.set_data(Box::new(FileNode {
uid,
gid,
mode,
data: RefCell::new(bvec),
}));
}
@ -190,13 +208,17 @@ impl<A: BlockAllocator> MemoryFilesystem<A> {
_pd: PhantomData,
});
let root = Vnode::new("", VnodeKind::Directory);
root.set_data(Box::new(DirectoryNode::<A>::new()));
root.set_data(Box::new(DirectoryNode::<A>::new(0, 0, DEFAULT_DIR_MODE)));
root.set_fs(fs.clone());
fs.root.replace(Some(root));
fs
}
}
fn convert_mode(mode: usize) -> Result<FileMode, Error> {
Ok(FileMode::new(mode as u32 & 0o777))
}
#[cfg(test)]
mod tests {
use core::sync::atomic::Ordering;
@ -225,7 +247,7 @@ mod tests {
assert_eq!(node.kind(), VnodeKind::Regular);
assert_eq!(node.size().unwrap(), expected_data.len() as u64);
let file = node.open(OpenOptions::READ, FileMode::empty()).unwrap();
let file = node.open(OpenOptions::READ).unwrap();
let mut buf = [0; 512];
assert_eq!(
@ -257,7 +279,7 @@ mod tests {
// Write to the file
{
let node = ioctx.find(None, "/test1.txt", false, false).unwrap();
let file = node.open(OpenOptions::WRITE, FileMode::empty()).unwrap();
let file = node.open(OpenOptions::WRITE).unwrap();
assert_eq!(file.borrow_mut().write(b"Hello").unwrap(), 5);
}
@ -267,7 +289,7 @@ mod tests {
// Read back
{
let node = ioctx.find(None, "/test1.txt", false, false).unwrap();
let file = node.open(OpenOptions::READ, FileMode::empty()).unwrap();
let file = node.open(OpenOptions::READ).unwrap();
let mut buf = [0; 512];
assert_eq!(file.borrow_mut().read(&mut buf).unwrap(), old_data.len());
@ -277,50 +299,50 @@ mod tests {
}
}
#[test]
fn test_memfs_create_and_write() {
test_allocator_with_counter!(A_COUNTER, A);
// #[test]
// fn test_memfs_create_and_write() {
// test_allocator_with_counter!(A_COUNTER, A);
let fs = MemoryFilesystem::<A>::empty();
let root = fs.root().unwrap();
// let fs = MemoryFilesystem::<A>::empty();
// let root = fs.root().unwrap();
let ioctx = IoContext::new(root.clone());
// let ioctx = IoContext::new(root.clone());
// Create, write, seek and read file
{
// TODO CREATE option handling
root.create("test1.txt", VnodeKind::Regular).unwrap();
// // Create, write, seek and read file
// {
// // TODO CREATE option handling
// root.create("test1.txt", VnodeKind::Regular).unwrap();
let file = ioctx
.open(
None,
"/test1.txt",
OpenOptions::WRITE | OpenOptions::READ,
FileMode::empty(),
)
.unwrap();
// let file = ioctx
// .open(
// None,
// "/test1.txt",
// OpenOptions::WRITE | OpenOptions::READ,
// FileMode::empty(),
// )
// .unwrap();
let write_data = [1, 2, 3, 4];
let mut read_data = [0; 512];
// let write_data = [1, 2, 3, 4];
// let mut read_data = [0; 512];
let mut file = file.borrow_mut();
assert_eq!(file.write(&write_data).unwrap(), write_data.len());
assert_eq!(file.seek(SeekFrom::Start(0)).unwrap(), 0);
assert_eq!(file.read(&mut read_data).unwrap(), write_data.len());
assert_eq!(&read_data[..write_data.len()], &write_data[..]);
}
// let mut file = file.borrow_mut();
// assert_eq!(file.write(&write_data).unwrap(), write_data.len());
// assert_eq!(file.seek(SeekFrom::Start(0)).unwrap(), 0);
// assert_eq!(file.read(&mut read_data).unwrap(), write_data.len());
// assert_eq!(&read_data[..write_data.len()], &write_data[..]);
// }
// Create a directory
{
// TODO read directory
root.create("dir1", VnodeKind::Directory).unwrap();
// // Create a directory
// {
// // TODO read directory
// root.create("dir1", VnodeKind::Directory).unwrap();
let dir1 = ioctx.find(None, "/dir1", false, false).unwrap();
let node = dir1.create("file1.txt", VnodeKind::Regular).unwrap();
assert!(Rc::ptr_eq(
&ioctx.find(None, "/dir1/file1.txt", false, false).unwrap(),
&node
));
}
}
// let dir1 = ioctx.find(None, "/dir1", false, false).unwrap();
// let node = dir1.create("file1.txt", VnodeKind::Regular).unwrap();
// assert!(Rc::ptr_eq(
// &ioctx.find(None, "/dir1/file1.txt", false, false).unwrap(),
// &node
// ));
// }
// }
}

View File

@ -20,9 +20,9 @@ pub(crate) struct TarIterator<'a> {
#[repr(packed)]
pub(crate) struct TarEntry {
pub name: TarString<100>,
_mode: OctalField<8>,
_uid: OctalField<8>,
_gid: OctalField<8>,
pub mode: OctalField<8>,
pub uid: OctalField<8>,
pub gid: OctalField<8>,
pub size: OctalField<12>,
_mtime: OctalField<12>,
_checksum: OctalField<8>,

View File

@ -23,7 +23,7 @@ impl CharDeviceWrapper {
}
impl VnodeImpl for CharDeviceWrapper {
fn open(&self, _node: &VnodeRef, _opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
fn open(&self, _node: &VnodeRef, _opts: OpenOptions) -> Result<u64, Error> {
Ok(0)
}

View File

@ -112,7 +112,7 @@ impl IoContext {
at: Option<VnodeRef>,
path: &str,
opts: OpenOptions,
mode: FileMode,
_mode: FileMode,
) -> Result<FileRef, Error> {
let node = match self.find(at.clone(), path, true, true) {
Err(Error::DoesNotExist) => {
@ -122,7 +122,7 @@ impl IoContext {
o => o,
}?;
node.open(opts, mode)
node.open(opts)
}
pub fn root(&self) -> &VnodeRef {

View File

@ -22,7 +22,7 @@ pub use self::char::{CharDevice, CharDeviceWrapper};
pub use file::{File, FileFlags, FileRef};
pub use fs::Filesystem;
pub use ioctx::IoContext;
pub use node::{Vnode, VnodeDump, VnodeImpl, VnodeKind, VnodeRef, VnodeWeak};
pub use node::{CreateInfo, Vnode, VnodeDump, VnodeImpl, VnodeKind, VnodeRef, VnodeWeak};
pub const DIR_POSITION_FROM_CACHE: u64 = u64::MAX;

View File

@ -50,9 +50,17 @@ pub struct Vnode {
target: RefCell<Option<VnodeRef>>,
}
pub struct CreateInfo<'a> {
pub name: &'a str,
pub kind: VnodeKind,
pub mode: FileMode,
pub uid: u32,
pub gid: u32,
}
#[allow(unused_variables)]
pub trait VnodeImpl {
fn create(&self, at: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error> {
fn create(&self, at: &VnodeRef, info: &CreateInfo) -> Result<VnodeRef, Error> {
Err(Error::NotImplemented)
}
@ -60,7 +68,7 @@ pub trait VnodeImpl {
Err(Error::NotImplemented)
}
fn open(&self, node: &VnodeRef, opts: OpenOptions, mode: FileMode) -> Result<u64, Error> {
fn open(&self, node: &VnodeRef, opts: OpenOptions) -> Result<u64, Error> {
Err(Error::NotImplemented)
}
@ -265,7 +273,7 @@ impl Vnode {
}
// Node operations
pub fn open(self: &VnodeRef, flags: OpenOptions, mode: FileMode) -> Result<FileRef, Error> {
pub fn open(self: &VnodeRef, flags: OpenOptions) -> Result<FileRef, Error> {
let mut open_flags = FileFlags::empty();
if flags.contains(OpenOptions::READ) {
@ -280,7 +288,7 @@ impl Vnode {
}
if let Some(data) = self.data() {
let pos = data.open(self, flags, mode)?;
let pos = data.open(self, flags)?;
Ok(File::normal(self.clone(), pos, open_flags))
} else {
todo!()
@ -293,7 +301,7 @@ impl Vnode {
}
if let Some(data) = self.data() {
let pos = data.open(self, OpenOptions::READ, FileMode::empty())?;
let pos = data.open(self, OpenOptions::READ)?;
Ok(File::directory(self.clone(), pos))
} else {
// TODO: some options here?
@ -309,22 +317,22 @@ impl Vnode {
}
}
pub fn create(self: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error> {
pub fn create(self: &VnodeRef, info: &CreateInfo) -> Result<VnodeRef, Error> {
if self.kind != VnodeKind::Directory {
todo!();
}
if name.contains('/') {
if info.name.contains('/') {
return Err(Error::InvalidArgument);
}
match self.lookup_or_load(name) {
match self.lookup_or_load(info.name) {
Err(Error::DoesNotExist) => {}
Ok(_) => return Err(Error::AlreadyExists),
e => return e,
};
if let Some(data) = self.data() {
let vnode = data.create(self, name, kind)?;
let vnode = data.create(self, info)?;
if let Some(fs) = self.fs() {
vnode.set_fs(fs);
}

View File

@ -19,7 +19,7 @@ use crate::{
arch::{aarch64::cpu::Cpu, Architecture, ArchitectureImpl},
debug::LogLevel,
syscall::raw_syscall_handler,
task::{context::TaskFrame, process::Process},
task::{context::TaskFrame, thread::Thread},
};
use super::ARCHITECTURE;
@ -128,13 +128,14 @@ fn dump_irrecoverable_exception(frame: &ExceptionFrame, ec: u64, iss: u64) {
log_print_raw!(LogLevel::Fatal, "Register dump:\n");
log_print_raw!(LogLevel::Fatal, "{:?}\n", frame);
if let Some(cpu) = cpu {
let current = cpu.queue().current_process();
// XXX
// if let Some(cpu) = cpu {
// let current = cpu.queue().current_process();
if let Some(current) = current {
log_print_raw!(LogLevel::Fatal, "In process {}\n", current.id());
}
}
// if let Some(current) = current {
// log_print_raw!(LogLevel::Fatal, "In process {}\n", current.id());
// }
// }
match ec {
// Data abort from lower level
@ -218,8 +219,8 @@ extern "C" fn __aa64_el0_sync_handler(frame: *mut ExceptionFrame) {
// );
unsafe {
let process = Process::current();
process.handle_signal(frame);
let thread = Thread::current();
thread.handle_pending_signals(frame);
}
}
@ -253,8 +254,8 @@ extern "C" fn __aa64_el0_irq_handler(frame: *mut ExceptionFrame) {
// log_print_raw!(LogLevel::Fatal, "sp = {:#x}\n", frame.sp_el0);
unsafe {
let process = Process::current();
process.handle_signal(frame);
let thread = Thread::current();
thread.handle_pending_signals(frame);
}
}
@ -321,23 +322,28 @@ fn el0_sync_inner(frame: &mut ExceptionFrame) {
}
// BRK in AArch64
0b111100 => {
let proc = Process::current();
warnln!("Process #{} hit a breakpoint", proc.id());
proc.raise_signal(Signal::Aborted);
let thread = Thread::current();
warnln!(
"Thread {} {:?} hit a breakpoint",
thread.id(),
thread.name()
);
thread.raise_signal(Signal::Aborted);
}
_ => {
let iss = esr_el1 & 0x1FFFFFF;
if ec == 0b100100 {
// Data abort from lower level
let proc = Process::current();
let thread = Thread::current();
warnln!(
"Data abort in #{} at {:#x} with address {:#x}",
proc.id(),
"Data abort in {} {:?} at {:#x} with address {:#x}",
thread.id(),
thread.name(),
ELR_EL1.get(),
FAR_EL1.get()
);
proc.raise_signal(Signal::MemoryAccessViolation);
thread.raise_signal(Signal::MemoryAccessViolation);
return;
}

View File

@ -4,7 +4,7 @@ use core::arch::global_asm;
use crate::{
arch::{x86_64::cpu::Cpu, Architecture},
task::process::Process,
task::thread::Thread,
};
use super::{
@ -57,8 +57,8 @@ unsafe extern "C" fn irq_handler(vector: usize, frame: *mut IrqFrame) {
.handle_specific_irq(vector);
cpu.local_apic().clear_interrupt();
if let Some(process) = Process::get_current() {
process.handle_signal(frame);
if let Some(thread) = Thread::get_current() {
thread.handle_pending_signals(frame);
}
}
@ -69,8 +69,8 @@ unsafe extern "C" fn local_timer_irq_handler(frame: *mut IrqFrame) {
cpu.local_apic().clear_interrupt();
cpu.queue().yield_cpu();
if let Some(process) = Process::get_current() {
process.handle_signal(frame);
if let Some(thread) = Thread::get_current() {
thread.handle_pending_signals(frame);
}
}

View File

@ -5,7 +5,7 @@ use abi::{arch::SavedFrame, primitive_enum, process::Signal};
use crate::{
arch::x86_64::apic,
task::{context::TaskFrame, process::Process, Cpu},
task::{context::TaskFrame, process::Process, thread::Thread, Cpu},
};
use super::ARCHITECTURE;
@ -265,9 +265,9 @@ impl Entry {
static mut IDT: [Entry; SIZE] = [Entry::NULL; SIZE];
fn user_exception_inner(kind: ExceptionKind, frame: &ExceptionFrame) {
let process = Process::current();
let thread = Thread::current();
warnln!("{:?} in #{} {:?}", kind, process.id(), process.name());
warnln!("{:?} in {} {:?}", kind, thread.id(), thread.name());
warnln!("CS:RIP = {:#x}:{:#x}", frame.cs, frame.rip);
warnln!("SS:RSP = {:#x}:{:#x}", frame.ss, frame.rsp);
@ -280,10 +280,10 @@ fn user_exception_inner(kind: ExceptionKind, frame: &ExceptionFrame) {
warnln!("CR2 = {:#x}", cr2);
process.raise_signal(Signal::MemoryAccessViolation);
thread.raise_signal(Signal::MemoryAccessViolation);
}
ExceptionKind::InvalidOpcode => {
process.raise_signal(Signal::Aborted);
thread.raise_signal(Signal::Aborted);
}
_ => todo!("No handler for exception: {:?}", kind),
}
@ -316,7 +316,7 @@ extern "C" fn __x86_64_exception_handler(frame: *mut ExceptionFrame) {
user_exception_inner(kind, frame);
unsafe {
Process::current().handle_signal(frame);
Thread::current().handle_pending_signals(frame);
}
} else {
if kind == ExceptionKind::NonMaskableInterrupt {

View File

@ -8,7 +8,7 @@ use tock_registers::interfaces::{ReadWriteable, Writeable};
use crate::{
arch::x86_64::registers::{MSR_IA32_EFER, MSR_IA32_LSTAR, MSR_IA32_SFMASK, MSR_IA32_STAR},
syscall::raw_syscall_handler,
task::{context::TaskFrame, process::Process},
task::{context::TaskFrame, process::Process, thread::Thread},
};
/// Set of registers saved when taking a syscall instruction
@ -125,10 +125,10 @@ fn syscall_inner(frame: &mut SyscallFrame) {
extern "C" fn __x86_64_syscall_handler(frame: *mut SyscallFrame) {
let frame = unsafe { &mut *frame };
let process = Process::current();
let thread = Thread::current();
syscall_inner(frame);
unsafe {
process.handle_signal(frame);
thread.handle_pending_signals(frame);
}
}

View File

@ -4,7 +4,7 @@ use core::fmt::{self, Arguments};
use abi::error::Error;
use kernel_util::util::StaticVector;
use crate::sync::IrqSafeSpinlock;
use crate::{sync::IrqSafeSpinlock, task::process::Process};
const MAX_DEBUG_SINKS: usize = 4;
@ -13,6 +13,8 @@ struct SimpleLogger;
/// Defines the severity of the message
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LogLevel {
/// Very verbose low-level debugging information
Trace,
/// Debugging and verbose information
Debug,
/// General information about transitions in the system state
@ -93,11 +95,25 @@ impl log::Log for SimpleLogger {
}
fn log(&self, record: &log::Record) {
let file = record.file().unwrap_or("<???>");
let line = record.line().unwrap_or(0);
match record.level() {
log::Level::Warn => warnln!("{}", record.args()),
log::Level::Info => infoln!("{}", record.args()),
log::Level::Trace | log::Level::Debug => debugln!("{}", record.args()),
log::Level::Error => errorln!("{}", record.args()),
log::Level::Error => {
log_print_raw!(LogLevel::Error, "{}:{}: {}\n", file, line, record.args())
}
log::Level::Warn => {
log_print_raw!(LogLevel::Warning, "{}:{}: {}\n", file, line, record.args())
} // warnln!("{}", record.args()),
log::Level::Info => {
log_print_raw!(LogLevel::Info, "{}:{}: {}\n", file, line, record.args())
}
log::Level::Debug => {
log_print_raw!(LogLevel::Debug, "{}:{}: {}\n", file, line, record.args())
}
log::Level::Trace => {
log_print_raw!(LogLevel::Trace, "{}:{}: {}\n", file, line, record.args())
}
}
}
@ -107,6 +123,7 @@ impl log::Log for SimpleLogger {
impl LogLevel {
fn log_prefix(self) -> &'static str {
match self {
LogLevel::Trace => "",
LogLevel::Debug => "",
LogLevel::Info => "\x1b[36m\x1b[1m",
LogLevel::Warning => "\x1b[33m\x1b[1m",
@ -117,6 +134,7 @@ impl LogLevel {
fn log_suffix(self) -> &'static str {
match self {
LogLevel::Trace => "",
LogLevel::Debug => "",
LogLevel::Info => "\x1b[0m",
LogLevel::Warning => "\x1b[0m",
@ -166,6 +184,17 @@ pub fn add_sink(sink: &'static dyn DebugSink, level: LogLevel) {
.push(DebugSinkWrapper { inner: sink, level });
}
/// Print a trace message coming from a process
pub fn program_trace(process: &Process, message: &str) {
log_print_raw!(
LogLevel::Trace,
"[trace {} {:?}] {}\n",
process.id(),
process.name(),
message
);
}
/// Resets the debugging terminal by clearing it
pub fn init() {
log::set_logger(&LOGGER)

View File

@ -137,6 +137,12 @@ pub trait DisplayConsole {
self.flush(&mut state);
}
}
/// Returns the dimensions of the console in chars: (rows, columns)
fn text_dimensions(&self) -> (usize, usize) {
let state = self.state().lock();
(state.buffer.height as _, CONSOLE_ROW_LEN as _)
}
}
impl ConsoleChar {
@ -275,6 +281,11 @@ impl ConsoleBuffer {
self.rows[row as usize].chars[col as usize] = c;
}
#[inline(never)]
fn set_dirty(&mut self, row: u32) {
self.rows[row as usize].dirty = 1;
}
/// Returns an iterator over dirty rows, while clearing dirty flag for them
pub fn flush_rows(&mut self) -> RowIter {
RowIter {
@ -289,6 +300,21 @@ impl ConsoleBuffer {
}
}
fn clear_row(&mut self, row: u32, bg: ColorAttribute) {
self.rows[row as usize].dirty = 1;
self.rows[row as usize].clear(bg);
}
fn erase_in_row(&mut self, row: u32, start: usize, bg: ColorAttribute) {
self.rows[row as usize].dirty = 1;
self.rows[row as usize].chars[start..].fill(ConsoleChar::from_parts(
b' ',
DEFAULT_FG_COLOR,
bg,
Attributes::empty(),
));
}
fn scroll_once(&mut self, bg: ColorAttribute) {
self.rows.copy_within(1.., 0);
self.rows[(self.height - 1) as usize].clear(bg);
@ -395,9 +421,22 @@ impl ConsoleState {
self.attributes |= Attributes::BOLD;
}
// Foreground colors
31..=37 => {
30..=39 => {
let vt_color = self.esc_args[0] % 10;
self.fg_color = ColorAttribute::from_vt100(vt_color as u8);
if vt_color == 9 {
self.fg_color = DEFAULT_FG_COLOR;
} else {
self.fg_color = ColorAttribute::from_vt100(vt_color as u8);
}
}
// Background colors
40..=49 => {
let vt_color = self.esc_args[0] % 10;
if vt_color == 9 {
self.bg_color = DEFAULT_BG_COLOR;
} else {
self.bg_color = ColorAttribute::from_vt100(vt_color as u8);
}
}
_ => (),
}
@ -405,9 +444,11 @@ impl ConsoleState {
}
// Move cursor to position
b'f' => {
let row = self.esc_args[1].clamp(1, self.buffer.height) - 1;
let row = self.esc_args[0].clamp(1, self.buffer.height) - 1;
let col = self.esc_args[1].clamp(1, CONSOLE_ROW_LEN as u32) - 1;
self.buffer.set_dirty(row);
self.cursor_row = row;
self.cursor_col = col;
}
@ -423,6 +464,19 @@ impl ConsoleState {
}
_ => (),
},
// Erase in Line
b'K' => match self.esc_args[0] {
// Erase to Right
0 => {
self.buffer
.erase_in_row(self.cursor_row, self.cursor_col as _, self.bg_color);
}
// Erase All
2 => {
self.buffer.clear_row(self.cursor_row, self.bg_color);
}
_ => (),
},
_ => (),
}

View File

@ -18,6 +18,8 @@ struct Inner {
char_height: u32,
width: u32,
height: u32,
cursor_row: u32,
cursor_col: u32,
}
struct DrawGlyph {
@ -61,6 +63,20 @@ impl DisplayConsole for FramebufferConsole {
let mut iter = state.buffer.flush_rows();
let old_cursor_col = inner.cursor_col;
let old_cursor_row = inner.cursor_row;
// New cursor
let cursor_col = state.cursor_col;
let cursor_row = state.cursor_row;
inner.fill_rect(
old_cursor_col * cw,
old_cursor_row * ch,
cw,
ch,
state.bg_color.as_rgba(false),
);
while let Some((row_idx, row)) = iter.next_dirty() {
if row_idx >= inner.height {
break;
@ -70,9 +86,12 @@ impl DisplayConsole for FramebufferConsole {
let glyph = chr.character();
let (fg, bg, attr) = chr.attributes();
let fg = fg.as_rgba(attr.contains(Attributes::BOLD));
let bg = bg.as_rgba(false);
let mut fg = fg.as_rgba(attr.contains(Attributes::BOLD));
let mut bg = bg.as_rgba(false);
if row_idx == cursor_row && col_idx == cursor_col as usize {
core::mem::swap(&mut fg, &mut bg);
}
inner.draw_glyph(
font,
DrawGlyph {
@ -88,9 +107,6 @@ impl DisplayConsole for FramebufferConsole {
}
// Place cursor
let cursor_row = state.cursor_row;
let cursor_col = state.cursor_col;
inner.fill_rect(
cursor_col * cw,
cursor_row * ch,
@ -98,6 +114,9 @@ impl DisplayConsole for FramebufferConsole {
ch,
state.fg_color.as_rgba(false),
);
inner.cursor_col = cursor_col;
inner.cursor_row = cursor_row;
}
}
@ -120,6 +139,8 @@ impl FramebufferConsole {
height: dim.height / char_height,
char_width,
char_height,
cursor_row: 0,
cursor_col: 0,
};
Ok(Self {
@ -149,6 +170,7 @@ impl Inner {
while x < font.width() {
let v = if glyph[0] & mask != 0 { g.fg } else { g.bg };
let v = v | 0xFF000000;
fb[g.sy + y][(g.sx + x) as usize] = v;
mask >>= 1;
x += 1;

View File

@ -92,6 +92,7 @@ impl DisplayDevice for LinearFramebuffer {
impl FramebufferAccess {
/// Copies `count` rows starting from `src_row` to `dst_row`
#[optimize(speed)]
pub fn copy_rows(&mut self, src_row: u32, dst_row: u32, count: u32) {
use core::ffi::c_void;
extern "C" {

View File

@ -22,6 +22,7 @@ use crate::{
fs::devfs::{self, CharDeviceType},
mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
sync::IrqSafeSpinlock,
task::process::ProcessId,
};
register_bitfields! {
@ -127,7 +128,12 @@ impl CharDevice for Pl011 {
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
match req {
&mut DeviceRequest::SetTerminalGroup(id) => {
self.set_signal_group(id as _);
self.set_signal_group(ProcessId::from(id));
Ok(())
}
DeviceRequest::SetTerminalOptions(config) => self.context.set_config(config),
DeviceRequest::GetTerminalOptions(config) => {
config.write(self.context.config());
Ok(())
}
_ => Err(Error::InvalidArgument),
@ -149,25 +155,25 @@ impl InterruptHandler for Pl011 {
let byte = inner.regs.DR.get();
drop(inner);
if byte == b'\x1b' as u32 {
use crate::task::sched::CpuQueue;
// if byte == b'\x1b' as u32 {
// use crate::task::sched::CpuQueue;
for (i, queue) in CpuQueue::all().enumerate() {
log_print_raw!(LogLevel::Fatal, "queue{}:\n", i);
let lock = unsafe { queue.grab() };
for item in lock.iter() {
log_print_raw!(
LogLevel::Fatal,
"* {} {:?} {:?}\n",
item.id(),
item.name(),
item.state()
);
}
}
} else {
self.recv_byte(byte as u8);
}
// for (i, queue) in CpuQueue::all().enumerate() {
// log_print_raw!(LogLevel::Fatal, "queue{}:\n", i);
// let lock = unsafe { queue.grab() };
// for item in lock.iter() {
// log_print_raw!(
// LogLevel::Fatal,
// "* {} {:?} {:?}\n",
// item.id(),
// item.name(),
// item.state()
// );
// }
// }
// } else {
self.recv_byte(byte as u8);
// }
true
}

View File

@ -8,15 +8,18 @@ use device_api::serial::SerialDevice;
use crate::{
sync::IrqSafeSpinlock,
task::{process::Process, ProcessId},
task::process::{Process, ProcessId},
util::ring::AsyncRing,
};
#[cfg(feature = "fb_console")]
pub mod combined {
//! Combined console + keyboard terminal device
use crate::block;
use abi::{error::Error, io::DeviceRequest};
use crate::{block, task::process::ProcessId};
use abi::{
error::Error,
io::{DeviceRequest, TerminalSize},
};
use device_api::{input::KeyboardConsumer, serial::SerialDevice};
use vfs::CharDevice;
@ -90,10 +93,19 @@ pub mod combined {
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
match req {
&mut DeviceRequest::SetTerminalGroup(id) => {
self.set_signal_group(id as _);
self.set_signal_group(ProcessId::from(id));
Ok(())
}
DeviceRequest::SetTerminalOptions(config) => self.context.set_config(config),
DeviceRequest::GetTerminalOptions(config) => {
config.write(self.context.config());
Ok(())
}
DeviceRequest::GetTerminalSize(out) => {
let (rows, columns) = self.output.text_dimensions();
out.write(TerminalSize { rows, columns });
Ok(())
}
DeviceRequest::ConfigureTerminal(config) => self.context.set_config(config),
_ => Err(Error::InvalidArgument),
}
}
@ -292,6 +304,10 @@ impl TtyContext {
self.inner.lock().config = config.clone();
Ok(())
}
pub fn config(&self) -> TerminalOptions {
self.inner.lock().config
}
}
// impl<const N: usize> CharRingInner<N> {

View File

@ -23,14 +23,15 @@ pub enum CharDeviceType {
struct DevfsDirectory;
impl VnodeImpl for DevfsDirectory {
fn open(&self, _node: &VnodeRef, _opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
fn open(&self, _node: &VnodeRef, _opts: OpenOptions) -> Result<u64, Error> {
Ok(DIR_POSITION_FROM_CACHE)
}
fn metadata(&self, _node: &VnodeRef) -> Result<FileAttr, Error> {
Ok(FileAttr {
size: 0,
mode: FileMode::default_dir(),
// TODO mutable directory mode
mode: FileMode::from(0o755),
ty: FileType::Directory,
})
}

View File

@ -46,7 +46,7 @@ pub fn kinit() {
let ioctx = IoContext::new(root);
let node = ioctx.find(None, "/init", true, true).unwrap();
let file = node.open(OpenOptions::READ, FileMode::empty()).unwrap();
let file = node.open(OpenOptions::READ).unwrap();
let devfs = devfs::root();
#[cfg(target_arch = "x86_64")]
@ -55,12 +55,14 @@ pub fn kinit() {
let console = ioctx
.find(Some(devfs.clone()), "ttyS0", true, true)
.unwrap();
let stdin = console.open(OpenOptions::READ, FileMode::empty()).unwrap();
let stdout = console.open(OpenOptions::WRITE, FileMode::empty()).unwrap();
let stdin = console.open(OpenOptions::READ).unwrap();
let stdout = console.open(OpenOptions::WRITE).unwrap();
let stderr = stdout.clone();
{
let user_init = proc::exec::load_elf("init", file, &["/init", "xxx"], &[]).unwrap();
// XXX
let (user_init, user_init_main) =
proc::exec::load_elf("init", file, &["/init", "xxx"], &[]).unwrap();
let mut io = user_init.io.lock();
io.set_ioctx(ioctx);
io.set_file(RawFd::STDIN, stdin).unwrap();
@ -70,6 +72,6 @@ pub fn kinit() {
user_init.set_session_terminal(console);
user_init.enqueue_somewhere();
user_init_main.enqueue_somewhere();
}
}

View File

@ -51,78 +51,78 @@ fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
infoln!("{:?}", pi);
if PANIC_HAPPENED
.compare_exchange(false, true, Ordering::Release, Ordering::Acquire)
.is_ok()
{
let id = Cpu::local_id();
// Let other CPUs know we're screwed
unsafe {
ARCHITECTURE
.send_ipi(IpiDeliveryTarget::OtherCpus, CpuMessage::Panic)
.ok();
}
// if PANIC_HAPPENED
// .compare_exchange(false, true, Ordering::Release, Ordering::Acquire)
// .is_ok()
// {
// let id = Cpu::local_id();
// // Let other CPUs know we're screwed
// unsafe {
// ARCHITECTURE
// .send_ipi(IpiDeliveryTarget::OtherCpus, CpuMessage::Panic)
// .ok();
// }
let ap_count = ArchitectureImpl::cpu_count() - 1;
PANIC_HANDLED_FENCE.wait_all(ap_count);
// let ap_count = ArchitectureImpl::cpu_count() - 1;
// PANIC_HANDLED_FENCE.wait_all(ap_count);
unsafe {
hack_locks();
}
// unsafe {
// hack_locks();
// }
log_print_raw!(LogLevel::Fatal, "--- BEGIN PANIC ---\n");
log_print_raw!(LogLevel::Fatal, "In CPU {}\n", Cpu::local_id());
log_print_raw!(LogLevel::Fatal, "Kernel panic ");
// log_print_raw!(LogLevel::Fatal, "--- BEGIN PANIC ---\n");
// log_print_raw!(LogLevel::Fatal, "In CPU {}\n", Cpu::local_id());
// log_print_raw!(LogLevel::Fatal, "Kernel panic ");
if let Some(location) = pi.location() {
log_print_raw!(
LogLevel::Fatal,
"at {}:{}:",
location.file(),
location.line()
);
} else {
log_print_raw!(LogLevel::Fatal, ":");
}
// if let Some(location) = pi.location() {
// log_print_raw!(
// LogLevel::Fatal,
// "at {}:{}:",
// location.file(),
// location.line()
// );
// } else {
// log_print_raw!(LogLevel::Fatal, ":");
// }
log_print_raw!(LogLevel::Fatal, "\n");
// log_print_raw!(LogLevel::Fatal, "\n");
if let Some(msg) = pi.message() {
debug_internal(*msg, LogLevel::Fatal);
log_print_raw!(LogLevel::Fatal, "\n");
}
// if let Some(msg) = pi.message() {
// debug_internal(*msg, LogLevel::Fatal);
// log_print_raw!(LogLevel::Fatal, "\n");
// }
for (i, queue) in CpuQueue::all().enumerate() {
log_print_raw!(LogLevel::Fatal, "queue{}:\n", i);
let lock = unsafe { queue.grab() };
for item in lock.iter() {
log_print_raw!(
LogLevel::Fatal,
"* {} {:?} {:?}\n",
item.id(),
item.name(),
item.state()
);
}
}
// for (i, queue) in CpuQueue::all().enumerate() {
// log_print_raw!(LogLevel::Fatal, "queue{}:\n", i);
// let lock = unsafe { queue.grab() };
// for item in lock.iter() {
// log_print_raw!(
// LogLevel::Fatal,
// "* {} {:?} {:?}\n",
// item.id(),
// item.name(),
// item.state()
// );
// }
// }
log_print_raw!(LogLevel::Fatal, "--- END PANIC ---\n");
// log_print_raw!(LogLevel::Fatal, "--- END PANIC ---\n");
PANIC_FINISHED_FENCE.signal();
while PANIC_SEQUENCE.load(Ordering::Acquire) != id {
core::hint::spin_loop();
}
// PANIC_FINISHED_FENCE.signal();
// while PANIC_SEQUENCE.load(Ordering::Acquire) != id {
// core::hint::spin_loop();
// }
log_print_raw!(LogLevel::Fatal, "X");
// log_print_raw!(LogLevel::Fatal, "X");
flush_consoles();
// flush_consoles();
PANIC_SEQUENCE.fetch_add(1, Ordering::Release);
// PANIC_SEQUENCE.fetch_add(1, Ordering::Release);
unsafe {
ARCHITECTURE.reset();
}
}
// unsafe {
// ARCHITECTURE.reset();
// }
// }
loop {
ArchitectureImpl::wait_for_interrupt();

View File

@ -15,7 +15,7 @@ use crate::{
ForeignPointer,
},
proc,
task::{context::TaskContextImpl, process::Process, TaskContext},
task::{context::TaskContextImpl, process::Process, thread::Thread, TaskContext},
};
pub struct BufferPlacer<'a> {
@ -102,7 +102,7 @@ fn setup_binary<S: Into<String>>(
entry: usize,
args: &[&str],
envs: &[&str],
) -> Result<Arc<Process>, Error> {
) -> Result<(Arc<Process>, Arc<Thread>), Error> {
const USER_STACK_PAGES: usize = 16;
let virt_stack_base = 0x3000000;
@ -140,7 +140,8 @@ fn setup_binary<S: Into<String>>(
let context = TaskContext::user(entry, arg, space.as_address_with_asid(), user_sp)?;
Ok(Process::new_with_context(name, Some(space), context))
Ok(Process::new_with_main(name, Arc::new(space), context))
// Ok(Process::new_with_context(name, Some(space), context))
}
/// Loads an ELF bianary from `file` and sets up all the necessary data/argument memory
@ -149,7 +150,7 @@ pub fn load_elf<S: Into<String>>(
file: FileRef,
args: &[&str],
envs: &[&str],
) -> Result<Arc<Process>, Error> {
) -> Result<(Arc<Process>, Arc<Thread>), Error> {
let space = ProcessAddressSpace::new()?;
let elf_entry = proc::elf::load_elf_from_file(&space, file)?;

View File

@ -3,17 +3,19 @@ use yggdrasil_abi::error::Error;
use crate::{
mem::{validate_user_region, ForeignPointer},
task::process::Process,
task::thread::Thread,
};
// XXX
pub(super) fn arg_buffer_ref<'a>(base: usize, len: usize) -> Result<&'a [u8], Error> {
let proc = Process::current();
let proc = Thread::current();
validate_user_region(proc.address_space(), base, len, false)?;
Ok(unsafe { core::slice::from_raw_parts(base as *const u8, len) })
}
pub(super) fn arg_buffer_mut<'a>(base: usize, len: usize) -> Result<&'a mut [u8], Error> {
let proc = Process::current();
let proc = Thread::current();
validate_user_region(proc.address_space(), base, len, true)?;
Ok(unsafe { core::slice::from_raw_parts_mut(base as *mut u8, len) })
}
@ -28,13 +30,13 @@ pub(super) fn arg_user_str<'a>(base: usize, len: usize) -> Result<&'a str, Error
}
pub(super) fn arg_user_ref<'a, T: Sized>(addr: usize) -> Result<&'a T, Error> {
let proc = Process::current();
let proc = Thread::current();
let ptr = addr as *const T;
unsafe { ptr.validate_user_ptr(proc.address_space()) }
}
pub(super) fn arg_user_mut<'a, T: Sized>(addr: usize) -> Result<&'a mut T, Error> {
let proc = Process::current();
let proc = Thread::current();
let ptr = addr as *mut T;
unsafe { ptr.validate_user_mut(proc.address_space()) }
}
@ -43,7 +45,7 @@ pub(super) fn arg_user_slice_mut<'a, T: Sized>(
base: usize,
count: usize,
) -> Result<&'a mut [T], Error> {
let proc = Process::current();
let proc = Thread::current();
let ptr = base as *mut T;
unsafe { ptr.validate_user_slice_mut(count, proc.address_space()) }
}

View File

@ -15,20 +15,24 @@ use yggdrasil_abi::{
};
use crate::{
block, fs,
block,
debug::LogLevel,
fs,
mem::{phys, table::MapAttributes},
proc::{self, io::ProcessIo},
sync::IrqSafeSpinlockGuard,
task::{process::Process, runtime, ProcessId},
task::{
process::{Process, ProcessId},
runtime,
thread::Thread,
},
};
mod arg;
use arg::*;
fn run_with_io<T, F: FnOnce(IrqSafeSpinlockGuard<ProcessIo>) -> T>(f: F) -> T {
let proc = Process::current();
fn run_with_io<T, F: FnOnce(IrqSafeSpinlockGuard<ProcessIo>) -> T>(proc: &Process, f: F) -> T {
let io = proc.io.lock();
f(io)
}
@ -36,10 +40,10 @@ fn run_with_io_at<
T,
F: FnOnce(Option<VnodeRef>, IrqSafeSpinlockGuard<ProcessIo>) -> Result<T, Error>,
>(
proc: &Process,
at: Option<RawFd>,
f: F,
) -> Result<T, Error> {
let proc = Process::current();
let io = proc.io.lock();
let at = at
.map(|fd| io.file(fd).and_then(|f| f.borrow().node()))
@ -49,34 +53,36 @@ fn run_with_io_at<
}
fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error> {
let thread = Thread::current();
let process = thread.process();
match func {
SyscallFunction::DebugTrace => {
let pid = Process::current().id();
let pid = process.id();
let tid = thread.id();
let arg = arg_user_str(args[0] as usize, args[1] as usize)?;
debugln!("[{}] TRACE: {:?}", pid, arg);
log_print_raw!(LogLevel::Debug, "[{}:{}] TRACE: {}\n", pid, tid, arg);
Ok(0)
}
SyscallFunction::Nanosleep => {
let seconds = args[0];
let nanos = args[1] as u32;
let duration = Duration::new(seconds, nanos);
todo!();
// let seconds = args[0];
// let nanos = args[1] as u32;
// let duration = Duration::new(seconds, nanos);
block! {
runtime::sleep(duration).await
}
.map(|_| 0)
}
SyscallFunction::Exit => {
let code = ExitCode::from(args[0] as i32);
Process::current().exit(code);
panic!();
// block! {
// runtime::sleep(duration).await
// }
// .map(|_| 0)
}
// Resource management
SyscallFunction::MapMemory => {
let len = args[1] as usize;
let proc = Process::current();
let space = proc.address_space();
let space = thread.address_space();
if len & 0xFFF != 0 {
todo!();
@ -93,8 +99,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let addr = args[0] as usize;
let len = args[1] as usize;
let proc = Process::current();
let space = proc.address_space();
let space = thread.address_space();
if len & 0xFFF != 0 {
todo!();
@ -106,11 +111,20 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
Ok(0)
}
SyscallFunction::SetSignalEntry => {
let entry = args[0] as usize;
let sp = args[1] as usize;
thread.set_signal_entry(entry, sp);
Ok(0)
}
// I/O
SyscallFunction::Write => {
let fd = RawFd(args[0] as u32);
let data = arg_buffer_ref(args[1] as _, args[2] as _)?;
run_with_io(|io| {
run_with_io(&process, |io| {
let file = io.file(fd)?;
let mut file_borrow = file.borrow_mut();
@ -121,7 +135,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let fd = RawFd(args[0] as u32);
let data = arg_buffer_mut(args[1] as _, args[2] as _)?;
run_with_io(|io| {
run_with_io(&process, |io| {
let file = io.file(fd)?;
let mut file_borrow = file.borrow_mut();
@ -133,15 +147,15 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let path = arg_user_str(args[1] as usize, args[2] as usize)?;
let opts = OpenOptions::from(args[3] as u32);
let mode = FileMode::from(args[4] as u32);
let proc = Process::current();
run_with_io_at(at, |at, mut io| {
run_with_io_at(&process, at, |at, mut io| {
let file = io.ioctx().open(at, path, opts, mode)?;
if proc.session_terminal().is_none() &&
// TODO NO_CTTY?
if process.session_terminal().is_none() &&
let Ok(node) = file.borrow().node() && node.kind() == VnodeKind::Char {
debugln!("Session terminal set for #{}: {}", proc.id(), path);
proc.set_session_terminal(node);
debugln!("Session terminal set for #{}: {}", process.id(), path);
process.set_session_terminal(node);
}
let fd = io.place_file(file)?;
@ -151,46 +165,16 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
SyscallFunction::Close => {
let fd = RawFd(args[0] as u32);
run_with_io(|mut io| {
run_with_io(&process, |mut io| {
io.close_file(fd)?;
Ok(0)
})
}
SyscallFunction::Mount => {
let options = arg_user_ref::<MountOptions>(args[0] as usize)?;
run_with_io(|mut io| {
let target_node = io.ioctx().find(None, options.target, true, false)?;
if !target_node.is_directory() {
return Err(Error::NotADirectory);
}
let fs_root = fs::create_filesystem(options)?;
target_node.mount(fs_root)?;
debugln!("{:?}", vfs::VnodeDump::new(io.ioctx().root().clone()));
Ok(0)
})
}
SyscallFunction::Unmount => {
let options = arg_user_ref::<UnmountOptions>(args[0] as usize)?;
run_with_io(|mut io| {
let mountpoint = io.ioctx().find(None, options.mountpoint, true, false)?;
mountpoint.unmount_target()?;
debugln!("{:?}", vfs::VnodeDump::new(io.ioctx().root().clone()));
Ok(0)
})
}
SyscallFunction::OpenDirectory => {
let at = arg_option_fd(args[0] as u32);
let path = arg_user_str(args[1] as usize, args[2] as usize)?;
run_with_io_at(at, |at, mut io| {
run_with_io_at(&process, at, |at, mut io| {
let node = io.ioctx().find(at, path, true, true)?;
let file = node.open_directory()?;
let fd = io.place_file(file)?;
@ -205,7 +189,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
args[2] as usize,
)?;
run_with_io(|io| {
run_with_io(&process, |io| {
let file = io.file(fd)?;
let mut file_borrow = file.borrow_mut();
@ -217,10 +201,11 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let path = arg_user_str(args[1] as usize, args[2] as usize)?;
let _mode = FileMode::from(args[3] as u32);
run_with_io_at(at, |at, mut io| {
run_with_io_at(&process, at, |at, mut io| {
let (parent, name) = abi::path::split_right(path);
let parent_node = io.ioctx().find(at, parent, true, true)?;
parent_node.create(name, VnodeKind::Directory)?;
todo!();
// parent_node.create(name, VnodeKind::Directory)?;
Ok(0)
})
@ -230,7 +215,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let path = arg_user_str(args[1] as usize, args[2] as usize)?;
let recurse = args[3] != 0;
run_with_io_at(at, |at, mut io| {
run_with_io_at(&process, at, |at, mut io| {
let node = io.ioctx().find(at, path, false, false)?;
if node.is_root() || Rc::ptr_eq(io.ioctx().root(), &node) {
@ -250,7 +235,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let buffer = arg_user_mut::<MaybeUninit<FileAttr>>(args[3] as usize)?;
let follow = args[4] != 0;
run_with_io_at(at, |at, mut io| {
run_with_io_at(&process, at, |at, mut io| {
let node = if path.is_empty() {
at.ok_or(Error::InvalidArgument)?
} else {
@ -267,37 +252,79 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
let fd = RawFd(args[0] as u32);
let pos = SeekFrom::from(args[1]);
run_with_io(|io| {
run_with_io(&process, |io| {
let file = io.file(fd)?;
let mut file_borrow = file.borrow_mut();
file_borrow.seek(pos).map(|v| v as usize)
})
}
SyscallFunction::Mount => {
let options = arg_user_ref::<MountOptions>(args[0] as usize)?;
run_with_io(&process, |mut io| {
let target_node = io.ioctx().find(None, options.target, true, false)?;
if !target_node.is_directory() {
return Err(Error::NotADirectory);
}
let fs_root = fs::create_filesystem(options)?;
target_node.mount(fs_root)?;
debugln!("{:?}", vfs::VnodeDump::new(io.ioctx().root().clone()));
Ok(0)
})
}
SyscallFunction::Unmount => {
let options = arg_user_ref::<UnmountOptions>(args[0] as usize)?;
run_with_io(&process, |mut io| {
let mountpoint = io.ioctx().find(None, options.mountpoint, true, false)?;
mountpoint.unmount_target()?;
debugln!("{:?}", vfs::VnodeDump::new(io.ioctx().root().clone()));
Ok(0)
})
}
SyscallFunction::DeviceRequest => {
let fd = RawFd(args[0] as u32);
let req = arg_user_mut::<DeviceRequest>(args[1] as usize)?;
run_with_io(&process, |io| {
let file = io.file(fd)?;
let node = file.borrow().node()?;
node.device_request(req)?;
Ok(0)
})
}
// Process management
SyscallFunction::Spawn => {
let options = arg_user_ref::<SpawnOptions>(args[0] as usize)?;
let proc = Process::current();
run_with_io(|mut io| {
run_with_io(&process, |mut io| {
let node = io.ioctx().find(None, options.program, true, true)?;
// Setup a new process from the file
let file = node.open(OpenOptions::READ, FileMode::empty())?;
let child = proc::exec::load_elf(
let file = node.open(OpenOptions::READ)?;
let (child_process, child_main) = proc::exec::load_elf(
options.program,
file,
options.arguments,
options.environment,
)?;
let pid = child.id() as u32;
let pid: u32 = child_process.id().into();
// Inherit group and session from the creator
child.inherit(&proc)?;
child_process.inherit(&process)?;
// Inherit root from the creator
let child_ioctx = IoContext::new(io.ioctx().root().clone());
let mut child_io = child.io.lock();
let mut child_io = child_process.io.lock();
child_io.set_ioctx(child_ioctx);
for opt in options.optional {
@ -307,7 +334,7 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
child_io.set_file(child, src_file)?;
}
&SpawnOption::SetProcessGroup(pgroup) => {
child.set_group_id(pgroup as _);
child_process.set_group_id(pgroup.into());
}
_ => (),
}
@ -323,21 +350,63 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
debugln!("{} requested terminal {:?}", pid, fd);
let file = child_io.file(fd)?;
let node = file.borrow().node()?;
let mut req = DeviceRequest::SetTerminalGroup(child.group_id() as _);
let mut req = DeviceRequest::SetTerminalGroup(child_process.group_id().into());
node.device_request(&mut req)?;
}
drop(child_io);
child.enqueue_somewhere();
child_main.enqueue_somewhere();
Ok(pid as _)
})
}
SyscallFunction::Exit => {
let code = ExitCode::from(args[0] as i32);
// TODO separate handlers for process exit and thread exit?
thread.exit(code);
// Process::current().exit(code);
panic!();
}
SyscallFunction::GetPid => Ok(u32::from(process.id()) as usize),
SyscallFunction::GetSessionId => todo!(),
SyscallFunction::GetProcessGroupId => todo!(),
SyscallFunction::SetProcessGroupId => {
let pid = ProcessId::from(args[0] as u32);
let group_id = ProcessId::from(args[1] as u32);
// Other syscall variants are not currently supported
assert_eq!(pid, group_id);
assert_eq!(process.id(), pid);
process.set_group_id(group_id);
Ok(0)
}
SyscallFunction::StartSession => {
let session_terminal = process.clear_session_terminal();
if let Some(ctty) = session_terminal {
// Drop all FDs referring to the old session terminal
run_with_io(&process, |mut io| {
io.retain(|_, f| {
f.borrow()
.node()
.map(|node| !Rc::ptr_eq(&node, &ctty))
.unwrap_or(true)
});
});
}
process.set_session_id(process.id());
process.set_group_id(process.id());
Ok(0)
}
// Waiting and polling
SyscallFunction::WaitProcess => {
let pid = args[0] as ProcessId;
let status = arg_user_mut::<ExitCode>(args[1] as _)?;
let pid = ProcessId::from(args[0] as u32);
debugln!("WaitProcess #{}", pid);
let status = arg_user_mut::<ExitCode>(args[1] as _)?;
let target = Process::get(pid).ok_or(Error::DoesNotExist)?;
@ -350,78 +419,20 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
Ok(0)
}
SyscallFunction::SendSignal => {
let pid = args[0] as u32;
let signal = Signal::try_from(args[1] as u32).map_err(|_| Error::InvalidArgument)?;
_ => todo!("{:?}", func),
// SyscallFunction::SendSignal => {
// let pid = args[0] as u32;
// let signal = Signal::try_from(args[1] as u32).map_err(|_| Error::InvalidArgument)?;
let target = Process::get(pid as _).ok_or(Error::DoesNotExist)?;
target.raise_signal(signal);
// let target = Process::get(pid as _).ok_or(Error::DoesNotExist)?;
// target.raise_signal(signal);
Ok(0)
}
SyscallFunction::SetSignalEntry => {
let entry = args[0] as usize;
let sp = args[1] as usize;
Process::current().set_signal_entry(entry, sp);
Ok(0)
}
SyscallFunction::ExitSignal => {
panic!("Handled elsewhere");
// Process::current().exit_signal();
}
SyscallFunction::GetPid => Ok(Process::current().id()),
SyscallFunction::GetSessionId => todo!(),
SyscallFunction::GetProcessGroupId => todo!(),
SyscallFunction::SetProcessGroupId => {
let pid = args[0] as ProcessId;
let group_id = args[1] as ProcessId;
let proc = Process::current();
// Other syscall variants are not currently supported
assert_eq!(pid, group_id);
assert_eq!(proc.id(), pid);
proc.set_group_id(group_id);
Ok(0)
}
SyscallFunction::StartSession => {
let proc = Process::current();
let session_terminal = proc.clear_session_terminal();
if let Some(ctty) = session_terminal {
// Drop all FDs referring to the old session terminal
run_with_io(|mut io| {
io.retain(|_, f| {
f.borrow()
.node()
.map(|node| !Rc::ptr_eq(&node, &ctty))
.unwrap_or(true)
});
});
}
proc.set_session_id(proc.id());
proc.set_group_id(proc.id());
Ok(0)
}
SyscallFunction::DeviceRequest => {
let fd = RawFd(args[0] as u32);
let req = arg_user_mut::<DeviceRequest>(args[1] as usize)?;
run_with_io(|io| {
let file = io.file(fd)?;
let node = file.borrow().node()?;
node.device_request(req)?;
Ok(0)
})
}
// Ok(0)
// }
// SyscallFunction::ExitSignal => {
// panic!("Handled elsewhere");
// // Process::current().exit_signal();
// }
}
}

View File

@ -4,7 +4,9 @@ use abi::{arch::SavedFrame, error::Error, process::ExitCode};
use alloc::boxed::Box;
use cfg_if::cfg_if;
use crate::task::process::Process;
use crate::task::thread::Thread;
// use crate::task::process::Process;
cfg_if! {
if #[cfg(target_arch = "aarch64")] {
@ -77,7 +79,8 @@ pub trait TaskContextImpl: Sized {
extern "C" fn closure_wrapper<F: FnOnce() + Send + 'static>(closure_addr: usize) -> ! {
let closure = unsafe { Box::from_raw(closure_addr as *mut F) };
closure();
Process::current().exit(ExitCode::SUCCESS);
Thread::current().exit(ExitCode::SUCCESS);
unreachable!();
}

View File

@ -8,7 +8,7 @@ use alloc::{string::String, sync::Arc, vec::Vec};
use crate::{
arch::{Architecture, ArchitectureImpl},
sync::{IrqSafeSpinlock, SpinFence},
task::sched::CpuQueue,
task::{sched::CpuQueue, thread::Thread},
};
use self::{context::TaskContextImpl, process::Process};
@ -17,58 +17,59 @@ pub mod context;
pub mod process;
pub mod runtime;
pub mod sched;
pub mod thread;
pub use context::{Cpu, TaskContext};
/// Process identifier alias for clarity
pub type ProcessId = usize;
// /// Process identifier alias for clarity
// pub type ProcessId = usize;
/// Wrapper structure to hold all the system's processes
pub struct ProcessList {
data: Vec<(ProcessId, Arc<Process>)>,
last_process_id: ProcessId,
}
impl ProcessList {
/// Constructs an empty process list
pub const fn new() -> Self {
Self {
last_process_id: 0,
data: Vec::new(),
}
}
/// Inserts a new process into the list.
///
/// # Safety
///
/// Only meant to be called from inside the Process impl, as this function does not perform any
/// accounting information updates.
pub unsafe fn push(&mut self, process: Arc<Process>) -> ProcessId {
self.last_process_id += 1;
debugln!("Insert process with ID {}", self.last_process_id);
self.data.push((self.last_process_id, process));
self.last_process_id
}
/// Looks up a process by its ID
pub fn get(&self, id: ProcessId) -> Option<&Arc<Process>> {
self.data
.iter()
.find_map(|(i, p)| if *i == id { Some(p) } else { None })
}
}
/// Global shared process list
pub static PROCESSES: IrqSafeSpinlock<ProcessList> = IrqSafeSpinlock::new(ProcessList::new());
// /// Wrapper structure to hold all the system's processes
// pub struct ProcessList {
// data: Vec<(ProcessId, Arc<Process>)>,
// last_process_id: ProcessId,
// }
//
// impl ProcessList {
// /// Constructs an empty process list
// pub const fn new() -> Self {
// Self {
// last_process_id: 0,
// data: Vec::new(),
// }
// }
//
// /// Inserts a new process into the list.
// ///
// /// # Safety
// ///
// /// Only meant to be called from inside the Process impl, as this function does not perform any
// /// accounting information updates.
// pub unsafe fn push(&mut self, process: Arc<Process>) -> ProcessId {
// self.last_process_id += 1;
// debugln!("Insert process with ID {}", self.last_process_id);
// self.data.push((self.last_process_id, process));
// self.last_process_id
// }
//
// /// Looks up a process by its ID
// pub fn get(&self, id: ProcessId) -> Option<&Arc<Process>> {
// self.data
// .iter()
// .find_map(|(i, p)| if *i == id { Some(p) } else { None })
// }
// }
//
// /// Global shared process list
// pub static PROCESSES: IrqSafeSpinlock<ProcessList> = IrqSafeSpinlock::new(ProcessList::new());
/// Creates a new kernel-space process to execute a closure and queues it to some CPU
pub fn spawn_kernel_closure<S: Into<String>, F: Fn() + Send + 'static>(
name: S,
f: F,
) -> Result<(), Error> {
let proc = Process::new_with_context(name, None, TaskContext::kernel_closure(f)?);
proc.enqueue_somewhere();
let thread = Thread::new_kthread(name, TaskContext::kernel_closure(f)?);
thread.enqueue_somewhere();
Ok(())
}

View File

@ -1,387 +1,142 @@
//! Process data structures
use core::{
mem::size_of,
ops::Deref,
fmt,
pin::Pin,
sync::atomic::{AtomicU32, Ordering},
sync::atomic::{AtomicU64, Ordering},
task::{Context, Poll},
};
use abi::{
error::Error,
process::{ExitCode, Signal, SignalEntryData},
process::{ExitCode, Signal},
};
use alloc::{collections::VecDeque, string::String, sync::Arc};
use atomic_enum::atomic_enum;
use futures_util::{task::ArcWake, Future};
use alloc::{
collections::{BTreeMap, VecDeque},
string::String,
sync::Arc,
vec::Vec,
};
use futures_util::Future;
use kernel_util::util::OneTimeInit;
use vfs::VnodeRef;
use crate::{
mem::{process::ProcessAddressSpace, ForeignPointer},
proc::io::ProcessIo,
sync::{IrqGuard, IrqSafeSpinlock},
task::context::TaskContextImpl,
};
use crate::{mem::process::ProcessAddressSpace, proc::io::ProcessIo, sync::IrqSafeSpinlock};
use super::{
context::TaskFrame, runtime::QueueWaker, sched::CpuQueue, Cpu, ProcessId, TaskContext,
PROCESSES,
runtime::QueueWaker,
thread::{Thread, ThreadId, ThreadState},
TaskContext,
};
/// Represents the states a process can be at some point in time
#[atomic_enum]
#[derive(PartialEq)]
pub enum ProcessState {
/// Process is ready for execution and is present in some CPU's queue
Ready,
/// Process is currently being executed by some CPU
Running,
/// Process is present in a global list, but is not queued for execution until it is resumed
Suspended,
/// Process is terminated and waits to be reaped
Terminated,
Terminated(ExitCode),
}
/// Describes signal entry information
#[derive(Debug, Clone)]
pub struct SignalEntry {
entry: usize,
stack: usize,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct ProcessId(u64);
struct ProcessInner {
exit_status: i32,
state: ProcessState,
session_id: ProcessId,
group_id: ProcessId,
session_terminal: Option<VnodeRef>,
signal_entry: Option<SignalEntry>,
signal_stack: VecDeque<Signal>,
queue: Option<&'static CpuQueue>,
session_terminal: Option<VnodeRef>,
threads: Vec<Arc<Thread>>,
}
/// Process data and state structure
pub struct Process {
normal_context: TaskContext,
name: String,
id: ProcessId,
// Process state info
id: OneTimeInit<ProcessId>,
state: AtomicProcessState,
cpu_id: AtomicU32,
space: Option<ProcessAddressSpace>,
space: Arc<ProcessAddressSpace>,
inner: IrqSafeSpinlock<ProcessInner>,
exit_waker: QueueWaker,
/// I/O state of the task
pub io: IrqSafeSpinlock<ProcessIo>,
}
/// Guard type that provides [Process] operations only available for current processes
pub struct CurrentProcess(Arc<Process>, IrqGuard);
static PROCESSES: IrqSafeSpinlock<BTreeMap<ProcessId, Arc<Process>>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl Process {
/// Creates a process from raw architecture-specific [TaskContext].
///
/// # Note
///
/// Has side-effect of allocating a new PID for itself.
pub fn new_with_context<S: Into<String>>(
pub fn new_with_main<S: Into<String>>(
name: S,
space: Option<ProcessAddressSpace>,
normal_context: TaskContext,
) -> Arc<Self> {
let this = Arc::new(Self {
normal_context,
space: Arc<ProcessAddressSpace>,
context: TaskContext,
) -> (Arc<Self>, Arc<Thread>) {
let name = name.into();
let id = ProcessId::next();
id: OneTimeInit::new(),
name: name.into(),
state: AtomicProcessState::new(ProcessState::Suspended),
cpu_id: AtomicU32::new(0),
space,
let process = Arc::new(Self {
name,
id,
space: space.clone(),
inner: IrqSafeSpinlock::new(ProcessInner {
state: ProcessState::Running,
session_id: id,
group_id: id,
session_terminal: None,
threads: Vec::new(),
}),
exit_waker: QueueWaker::new(),
inner: IrqSafeSpinlock::new(ProcessInner {
exit_status: 0,
session_id: 0,
group_id: 0,
session_terminal: None,
signal_entry: None,
signal_stack: VecDeque::new(),
queue: None,
}),
io: IrqSafeSpinlock::new(ProcessIo::new()),
});
let id = unsafe { PROCESSES.lock().push(this.clone()) };
this.id.init(id);
// Create "main" thread
let thread = Thread::new_uthread(process.clone(), space, context);
process.inner.lock().threads.push(thread.clone());
{
let mut inner = this.inner.lock();
inner.session_id = id;
inner.group_id = id;
}
PROCESSES.lock().insert(id, process.clone());
this
(process, thread)
}
/// Returns a reference to the inner architecture-specific [TaskContext].
pub fn current_context(&self) -> &TaskContext {
&self.normal_context
}
/// Returns this process' ID
pub fn id(&self) -> ProcessId {
*self.id.get()
self.id
}
/// Returns the binary name of the process
pub fn name(&self) -> &str {
self.name.as_str()
}
/// Returns the state of the process.
///
/// # Note
///
/// Maybe I should remove this and make ALL state changes atomic.
pub fn state(&self) -> ProcessState {
self.state.load(Ordering::Acquire)
}
/// Atomically updates the state of the process and returns the previous one.
pub fn set_state(&self, state: ProcessState) -> ProcessState {
self.state.swap(state, Ordering::SeqCst)
}
/// Marks the task as running on the specified CPU.
///
/// # Safety
///
/// Only meant to be called from scheduler routines.
pub unsafe fn set_running(&self, cpu: u32) {
self.cpu_id.store(cpu, Ordering::Release);
self.state.store(ProcessState::Running, Ordering::Release);
}
/// Returns the address space of the task
pub fn address_space(&self) -> &ProcessAddressSpace {
self.space.as_ref().unwrap()
}
/// Returns the address space of the task, if one is set
pub fn get_address_space(&self) -> Option<&ProcessAddressSpace> {
self.space.as_ref()
}
/// Replaces the task's session terminal device with another one
pub fn set_session_terminal(&self, terminal: VnodeRef) {
self.inner.lock().session_terminal.replace(terminal);
}
/// Removes the task's current terminal
pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.take()
}
/// Returns the current terminal of the task
pub fn session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.clone()
}
/// Sets the session ID of the task
pub fn set_session_id(&self, sid: ProcessId) {
self.inner.lock().session_id = sid;
}
/// Sets the process group ID of the task
pub fn set_group_id(&self, mut gid: ProcessId) {
if gid == 0 {
gid = self.id();
}
self.inner.lock().group_id = gid;
}
/// Returns the process group ID of the task
pub fn group_id(&self) -> ProcessId {
self.inner.lock().group_id
}
/// Returns the CPU number this task in running on (or the last one)
pub fn cpu_id(&self) -> u32 {
self.cpu_id.load(Ordering::Acquire)
pub fn session_id(&self) -> ProcessId {
self.inner.lock().session_id
}
/// Selects a suitable CPU queue and submits the process for execution.
///
/// # Panics
///
/// Currently, the code will panic if the process is queued/executing on any queue.
pub fn enqueue_somewhere(self: Arc<Self>) -> usize {
// Doesn't have to be precise, so even if something changes, we can still be rebalanced
// to another CPU
let (index, queue) = CpuQueue::least_loaded().unwrap();
self.enqueue_to(queue);
index
pub fn name(&self) -> &str {
self.name.as_ref()
}
/// Submits the process to a specific queue.
///
/// # Panics
///
/// Currently, the code will panic if the process is queued/executing on any queue.
pub fn enqueue_to(self: Arc<Self>, queue: &'static CpuQueue) {
let _irq = IrqGuard::acquire();
{
let mut inner = self.inner.lock();
let old_queue = inner.queue.replace(queue);
if old_queue.is_some() {
// Already in some queue
return;
}
}
match self.state.compare_exchange(
ProcessState::Suspended,
ProcessState::Ready,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Err(ProcessState::Terminated) => {
// Process might've been killed while `await`ing in a `block!`
debugln!(
"Process {} {:?} already terminated, dropping",
self.id(),
self.name()
);
}
Err(state) => {
todo!("Unexpected process state when enqueueing: {:?}", state)
}
Ok(_) => unsafe {
queue.enqueue(self);
},
}
pub fn set_group_id(&self, id: ProcessId) {
self.inner.lock().group_id = id;
}
fn dequeue(&self, new_state: ProcessState) {
let _irq = IrqGuard::acquire();
assert_ne!(new_state, ProcessState::Ready);
assert_ne!(new_state, ProcessState::Running);
let mut inner = self.inner.lock();
let current_state = self.state.swap(new_state, Ordering::SeqCst);
let proc_queue = inner.queue.take().unwrap();
proc_queue.dequeue(self.id());
match current_state {
// NOTE: I'm not sure if the process could've been queued between the store and this
// but most likely not (if I'm not that bad with atomics)
// Do nothing, its queue will just drop the process
ProcessState::Ready => (),
// Do nothing, not in a queue already
ProcessState::Suspended => (),
ProcessState::Terminated => panic!("Process is terminated"),
ProcessState::Running => {
let cpu_id = self.cpu_id.load(Ordering::Acquire);
let local_cpu_id = Cpu::local_id();
let queue = Cpu::local().queue();
if cpu_id == local_cpu_id {
assert_eq!(queue as *const _, proc_queue as *const _, "Process queue mismatch: process says cpu{}, queue {:p}, actual cpu{}, queue {:p}", cpu_id, proc_queue, local_cpu_id, queue);
drop(inner);
// Suspending a process running on local CPU
unsafe { queue.yield_cpu() }
} else {
todo!();
}
}
}
pub fn set_session_id(&self, id: ProcessId) {
self.inner.lock().session_id = id;
}
/// Marks the process as suspended, blocking it from being run until it's resumed.
///
/// # Note
///
/// The process may not halt its execution immediately when this function is called, only when
/// this function is called targeting the *current process* running on *local* CPU.
pub fn suspend(&self) {
self.dequeue(ProcessState::Suspended);
// Resources
pub fn session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.clone()
}
/// Returns an exit code if the process exited, [None] if it didn't
pub fn get_exit_status(&self) -> Option<ExitCode> {
if self.state() == ProcessState::Terminated {
Some(ExitCode::from(self.inner.lock().exit_status))
} else {
None
}
pub fn set_session_terminal(&self, node: VnodeRef) {
self.inner.lock().session_terminal.replace(node);
}
/// Returns the [Process] currently executing on local CPU, None if idling.
pub fn get_current() -> Option<CurrentProcess> {
let queue = Cpu::local().queue();
queue.current_process()
pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
self.inner.lock().session_terminal.take()
}
/// Returns a process by its ID
pub fn get(pid: ProcessId) -> Option<Arc<Self>> {
PROCESSES.lock().get(pid).cloned()
}
/// Wraps [Process::get_current()] for cases when the caller is absolutely sure there is a
/// running process (e.g. the call itself comes from a process).
pub fn current() -> CurrentProcess {
Self::get_current().unwrap()
}
/// Handles the cleanup of an exited process
pub fn handle_exit(&self) {
// Scheduler still holds a lock of this process?
// TODO cancel Wait if a process was killed while suspended?
{
let inner = self.inner.lock();
let exit_status = ExitCode::from(inner.exit_status);
debugln!(
"Handling exit of #{} with status {:?}",
self.id(),
exit_status
);
// TODO cleanup address space
// if let Some(space) = self.get_address_space() {
// }
self.io.lock().handle_exit();
}
// Notify any waiters we're done
self.exit_waker.wake_all();
}
/// Raises a signal for the currentprocess
pub fn raise_signal(self: &Arc<Self>, signal: Signal) {
{
let mut inner = self.inner.lock();
inner.signal_stack.push_back(signal);
}
if self.state() == ProcessState::Suspended {
self.clone().enqueue_somewhere();
}
}
/// Inherits the data from a parent process. Meant to be called from SpawnProcess handler.
pub fn inherit(&self, parent: &Arc<Process>) -> Result<(), Error> {
pub fn inherit(&self, parent: &Process) -> Result<(), Error> {
let mut our_inner = self.inner.lock();
let their_inner = parent.inner.lock();
@ -392,20 +147,15 @@ impl Process {
Ok(())
}
/// Raises a signal for the specified process group
pub fn signal_group(group_id: ProcessId, signal: Signal) {
let processes = PROCESSES.lock();
for (_, proc) in processes.data.iter() {
let inner = proc.inner.lock();
if proc.state() != ProcessState::Terminated && inner.group_id == group_id {
debugln!("Deliver group signal to {}: {:?}", proc.id(), signal);
drop(inner);
proc.raise_signal(signal);
}
// State
pub fn get_exit_status(&self) -> Option<ExitCode> {
match self.inner.lock().state {
ProcessState::Running => None,
ProcessState::Terminated(x) => Some(x),
}
}
pub fn wait_for_exit(process: Arc<Self>) -> impl Future<Output = ExitCode> {
pub fn wait_for_exit(process: Arc<Process>) -> impl Future<Output = ExitCode> {
struct ProcessExitFuture {
process: Arc<Process>,
}
@ -429,114 +179,338 @@ impl Process {
ProcessExitFuture { process }
}
}
impl ArcWake for Process {
fn wake_by_ref(arc_self: &Arc<Self>) {
arc_self.clone().enqueue_somewhere();
}
}
impl Drop for Process {
fn drop(&mut self) {
infoln!("Drop process!");
}
}
impl CurrentProcess {
/// Wraps a process in this structure.
///
/// # Safety
///
/// Only meant to be called from [Process::current] or [CpuQueue::current_process].
pub unsafe fn new(inner: Arc<Process>, guard: IrqGuard) -> Self {
Self(inner, guard)
}
/// Configures signal entry information for the process
pub fn set_signal_entry(&self, entry: usize, stack: usize) {
pub fn handle_thread_exit(&self, thread: ThreadId, code: ExitCode) {
debugln!("Thread {} of process {}: {:?}", thread, self.id, code);
let mut inner = self.inner.lock();
inner.signal_entry.replace(SignalEntry { entry, stack });
}
pub fn suspend(&self) -> Result<(), Error> {
self.dequeue(ProcessState::Suspended);
// TODO make this cleaner
let old_len = inner.threads.len();
inner.threads.retain(|t| t.id() != thread);
assert_ne!(inner.threads.len(), old_len);
let inner = self.inner.lock();
if !inner.signal_stack.is_empty() {
return Err(Error::Interrupted);
let last_thread = inner.threads.is_empty();
if last_thread {
debugln!("Last thread of {} exited", self.id);
inner.state = ProcessState::Terminated(code);
self.io.lock().handle_exit();
drop(inner);
self.exit_waker.wake_all();
}
Ok(())
// // Scheduler still holds a lock of this process?
// // TODO cancel Wait if a process was killed while suspended?
// {
// let inner = self.inner.lock();
// let exit_status = ExitCode::from(inner.exit_status);
// debugln!(
// "Handling exit of #{} with status {:?}",
// self.id(),
// exit_status
// );
//
// // TODO cleanup address space
// // if let Some(space) = self.get_address_space() {
// // }
//
// self.io.lock().handle_exit();
// }
//
// // Notify any waiters we're done
// self.exit_waker.wake_all();
}
/// Terminate the current process
pub fn exit(&self, status: ExitCode) {
self.inner.lock().exit_status = status.into();
debugln!("Process {} exited with code {:?}", self.id(), status);
/// Raises a signal for the specified process
pub fn raise_signal(self: &Arc<Self>, signal: Signal) {
let thread = self.inner.lock().threads[0].clone();
thread.raise_signal(signal);
self.handle_exit();
self.dequeue(ProcessState::Terminated);
// // TODO if the process does not have any running/ready threads, pick one and wake it up
// if inner
// .threads
// .iter()
// .all(|t| t.state.load(Ordering::Acquire) == ThreadState::Suspended)
// {
// let thread = inner.threads[0].clone();
// drop(inner);
// thread.enqueue_somewhere();
// return;
// }
}
/// Sets up a return frame to handle a pending signal, if any is present in the task's queue.
///
/// # Safety
///
/// This function is only meant to be called right before returning from an userspace
/// exception handler.
pub unsafe fn handle_signal<F: TaskFrame>(&self, frame: &mut F) {
let mut inner = self.inner.lock();
if let Some(signal) = inner.signal_stack.pop_front() {
let Some(entry) = inner.signal_entry.clone() else {
todo!();
};
debugln!(
"Enter signal handler from: pc={:#x}, sp={:#x}",
frame.user_ip(),
frame.user_sp()
);
// TODO check if really in a syscall, lol
let syscall_return = -(u32::from(Error::Interrupted) as isize);
frame.set_return_value(syscall_return as u64);
// Setup signal frame
let usp = ((entry.stack - size_of::<SignalEntryData>()) & !0xF)
- TaskContext::SIGNAL_STACK_EXTRA_ALIGN;
let frame_ptr = usp as *mut SignalEntryData;
let saved_frame = frame.store();
frame_ptr.write_foreign_volatile(
self.address_space(),
SignalEntryData {
signal,
frame: saved_frame,
},
);
// Setup return to signal handler
debugln!(
"Syscall entry @ pc={:#x}, sp={:#x} (top = {:#x})",
entry.entry,
usp,
entry.stack
);
frame.set_user_sp(usp);
frame.set_user_ip(entry.entry);
// Pass the frame pointer as an argument to signal handler entry
frame.set_argument(usp as _);
/// Raises a signal for the specified process group
pub fn signal_group(group_id: ProcessId, signal: Signal) {
let processes = PROCESSES.lock();
for (_, proc) in processes.iter() {
let inner = proc.inner.lock();
if !matches!(inner.state, ProcessState::Terminated(_)) && inner.group_id == group_id {
debugln!("Deliver group signal to {}: {:?}", proc.id(), signal);
drop(inner);
proc.raise_signal(signal);
}
}
}
}
impl Deref for CurrentProcess {
type Target = Arc<Process>;
fn deref(&self) -> &Self::Target {
&self.0
// Process list
pub fn get(id: ProcessId) -> Option<Arc<Self>> {
PROCESSES.lock().get(&id).cloned()
}
}
impl fmt::Display for ProcessId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use fmt::Write;
write!(f, "<Process {}>", self.0)
}
}
// XXX TODO Remove this
impl From<ProcessId> for u32 {
fn from(value: ProcessId) -> Self {
value.0 as _
}
}
impl From<u32> for ProcessId {
fn from(value: u32) -> Self {
Self(value as _)
}
}
impl ProcessId {
pub fn next() -> Self {
static COUNTER: AtomicU64 = AtomicU64::new(1);
let id = COUNTER.fetch_add(1, Ordering::SeqCst);
Self(id)
}
}
// /// Returns the state of the process.
// ///
// /// # Note
// ///
// /// Maybe I should remove this and make ALL state changes atomic.
// pub fn state(&self) -> ProcessState {
// self.state.load(Ordering::Acquire)
// }
//
// /// Atomically updates the state of the process and returns the previous one.
// pub fn set_state(&self, state: ProcessState) -> ProcessState {
// self.state.swap(state, Ordering::SeqCst)
// }
//
// /// Marks the task as running on the specified CPU.
// ///
// /// # Safety
// ///
// /// Only meant to be called from scheduler routines.
// pub unsafe fn set_running(&self, cpu: u32) {
// self.cpu_id.store(cpu, Ordering::Release);
// self.state.store(ProcessState::Running, Ordering::Release);
// }
//
// /// Returns the address space of the task
// pub fn address_space(&self) -> &ProcessAddressSpace {
// self.space.as_ref().unwrap()
// }
//
// /// Returns the address space of the task, if one is set
// pub fn get_address_space(&self) -> Option<&ProcessAddressSpace> {
// self.space.as_ref()
// }
//
// /// Replaces the task's session terminal device with another one
// pub fn set_session_terminal(&self, terminal: VnodeRef) {
// self.inner.lock().session_terminal.replace(terminal);
// }
//
// /// Removes the task's current terminal
// pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
// }
//
// /// Returns the current terminal of the task
// pub fn session_terminal(&self) -> Option<VnodeRef> {
// self.inner.lock().session_terminal.clone()
// }
//
// /// Sets the session ID of the task
// pub fn set_session_id(&self, sid: ProcessId) {
// self.inner.lock().session_id = sid;
// }
//
// /// Sets the process group ID of the task
// pub fn set_group_id(&self, mut gid: ProcessId) {
// if gid == 0 {
// gid = self.id();
// }
// self.inner.lock().group_id = gid;
// }
//
// /// Returns the process group ID of the task
// pub fn group_id(&self) -> ProcessId {
// self.inner.lock().group_id
// }
//
// /// Returns the CPU number this task in running on (or the last one)
// pub fn cpu_id(&self) -> u32 {
// self.cpu_id.load(Ordering::Acquire)
// }
//
// /// Selects a suitable CPU queue and submits the process for execution.
// ///
// /// # Panics
// ///
// /// Currently, the code will panic if the process is queued/executing on any queue.
// pub fn enqueue_somewhere(self: Arc<Self>) -> usize {
// // Doesn't have to be precise, so even if something changes, we can still be rebalanced
// // to another CPU
// let (index, queue) = CpuQueue::least_loaded().unwrap();
//
// self.enqueue_to(queue);
//
// index
// }
//
// /// Submits the process to a specific queue.
// ///
// /// # Panics
// ///
// /// Currently, the code will panic if the process is queued/executing on any queue.
// pub fn enqueue_to(self: Arc<Self>, queue: &'static CpuQueue) {
// let _irq = IrqGuard::acquire();
//
// {
// let mut inner = self.inner.lock();
// let old_queue = inner.queue.replace(queue);
// if old_queue.is_some() {
// // Already in some queue
// return;
// }
// }
// match self.state.compare_exchange(
// ProcessState::Suspended,
// ProcessState::Ready,
// Ordering::SeqCst,
// Ordering::Relaxed,
// ) {
// Err(ProcessState::Terminated) => {
// // Process might've been killed while `await`ing in a `block!`
// debugln!(
// "Process {} {:?} already terminated, dropping",
// self.id(),
// self.name()
// );
// }
// Err(state) => {
// todo!("Unexpected process state when enqueueing: {:?}", state)
// }
// Ok(_) => unsafe {
// queue.enqueue(self);
// },
// }
// }
//
// /// Returns an exit code if the process exited, [None] if it didn't
// pub fn get_exit_status(&self) -> Option<ExitCode> {
// if self.state() == ProcessState::Terminated {
// Some(ExitCode::from(self.inner.lock().exit_status))
// } else {
// None
// }
// }
//
// /// Returns the [Process] currently executing on local CPU, None if idling.
// pub fn get_current() -> Option<CurrentProcess> {
// let queue = Cpu::local().queue();
// queue.current_process()
// }
//
// /// Returns a process by its ID
// pub fn get(pid: ProcessId) -> Option<Arc<Self>> {
// PROCESSES.lock().get(pid).cloned()
// }
//
// /// Wraps [Process::get_current()] for cases when the caller is absolutely sure there is a
// /// running process (e.g. the call itself comes from a process).
// pub fn current() -> CurrentProcess {
// Self::get_current().unwrap()
// }
//
// /// Handles the cleanup of an exited process
// pub fn handle_exit(&self) {
// }
//
// /// Inherits the data from a parent process. Meant to be called from SpawnProcess handler.
// pub fn inherit(&self, parent: &Arc<Process>) -> Result<(), Error> {
// }
//
//
// pub fn wait_for_exit(process: Arc<Self>) -> impl Future<Output = ExitCode> {
// }
// }
//
// impl ArcWake for Process {
// fn wake_by_ref(arc_self: &Arc<Self>) {
// arc_self.clone().enqueue_somewhere();
// }
// }
//
// impl Drop for Process {
// fn drop(&mut self) {
// infoln!("Drop process!");
// }
// }
//
// impl CurrentProcess {
// /// Wraps a process in this structure.
// ///
// /// # Safety
// ///
// /// Only meant to be called from [Process::current] or [CpuQueue::current_process].
// pub unsafe fn new(inner: Arc<Process>, guard: IrqGuard) -> Self {
// Self(inner, guard)
// }
//
// /// Configures signal entry information for the process
// pub fn set_signal_entry(&self, entry: usize, stack: usize) {
// let mut inner = self.inner.lock();
// inner.signal_entry.replace(SignalEntry { entry, stack });
// }
//
// pub fn suspend(&self) -> Result<(), Error> {
// self.dequeue(ProcessState::Suspended);
//
// let inner = self.inner.lock();
// if !inner.signal_stack.is_empty() {
// return Err(Error::Interrupted);
// }
//
// Ok(())
// }
//
// /// Terminate the current process
// pub fn exit(&self, status: ExitCode) {
// self.inner.lock().exit_status = status.into();
// debugln!("Process {} exited with code {:?}", self.id(), status);
//
// self.handle_exit();
// self.dequeue(ProcessState::Terminated);
// }
//
// }
//
// impl Deref for CurrentProcess {
// type Target = Arc<Process>;
//
// fn deref(&self) -> &Self::Target {
// &self.0
// }
// }

View File

@ -4,7 +4,7 @@ use abi::error::Error;
use alloc::{boxed::Box, format, sync::Arc};
use futures_util::{task::waker_ref, Future};
use crate::task::{process::Process, spawn_kernel_closure};
use crate::task::{spawn_kernel_closure, thread::Thread};
use super::{
task::Task,
@ -40,17 +40,17 @@ pub fn spawn<F: Future<Output = ()> + Send + 'static>(future: F) -> Result<(), E
}
pub fn run_to_completion<'a, T, F: Future<Output = T> + Send + 'a>(future: F) -> Result<T, Error> {
let process = Process::current();
let thread = Thread::current();
let mut future = Box::pin(future);
loop {
let waker = waker_ref(&process);
let waker = waker_ref(&thread);
let context = &mut Context::from_waker(&waker);
match future.as_mut().poll(context) {
Poll::Ready(value) => break Ok(value),
Poll::Pending => {
if let Err(error) = process.suspend() {
if let Err(error) = thread.suspend() {
break Err(error);
}
}

View File

@ -6,7 +6,7 @@ use kernel_util::util::OneTimeInit;
use crate::{
arch::{Architecture, ArchitectureImpl},
sync::IrqGuard,
task::process::Process,
task::thread::Thread,
};
use super::task::Task;
@ -15,7 +15,7 @@ pub(super) static TASK_QUEUE: OneTimeInit<TaskQueue> = OneTimeInit::new();
pub(super) struct TaskQueue {
// Queue of workers waiting for an item
pending_workers: ArrayQueue<Arc<Process>>,
pending_workers: ArrayQueue<Arc<Thread>>,
task_queue: ArrayQueue<Arc<Task>>,
}
@ -44,19 +44,19 @@ impl TaskQueue {
}
pub fn dequeue(&self) -> Result<Arc<Task>, Error> {
let process = Process::current();
let thread = Thread::current();
assert!(ArchitectureImpl::interrupt_mask());
loop {
if let Some(task) = self.task_queue.pop() {
return Ok(task);
}
if self.pending_workers.push(process.clone()).is_err() {
if self.pending_workers.push(thread.clone()).is_err() {
panic!("Pending worker queue overflow");
}
// This must not fail. Signals must not be raised.
process.suspend().unwrap();
thread.suspend().unwrap();
}
}
}

View File

@ -1,7 +1,13 @@
//! Per-CPU queue implementation
use core::sync::atomic::Ordering;
// use aarch64_cpu::registers::CNTPCT_EL0;
use alloc::{collections::VecDeque, sync::Arc, vec::Vec};
use alloc::{
collections::{BTreeMap, VecDeque},
sync::Arc,
vec::Vec,
};
use cfg_if::cfg_if;
use kernel_util::util::OneTimeInit;
@ -9,12 +15,13 @@ use crate::{
// arch::aarch64::{context::TaskContext, cpu::Cpu},
arch::{Architecture, ArchitectureImpl},
sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard},
task::thread::ThreadState,
};
use super::{
context::TaskContextImpl,
process::{CurrentProcess, Process, ProcessState},
Cpu, ProcessId, TaskContext,
thread::{Thread, ThreadId},
Cpu, TaskContext,
};
/// Per-CPU statistics
@ -29,15 +36,21 @@ pub struct CpuQueueStats {
measure_time: u64,
}
/// Per-CPU queue's inner data, normally resides under a lock
pub struct CpuQueueInner {
/// Current process, None if idling
pub current: Option<Arc<Process>>,
/// LIFO queue for processes waiting for execution
pub queue: VecDeque<Arc<Process>>,
// /// Per-CPU queue's inner data, normally resides under a lock
// pub struct CpuQueueInner {
// /// Current process, None if idling
// pub current: Option<Arc<Process>>,
// /// LIFO queue for processes waiting for execution
// pub queue: VecDeque<Arc<Process>>,
//
// /// CPU time usage statistics
// pub stats: CpuQueueStats,
// }
/// CPU time usage statistics
pub stats: CpuQueueStats,
struct CpuQueueInner {
current: Option<ThreadId>,
queue: VecDeque<ThreadId>,
stats: CpuQueueStats,
}
/// Per-CPU queue
@ -80,21 +93,22 @@ impl CpuQueueInner {
/// Picks a next task for execution, skipping (dropping) those that were suspended. May return
/// None if the queue is empty or no valid task was found, in which case the scheduler should
/// go idle.
pub fn next_ready_task(&mut self) -> Option<Arc<Process>> {
pub fn next_ready_task(&mut self) -> Option<Arc<Thread>> {
#[allow(clippy::never_loop)]
while !self.queue.is_empty() {
let task = self.queue.pop_front().unwrap();
let task_t = Thread::get(task).unwrap();
match task.state() {
ProcessState::Ready => {
return Some(task);
match task_t.state.load(Ordering::Acquire) {
ThreadState::Ready => {
return Some(task_t);
}
e => panic!(
"Unexpected process state in CpuQueue: {:?} ({} {:?}, cpu_id={})",
"Unexpected thread state in CpuQueue: {:?} ({} {:?}, cpu_id={})",
e,
task.id(),
task.name(),
task.cpu_id()
task_t.id(),
task_t.name(),
1234 // task_t.cpu_id()
),
}
}
@ -102,15 +116,15 @@ impl CpuQueueInner {
None
}
/// Returns an iterator over all the processes in the queue plus the currently running process,
/// Returns an iterator over all the threads in the queue plus the currently running thread,
/// if there is one.
pub fn iter(&self) -> impl Iterator<Item = &Arc<Process>> {
pub fn iter(&self) -> impl Iterator<Item = &ThreadId> {
Iterator::chain(self.queue.iter(), self.current.iter())
}
}
impl CpuQueue {
/// Constructs an empty queue with its own idle task
// /// Constructs an empty queue with its own idle task
pub fn new(index: usize) -> Self {
let idle = TaskContext::kernel(__idle, Cpu::local_id() as usize)
.expect("Could not construct an idle task");
@ -158,57 +172,66 @@ impl CpuQueue {
// inner.stats.measure_time = t;
let current = inner.current.clone();
let current_t = current.and_then(Thread::get);
if let Some(current) = current.as_ref() {
if current.state() == ProcessState::Running {
current.set_state(ProcessState::Ready);
inner.queue.push_back(current.clone());
if let Some(current_t) = current_t.as_ref() {
if current_t
.state
.compare_exchange(
ThreadState::Running,
ThreadState::Ready,
Ordering::SeqCst,
Ordering::Relaxed,
)
.is_ok()
{
inner.queue.push_back(current_t.id());
// inner.stats.cpu_time += delta;
}
// inner.stats.cpu_time += delta;
} else {
// inner.stats.idle_time += delta;
}
// else
// inner.stats.idle_time += delta;
let next = inner.next_ready_task();
let next_t = inner.next_ready_task();
// let next_t = next.and_then(Thread::get);
inner.current = next.clone();
inner.current = next_t.as_deref().map(Thread::id);
// Can drop the lock, we hold current and next Arc's
drop(inner);
let (from, _from_rc) = if let Some(current) = current.as_ref() {
(current.current_context(), Arc::strong_count(current))
let (from, _from_rc) = if let Some(current_t) = current_t.as_ref() {
(current_t.context(), Arc::strong_count(current_t))
} else {
(&self.idle, 0)
};
let (to, _to_rc) = if let Some(next) = next.as_ref() {
next.set_running(Cpu::local_id());
(next.current_context(), Arc::strong_count(next))
let (to, _to_rc) = if let Some(next_t) = next_t.as_ref() {
next_t.set_running(Cpu::local_id());
(next_t.context(), Arc::strong_count(next_t))
} else {
(&self.idle, 0)
};
// log_print_raw!(crate::debug::LogLevel::Info, "{}: ", Cpu::local_id());
// if let Some(from) = current.as_ref() {
// log_print_raw!(crate::debug::LogLevel::Info, "{}", from.id(),);
// } else {
// log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
// }
// log_print_raw!(crate::debug::LogLevel::Info, " -> ");
// if let Some(to) = next.as_ref() {
// log_print_raw!(crate::debug::LogLevel::Info, "{}", to.id(),);
// } else {
// log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
// }
// log_print_raw!(crate::debug::LogLevel::Info, "\n");
assert!(ArchitectureImpl::interrupt_mask());
to.switch(from)
// // log_print_raw!(crate::debug::LogLevel::Info, "{}: ", Cpu::local_id());
// // if let Some(from) = current.as_ref() {
// // log_print_raw!(crate::debug::LogLevel::Info, "{}", from.id(),);
// // } else {
// // log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
// // }
// // log_print_raw!(crate::debug::LogLevel::Info, " -> ");
// // if let Some(to) = next.as_ref() {
// // log_print_raw!(crate::debug::LogLevel::Info, "{}", to.id(),);
// // } else {
// // log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
// // }
// // log_print_raw!(crate::debug::LogLevel::Info, "\n");
}
/// Pushes the process to the back of the execution queue.
@ -217,20 +240,20 @@ impl CpuQueue {
///
/// Only meant to be called from Process impl. The function does not set any process accounting
/// information, which may lead to invalid states.
pub unsafe fn enqueue(&self, p: Arc<Process>) {
pub unsafe fn enqueue(&self, tid: ThreadId) {
let mut inner = self.inner.lock();
assert!(ArchitectureImpl::interrupt_mask());
assert_eq!(p.state(), ProcessState::Ready);
// assert_eq!(p.state(), ProcessState::Ready);
inner.queue.push_back(p);
inner.queue.push_back(tid);
}
/// Removes process with given PID from the exeuction queue.
pub fn dequeue(&self, pid: ProcessId) {
/// Removes thread with given TID from the exeuction queue.
pub fn dequeue(&self, tid: ThreadId) {
assert!(ArchitectureImpl::interrupt_mask());
let mut inner = self.inner.lock();
inner.queue.retain(|p| p.id() != pid)
inner.queue.retain(|&p| p != tid)
}
/// Returns the queue length at this moment.
@ -251,38 +274,42 @@ impl CpuQueue {
self.inner.lock().queue.is_empty()
}
/// Returns a safe reference to the inner data structure.
pub fn lock(&self) -> IrqSafeSpinlockGuard<CpuQueueInner> {
self.inner.lock()
}
// /// Returns a safe reference to the inner data structure.
// pub fn lock(&self) -> IrqSafeSpinlockGuard<CpuQueueInner> {
// self.inner.lock()
// }
/// Returns an unsafe reference to the queue.
///
/// # Safety
///
/// Only meant to be called to dump the queue contents when panicking.
#[allow(clippy::mut_from_ref)]
pub unsafe fn grab(&self) -> &mut CpuQueueInner {
self.inner.grab()
}
// /// Returns an unsafe reference to the queue.
// ///
// /// # Safety
// ///
// /// Only meant to be called to dump the queue contents when panicking.
// #[allow(clippy::mut_from_ref)]
// pub unsafe fn grab(&self) -> &mut CpuQueueInner {
// self.inner.grab()
// }
//
// /// Returns the process currently being executed.
// ///
// /// # Note
// ///
// /// This function should be safe in all kernel thread/interrupt contexts:
// ///
// /// * (in kthread) the code calling this will still remain on the same thread.
// /// * (in irq) the code cannot be interrupted and other CPUs shouldn't change this queue, so it
// /// will remain valid until the end of the interrupt or until [CpuQueue::yield_cpu]
// /// is called.
// pub fn current_process(&self) -> Option<CurrentProcess> {
// let guard = IrqGuard::acquire();
// self.inner
// .lock()
// .current
// .clone()
// .map(|p| unsafe { CurrentProcess::new(p, guard) })
// }
/// Returns the process currently being executed.
///
/// # Note
///
/// This function should be safe in all kernel thread/interrupt contexts:
///
/// * (in kthread) the code calling this will still remain on the same thread.
/// * (in irq) the code cannot be interrupted and other CPUs shouldn't change this queue, so it
/// will remain valid until the end of the interrupt or until [CpuQueue::yield_cpu]
/// is called.
pub fn current_process(&self) -> Option<CurrentProcess> {
let guard = IrqGuard::acquire();
self.inner
.lock()
.current
.clone()
.map(|p| unsafe { CurrentProcess::new(p, guard) })
pub fn current_id(&self) -> Option<ThreadId> {
self.inner.lock().current
}
/// Returns a queue for given CPU index

430
src/task/thread.rs Normal file
View File

@ -0,0 +1,430 @@
use core::{
fmt,
mem::size_of,
ops::Deref,
sync::atomic::{AtomicU32, AtomicU64, Ordering},
};
use abi::{
error::Error,
process::{ExitCode, Signal, SignalEntryData},
};
use alloc::{
collections::{BTreeMap, VecDeque},
string::String,
sync::Arc,
};
use atomic_enum::atomic_enum;
use futures_util::task::ArcWake;
use kernel_util::util::OneTimeInit;
use crate::{
mem::{process::ProcessAddressSpace, ForeignPointer},
sync::{IrqGuard, IrqSafeSpinlock},
task::{context::TaskContextImpl, Cpu},
};
use super::{context::TaskFrame, process::Process, sched::CpuQueue, TaskContext};
/// Represents the states a thread can be at some point in time
#[atomic_enum]
#[derive(PartialEq)]
pub enum ThreadState {
/// Thread is ready for execution and is present in some CPU's queue
Ready,
/// Thread is currently being executed by some CPU
Running,
/// Thread is present in a global list, but is not queued for execution until it is resumed
Suspended,
/// Thread is terminated and waits to be reaped
Terminated,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, Ord, PartialOrd)]
pub enum ThreadId {
Kernel(u64),
User(u64),
}
pub struct CurrentThread(Arc<Thread>, IrqGuard);
struct SignalEntry {
entry: usize,
stack: usize,
}
struct ThreadInner {
exit_status: i32,
queue: Option<&'static CpuQueue>,
signal_entry: Option<SignalEntry>,
signal_stack: VecDeque<Signal>,
}
pub struct Thread {
context: TaskContext,
name: Option<String>,
id: ThreadId,
cpu_id: AtomicU32,
pub(super) state: AtomicThreadState,
process: Option<Arc<Process>>,
space: Option<Arc<ProcessAddressSpace>>,
inner: IrqSafeSpinlock<ThreadInner>,
}
static THREADS: IrqSafeSpinlock<BTreeMap<ThreadId, Arc<Thread>>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl Thread {
// pub fn new(index: usize, parent: &Process, context: TaskContext) -> Self {
// todo!()
// }
fn new(
id: ThreadId,
name: Option<String>,
process: Option<Arc<Process>>,
space: Option<Arc<ProcessAddressSpace>>,
context: TaskContext,
) -> Arc<Self> {
let thread = Arc::new(Self {
context,
name,
id,
cpu_id: AtomicU32::new(0),
state: AtomicThreadState::new(ThreadState::Suspended),
process,
space,
inner: IrqSafeSpinlock::new(ThreadInner {
exit_status: 0,
queue: None,
signal_stack: VecDeque::new(),
signal_entry: None,
}),
});
THREADS.lock().insert(id, thread.clone());
thread
}
pub fn new_uthread(
parent: Arc<Process>,
space: Arc<ProcessAddressSpace>,
context: TaskContext,
) -> Arc<Self> {
Self::new(
ThreadId::next_user(),
None,
Some(parent),
Some(space),
context,
)
}
pub fn new_kthread<S: Into<String>>(name: S, context: TaskContext) -> Arc<Self> {
Self::new(
ThreadId::next_kernel(),
Some(name.into()),
None,
None,
context,
)
}
// Info
pub fn id(&self) -> ThreadId {
self.id
}
pub fn name(&self) -> Option<&String> {
self.name.as_ref()
}
pub fn context(&self) -> &TaskContext {
&self.context
}
pub fn address_space(&self) -> &Arc<ProcessAddressSpace> {
self.space.as_ref().unwrap()
}
pub fn process(&self) -> &Arc<Process> {
self.process.as_ref().unwrap()
}
// Queue operation
pub fn current() -> CurrentThread {
Self::get_current().unwrap()
}
pub fn get_current() -> Option<CurrentThread> {
let guard = IrqGuard::acquire();
Cpu::local()
.queue()
.current_id()
.and_then(Self::get)
.map(|t| unsafe { CurrentThread(t, guard) })
}
pub fn enqueue_somewhere(&self) {
// Doesn't have to be precise, so even if something changes, we can still be rebalanced
// to another CPU
let (index, queue) = CpuQueue::least_loaded().unwrap();
self.enqueue_to(queue);
// index
}
pub fn enqueue_to(&self, queue: &'static CpuQueue) {
let _irq = IrqGuard::acquire();
{
let mut inner = self.inner.lock();
let old_queue = inner.queue.replace(queue);
if old_queue.is_some() {
// Already in some queue
return;
}
}
match self.state.compare_exchange(
ThreadState::Suspended,
ThreadState::Ready,
Ordering::SeqCst,
Ordering::Relaxed,
) {
Err(ThreadState::Terminated) => {
// Process might've been killed while `await`ing in a `block!`
debugln!(
"Thread {} {:?} already terminated, dropping",
self.id(),
self.name()
);
}
Err(state) => {
todo!("Unexpected process state when enqueueing: {:?}", state)
}
Ok(_) => unsafe {
queue.enqueue(self.id);
},
}
}
// TODO maybe separate dequeue for current and "other" threads
fn dequeue(&self, new_state: ThreadState) {
let _irq = IrqGuard::acquire();
assert_ne!(new_state, ThreadState::Ready);
assert_ne!(new_state, ThreadState::Running);
let mut inner = self.inner.lock();
let current_state = self.state.swap(new_state, Ordering::SeqCst);
let proc_queue = inner.queue.take().unwrap();
proc_queue.dequeue(self.id());
match current_state {
// NOTE: I'm not sure if the process could've been queued between the store and this
// but most likely not (if I'm not that bad with atomics)
// Do nothing, its queue will just drop the process
ThreadState::Ready => (),
// Do nothing, not in a queue already
ThreadState::Suspended => (),
ThreadState::Terminated => panic!("Thread is terminated"),
ThreadState::Running => {
let cpu_id = self.cpu_id.load(Ordering::Acquire);
let local_cpu_id = Cpu::local_id();
let queue = Cpu::local().queue();
if cpu_id == local_cpu_id {
assert_eq!(queue as *const _, proc_queue as *const _, "Thread queue mismatch: process says cpu{}, queue {:p}, actual cpu{}, queue {:p}", cpu_id, proc_queue, local_cpu_id, queue);
drop(inner);
// Suspending a process running on local CPU
unsafe { queue.yield_cpu() }
} else {
todo!();
}
}
}
}
pub unsafe fn set_running(&self, cpu: u32) {
self.cpu_id.store(cpu, Ordering::Release);
self.state.store(ThreadState::Running, Ordering::Release);
}
pub fn suspend(&self) {
self.dequeue(ThreadState::Suspended);
}
// Accounting
pub fn get(id: ThreadId) -> Option<Arc<Thread>> {
THREADS.lock().get(&id).cloned()
}
// Thread inner
/// Handles the cleanup of an exited thread
pub fn handle_exit(&self) {
// Scheduler still holds a lock of this process?
// TODO cancel Wait if a process was killed while suspended?
let code = {
let inner = self.inner.lock();
let exit_status = ExitCode::from(inner.exit_status);
exit_status
};
if let Some(process) = self.process.as_ref() {
process.handle_thread_exit(self.id(), code);
}
// TODO WaitThread, notify any waiters we're done
// self.exit_waker.wake_all();
}
pub fn set_signal_entry(&self, entry: usize, stack: usize) {
let mut inner = self.inner.lock();
inner.signal_entry.replace(SignalEntry { entry, stack });
}
pub fn raise_signal(self: &Arc<Self>, signal: Signal) {
self.inner.lock().signal_stack.push_back(signal);
if self.state.load(Ordering::Acquire) == ThreadState::Suspended {
self.clone().enqueue_somewhere();
}
}
}
impl ArcWake for Thread {
fn wake_by_ref(arc_self: &Arc<Self>) {
arc_self.clone().enqueue_somewhere();
}
}
impl CurrentThread {
/// Terminate the current process
pub fn exit(&self, status: ExitCode) {
self.inner.lock().exit_status = status.into();
debugln!("Thread {} exited with code {:?}", self.id(), status);
self.handle_exit();
self.dequeue(ThreadState::Terminated);
}
pub fn suspend(&self) -> Result<(), Error> {
self.dequeue(ThreadState::Suspended);
let inner = self.inner.lock();
if !inner.signal_stack.is_empty() {
return Err(Error::Interrupted);
}
Ok(())
}
/// Sets up a return frame to handle a pending signal, if any is present in the task's queue.
///
/// # Safety
///
/// This function is only meant to be called right before returning from an userspace
/// exception handler.
pub unsafe fn handle_pending_signals<F: TaskFrame>(&self, frame: &mut F) {
let ThreadId::User(id) = self.id else {
return;
};
let mut inner = self.inner.lock();
if let Some(signal) = inner.signal_stack.pop_front() {
let Some(entry) = inner.signal_entry.as_ref() else {
todo!();
};
// TODO check if really in a syscall, lol
let syscall_return = -(u32::from(Error::Interrupted) as isize);
frame.set_return_value(syscall_return as u64);
// Setup signal frame
let usp = ((entry.stack - size_of::<SignalEntryData>()) & !0xF)
- TaskContext::SIGNAL_STACK_EXTRA_ALIGN;
let frame_ptr = usp as *mut SignalEntryData;
let saved_frame = frame.store();
frame_ptr.write_foreign_volatile(
self.address_space(),
SignalEntryData {
signal,
frame: saved_frame,
},
);
// Setup return to signal handler
debugln!(
"Syscall entry @ pc={:#x}, sp={:#x} (top = {:#x})",
entry.entry,
usp,
entry.stack
);
frame.set_user_sp(usp);
frame.set_user_ip(entry.entry);
// Pass the frame pointer as an argument to signal handler entry
frame.set_argument(usp as _);
}
// let process = self.process();
// if let Some((entry, signal)) = process.pop_signal() {
// debugln!(
// "{} of {}, enter signal handler from pc={:#x}, sp={:#x}",
// self.id,
// process.id(),
// frame.user_ip(),
// frame.user_sp()
// );
// }
}
}
impl Deref for CurrentThread {
type Target = Arc<Thread>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ThreadId {
pub fn next_kernel() -> Self {
static COUNT: AtomicU64 = AtomicU64::new(1);
let id = COUNT.fetch_add(1, Ordering::SeqCst);
Self::Kernel(id)
}
pub fn next_user() -> Self {
static COUNT: AtomicU64 = AtomicU64::new(1);
let id = COUNT.fetch_add(1, Ordering::SeqCst);
Self::User(id)
}
}
impl fmt::Display for ThreadId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use fmt::Write;
match self {
Self::Kernel(id) => write!(f, "#[{id}]"),
Self::User(id) => write!(f, "#{id}"),
}
}
}