refactor: add docs

Mark Poliakov 2023-12-07 12:50:54 +02:00
parent 57d255d466
commit 2b70a35882
41 changed files with 302 additions and 356 deletions

View File

@ -1,3 +1,4 @@
//! AArch64 boot and entry implementation
use core::{arch::global_asm, sync::atomic::Ordering};
use aarch64_cpu::{

View File

@ -1,3 +1,4 @@
//! AArch64-specific memory management interfaces and functions
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
@ -73,27 +74,16 @@ pub(super) const RAM_MAPPING_OFFSET: usize = MAPPING_OFFSET | (RAM_MAPPING_START
pub(super) static MEMORY_LIMIT: OneTimeInit<usize> = OneTimeInit::new();
#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
pub(super) static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
/// Memory mapping which may be used for performing early kernel initialization
pub struct EarlyMapping<'a, T: ?Sized> {
value: &'a mut T,
page_count: usize,
}
impl<'a, T: Sized> EarlyMapping<'a, T> {
// pub(super) unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
// let layout = Layout::new::<T>();
// let aligned = physical.page_align_down::<L3>();
// let offset = physical.page_offset::<L3>();
// let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
// let virt = map_early_pages(aligned, page_count)?;
// let value = &mut *((virt + offset) as *mut T);
// Ok(EarlyMapping { value, page_count })
// }
pub(super) unsafe fn map_slice(
physical: PhysicalAddress,
len: usize,

View File

@ -1,3 +1,4 @@
//! AArch64-specific process address space management
use core::sync::atomic::{AtomicU8, Ordering};
use abi::error::Error;
@ -13,6 +14,7 @@ use crate::mem::{
use super::table::{PageEntry, PageTable, L1, L2, L3};
/// AArch64 implementation of a process address space table
#[repr(C)]
pub struct ProcessAddressSpaceImpl {
l1: PhysicalRefMut<'static, PageTable<L1>>,

View File

@ -59,9 +59,11 @@ struct BootStack {
data: [u8; BOOT_STACK_SIZE],
}
/// AArch64 architecture implementation
pub struct AArch64 {
dt: OneTimeInit<DeviceTree<'static>>,
/// Optional instance of PSCI on this platform
pub psci: OneTimeInit<&'static Psci>,
reset: OneTimeInit<&'static dyn ResetDevice>,
@ -417,6 +419,7 @@ impl AArch64 {
}
}
/// AArch64 implementation value
pub static ARCHITECTURE: AArch64 = AArch64 {
dt: OneTimeInit::new(),
initrd: OneTimeInit::new(),
@ -430,8 +433,11 @@ pub static ARCHITECTURE: AArch64 = AArch64 {
mtimer: OneTimeInit::new(),
};
/// AArch64-specific interrupt number
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum IrqNumber {
/// CPU-private interrupt
Private(u32),
/// Shared interrupt
Shared(u32),
}
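
Note: the Private/Shared split mirrors the GIC interrupt-ID space, where INTIDs 0-31 are per-CPU (SGIs and PPIs) and INTIDs from 32 up are shared SPIs. A standalone sketch of how a driver might classify a raw INTID; the from_raw helper is hypothetical and not part of this commit:

// Hypothetical helper; whether Shared stores the raw INTID or the
// SPI-relative number is an assumption here.
#[derive(Clone, Copy, Debug, PartialEq)]
enum IrqNumber {
    Private(u32), // SGIs/PPIs: INTID 0..=31
    Shared(u32),  // SPIs: INTID 32..
}

fn from_raw(intid: u32) -> IrqNumber {
    if intid < 32 {
        IrqNumber::Private(intid)
    } else {
        IrqNumber::Shared(intid - 32)
    }
}

fn main() {
    assert_eq!(from_raw(27), IrqNumber::Private(27)); // e.g. the virtual timer PPI
    assert_eq!(from_raw(64), IrqNumber::Shared(32));
}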

View File

@ -107,6 +107,8 @@ pub trait Architecture {
/// The caller must ensure the mapping is no longer in use and will not be used again.
unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping);
/// Maps the physical memory regions into the kernel space so they can later be accessed by the
/// kernel
fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
&self,
it: I,
@ -114,8 +116,10 @@ pub trait Architecture {
memory_end: PhysicalAddress,
) -> Result<(), Error>;
/// Converts a physical address to a virtual one, so it can be accessed by the kernel
fn virtualize(address: PhysicalAddress) -> Result<usize, Error>;
/// Converts a virtual address created by [Architecture::virtualize] back to its physical form
fn physicalize(address: usize) -> Result<PhysicalAddress, Error>;
// Architecture intrinsics
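
Note: virtualize and physicalize are typically inverse linear translations once RAM has been mapped at a fixed offset. A minimal standalone model of that contract; the offset constant is illustrative, not the kernel's actual value:

const RAM_MAPPING_OFFSET: usize = 0xFFFF_8000_0000_0000;

fn virtualize(phys: u64) -> usize {
    phys as usize + RAM_MAPPING_OFFSET
}

fn physicalize(virt: usize) -> u64 {
    (virt - RAM_MAPPING_OFFSET) as u64
}

fn main() {
    let phys = 0x4000_0000u64;
    // The round-trip invariant the trait implies:
    assert_eq!(physicalize(virtualize(phys)), phys);
}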

View File

@ -1,3 +1,4 @@
//! x86-64-specific memory management functions and interfaces
use core::{
alloc::Layout,
ops::{Deref, DerefMut},
@ -46,7 +47,7 @@ const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
pub(super) static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
// 2MiB for early mappings
@ -236,6 +237,7 @@ pub(super) unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {
HEAP_MAPPING_L2[index] = PageEntry::<L2>::block(page, PageAttributes::WRITABLE);
}
/// Memory mapping which may be used for performing early kernel initialization
pub struct EarlyMapping<'a, T: ?Sized> {
value: &'a mut T,
page_count: usize,

View File

@ -1,3 +1,4 @@
//! x86-64-specific process address space management functions
use yggdrasil_abi::error::Error;
use crate::{

View File

@ -1,3 +1,4 @@
//! x86-64-specific memory translation table management interfaces and functions
use core::{
marker::PhantomData,
ops::{Index, IndexMut},
@ -113,6 +114,7 @@ impl PageEntry<L2> {
}
impl PageEntry<L1> {
/// Constructs a mapping which points to a 1GiB block
pub fn block(phys: PhysicalAddress, attrs: PageAttributes) -> Self {
Self(
u64::from(phys) | (attrs | PageAttributes::PRESENT | PageAttributes::BLOCK).bits(),
@ -147,6 +149,7 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
}
}
/// Returns `true` if the mapping represents a "page"/"block" and not a table
pub fn is_block(self) -> bool {
self.0 & PageAttributes::BLOCK.bits() != 0
}

View File

@ -1,4 +1,4 @@
// TODO fix all TODOs
//! x86-64 architecture implementation
use core::{mem::size_of, ops::DerefMut, sync::atomic::Ordering};
use abi::error::Error;
@ -74,12 +74,16 @@ use self::{
use super::{Architecture, CpuMessage};
/// x86-64-specific interrupt number
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum IrqNumber {
/// Legacy (ISA) interrupt
Isa(u8),
/// Global System Interrupt
Gsi(u8),
}
/// x86-64 architecture implementation
pub struct X86_64 {
boot_data: OneTimeInit<BootData>,
acpi: OneTimeInit<AcpiTables<AcpiHandlerImpl>>,
@ -95,6 +99,7 @@ pub struct X86_64 {
static SHUTDOWN_FENCE: SpinFence = SpinFence::new();
/// Global x86-64 architecture value
pub static ARCHITECTURE: X86_64 = X86_64 {
boot_data: OneTimeInit::new(),
acpi: OneTimeInit::new(),

View File

@ -15,6 +15,7 @@ struct RingLoggerInner {
data: RingBuffer<u8, RING_LOGGER_CAPACITY>,
}
/// Logger sink which collects output to an internal ring buffer
pub struct RingLoggerSink {
inner: IrqSafeSpinlock<RingLoggerInner>,
}
@ -161,7 +162,7 @@ impl fmt::Write for DebugSinkWrapper {
}
impl RingLoggerSink {
pub const fn new() -> Self {
const fn new() -> Self {
Self {
inner: IrqSafeSpinlock::new(RingLoggerInner {
data: RingBuffer::new(0),
@ -169,6 +170,7 @@ impl RingLoggerSink {
}
}
/// Reads data from the sink without blocking or waiting for more data to arrive
pub fn read(&self, pos: usize, buffer: &mut [u8]) -> usize {
self.inner.lock().data.read_all_static(pos, buffer)
}
@ -193,6 +195,7 @@ static LOGGER: SimpleLogger = SimpleLogger;
static DEBUG_SINKS: IrqSafeSpinlock<StaticVector<DebugSinkWrapper, MAX_DEBUG_SINKS>> =
IrqSafeSpinlock::new(StaticVector::new());
/// See [RingLoggerSink]
pub static RING_LOGGER_SINK: RingLoggerSink = RingLoggerSink::new();
/// Prints a hex-dump of a slice, appending a virtual address offset to the output
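
Note: the hex-dump routine itself is not shown in this hunk; a standalone sketch of the usual shape, with the address prefix the doc comment describes:

// 16 bytes per line, each line prefixed with base + offset.
fn hex_dump(base: usize, data: &[u8]) {
    for (i, chunk) in data.chunks(16).enumerate() {
        print!("{:016x}: ", base + i * 16);
        for byte in chunk {
            print!("{:02x} ", byte);
        }
        println!();
    }
}

fn main() {
    hex_dump(0xFFFF_8000_0000_0000, b"Hello, kernel world!");
}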

View File

@ -204,6 +204,8 @@ impl<'a> DeviceTree<'a> {
}
// Commonly used functions for convenience
/// Returns the /chosen.stdout-path value
pub fn chosen_stdout_path(&self) -> Option<&str> {
let chosen = self.node_by_path("/chosen")?;
chosen.prop("stdout-path")

View File

@ -1,10 +1,11 @@
//! Font management and data structures
use core::mem::size_of;
use abi::error::Error;
use bytemuck::{Pod, Zeroable};
use kernel_util::AlignedTo;
// static CONSOLE_FONT: &[u8] = include_bytes!("font.psfu");
static CONSOLE_FONT: &AlignedTo<u32, [u8]> = &AlignedTo {
align: [],
bytes: *include_bytes!("font.psfu"),
@ -23,6 +24,7 @@ struct PsfHeader {
width: u32,
}
/// Represents a PSF-format font object
#[derive(Clone, Copy)]
pub struct PcScreenFont<'a> {
header: &'a PsfHeader,
@ -36,6 +38,7 @@ impl Default for PcScreenFont<'static> {
}
impl<'a> PcScreenFont<'a> {
/// Constructs an instance of [PcScreenFont] from its byte representation
pub fn from_bytes(bytes: &'a [u8]) -> Result<Self, Error> {
let header: &PsfHeader = bytemuck::from_bytes(&bytes[..size_of::<PsfHeader>()]);
let glyph_data = &bytes[header.header_size as usize..];
@ -43,22 +46,26 @@ impl<'a> PcScreenFont<'a> {
Ok(Self { header, glyph_data })
}
/// Returns the character width of the font
#[inline]
pub const fn width(&self) -> u32 {
self.header.width
}
/// Returns the character height of the font
#[inline]
pub const fn height(&self) -> u32 {
self.header.height
}
/// Returns the count of glyphs present in the font
#[allow(clippy::len_without_is_empty)]
#[inline]
pub const fn len(&self) -> u32 {
self.header.num_glyph
}
/// Returns the data slice of a single glyph within the font
#[inline]
pub fn raw_glyph_data(&self, index: u32) -> &[u8] {
&self.glyph_data[(index * self.header.bytes_per_glyph) as usize..]

View File

@ -1,7 +1,10 @@
//! Timer device utilities
use core::time::Duration;
use crate::task::runtime;
/// Global system timer tick handler
pub fn tick(now: Duration) {
runtime::tick(now);
}

View File

@ -118,9 +118,9 @@ struct TtyContextInner {
process_group: Option<ProcessId>,
}
/// Represents the context of a terminal device
pub struct TtyContext {
ring: AsyncRing<u8, 128>,
// input_queue: AsyncQueue<u8>,
inner: IrqSafeSpinlock<TtyContextInner>,
}
@ -280,6 +280,7 @@ pub trait TtyDevice: SerialDevice {
}
impl TtyContext {
/// Constructs a new [TtyContext]
pub fn new() -> Self {
Self {
ring: AsyncRing::new(0),
@ -290,67 +291,24 @@ impl TtyContext {
}
}
/// Writes a single character to the terminal
pub fn putc(&self, ch: u8) {
self.ring.try_write(ch).unwrap();
}
/// Performs a blocking read of a single character from the terminal
pub async fn getc(&self) -> u8 {
self.ring.read().await
}
/// Changes the configuration of the terminal
pub fn set_config(&self, config: &TerminalOptions) -> Result<(), Error> {
self.inner.lock().config = *config;
Ok(())
}
/// Returns the configuration of the terminal
pub fn config(&self) -> TerminalOptions {
self.inner.lock().config
}
}
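
Note: putc and getc form a single byte pipe: the interrupt side pushes characters, a reader task awaits them. A synchronous std-only model of the same flow, using a channel in place of the kernel's AsyncRing:

use std::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<u8>();

    // "IRQ handler" side: putc() pushes each received byte.
    for &ch in b"hi\n" {
        tx.send(ch).unwrap();
    }

    // Reader side: the analogue of getc().await, minus the suspension.
    while let Ok(ch) = rx.try_recv() {
        print!("{}", ch as char);
    }
}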
// impl<const N: usize> CharRingInner<N> {
// }
//
// impl<const N: usize> CharRing<N> {
// /// Constructs an empty ring buffer
// pub const fn new() -> Self {
// Self {
// inner: IrqSafeSpinlock::new(CharRingInner {
// rd: 0,
// wr: 0,
// data: [0; N],
// flags: 0,
// process_group: None,
// }),
// // wait_read: Wait::new("char_ring_read"),
// // wait_write: Wait::new("char_ring_write"),
// config: IrqSafeSpinlock::new(TerminalOptions::const_default()),
// }
// }
//
// /// Reads a single character from the buffer, blocking until available
// pub fn getc(&'static self) -> Result<u8, Error> {
// todo!()
// // let mut lock = self.inner.lock();
// // loop {
// // if !lock.is_readable() && lock.flags == 0 {
// // drop(lock);
// // self.wait_read.wait(None)?;
// // lock = self.inner.lock();
// // } else {
// // break;
// // }
// // }
//
// // let byte = unsafe { lock.read_unchecked() };
// // drop(lock);
// // self.wait_write.wakeup_one();
// // // TODO WAIT_SELECT
// // Ok(byte)
// }
//
// /// Sends a single character to the buffer
// pub fn putc(&self, ch: u8, blocking: bool) -> Result<(), Error> {
// todo!()
// }
// }

View File

@ -21,6 +21,7 @@ pub enum CharDeviceType {
}
static DEVFS_ROOT: OneTimeInit<NodeRef> = OneTimeInit::new();
/// Sets up the device filesystem
pub fn init() {
let root = MemoryDirectory::empty();
@ -60,6 +61,7 @@ pub fn add_char_device(dev: &'static dyn CharDevice, kind: CharDeviceType) -> Re
_add_char_device(dev, name)
}
/// Adds "pseudo"-devices to the filesystem (i.e. /dev/random)
pub fn add_pseudo_devices() -> Result<(), Error> {
let random = read_fn_node(move |_, buf| {
random::read(buf);

View File

@ -1,3 +1,5 @@
//! "System" filesystem implementation
use abi::error::Error;
use git_version::git_version;
use kernel_util::util::OneTimeInit;
@ -10,6 +12,7 @@ use crate::{debug, util};
static ROOT: OneTimeInit<NodeRef> = OneTimeInit::new();
/// Returns the root of the filesystem
pub fn root() -> &'static NodeRef {
ROOT.get()
}
@ -18,6 +21,7 @@ fn read_kernel_log(pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
Ok(debug::RING_LOGGER_SINK.read(pos as usize, buffer))
}
/// Sets up the entries within the filesystem
pub fn init() {
let d_kernel = mdir([
("version", const_value_node(env!("CARGO_PKG_VERSION"))),

View File

@ -28,8 +28,7 @@
clippy::match_single_binding,
async_fn_in_trait
)]
// #![warn(missing_docs)]
#![allow(missing_docs)]
#![deny(missing_docs)]
#![no_std]
#![no_main]

View File

@ -1,3 +1,5 @@
//! Address manipulation interfaces and utilities
use core::{
fmt,
iter::Step,
@ -9,25 +11,32 @@ use crate::arch::{Architecture, ArchitectureImpl};
use super::{table::EntryLevel, KERNEL_VIRT_OFFSET};
/// Wrapper type to represent an object residing within the kernel
#[repr(transparent)]
pub struct KernelImageObject<T> {
inner: T,
}
/// Wrapper type to represent a physical memory address
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalAddress(u64);
/// Interface for converting addresses from their raw values to more specific types
#[const_trait]
pub trait FromRaw<T> {
/// Converts a raw value into the address wrapper type
fn from_raw(value: T) -> Self;
}
/// Interface for converting wrapper types into their raw address representations
#[const_trait]
pub trait IntoRaw<T> {
/// Converts a wrapper type value into its raw address
fn into_raw(self) -> T;
}
/// Interface for obtaining physical addresses of values
pub trait AsPhysicalAddress {
/// Returns the value's physical address.
///
@ -73,36 +82,46 @@ impl<T> DerefMut for KernelImageObject<T> {
//
impl PhysicalAddress {
/// Physical address of zero
pub const ZERO: Self = Self(0);
/// Maximum representable physical address
pub const MAX: Self = Self(u64::MAX);
/// Minimum representable physical address
pub const MIN: Self = Self(u64::MIN);
/// Applies an offset to the address
pub const fn add(self, offset: usize) -> Self {
Self(self.0 + offset as u64)
}
/// Returns `true` if the address is zero
#[inline(always)]
pub const fn is_zero(self) -> bool {
self.0 == 0
}
/// Returns the offset this address has within a page of level `L`
pub const fn page_offset<L: ~const EntryLevel>(self) -> usize {
L::page_offset(self.0 as usize)
}
/// Aligns the address down to a boundary of a page of level `L`
pub const fn page_align_down<L: ~const EntryLevel>(self) -> Self {
Self(self.0 & !(L::SIZE as u64 - 1))
}
/// Aligns the address up to a boundary of a page of level `L`
pub const fn page_align_up<L: ~const EntryLevel>(self) -> Self {
Self((self.0 + L::SIZE as u64 - 1) & !(L::SIZE as u64 - 1))
}
/// Returns the page index this address has at level `L`
pub const fn page_index<L: ~const EntryLevel>(self) -> usize {
L::index(self.0 as usize)
}
/// Returns `true` if the address is aligned to a boundary of a page at level `L`
#[inline]
pub const fn is_aligned_for<T: Sized>(self) -> bool {
self.0 as usize % align_of::<T>() == 0
@ -119,6 +138,7 @@ impl PhysicalAddress {
ArchitectureImpl::physicalize(address).unwrap()
}
/// Converts the physical address to a virtual one
pub fn virtualize_raw(self) -> usize {
ArchitectureImpl::virtualize(self).unwrap()
}
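
Note: the alignment helpers above are the standard power-of-two mask tricks; a standalone check of the arithmetic for a 4 KiB (L3) page:

const L3_SIZE: u64 = 4096;

const fn page_align_down(addr: u64) -> u64 {
    addr & !(L3_SIZE - 1)
}

const fn page_align_up(addr: u64) -> u64 {
    (addr + L3_SIZE - 1) & !(L3_SIZE - 1)
}

const fn page_offset(addr: u64) -> u64 {
    addr & (L3_SIZE - 1)
}

fn main() {
    let addr = 0x4000_1234;
    assert_eq!(page_align_down(addr), 0x4000_1000);
    assert_eq!(page_align_up(addr), 0x4000_2000);
    assert_eq!(page_offset(addr), 0x234);
}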

View File

@ -1,6 +1,4 @@
//! Facilities for mapping devices to virtual address space
// TODO
#![allow(unused)]
use core::{mem::size_of, ops::Deref};
use abi::error::Error;
@ -10,22 +8,31 @@ use crate::arch::{Architecture, ARCHITECTURE};
use super::PhysicalAddress;
/// Describes a single device memory mapping
#[derive(Debug)]
pub struct RawDeviceMemoryMapping {
/// Virtual address of the mapped object
pub address: usize,
/// Base address of the mapping start
pub base_address: usize,
/// Page size used for the mapping
pub page_size: usize,
/// Number of pages used to map the object
pub page_count: usize,
}
/// Describes a single untyped device memory mapping
#[derive(Clone, Debug)]
pub struct DeviceMemoryMapping {
#[allow(unused)]
inner: Arc<RawDeviceMemoryMapping>,
address: usize,
}
/// Describes a single typed device memory mapping
#[derive(Clone, Debug)]
pub struct DeviceMemoryIo<'a, T: ?Sized> {
#[allow(unused)]
inner: Arc<RawDeviceMemoryMapping>,
value: &'a T,
}
@ -41,6 +48,8 @@ impl RawDeviceMemoryMapping {
ARCHITECTURE.map_device_memory(base, size)
}
/// Consumes the device mapping, leaking its address without deallocating the translation
/// mapping itself
pub fn leak(self) -> usize {
let address = self.address;
core::mem::forget(self);
@ -73,6 +82,7 @@ impl DeviceMemoryMapping {
})
}
/// Returns the address to which the object is mapped
pub fn address(&self) -> usize {
self.address
}
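
Note: leak works by suppressing the mapping's destructor with core::mem::forget, so the translation entries are never torn down. A standalone model of the pattern; Mapping and its Drop body are illustrative:

struct Mapping {
    address: usize,
}

impl Drop for Mapping {
    fn drop(&mut self) {
        // Where the real type would unmap the device memory.
        println!("unmapping {:#x}", self.address);
    }
}

fn leak(mapping: Mapping) -> usize {
    let address = mapping.address;
    core::mem::forget(mapping); // Drop never runs; the mapping stays live
    address
}

fn main() {
    let addr = leak(Mapping { address: 0xFFFF_9000_0000_0000 });
    println!("leaked at {:#x}", addr); // no "unmapping" line precedes this
}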

View File

@ -22,6 +22,7 @@ pub use address::PhysicalAddress;
use self::{device::DeviceMemoryMapping, process::ProcessAddressSpace};
/// Offset applied to the physical kernel image when translating it into the virtual address space
pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
/// Reads a value from an arbitrary physical address.

View File

@ -1,3 +1,5 @@
//! Physical memory management utilities
use core::ops::Range;
use abi::error::Error;
@ -41,6 +43,7 @@ impl PhysicalMemoryRegion {
self.base..self.end()
}
/// Constrains the [PhysicalMemoryRegion] to global memory limits set in the kernel
pub fn clamp(self) -> Option<(PhysicalAddress, PhysicalAddress)> {
let start = self.base.min(MEMORY_UPPER_LIMIT);
let end = self.end().min(MEMORY_UPPER_LIMIT);
@ -67,6 +70,7 @@ pub fn alloc_pages_contiguous(count: usize) -> Result<PhysicalAddress, Error> {
PHYSICAL_MEMORY.get().lock().alloc_contiguous_pages(count)
}
/// Allocates a single 2MiB page of physical memory from the global manager
pub fn alloc_2m_page() -> Result<PhysicalAddress, Error> {
PHYSICAL_MEMORY.get().lock().alloc_2m_page()
}
@ -102,6 +106,7 @@ fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
}
}
/// Locates a contiguous region of available physical memory within the memory region list
pub fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
it: I,
count: usize,

View File

@ -1,3 +1,5 @@
//! Pointer utilities and interfaces
use core::{
fmt,
ops::{Deref, DerefMut},
@ -5,11 +7,13 @@ use core::{
use super::{address::AsPhysicalAddress, PhysicalAddress};
/// Wrapper for immutably accessing a value at a physical address
#[repr(transparent)]
pub struct PhysicalRef<'a, T: ?Sized> {
value: &'a T,
}
/// Wrapper for mutably accessing a value at a physical address
#[repr(transparent)]
pub struct PhysicalRefMut<'a, T: ?Sized> {
value: &'a mut T,
@ -44,6 +48,7 @@ impl<'a, T: Sized> PhysicalRefMut<'a, T> {
}
impl<T: ?Sized> PhysicalRefMut<'_, T> {
/// Returns the "address" part of the reference
#[inline]
pub fn as_address(&self) -> usize {
(self.value as *const T).addr()
@ -104,6 +109,7 @@ impl<'a, T: Sized> PhysicalRef<'a, T> {
}
impl<T: ?Sized> PhysicalRef<'_, T> {
/// Returns the "address" part of the reference
#[inline]
pub fn as_address(&self) -> usize {
(self.value as *const T).addr()

View File

@ -1,3 +1,5 @@
//! Process address space structures and management functions
use abi::error::Error;
use cfg_if::cfg_if;
use kernel_util::sync::IrqSafeSpinlock;
@ -17,10 +19,14 @@ cfg_if! {
/// Interface for virtual memory address space management
pub trait ProcessAddressSpaceManager: Sized {
/// Page size used by this implementation
const PAGE_SIZE: usize;
/// PFN of a minimum address allowed for virtual region allocation
const LOWER_LIMIT_PFN: usize;
/// PFN of a maximum address allowed for virtual region allocation
const UPPER_LIMIT_PFN: usize;
/// Constructs a new implementation-specific per-process address space
fn new() -> Result<Self, Error>;
/// Places a single PAGE_SIZE mapping into the address space.
@ -43,8 +49,11 @@ pub trait ProcessAddressSpaceManager: Sized {
/// will not access this page.
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error>;
/// Returns the [PhysicalAddress] and [MapAttributes] associated with given virtual `address`,
/// if one is mapped
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;
/// Returns the implementation specific physical address of this space, with ASID applied
fn as_address_with_asid(&self) -> u64;
}
@ -53,6 +62,7 @@ struct Inner {
table: ProcessAddressSpaceImpl,
}
/// Data structure for managing the address translation and allocation for a single process
pub struct ProcessAddressSpace {
inner: IrqSafeSpinlock<Inner>,
}
@ -144,6 +154,7 @@ impl Inner {
}
impl ProcessAddressSpace {
/// Constructs a new [ProcessAddressSpace]
pub fn new() -> Result<Self, Error> {
let table = ProcessAddressSpaceImpl::new()?;
let allocator = VirtualMemoryAllocator::new(
@ -155,6 +166,8 @@ impl ProcessAddressSpace {
})
}
/// Allocates a region of virtual memory within the address space and maps the pages to the
/// ones returned from `get_page` function
pub fn allocate<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
&self,
_hint: Option<usize>,
@ -173,6 +186,7 @@ impl ProcessAddressSpace {
)
}
/// Maps a region of memory in the address space
pub fn map<F: Fn(usize) -> Result<PhysicalAddress, Error>>(
&self,
address: usize,
@ -193,6 +207,7 @@ impl ProcessAddressSpace {
)
}
/// Adds a single-page mapping to the address space
pub fn map_single(
&self,
address: usize,
@ -208,6 +223,8 @@ impl ProcessAddressSpace {
.map_range(address, 1, |_| Ok(physical), attributes)
}
/// Returns the [PhysicalAddress] associated with given virtual `address`,
/// if one is mapped
pub fn translate(&self, address: usize) -> Result<PhysicalAddress, Error> {
// Offset is handled at impl level
self.inner.lock().table.translate(address).map(|e| e.0)
@ -234,6 +251,7 @@ impl ProcessAddressSpace {
)
}
/// Returns the physical address of this table, with ASID applied
pub fn as_address_with_asid(&self) -> u64 {
self.inner.lock().table.as_address_with_asid()
}

View File

@ -22,7 +22,9 @@ bitflags! {
pub trait NextPageTable {
/// Type for the next-level page table
type NextLevel;
/// Type for an immutable reference to the next-level page table
type TableRef: Deref<Target = Self::NextLevel>;
/// Type for a mutable reference to the next-level page table
type TableRefMut: DerefMut<Target = Self::NextLevel>;
/// Tries looking up a next-level table at given index, allocating and mapping one if it is not
@ -30,14 +32,16 @@ pub trait NextPageTable {
fn get_mut_or_alloc(&mut self, index: usize) -> Result<Self::TableRefMut, Error>;
/// Returns a mutable reference to a next-level table at `index`, if present
fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut>;
/// Returns an immutable reference to a next-level table at `index`, if present
fn get(&self, index: usize) -> Option<Self::TableRef>;
}
/// Interface for a single level of address translation
#[const_trait]
pub trait EntryLevel: Copy {
/// The right shift needed to obtain an index of an entry at this level from an address
const SHIFT: usize;
/// The size of a page at this entry level
const SIZE: usize = 1 << Self::SHIFT;
/// Returns the index into a page table for a given address
@ -51,11 +55,13 @@ pub trait EntryLevel: Copy {
addr & (Self::SIZE - 1)
}
/// Aligns the `addr` up to the level page boundary
#[inline]
fn align_up(addr: usize) -> usize {
(addr + Self::SIZE - 1) & !(Self::SIZE - 1)
}
/// Returns the page count needed to fully contain a block of size `addr`
#[inline]
fn page_count(addr: usize) -> usize {
(addr + Self::SIZE - 1) / Self::SIZE

View File

@ -59,6 +59,7 @@ fn from_parse_error(v: ParseError) -> Error {
Error::InvalidFile
}
/// Creates a new copy of the TLS from given master image
pub fn clone_tls(space: &ProcessAddressSpace, image: &ProcessImage) -> Result<usize, Error> {
let Some(tls) = image.tls.as_ref() else {
// No TLS

View File

@ -25,7 +25,7 @@ use crate::{
},
};
pub struct BufferPlacer<'a> {
struct BufferPlacer<'a> {
buffer: &'a mut [u8],
virtual_offset: usize,
offset: usize,
@ -175,6 +175,7 @@ fn load_binary(
}
}
/// Loads a program from given `path`
pub fn load<P: AsRef<Path>>(
ioctx: &mut IoContext,
path: P,

View File

@ -20,18 +20,22 @@ impl ProcessIo {
}
}
/// Returns the [FileRef] associated with given `fd` or an error if it does not exist
pub fn file(&self, fd: RawFd) -> Result<&FileRef, Error> {
self.files.get(&fd).ok_or(Error::InvalidFile)
}
/// Returns the associated [IoContext] reference
pub fn ioctx(&mut self) -> &mut IoContext {
self.ioctx.as_mut().unwrap()
}
/// Changes the [IoContext] of this struct
pub fn set_ioctx(&mut self, ioctx: IoContext) {
self.ioctx = Some(ioctx);
}
/// Associates a `file` with `fd`, returning an error if requested `fd` is already taken
pub fn set_file(&mut self, fd: RawFd, file: FileRef) -> Result<(), Error> {
if self.files.contains_key(&fd) {
return Err(Error::AlreadyExists);
@ -40,6 +44,7 @@ impl ProcessIo {
Ok(())
}
/// Associates a `file` with any available [RawFd] and returns it
pub fn place_file(&mut self, file: FileRef) -> Result<RawFd, Error> {
for idx in 0..64 {
let fd = RawFd::from(idx);
@ -54,12 +59,14 @@ impl ProcessIo {
Err(Error::OutOfMemory)
}
/// Removes and closes a [FileRef] from the struct
pub fn close_file(&mut self, fd: RawFd) -> Result<(), Error> {
// Do nothing, file will be dropped and closed
self.files.remove(&fd).ok_or(Error::InvalidFile)?;
Ok(())
}
/// Removes all [FileRef]s from the struct which do not pass the `predicate` check
pub fn retain<F: Fn(&RawFd, &mut FileRef) -> bool>(&mut self, predicate: F) {
self.files.retain(predicate);
}
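
Note: place_file scans a fixed fd range for the first free slot. A standalone model of that allocation strategy over a BTreeMap; the String payload stands in for FileRef:

use std::collections::BTreeMap;

fn place(files: &mut BTreeMap<u32, String>, file: String) -> Option<u32> {
    for fd in 0..64 {
        if !files.contains_key(&fd) {
            files.insert(fd, file);
            return Some(fd);
        }
    }
    None // the kernel reports a full table as an error instead
}

fn main() {
    let mut files = BTreeMap::new();
    assert_eq!(place(&mut files, "stdin".into()), Some(0));
    assert_eq!(place(&mut files, "stdout".into()), Some(1));
}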

View File

@ -1,3 +1,5 @@
//! Random generation utilities
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use crate::arch::{Architecture, ARCHITECTURE};
@ -55,11 +57,13 @@ impl RandomState {
static RANDOM_STATE: OneTimeInit<IrqSafeSpinlock<RandomState>> = OneTimeInit::new();
/// Fills `buf` with random bytes
pub fn read(buf: &mut [u8]) {
let state = RANDOM_STATE.get();
state.lock().read_buf(buf)
}
/// Initializes the random generator state
pub fn init() {
let now = ARCHITECTURE
.monotonic_timer()

View File

@ -18,7 +18,9 @@ cfg_if! {
}
}
/// Conversion trait to allow multiple kernel closure return types
pub trait Termination {
/// Converts the closure return type into [ExitCode]
fn into_exit_code(self) -> ExitCode;
}

View File

@ -30,12 +30,16 @@ use super::{
TaskContext,
};
/// Represents a process state
#[derive(PartialEq)]
pub enum ProcessState {
/// Process is running, meaning it still has at least one thread alive
Running,
/// Process has finished with some [ExitCode]
Terminated(ExitCode),
}
/// Unique number assigned to each [Process]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct ProcessId(u64);
@ -47,26 +51,38 @@ pub struct ProcessId(u64);
// TLS layout (aarch64):
// | uthread_size (0x10?) | mem_size |
// | ??? | Data .....|
/// Describes Thread-Local Storage of a process
#[derive(Debug)]
pub struct ProcessTlsInfo {
/// Location of the TLS master copy within the process's memory
pub master_copy_base: usize,
/// Layout of the TLS
pub layout: ProcessTlsLayout,
}
/// Describes TLS layout for a program image
#[derive(Debug)]
pub struct ProcessTlsLayout {
/// Data offset from the TLS base
pub data_offset: usize,
/// struct uthread offset from the TLS base
pub uthread_offset: usize,
/// Pointer offset from the TLS base. The pointer is passed to the userspace
pub ptr_offset: usize,
/// Data size of the TLS segment
pub data_size: usize,
/// Memory size of the TLS segment (mem_size >= data_size)
pub mem_size: usize,
/// Overall allocation size of the TLS data
pub full_size: usize,
}
#[cfg(target_arch = "aarch64")]
impl ProcessTlsLayout {
/// Constructs a new thread-local storage layout info struct
pub fn new(align: usize, data_size: usize, mem_size: usize) -> Self {
debug_assert!(align.is_power_of_two());
let tls_block0_offset = (size_of::<usize>() * 2 + align - 1) & !(align - 1);
@ -87,6 +103,7 @@ impl ProcessTlsLayout {
#[cfg(target_arch = "x86_64")]
impl ProcessTlsLayout {
/// Constructs a new thread-local storage layout info struct
pub fn new(align: usize, data_size: usize, mem_size: usize) -> Self {
// The static TLS blocks are placed below TP
// TP points to the TCB
@ -109,8 +126,11 @@ impl ProcessTlsLayout {
}
}
/// Describes information about a program's image in memory
pub struct ProcessImage {
/// Entry point address
pub entry: usize,
/// Thread-local storage information
pub tls: Option<ProcessTlsInfo>,
}
@ -125,6 +145,7 @@ struct ProcessInner {
mutexes: BTreeMap<usize, Arc<UserspaceMutex>>,
}
/// Describes a process within the system
pub struct Process {
name: String,
id: ProcessId,
@ -134,6 +155,7 @@ pub struct Process {
image: Option<ProcessImage>,
exit_waker: QueueWaker,
/// Process I/O information
pub io: IrqSafeSpinlock<ProcessIo>,
}
@ -141,6 +163,7 @@ static PROCESSES: IrqSafeSpinlock<BTreeMap<ProcessId, Arc<Process>>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl Process {
/// Creates a new process with given main thread
pub fn new_with_main<S: Into<String>>(
name: S,
space: Arc<ProcessAddressSpace>,
@ -179,6 +202,7 @@ impl Process {
(process, thread)
}
/// Spawns a new child thread within the process
pub fn spawn_thread(self: &Arc<Self>, options: &ThreadSpawnOptions) -> Result<ThreadId, Error> {
debugln!(
"Spawn thread in {} with options: {:#x?}",
@ -210,43 +234,54 @@ impl Process {
Ok(id)
}
/// Returns the [ProcessId] of this process
pub fn id(&self) -> ProcessId {
self.id
}
/// Returns the process group ID of the process
pub fn group_id(&self) -> ProcessId {
self.inner.lock().group_id
}
/// Returns the process session ID of the process
pub fn session_id(&self) -> ProcessId {
self.inner.lock().session_id
}
/// Returns the process name
pub fn name(&self) -> &str {
self.name.as_ref()
}
/// Changes the process's group ID
pub fn set_group_id(&self, id: ProcessId) {
self.inner.lock().group_id = id;
}
/// Changes the process's session ID
pub fn set_session_id(&self, id: ProcessId) {
self.inner.lock().session_id = id;
}
// Resources
/// Returns the current session terminal of the process, if set
pub fn session_terminal(&self) -> Option<NodeRef> {
self.inner.lock().session_terminal.clone()
}
/// Changes the current session terminal of the process
pub fn set_session_terminal(&self, node: NodeRef) {
self.inner.lock().session_terminal.replace(node);
}
/// Resets the current session terminal of the process
pub fn clear_session_terminal(&self) -> Option<NodeRef> {
self.inner.lock().session_terminal.take()
}
/// Inherits the process information from the `parent`
pub fn inherit(&self, parent: &Process) -> Result<(), Error> {
let mut our_inner = self.inner.lock();
let their_inner = parent.inner.lock();
@ -259,6 +294,8 @@ impl Process {
}
// State
/// Returns the [ExitCode] of the process, if it has exited
pub fn get_exit_status(&self) -> Option<ExitCode> {
match self.inner.lock().state {
ProcessState::Running => None,
@ -266,6 +303,7 @@ impl Process {
}
}
/// Suspends the task until the process exits
pub fn wait_for_exit(process: Arc<Process>) -> impl Future<Output = ExitCode> {
struct ProcessExitFuture {
process: Arc<Process>,
@ -291,6 +329,7 @@ impl Process {
ProcessExitFuture { process }
}
/// Handles exit of a single child thread
pub fn handle_thread_exit(&self, thread: ThreadId, code: ExitCode) {
debugln!("Thread {} of process {}: {:?}", thread, self.id, code);
let mut inner = self.inner.lock();
@ -335,6 +374,8 @@ impl Process {
}
}
/// Returns a [UserspaceMutex] associated with the `address`. If one does not exist, will
/// create it.
pub fn get_or_insert_mutex(&self, address: usize) -> Arc<UserspaceMutex> {
let mut inner = self.inner.lock();
inner
@ -345,10 +386,13 @@ impl Process {
}
// Process list
/// Returns the process with given [ProcessId], if it exists
pub fn get(id: ProcessId) -> Option<Arc<Self>> {
PROCESSES.lock().get(&id).cloned()
}
/// Terminates all children of the process, `except` one
pub async fn terminate_others(&self, except: ThreadId) {
let mut inner = self.inner.lock();
@ -385,231 +429,10 @@ impl From<u32> for ProcessId {
}
impl ProcessId {
/// Generates a new [ProcessId]
pub fn next() -> Self {
static COUNTER: AtomicU64 = AtomicU64::new(1);
let id = COUNTER.fetch_add(1, Ordering::SeqCst);
Self(id)
}
}
// /// Returns the state of the process.
// ///
// /// # Note
// ///
// /// Maybe I should remove this and make ALL state changes atomic.
// pub fn state(&self) -> ProcessState {
// self.state.load(Ordering::Acquire)
// }
//
// /// Atomically updates the state of the process and returns the previous one.
// pub fn set_state(&self, state: ProcessState) -> ProcessState {
// self.state.swap(state, Ordering::SeqCst)
// }
//
// /// Marks the task as running on the specified CPU.
// ///
// /// # Safety
// ///
// /// Only meant to be called from scheduler routines.
// pub unsafe fn set_running(&self, cpu: u32) {
// self.cpu_id.store(cpu, Ordering::Release);
// self.state.store(ProcessState::Running, Ordering::Release);
// }
//
// /// Returns the address space of the task
// pub fn address_space(&self) -> &ProcessAddressSpace {
// self.space.as_ref().unwrap()
// }
//
// /// Returns the address space of the task, if one is set
// pub fn get_address_space(&self) -> Option<&ProcessAddressSpace> {
// self.space.as_ref()
// }
//
// /// Replaces the task's session terminal device with another one
// pub fn set_session_terminal(&self, terminal: VnodeRef) {
// self.inner.lock().session_terminal.replace(terminal);
// }
//
// /// Removes the task's current terminal
// pub fn clear_session_terminal(&self) -> Option<VnodeRef> {
// }
//
// /// Returns the current terminal of the task
// pub fn session_terminal(&self) -> Option<VnodeRef> {
// self.inner.lock().session_terminal.clone()
// }
//
// /// Sets the session ID of the task
// pub fn set_session_id(&self, sid: ProcessId) {
// self.inner.lock().session_id = sid;
// }
//
// /// Sets the process group ID of the task
// pub fn set_group_id(&self, mut gid: ProcessId) {
// if gid == 0 {
// gid = self.id();
// }
// self.inner.lock().group_id = gid;
// }
//
// /// Returns the process group ID of the task
// pub fn group_id(&self) -> ProcessId {
// self.inner.lock().group_id
// }
//
// /// Returns the CPU number this task in running on (or the last one)
// pub fn cpu_id(&self) -> u32 {
// self.cpu_id.load(Ordering::Acquire)
// }
//
// /// Selects a suitable CPU queue and submits the process for execution.
// ///
// /// # Panics
// ///
// /// Currently, the code will panic if the process is queued/executing on any queue.
// pub fn enqueue_somewhere(self: Arc<Self>) -> usize {
// // Doesn't have to be precise, so even if something changes, we can still be rebalanced
// // to another CPU
// let (index, queue) = CpuQueue::least_loaded().unwrap();
//
// self.enqueue_to(queue);
//
// index
// }
//
// /// Submits the process to a specific queue.
// ///
// /// # Panics
// ///
// /// Currently, the code will panic if the process is queued/executing on any queue.
// pub fn enqueue_to(self: Arc<Self>, queue: &'static CpuQueue) {
// let _irq = IrqGuard::acquire();
//
// {
// let mut inner = self.inner.lock();
// let old_queue = inner.queue.replace(queue);
// if old_queue.is_some() {
// // Already in some queue
// return;
// }
// }
// match self.state.compare_exchange(
// ProcessState::Suspended,
// ProcessState::Ready,
// Ordering::SeqCst,
// Ordering::Relaxed,
// ) {
// Err(ProcessState::Terminated) => {
// // Process might've been killed while `await`ing in a `block!`
// debugln!(
// "Process {} {:?} already terminated, dropping",
// self.id(),
// self.name()
// );
// }
// Err(state) => {
// todo!("Unexpected process state when enqueueing: {:?}", state)
// }
// Ok(_) => unsafe {
// queue.enqueue(self);
// },
// }
// }
//
// /// Returns an exit code if the process exited, [None] if it didn't
// pub fn get_exit_status(&self) -> Option<ExitCode> {
// if self.state() == ProcessState::Terminated {
// Some(ExitCode::from(self.inner.lock().exit_status))
// } else {
// None
// }
// }
//
// /// Returns the [Process] currently executing on local CPU, None if idling.
// pub fn get_current() -> Option<CurrentProcess> {
// let queue = Cpu::local().queue();
// queue.current_process()
// }
//
// /// Returns a process by its ID
// pub fn get(pid: ProcessId) -> Option<Arc<Self>> {
// PROCESSES.lock().get(pid).cloned()
// }
//
// /// Wraps [Process::get_current()] for cases when the caller is absolutely sure there is a
// /// running process (e.g. the call itself comes from a process).
// pub fn current() -> CurrentProcess {
// Self::get_current().unwrap()
// }
//
// /// Handles the cleanup of an exited process
// pub fn handle_exit(&self) {
// }
//
// /// Inherits the data from a parent process. Meant to be called from SpawnProcess handler.
// pub fn inherit(&self, parent: &Arc<Process>) -> Result<(), Error> {
// }
//
//
// pub fn wait_for_exit(process: Arc<Self>) -> impl Future<Output = ExitCode> {
// }
// }
//
// impl ArcWake for Process {
// fn wake_by_ref(arc_self: &Arc<Self>) {
// arc_self.clone().enqueue_somewhere();
// }
// }
//
// impl Drop for Process {
// fn drop(&mut self) {
// infoln!("Drop process!");
// }
// }
//
// impl CurrentProcess {
// /// Wraps a process in this structure.
// ///
// /// # Safety
// ///
// /// Only meant to be called from [Process::current] or [CpuQueue::current_process].
// pub unsafe fn new(inner: Arc<Process>, guard: IrqGuard) -> Self {
// Self(inner, guard)
// }
//
// /// Configures signal entry information for the process
// pub fn set_signal_entry(&self, entry: usize, stack: usize) {
// let mut inner = self.inner.lock();
// inner.signal_entry.replace(SignalEntry { entry, stack });
// }
//
// pub fn suspend(&self) -> Result<(), Error> {
// self.dequeue(ProcessState::Suspended);
//
// let inner = self.inner.lock();
// if !inner.signal_stack.is_empty() {
// return Err(Error::Interrupted);
// }
//
// Ok(())
// }
//
// /// Terminate the current process
// pub fn exit(&self, status: ExitCode) {
// self.inner.lock().exit_status = status.into();
// debugln!("Process {} exited with code {:?}", self.id(), status);
//
// self.handle_exit();
// self.dequeue(ProcessState::Terminated);
// }
//
// }
//
// impl Deref for CurrentProcess {
// type Target = Arc<Process>;
//
// fn deref(&self) -> &Self::Target {
// &self.0
// }
// }

View File

@ -11,10 +11,12 @@ use super::{
task_queue::{self},
};
/// Pushes a task into the executor's queue
pub fn enqueue(task: Arc<Task>) -> Result<(), Error> {
task_queue::push_task(task)
}
/// Spawns a background worker to execute the tasks from the global queue
pub fn spawn_async_worker(index: usize) -> Result<(), Error> {
let name = format!("[async-worker-{}]", index);
@ -35,10 +37,12 @@ pub fn spawn_async_worker(index: usize) -> Result<(), Error> {
})
}
/// Creates a new task for the [Future] and queues it for execution in background
pub fn spawn<F: Future<Output = ()> + Send + 'static>(future: F) -> Result<(), Error> {
enqueue(Task::new(future))
}
/// Runs a [Future] to its completion on the current thread
pub fn run_to_completion<'a, T, F: Future<Output = T> + Send + 'a>(future: F) -> Result<T, Error> {
let thread = Thread::current();
let mut future = Box::pin(future);
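
Note: run_to_completion is the classic poll-and-park loop. A std-only sketch of the same shape, using std::task::Wake to unpark the polling OS thread where the kernel suspends its own Thread:

use std::future::Future;
use std::pin::pin;
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};
use std::thread::{self, Thread};

struct ThreadWaker(Thread);

impl Wake for ThreadWaker {
    fn wake(self: Arc<Self>) {
        self.0.unpark(); // resume the parked poller
    }
}

fn block_on<T>(future: impl Future<Output = T>) -> T {
    let mut future = pin!(future);
    let waker = Waker::from(Arc::new(ThreadWaker(thread::current())));
    let mut cx = Context::from_waker(&waker);
    loop {
        match future.as_mut().poll(&mut cx) {
            Poll::Ready(value) => return value,
            Poll::Pending => thread::park(), // sleep until woken
        }
    }
}

fn main() {
    assert_eq!(block_on(async { 40 + 2 }), 42);
}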

View File

@ -1,3 +1,4 @@
/// Runs a [futures_util::Future] to its completion
#[macro_export]
macro_rules! block {
($($stmt:tt)*) => {
@ -7,6 +8,7 @@ macro_rules! block {
};
}
/// Runs two Futures, returning the result of whichever finishes first
#[macro_export]
macro_rules! any {
($fut0:ident = $pat0:pat => $body0:expr, $fut1:ident = $pat1:pat => $body1:expr) => {

View File

@ -1,3 +1,5 @@
//! Async/await runtime implementation
use core::{
pin::Pin,
task::{Context, Poll},
@ -18,6 +20,7 @@ pub use executor::{run_to_completion, spawn, spawn_async_worker};
pub use task_queue::init_task_queue;
pub use waker::QueueWaker;
/// [Future] implementation that wraps a poll-function
pub struct PollFn<F> {
f: F,
}
@ -34,6 +37,7 @@ where
}
}
/// Constructs a [PollFn] from given poll-function
pub fn poll_fn<T, F>(f: F) -> PollFn<F>
where
F: FnMut(&mut Context) -> Poll<T>,
@ -41,8 +45,9 @@ where
PollFn { f }
}
pub static SLEEP_WAKER: QueueWaker = QueueWaker::new();
static SLEEP_WAKER: QueueWaker = QueueWaker::new();
/// Suspends the task until given duration passes
pub fn sleep(duration: Duration) -> impl Future<Output = ()> {
struct SleepFuture {
deadline: Duration,
@ -75,6 +80,7 @@ pub fn sleep(duration: Duration) -> impl Future<Output = ()> {
SleepFuture { deadline }
}
/// Updates the runtime's time
pub fn tick(_now: Duration) {
SLEEP_WAKER.wake_all();
}

View File

@ -60,6 +60,7 @@ impl TaskQueue {
}
}
/// Initializes the global async/await task queue
pub fn init_task_queue() {
TASK_QUEUE.init(TaskQueue::new(128));
}

View File

@ -3,17 +3,20 @@ use core::task::Waker;
use alloc::collections::VecDeque;
use kernel_util::sync::IrqSafeSpinlock;
/// Async/await primitive to suspend and wake up tasks waiting on some shared resource
pub struct QueueWaker {
queue: IrqSafeSpinlock<VecDeque<Waker>>,
}
impl QueueWaker {
/// Constructs an empty [QueueWaker]
pub const fn new() -> Self {
Self {
queue: IrqSafeSpinlock::new(VecDeque::new()),
}
}
/// Registers a [Waker] reference to be woken up by this [QueueWaker]
pub fn register(&self, waker: &Waker) {
let mut queue = self.queue.lock();
@ -24,6 +27,7 @@ impl QueueWaker {
queue.push_back(waker.clone());
}
/// Removes a [Waker] reference from this [QueueWaker]
pub fn remove(&self, waker: &Waker) -> bool {
let mut queue = self.queue.lock();
let mut index = 0;
@ -40,6 +44,7 @@ impl QueueWaker {
removed
}
/// Wakes up at most `limit` tasks waiting on this queue
pub fn wake_some(&self, limit: usize) -> usize {
let mut queue = self.queue.lock();
let mut count = 0;
@ -52,10 +57,12 @@ impl QueueWaker {
count
}
/// Wakes up a single task waiting on this queue
pub fn wake_one(&self) -> bool {
self.wake_some(1) != 0
}
/// Wakes up all tasks waiting on this queue
pub fn wake_all(&self) -> usize {
self.wake_some(usize::MAX)
}
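
Note: the whole primitive is a locked FIFO of Wakers. A standalone std model mirroring the register/wake_some API; filtering duplicate registrations with Waker::will_wake is an assumption about the original's check. The sketch compiles as-is and can be exercised with any Waker source, e.g. the ThreadWaker above:

use std::collections::VecDeque;
use std::sync::Mutex;
use std::task::Waker;

struct QueueWaker {
    queue: Mutex<VecDeque<Waker>>,
}

impl QueueWaker {
    fn new() -> Self {
        Self { queue: Mutex::new(VecDeque::new()) }
    }

    fn register(&self, waker: &Waker) {
        let mut queue = self.queue.lock().unwrap();
        if queue.iter().any(|w| w.will_wake(waker)) {
            return; // already queued
        }
        queue.push_back(waker.clone());
    }

    fn wake_some(&self, limit: usize) -> usize {
        let mut queue = self.queue.lock().unwrap();
        let mut count = 0;
        while count < limit {
            match queue.pop_front() {
                Some(waker) => {
                    waker.wake();
                    count += 1;
                }
                None => break,
            }
        }
        count
    }
}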

View File

@ -29,17 +29,6 @@ pub struct CpuQueueStats {
measure_time: u64,
}
// /// Per-CPU queue's inner data, normally resides under a lock
// pub struct CpuQueueInner {
// /// Current process, None if idling
// pub current: Option<Arc<Process>>,
// /// LIFO queue for processes waiting for execution
// pub queue: VecDeque<Arc<Process>>,
//
// /// CPU time usage statistics
// pub stats: CpuQueueStats,
// }
struct CpuQueueInner {
current: Option<ThreadId>,
queue: VecDeque<ThreadId>,
@ -117,7 +106,7 @@ impl CpuQueueInner {
}
impl CpuQueue {
// /// Constructs an empty queue with its own idle task
/// Constructs an empty queue with its own idle task
pub fn new(index: usize) -> Self {
let idle = TaskContext::kernel(__idle, Cpu::local_id() as usize)
.expect("Could not construct an idle task");
@ -246,40 +235,7 @@ impl CpuQueue {
self.inner.lock().queue.is_empty()
}
// /// Returns a safe reference to the inner data structure.
// pub fn lock(&self) -> IrqSafeSpinlockGuard<CpuQueueInner> {
// self.inner.lock()
// }
// /// Returns an unsafe reference to the queue.
// ///
// /// # Safety
// ///
// /// Only meant to be called to dump the queue contents when panicking.
// #[allow(clippy::mut_from_ref)]
// pub unsafe fn grab(&self) -> &mut CpuQueueInner {
// self.inner.grab()
// }
//
// /// Returns the process currently being executed.
// ///
// /// # Note
// ///
// /// This function should be safe in all kernel thread/interrupt contexts:
// ///
// /// * (in kthread) the code calling this will still remain on the same thread.
// /// * (in irq) the code cannot be interrupted and other CPUs shouldn't change this queue, so it
// /// will remain valid until the end of the interrupt or until [CpuQueue::yield_cpu]
// /// is called.
// pub fn current_process(&self) -> Option<CurrentProcess> {
// let guard = IrqGuard::acquire();
// self.inner
// .lock()
// .current
// .clone()
// .map(|p| unsafe { CurrentProcess::new(p, guard) })
// }
/// Returns the current [ThreadId], or [None] if idle
pub fn current_id(&self) -> Option<ThreadId> {
self.inner.lock().current
}
@ -302,6 +258,7 @@ impl CpuQueue {
queues.iter().enumerate().min_by_key(|(_, q)| q.len())
}
/// Returns the CPU index of the queue
pub fn index(&self) -> usize {
self.index
}

View File

@ -1,3 +1,5 @@
//! Higher-order task synchronization primitives
use core::{
pin::Pin,
sync::atomic::{AtomicU32, Ordering},
@ -9,12 +11,14 @@ use futures_util::Future;
use super::runtime::QueueWaker;
/// User-space mutex (like BSD/Linux's futex) data structure
pub struct UserspaceMutex {
queue: QueueWaker,
address: usize,
}
impl UserspaceMutex {
/// Creates a new [UserspaceMutex] associated with given `address`
pub fn new(address: usize) -> Self {
Self {
queue: QueueWaker::new(),
@ -22,6 +26,7 @@ impl UserspaceMutex {
}
}
/// Blocks until the value at the mutex's address becomes different from `compare_value`
pub fn wait(self: Arc<Self>, compare_value: u32) -> impl Future<Output = ()> {
struct WaitFuture {
mutex: Arc<UserspaceMutex>,
@ -57,10 +62,12 @@ impl UserspaceMutex {
}
}
/// Wakes up a single task waiting on the mutex
pub fn wake(&self) {
self.queue.wake_one();
}
/// Wakes up all tasks waiting on the mutex
pub fn wake_all(&self) {
self.queue.wake_all();
}
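
Note: the semantics match a futex wait: block while the word still equals compare_value, and re-check on every wake-up. A blocking std model using Mutex and Condvar in place of the async queue:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct UserspaceMutex {
    value: Mutex<u32>,
    cond: Condvar,
}

impl UserspaceMutex {
    // wait(): sleep while the cell still holds `compare_value`.
    fn wait(&self, compare_value: u32) {
        let mut value = self.value.lock().unwrap();
        while *value == compare_value {
            value = self.cond.wait(value).unwrap();
        }
    }

    // The writer side: update the word, then wake_all().
    fn store_and_wake(&self, new: u32) {
        *self.value.lock().unwrap() = new;
        self.cond.notify_all();
    }
}

fn main() {
    let mutex = Arc::new(UserspaceMutex {
        value: Mutex::new(0),
        cond: Condvar::new(),
    });
    let waiter = {
        let mutex = Arc::clone(&mutex);
        thread::spawn(move || mutex.wait(0)) // blocks while value == 0
    };
    mutex.store_and_wake(1);
    waiter.join().unwrap();
}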

View File

@ -1,3 +1,5 @@
//! Thread data structures and management
use core::{
fmt,
mem::size_of,
@ -44,12 +46,16 @@ pub enum ThreadState {
Terminated,
}
/// Unique number describing a single kernel or userspace thread
#[derive(Debug, PartialEq, Eq, Clone, Copy, Ord, PartialOrd)]
pub enum ThreadId {
/// Describes a kernel-space thread
Kernel(u64),
/// Describes a user-space thread
User(u64),
}
/// Wrapper which guarantees the thread referred to is the current one on the current CPU
pub struct CurrentThread(Arc<Thread>, IrqGuard);
struct SignalEntry {
@ -64,6 +70,7 @@ struct ThreadInner {
signal_stack: VecDeque<Signal>,
}
/// Describes a single thread within the system
pub struct Thread {
context: TaskContext,
name: Option<String>,
@ -83,10 +90,6 @@ static THREADS: IrqSafeSpinlock<BTreeMap<ThreadId, Arc<Thread>>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl Thread {
// pub fn new(index: usize, parent: &Process, context: TaskContext) -> Self {
// todo!()
// }
fn new(
id: ThreadId,
name: Option<String>,
@ -120,6 +123,7 @@ impl Thread {
thread
}
/// Constructs a new user-space thread
pub fn new_uthread(
parent: Arc<Process>,
space: Arc<ProcessAddressSpace>,
@ -134,6 +138,7 @@ impl Thread {
)
}
/// Constructs a new kernel-space thread
pub fn new_kthread<S: Into<String>>(name: S, context: TaskContext) -> Arc<Self> {
Self::new(
ThreadId::next_kernel(),
@ -145,31 +150,53 @@ impl Thread {
}
// Info
/// Returns the thread's ID
pub fn id(&self) -> ThreadId {
self.id
}
/// Returns the thread's name, if set
pub fn name(&self) -> Option<&String> {
self.name.as_ref()
}
/// Returns the thread's [TaskContext]
pub fn context(&self) -> &TaskContext {
&self.context
}
/// Returns the thread's [ProcessAddressSpace] reference.
///
/// # Panics
///
/// Will panic if the thread has no associated address space.
pub fn address_space(&self) -> &Arc<ProcessAddressSpace> {
self.space.as_ref().unwrap()
}
/// Returns the thread's parent [Process] reference.
///
/// # Panics
///
/// Will panic if the thread has no process associated (i.e. it's a kernel thread).
pub fn process(&self) -> &Arc<Process> {
self.process.as_ref().unwrap()
}
// Queue operation
/// Returns the current thread on the CPU.
///
/// # Panics
///
/// Will panic if no current thread is present. For try-style getter, see
/// [Thread::get_current].
pub fn current() -> CurrentThread {
Self::get_current().unwrap()
}
/// Returns the current thread on the CPU, if any is present
pub fn get_current() -> Option<CurrentThread> {
let guard = IrqGuard::acquire();
Cpu::local()
@ -179,6 +206,8 @@ impl Thread {
.map(|t| CurrentThread(t, guard))
}
/// Enqueues the thread onto any (usually the least loaded) CPU queue and returns its index.
/// See [Thread::enqueue_to].
pub fn enqueue_somewhere(&self) -> usize {
// Doesn't have to be precise, so even if something changes, we can still be rebalanced
// to another CPU
@ -189,6 +218,11 @@ impl Thread {
index
}
/// Enqueues the thread onto the specific CPU's queue.
///
/// # Panics
///
/// Will panic if the thread is in an unexpected state while being queued.
pub fn enqueue_to(&self, queue: &'static CpuQueue) {
let _irq = IrqGuard::acquire();
@ -261,7 +295,7 @@ impl Thread {
}
}
/// Marks the process as running and sets its "current" CPU index.
/// Marks the thread as running and sets its "current" CPU index.
///
/// # Safety
///
@ -271,21 +305,31 @@ impl Thread {
self.state.store(ThreadState::Running, Ordering::Release);
}
/// Suspends the thread and removes it from the queue until it is "woken up" again
pub fn suspend(&self) {
self.dequeue(ThreadState::Suspended);
}
// Accounting
/// Returns the thread with given [ThreadId], if it exists
pub fn get(id: ThreadId) -> Option<Arc<Thread>> {
THREADS.lock().get(&id).cloned()
}
// Thread inner
/// Changes the thread's signal entry point information
pub fn set_signal_entry(&self, entry: usize, stack: usize) {
let mut inner = self.inner.lock();
inner.signal_entry.replace(SignalEntry { entry, stack });
}
/// Pushes a [Signal] onto the thread's signal stack.
///
/// When executed on the current thread, the signal is guaranteed to be handled exactly before
/// returning to user context (i.e. from a syscall). Otherwise, neither the signal handling order
/// nor whether it will be delivered at all is guaranteed.
pub fn raise_signal(self: &Arc<Self>, signal: Signal) {
self.inner.lock().signal_stack.push_back(signal);
@ -294,6 +338,7 @@ impl Thread {
}
}
/// Requests thread termination and blocks until said thread finishes fully.
pub fn terminate(self: &Arc<Self>) -> impl Future<Output = ()> {
struct F(Arc<Thread>);
@ -365,6 +410,8 @@ impl CurrentThread {
self.dequeue_terminate(code)
}
/// Terminate the parent process of the thread, including all other threads and the current
/// thread itself
pub fn exit_process(&self, code: ExitCode) {
let _guard = IrqGuard::acquire();
@ -384,6 +431,7 @@ impl CurrentThread {
unreachable!();
}
/// Suspends the current thread until it is woken up again. Guaranteed to happen immediately.
pub fn suspend(&self) -> Result<(), Error> {
self.dequeue(ThreadState::Suspended);
@ -457,18 +505,25 @@ impl Deref for CurrentThread {
}
impl ThreadId {
/// Generates a new kernel-space thread ID
pub fn next_kernel() -> Self {
static COUNT: AtomicU64 = AtomicU64::new(1);
let id = COUNT.fetch_add(1, Ordering::SeqCst);
Self::Kernel(id)
}
/// Generates a new user-space thread ID
pub fn next_user() -> Self {
static COUNT: AtomicU64 = AtomicU64::new(1);
let id = COUNT.fetch_add(1, Ordering::SeqCst);
Self::User(id)
}
/// Returns the number of the userspace thread represented by this ID.
///
/// # Panics
///
/// Will panic if this [ThreadId] does not represent a user-space thread.
pub fn as_user(&self) -> u64 {
match self {
Self::Kernel(_) => panic!(),
@ -476,6 +531,7 @@ impl ThreadId {
}
}
/// Returns `true` if the [ThreadId] represents a user-space thread
pub fn is_user(&self) -> bool {
matches!(self, ThreadId::User(_))
}

View File

@ -3,7 +3,9 @@
pub mod queue;
pub mod ring;
/// Extension trait for [Iterator]s of [Result]s
pub trait ResultIterator<T, E> {
/// Drops entries from the iterator until the first error
fn collect_error(self) -> Option<E>;
}
@ -18,6 +20,7 @@ impl<T, E, I: Iterator<Item = Result<T, E>>> ResultIterator<T, E> for I {
}
}
/// Returns the architecture name string
pub const fn arch_str() -> &'static str {
#[cfg(target_arch = "aarch64")]
{
@ -28,12 +31,3 @@ pub const fn arch_str() -> &'static str {
"x86_64"
}
}
// /// Performs a busy-loop sleep until the specified duration has passed
// pub fn polling_sleep(duration: Duration) -> Result<(), Error> {
// // TODO no non-IRQ mode timestamp provider
// for i in 0..1000000 {
// core::hint::spin_loop();
// }
// Ok(())
// }

View File

@ -1,3 +1,5 @@
//! Asynchronous array queue implementation
use core::{
pin::Pin,
task::{Context, Poll},
@ -9,12 +11,14 @@ use futures_util::Future;
use crate::task::runtime::QueueWaker;
/// Asynchronous queue
pub struct AsyncQueue<T> {
waker: Arc<QueueWaker>,
queue: Arc<ArrayQueue<T>>,
}
impl<T> AsyncQueue<T> {
/// Constructs a new [AsyncQueue] of requested capacity
pub fn new(capacity: usize) -> Self {
Self {
waker: Arc::new(QueueWaker::new()),
@ -22,6 +26,7 @@ impl<T> AsyncQueue<T> {
}
}
/// Pushes an entry to the queue and signals a single task about the available value
pub fn send(&self, value: T) -> Result<(), T> {
let result = self.queue.push(value);
if result.is_ok() {
@ -30,6 +35,7 @@ impl<T> AsyncQueue<T> {
result
}
/// Asynchronously receives an entry from the queue
pub fn recv(&self) -> impl Future<Output = T> {
struct AsyncQueueRecvFuture<T> {
waker: Arc<QueueWaker>,

View File

@ -1,3 +1,5 @@
//! Ring buffer implementation
use core::{
pin::Pin,
task::{Context, Poll},
@ -10,18 +12,21 @@ use kernel_util::sync::IrqSafeSpinlock;
use crate::task::runtime::QueueWaker;
/// Ring buffer base
pub struct RingBuffer<T, const N: usize> {
rd: usize,
wr: usize,
data: [T; N],
}
/// Ring buffer with async read support
pub struct AsyncRing<T, const N: usize> {
inner: Arc<IrqSafeSpinlock<RingBuffer<T, N>>>,
read_waker: Arc<QueueWaker>,
}
impl<T: Copy, const N: usize> RingBuffer<T, N> {
/// Constructs a new [RingBuffer] and fills it with `value`
pub const fn new(value: T) -> Self {
Self {
rd: 0,
@ -44,12 +49,13 @@ impl<T: Copy, const N: usize> RingBuffer<T, N> {
}
#[inline]
fn read_single(&mut self) -> T {
fn read_single_unchecked(&mut self) -> T {
let res = self.data[self.rd];
self.rd = (self.rd + 1) % N;
res
}
/// Reads all entries available from `pos` to the write head
pub fn read_all_static(&mut self, pos: usize, buffer: &mut [T]) -> usize {
let mut pos = (self.rd + pos) % N;
let mut off = 0;
@ -61,6 +67,7 @@ impl<T: Copy, const N: usize> RingBuffer<T, N> {
off
}
/// Writes a single entry to the buffer
#[inline]
pub fn write(&mut self, ch: T) {
self.data[self.wr] = ch;
@ -69,6 +76,7 @@ impl<T: Copy, const N: usize> RingBuffer<T, N> {
}
impl<T: Copy, const N: usize> AsyncRing<T, N> {
/// Constructs a new [AsyncRing] and fills it with `value`
pub fn new(value: T) -> Self {
Self {
inner: Arc::new(IrqSafeSpinlock::new(RingBuffer::new(value))),
@ -76,6 +84,7 @@ impl<T: Copy, const N: usize> AsyncRing<T, N> {
}
}
/// Writes a single entry to the buffer and signals readers
pub fn try_write(&self, item: T) -> Result<(), Error> {
{
let mut lock = self.inner.lock();
@ -86,6 +95,7 @@ impl<T: Copy, const N: usize> AsyncRing<T, N> {
Ok(())
}
/// Asynchronously reads an entry from the buffer
pub fn read(&self) -> impl Future<Output = T> {
struct ReadFuture<T: Copy, const N: usize> {
inner: Arc<IrqSafeSpinlock<RingBuffer<T, N>>>,
@ -101,7 +111,7 @@ impl<T: Copy, const N: usize> AsyncRing<T, N> {
let mut inner = self.inner.lock();
if inner.is_readable() {
self.read_waker.remove(cx.waker());
Poll::Ready(inner.read_single())
Poll::Ready(inner.read_single_unchecked())
} else {
Poll::Pending
}
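
Note: read_all_static walks from rd + pos toward the write head, wrapping modulo the capacity. A standalone model of that traversal, including the wrap-around case; the stop condition (pos != wr) is an assumption, since the hunk is truncated:

const N: usize = 8;

fn read_all_static(data: &[u8; N], rd: usize, wr: usize, pos: usize, out: &mut [u8]) -> usize {
    let mut pos = (rd + pos) % N; // same starting-point math as the kernel's version
    let mut off = 0;
    while off < out.len() && pos != wr {
        out[off] = data[pos];
        pos = (pos + 1) % N;
        off += 1;
    }
    off
}

fn main() {
    // The write head has wrapped: the valid bytes sit at indices 6, 7, 0, 1.
    let data = [b'c', b'd', 0, 0, 0, 0, b'a', b'b'];
    let mut out = [0u8; 4];
    let n = read_all_static(&data, 6, 2, 0, &mut out);
    assert_eq!(&out[..n], b"abcd");
}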