diff --git a/kernel/libk/src/task/futex.rs b/kernel/libk/src/task/futex.rs
index df947ea4..98cf63de 100644
--- a/kernel/libk/src/task/futex.rs
+++ b/kernel/libk/src/task/futex.rs
@@ -34,7 +34,7 @@ impl UserspaceMutex {
             .atomic_load_foreign(&self.space, Ordering::Acquire)
     }
 
-    async fn wait_predicate<P: Fn(u32) -> bool>(&self, predicate: P) -> Result<(), Error> {
+    pub async fn wait_until<P: Fn(u32) -> bool>(&self, predicate: P) -> Result<(), Error> {
         poll_fn(|cx| {
             let result = self.load();
             match result {
@@ -55,14 +55,14 @@ impl UserspaceMutex {
         .await
     }
 
-    pub async fn wait_until(self: Arc<Self>, compare_value: u32) -> Result<(), Error> {
-        self.wait_predicate(|value| value == compare_value).await
-    }
+    // pub async fn wait_until(self: Arc<Self>, compare_value: u32) -> Result<(), Error> {
+    //     self.wait_predicate(|value| value == compare_value).await
+    // }
 
-    /// Blocks until the value at the mutex's address becomes different from `compare_value`
-    pub async fn wait(self: Arc<Self>, compare_value: u32) -> Result<(), Error> {
-        self.wait_predicate(|value| value != compare_value).await
-    }
+    // /// Blocks until the value at the mutex's address becomes different from `compare_value`
+    // pub async fn wait(self: Arc<Self>, compare_value: u32) -> Result<(), Error> {
+    //     self.wait_predicate(|value| value != compare_value).await
+    // }
 
     /// Wakes up a single task waiting on the mutex
     pub fn wake(&self) {
diff --git a/kernel/src/syscall/imp/sys_process.rs b/kernel/src/syscall/imp/sys_process.rs
index 0d605286..328ef28c 100644
--- a/kernel/src/syscall/imp/sys_process.rs
+++ b/kernel/src/syscall/imp/sys_process.rs
@@ -240,8 +240,18 @@ pub(crate) fn mutex(mutex: &AtomicU32, op: &MutexOperation) -> Result<(), Error>
     let mutex = process.get_or_insert_mutex((mutex as *const AtomicU32).addr())?;
 
     match op {
-        &MutexOperation::Wait(value, _timeout) => block! { mutex.wait(value).await }?,
-        &MutexOperation::WaitUntil(value, _timeout) => block! { mutex.wait_until(value).await }?,
+        &MutexOperation::WaitWhileEqual(value, _timeout) => {
+            block! { mutex.wait_until(|v| v != value).await }?
+        }
+        &MutexOperation::WaitUntilEqual(value, _timeout) => {
+            block! { mutex.wait_until(|v| v == value).await }?
+        }
+        &MutexOperation::WaitWhileSet(mask, _timeout) => {
+            block! { mutex.wait_until(|v| v & mask == 0).await }?
+        }
+        &MutexOperation::WaitUntilSet(mask, _timeout) => {
+            block! { mutex.wait_until(|v| v & mask != 0).await }?
+        }
         MutexOperation::Wake => {
             mutex.wake();
             Ok(())
diff --git a/lib/abi/src/process/mod.rs b/lib/abi/src/process/mod.rs
index 1bcb4669..c1d18159 100644
--- a/lib/abi/src/process/mod.rs
+++ b/lib/abi/src/process/mod.rs
@@ -53,9 +53,13 @@ pub enum SpawnOption {
 #[repr(C)]
 pub enum MutexOperation {
     /// Waits on the mutex object until it is different from the "compare value"
-    Wait(u32, Option<Duration>),
+    WaitWhileEqual(u32, Option<Duration>),
     /// Waits on the mutex object until it becomes equal to the "compare value"
-    WaitUntil(u32, Option<Duration>),
+    WaitUntilEqual(u32, Option<Duration>),
+    /// Waits on the mutex object until `mutex & A == 0`
+    WaitWhileSet(u32, Option<Duration>),
+    /// Waits on the mutex object until `mutex & A != 0`
+    WaitUntilSet(u32, Option<Duration>),
     /// Wakes a single mutex-waiting thread
     Wake,
     /// Wakes all threads waiting on the mutex
diff --git a/lib/libyalloc/src/allocator.rs b/lib/libyalloc/src/allocator.rs
index b33d195d..8b74e52c 100644
--- a/lib/libyalloc/src/allocator.rs
+++ b/lib/libyalloc/src/allocator.rs
@@ -149,6 +149,8 @@ impl<P: PageProvider> BucketAllocator<P> {
     }
 }
 
+unsafe impl<P: PageProvider> Sync for BucketAllocator<P> {}
+
 #[cfg(test)]
 mod tests {
     use core::{alloc::Layout, ptr::NonNull};
diff --git a/lib/runtime/src/lib.rs b/lib/runtime/src/lib.rs
index 362fe94d..f5db9cbf 100644
--- a/lib/runtime/src/lib.rs
+++ b/lib/runtime/src/lib.rs
@@ -24,6 +24,7 @@ pub mod debug;
 pub mod io;
 pub mod net;
 pub mod process;
+pub mod sync;
 pub mod sys;
 pub mod time;
 
diff --git a/lib/runtime/src/process/thread.rs b/lib/runtime/src/process/thread.rs
index 6c69af02..d844e529 100644
--- a/lib/runtime/src/process/thread.rs
+++ b/lib/runtime/src/process/thread.rs
@@ -1,8 +1,6 @@
 //! Runtime utilities for thread handling
 
 use core::{
-    cell::UnsafeCell,
-    mem::MaybeUninit,
     ptr,
     sync::atomic::{AtomicU32, AtomicUsize, Ordering},
 };
@@ -14,7 +12,7 @@ use abi::{
 };
 use alloc::{boxed::Box, sync::Arc};
 
-use crate::process::thread_local;
+use crate::{process::thread_local, sync::rwlock::RwLock};
 
 use super::{signal, thread_local::TlsImage};
 
@@ -23,8 +21,7 @@ use super::{signal, thread_local::TlsImage};
 /// `R` generic parameter denotes the thread's return type.
 pub struct Thread<R> {
     id: AtomicU32,
-    // TODO mutex
-    result: UnsafeCell<Option<R>>,
+    result: RwLock<Option<R>>,
 }
 
 /// Describes a "handle" to some runtime thread. This is what gets returned to the caller
@@ -106,7 +103,7 @@ impl<R> Thread<R> {
 
         let thread = Arc::new(Thread::<R> {
             id: AtomicU32::new(0),
-            result: UnsafeCell::new(None),
+            result: RwLock::new(None),
         });
 
         let thread_argument = Box::into_raw(Box::new(ThreadArgument {
@@ -199,10 +196,7 @@ impl<R> Thread<R> {
                 None => unreachable!(),
             };
 
-            let result_ptr = argument.thread_self.result.get();
-            unsafe {
-                (*result_ptr).insert(result);
-            }
+            argument.thread_self.result.write().insert(result);
         }
 
         unsafe { crate::sys::exit_thread() };
@@ -218,10 +212,8 @@ impl<R> ThreadHandle<R> {
     /// Waits for the thread to finish and returns its result.
     pub fn join(self) -> Result<Option<R>, Error> {
         // TODO prevent threads from attempting to join themselves?
-        unsafe {
-            crate::sys::wait_thread(self.thread.id.load(Ordering::Acquire))?;
-            Ok(self.into_result())
-        }
+        unsafe { crate::sys::wait_thread(self.thread.id.load(Ordering::Acquire)) }?;
+        Ok(self.into_result())
     }
 
     /// Waits for the thread to finish, ignoring interrupts.
@@ -236,12 +228,12 @@ impl<R> ThreadHandle<R> {
                 Err(Error::Interrupted) => continue,
                 Err(error) => panic!("wait_thread syscall returned error: {error:?}"),
             }
-            return unsafe { self.into_result() };
+            return self.into_result();
         }
     }
 
-    unsafe fn into_result(self) -> Option<R> {
-        (*self.thread.result.get()).take()
+    fn into_result(self) -> Option<R> {
+        self.thread.result.write().take()
     }
 }
 
@@ -267,6 +259,9 @@ impl Drop for OwnedStack {
     }
 }
 
+unsafe impl<R> Send for Thread<R> {}
+unsafe impl<R> Sync for Thread<R> {}
+
 /// Sets the `SELF` TLS variable
 ///
 /// # Safety
@@ -276,7 +271,7 @@ impl Drop for OwnedStack {
 pub unsafe fn track_main<R>() {
     let main = Arc::new(Thread::<R> {
         id: AtomicU32::new(0),
-        result: UnsafeCell::new(None),
+        result: RwLock::new(None),
     });
     SELF = Arc::into_raw(main).addr();
 }
diff --git a/lib/runtime/src/process/thread_local/mod.rs b/lib/runtime/src/process/thread_local/mod.rs
index 3f755b02..af87fdab 100644
--- a/lib/runtime/src/process/thread_local/mod.rs
+++ b/lib/runtime/src/process/thread_local/mod.rs
@@ -121,6 +121,8 @@ impl TlsImage {
     }
 }
 
+unsafe impl Sync for TlsImage {}
+
 impl Dtv {
     fn new() -> Self {
         Self {
diff --git a/lib/runtime/src/sync/mod.rs b/lib/runtime/src/sync/mod.rs
new file mode 100644
index 00000000..98ebcfd9
--- /dev/null
+++ b/lib/runtime/src/sync/mod.rs
@@ -0,0 +1,5 @@
+//! Synchronization mechanism implementations
+
+pub mod mutex;
+pub mod once;
+pub mod rwlock;
diff --git a/lib/runtime/src/sync/mutex.rs b/lib/runtime/src/sync/mutex.rs
new file mode 100644
index 00000000..1ce32597
--- /dev/null
+++ b/lib/runtime/src/sync/mutex.rs
@@ -0,0 +1,176 @@
+//! Mutual exclusion primitive implementation
+
+use core::{
+    cell::UnsafeCell,
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicU32, Ordering},
+    time::Duration,
+};
+
+use abi::{error::Error, process::MutexOperation};
+
+/// Raw implementation of the mutual exclusion lock.
+pub struct RawMutex {
+    state: AtomicU32,
+}
+
+/// Guards a value of type `T` by a mutual exclusion lock.
+pub struct Mutex<T> {
+    value: UnsafeCell<T>,
+    lock: RawMutex,
+}
+
+/// Represents a method to access a [Mutex], currently locked by this structure's owner.
+pub struct MutexGuard<'a, T> {
+    mutex: &'a Mutex<T>,
+    _pd: PhantomData<&'a mut T>,
+}
+
+impl RawMutex {
+    const UNLOCKED: u32 = 0;
+    const LOCKED: u32 = 1;
+
+    /// Constructs a new unlocked mutex.
+    pub const fn new() -> Self {
+        Self {
+            state: AtomicU32::new(Self::UNLOCKED),
+        }
+    }
+
+    /// Attempts to gain a lock on the mutex immediately.
+    ///
+    /// Returns `true` if the mutex could be locked instantly.
+    pub fn try_lock(&self) -> bool {
+        self.state
+            .compare_exchange(
+                Self::UNLOCKED,
+                Self::LOCKED,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+    }
+
+    /// Gains a lock on the mutex. Blocks if the mutex is currently locked by someone else.
+    pub fn lock(&self) {
+        loop {
+            if self.try_lock() {
+                return;
+            }
+
+            self.wait(None);
+        }
+    }
+
+    /// Gains a lock on the mutex. Blocks if the mutex is currently locked by someone else, but
+    /// waits for the mutex to become available for no longer than `timeout`.
+    ///
+    /// Returns [Error::TimedOut] if the function could not gain a lock within the requested time.
+    pub fn lock_timeout(&self, timeout: Duration) -> Result<(), Error> {
+        if self.try_lock() {
+            return Ok(());
+        }
+
+        self.wait(Some(timeout));
+
+        if self.try_lock() {
+            Ok(())
+        } else {
+            Err(Error::TimedOut)
+        }
+    }
+
+    /// Releases the mutex lock.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure the lock is owned/held by themselves.
+    pub unsafe fn release(&self) {
+        if self.state.swap(Self::UNLOCKED, Ordering::Release) == Self::LOCKED {
+            self.notify_one();
+        }
+    }
+
+    fn wait(&self, duration: Option<Duration>) {
+        unsafe {
+            crate::sys::mutex(
+                &self.state,
+                &MutexOperation::WaitWhileEqual(Self::LOCKED, duration),
+            )
+        }
+        .expect("call to mutex wait failed");
+    }
+
+    fn notify_one(&self) {
+        unsafe { crate::sys::mutex(&self.state, &MutexOperation::Wake).ok() };
+    }
+}
+
+impl<T> Mutex<T> {
+    /// Constructs a new [Mutex], wrapping the given `value`.
+    pub const fn new(value: T) -> Self {
+        Self {
+            value: UnsafeCell::new(value),
+            lock: RawMutex::new(),
+        }
+    }
+
+    /// Attempts to immediately gain a lock on the mutex.
+    ///
+    /// Returns [Some] if the lock could be acquired instantly, [None] if someone else is already
+    /// holding the lock at the moment.
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+        if self.lock.try_lock() {
+            Some(MutexGuard {
+                mutex: self,
+                _pd: PhantomData,
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Gains a lock on the mutex, returning a [MutexGuard].
+    pub fn lock(&self) -> MutexGuard<'_, T> {
+        self.lock.lock();
+        MutexGuard {
+            mutex: self,
+            _pd: PhantomData,
+        }
+    }
+
+    /// Same as [Mutex::lock], except it blocks for no longer than the `timeout` specified.
+    pub fn lock_timeout(&self, timeout: Duration) -> Result<MutexGuard<'_, T>, Error> {
+        self.lock.lock_timeout(timeout)?;
+        Ok(MutexGuard {
+            mutex: self,
+            _pd: PhantomData,
+        })
+    }
+}
+
+unsafe impl<T> Send for Mutex<T> {}
+unsafe impl<T> Sync for Mutex<T> {}
+
+impl<T> Deref for MutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.mutex.value.get() }
+    }
+}
+
+impl<T> DerefMut for MutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        unsafe { &mut *self.mutex.value.get() }
+    }
+}
+
+impl<T> Drop for MutexGuard<'_, T> {
+    fn drop(&mut self) {
+        unsafe { self.mutex.lock.release() };
+    }
+}
+
+unsafe impl<T> Sync for MutexGuard<'_, T> {}
diff --git a/lib/runtime/src/sync/once.rs b/lib/runtime/src/sync/once.rs
new file mode 100644
index 00000000..4ad876f5
--- /dev/null
+++ b/lib/runtime/src/sync/once.rs
@@ -0,0 +1,124 @@
+//! Runtime implementation of synchronized, initialize-once values.
+use core::{
+    cell::UnsafeCell,
+    mem::MaybeUninit,
+    sync::atomic::{AtomicU32, Ordering},
+};
+
+use abi::process::MutexOperation;
+
+/// Describes a value which can only be initialized once at runtime and provides only immutable
+/// access to `T`.
+pub struct Once<T> {
+    value: UnsafeCell<MaybeUninit<T>>,
+    state: AtomicU32,
+}
+
+impl<T> Once<T> {
+    const UNINIT: u32 = 0;
+    const PENDING: u32 = 1;
+    const INIT: u32 = 2;
+
+    /// Constructs an uninitialized [Once].
+    pub const fn uninit() -> Self {
+        Self {
+            value: UnsafeCell::new(MaybeUninit::uninit()),
+            state: AtomicU32::new(Self::UNINIT),
+        }
+    }
+
+    /// Attempts to initialize this [Once], if not already initialized.
+    ///
+    /// Returns `true` if the value was initialized from the `producer` function, `false` if
+    /// the [Once] is already initialized.
+    pub fn try_init_with<F: FnOnce() -> T>(&self, producer: F) -> bool {
+        // Try state -> PENDING
+        if self
+            .state
+            .compare_exchange(
+                Self::UNINIT,
+                Self::PENDING,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            )
+            .is_err()
+        {
+            return false;
+        }
+
+        // Write the value while state == PENDING
+        unsafe { (*self.value.get()).write(producer()) };
+
+        // state -> INIT
+        self.state.store(Self::INIT, Ordering::Release);
+
+        // Notify anyone waiting
+        self.notify();
+
+        true
+    }
+
+    /// Initializes this [Once] with the `producer` given.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the value has already been initialized.
+    #[track_caller]
+    pub fn init_with<F: FnOnce() -> T>(&self, producer: F) {
+        if !self.try_init_with(producer) {
+            panic!("Once has already been initialized");
+        }
+    }
+
+    /// Waits until this [Once] becomes initialized and returns a reference to the value.
+    pub fn wait(&self) -> &'static T {
+        loop {
+            if let Some(value) = self.try_get() {
+                return value;
+            }
+
+            self.wait_initialized();
+        }
+    }
+
+    /// Returns a reference to the value, if it was initialized previously, or [None] if
+    /// the value is uninitialized at the moment of this call.
+    pub fn try_get(&self) -> Option<&'static T> {
+        if self.state.load(Ordering::Acquire) == Self::INIT {
+            Some(unsafe { (*self.value.get()).assume_init_ref() })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a reference to the value.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the value has not yet been initialized.
+    #[track_caller]
+    pub fn get(&self) -> &'static T {
+        match self.try_get() {
+            Some(value) => value,
+            None => {
+                panic!("Attempt to use an uninitialized Once")
+            }
+        }
+    }
+
+    fn notify(&self) {
+        unsafe { crate::sys::mutex(&self.state, &MutexOperation::WakeAll) }.ok();
+    }
+
+    fn wait_initialized(&self) {
+        unsafe {
+            crate::sys::mutex(
+                &self.state,
+                &MutexOperation::WaitUntilEqual(Self::INIT, None),
+            )
+        }
+        .expect("Kernel mutex wait failed");
+    }
+}
+
+unsafe impl<T> Sync for Once<T> {}
diff --git a/lib/runtime/src/sync/rwlock.rs b/lib/runtime/src/sync/rwlock.rs
new file mode 100644
index 00000000..00e684d0
--- /dev/null
+++ b/lib/runtime/src/sync/rwlock.rs
@@ -0,0 +1,181 @@
+//! Read-write lock mechanism implementation
+use core::{
+    cell::UnsafeCell,
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicU32, Ordering},
+};
+
+use abi::process::MutexOperation;
+
+/// Defines a read-write lock primitive.
+pub struct RwLock<T> {
+    value: UnsafeCell<T>,
+    state: AtomicU32,
+}
+
+/// Represents a read-only access mechanism to a currently held [RwLock].
+pub struct RwLockReadGuard<'a, T> {
+    lock: &'a RwLock<T>,
+    _pd: PhantomData<&'a T>,
+}
+
+/// Represents a read-write access mechanism to a currently held [RwLock].
+pub struct RwLockWriteGuard<'a, T> {
+    lock: &'a RwLock<T>,
+    _pd: PhantomData<&'a mut T>,
+}
+
+impl<T> RwLock<T> {
+    const LOCKED_WRITE: u32 = 1 << 0;
+    const LOCKED_READ: u32 = 1 << 2;
+
+    /// Constructs a new [RwLock], wrapping the `value`.
+    pub const fn new(value: T) -> Self {
+        Self {
+            value: UnsafeCell::new(value),
+            state: AtomicU32::new(0),
+        }
+    }
+
+    /// Tries to acquire a read-only lock on the [RwLock] instantly.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are more than `1 << 30` readers present.
+    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> {
+        let value = self.state.fetch_add(Self::LOCKED_READ, Ordering::Acquire);
+        // Prevent overflows
+        if value >= (u32::MAX - Self::LOCKED_READ * 2) {
+            panic!("Too many readers holding a RwLock!");
+        }
+        if value & Self::LOCKED_WRITE == 0 {
+            Some(RwLockReadGuard {
+                lock: self,
+                _pd: PhantomData,
+            })
+        } else {
+            // A writer holds the lock, roll the reader count back
+            self.state.fetch_sub(Self::LOCKED_READ, Ordering::Release);
+            None
+        }
+    }
+
+    /// Tries to acquire a read-write lock on the [RwLock] immediately.
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> {
+        if self
+            .state
+            .compare_exchange(0, Self::LOCKED_WRITE, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            Some(RwLockWriteGuard {
+                lock: self,
+                _pd: PhantomData,
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Acquires a read-only lock on the [RwLock], blocking for the lock to become read-available
+    /// if necessary.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are more than `1 << 30` readers present.
+    pub fn read(&self) -> RwLockReadGuard<'_, T> {
+        loop {
+            if let Some(guard) = self.try_read() {
+                return guard;
+            }
+
+            self.wait_readable();
+        }
+    }
+
+    /// Acquires a read-write lock on the [RwLock], blocking for the lock to become write-available
+    /// if necessary.
+    pub fn write(&self) -> RwLockWriteGuard<'_, T> {
+        loop {
+            if let Some(guard) = self.try_write() {
+                return guard;
+            }
+
+            self.wait_writeable();
+        }
+    }
+
+    /// Releases a read-access lock to this [RwLock].
+    ///
+    /// # Safety
+    ///
+    /// Intended to be called from [RwLockReadGuard]'s [Drop] handler.
+    pub unsafe fn release_read(&self) {
+        if self.state.fetch_sub(Self::LOCKED_READ, Ordering::Release) == Self::LOCKED_READ {
+            // Only one lock, and only for read access, was held, which means the RwLock just
+            // became both readable and writeable, so notify a writer about that
+            self.notify_writeable();
+        }
+    }
+
+    /// Releases a read-write-access lock to this [RwLock].
+    ///
+    /// # Safety
+    ///
+    /// Intended to be called from [RwLockWriteGuard]'s [Drop] handler.
+    pub unsafe fn release_write(&self) {
+        self.state.fetch_and(!Self::LOCKED_WRITE, Ordering::Release);
+        self.notify_readable();
+    }
+
+    fn wait_readable(&self) {
+        // TODO bitwise wait operations in mutex syscall
+        core::hint::spin_loop();
+    }
+
+    fn wait_writeable(&self) {
+        unsafe { crate::sys::mutex(&self.state, &MutexOperation::WaitUntilEqual(0, None)) }
+            .expect("wait_writeable() failed");
+    }
+
+    // nop
+    fn notify_readable(&self) {}
+
+    fn notify_writeable(&self) {
+        unsafe { crate::sys::mutex(&self.state, &MutexOperation::Wake) }.ok();
+    }
+}
+
+impl<T> Deref for RwLockReadGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T> Drop for RwLockReadGuard<'_, T> {
+    fn drop(&mut self) {
+        unsafe { self.lock.release_read() };
+    }
+}
+
+impl<T> Deref for RwLockWriteGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.lock.value.get() }
+    }
+}
+
+impl<T> DerefMut for RwLockWriteGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        unsafe { &mut *self.lock.value.get() }
+    }
+}
+
+impl<T> Drop for RwLockWriteGuard<'_, T> {
+    fn drop(&mut self) {
+        unsafe { self.lock.release_write() };
+    }
+}
diff --git a/userspace/lib/ygglibc/src/allocator.rs b/userspace/lib/ygglibc/src/allocator.rs
index 1e4e97ff..1fb380b7 100644
--- a/userspace/lib/ygglibc/src/allocator.rs
+++ b/userspace/lib/ygglibc/src/allocator.rs
@@ -6,8 +6,7 @@ use core::{
 
 use libyalloc::{allocator::BucketAllocator, sys::PageProvider};
 use yggdrasil_rt::{
-    mem::{MappingFlags, MappingSource},
-    sys as syscall,
+    mem::{MappingFlags, MappingSource}, sync::mutex::Mutex, sys as syscall
 };
 
 use crate::{
@@ -16,7 +15,6 @@ use crate::{
         errno,
         string::mem::{memcpy, memset},
     },
-    sync::Mutex,
 };
 
 struct Allocator;
diff --git a/userspace/lib/ygglibc/src/headers/pthread/attr.rs b/userspace/lib/ygglibc/src/headers/pthread/attr.rs
index f8f4f29c..a131a987 100644
--- a/userspace/lib/ygglibc/src/headers/pthread/attr.rs
+++ b/userspace/lib/ygglibc/src/headers/pthread/attr.rs
@@ -1,80 +1,141 @@
-use core::ffi::{c_int, c_void};
+use core::{
+    ffi::{c_int, c_void},
+    ptr::{null_mut, NonNull},
+};
 
-use crate::headers::{sched::__ygg_sched_param_t, sys_types::pthread_attr_t};
+use crate::{
+    error::{self, CIntZeroResult, CResult, OptionExt},
+    headers::{
+        errno,
+        pthread::{PTHREAD_CREATE_JOINABLE, PTHREAD_INHERIT_SCHED, PTHREAD_SCOPE_PROCESS},
+        sched::{__ygg_sched_param_t, SCHED_RR},
+        sys_time::timespec,
+        sys_types::pthread_attr_t,
+    },
+    thread,
+    util::PointerExt,
+};
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_destroy(_attr: *mut pthread_attr_t) -> c_int {
-    todo!()
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getdetachstate(
-    _attr: *const pthread_attr_t,
-    _state: *mut c_int,
+    attr: *const pthread_attr_t,
+    state: *mut c_int,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(state) = NonNull::new(state) {
+        state.write(PTHREAD_CREATE_JOINABLE);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getguardsize(
-    _attr: *const pthread_attr_t,
-    _size: *mut usize,
+    attr: *const pthread_attr_t,
+    size: *mut usize,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(size) = NonNull::new(size) {
+        size.write(0);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getinheritsched(
-    _attr: *const pthread_attr_t,
-    _inherit: *mut c_int,
+    attr: *const pthread_attr_t,
+    inherit: *mut c_int,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(inherit) = NonNull::new(inherit) {
+        inherit.write(PTHREAD_INHERIT_SCHED);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getschedparam(
-    _attr: *const pthread_attr_t,
-    _param: *mut __ygg_sched_param_t,
+    attr: *const pthread_attr_t,
+    param: *mut __ygg_sched_param_t,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(param) = param.as_mut() {
+        param.sched_priority = 0;
+        param.sched_ss_max_repl = 0;
+        param.sched_ss_repl_period = timespec::zero();
+        param.sched_ss_init_budget = timespec::zero();
+        param.sched_ss_low_priority = 0;
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getschedpolicy(
-    _attr: *const pthread_attr_t,
-    _policy: *mut c_int,
+    attr: *const pthread_attr_t,
+    policy: *mut c_int,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(policy) = NonNull::new(policy) {
+        policy.write(SCHED_RR);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getscope(
-    _attr: *const pthread_attr_t,
-    _scope: *mut c_int,
+    attr: *const pthread_attr_t,
+    scope: *mut c_int,
 ) -> c_int {
-    todo!()
+    let _attr = attr.ensure();
+    if let Some(scope) = NonNull::new(scope) {
+        scope.write(PTHREAD_SCOPE_PROCESS);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getstack(
-    _attr: *const pthread_attr_t,
-    _stack: *mut *mut c_void,
-    _size: *mut usize,
+    attr: *const pthread_attr_t,
+    stack: *mut *mut c_void,
+    size: *mut usize,
 ) -> c_int {
-    todo!()
+    let attr = attr.ensure();
+    if let Some(stack) = NonNull::new(stack) {
+        match attr.stack {
+            Some(ptr) => stack.write(ptr.as_ptr()),
+            None => stack.write(null_mut()),
+        }
+    }
+    if let Some(size) = NonNull::new(size) {
+        size.write(attr.stack_size);
+    }
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_getstacksize(
-    _attr: *const pthread_attr_t,
-    _size: *mut usize,
+    attr: *const pthread_attr_t,
+    size: *mut usize,
 ) -> c_int {
-    todo!()
+    let attr = attr.ensure();
+    if let Some(size) = NonNull::new(size) {
+        size.write(attr.stack_size);
+    }
+    0
 }
 
 #[no_mangle]
-unsafe extern "C" fn pthread_attr_init(_attr: *mut pthread_attr_t) -> c_int {
-    todo!()
+unsafe extern "C" fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int {
+    let attr = attr.ensure_mut();
+    *attr = pthread_attr_t {
+        stack_size: thread::DEFAULT_STACK_SIZE,
+        stack: None,
+    };
+    0
 }
 
 #[no_mangle]
@@ -82,12 +143,13 @@ unsafe extern "C" fn pthread_attr_setdetachstate(
     _attr: *mut pthread_attr_t,
     _detach: c_int,
 ) -> c_int {
-    todo!()
+    todo!("pthread_attr_setdetachstate()")
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_setguardsize(_attr: *mut pthread_attr_t, _size: usize) -> c_int {
-    todo!()
+    yggdrasil_rt::debug_trace!("TODO: pthread_attr_setguardsize()");
+    0
 }
 
 #[no_mangle]
@@ -95,7 +157,8 @@ unsafe extern "C" fn pthread_attr_setinheritsched(
     _attr: *mut pthread_attr_t,
     _inherit: c_int,
 ) -> c_int {
-    todo!()
+    yggdrasil_rt::debug_trace!("TODO: pthread_attr_setinheritsched()");
+    0
 }
 
 #[no_mangle]
@@ -103,7 +166,8 @@ unsafe extern "C" fn pthread_attr_setschedparam(
     _attr: *mut pthread_attr_t,
     _param: *const __ygg_sched_param_t,
 ) -> c_int {
-    todo!()
+    yggdrasil_rt::debug_trace!("TODO: pthread_attr_setschedparam()");
+    0
 }
 
 #[no_mangle]
@@ -111,24 +175,45 @@ unsafe extern "C" fn pthread_attr_setschedpolicy(
     _attr: *mut pthread_attr_t,
     _policy: c_int,
 ) -> c_int {
-    todo!()
+    yggdrasil_rt::debug_trace!("TODO: pthread_attr_setschedpolicy()");
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_setscope(_attr: *mut pthread_attr_t, _scope: c_int) -> c_int {
-    todo!()
+    yggdrasil_rt::debug_trace!("TODO: pthread_attr_setscope()");
+    0
 }
 
 #[no_mangle]
 unsafe extern "C" fn pthread_attr_setstack(
-    _attr: *mut pthread_attr_t,
-    _stack: *mut c_void,
-    _size: usize,
-) -> c_int {
-    todo!()
+    attr: *mut pthread_attr_t,
+    stack: *mut c_void,
+    size: usize,
+) -> CIntZeroResult {
+    let attr = attr.ensure_mut();
+    let stack = NonNull::new(stack).e_ok_or(errno::EINVAL)?;
+    if size >= thread::MIN_STACK_SIZE {
+        attr.stack = Some(stack);
+        attr.stack_size = size;
+        CIntZeroResult::SUCCESS
+    } else {
+        error::errno = errno::EINVAL;
+        CIntZeroResult::ERROR
+    }
 }
 
 #[no_mangle]
-unsafe extern "C" fn pthread_attr_setstacksize(_attr: *mut pthread_attr_t, _size: usize) -> c_int {
-    todo!()
+unsafe extern "C" fn pthread_attr_setstacksize(
+    attr: *mut pthread_attr_t,
+    size: usize,
+) -> CIntZeroResult {
+    let attr = attr.ensure_mut();
+    if size >= thread::MIN_STACK_SIZE {
+        attr.stack_size = size;
+        CIntZeroResult::SUCCESS
+    } else {
+        error::errno = errno::EINVAL;
+        CIntZeroResult::ERROR
+    }
 }
diff --git a/userspace/lib/ygglibc/src/headers/pthread/barrier.rs b/userspace/lib/ygglibc/src/headers/pthread/barrier.rs
index 2a66f15e..f5f564bd 100644
--- a/userspace/lib/ygglibc/src/headers/pthread/barrier.rs
+++ b/userspace/lib/ygglibc/src/headers/pthread/barrier.rs
@@ -40,7 +40,7 @@ impl pthread_barrier_t {
         unsafe {
             yggdrasil_rt::sys::mutex(
                 &self.__state,
-                &MutexOperation::WaitUntil(self.__limit, None),
+                &MutexOperation::WaitUntilEqual(self.__limit, None),
             )
             .ok()
         };
diff --git a/userspace/lib/ygglibc/src/headers/pthread/mod.rs b/userspace/lib/ygglibc/src/headers/pthread/mod.rs
index 26e1be4a..eeadadaa 100644
--- a/userspace/lib/ygglibc/src/headers/pthread/mod.rs
+++ b/userspace/lib/ygglibc/src/headers/pthread/mod.rs
@@ -14,20 +14,34 @@ pub mod tls;
 // PTHREAD_CANCEL_DEFERRED
 // PTHREAD_CANCEL_DISABLE
 // PTHREAD_CANCELED
-// PTHREAD_CREATE_DETACHED
-// PTHREAD_CREATE_JOINABLE
-// PTHREAD_EXPLICIT_SCHED
-// PTHREAD_INHERIT_SCHED
 // PTHREAD_ONCE_INIT
-// PTHREAD_SCOPE_PROCESS
-// PTHREAD_SCOPE_SYSTEM
 //
 
+// Thread
 pub const PTHREAD_PRIO_NONE: c_int = 0;
 pub const PTHREAD_PRIO_INHERIT: c_int = 1;
 pub const PTHREAD_PRIO_PROTECT: c_int = 2;
 
+pub const PTHREAD_CREATE_JOINABLE: c_int = 0;
+pub const PTHREAD_CREATE_DETACHED: c_int = 1;
+
 pub const PTHREAD_PROCESS_PRIVATE: c_int = 0;
 pub const PTHREAD_PROCESS_SHARED: c_int = 1;
 
+pub const PTHREAD_INHERIT_SCHED: c_int = 0;
+pub const PTHREAD_EXPLICIT_SCHED: c_int = 1;
+
+pub const PTHREAD_SCOPE_PROCESS: c_int = 0;
+pub const PTHREAD_SCOPE_SYSTEM: c_int = 1;
+
+// Mutex
+pub const PTHREAD_MUTEX_NORMAL: c_int = 0;
+pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1;
+pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2;
+
+pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL;
+
+pub const PTHREAD_MUTEX_STALLED: c_int = 0;
+pub const PTHREAD_MUTEX_ROBUST: c_int = 1;
+
 // int pthread_once(pthread_once_t *, void (*)(void));
diff --git a/userspace/lib/ygglibc/src/headers/pthread/mutex.rs b/userspace/lib/ygglibc/src/headers/pthread/mutex.rs
index f646807a..cc533bdb 100644
--- a/userspace/lib/ygglibc/src/headers/pthread/mutex.rs
+++ b/userspace/lib/ygglibc/src/headers/pthread/mutex.rs
@@ -6,15 +6,15 @@ use crate::{
     error::{self, CIntZeroResult, CResult, EResult},
     headers::{
         errno,
-        pthread::PTHREAD_PRIO_NONE,
+        pthread::{
+            PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_STALLED, PTHREAD_PRIO_NONE, PTHREAD_PROCESS_PRIVATE,
+        },
         sys_time::__ygg_timespec_t,
         sys_types::{pthread_mutex_t, pthread_mutexattr_t},
     },
     util::PointerExt,
 };
 
-use super::PTHREAD_PROCESS_PRIVATE;
-
 impl pthread_mutex_t {
     const UNLOCKED: u32 = 0;
     const LOCKED: u32 = 1;
@@ -50,7 +50,7 @@ impl pthread_mutex_t {
 
     fn wait(&self) {
         unsafe {
-            yggdrasil_rt::sys::mutex(&self.__state, &MutexOperation::Wait(Self::LOCKED, None)).ok()
+            yggdrasil_rt::sys::mutex(&self.__state, &MutexOperation::WaitWhileEqual(Self::LOCKED, None)).ok()
         };
     }
 
@@ -62,15 +62,6 @@ impl pthread_mutex_t {
     }
 }
 
-pub const PTHREAD_MUTEX_NORMAL: c_int = 0;
-pub const PTHREAD_MUTEX_RECURSIVE: c_int = 1;
-pub const PTHREAD_MUTEX_ERRORCHECK: c_int = 2;
-
-pub const PTHREAD_MUTEX_DEFAULT: c_int = PTHREAD_MUTEX_NORMAL;
-
-pub const PTHREAD_MUTEX_STALLED: c_int = 0;
-pub const PTHREAD_MUTEX_ROBUST: c_int = 1;
-
 #[no_mangle]
 unsafe extern "C" fn pthread_mutex_consistent(_mutex: *mut pthread_mutex_t) -> c_int {
     yggdrasil_rt::debug_trace!("TODO: pthread_mutex_consistent()");
diff --git a/userspace/lib/ygglibc/src/headers/sys_time/mod.rs b/userspace/lib/ygglibc/src/headers/sys_time/mod.rs
index 75823a4c..b0feeda8 100644
--- a/userspace/lib/ygglibc/src/headers/sys_time/mod.rs
+++ b/userspace/lib/ygglibc/src/headers/sys_time/mod.rs
@@ -50,6 +50,15 @@ unsafe extern "C" fn utimes(_path: *const c_char, _times: *const timeval) -> c_int {
     todo!()
 }
 
+impl timespec {
+    pub const fn zero() -> Self {
+        Self {
+            tv_sec: time_t(0),
+            tv_nsec: 0
+        }
+    }
+}
+
 impl From<__ygg_timespec_t> for Duration {
     fn from(value: __ygg_timespec_t) -> Self {
         Self::new(
diff --git a/userspace/lib/ygglibc/src/headers/sys_types/mod.rs b/userspace/lib/ygglibc/src/headers/sys_types/mod.rs
index 26e6776b..b27fa74c 100644
--- a/userspace/lib/ygglibc/src/headers/sys_types/mod.rs
+++ b/userspace/lib/ygglibc/src/headers/sys_types/mod.rs
@@ -1,4 +1,4 @@
-use core::{ffi::{c_int, c_ulong, c_void}, sync::atomic::AtomicU32};
+use core::{ffi::{c_int, c_ulong, c_void}, ptr::NonNull, sync::atomic::AtomicU32};
 
 #[cfg(any(target_pointer_width = "64", rust_analyzer))]
 mod bits64;
@@ -47,7 +47,7 @@ impl time_t {
 
 #[repr(C)]
 pub struct pthread_attr_t {
-    pub stack: *mut c_void,
+    pub stack: Option<NonNull<c_void>>,
     pub stack_size: usize,
 }
diff --git a/userspace/lib/ygglibc/src/io/managed.rs b/userspace/lib/ygglibc/src/io/managed.rs
index 478a2ece..35bb9069 100644
--- a/userspace/lib/ygglibc/src/io/managed.rs
+++ b/userspace/lib/ygglibc/src/io/managed.rs
@@ -12,6 +12,7 @@ use bitflags::bitflags;
 use yggdrasil_rt::{
     io::{FileMode, OpenOptions, RawFd, SeekFrom},
     path::Path,
+    sync::mutex::{Mutex, RawMutex},
 };
 
 use crate::{
@@ -20,11 +21,12 @@ use crate::{
         errno,
         stdio::{BUFSIZ, UNGETC_MAX, _IOFBF, _IOLBF, _IONBF},
     },
-    sync::{Mutex, RawMutex},
 };
 
 use super::{
-    buffer::{FileWriter, FullBufferedWriter, LineBufferedWriter, ReadBuffer, UnbufferedWriter}, raw::RawFile, AsRawFd, BufRead, FromRawFd, Read, Seek, Write
+    buffer::{FileWriter, FullBufferedWriter, LineBufferedWriter, ReadBuffer, UnbufferedWriter},
+    raw::RawFile,
+    AsRawFd, BufRead, FromRawFd, Read, Seek, Write,
 };
 
 macro locked_op($self:expr, $op:expr) {{
@@ -57,7 +59,7 @@ pub enum BufferingMode {
 #[derive(Debug, PartialEq, Clone, Copy)]
 pub enum Direction {
     Read,
-    Write
+    Write,
 }
 
 pub enum FileBacking {
@@ -83,7 +85,11 @@ pub struct FILE {
     // man setvbuf(3):
     //    The setvbuf() function may be used only after opening a stream and
     //    before any other operations have been performed on it.
-    last_operation: Option<Direction>
-}
+    last_operation: Option<Direction>,
+}
+
+struct FileSet {
+    files: BTreeSet<NonNull<FILE>>,
+}
 
 // ManagedFile
@@ -208,8 +214,12 @@ impl FILE {
         let write_buffer = backing.make_ref();
         let write_buffer: Box<dyn FileWriter> = match buffering {
             BufferingMode::None => Box::new(UnbufferedWriter::new(write_buffer)),
-            BufferingMode::Line => Box::new(LineBufferedWriter::with_capacity(write_buffer, BUFSIZ)),
-            BufferingMode::Full => Box::new(FullBufferedWriter::with_capacity(write_buffer, BUFSIZ)),
+            BufferingMode::Line => {
+                Box::new(LineBufferedWriter::with_capacity(write_buffer, BUFSIZ))
+            }
+            BufferingMode::Full => {
+                Box::new(FullBufferedWriter::with_capacity(write_buffer, BUFSIZ))
+            }
         };
 
         Self {
@@ -360,9 +370,7 @@ impl FILE {
         self.last_operation = None;
 
         match self.write_buffer.flush() {
-            EResult::Ok(()) => {
-                EResult::Ok(())
-            },
+            EResult::Ok(()) => EResult::Ok(()),
             EResult::Err(err) => {
                 self.flags |= FileFlags::ERROR;
                 EResult::Err(err)
@@ -461,8 +469,8 @@ impl FILE {
             Some(dir) if dir != direction => {
                 self.flags |= FileFlags::ERROR;
                 EResult::Err(errno::EINVAL)
-            },
-            _ => EResult::Ok(())
+            }
+            _ => EResult::Ok(()),
         }
     }
 }
@@ -558,6 +566,28 @@ impl TryFromExt for BufferingMode {
     }
 }
 
+impl FileSet {
+    const fn new() -> Self {
+        Self { files: BTreeSet::new() }
+    }
+
+    fn insert(&mut self, file: NonNull<FILE>) {
+        self.files.insert(file);
+    }
+
+    fn remove(&mut self, file: &NonNull<FILE>) -> bool {
+        self.files.remove(file)
+    }
+
+    fn cleanup(&mut self) {
+        while let Some(file) = self.files.pop_first() {
+            unsafe { FILE::close(file) }.ok();
+        }
+    }
+}
+
+unsafe impl Sync for FileSet {}
+
 #[no_mangle]
 pub static mut stdout: *mut FILE = null_mut();
 #[no_mangle]
@@ -565,7 +595,7 @@ pub static mut stderr: *mut FILE = null_mut();
 #[no_mangle]
 pub static mut stdin: *mut FILE = null_mut();
 
-static OPEN_FILES: Mutex<BTreeSet<NonNull<FILE>>> = Mutex::new(BTreeSet::new());
+static OPEN_FILES: Mutex<FileSet> = Mutex::new(FileSet::new());
 
 pub fn deregister_file(file: NonNull<FILE>) -> bool {
     OPEN_FILES.lock().remove(&file)
@@ -603,8 +633,5 @@ pub unsafe fn init() {
 }
 
 pub unsafe fn cleanup() {
-    let mut open_files = OPEN_FILES.lock();
-    while let Some(file) = open_files.pop_first() {
-        FILE::close(file).ok();
-    }
+    OPEN_FILES.lock().cleanup();
 }
diff --git a/userspace/lib/ygglibc/src/lib.rs b/userspace/lib/ygglibc/src/lib.rs
index fc763d54..60a2ec6a 100644
--- a/userspace/lib/ygglibc/src/lib.rs
+++ b/userspace/lib/ygglibc/src/lib.rs
@@ -51,7 +51,6 @@ mod panic;
 mod process;
 mod signal;
 mod ssp;
-mod sync;
 mod thread;
 mod types;
 mod util;
diff --git a/userspace/lib/ygglibc/src/sync.rs b/userspace/lib/ygglibc/src/sync.rs
deleted file mode 100644
index 3879f92b..00000000
--- a/userspace/lib/ygglibc/src/sync.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-use core::{
-    cell::UnsafeCell,
-    ops::{Deref, DerefMut},
-    sync::atomic::{AtomicU32, Ordering},
-};
-
-use yggdrasil_rt::{process::MutexOperation, sys as syscall};
-
-pub struct RawMutex {
-    value: AtomicU32,
-}
-
-pub struct Mutex<T> {
-    value: UnsafeCell<T>,
-    inner: RawMutex,
-}
-
-pub struct MutexGuard<'a, T> {
-    lock: &'a Mutex<T>,
-}
-
-impl RawMutex {
-    const UNLOCKED: u32 = 0;
-    const LOCKED: u32 = 1;
-
-    pub const fn new() -> Self {
-        Self {
-            value: AtomicU32::new(Self::UNLOCKED),
-        }
-    }
-
-    pub fn try_lock(&self) -> bool {
-        self.value
-            .compare_exchange(
-                Self::UNLOCKED,
-                Self::LOCKED,
-                Ordering::Acquire,
-                Ordering::Relaxed,
-            )
-            .is_ok()
-    }
-
-    pub fn lock(&self) {
-        loop {
-            if self.try_lock() {
-                // Got a lock
-                return;
-            }
-
-            self.wait(Self::LOCKED);
-        }
-    }
-
-    pub unsafe fn release(&self) {
-        if self.value.swap(Self::UNLOCKED, Ordering::Release) == Self::LOCKED {
-            self.wake();
-        }
-    }
-
-    fn wait(&self, value: u32) {
-        unsafe { syscall::mutex(&self.value, &MutexOperation::Wait(value, None)).unwrap() };
-    }
-
-    fn wake(&self) {
-        unsafe { syscall::mutex(&self.value, &MutexOperation::Wake).unwrap() };
-    }
-}
-
-impl<T> Mutex<T> {
-    pub const fn new(value: T) -> Self {
-        Self {
-            value: UnsafeCell::new(value),
-            inner: RawMutex::new(),
-        }
-    }
-
-    pub fn lock(&self) -> MutexGuard<T> {
-        self.inner.lock();
-        MutexGuard { lock: self }
-    }
-}
-
-unsafe impl<T> Sync for Mutex<T> {}
-
-impl<T> Deref for MutexGuard<'_, T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        unsafe { &*self.lock.value.get() }
-    }
-}
-
-impl<T> DerefMut for MutexGuard<'_, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.lock.value.get() }
-    }
-}
-
-impl<T> Drop for MutexGuard<'_, T> {
-    fn drop(&mut self) {
-        unsafe { self.lock.inner.release() };
-    }
-}
diff --git a/userspace/lib/ygglibc/src/thread/mod.rs b/userspace/lib/ygglibc/src/thread/mod.rs
index f57dfa35..37cc0f4d 100644
--- a/userspace/lib/ygglibc/src/thread/mod.rs
+++ b/userspace/lib/ygglibc/src/thread/mod.rs
@@ -1,17 +1,12 @@
-use core::{
-    cell::OnceCell,
-    ffi::c_void,
-    ptr::{self, null_mut},
-    sync::atomic::{AtomicPtr, AtomicU32, Ordering},
-};
+use core::{ffi::c_void, ptr::null_mut};
 
-use alloc::{boxed::Box, collections::BTreeMap, sync::Arc};
+use alloc::{boxed::Box, collections::BTreeMap};
 use yggdrasil_rt::{
-    mem::{MappingFlags, MappingSource},
     process::{
         thread::{self as rt, ThreadCreateInfo},
-        thread_local, ProgramArgumentInner, ThreadSpawnOptions,
+        thread_local, ProgramArgumentInner,
     },
+    sync::mutex::Mutex,
 };
 
 use crate::{
@@ -20,14 +15,17 @@ use crate::{
         errno,
         sys_types::{pthread_attr_t, pthread_t},
     },
-    process, signal,
-    sync::Mutex,
+    process, signal,
 };
 
 pub mod tls;
 
 static THREADS: Mutex<BTreeMap<pthread_t, Thread>> = Mutex::new(BTreeMap::new());
 
+pub const DEFAULT_STACK_SIZE: usize = 4096 * 16;
+pub const DEFAULT_SIGNAL_STACK_SIZE: usize = 4096 * 8;
+
+pub const MIN_STACK_SIZE: usize = 4096 * 4;
+
 pub struct Thread {
     handle: rt::ThreadHandle<*mut c_void>,
 }
@@ -38,13 +36,21 @@ impl Thread {
         entry: extern "C" fn(*mut c_void) -> *mut c_void,
         argument: *mut c_void,
     ) -> EResult<pthread_t> {
+        let stack = if let Some(attr) = attr {
+            assert!(attr.stack_size >= MIN_STACK_SIZE);
+            match attr.stack {
+                Some(stack) => rt::ThreadStack::Raw(usize::from(stack.addr()) + attr.stack_size),
+                None => rt::ThreadStack::Allocate(attr.stack_size),
+            }
+        } else {
+            rt::ThreadStack::Allocate(DEFAULT_STACK_SIZE)
+        };
         // TODO honor attr stack
         let info = ThreadCreateInfo {
-            signal_stack: rt::ThreadSignalStack::Allocate(4096 * 8),
-            stack: rt::ThreadStack::Allocate(4096 * 16),
+            signal_stack: rt::ThreadSignalStack::Allocate(DEFAULT_SIGNAL_STACK_SIZE),
+            stack,
             entry: rt::ThreadFunction::Closure(Box::new(move |arg| entry(arg))),
-            #[allow(static_mut_refs)]
-            tls_image: unsafe { tls::TLS_IMAGE.as_ref() },
+            tls_image: tls::TLS_IMAGE.get().as_ref(),
         };
         let handle = rt::Thread::spawn(info, argument, true)?;
         let id = handle.id();
@@ -64,199 +70,6 @@ impl Thread {
     }
 }
 
-// #[thread_local]
-// static SELF: OnceCell<Arc<Thread>> = OnceCell::new();
-//
-// enum ThreadStack {
-//     Owned(usize, usize),
-//     Borrowed,
-// }
-//
-// struct ThreadArgument {
-//     entry: extern "C" fn(*mut c_void) -> *mut c_void,
-//     argument: *mut c_void,
-//     thread: Arc<Thread>,
-// }
-//
-// #[repr(C)]
-// pub struct Thread {
-//     id: AtomicU32,
-//     stack: ThreadStack,
-//     result: AtomicPtr<c_void>,
-// }
-//
-// impl Thread {
-//     fn main() -> Self {
-//         Self {
-//             id: AtomicU32::new(0),
-//             stack: ThreadStack::Borrowed,
-//             result: AtomicPtr::new(null_mut()),
-//         }
-//     }
-//
-//     pub fn is_main(&self) -> bool {
-//         self.id.load(Ordering::Acquire) == 0
-//     }
-//
-//     pub fn spawn(
-//         attr: Option<&pthread_attr_t>,
-//         entry: extern "C" fn(*mut c_void) -> *mut c_void,
-//         argument: *mut c_void,
-//     ) -> EResult<pthread_t> {
-//         // TODO move defaults somewhere else
-//         let stack_size = attr
-//             .as_ref()
-//             .map(|attr| attr.stack_size)
-//             .unwrap_or(4096 * 16);
-//         // If no custom stack was specified via pthread_attr_setstack, allocate owned stack
-//         let stack_base = attr.as_ref().map(|attr| attr.stack).unwrap_or(null_mut());
-//
-//         let stack_size = stack_size.max(4096 * 8);
-//
-//         let (stack, stack_top) = match stack_base.is_null() {
-//             // program-supplied stack
-//             false => todo!(),
-//             // NULL or unspecified stack
-//             true => {
-//                 let base = unsafe {
-//                     yggdrasil_rt::sys::map_memory(
-//                         None,
-//                         stack_size,
-//                         MappingFlags::WRITE,
-//                         &MappingSource::Anonymous,
-//                     )
-//                 }?;
-//                 let top = base + stack_size;
-//                 yggdrasil_rt::debug_trace!("Allocate stack: {:#x?}", base..top);
-//                 (ThreadStack::Owned(base, stack_size), top)
-//             }
-//         };
-//
-//         let thread = Arc::new(Self {
-//             id: AtomicU32::new(0),
-//             result: AtomicPtr::new(null_mut()),
-//             stack,
-//         });
-//         let argument = Box::into_raw(Box::new(ThreadArgument {
-//             entry,
-//             argument,
-//             thread: thread.clone(),
-//         }));
-//
-//         let options = ThreadSpawnOptions {
-//             entry: Self::thread_entry,
-//             argument: argument.addr(),
-//             stack_top,
-//         };
-//
-//         let id = unsafe { yggdrasil_rt::sys::spawn_thread(&options) }?;
-//         thread.id.store(id, Ordering::Release);
-//
-//         THREADS.lock().insert(id, thread);
-//
-//         EResult::Ok(id)
-//     }
-//
-//     pub fn join(id: pthread_t) -> EResult<Arc<Self>> {
-//         unsafe { yggdrasil_rt::sys::wait_thread(id) }?;
-//         let thread = THREADS
-//             .lock()
-//             .remove(&id)
-//             .expect("wait_thread() succeeded but thread not registered?");
-//         EResult::Ok(thread)
-//     }
-//
-//     pub fn id(&self) -> pthread_t {
-//         self.id.load(Ordering::Acquire)
-//     }
-//
-//     pub fn this() -> EResult<Arc<Self>> {
-//         SELF.get().cloned().e_ok_or(errno::EINVAL)
-//     }
-//
-//     unsafe fn set_this(thread: Arc<Self>) {
-//         if SELF.set(thread).is_err() {
-//             unreachable!("pthread_self already initialized?")
-//         }
-//     }
-//
-//     unsafe fn clear_this() {
-//         let this = SELF.get().expect("pthread_self == NULL");
-//         Arc::decrement_strong_count(this);
-//     }
-//
-//     pub fn result(&self) -> *mut c_void {
-//         self.result.load(Ordering::Acquire)
-//     }
-//
-//     pub fn abort_current() -> ! {
-//         let this = Self::this();
-//         if let EResult::Ok(this) = this
-//             && !this.is_main()
-//         {
-//             this.result.store(null_mut(), Ordering::Release);
-//             unsafe { yggdrasil_rt::sys::exit_thread() };
-//         } else {
-//             process::abort();
-//         }
-//     }
-//
-//     extern "C" fn thread_entry(argument: usize) -> ! {
-//         // Set up TLS as soon as possible. Note the `force = true` parameter, because the image
-//         // contains "already initialized" tag, which only matters for the main thread.
-//         #[allow(static_mut_refs)]
-//         if let Err(err) = unsafe { thread_local::init_tls(tls::TLS_IMAGE.as_ref(), true) } {
-//             yggdrasil_rt::debug_trace!("thread_entry failed: TLS init error: {err:?}");
-//             unsafe { yggdrasil_rt::sys::exit_thread() };
-//         }
-//
-//         {
-//             assert_ne!(argument, 0);
-//
-//             let argument: Box<ThreadArgument> =
-//                 unsafe { Box::from_raw(ptr::with_exposed_provenance_mut(argument)) };
-//             yggdrasil_rt::debug_trace!(
-//                 "thread_entry entry={:p}, argument={:p}",
-//                 argument.entry,
-//                 argument.argument
-//             );
-//
-//             // TODO better way to initialize the thread ID
-//             while argument.thread.id.load(Ordering::Acquire) == 0 {
-//                 core::hint::spin_loop();
-//             }
-//
-//             unsafe {
-//                 Self::setup_self(&argument.thread);
-//             }
-//
-//             let result = (argument.entry)(argument.argument);
-//             argument.thread.result.store(result, Ordering::Release);
-//
-//             unsafe { Self::clear_this() };
-//         }
-//
-//         // TODO call thread-local destructors
-//         unsafe { yggdrasil_rt::sys::exit_thread() }
-//     }
-//
-//     unsafe fn setup_self(self: &Arc<Self>) {
-//         Self::set_this(self.clone());
-//         // Setup signal stack
-//         signal::init(false);
-//     }
-// }
-//
-// impl Drop for Thread {
-//     fn drop(&mut self) {
-//         yggdrasil_rt::debug_trace!("Drop thread {:?}", self.id);
-//         if let &ThreadStack::Owned(base, size) = &self.stack {
-//             yggdrasil_rt::debug_trace!("Drop stack {:#x?}", base..base + size);
-//             unsafe { yggdrasil_rt::sys::unmap_memory(base, size) }.ok();
-//         }
-//     }
-// }
-
 pub fn init_main_thread(arg: &ProgramArgumentInner) {
     // Will create a TLS for the main thread (if not done already).
     // Usually, a dynamic loader will do this for us, but this still needs to be
@@ -266,9 +79,7 @@ pub fn init_main_thread(arg: &ProgramArgumentInner) {
     thread_local::init_tls_from_auxv(arg.auxv(), false).expect("Could not initialize TLS");
 
     // Store the TLS image, it'll be needed when creating new threads
-    unsafe {
-        tls::TLS_IMAGE = tls_image;
-    }
+    tls::TLS_IMAGE.init_with(|| tls_image);
 
     unsafe { rt::track_main::<*mut c_void>() };
 }
diff --git a/userspace/lib/ygglibc/src/thread/tls.rs b/userspace/lib/ygglibc/src/thread/tls.rs
index 8b75ee15..2e9441e7 100644
--- a/userspace/lib/ygglibc/src/thread/tls.rs
+++ b/userspace/lib/ygglibc/src/thread/tls.rs
@@ -1,7 +1,6 @@
-use yggdrasil_rt::process::thread_local::TlsImage;
+use yggdrasil_rt::{process::thread_local::TlsImage, sync::once::Once};
 
-// TODO OnceLock
-pub(super) static mut TLS_IMAGE: Option<TlsImage> = None;
+pub(super) static TLS_IMAGE: Once<Option<TlsImage>> = Once::uninit();
 
 #[linkage = "weak"]
 #[no_mangle]
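
Usage note (illustrative only, not part of the patch): a minimal sketch of how the new yggdrasil_rt::sync primitives introduced above fit together. The names CONFIG and COUNTER and the values used are hypothetical; RwLock is shown on a local value since this patch does not declare it Sync on its own.

    use yggdrasil_rt::sync::{mutex::Mutex, once::Once, rwlock::RwLock};

    // Written once, then read from any thread.
    static CONFIG: Once<u32> = Once::uninit();
    // Futex-backed mutex protecting shared data.
    static COUNTER: Mutex<u64> = Mutex::new(0);

    fn example() {
        // First caller runs the closure; later callers get `false` and reuse the value.
        CONFIG.try_init_with(|| 42);
        let cfg = *CONFIG.wait();

        // Guard releases the lock (and wakes one waiter) when dropped.
        *COUNTER.lock() += 1;

        // RwLock: any number of readers, or a single writer at a time.
        let cache = RwLock::new(0u32);
        *cache.write() = cfg;
        assert_eq!(*cache.read(), cfg);
    }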