refactor: move kernel::sync to kernel-util

Mark Poliakov 2023-11-30 11:00:51 +02:00
parent cc816920b0
commit 2d7568f829
28 changed files with 274 additions and 251 deletions

View File

@@ -0,0 +1,7 @@
extern "Rust" {
pub fn __acquire_irq_guard() -> bool;
pub fn __release_irq_guard(mask: bool);
pub fn __suspend();
pub fn __yield();
}
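
kernel-util only declares these symbols; whichever binary links the crate must export matching #[no_mangle] definitions (the kernel's IRQ-guard pair is added in the arch hunk later in this commit). As a hedged sketch, a hosted test harness might provide stubs like the following so the crate links outside the kernel; the bodies are placeholders, not the kernel's real behavior:

// Hypothetical stubs for a hosted (std) test build; the real kernel wires
// the IRQ-guard pair to its architecture layer instead.
#[no_mangle]
fn __acquire_irq_guard() -> bool {
    // Report the previous mask state; a stub has no real IRQs to mask.
    false
}

#[no_mangle]
fn __release_irq_guard(_mask: bool) {}

#[no_mangle]
fn __suspend() {
    // Stand-in for a wait-for-interrupt/idle instruction.
    std::thread::yield_now();
}

#[no_mangle]
fn __yield() {
    // Stand-in for rescheduling the current CPU.
    std::thread::yield_now();
}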

View File

@@ -1,6 +1,8 @@
#![no_std]
#![feature(maybe_uninit_slice)]
pub(crate) mod api;
pub mod sync;
pub mod util;

View File

@@ -0,0 +1,207 @@
//! Synchronization primitives
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};
use crate::api;
// use crate::arch::{Architecture, ArchitectureImpl};
static LOCK_HACK: AtomicBool = AtomicBool::new(false);
/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks.
///
/// # Safety
///
/// Only meant to be called from the panic handler, once the caller is sure all other CPUs are halted.
pub unsafe fn hack_locks() {
LOCK_HACK.store(true, Ordering::Release);
}
/// Simple spinloop-based fence guaranteeing that execution resumes only after its condition is
/// met.
pub struct SpinFence {
value: AtomicUsize,
}
/// Token type used to prevent IRQs from firing during a critical section. Normal IRQ operation
/// (if enabled beforehand) is resumed when the [IrqGuard] is dropped.
pub struct IrqGuard(bool);
struct SpinlockInner<T> {
value: UnsafeCell<T>,
state: AtomicBool,
}
struct SpinlockInnerGuard<'a, T> {
lock: &'a SpinlockInner<T>,
}
/// Spinlock implementation which masks IRQs to avoid deadlocks when an interrupt handler
/// tries to acquire a lock that was already taken when the IRQ fired.
pub struct IrqSafeSpinlock<T> {
inner: SpinlockInner<T>,
}
/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal
/// IRQ operation (if enabled before acquiring) when the guard is dropped.
pub struct IrqSafeSpinlockGuard<'a, T> {
// Must come first to ensure the lock is dropped first and only then IRQs are re-enabled
inner: SpinlockInnerGuard<'a, T>,
_irq: IrqGuard,
}
// Spinlock impls
impl<T> SpinlockInner<T> {
const fn new(value: T) -> Self {
Self {
value: UnsafeCell::new(value),
state: AtomicBool::new(false),
}
}
fn lock(&self) -> SpinlockInnerGuard<T> {
// Loop until the lock can be acquired
if LOCK_HACK.load(Ordering::Acquire) {
return SpinlockInnerGuard { lock: self };
}
while self
.state
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
core::hint::spin_loop();
}
SpinlockInnerGuard { lock: self }
}
}
impl<'a, T> Deref for SpinlockInnerGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lock.value.get() }
}
}
impl<'a, T> DerefMut for SpinlockInnerGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.value.get() }
}
}
impl<'a, T> Drop for SpinlockInnerGuard<'a, T> {
fn drop(&mut self) {
if !LOCK_HACK.load(Ordering::Acquire) {
self.lock
.state
.compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
.unwrap();
}
}
}
unsafe impl<T> Sync for SpinlockInner<T> {}
unsafe impl<T> Send for SpinlockInner<T> {}
// IrqSafeSpinlock impls
impl<T> IrqSafeSpinlock<T> {
/// Wraps the value in a spinlock primitive
pub const fn new(value: T) -> Self {
Self {
inner: SpinlockInner::new(value),
}
}
/// Acquires the lock, spinning until it becomes available. IRQs are masked until the returned
/// guard is dropped.
pub fn lock(&self) -> IrqSafeSpinlockGuard<T> {
// Disable IRQs to avoid IRQ handler trying to acquire the same lock
let irq_guard = IrqGuard::acquire();
// Acquire the inner lock
let inner = self.inner.lock();
IrqSafeSpinlockGuard {
inner,
_irq: irq_guard,
}
}
/// Returns a mutable reference to the inner value without taking the lock.
///
/// # Safety
///
/// The caller must guarantee exclusive access to the value: this bypasses both the lock and
/// IRQ masking.
#[allow(clippy::mut_from_ref)]
pub unsafe fn grab(&self) -> &mut T {
unsafe { &mut *self.inner.value.get() }
}
}
impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.deref_mut()
}
}
// IrqGuard impls
impl IrqGuard {
/// Saves the current IRQ state and masks interrupts
pub fn acquire() -> Self {
Self(unsafe { api::__acquire_irq_guard() })
}
}
impl Drop for IrqGuard {
fn drop(&mut self) {
unsafe { api::__release_irq_guard(self.0) }
}
}
// SpinFence impls
impl SpinFence {
/// Constructs a new [SpinFence]
pub const fn new() -> Self {
Self {
value: AtomicUsize::new(0),
}
}
/// Resets a fence back to its original state
pub fn reset(&self) {
self.value.store(0, Ordering::Release);
}
/// "Signals" a fence, incrementing its internal counter by one
pub fn signal(&self) {
self.value.fetch_add(1, Ordering::SeqCst);
}
/// Waits until the fence has been signalled at least the specified number of times
pub fn wait_all(&self, count: usize) {
while self.value.load(Ordering::Acquire) < count {
core::hint::spin_loop();
}
}
/// Waits until the fence is signalled at least once
pub fn wait_one(&self) {
self.wait_all(1);
}
/// Returns `true` if the fence has been signalled at least the specified number of times
pub fn try_wait_all(&self, count: usize) -> bool {
self.value.load(Ordering::Acquire) >= count
}
}
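
A minimal usage sketch for the primitives above (the statics and function are hypothetical): the guard masks IRQs for exactly its own lifetime, and the fence mirrors the kernel's pattern of waiting for secondary CPUs to come up.

// Hypothetical call sites for IrqSafeSpinlock and SpinFence.
use kernel_util::sync::{IrqSafeSpinlock, SpinFence};

static COUNTERS: IrqSafeSpinlock<[u32; 4]> = IrqSafeSpinlock::new([0; 4]);
static CPUS_READY: SpinFence = SpinFence::new();

fn cpu_entry(cpu_count: usize) {
    {
        let mut counters = COUNTERS.lock(); // IRQs masked from here...
        counters[0] += 1;
    } // ...until the guard drops here, restoring the previous IRQ state

    CPUS_READY.signal(); // each CPU signals once when it is up
    CPUS_READY.wait_all(cpu_count); // spin until every CPU has signalled
}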

View File

@@ -3,10 +3,10 @@ use core::sync::atomic::Ordering;
use aarch64_cpu::registers::{MPIDR_EL1, TPIDR_EL1};
use alloc::{boxed::Box, vec::Vec};
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use tock_registers::interfaces::{Readable, Writeable};
use crate::{arch::CpuMessage, panic, sync::IrqSafeSpinlock, task::sched::CpuQueue};
use crate::{arch::CpuMessage, panic, task::sched::CpuQueue};
use super::smp::CPU_COUNT;

View File

@@ -278,7 +278,7 @@ extern "C" fn __aa64_el1_sync_handler(frame: *mut ExceptionFrame) {
let iss = esr_el1 & 0x1FFFFFF;
unsafe {
crate::sync::hack_locks();
kernel_util::sync::hack_locks();
}
dump_irrecoverable_exception(frame, ec, iss);

View File

@@ -12,7 +12,7 @@ use device_api::{
},
Device,
};
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use crate::{
arch::{aarch64::IrqNumber, Architecture, CpuMessage},
@@ -23,7 +23,6 @@ use crate::{
device::{DeviceMemoryIo, RawDeviceMemoryMapping},
PhysicalAddress,
},
sync::IrqSafeSpinlock,
};
use self::{gicc::Gicc, gicd::Gicd};

View File

@@ -42,6 +42,26 @@ cfg_if! {
}
}
// External API for architecture specifics
#[no_mangle]
fn __acquire_irq_guard() -> bool {
let mask = ArchitectureImpl::interrupt_mask();
unsafe {
ArchitectureImpl::set_interrupt_mask(true);
}
mask
}
#[no_mangle]
fn __release_irq_guard(mask: bool) {
unsafe {
ArchitectureImpl::set_interrupt_mask(mask);
}
}
// Architecture interfaces
/// Describes messages sent from some CPU to others
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(u64)]
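
With these definitions in place, IrqGuard::acquire() in kernel-util resolves to the kernel's interrupt-mask code at link time. Since each guard saves the mask state it observed, guards nest correctly; a brief hypothetical sketch, assuming IRQs start unmasked:

// Hypothetical nesting example for IrqGuard.
use kernel_util::sync::IrqGuard;

fn nested() {
    let outer = IrqGuard::acquire(); // saves "unmasked", masks IRQs
    let inner = IrqGuard::acquire(); // saves "masked", IRQs stay masked
    drop(inner); // restores "masked": IRQs remain off
    drop(outer); // restores "unmasked": IRQs are enabled again
}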

View File

@@ -4,10 +4,12 @@ use core::fmt::{self, Arguments};
use abi::error::Error;
use alloc::sync::Arc;
use futures_util::Future;
use kernel_util::util::{OneTimeInit, StaticVector};
use kernel_util::{
sync::IrqSafeSpinlock,
util::{OneTimeInit, StaticVector},
};
use crate::{
sync::IrqSafeSpinlock,
task::{process::Process, runtime::QueueWaker},
util::ring::RingBuffer,
};

View File

@@ -5,9 +5,9 @@ use core::time::Duration;
use abi::{error::Error, primitive_enum};
use alloc::{vec, vec::Vec};
use bitflags::bitflags;
use kernel_util::util::StaticVector;
use kernel_util::{sync::IrqSafeSpinlock, util::StaticVector};
use crate::{debug::DebugSink, sync::IrqSafeSpinlock, task::runtime};
use crate::{debug::DebugSink, task::runtime};
const CONSOLE_ROW_LEN: usize = 80;
const MAX_CSI_ARGS: usize = 8;

View File

@@ -1,8 +1,9 @@
//! Framebuffer console driver
use abi::error::Error;
use kernel_util::sync::IrqSafeSpinlock;
use crate::{debug::DebugSink, sync::IrqSafeSpinlock};
use crate::debug::DebugSink;
use super::{
console::{Attributes, ConsoleBuffer, ConsoleState, DisplayConsole},

View File

@@ -4,11 +4,9 @@ use core::ops::{Index, IndexMut};
use abi::error::Error;
use device_api::Device;
use kernel_util::sync::IrqSafeSpinlock;
use crate::{
mem::{device::RawDeviceMemoryMapping, PhysicalAddress},
sync::IrqSafeSpinlock,
};
use crate::mem::{device::RawDeviceMemoryMapping, PhysicalAddress};
use super::{DisplayDevice, DisplayDimensions};

View File

@@ -1,8 +1,7 @@
//! Device management and interfaces
use device_api::{manager::DeviceManager, Device, DeviceId};
use crate::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
use kernel_util::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard};
#[cfg(target_arch = "aarch64")]
pub mod devtree;

View File

@@ -2,7 +2,7 @@
use abi::{error::Error, io::DeviceRequest};
use alloc::boxed::Box;
use device_api::{interrupt::InterruptHandler, serial::SerialDevice, Device};
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@@ -19,7 +19,6 @@ use crate::{
},
device_tree_driver,
mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress},
sync::IrqSafeSpinlock,
task::process::ProcessId,
};

View File

@@ -5,9 +5,9 @@ use abi::{
process::Signal,
};
use device_api::serial::SerialDevice;
use kernel_util::sync::IrqSafeSpinlock;
use crate::{
sync::IrqSafeSpinlock,
task::process::{Process, ProcessId},
util::ring::AsyncRing,
};

View File

@@ -31,11 +31,11 @@
#![no_main]
use arch::Architecture;
use kernel_util::sync::SpinFence;
use crate::{
arch::{ArchitectureImpl, ARCHITECTURE},
mem::heap,
sync::SpinFence,
task::{spawn_kernel_closure, Cpu},
};
@@ -55,7 +55,6 @@ pub mod init;
pub mod mem;
pub mod panic;
pub mod proc;
pub mod sync;
pub mod syscall;
pub mod task;
pub mod util;

View File

@@ -1,12 +1,11 @@
use core::ops::Range;
use abi::error::Error;
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use crate::{
arch::{Architecture, ARCHITECTURE},
mem::{address::IntoRaw, phys::reserved::is_reserved},
sync::IrqSafeSpinlock,
};
use self::{

View File

@@ -1,8 +1,9 @@
use abi::error::Error;
use cfg_if::cfg_if;
use kernel_util::sync::IrqSafeSpinlock;
use vmalloc::VirtualMemoryAllocator;
use crate::{mem::phys, sync::IrqSafeSpinlock};
use crate::mem::phys;
use super::{table::MapAttributes, PhysicalAddress};

View File

@@ -2,12 +2,12 @@
use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use device_api::interrupt::IpiDeliveryTarget;
use kernel_util::sync::{hack_locks, SpinFence};
use crate::{
arch::{Architecture, ArchitectureImpl, CpuMessage, ARCHITECTURE},
debug::{debug_internal, LogLevel},
device::display::console::flush_consoles,
sync::{hack_locks, SpinFence},
task::{sched::CpuQueue, Cpu},
};

View File

@@ -1,211 +0,0 @@
//! Synchronization primitives
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};
use crate::arch::{Architecture, ArchitectureImpl};
static LOCK_HACK: AtomicBool = AtomicBool::new(false);
/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks.
///
/// # Safety
///
/// Only meant to be called from the panic handler, once the caller is sure all other CPUs are halted.
pub unsafe fn hack_locks() {
LOCK_HACK.store(true, Ordering::Release);
}
/// Simple spinloop-based fence guaranteeing that execution resumes only after its condition is
/// met.
pub struct SpinFence {
value: AtomicUsize,
}
/// Token type used to prevent IRQs from firing during a critical section. Normal IRQ operation
/// (if enabled beforehand) is resumed when the [IrqGuard] is dropped.
pub struct IrqGuard(bool);
struct SpinlockInner<T> {
value: UnsafeCell<T>,
state: AtomicBool,
}
struct SpinlockInnerGuard<'a, T> {
lock: &'a SpinlockInner<T>,
}
/// Spinlock implementation which masks IRQs to avoid deadlocks when an interrupt handler
/// tries to acquire a lock that was already taken when the IRQ fired.
pub struct IrqSafeSpinlock<T> {
inner: SpinlockInner<T>,
}
/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal
/// IRQ operation (if enabled before acquiring) when the guard is dropped.
pub struct IrqSafeSpinlockGuard<'a, T> {
// Must come first to ensure the lock is dropped first and only then IRQs are re-enabled
inner: SpinlockInnerGuard<'a, T>,
_irq: IrqGuard,
}
// Spinlock impls
impl<T> SpinlockInner<T> {
const fn new(value: T) -> Self {
Self {
value: UnsafeCell::new(value),
state: AtomicBool::new(false),
}
}
fn lock(&self) -> SpinlockInnerGuard<T> {
// Loop until the lock can be acquired
if LOCK_HACK.load(Ordering::Acquire) {
return SpinlockInnerGuard { lock: self };
}
while self
.state
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
core::hint::spin_loop();
}
SpinlockInnerGuard { lock: self }
}
}
impl<'a, T> Deref for SpinlockInnerGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lock.value.get() }
}
}
impl<'a, T> DerefMut for SpinlockInnerGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.value.get() }
}
}
impl<'a, T> Drop for SpinlockInnerGuard<'a, T> {
fn drop(&mut self) {
if !LOCK_HACK.load(Ordering::Acquire) {
self.lock
.state
.compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
.unwrap();
}
}
}
unsafe impl<T> Sync for SpinlockInner<T> {}
unsafe impl<T> Send for SpinlockInner<T> {}
// IrqSafeSpinlock impls
impl<T> IrqSafeSpinlock<T> {
/// Wraps the value in a spinlock primitive
pub const fn new(value: T) -> Self {
Self {
inner: SpinlockInner::new(value),
}
}
/// Acquires the lock, spinning until it becomes available. IRQs are masked until the returned
/// guard is dropped.
pub fn lock(&self) -> IrqSafeSpinlockGuard<T> {
// Disable IRQs to avoid IRQ handler trying to acquire the same lock
let irq_guard = IrqGuard::acquire();
// Acquire the inner lock
let inner = self.inner.lock();
IrqSafeSpinlockGuard {
inner,
_irq: irq_guard,
}
}
/// Returns a mutable reference to the inner value without taking the lock.
///
/// # Safety
///
/// The caller must guarantee exclusive access to the value: this bypasses both the lock and
/// IRQ masking.
#[allow(clippy::mut_from_ref)]
pub unsafe fn grab(&self) -> &mut T {
unsafe { &mut *self.inner.value.get() }
}
}
impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.deref_mut()
}
}
// IrqGuard impls
impl IrqGuard {
/// Saves the current IRQ state and masks interrupts
pub fn acquire() -> Self {
let mask = ArchitectureImpl::interrupt_mask();
unsafe {
ArchitectureImpl::set_interrupt_mask(true);
}
Self(mask)
}
}
impl Drop for IrqGuard {
fn drop(&mut self) {
unsafe {
ArchitectureImpl::set_interrupt_mask(self.0);
}
}
}
// SpinFence impls
impl SpinFence {
/// Constructs a new [SpinFence]
pub const fn new() -> Self {
Self {
value: AtomicUsize::new(0),
}
}
/// Resets a fence back to its original state
pub fn reset(&self) {
self.value.store(0, Ordering::Release);
}
/// "Signals" a fence, incrementing its internal counter by one
pub fn signal(&self) {
self.value.fetch_add(1, Ordering::SeqCst);
}
/// Waits until the fence has been signalled at least the specified number of times
pub fn wait_all(&self, count: usize) {
while self.value.load(Ordering::Acquire) < count {
core::hint::spin_loop();
}
}
/// Waits until the fence is signalled at least once
pub fn wait_one(&self) {
self.wait_all(1);
}
/// Returns `true` if the fence has been signalled at least the specified number of times
pub fn try_wait_all(&self, count: usize) -> bool {
self.value.load(Ordering::Acquire) >= count
}
}

View File

@@ -8,6 +8,7 @@ use abi::{
syscall::SyscallFunction,
};
use alloc::rc::Rc;
use kernel_util::sync::IrqSafeSpinlockGuard;
// use vfs::{IoContext, Read, ReadDirectory, Seek, VnodeKind, VnodeRef, Write};
use yggdrasil_abi::{
error::SyscallResult,
@@ -19,7 +20,6 @@ use crate::{
debug::LogLevel,
mem::{phys, table::MapAttributes},
proc::{self, io::ProcessIo},
sync::IrqSafeSpinlockGuard,
task::{
process::{Process, ProcessId},
runtime,

View File

@@ -4,10 +4,10 @@
use abi::error::Error;
use alloc::{string::String, sync::Arc, vec::Vec};
use kernel_util::sync::{IrqSafeSpinlock, SpinFence};
use crate::{
arch::{Architecture, ArchitectureImpl},
sync::{IrqSafeSpinlock, SpinFence},
task::{sched::CpuQueue, thread::Thread},
};

View File

@@ -19,7 +19,7 @@ use alloc::{
vec::Vec,
};
use futures_util::Future;
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit};
use crate::{
mem::{
@@ -29,7 +29,6 @@ use crate::{
table::MapAttributes,
},
proc::{self, io::ProcessIo},
sync::IrqSafeSpinlock,
task::context::TaskContextImpl,
};

View File

@@ -1,7 +1,6 @@
use alloc::sync::Arc;
use futures_util::{future::BoxFuture, task::ArcWake, Future, FutureExt};
use crate::sync::IrqSafeSpinlock;
use kernel_util::sync::IrqSafeSpinlock;
use super::executor;

View File

@@ -1,11 +1,10 @@
use abi::error::Error;
use alloc::sync::Arc;
use crossbeam_queue::ArrayQueue;
use kernel_util::util::OneTimeInit;
use kernel_util::{sync::IrqGuard, util::OneTimeInit};
use crate::{
arch::{Architecture, ArchitectureImpl},
sync::IrqGuard,
task::thread::Thread,
};

View File

@@ -1,8 +1,7 @@
use core::task::Waker;
use alloc::collections::VecDeque;
use crate::sync::IrqSafeSpinlock;
use kernel_util::sync::IrqSafeSpinlock;
pub struct QueueWaker {
queue: IrqSafeSpinlock<VecDeque<Waker>>,

View File

@@ -9,12 +9,14 @@ use alloc::{
vec::Vec,
};
use cfg_if::cfg_if;
use kernel_util::util::OneTimeInit;
use kernel_util::{
sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard},
util::OneTimeInit,
};
use crate::{
// arch::aarch64::{context::TaskContext, cpu::Cpu},
arch::{Architecture, ArchitectureImpl},
sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard},
task::thread::ThreadState,
};

View File

@@ -18,12 +18,14 @@ use alloc::{
};
use atomic_enum::atomic_enum;
use futures_util::{task::ArcWake, Future};
use kernel_util::util::OneTimeInit;
use kernel_util::{
sync::{IrqGuard, IrqSafeSpinlock},
util::OneTimeInit,
};
use crate::{
block,
mem::{process::ProcessAddressSpace, ForeignPointer},
sync::{IrqGuard, IrqSafeSpinlock},
task::{context::TaskContextImpl, Cpu},
};

View File

@@ -6,8 +6,9 @@ use core::{
use abi::error::Error;
use alloc::sync::Arc;
use futures_util::Future;
use kernel_util::sync::IrqSafeSpinlock;
use crate::{sync::IrqSafeSpinlock, task::runtime::QueueWaker};
use crate::task::runtime::QueueWaker;
pub struct RingBuffer<T, const N: usize> {
rd: usize,