2024-02-08 15:50:25 +02:00
|
|
|
use core::{
|
|
|
|
cell::UnsafeCell,
|
|
|
|
marker::PhantomData,
|
|
|
|
mem,
|
|
|
|
ops::{Deref, DerefMut},
|
|
|
|
sync::atomic::{AtomicBool, Ordering},
|
|
|
|
};
|
|
|
|
|
|
|
|
use crate::{guard::IrqGuard, Architecture};
|
|
|
|
|
2024-07-28 12:53:30 +03:00
|
|
|
/// Basic busy-wait mutual exclusion primitive. Spins until the lock can be acquired and does
/// NOT mask IRQs — see [IrqSafeSpinlock] for the interrupt-safe variant.
pub struct Spinlock<A: Architecture, T> {
    // Protected data; UnsafeCell permits mutation through a shared reference while locked
    value: UnsafeCell<T>,
    // Lock flag: false = unlocked, true = held
    state: AtomicBool,
    // Ties the lock to a specific Architecture implementation without storing one
    _pd: PhantomData<A>,
}
|
|
|
|
|
2024-07-28 12:53:30 +03:00
|
|
|
/// Token type allowing access to the data protected by a [Spinlock].
/// Releases the lock when dropped.
pub struct SpinlockGuard<'a, A: Architecture, T> {
    // Borrow of the owning lock; used both to reach `value` and to clear `state` on drop
    lock: &'a Spinlock<A, T>,
}
|
|
|
|
|
|
|
|
/// Spinlock implementation which prevents interrupts to avoid deadlocks when an interrupt handler
/// tries to acquire a lock taken before the IRQ fired.
pub struct IrqSafeSpinlock<A: Architecture, T> {
    // Plain spinlock; IRQ masking is layered on top in lock()
    inner: Spinlock<A, T>,
}
|
|
|
|
|
|
|
|
/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal
/// IRQ operation (if enabled before acquiring) when the lifetime is over.
pub struct IrqSafeSpinlockGuard<'a, A: Architecture, T> {
    // Must come first to ensure the lock is dropped first and only then IRQs are re-enabled
    inner: SpinlockGuard<'a, A, T>,
    // Restores the pre-acquisition IRQ state when dropped
    _irq: IrqGuard<A>,
}
|
|
|
|
|
|
|
|
// Spinlock impls
|
2024-07-28 12:53:30 +03:00
|
|
|
impl<A: Architecture, T> Spinlock<A, T> {
|
|
|
|
pub const fn new(value: T) -> Self {
|
2024-02-08 15:50:25 +02:00
|
|
|
Self {
|
|
|
|
value: UnsafeCell::new(value),
|
|
|
|
state: AtomicBool::new(false),
|
|
|
|
_pd: PhantomData,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-07-28 12:53:30 +03:00
|
|
|
pub fn lock(&self) -> SpinlockGuard<A, T> {
|
2024-02-08 15:50:25 +02:00
|
|
|
// Loop until the lock can be acquired
|
|
|
|
// if LOCK_HACK.load(Ordering::Acquire) {
|
|
|
|
// return SpinlockInnerGuard { lock: self };
|
|
|
|
// }
|
|
|
|
while self
|
|
|
|
.state
|
|
|
|
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
|
|
|
|
.is_err()
|
|
|
|
{
|
|
|
|
core::hint::spin_loop();
|
|
|
|
}
|
|
|
|
|
2024-07-28 12:53:30 +03:00
|
|
|
SpinlockGuard { lock: self }
|
2024-02-08 15:50:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
impl<A: Architecture, T> Deref for SpinlockGuard<'_, A, T> {
    type Target = T;

    /// Shared access to the protected value while the lock is held.
    fn deref(&self) -> &Self::Target {
        // SAFETY: the existence of this guard proves the lock is held, so no other
        // guard can concurrently hand out a reference to the value.
        let ptr = self.lock.value.get();
        unsafe { &*ptr }
    }
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
impl<A: Architecture, T> DerefMut for SpinlockGuard<'_, A, T> {
    /// Exclusive access to the protected value while the lock is held.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: the guard is held and borrowed mutably, so this is the only
        // live reference into the protected value.
        let ptr = self.lock.value.get();
        unsafe { &mut *ptr }
    }
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
impl<A: Architecture, T> Drop for SpinlockGuard<'_, A, T> {
    /// Releases the lock.
    fn drop(&mut self) {
        // Flip the state back true -> false. Release ordering publishes all writes
        // made under the lock to the next acquirer. The exchange can only fail if
        // the lock was not actually held — a broken invariant worth a loud panic.
        self.lock
            .state
            .compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
            .expect("spinlock released while not locked");
    }
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
// SAFETY: the lock serializes all access to `value`, so sharing the lock across
// threads is sound whenever the value itself may be sent between threads.
unsafe impl<A: Architecture, T: Send> Sync for Spinlock<A, T> {}
// SAFETY: moving the lock moves the value with it; no thread-affine state is held.
unsafe impl<A: Architecture, T: Send> Send for Spinlock<A, T> {}
|
2024-02-08 15:50:25 +02:00
|
|
|
|
|
|
|
// IrqSafeSpinlock impls
|
|
|
|
impl<A: Architecture, T> IrqSafeSpinlock<A, T> {
|
|
|
|
/// Wraps the value in a spinlock primitive
|
|
|
|
pub const fn new(value: T) -> Self {
|
|
|
|
Self {
|
2024-07-28 12:53:30 +03:00
|
|
|
inner: Spinlock::new(value),
|
2024-02-08 15:50:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
|
|
|
pub fn replace(&self, value: T) -> T {
|
|
|
|
let mut lock = self.lock();
|
|
|
|
mem::replace(&mut lock, value)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Attempts to acquire a lock. IRQs will be disabled until the lock is released.
|
|
|
|
pub fn lock(&self) -> IrqSafeSpinlockGuard<A, T> {
|
|
|
|
// Disable IRQs to avoid IRQ handler trying to acquire the same lock
|
|
|
|
let irq_guard = IrqGuard::acquire();
|
|
|
|
|
|
|
|
// Acquire the inner lock
|
|
|
|
let inner = self.inner.lock();
|
|
|
|
|
|
|
|
IrqSafeSpinlockGuard {
|
|
|
|
inner,
|
|
|
|
_irq: irq_guard,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns an unsafe reference to the inner value.
|
|
|
|
///
|
|
|
|
/// # Safety
|
|
|
|
///
|
|
|
|
/// Unsafe: explicitly ignores proper access sharing.
|
|
|
|
#[allow(clippy::mut_from_ref)]
|
|
|
|
pub unsafe fn grab(&self) -> &mut T {
|
|
|
|
unsafe { &mut *self.inner.value.get() }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl<A: Architecture, T: Clone> IrqSafeSpinlock<A, T> {
    /// Acquires the lock, clones the contained value, and releases the lock.
    pub fn get_cloned(&self) -> T {
        let guard = self.lock();
        guard.clone()
    }
}
|
|
|
|
|
|
|
|
impl<A: Architecture, T: Clone> Clone for IrqSafeSpinlock<A, T> {
    /// Produces a new, independent lock holding a clone of the current value.
    fn clone(&self) -> Self {
        let guard = self.lock();
        Self::new(guard.clone())
    }
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
impl<A: Architecture, T> Deref for IrqSafeSpinlockGuard<'_, A, T> {
    type Target = T;

    /// Forwards shared access through the inner spinlock guard.
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
|
|
|
|
|
2024-11-19 19:33:08 +02:00
|
|
|
impl<A: Architecture, T> DerefMut for IrqSafeSpinlockGuard<'_, A, T> {
    /// Forwards exclusive access through the inner spinlock guard.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.inner
    }
}
|
2024-11-03 12:34:38 +02:00
|
|
|
|
|
|
|
/// Helper macro to implement "split" locks. This may be needed when a very specific storage
/// layout for the locked type is required.
///
/// Expands to a module named after the static, containing the wrapped static, a private
/// lock flag, and an IRQ-disabling guard type; the lock method is named by the caller.
pub macro split_spinlock(
    // Imports re-emitted inside the generated module (it has its own scope)
    $(use $use:path;)*

    $(#[$meta:meta])*
    static $name:ident<$lock:ident: $arch:ty>: $ty:ty = $init:expr;
) {
    // Re-export the static so callers can use it without the module path
    pub use $name::$name;

    #[allow(non_snake_case)]
    pub mod $name {
        $(use $use;)*

        use core::cell::UnsafeCell;
        // NOTE(review): PhantomData appears unused by the generated code — verify
        use core::marker::PhantomData;
        use core::sync::atomic::{AtomicBool, Ordering};

        // Transparent wrapper: the static has exactly the layout of `$ty`
        #[repr(transparent)]
        pub struct __Wrapper(UnsafeCell<$ty>);

        $(#[$meta])*
        pub static $name: __Wrapper = __Wrapper(UnsafeCell::new($init));

        // Lock flag shared by all accessors of this static: false = unlocked
        static __LOCK: AtomicBool = AtomicBool::new(false);

        // Holds the IRQ guard so the pre-lock IRQ state is restored on drop
        pub struct __Guard($crate::guard::IrqGuard<$arch>);

        impl __Wrapper {
            // Method name is chosen by the macro caller; disables IRQs, then spins
            pub fn $lock(&self) -> __Guard {
                let irq = $crate::guard::IrqGuard::acquire();
                // Acquire pairs with the Release store in Drop below
                while __LOCK.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
                    core::hint::spin_loop();
                }
                __Guard(irq)
            }
        }

        // SAFETY: all access to the inner value is serialized through __LOCK
        unsafe impl Sync for __Wrapper {}

        impl core::ops::Deref for __Guard {
            type Target = $ty;

            fn deref(&self) -> &Self::Target {
                unsafe { &*$name.0.get() }
            }
        }

        impl core::ops::DerefMut for __Guard {
            fn deref_mut(&mut self) -> &mut Self::Target {
                unsafe { &mut *$name.0.get() }
            }
        }

        impl Drop for __Guard {
            fn drop(&mut self) {
                // Release the lock; IRQ state is restored by the inner IrqGuard's Drop
                __LOCK.store(false, Ordering::Release)
            }
        }
    }
}
|