From 2d7568f829daa2a7dcdf236bd80ce158e869a88f Mon Sep 17 00:00:00 2001 From: Mark Poliakov Date: Thu, 30 Nov 2023 11:00:51 +0200 Subject: [PATCH] refactor: move kernel::sync to kernel-util --- lib/kernel-util/src/api.rs | 7 + lib/kernel-util/src/lib.rs | 2 + lib/kernel-util/src/sync.rs | 207 ++++++++++++++++++++++++++++++ src/arch/aarch64/cpu.rs | 4 +- src/arch/aarch64/exception.rs | 2 +- src/arch/aarch64/gic/mod.rs | 3 +- src/arch/mod.rs | 20 +++ src/debug.rs | 6 +- src/device/display/console.rs | 4 +- src/device/display/fb_console.rs | 3 +- src/device/display/linear_fb.rs | 6 +- src/device/mod.rs | 3 +- src/device/serial/pl011.rs | 3 +- src/device/tty.rs | 2 +- src/main.rs | 3 +- src/mem/phys/mod.rs | 3 +- src/mem/process.rs | 3 +- src/panic.rs | 2 +- src/sync.rs | 211 ------------------------------- src/syscall/mod.rs | 2 +- src/task/mod.rs | 2 +- src/task/process.rs | 3 +- src/task/runtime/task.rs | 3 +- src/task/runtime/task_queue.rs | 3 +- src/task/runtime/waker.rs | 3 +- src/task/sched.rs | 6 +- src/task/thread.rs | 6 +- src/util/ring.rs | 3 +- 28 files changed, 274 insertions(+), 251 deletions(-) create mode 100644 lib/kernel-util/src/api.rs delete mode 100644 src/sync.rs diff --git a/lib/kernel-util/src/api.rs b/lib/kernel-util/src/api.rs new file mode 100644 index 00000000..5ead83e6 --- /dev/null +++ b/lib/kernel-util/src/api.rs @@ -0,0 +1,7 @@ +extern "Rust" { + pub fn __acquire_irq_guard() -> bool; + pub fn __release_irq_guard(mask: bool); + + pub fn __suspend(); + pub fn __yield(); +} diff --git a/lib/kernel-util/src/lib.rs b/lib/kernel-util/src/lib.rs index 9597c0aa..dc25946f 100644 --- a/lib/kernel-util/src/lib.rs +++ b/lib/kernel-util/src/lib.rs @@ -1,6 +1,8 @@ #![no_std] #![feature(maybe_uninit_slice)] +pub(crate) mod api; + pub mod sync; pub mod util; diff --git a/lib/kernel-util/src/sync.rs b/lib/kernel-util/src/sync.rs index e69de29b..c567357c 100644 --- a/lib/kernel-util/src/sync.rs +++ b/lib/kernel-util/src/sync.rs @@ -0,0 +1,207 @@ +//! Synchronization primitives +use core::{ + cell::UnsafeCell, + ops::{Deref, DerefMut}, + sync::atomic::{AtomicBool, AtomicUsize, Ordering}, +}; + +use crate::api; + +// use crate::arch::{Architecture, ArchitectureImpl}; + +static LOCK_HACK: AtomicBool = AtomicBool::new(false); + +/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks. +/// +/// # Safety +/// +/// Only meant to be called from panic handler when the caller is sure other CPUs are halted. +pub unsafe fn hack_locks() { + LOCK_HACK.store(true, Ordering::Release); +} + +/// Simple spinloop-based fence guaranteeing that the execution resumes only after its condition is +/// met. +pub struct SpinFence { + value: AtomicUsize, +} + +/// Token type used to prevent IRQs from firing during some critical section. Normal IRQ operation +/// (if enabled before) is resumed when [IrqGuard]'s lifetime is over. +pub struct IrqGuard(bool); + +struct SpinlockInner { + value: UnsafeCell, + state: AtomicBool, +} + +struct SpinlockInnerGuard<'a, T> { + lock: &'a SpinlockInner, +} + +/// Spinlock implementation which prevents interrupts to avoid deadlocks when an interrupt handler +/// tries to acquire a lock taken before the IRQ fired. +pub struct IrqSafeSpinlock { + inner: SpinlockInner, +} + +/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal +/// IRQ operation (if enabled before acquiring) when the lifetime is over. 
+pub struct IrqSafeSpinlockGuard<'a, T> { + // Must come first to ensure the lock is dropped first and only then IRQs are re-enabled + inner: SpinlockInnerGuard<'a, T>, + _irq: IrqGuard, +} + +// Spinlock impls +impl SpinlockInner { + const fn new(value: T) -> Self { + Self { + value: UnsafeCell::new(value), + state: AtomicBool::new(false), + } + } + + fn lock(&self) -> SpinlockInnerGuard { + // Loop until the lock can be acquired + if LOCK_HACK.load(Ordering::Acquire) { + return SpinlockInnerGuard { lock: self }; + } + while self + .state + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + core::hint::spin_loop(); + } + + SpinlockInnerGuard { lock: self } + } +} + +impl<'a, T> Deref for SpinlockInnerGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.lock.value.get() } + } +} + +impl<'a, T> DerefMut for SpinlockInnerGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.lock.value.get() } + } +} + +impl<'a, T> Drop for SpinlockInnerGuard<'a, T> { + fn drop(&mut self) { + if !LOCK_HACK.load(Ordering::Acquire) { + self.lock + .state + .compare_exchange(true, false, Ordering::Release, Ordering::Relaxed) + .unwrap(); + } + } +} + +unsafe impl Sync for SpinlockInner {} +unsafe impl Send for SpinlockInner {} + +// IrqSafeSpinlock impls +impl IrqSafeSpinlock { + /// Wraps the value in a spinlock primitive + pub const fn new(value: T) -> Self { + Self { + inner: SpinlockInner::new(value), + } + } + + /// Attempts to acquire a lock. IRQs will be disabled until the lock is released. + pub fn lock(&self) -> IrqSafeSpinlockGuard { + // Disable IRQs to avoid IRQ handler trying to acquire the same lock + let irq_guard = IrqGuard::acquire(); + + // Acquire the inner lock + let inner = self.inner.lock(); + + IrqSafeSpinlockGuard { + inner, + _irq: irq_guard, + } + } + + /// Returns an unsafe reference to the inner value. + /// + /// # Safety + /// + /// Unsafe: explicitly ignores proper access sharing. 
+ #[allow(clippy::mut_from_ref)] + pub unsafe fn grab(&self) -> &mut T { + unsafe { &mut *self.inner.value.get() } + } +} + +impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.inner.deref_mut() + } +} + +// IrqGuard impls +impl IrqGuard { + /// Saves the current IRQ state and masks them + pub fn acquire() -> Self { + Self(unsafe { api::__acquire_irq_guard() }) + } +} + +impl Drop for IrqGuard { + fn drop(&mut self) { + unsafe { api::__release_irq_guard(self.0) } + } +} + +// SpinFence impls +impl SpinFence { + /// Constructs a new [SpinFence] + pub const fn new() -> Self { + Self { + value: AtomicUsize::new(0), + } + } + + /// Resets a fence back to its original state + pub fn reset(&self) { + self.value.store(0, Ordering::Release); + } + + /// "Signals" a fence, incrementing its internal counter by one + pub fn signal(&self) { + self.value.fetch_add(1, Ordering::SeqCst); + } + + /// Waits until the fence is signalled at least the amount of times specified + pub fn wait_all(&self, count: usize) { + while self.value.load(Ordering::Acquire) < count { + core::hint::spin_loop(); + } + } + + /// Waits until the fence is signalled at least once + pub fn wait_one(&self) { + self.wait_all(1); + } + + /// Returns `true` if the fence has been signalled at least the amount of times specified + pub fn try_wait_all(&self, count: usize) -> bool { + self.value.load(Ordering::Acquire) >= count + } +} diff --git a/src/arch/aarch64/cpu.rs b/src/arch/aarch64/cpu.rs index 2ef75b42..177927d2 100644 --- a/src/arch/aarch64/cpu.rs +++ b/src/arch/aarch64/cpu.rs @@ -3,10 +3,10 @@ use core::sync::atomic::Ordering; use aarch64_cpu::registers::{MPIDR_EL1, TPIDR_EL1}; use alloc::{boxed::Box, vec::Vec}; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit}; use tock_registers::interfaces::{Readable, Writeable}; -use crate::{arch::CpuMessage, panic, sync::IrqSafeSpinlock, task::sched::CpuQueue}; +use crate::{arch::CpuMessage, panic, task::sched::CpuQueue}; use super::smp::CPU_COUNT; diff --git a/src/arch/aarch64/exception.rs b/src/arch/aarch64/exception.rs index 03c2dd36..5f0784d9 100644 --- a/src/arch/aarch64/exception.rs +++ b/src/arch/aarch64/exception.rs @@ -278,7 +278,7 @@ extern "C" fn __aa64_el1_sync_handler(frame: *mut ExceptionFrame) { let iss = esr_el1 & 0x1FFFFFF; unsafe { - crate::sync::hack_locks(); + kernel_util::sync::hack_locks(); } dump_irrecoverable_exception(frame, ec, iss); diff --git a/src/arch/aarch64/gic/mod.rs b/src/arch/aarch64/gic/mod.rs index 511e8792..f912f143 100644 --- a/src/arch/aarch64/gic/mod.rs +++ b/src/arch/aarch64/gic/mod.rs @@ -12,7 +12,7 @@ use device_api::{ }, Device, }; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit}; use crate::{ arch::{aarch64::IrqNumber, Architecture, CpuMessage}, @@ -23,7 +23,6 @@ use crate::{ device::{DeviceMemoryIo, RawDeviceMemoryMapping}, PhysicalAddress, }, - sync::IrqSafeSpinlock, }; use self::{gicc::Gicc, gicd::Gicd}; diff --git a/src/arch/mod.rs b/src/arch/mod.rs index 651ee988..2584b1a8 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -42,6 +42,26 @@ cfg_if! 
{ } } +// External API for architecture specifics + +#[no_mangle] +fn __acquire_irq_guard() -> bool { + let mask = ArchitectureImpl::interrupt_mask(); + unsafe { + ArchitectureImpl::set_interrupt_mask(true); + } + mask +} + +#[no_mangle] +fn __release_irq_guard(mask: bool) { + unsafe { + ArchitectureImpl::set_interrupt_mask(mask); + } +} + +// Architecture interfaces + /// Describes messages sent from some CPU to others #[derive(Clone, Copy, PartialEq, Debug)] #[repr(u64)] diff --git a/src/debug.rs b/src/debug.rs index 34062f6c..e1021547 100644 --- a/src/debug.rs +++ b/src/debug.rs @@ -4,10 +4,12 @@ use core::fmt::{self, Arguments}; use abi::error::Error; use alloc::sync::Arc; use futures_util::Future; -use kernel_util::util::{OneTimeInit, StaticVector}; +use kernel_util::{ + sync::IrqSafeSpinlock, + util::{OneTimeInit, StaticVector}, +}; use crate::{ - sync::IrqSafeSpinlock, task::{process::Process, runtime::QueueWaker}, util::ring::RingBuffer, }; diff --git a/src/device/display/console.rs b/src/device/display/console.rs index 31505c13..68548e22 100644 --- a/src/device/display/console.rs +++ b/src/device/display/console.rs @@ -5,9 +5,9 @@ use core::time::Duration; use abi::{error::Error, primitive_enum}; use alloc::{vec, vec::Vec}; use bitflags::bitflags; -use kernel_util::util::StaticVector; +use kernel_util::{sync::IrqSafeSpinlock, util::StaticVector}; -use crate::{debug::DebugSink, sync::IrqSafeSpinlock, task::runtime}; +use crate::{debug::DebugSink, task::runtime}; const CONSOLE_ROW_LEN: usize = 80; const MAX_CSI_ARGS: usize = 8; diff --git a/src/device/display/fb_console.rs b/src/device/display/fb_console.rs index 53c4cb6b..9336b750 100644 --- a/src/device/display/fb_console.rs +++ b/src/device/display/fb_console.rs @@ -1,8 +1,9 @@ //! Framebuffer console driver use abi::error::Error; +use kernel_util::sync::IrqSafeSpinlock; -use crate::{debug::DebugSink, sync::IrqSafeSpinlock}; +use crate::debug::DebugSink; use super::{ console::{Attributes, ConsoleBuffer, ConsoleState, DisplayConsole}, diff --git a/src/device/display/linear_fb.rs b/src/device/display/linear_fb.rs index 0d15b89b..843af4dd 100644 --- a/src/device/display/linear_fb.rs +++ b/src/device/display/linear_fb.rs @@ -4,11 +4,9 @@ use core::ops::{Index, IndexMut}; use abi::error::Error; use device_api::Device; +use kernel_util::sync::IrqSafeSpinlock; -use crate::{ - mem::{device::RawDeviceMemoryMapping, PhysicalAddress}, - sync::IrqSafeSpinlock, -}; +use crate::mem::{device::RawDeviceMemoryMapping, PhysicalAddress}; use super::{DisplayDevice, DisplayDimensions}; diff --git a/src/device/mod.rs b/src/device/mod.rs index 032006a3..7fa4d9e5 100644 --- a/src/device/mod.rs +++ b/src/device/mod.rs @@ -1,8 +1,7 @@ //! 
Device management and interfaces use device_api::{manager::DeviceManager, Device, DeviceId}; - -use crate::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard}; +use kernel_util::sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard}; #[cfg(target_arch = "aarch64")] pub mod devtree; diff --git a/src/device/serial/pl011.rs b/src/device/serial/pl011.rs index 5b75b017..53fc5225 100644 --- a/src/device/serial/pl011.rs +++ b/src/device/serial/pl011.rs @@ -2,7 +2,7 @@ use abi::{error::Error, io::DeviceRequest}; use alloc::boxed::Box; use device_api::{interrupt::InterruptHandler, serial::SerialDevice, Device}; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit}; use tock_registers::{ interfaces::{ReadWriteable, Readable, Writeable}, register_bitfields, register_structs, @@ -19,7 +19,6 @@ use crate::{ }, device_tree_driver, mem::{address::FromRaw, device::DeviceMemoryIo, PhysicalAddress}, - sync::IrqSafeSpinlock, task::process::ProcessId, }; diff --git a/src/device/tty.rs b/src/device/tty.rs index 06f00a6d..dfc80398 100644 --- a/src/device/tty.rs +++ b/src/device/tty.rs @@ -5,9 +5,9 @@ use abi::{ process::Signal, }; use device_api::serial::SerialDevice; +use kernel_util::sync::IrqSafeSpinlock; use crate::{ - sync::IrqSafeSpinlock, task::process::{Process, ProcessId}, util::ring::AsyncRing, }; diff --git a/src/main.rs b/src/main.rs index 56d2851f..34e110b2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -31,11 +31,11 @@ #![no_main] use arch::Architecture; +use kernel_util::sync::SpinFence; use crate::{ arch::{ArchitectureImpl, ARCHITECTURE}, mem::heap, - sync::SpinFence, task::{spawn_kernel_closure, Cpu}, }; @@ -55,7 +55,6 @@ pub mod init; pub mod mem; pub mod panic; pub mod proc; -pub mod sync; pub mod syscall; pub mod task; pub mod util; diff --git a/src/mem/phys/mod.rs b/src/mem/phys/mod.rs index 0c0a19b6..b645fe58 100644 --- a/src/mem/phys/mod.rs +++ b/src/mem/phys/mod.rs @@ -1,12 +1,11 @@ use core::ops::Range; use abi::error::Error; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit}; use crate::{ arch::{Architecture, ARCHITECTURE}, mem::{address::IntoRaw, phys::reserved::is_reserved}, - sync::IrqSafeSpinlock, }; use self::{ diff --git a/src/mem/process.rs b/src/mem/process.rs index cac37a1c..ef5e23dd 100644 --- a/src/mem/process.rs +++ b/src/mem/process.rs @@ -1,8 +1,9 @@ use abi::error::Error; use cfg_if::cfg_if; +use kernel_util::sync::IrqSafeSpinlock; use vmalloc::VirtualMemoryAllocator; -use crate::{mem::phys, sync::IrqSafeSpinlock}; +use crate::mem::phys; use super::{table::MapAttributes, PhysicalAddress}; diff --git a/src/panic.rs b/src/panic.rs index 312b31db..14939ecf 100644 --- a/src/panic.rs +++ b/src/panic.rs @@ -2,12 +2,12 @@ use core::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use device_api::interrupt::IpiDeliveryTarget; +use kernel_util::sync::{hack_locks, SpinFence}; use crate::{ arch::{Architecture, ArchitectureImpl, CpuMessage, ARCHITECTURE}, debug::{debug_internal, LogLevel}, device::display::console::flush_consoles, - sync::{hack_locks, SpinFence}, task::{sched::CpuQueue, Cpu}, }; diff --git a/src/sync.rs b/src/sync.rs deleted file mode 100644 index dc8debf9..00000000 --- a/src/sync.rs +++ /dev/null @@ -1,211 +0,0 @@ -//! 
Synchronization primitives -use core::{ - cell::UnsafeCell, - ops::{Deref, DerefMut}, - sync::atomic::{AtomicBool, AtomicUsize, Ordering}, -}; - -use crate::arch::{Architecture, ArchitectureImpl}; - -static LOCK_HACK: AtomicBool = AtomicBool::new(false); - -/// "Hacks" all the locks in the kernel to make them function as "NULL"-locks instead of spinlocks. -/// -/// # Safety -/// -/// Only meant to be called from panic handler when the caller is sure other CPUs are halted. -pub unsafe fn hack_locks() { - LOCK_HACK.store(true, Ordering::Release); -} - -/// Simple spinloop-based fence guaranteeing that the execution resumes only after its condition is -/// met. -pub struct SpinFence { - value: AtomicUsize, -} - -/// Token type used to prevent IRQs from firing during some critical section. Normal IRQ operation -/// (if enabled before) is resumed when [IrqGuard]'s lifetime is over. -pub struct IrqGuard(bool); - -struct SpinlockInner { - value: UnsafeCell, - state: AtomicBool, -} - -struct SpinlockInnerGuard<'a, T> { - lock: &'a SpinlockInner, -} - -/// Spinlock implementation which prevents interrupts to avoid deadlocks when an interrupt handler -/// tries to acquire a lock taken before the IRQ fired. -pub struct IrqSafeSpinlock { - inner: SpinlockInner, -} - -/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal -/// IRQ operation (if enabled before acquiring) when the lifetime is over. -pub struct IrqSafeSpinlockGuard<'a, T> { - // Must come first to ensure the lock is dropped first and only then IRQs are re-enabled - inner: SpinlockInnerGuard<'a, T>, - _irq: IrqGuard, -} - -// Spinlock impls -impl SpinlockInner { - const fn new(value: T) -> Self { - Self { - value: UnsafeCell::new(value), - state: AtomicBool::new(false), - } - } - - fn lock(&self) -> SpinlockInnerGuard { - // Loop until the lock can be acquired - if LOCK_HACK.load(Ordering::Acquire) { - return SpinlockInnerGuard { lock: self }; - } - while self - .state - .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_err() - { - core::hint::spin_loop(); - } - - SpinlockInnerGuard { lock: self } - } -} - -impl<'a, T> Deref for SpinlockInnerGuard<'a, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - unsafe { &*self.lock.value.get() } - } -} - -impl<'a, T> DerefMut for SpinlockInnerGuard<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *self.lock.value.get() } - } -} - -impl<'a, T> Drop for SpinlockInnerGuard<'a, T> { - fn drop(&mut self) { - if !LOCK_HACK.load(Ordering::Acquire) { - self.lock - .state - .compare_exchange(true, false, Ordering::Release, Ordering::Relaxed) - .unwrap(); - } - } -} - -unsafe impl Sync for SpinlockInner {} -unsafe impl Send for SpinlockInner {} - -// IrqSafeSpinlock impls -impl IrqSafeSpinlock { - /// Wraps the value in a spinlock primitive - pub const fn new(value: T) -> Self { - Self { - inner: SpinlockInner::new(value), - } - } - - /// Attempts to acquire a lock. IRQs will be disabled until the lock is released. - pub fn lock(&self) -> IrqSafeSpinlockGuard { - // Disable IRQs to avoid IRQ handler trying to acquire the same lock - let irq_guard = IrqGuard::acquire(); - - // Acquire the inner lock - let inner = self.inner.lock(); - - IrqSafeSpinlockGuard { - inner, - _irq: irq_guard, - } - } - - /// Returns an unsafe reference to the inner value. - /// - /// # Safety - /// - /// Unsafe: explicitly ignores proper access sharing. 
- #[allow(clippy::mut_from_ref)] - pub unsafe fn grab(&self) -> &mut T { - unsafe { &mut *self.inner.value.get() } - } -} - -impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - self.inner.deref() - } -} - -impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> { - fn deref_mut(&mut self) -> &mut Self::Target { - self.inner.deref_mut() - } -} - -// IrqGuard impls -impl IrqGuard { - /// Saves the current IRQ state and masks them - pub fn acquire() -> Self { - let mask = ArchitectureImpl::interrupt_mask(); - unsafe { - ArchitectureImpl::set_interrupt_mask(true); - } - Self(mask) - } -} - -impl Drop for IrqGuard { - fn drop(&mut self) { - unsafe { - ArchitectureImpl::set_interrupt_mask(self.0); - } - } -} - -// SpinFence impls -impl SpinFence { - /// Constructs a new [SpinFence] - pub const fn new() -> Self { - Self { - value: AtomicUsize::new(0), - } - } - - /// Resets a fence back to its original state - pub fn reset(&self) { - self.value.store(0, Ordering::Release); - } - - /// "Signals" a fence, incrementing its internal counter by one - pub fn signal(&self) { - self.value.fetch_add(1, Ordering::SeqCst); - } - - /// Waits until the fence is signalled at least the amount of times specified - pub fn wait_all(&self, count: usize) { - while self.value.load(Ordering::Acquire) < count { - core::hint::spin_loop(); - } - } - - /// Waits until the fence is signalled at least once - pub fn wait_one(&self) { - self.wait_all(1); - } - - /// Returns `true` if the fence has been signalled at least the amount of times specified - pub fn try_wait_all(&self, count: usize) -> bool { - self.value.load(Ordering::Acquire) >= count - } -} diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index 0e0fd087..bf401418 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -8,6 +8,7 @@ use abi::{ syscall::SyscallFunction, }; use alloc::rc::Rc; +use kernel_util::sync::IrqSafeSpinlockGuard; // use vfs::{IoContext, Read, ReadDirectory, Seek, VnodeKind, VnodeRef, Write}; use yggdrasil_abi::{ error::SyscallResult, @@ -19,7 +20,6 @@ use crate::{ debug::LogLevel, mem::{phys, table::MapAttributes}, proc::{self, io::ProcessIo}, - sync::IrqSafeSpinlockGuard, task::{ process::{Process, ProcessId}, runtime, diff --git a/src/task/mod.rs b/src/task/mod.rs index 1f7658aa..f3c3c8c5 100644 --- a/src/task/mod.rs +++ b/src/task/mod.rs @@ -4,10 +4,10 @@ use abi::error::Error; use alloc::{string::String, sync::Arc, vec::Vec}; +use kernel_util::sync::{IrqSafeSpinlock, SpinFence}; use crate::{ arch::{Architecture, ArchitectureImpl}, - sync::{IrqSafeSpinlock, SpinFence}, task::{sched::CpuQueue, thread::Thread}, }; diff --git a/src/task/process.rs b/src/task/process.rs index da6903e9..341649b7 100644 --- a/src/task/process.rs +++ b/src/task/process.rs @@ -19,7 +19,7 @@ use alloc::{ vec::Vec, }; use futures_util::Future; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqSafeSpinlock, util::OneTimeInit}; use crate::{ mem::{ @@ -29,7 +29,6 @@ use crate::{ table::MapAttributes, }, proc::{self, io::ProcessIo}, - sync::IrqSafeSpinlock, task::context::TaskContextImpl, }; diff --git a/src/task/runtime/task.rs b/src/task/runtime/task.rs index c4b5b253..7a064c32 100644 --- a/src/task/runtime/task.rs +++ b/src/task/runtime/task.rs @@ -1,7 +1,6 @@ use alloc::sync::Arc; use futures_util::{future::BoxFuture, task::ArcWake, Future, FutureExt}; - -use crate::sync::IrqSafeSpinlock; +use kernel_util::sync::IrqSafeSpinlock; use super::executor; diff --git 
a/src/task/runtime/task_queue.rs b/src/task/runtime/task_queue.rs index 0fc0715d..43970baf 100644 --- a/src/task/runtime/task_queue.rs +++ b/src/task/runtime/task_queue.rs @@ -1,11 +1,10 @@ use abi::error::Error; use alloc::sync::Arc; use crossbeam_queue::ArrayQueue; -use kernel_util::util::OneTimeInit; +use kernel_util::{sync::IrqGuard, util::OneTimeInit}; use crate::{ arch::{Architecture, ArchitectureImpl}, - sync::IrqGuard, task::thread::Thread, }; diff --git a/src/task/runtime/waker.rs b/src/task/runtime/waker.rs index 2ca9b9f4..2f20f992 100644 --- a/src/task/runtime/waker.rs +++ b/src/task/runtime/waker.rs @@ -1,8 +1,7 @@ use core::task::Waker; use alloc::collections::VecDeque; - -use crate::sync::IrqSafeSpinlock; +use kernel_util::sync::IrqSafeSpinlock; pub struct QueueWaker { queue: IrqSafeSpinlock>, diff --git a/src/task/sched.rs b/src/task/sched.rs index f36b12b4..9de56d65 100644 --- a/src/task/sched.rs +++ b/src/task/sched.rs @@ -9,12 +9,14 @@ use alloc::{ vec::Vec, }; use cfg_if::cfg_if; -use kernel_util::util::OneTimeInit; +use kernel_util::{ + sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard}, + util::OneTimeInit, +}; use crate::{ // arch::aarch64::{context::TaskContext, cpu::Cpu}, arch::{Architecture, ArchitectureImpl}, - sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard}, task::thread::ThreadState, }; diff --git a/src/task/thread.rs b/src/task/thread.rs index 1719db56..84b91cd0 100644 --- a/src/task/thread.rs +++ b/src/task/thread.rs @@ -18,12 +18,14 @@ use alloc::{ }; use atomic_enum::atomic_enum; use futures_util::{task::ArcWake, Future}; -use kernel_util::util::OneTimeInit; +use kernel_util::{ + sync::{IrqGuard, IrqSafeSpinlock}, + util::OneTimeInit, +}; use crate::{ block, mem::{process::ProcessAddressSpace, ForeignPointer}, - sync::{IrqGuard, IrqSafeSpinlock}, task::{context::TaskContextImpl, Cpu}, }; diff --git a/src/util/ring.rs b/src/util/ring.rs index 41205e47..56e06d2c 100644 --- a/src/util/ring.rs +++ b/src/util/ring.rs @@ -6,8 +6,9 @@ use core::{ use abi::error::Error; use alloc::sync::Arc; use futures_util::Future; +use kernel_util::sync::IrqSafeSpinlock; -use crate::{sync::IrqSafeSpinlock, task::runtime::QueueWaker}; +use crate::task::runtime::QueueWaker; pub struct RingBuffer { rd: usize,
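
Usage note, not part of the patch: after this move, kernel code imports the primitives from kernel_util::sync instead of crate::sync, and kernel-util reaches back into the kernel for IRQ masking through the extern "Rust" declarations in lib/kernel-util/src/api.rs (__acquire_irq_guard / __release_irq_guard), which src/arch/mod.rs now satisfies with #[no_mangle] definitions. Below is a minimal sketch of a hypothetical consumer module; the item names are illustrative and do not appear anywhere in this patch.

    use kernel_util::sync::{IrqSafeSpinlock, SpinFence};

    // Both constructors are `const fn`, so the primitives can back statics.
    static COUNTER: IrqSafeSpinlock<u64> = IrqSafeSpinlock::new(0);
    static CPUS_READY: SpinFence = SpinFence::new();

    fn bump_counter() -> u64 {
        // IRQs stay masked while the guard is alive, so an interrupt handler
        // contending for the same lock cannot deadlock this critical section.
        let mut value = COUNTER.lock();
        *value += 1;
        *value
    }

    fn secondary_cpu_ready() {
        // Each secondary CPU signals the fence once its early init is done...
        CPUS_READY.signal();
    }

    fn wait_for_secondaries(count: usize) {
        // ...and the boot CPU spins until every secondary has checked in.
        CPUS_READY.wait_all(count);
    }

Routing the mask/unmask calls through __acquire_irq_guard/__release_irq_guard keeps kernel-util architecture-agnostic: the library only references the symbols, and whichever kernel binary links it supplies the ArchitectureImpl-backed definitions.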