//! Per-CPU queue implementation
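//!
//! Each CPU owns a [CpuQueue] holding the IDs of threads that are ready to
//! run. The scheduler switches between those threads' task contexts and a
//! per-queue idle task, which spins while the queue is empty.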

use core::sync::atomic::Ordering;

// use aarch64_cpu::registers::CNTPCT_EL0;
use alloc::{collections::VecDeque, sync::Arc, vec::Vec};
use cfg_if::cfg_if;
use kernel_util::util::OneTimeInit;

use crate::{
    // arch::aarch64::{context::TaskContext, cpu::Cpu},
    arch::{Architecture, ArchitectureImpl},
    sync::{IrqGuard, IrqSafeSpinlock, IrqSafeSpinlockGuard},
    task::thread::ThreadState,
};

use super::{
    context::TaskContextImpl,
    thread::{Thread, ThreadId},
    Cpu, TaskContext,
};

/// Per-CPU statistics
#[derive(Default)]
pub struct CpuQueueStats {
    /// Ticks spent idling
    pub idle_time: u64,
    /// Ticks spent running CPU tasks
    pub cpu_time: u64,

    /// Time since last measurement
    measure_time: u64,
}

/// Per-CPU queue's inner data, normally resides under a lock
struct CpuQueueInner {
    /// Current thread, `None` if idling
    current: Option<ThreadId>,
    /// FIFO queue of threads waiting for execution
    queue: VecDeque<ThreadId>,

    /// CPU time usage statistics
    stats: CpuQueueStats,
}

/// Per-CPU queue
pub struct CpuQueue {
    inner: IrqSafeSpinlock<CpuQueueInner>,
    /// Idle task context, entered when the queue is empty
    idle: TaskContext,
    /// Index of the CPU this queue belongs to
    index: usize,
}

static QUEUES: OneTimeInit<Vec<CpuQueue>> = OneTimeInit::new();
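
// Body of the per-queue idle task: spins in place until the CPU receives an
// interrupt, whose handler is expected to call yield_cpu() again.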
#[naked]
extern "C" fn __idle(_x: usize) -> ! {
    unsafe {
        cfg_if! {
            if #[cfg(target_arch = "aarch64")] {
                core::arch::asm!("1: nop; b 1b", options(noreturn));
            } else if #[cfg(target_arch = "x86_64")] {
                core::arch::asm!(r#"
                1:
                    nop
                    jmp 1b
                "#, options(noreturn, att_syntax));
            } else {
                // Fallback for unsupported targets; the empty asm only
                // satisfies the `noreturn` requirement and is never expected
                // to execute.
                core::arch::asm!("", options(noreturn));
            }
        }
    }
}

impl CpuQueueStats {
    /// Reset the stats to zero values
    pub fn reset(&mut self) {
        self.cpu_time = 0;
        self.idle_time = 0;
    }
}

impl CpuQueueInner {
    /// Picks the next ready task from the queue. Returns `None` if the queue is
    /// empty, in which case the scheduler should go idle.
    ///
    /// # Panics
    ///
    /// Panics if a dequeued thread is in any state other than [ThreadState::Ready].
    pub fn next_ready_task(&mut self) -> Option<Arc<Thread>> {
        #[allow(clippy::never_loop)]
        while let Some(task) = self.queue.pop_front() {
            let task_t = Thread::get(task).unwrap();

            match task_t.state.load(Ordering::Acquire) {
                ThreadState::Ready => {
                    return Some(task_t);
                }
                e => panic!(
                    "Unexpected thread state in CpuQueue: {:?} ({} {:?})",
                    e,
                    task_t.id(),
                    task_t.name()
                ),
            }
        }

        None
    }

    /// Returns an iterator over all the threads in the queue plus the currently
    /// running thread, if there is one.
    pub fn iter(&self) -> impl Iterator<Item = &ThreadId> {
        Iterator::chain(self.queue.iter(), self.current.iter())
    }
}

impl CpuQueue {
    /// Constructs an empty queue with its own idle task
    pub fn new(index: usize) -> Self {
        let idle = TaskContext::kernel(__idle, Cpu::local_id() as usize)
            .expect("Could not construct an idle task");

        Self {
            inner: IrqSafeSpinlock::new(CpuQueueInner {
                current: None,
                queue: VecDeque::new(),
                stats: CpuQueueStats::default(),
            }),
            idle,
            index,
        }
    }

    /// Starts queue execution on the current CPU.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the [crate::task::enter()] function.
    pub unsafe fn enter(&self) -> ! {
        assert!(ArchitectureImpl::interrupt_mask());
        // Start from the idle thread to avoid having an Arc stuck here without
        // getting dropped
        // let t = CNTPCT_EL0.get();
        // self.lock().stats.measure_time = t;
        self.idle.enter()
    }

    /// Yields CPU execution to the next task in the queue (or to the idle task
    /// if there is none).
    ///
    /// # Safety
    ///
    /// The function is only meant to be called from kernel threads (e.g. if they want to yield
    /// CPU execution to wait for something) or interrupt handlers.
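    ///
    /// # Example
    ///
    /// A hedged sketch of a typical call site (`condition_met` is a hypothetical
    /// helper, not part of this module): a kernel thread may yield between polls
    /// of a condition it is waiting for.
    ///
    /// ```ignore
    /// while !condition_met() {
    ///     unsafe { CpuQueue::for_cpu(Cpu::local_id() as usize).yield_cpu() };
    /// }
    /// ```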
    pub unsafe fn yield_cpu(&self) {
        // Make sure the scheduling process doesn't get interrupted
        ArchitectureImpl::set_interrupt_mask(true);

        let mut inner = self.inner.lock();

        // let t = CNTPCT_EL0.get();
        // let delta = t - inner.stats.measure_time;
        // inner.stats.measure_time = t;

        let current = inner.current.clone();
        let current_t = current.and_then(Thread::get);

        if let Some(current_t) = current_t.as_ref() {
            if current_t.state.load(Ordering::Acquire) == ThreadState::Terminated {
                current_t.exit_notify.wake_all();
            }

            // Requeue the thread only if it was still Running; a suspended or
            // terminated thread stays off the queue
            if current_t
                .state
                .compare_exchange(
                    ThreadState::Running,
                    ThreadState::Ready,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                inner.queue.push_back(current_t.id());
                // inner.stats.cpu_time += delta;
            }
        }
        // else
        // inner.stats.idle_time += delta;

        let next_t = inner.next_ready_task();

        inner.current = next_t.as_deref().map(Thread::id);

        // Can drop the lock, we hold the current and next Arcs
        drop(inner);

        let (from, _from_rc) = if let Some(current_t) = current_t.as_ref() {
            (current_t.context(), Arc::strong_count(current_t))
        } else {
            (&self.idle, 0)
        };

        let (to, _to_rc) = if let Some(next_t) = next_t.as_ref() {
            next_t.set_running(Cpu::local_id());
            (next_t.context(), Arc::strong_count(next_t))
        } else {
            (&self.idle, 0)
        };

        assert!(ArchitectureImpl::interrupt_mask());
        to.switch(from)
    }

    /// Pushes the thread to the back of the execution queue.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the [Thread] implementation. The function does not set any
    /// thread accounting information, which may lead to invalid states.
    pub unsafe fn enqueue(&self, tid: ThreadId) {
        let mut inner = self.inner.lock();
        assert!(ArchitectureImpl::interrupt_mask());

        inner.queue.push_back(tid);
    }

    /// Removes the thread with the given TID from the execution queue.
    pub fn dequeue(&self, tid: ThreadId) {
        assert!(ArchitectureImpl::interrupt_mask());

        let mut inner = self.inner.lock();
        inner.queue.retain(|&p| p != tid)
    }

    /// Returns the queue length at this moment.
    ///
    /// # Note
    ///
    /// This value may change immediately after the call.
    pub fn len(&self) -> usize {
        self.inner.lock().queue.len()
    }

    /// Returns `true` if the queue is empty at the moment.
    ///
    /// # Note
    ///
    /// This value may change immediately after the call.
    pub fn is_empty(&self) -> bool {
        self.inner.lock().queue.is_empty()
    }

    // /// Returns a safe reference to the inner data structure.
    // pub fn lock(&self) -> IrqSafeSpinlockGuard<CpuQueueInner> {
    //     self.inner.lock()
    // }

    // /// Returns an unsafe reference to the queue.
    // ///
    // /// # Safety
    // ///
    // /// Only meant to be called to dump the queue contents when panicking.
    // #[allow(clippy::mut_from_ref)]
    // pub unsafe fn grab(&self) -> &mut CpuQueueInner {
    //     self.inner.grab()
    // }
    //
    // /// Returns the process currently being executed.
    // ///
    // /// # Note
    // ///
    // /// This function should be safe in all kernel thread/interrupt contexts:
    // ///
    // /// * (in kthread) the code calling this will still remain on the same thread.
    // /// * (in irq) the code cannot be interrupted and other CPUs shouldn't change this queue,
    // ///   so it will remain valid until the end of the interrupt or until
    // ///   [CpuQueue::yield_cpu] is called.
    // pub fn current_process(&self) -> Option<CurrentProcess> {
    //     let guard = IrqGuard::acquire();
    //     self.inner
    //         .lock()
    //         .current
    //         .clone()
    //         .map(|p| unsafe { CurrentProcess::new(p, guard) })
    // }

    /// Returns the ID of the thread currently running on this queue, if any
    pub fn current_id(&self) -> Option<ThreadId> {
        self.inner.lock().current
    }

    /// Returns the queue for the given CPU index
    pub fn for_cpu(id: usize) -> &'static CpuQueue {
        &QUEUES.get()[id]
    }

    /// Returns an iterator over all the queues in the system
    #[inline]
    pub fn all() -> impl Iterator<Item = &'static CpuQueue> {
        QUEUES.get().iter()
    }

    /// Picks the queue with the fewest tasks in it
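    ///
    /// # Example
    ///
    /// A hedged balancing sketch (`thread` stands in for a hypothetical ready
    /// thread, not defined in this module):
    ///
    /// ```ignore
    /// let (_index, queue) = CpuQueue::least_loaded().expect("no queues initialized");
    /// unsafe { queue.enqueue(thread.id()) };
    /// ```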
    pub fn least_loaded() -> Option<(usize, &'static CpuQueue)> {
        let queues = QUEUES.get();

        queues.iter().enumerate().min_by_key(|(_, q)| q.len())
    }

    /// Returns the index of the CPU this queue belongs to
    pub fn index(&self) -> usize {
        self.index
    }
}

/// Initializes the global queue list
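///
/// # Example
///
/// A hedged boot-time sketch (`cpu_count` is an assumed platform-provided
/// value):
///
/// ```ignore
/// init_queues((0..cpu_count).map(CpuQueue::new).collect());
/// // Each CPU then enters its own queue to start scheduling:
/// // unsafe { CpuQueue::for_cpu(index).enter() }
/// ```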
pub fn init_queues(queues: Vec<CpuQueue>) {
    QUEUES.init(queues);
}