block/nvme: multi-queue + multi-MSI

Mark Poliakov 2023-12-11 21:13:33 +02:00
parent 4ce7a57c4a
commit 61f217ab56
16 changed files with 367 additions and 137 deletions

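In short: instead of a single I/O queue pair serviced by MSI-X vector 0, the controller now creates one I/O queue pair per CPU and registers a contiguous MSI-X vector range, keeping vector 0 for the admin queue and routing vector N to ioqs[N - 1]; perform_io() submits on the queue belonging to the CPU it runs on. A condensed sketch of that vector-to-queue mapping (illustrative only; the helper below is not part of the diff):

// Illustrative helper, not in the commit: summarizes the dispatch done by
// NvmeController::handle_msi() further down.
enum MsiTarget {
    AdminQueue,     // vector 0
    IoQueue(usize), // index into ioqs, one queue per CPU
    Unknown,        // out of range, ignored
}

fn classify_vector(vector: usize, io_queue_count: usize) -> MsiTarget {
    match vector {
        0 => MsiTarget::AdminQueue,
        v if v <= io_queue_count => MsiTarget::IoQueue(v - 1),
        _ => MsiTarget::Unknown,
    }
}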

@@ -1,6 +1,6 @@
 use core::{fmt, mem::MaybeUninit};

-use alloc::collections::BTreeMap;
+use alloc::collections::{btree_map::Entry, BTreeMap};
 use kernel_util::mem::PageBox;
 use yggdrasil_abi::error::Error;
@@ -40,12 +40,11 @@ impl<K: Ord + Eq> BlockCache<K> {
     where
         K: Copy + fmt::Display,
     {
-        if !self.table.contains_key(&index) {
+        if let Entry::Vacant(entry) = self.table.entry(index) {
             let mut block = PageBox::new_uninit_slice(self.block_size)?;
             log::debug!("Missed block with index {}, fetching", index);
             f(&mut block)?;
-            self.table
-                .insert(index, unsafe { block.assume_init_slice() });
+            entry.insert(unsafe { block.assume_init_slice() });
         }
         Ok(self.table.get_mut(&index).unwrap())


@@ -1,22 +1,30 @@
-#![feature(strict_provenance, const_trait_impl)]
+#![feature(strict_provenance, const_trait_impl, let_chains)]
 #![allow(missing_docs)]
 #![no_std]

 extern crate alloc;

-use core::{mem::size_of, time::Duration};
+use core::{
+    mem::size_of,
+    sync::atomic::{AtomicUsize, Ordering},
+    time::Duration,
+};

 use alloc::{boxed::Box, collections::BTreeMap, vec::Vec};
 use command::{IdentifyActiveNamespaceIdListRequest, IdentifyControllerRequest};
-use device_api::{interrupt::MsiHandler, Device};
+use device_api::{
+    interrupt::{InterruptAffinity, MsiHandler},
+    Device,
+};
 use drive::NvmeDrive;
 use kernel_util::{
+    cpu_count, cpu_index,
     mem::{
         address::{FromRaw, IntoRaw, PhysicalAddress},
-        device::{DeviceMemoryIo, DeviceMemoryIoMut},
+        device::DeviceMemoryIo,
     },
     message_interrupt_controller, runtime,
-    sync::IrqSafeSpinlock,
+    sync::{IrqGuard, IrqSafeSpinlock},
     util::OneTimeInit,
 };
 use tock_registers::{
@@ -25,7 +33,7 @@ use tock_registers::{
     registers::{ReadOnly, ReadWrite, WriteOnly},
 };
 use ygg_driver_pci::{
-    capability::{MsiXCapability, MsiXEntry},
+    capability::{MsiXCapability, MsiXVectorTable},
     PciBaseAddress, PciCommandRegister, PciConfigurationSpace, PciDeviceInfo,
 };
 use yggdrasil_abi::error::Error;
@@ -116,10 +124,12 @@ pub struct NvmeController {
     regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
     admin_q: OneTimeInit<QueuePair>,
     ioqs: OneTimeInit<Vec<QueuePair>>,
-    vector_table: IrqSafeSpinlock<DeviceMemoryIoMut<'static, [MsiXEntry]>>,
+    io_queue_count: AtomicUsize,
     drive_table: IrqSafeSpinlock<BTreeMap<u32, &'static NvmeDrive>>,
     controller_id: OneTimeInit<usize>,
+    vector_table: IrqSafeSpinlock<MsiXVectorTable<'static>>,
+
     doorbell_shift: usize,
 }
@@ -138,7 +148,86 @@ impl Regs {
 }

 impl NvmeController {
+    const ADMIN_QUEUE_SIZE: usize = 32;
+    const IO_QUEUE_SIZE: usize = 32;
+
+    async fn create_queues(&'static self) -> Result<(), NvmeError> {
+        let admin_q = self.admin_q.get();
+        let io_queue_count = self.io_queue_count.load(Ordering::Acquire);
+
+        log::info!(
+            "Creating {} queue pairs for nvme{}",
+            io_queue_count,
+            self.controller_id.get()
+        );
+
+        // Request a CQ/SQ pair for I/O
+        admin_q
+            .request_no_data(SetFeatureRequest::NumberOfQueues(
+                io_queue_count as _,
+                io_queue_count as _,
+            ))
+            .await?;
+
+        let mut queues = Vec::new();
+        for i in 1..=io_queue_count {
+            let id = i as u32;
+
+            let (sq_doorbell, cq_doorbell) = unsafe { self.doorbell_pair(i) };
+            let queue = QueuePair::new(id, i, Self::IO_QUEUE_SIZE, sq_doorbell, cq_doorbell)
+                .map_err(NvmeError::MemoryError)?;
+
+            admin_q
+                .request_no_data(CreateIoCompletionQueue {
+                    id,
+                    vector: id,
+                    size: Self::IO_QUEUE_SIZE,
+                    data: queue.cq_physical_pointer(),
+                })
+                .await?;
+
+            admin_q
+                .request_no_data(CreateIoSubmissionQueue {
+                    id,
+                    cq_id: id,
+                    size: Self::IO_QUEUE_SIZE,
+                    data: queue.sq_physical_pointer(),
+                })
+                .await?;
+
+            queues.push(queue);
+        }
+
+        self.ioqs.init(queues);
+
+        Ok(())
+    }
+
     async fn late_init(&'static self) -> Result<(), NvmeError> {
+        let io_queue_count = cpu_count();
+        self.io_queue_count.store(io_queue_count, Ordering::Release);
+
+        {
+            // Register io_queue_count + 1 vectors
+            // TODO register vectors on different CPUs
+            let mut vt = self.vector_table.lock();
+
+            let range = vt
+                .register_range(
+                    0,
+                    io_queue_count + 1,
+                    message_interrupt_controller(),
+                    InterruptAffinity::Any,
+                    self,
+                )
+                .unwrap();
+
+            // TODO handle different MSI range allocations
+            for (i, msi) in range.iter().enumerate() {
+                assert_eq!(i, msi.vector);
+            }
+        }
+
         register_nvme_controller(self);

         let admin_q = self.admin_q.get();
@@ -148,35 +237,7 @@ impl NvmeController {
         // TODO do something with identify_controller

-        // Request a CQ/SQ pair for I/O
-        admin_q
-            .request_no_data(SetFeatureRequest::NumberOfQueues(1, 1))
-            .await?;
-
-        // Allocate the queue
-        let (sq_doorbell, cq_doorbell) = unsafe { self.doorbell_pair(1) };
-        let io_q =
-            QueuePair::new(1, 0, 32, sq_doorbell, cq_doorbell).map_err(NvmeError::MemoryError)?;
-
-        // Create the queue on the device side
-        admin_q
-            .request_no_data(CreateIoCompletionQueue {
-                id: 1,
-                size: 32,
-                vector: 0,
-                data: io_q.cq_physical_pointer(),
-            })
-            .await?;
-
-        admin_q
-            .request_no_data(CreateIoSubmissionQueue {
-                id: 1,
-                cq_id: 1,
-                size: 32,
-                data: io_q.sq_physical_pointer(),
-            })
-            .await?;
-
-        self.ioqs.init(Vec::from_iter([io_q]));
+        self.create_queues().await?;

         // Identify namespaces
         self.enumerate_namespaces().await?;
@@ -208,7 +269,6 @@ impl NvmeController {
         Ok(())
     }

-    // TODO sane methods for IO
     pub async fn perform_io(
         &'static self,
         nsid: u32,
@@ -216,9 +276,18 @@ impl NvmeController {
         buffer_address: PhysicalAddress,
         direction: IoDirection,
     ) -> Result<(), NvmeError> {
-        let ioq = &self.ioqs.get()[0];
+        let _guard = IrqGuard::acquire();
+        let cpu_index = cpu_index();
+        let ioq = &self.ioqs.get()[cpu_index];

-        log::debug!("{:?} nsid={}, lba={:#x}", direction, nsid, lba);
+        log::debug!(
+            "{:?} ioq #{}, nsid={}, lba={:#x}",
+            direction,
+            cpu_index,
+            nsid,
+            lba
+        );

         let cmd_id = match direction {
             IoDirection::Read => ioq.submit(
                 IoRead {
@@ -254,16 +323,17 @@ impl NvmeController {
 }

 impl MsiHandler for NvmeController {
-    fn handle_msi(&self, _vector: usize) -> bool {
-        // TODO check MSI-X pending bits
-        self.admin_q.get().process_completions();
-        if let Some(qs) = self.ioqs.try_get() {
-            for q in qs {
-                q.process_completions();
-            }
-        }
-        true
-    }
+    fn handle_msi(&self, vector: usize) -> bool {
+        if vector == 0 {
+            self.admin_q.get().process_completions() != 0
+        } else if vector <= self.io_queue_count.load(Ordering::Acquire)
+            && let Some(ioqs) = self.ioqs.try_get()
+        {
+            ioqs[vector - 1].process_completions() != 0
+        } else {
+            false
+        }
+    }
 }

 impl Device for NvmeController {
@@ -283,8 +353,7 @@ impl Device for NvmeController {
             core::hint::spin_loop();
         }

-        let queue_slots = 32;
-        if queue_slots > regs.CAP.read(CAP::MQES) + 1 {
+        if Self::ADMIN_QUEUE_SIZE as u64 > regs.CAP.read(CAP::MQES) + 1 {
             todo!(
                 "queue_slots too big, max = {}",
                 regs.CAP.read(CAP::MQES) + 1
@@ -298,14 +367,16 @@ impl Device for NvmeController {
         let admin_q = QueuePair::new(
             0,
             0,
-            queue_slots as usize,
+            Self::ADMIN_QUEUE_SIZE,
             admin_sq_doorbell,
             admin_cq_doorbell,
         )
         .unwrap();

-        regs.AQA
-            .modify(AQA::ASQS.val(queue_slots as u32 - 1) + AQA::ACQS.val(queue_slots as u32 - 1));
+        regs.AQA.modify(
+            AQA::ASQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1)
+                + AQA::ACQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1),
+        );

         regs.ASQ.set(admin_q.sq_physical_pointer().into_raw());
         regs.ACQ.set(admin_q.cq_physical_pointer().into_raw());
@@ -335,16 +406,6 @@ impl Device for NvmeController {

         self.admin_q.init(admin_q);

-        // Register the IRQs (TODO: use multiple)
-        {
-            let mut vt = self.vector_table.lock();
-
-            // Register vector 0
-            vt[0]
-                .register(message_interrupt_controller(), self)
-                .unwrap();
-        }
-
         // Schedule late_init task
         runtime::spawn(self.late_init())?;
@@ -356,11 +417,6 @@ impl Device for NvmeController {
     }
 }

-// impl FromPciBus for NvmeController {
-//     fn from_pci_bus(info: &PciDeviceInfo) -> Result<Self, Error> {
-//     }
-// }
-
 static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<&'static NvmeController>> =
     IrqSafeSpinlock::new(Vec::new());
@@ -373,9 +429,10 @@ pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
     let mut msix = info.config_space.capability::<MsiXCapability>().unwrap();
     let mut vt = msix.vector_table()?;

-    for vector in vt.iter_mut() {
-        vector.set_masked(true);
-    }
+    // TODO is this really needed? PCI spec says this is masked on reset, though I'm not sure if
+    // firmware puts it back in masked state after loading the kernel
+    vt.mask_all();
+    msix.set_function_mask(false);
     msix.set_enabled(true);

     let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
@@ -394,9 +451,12 @@ pub fn probe(info: &PciDeviceInfo) -> Result<&'static dyn Device, Error> {
         regs: IrqSafeSpinlock::new(regs),
         admin_q: OneTimeInit::new(),
         ioqs: OneTimeInit::new(),
-        vector_table: IrqSafeSpinlock::new(vt),
         drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
         controller_id: OneTimeInit::new(),
+        vector_table: IrqSafeSpinlock::new(vt),
+        io_queue_count: AtomicUsize::new(1),
         doorbell_shift,
     })))
 }


@@ -356,10 +356,10 @@ impl QueuePair {
         command_id
     }

-    pub fn request_no_data<'r, C: Command>(
-        &'r self,
+    pub fn request_no_data<C: Command>(
+        &self,
         req: C,
-    ) -> impl Future<Output = Result<(), CommandError>> + 'r {
+    ) -> impl Future<Output = Result<(), CommandError>> + '_ {
         let command_id = self.submit(req, &[], true);
         self.wait_for_completion(command_id, ())
     }


@@ -1,6 +1,7 @@
 //! PCI capability structures and queries

-use device_api::interrupt::{MessageInterruptController, MsiHandler};
+use alloc::{vec, vec::Vec};
+use device_api::interrupt::{InterruptAffinity, MessageInterruptController, MsiHandler, MsiInfo};
 use kernel_util::mem::{
     address::{FromRaw, PhysicalAddress},
     device::DeviceMemoryIoMut,
@@ -24,6 +25,10 @@ pub struct MsiXEntry {
     pub control: u32,
 }

+pub struct MsiXVectorTable<'a> {
+    vectors: DeviceMemoryIoMut<'a, [MsiXEntry]>,
+}
+
 /// MSI-X capability data structure
 pub struct MsiXData<'s, S: PciConfigurationSpace + ?Sized + 's> {
     space: &'s S,
@@ -45,7 +50,7 @@ impl PciCapability for MsiXCapability {
 impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
     // TODO use pending bits as well
     /// Maps and returns the vector table associated with the device's MSI-X capability
-    pub fn vector_table<'a>(&self) -> Result<DeviceMemoryIoMut<'a, [MsiXEntry]>, Error> {
+    pub fn vector_table<'a>(&self) -> Result<MsiXVectorTable<'a>, Error> {
         let w0 = self.space.read_u16(self.offset + 2);
         let dw1 = self.space.read_u32(self.offset + 4);
@@ -63,7 +68,10 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
         log::debug!("MSI-X table address: {:#x}", base + table_offset);

         unsafe {
-            DeviceMemoryIoMut::map_slice(PhysicalAddress::from_raw(base + table_offset), table_size)
+            MsiXVectorTable::from_raw_parts(
+                PhysicalAddress::from_raw(base + table_offset),
+                table_size,
+            )
         }
     }
@@ -78,28 +86,66 @@ impl<'s, S: PciConfigurationSpace + ?Sized + 's> MsiXData<'s, S> {
         }
         self.space.write_u32(self.offset, w0);
     }
+
+    pub fn set_function_mask(&mut self, masked: bool) {
+        let mut w0 = self.space.read_u32(self.offset);
+
+        if masked {
+            w0 |= 1 << 30;
+        } else {
+            w0 &= !(1 << 30);
+        }
+
+        self.space.write_u32(self.offset, w0);
+    }
+}
+
+impl MsiXVectorTable<'_> {
+    unsafe fn from_raw_parts(base: PhysicalAddress, len: usize) -> Result<Self, Error> {
+        let vectors = DeviceMemoryIoMut::map_slice(base, len)?;
+        Ok(Self { vectors })
+    }
+
+    pub fn mask_all(&mut self) {
+        for vector in self.vectors.iter_mut() {
+            vector.set_masked(true);
+        }
+    }
+
+    pub fn register_range<C: MessageInterruptController + ?Sized>(
+        &mut self,
+        start: usize,
+        end: usize,
+        ic: &C,
+        affinity: InterruptAffinity,
+        handler: &'static dyn MsiHandler,
+    ) -> Result<Vec<MsiInfo>, Error> {
+        assert!(end > start);
+
+        let mut range = vec![
+            MsiInfo {
+                affinity,
+                ..Default::default()
+            };
+            end - start
+        ];
+
+        ic.register_msi_range(&mut range, handler)?;
+
+        for (i, info) in range.iter().enumerate() {
+            let index = i + start;
+            self.vectors[index].address = info.address as _;
+            self.vectors[index].data = info.value;
+            self.vectors[index].set_masked(false);
+        }
+
+        Ok(range)
+    }
 }

 impl MsiXEntry {
     /// If set, prevents the MSI-X interrupt from being delivered
-    pub fn set_masked(&mut self, masked: bool) {
+    fn set_masked(&mut self, masked: bool) {
         if masked {
             self.control |= 1;
         } else {
             self.control &= !1;
         }
     }
-
-    /// Registers the MSI-X vector with the interrupt controller and enables it
-    pub fn register<C: MessageInterruptController + ?Sized>(
-        &mut self,
-        ic: &C,
-        handler: &'static dyn MsiHandler,
-    ) -> Result<(), Error> {
-        let info = ic.register_msi(handler)?;
-
-        self.address = info.address as _;
-        self.data = info.value as _;
-        self.set_masked(false);
-
-        Ok(())
-    }
 }
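For context on the register the new set_function_mask() touches (a reference sketch of the PCI MSI-X capability layout, not code from this repository): the 32-bit read at the capability offset holds the Capability ID and Next Pointer in bits 0..16 and the MSI-X Message Control word (which lives at offset + 2) in bits 16..32, so MSI-X Enable lands at bit 31 of that dword and Function Mask at bit 30.

// Reference only: bit positions as seen through the 32-bit access used above.
const MSIX_ENABLE: u32 = 1 << 31;        // Message Control bit 15, toggled by set_enabled()
const MSIX_FUNCTION_MASK: u32 = 1 << 30; // Message Control bit 14, toggled by set_function_mask()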


@@ -33,11 +33,19 @@ pub struct IrqOptions {
     pub trigger: IrqTrigger,
 }

-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default)]
 pub struct MsiInfo {
     pub address: usize,
     pub value: u32,
     pub vector: usize,
+    pub affinity: InterruptAffinity,
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+pub enum InterruptAffinity {
+    #[default]
+    Any,
+    Specific(usize),
 }

 pub trait InterruptTable {
@@ -45,7 +53,27 @@ pub trait InterruptTable {
 }

 pub trait MessageInterruptController {
-    fn register_msi(&self, handler: &'static dyn MsiHandler) -> Result<MsiInfo, Error>;
+    fn register_msi(
+        &self,
+        affinity: InterruptAffinity,
+        handler: &'static dyn MsiHandler,
+    ) -> Result<MsiInfo, Error> {
+        let mut range = [MsiInfo {
+            affinity,
+            ..Default::default()
+        }];
+        self.register_msi_range(&mut range, handler)?;
+        Ok(range[0])
+    }
+
+    #[allow(unused)]
+    fn register_msi_range(
+        &self,
+        range: &mut [MsiInfo],
+        handler: &'static dyn MsiHandler,
+    ) -> Result<(), Error> {
+        Err(Error::NotImplemented)
+    }

     fn handle_msi(&self, #[allow(unused)] vector: usize) {}
 }


@@ -1,3 +1,4 @@
+#![feature(trait_alias)]
 #![no_std]

 extern crate alloc;


@ -13,6 +13,9 @@ extern "Rust" {
pub fn __acquire_irq_guard() -> bool; pub fn __acquire_irq_guard() -> bool;
pub fn __release_irq_guard(mask: bool); pub fn __release_irq_guard(mask: bool);
pub fn __cpu_index() -> usize;
pub fn __cpu_count() -> usize;
pub fn __allocate_2m_page() -> Result<PhysicalAddress, Error>; pub fn __allocate_2m_page() -> Result<PhysicalAddress, Error>;
pub fn __allocate_page() -> Result<PhysicalAddress, Error>; pub fn __allocate_page() -> Result<PhysicalAddress, Error>;
pub fn __allocate_contiguous_pages(count: usize) -> Result<PhysicalAddress, Error>; pub fn __allocate_contiguous_pages(count: usize) -> Result<PhysicalAddress, Error>;


@@ -27,6 +27,16 @@ pub fn message_interrupt_controller() -> &'static dyn MessageInterruptController
     unsafe { api::__message_interrupt_controller() }
 }

+#[inline]
+pub fn cpu_index() -> usize {
+    unsafe { api::__cpu_index() }
+}
+
+#[inline]
+pub fn cpu_count() -> usize {
+    unsafe { api::__cpu_count() }
+}
+
 #[repr(C)]
 pub struct AlignedTo<Align, Bytes: ?Sized> {
     pub align: [Align; 0],


@@ -117,6 +117,26 @@ impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
         Ok(DeviceMemoryIo { inner, value })
     }

+    /// Maps a physical address as device memory of type `[T]`.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure the address actually points to a value of type `T`, as well as
+    /// proper access synchronization. The caller must also ensure the `len` is valid.
+    pub unsafe fn map_slice(
+        base: PhysicalAddress,
+        count: usize,
+    ) -> Result<DeviceMemoryIo<'a, [T]>, Error> {
+        let layout = Layout::array::<T>(count).unwrap();
+        let inner = RawDeviceMemoryMapping::map(base, layout.size())?;
+        let value = core::slice::from_raw_parts(inner.address as *mut T, count);
+
+        Ok(DeviceMemoryIo {
+            inner: Arc::new(inner),
+            value,
+        })
+    }
+
     /// Maps a physical address as device memory of type `T`.
     ///
     /// # Safety


@@ -120,6 +120,11 @@ impl<T: ?Sized> PageBox<T> {
 }

 impl<T> PageBox<MaybeUninit<T>> {
+    /// Consumes the [PageBox], returning a new one with [MaybeUninit] removed.
+    ///
+    /// # Safety
+    ///
+    /// See [MaybeUninit::assume_init_mut].
     pub unsafe fn assume_init(self) -> PageBox<T> {
         // SAFETY: Memory-safe, as:
         //  1. MaybeUninit<T> is transparent
@@ -135,6 +140,11 @@ impl<T> PageBox<MaybeUninit<T>> {
 }

 impl<T> PageBox<[MaybeUninit<T>]> {
+    /// Consumes the [PageBox], returning a new one with [MaybeUninit] removed.
+    ///
+    /// # Safety
+    ///
+    /// See [MaybeUninit::slice_assume_init_mut].
     pub unsafe fn assume_init_slice(self) -> PageBox<[T]> {
         // SAFETY: Memory-safe, as:
         //  1. MaybeUninit<T> is transparent
@@ -147,10 +157,20 @@ impl<T> PageBox<[MaybeUninit<T>]> {
         PageBox { value, page_count }
     }

+    /// Returns a reference to the slice data with [MaybeUninit] removed.
+    ///
+    /// # Safety
+    ///
+    /// See [MaybeUninit::slice_assume_init_ref]
     pub unsafe fn assume_init_slice_ref(&self) -> &[T] {
         MaybeUninit::slice_assume_init_ref(self.deref())
     }

+    /// Returns a mutable reference to the slice data with [MaybeUninit] removed.
+    ///
+    /// # Safety
+    ///
+    /// See [MaybeUninit::slice_assume_init_mut]
     pub unsafe fn assume_init_slice_mut(&mut self) -> &mut [T] {
         MaybeUninit::slice_assume_init_mut(self.deref_mut())
     }


@@ -10,16 +10,16 @@ pub trait EntryLevel: Copy {

 #[const_trait]
 pub trait EntryLevelExt: Sized {
-    fn page_index<T: EntryLevel>(self) -> usize;
-    fn page_offset<T: EntryLevel>(self) -> usize;
-    fn page_count<T: EntryLevel>(self) -> usize;
-    fn page_align_up<T: EntryLevel>(self) -> Self;
-    fn page_align_down<T: EntryLevel>(self) -> Self;
-    fn is_page_aligned_for<T: EntryLevel>(self) -> bool;
+    fn page_index<T: EntryLevel>(&self) -> usize;
+    fn page_offset<T: EntryLevel>(&self) -> usize;
+    fn page_count<T: EntryLevel>(&self) -> usize;
+    fn page_align_up<T: EntryLevel>(&self) -> Self;
+    fn page_align_down<T: EntryLevel>(&self) -> Self;
+    fn is_page_aligned_for<T: EntryLevel>(&self) -> bool;
 }

 #[const_trait]
-trait AddressLike: Sized {
+trait AddressLike: Sized + Copy {
     fn into_usize(self) -> usize;
     fn from_usize(v: usize) -> Self;
 }
@@ -48,32 +48,32 @@ impl const AddressLike for PhysicalAddress {

 impl<T: ~const AddressLike> const EntryLevelExt for T {
     #[inline(always)]
-    fn page_index<L: EntryLevel>(self) -> usize {
+    fn page_index<L: EntryLevel>(&self) -> usize {
         (self.into_usize() >> L::SHIFT) & 0x1FF
     }

     #[inline(always)]
-    fn page_offset<L: EntryLevel>(self) -> usize {
+    fn page_offset<L: EntryLevel>(&self) -> usize {
         self.into_usize() & (L::SIZE - 1)
     }

     #[inline(always)]
-    fn page_count<L: EntryLevel>(self) -> usize {
+    fn page_count<L: EntryLevel>(&self) -> usize {
         (self.into_usize() + L::SIZE - 1) / L::SIZE
     }

     #[inline(always)]
-    fn page_align_up<L: EntryLevel>(self) -> Self {
+    fn page_align_up<L: EntryLevel>(&self) -> Self {
         Self::from_usize((self.into_usize() + L::SIZE - 1) & !(L::SIZE - 1))
     }

     #[inline(always)]
-    fn page_align_down<L: EntryLevel>(self) -> Self {
+    fn page_align_down<L: EntryLevel>(&self) -> Self {
         Self::from_usize(self.into_usize() & !(L::SIZE - 1))
     }

     #[inline(always)]
-    fn is_page_aligned_for<L: EntryLevel>(self) -> bool {
+    fn is_page_aligned_for<L: EntryLevel>(&self) -> bool {
         self.page_offset::<L>() == 0
     }
 }


@@ -149,11 +149,18 @@ impl<T> IrqSafeSpinlock<T> {
 }

 impl<T: Clone> IrqSafeSpinlock<T> {
-    pub fn cloned(&self) -> T {
+    pub fn get_cloned(&self) -> T {
         self.lock().clone()
     }
 }

+impl<T: Clone> Clone for IrqSafeSpinlock<T> {
+    fn clone(&self) -> Self {
+        let inner = self.lock();
+        IrqSafeSpinlock::new(inner.clone())
+    }
+}
+
 impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> {
     type Target = T;


@@ -42,11 +42,13 @@ impl Termination for () {
 }

 impl Thread {
-    pub fn spawn<S: Into<String>, F: FnOnce() + Send + 'static>(
+    pub fn spawn<S: Into<String>, T: Termination, F: FnOnce() -> T + Send + 'static>(
         name: S,
         f: F,
     ) -> Result<(), Error> {
-        extern "C" fn closure_wrapper<F: FnOnce() + Send + 'static>(closure_addr: usize) -> ! {
+        extern "C" fn closure_wrapper<T: Termination, F: FnOnce() -> T + Send + 'static>(
+            closure_addr: usize,
+        ) -> ! {
             let closure = unsafe { Box::from_raw(closure_addr as *mut F) };
             let result = closure();
             Thread::current().exit(result.into_exit_code());
@@ -58,7 +60,7 @@ impl Thread {
         let thread = unsafe {
             api::__create_kthread(
                 name.into(),
-                closure_wrapper::<F>,
+                closure_wrapper::<T, F>,
                 Box::into_raw(closure) as usize,
             )
         }?;


@@ -34,7 +34,7 @@ use kernel_util::mem::{
     address::PhysicalAddress, device::RawDeviceMemoryMapping, table::EntryLevel,
 };

-use crate::mem::phys::PhysicalMemoryRegion;
+use crate::{mem::phys::PhysicalMemoryRegion, task::Cpu};

 cfg_if! {
     if #[cfg(target_arch = "aarch64")] {
@@ -245,6 +245,16 @@ fn __release_irq_guard(mask: bool) {
     }
 }

+#[no_mangle]
+fn __cpu_index() -> usize {
+    Cpu::local_id() as _
+}
+
+#[no_mangle]
+fn __cpu_count() -> usize {
+    ArchitectureImpl::cpu_count()
+}
+
 #[no_mangle]
 fn __virtualize(addr: u64) -> usize {
     ArchitectureImpl::virtualize(addr)


@@ -2,11 +2,11 @@
 use core::sync::atomic::Ordering;

 use abi::error::Error;
-use alloc::vec::Vec;
+use alloc::{vec, vec::Vec};
 use device_api::{
     interrupt::{
-        IpiDeliveryTarget, LocalInterruptController, MessageInterruptController, MsiHandler,
-        MsiInfo,
+        InterruptAffinity, IpiDeliveryTarget, LocalInterruptController, MessageInterruptController,
+        MsiHandler, MsiInfo,
     },
     Device,
 };
@@ -16,7 +16,7 @@ use kernel_util::{
         device::DeviceMemoryIo,
         table::EntryLevelExt,
     },
-    sync::IrqSafeSpinlock,
+    sync::{IrqGuard, IrqSafeSpinlock},
     util::OneTimeInit,
 };
 use tock_registers::{
@@ -37,6 +37,7 @@ use crate::{

 use super::{
     APIC_IPI_VECTOR, APIC_LINT0_VECTOR, APIC_LINT1_VECTOR, APIC_SPURIOUS_VECTOR, APIC_TIMER_VECTOR,
+    MAX_MSI_VECTORS,
 };

 const TIMER_INTERVAL: u32 = 150000;
@@ -145,7 +146,8 @@ register_structs! {
 /// Per-processor local APIC interface
 pub struct LocalApic {
     regs: DeviceMemoryIo<'static, Regs>,
-    msi_vectors: IrqSafeSpinlock<Vec<&'static dyn MsiHandler>>,
+    id: u32,
+    msi_vectors: Vec<IrqSafeSpinlock<Vec<&'static dyn MsiHandler>>>,
 }

 unsafe impl Send for LocalApic {}
@@ -160,10 +162,11 @@ impl Device for LocalApic {
 impl MessageInterruptController for LocalApic {
     fn handle_msi(&self, vector: usize) {
         // TODO this is ugly
+        let row = &self.msi_vectors[vector];
         let mut i = 0;

         loop {
-            let table = self.msi_vectors.lock();
+            let table = row.lock();
             let Some(&handler) = table.get(i) else {
                 break;
             };
@@ -177,24 +180,42 @@ impl MessageInterruptController for LocalApic {
         }
     }

-    fn register_msi(&self, handler: &'static dyn MsiHandler) -> Result<MsiInfo, Error> {
-        // TODO only 1 ISR vector allocated for MSIs
-        let vector = 0;
-        let mut table = self.msi_vectors.lock();
-
-        table.push(handler);
-
-        // TODO magic numbers
-        let apic_vector = 32 + APIC_MSI_OFFSET + vector;
-
-        let value = apic_vector;
-        let address = Self::base();
-
-        Ok(MsiInfo {
-            address: address.into_raw(),
-            value,
-            vector: vector as _,
-        })
+    fn register_msi_range(
+        &self,
+        range: &mut [MsiInfo],
+        handler: &'static dyn MsiHandler,
+    ) -> Result<(), Error> {
+        let _guard = IrqGuard::acquire();
+
+        // TODO fill smallest vectors first
+        // TODO don't ignore affinity
+        for (i, msi) in range.iter_mut().enumerate() {
+            let row = &self.msi_vectors[i];
+            let mut row = row.lock();
+
+            row.push(handler);
+
+            infoln!(
+                "Bind {}:{} -> apic{}:msi{}",
+                handler.display_name(),
+                i,
+                self.id,
+                i
+            );
+
+            let value = 32 + APIC_MSI_OFFSET + i as u32;
+            let address = IntoRaw::<usize>::into_raw(Self::base()) | ((self.id as usize) << 12);
+
+            *msi = MsiInfo {
+                address,
+                value,
+                vector: i,
+                affinity: InterruptAffinity::Specific(self.id as _),
+            };
+        }
+
+        Ok(())
     }
 }
@@ -280,9 +301,12 @@ impl LocalApic {
             LocalVectorEntry::Mask::Masked + LocalVectorEntry::Vector.val(APIC_LINT1_VECTOR + 32),
         );

+        let msi_vectors = vec![IrqSafeSpinlock::new(Vec::new()); MAX_MSI_VECTORS as _];
+
         Self {
+            id,
             regs,
-            msi_vectors: IrqSafeSpinlock::new(Vec::new()),
+            msi_vectors,
         }
     }
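As a worked example of the address/data encoding in register_msi_range() above (assuming Self::base() is the architectural x86 MSI base 0xFEE0_0000, which this diff does not show): the destination LAPIC id is shifted into bits 12 and up of the message address, and the data word selects the ISR vector.

// Sketch only: reproduces the arithmetic from register_msi_range() above.
// APIC_MSI_OFFSET is kept as a parameter because its value is defined
// elsewhere in the kernel, not in this diff.
fn msi_entry(apic_id: usize, slot: u32, apic_msi_offset: u32) -> (usize, u32) {
    let address = 0xFEE0_0000usize | (apic_id << 12); // e.g. apic_id 1 -> 0xFEE0_1000
    let value = 32 + apic_msi_offset + slot;          // ISR vector = 32 + APIC_MSI_OFFSET + slot
    (address, value)
}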


@@ -78,7 +78,7 @@ unsafe extern "C" fn irq_handler(vector: usize, frame: *mut IrqFrame) {
 }

 unsafe extern "C" fn msi_handler(vector: usize, frame: *mut IrqFrame) {
-    if vector != 0 {
+    if vector >= MAX_MSI_VECTORS as _ {
         todo!("Got a weird MSI with vector {}", vector);
     }