#![feature(const_trait_impl, let_chains, if_let_guard, maybe_uninit_slice)]
#![allow(missing_docs)]
#![no_std]
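
//! NVM Express (NVMe) PCI storage controller driver.
//!
//! The driver maps the controller register block from PCI BAR0, brings up the
//! admin queue pair, then creates one I/O queue pair per CPU and registers
//! each active namespace as a block device in `devfs`.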

extern crate alloc;

use core::{
    mem::size_of,
    sync::atomic::{AtomicUsize, Ordering},
    time::Duration,
};

use alloc::{collections::BTreeMap, format, sync::Arc, vec::Vec};
use command::{IdentifyActiveNamespaceIdListRequest, IdentifyControllerRequest};
use device_api::{
    device::Device,
    interrupt::{InterruptAffinity, InterruptHandler},
};
use drive::NvmeNamespace;
use libk::{
    device::manager::probe_partitions,
    fs::devfs,
    task::{cpu_count, cpu_index, runtime},
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo, L3_PAGE_SIZE};
use libk_util::{
    sync::{IrqGuard, IrqSafeSpinlock},
    OneTimeInit,
};
use queue::PrpList;
use tock_registers::{
    interfaces::{ReadWriteable, Readable, Writeable},
    register_bitfields, register_structs,
    registers::{ReadOnly, ReadWrite, WriteOnly},
};
use ygg_driver_pci::{
    device::{PciDeviceInfo, PreferredInterruptMode},
    PciCommandRegister, PciConfigurationSpace,
};
use yggdrasil_abi::{error::Error, io::FileMode};

use crate::{
    command::{IoRead, IoWrite},
    queue::{CompletionQueueEntry, SubmissionQueueEntry},
};

use self::{
    command::{CreateIoCompletionQueue, CreateIoSubmissionQueue, SetFeatureRequest},
    error::NvmeError,
    queue::QueuePair,
};

mod command;
mod drive;
mod error;
mod queue;

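/// Upper bound on the number of memory pages a single I/O request may reference.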
pub const MAX_PAGES_PER_REQUEST: usize = 256;
// Use the host (L3) page size
pub const PAGE_SIZE: usize = L3_PAGE_SIZE;

register_bitfields! {
    u32,
    CC [
        IOCQES OFFSET(20) NUMBITS(4) [],
        IOSQES OFFSET(16) NUMBITS(4) [],
        AMS OFFSET(11) NUMBITS(3) [],
        MPS OFFSET(7) NUMBITS(4) [],
        CSS OFFSET(4) NUMBITS(3) [
            NvmCommandSet = 0
        ],
        ENABLE OFFSET(0) NUMBITS(1) [],
    ],
    CSTS [
        CFS OFFSET(1) NUMBITS(1) [],
        RDY OFFSET(0) NUMBITS(1) [],
    ],
    AQA [
        /// Admin Completion Queue Size in entries - 1
        ACQS OFFSET(16) NUMBITS(12) [],
        /// Admin Submission Queue Size in entries - 1
        ASQS OFFSET(0) NUMBITS(12) [],
    ]
}

register_bitfields! {
    u64,
    CAP [
        /// Maximum Queue Entries Supported, minus 1 (0 means a maximum queue length of 1, 1 means 2, etc.)
        MQES OFFSET(0) NUMBITS(16) [],
        /// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
        /// change its state.
        TO OFFSET(24) NUMBITS(8) [],
        /// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
        DSTRD OFFSET(32) NUMBITS(4) [],
        /// NVM Subsystem Reset Supported (see NVMe Base Specification, Section 3.7.1)
        NSSRS OFFSET(36) NUMBITS(1) [],
        /// Controller supports one or more I/O command sets
        CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
        /// Controller only supports admin commands and no I/O commands
        CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
        /// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
        MPSMIN OFFSET(48) NUMBITS(4) [],
        /// Memory page size maximum (bytes = pow(2, 12 + MPSMAX))
        MPSMAX OFFSET(52) NUMBITS(4) [],
    ]
}

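// NVMe controller register block, mapped from PCI BAR0. Offsets follow the
// "Controller Registers" layout of the NVMe Base Specification; the doorbell
// area begins at offset 0x1000 and is accessed via `Regs::doorbell_ptr`.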
register_structs! {
    #[allow(non_snake_case)]
    Regs {
        (0x00 => CAP: ReadOnly<u64, CAP::Register>),
        (0x08 => VS: ReadOnly<u32>),
        (0x0C => INTMS: WriteOnly<u32>),
        (0x10 => INTMC: WriteOnly<u32>),
        (0x14 => CC: ReadWrite<u32, CC::Register>),
        (0x18 => _0),
        (0x1C => CSTS: ReadOnly<u32, CSTS::Register>),
        (0x20 => _1),
        (0x24 => AQA: ReadWrite<u32, AQA::Register>),
        (0x28 => ASQ: ReadWrite<u64>),
        (0x30 => ACQ: ReadWrite<u64>),
        (0x38 => _2),
        (0x2000 => @END),
    }
}

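/// Per-controller driver state: the mapped register block, admin and per-CPU
/// I/O queue pairs, and the table of namespaces discovered on this controller.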
pub struct NvmeController {
    regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
    admin_q: OneTimeInit<QueuePair>,
    ioqs: OneTimeInit<Vec<QueuePair>>,
    io_queue_count: AtomicUsize,
    drive_table: IrqSafeSpinlock<BTreeMap<u32, Arc<NvmeNamespace>>>,
    controller_id: OneTimeInit<u32>,

    pci: PciDeviceInfo,

    doorbell_shift: usize,
    min_page_size: usize,
}

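/// Direction of a block I/O transfer.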
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum IoDirection {
    Read,
    Write,
}

impl Regs {
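    /// Returns a raw pointer to the doorbell register for queue `queue_index`.
    ///
    /// Doorbell registers start at offset `0x1000` from the register base;
    /// `shift` is the per-queue stride exponent derived from `CAP.DSTRD`, and
    /// `completion` selects the completion-queue doorbell rather than the
    /// submission-queue one.
    ///
    /// # Safety
    /// `queue_index` must refer to a queue within the mapped doorbell region,
    /// and the returned pointer must only be used for doorbell accesses.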
    unsafe fn doorbell_ptr(&self, shift: usize, completion: bool, queue_index: usize) -> *mut u32 {
        let doorbell_base = (self as *const Regs as *mut Regs).addr() + 0x1000;
        let offset = ((queue_index << shift) + completion as usize) * 4;
        (doorbell_base + offset) as *mut u32
    }
}

impl NvmeController {
    const ADMIN_QUEUE_SIZE: usize = 32;
    const IO_QUEUE_SIZE: usize = 32;

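    /// Creates one I/O submission/completion queue pair per configured I/O
    /// queue: first requests the queue count via `Set Features (Number of
    /// Queues)`, then issues `Create I/O Completion Queue` and `Create I/O
    /// Submission Queue` admin commands for each pair.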
    async fn create_queues(&self) -> Result<(), NvmeError> {
        let admin_q = self.admin_q.get();
        let io_queue_count = self.io_queue_count.load(Ordering::Acquire);

        log::info!(
            "Creating {} queue pairs for nvme{}",
            io_queue_count,
            self.controller_id.get()
        );

        // Request a CQ/SQ pair for I/O
        admin_q
            .request_no_data(SetFeatureRequest::NumberOfQueues(
                io_queue_count as _,
                io_queue_count as _,
            ))
            .await?;

        let mut queues = Vec::new();
        for i in 1..=io_queue_count {
            let id = i as u32;

            let (sq_doorbell, cq_doorbell) = unsafe { self.doorbell_pair(i) };
            let queue = QueuePair::new(id, i, Self::IO_QUEUE_SIZE, sq_doorbell, cq_doorbell)
                .map_err(NvmeError::MemoryError)?;

            admin_q
                .request_no_data(CreateIoCompletionQueue {
                    id,
                    vector: id,
                    size: Self::IO_QUEUE_SIZE,
                    data: queue.cq_physical_pointer(),
                })
                .await?;

            admin_q
                .request_no_data(CreateIoSubmissionQueue {
                    id,
                    cq_id: id,
                    size: Self::IO_QUEUE_SIZE,
                    data: queue.sq_physical_pointer(),
                })
                .await?;

            queues.push(queue);
        }

        self.ioqs.init(queues);

        Ok(())
    }

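    /// Deferred initialization, run on the async runtime after `init()`:
    /// registers the controller, allocates one interrupt vector per I/O queue
    /// plus one for the admin queue, identifies the controller, creates the
    /// I/O queue pairs and enumerates active namespaces.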
    async fn late_init(self: Arc<Self>) -> Result<(), NvmeError> {
        register_nvme_controller(self.clone());

        let io_queue_count = cpu_count();
        self.io_queue_count.store(io_queue_count, Ordering::Release);

        {
            let range = self
                .pci
                .map_interrupt_multiple(0..io_queue_count + 1, InterruptAffinity::Any, self.clone())
                .unwrap();

            // TODO handle different MSI range allocations
            for (i, msi) in range.iter().enumerate() {
                assert_eq!(i, msi.vector);
            }
        }

        let admin_q = self.admin_q.get();

        // Identify the controller
        let identify = admin_q.request(IdentifyControllerRequest).await?;

        let max_transfer_size = if identify.mdts == 0 {
            // Pick some sane default value
            256 * self.min_page_size
        } else {
            (1 << identify.mdts) * self.min_page_size
        };

        self.create_queues().await?;

        // Identify namespaces
        self.enumerate_namespaces(max_transfer_size).await?;

        Ok(())
    }

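    /// Requests the active namespace ID list and creates an `NvmeNamespace`
    /// for each reported namespace, adding it to the drive table.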
    async fn enumerate_namespaces(
        self: &Arc<Self>,
        max_transfer_size: usize,
    ) -> Result<(), NvmeError> {
        let admin_q = self.admin_q.get();

        let namespaces = admin_q
            .request(IdentifyActiveNamespaceIdListRequest { start_id: 0 })
            .await?;

        // A zero entry terminates the list; if none is present, every slot is active
        let count = namespaces
            .entries
            .iter()
            .position(|&x| x == 0)
            .unwrap_or(namespaces.entries.len());
        let list = &namespaces.entries[..count];

        for &nsid in list {
            match NvmeNamespace::create(self.clone(), nsid, max_transfer_size).await {
                Ok(drive) => {
                    self.drive_table.lock().insert(nsid, drive);
                }
                Err(error) => {
                    log::warn!("Could not create nvme drive, nsid={}: {:?}", nsid, error);
                }
            }
        }

        Ok(())
    }

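    /// Performs a block read or write of `lba_count` logical blocks starting
    /// at `lba` on namespace `nsid`, submitting the command on the I/O queue
    /// pair assigned to the current CPU and awaiting its completion.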
    pub async fn perform_io(
        &self,
        nsid: u32,
        lba: u64,
        lba_count: usize,
        buffer_address: PhysicalAddress,
        transfer_size: usize,
        direction: IoDirection,
    ) -> Result<(), NvmeError> {
        let prp_list = PrpList::from_buffer(buffer_address, transfer_size)?;

        let _guard = IrqGuard::acquire();
        let cpu_index = cpu_index();
        let ioq = &self.ioqs.get()[cpu_index as usize];

        let cmd_id = match direction {
            IoDirection::Read => ioq.submit(
                IoRead {
                    nsid,
                    lba,
                    count: lba_count as _,
                },
                &prp_list,
                true,
            )?,
            IoDirection::Write => ioq.submit(
                IoWrite {
                    nsid,
                    lba,
                    count: lba_count as _,
                },
                &prp_list,
                true,
            )?,
        };

        ioq.wait_for_completion(cmd_id, ()).await?;

        Ok(())
    }

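    /// Returns the `(submission, completion)` doorbell pointer pair for queue `idx`.
    ///
    /// # Safety
    /// Same requirements as `Regs::doorbell_ptr`.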
    unsafe fn doorbell_pair(&self, idx: usize) -> (*mut u32, *mut u32) {
        let regs = self.regs.lock();
        let sq_ptr = regs.doorbell_ptr(self.doorbell_shift, false, idx);
        let cq_ptr = regs.doorbell_ptr(self.doorbell_shift, true, idx);
        (sq_ptr, cq_ptr)
    }
}

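// Interrupt vector 0 services the admin queue; vectors 1..=io_queue_count
// service the corresponding I/O queue pairs.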
impl InterruptHandler for NvmeController {
    fn handle_irq(self: Arc<Self>, vector: Option<usize>) -> bool {
        let vector = vector.expect("Only MSI-X interrupts are supported");

        if vector == 0 {
            self.admin_q.get().process_completions() != 0
        } else if vector <= self.io_queue_count.load(Ordering::Acquire)
            && let Some(ioqs) = self.ioqs.try_get()
        {
            ioqs[vector - 1].process_completions() != 0
        } else {
            false
        }
    }
}

impl Device for NvmeController {
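    /// Brings the controller up: waits for `CSTS.RDY` to clear (the controller
    /// is disabled in `probe`), programs the admin queue (AQA/ASQ/ACQ), selects
    /// the NVM command set and queue entry sizes in `CC`, enables the
    /// controller and waits for it to become ready, then schedules `late_init`
    /// on the async runtime.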
    unsafe fn init(self: Arc<Self>) -> Result<(), Error> {
        let regs = self.regs.lock();

        let timeout = Duration::from_millis(regs.CAP.read(CAP::TO) * 500);
        log::debug!("Worst-case timeout: {:?}", timeout);

        while regs.CSTS.matches_all(CSTS::RDY::SET) {
            core::hint::spin_loop();
        }

        if Self::ADMIN_QUEUE_SIZE as u64 > regs.CAP.read(CAP::MQES) + 1 {
            todo!(
                "queue_slots too big, max = {}",
                regs.CAP.read(CAP::MQES) + 1
            );
        }

        // Set up the admin queue (index 0)
        let admin_sq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, false, 0) };
        let admin_cq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, true, 0) };
        log::debug!("sq_doorbell for adminq = {:p}", admin_sq_doorbell);
        let admin_q = QueuePair::new(
            0,
            0,
            Self::ADMIN_QUEUE_SIZE,
            admin_sq_doorbell,
            admin_cq_doorbell,
        )
        .unwrap();

        regs.AQA.modify(
            AQA::ASQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1)
                + AQA::ACQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1),
        );
        regs.ASQ.set(admin_q.sq_physical_pointer().into());
        regs.ACQ.set(admin_q.cq_physical_pointer().into());

        // Configure the controller
        const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
        const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();

        regs.CC.modify(
            CC::IOCQES.val(IOCQES)
                + CC::IOSQES.val(IOSQES)
                + CC::MPS.val(0)
                + CC::CSS::NvmCommandSet,
        );

        // Enable the controller
        regs.CC.modify(CC::ENABLE::SET);

        log::debug!("Controller enabled, waiting for CSTS.RDY");

        while !regs.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
            core::hint::spin_loop();
        }

        if regs.CSTS.matches_all(CSTS::CFS::SET) {
            todo!("CFS set after reset!");
        }

        self.admin_q.init(admin_q);

        // Schedule late_init task
        runtime::spawn(self.clone().late_init())?;

        Ok(())
    }

    fn display_name(&self) -> &str {
        "NVM Express Controller"
    }
}

// TODO
unsafe impl Sync for NvmeController {}

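/// Registered NVMe controllers; a controller's index in this list is used as its ID.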
static NVME_CONTROLLERS: IrqSafeSpinlock<Vec<Arc<NvmeController>>> =
    IrqSafeSpinlock::new(Vec::new());

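/// PCI probe entry point: maps BAR0, enables memory access and bus mastering,
/// disables the controller and constructs the `NvmeController` device instance.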
pub fn probe(info: &PciDeviceInfo) -> Result<Arc<dyn Device>, Error> {
    let bar0 = info
        .config_space
        .bar(0)
        .unwrap()
        .as_memory()
        .expect("Expected a memory BAR0");

    info.init_interrupts(PreferredInterruptMode::Msi)?;

    let mut cmd = PciCommandRegister::from_bits_retain(info.config_space.command());
    cmd &= !(PciCommandRegister::DISABLE_INTERRUPTS | PciCommandRegister::ENABLE_IO);
    cmd |= PciCommandRegister::ENABLE_MEMORY | PciCommandRegister::BUS_MASTER;
    info.config_space.set_command(cmd.bits());

    let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;

    // Disable the controller
    regs.CC.modify(CC::ENABLE::CLEAR);

    let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
    let min_page_size = 1 << (regs.CAP.read(CAP::MPSMIN) + 12);

    if min_page_size > PAGE_SIZE {
        log::error!("Cannot support NVMe HC: min page size ({min_page_size}) > host page size ({PAGE_SIZE})");
        return Err(Error::InvalidArgument);
    }

    let device = NvmeController {
        regs: IrqSafeSpinlock::new(regs),
        admin_q: OneTimeInit::new(),
        ioqs: OneTimeInit::new(),
        drive_table: IrqSafeSpinlock::new(BTreeMap::new()),
        controller_id: OneTimeInit::new(),

        pci: info.clone(),

        io_queue_count: AtomicUsize::new(1),
        doorbell_shift,
        min_page_size,
    };

    Ok(Arc::new(device))
}

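/// Adds `controller` to the global controller list and assigns it the next
/// sequential controller ID.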
pub fn register_nvme_controller(controller: Arc<NvmeController>) {
    let mut list = NVME_CONTROLLERS.lock();
    let id = list.len();
    list.push(controller.clone());
    controller.controller_id.init(id as u32);
}

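/// Registers `namespace` as a named block device (`nvme<ctrl>n<nsid>`) in
/// devfs and, if `probe` is set, asynchronously scans it for partitions,
/// registering each one as `<name>p1`, `<name>p2`, etc.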
pub fn register_nvme_namespace(namespace: Arc<NvmeNamespace>, probe: bool) {
    let name = format!("nvme{}n{}", namespace.controller_id(), namespace.id());
    log::info!("Register NVMe namespace: {name}");
    devfs::add_named_block_device(namespace.clone(), name.clone(), FileMode::new(0o600)).ok();

    if probe {
        runtime::spawn(async move {
            let name = name;
            log::info!("Probing partitions for {name}");
            probe_partitions(namespace, |index, partition| {
                let partition_name = format!("{name}p{}", index + 1);
                devfs::add_named_block_device(
                    Arc::new(partition),
                    partition_name,
                    FileMode::new(0o600),
                )
                .ok();
            })
            .await
            .inspect_err(|error| log::error!("{name}: partition probe failed: {error:?}"))
        })
        .ok();
    }
}