nvme: better prp list

Mark Poliakov 2024-12-05 19:25:18 +02:00
parent 0be9d86344
commit f79cae5368
9 changed files with 91 additions and 75 deletions

View File

@@ -93,28 +93,49 @@ pub struct QueuePair {
     inner: IrqSafeSpinlock<Inner>,
 }
 
-pub enum PrpList {
-    None,
-    One(PhysicalAddress),
-    Two(PhysicalAddress, PhysicalAddress),
-    List(PhysicalAddress, PageBox<[PhysicalAddress]>),
+pub struct PrpList {
+    prp1: PhysicalRegionPage,
+    prp2: PhysicalRegionPage,
+    list: Option<PageBox<[PhysicalAddress]>>,
 }
 
 impl PrpList {
+    pub const fn empty() -> Self {
+        Self {
+            prp1: PhysicalRegionPage::null(),
+            prp2: PhysicalRegionPage::null(),
+            list: None,
+        }
+    }
+
     pub fn from_buffer(base: PhysicalAddress, size: usize) -> Result<Self, NvmeError> {
         // TODO hardcoded page size
         if base.into_u64() % 0x1000 != 0 {
             todo!();
         }
 
         match size {
-            0 => Ok(Self::None),
-            _ if size <= 0x1000 => Ok(Self::One(base)),
-            _ if size <= 0x2000 => Ok(Self::Two(base, base.add(0x1000))),
+            0 => Ok(Self::empty()),
+            _ if size <= 0x1000 => Ok(Self {
+                prp1: PhysicalRegionPage::with_addr(base),
+                prp2: PhysicalRegionPage::null(),
+                list: None,
+            }),
+            _ if size <= 0x2000 => Ok(Self {
+                prp1: PhysicalRegionPage::with_addr(base),
+                prp2: PhysicalRegionPage::with_addr(base.add(0x1000)),
+                list: None,
+            }),
             _ => {
                 let count = (size + 0xFFF) / 0x1000;
                 let list = PageBox::new_slice_with(|i| base.add((i + 1) * 0x1000), count - 1)
                     .map_err(NvmeError::MemoryError)?;
-                Ok(Self::List(base, list))
+                Ok(Self {
+                    prp1: PhysicalRegionPage::with_addr(base),
+                    prp2: PhysicalRegionPage::with_addr(unsafe { list.as_physical_address() }),
+                    list: Some(list),
+                })
             }
         }
     }
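For context on what the new struct encodes: per the NVMe spec, a command's data pointer is a pair of Physical Region Page (PRP) entries. PRP1 points at the first page of the transfer; PRP2 is null for a one-page transfer, points at the second page for a two-page transfer, and for anything larger holds the physical address of a PRP list whose entries cover the remaining pages. A minimal standalone sketch of that last rule, assuming 4 KiB pages and a page-aligned, physically contiguous buffer (illustration only, not this driver's API):

    // Entries for the PRP list used when a transfer spans more than two
    // pages: PRP1 covers page 0, the list covers pages 1..n, and PRP2
    // holds the physical address of the list itself.
    fn prp_list_entries(base: u64, size: usize) -> Vec<u64> {
        const PAGE: u64 = 0x1000;
        let pages = (size as u64).div_ceil(PAGE);
        (1..pages).map(|i| base + i * PAGE).collect()
    }

This is exactly the list `from_buffer` builds with `PageBox::new_slice_with`, which keeps the entries in device-visible memory so `prp2` can carry their physical address.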
@@ -339,25 +360,8 @@ impl QueuePair {
         let mut inner = self.inner.lock();
         let mut sqe = SubmissionQueueEntry::zeroed();
 
-        match ranges {
-            PrpList::None => {
-                sqe.data_pointer[0] = PhysicalRegionPage::null();
-                sqe.data_pointer[1] = PhysicalRegionPage::null();
-            }
-            &PrpList::One(buf0) => {
-                sqe.data_pointer[0] = PhysicalRegionPage::with_addr(buf0);
-                sqe.data_pointer[1] = PhysicalRegionPage::null();
-            }
-            &PrpList::Two(buf0, buf1) => {
-                sqe.data_pointer[0] = PhysicalRegionPage::with_addr(buf0);
-                sqe.data_pointer[1] = PhysicalRegionPage::with_addr(buf1);
-            }
-            PrpList::List(buf0, list) => {
-                sqe.data_pointer[0] = PhysicalRegionPage::with_addr(*buf0);
-                sqe.data_pointer[1] =
-                    PhysicalRegionPage::with_addr(unsafe { list.as_physical_address() });
-            }
-        }
+        sqe.data_pointer[0] = ranges.prp1;
+        sqe.data_pointer[1] = ranges.prp2;
 
         cmd.fill_sqe(&mut sqe);
@@ -374,7 +378,7 @@ impl QueuePair {
     }
 
     pub async fn request_no_data<C: Command>(&self, req: C) -> Result<(), NvmeError> {
-        let list = PrpList::None;
+        let list = PrpList::empty();
         let command_id = self.submit(req, &list, true)?;
         self.wait_for_completion(command_id, ())
             .await
@@ -399,8 +403,6 @@ impl QueuePair {
         let mut inner = self.inner.lock();
         let mut n = 0;
 
-        let mut completion_list = Vec::new();
-
         loop {
             let (cmp, expected_phase) = inner.cq.at_head(n);
             let cmp_phase = cmp.phase();
@@ -411,29 +413,24 @@
             n += 1;
 
             let command_id = cmp.command_id();
             let sub_queue_id = cmp.sub_queue_id();
             // TODO allow several sqs receive completions through one cq?
-            assert_eq!(sub_queue_id, self.id);
+            debug_assert_eq!(sub_queue_id, self.id);
 
             let sub_queue_head = cmp.sub_queue_head();
+            let cmp = *cmp;
             inner.sq.take_until(sub_queue_head);
 
-            completion_list.push(cmp);
+            if inner.pending.remove(&command_id) {
+                inner.completed.insert(command_id, cmp);
+            }
         }
 
         if n != 0 {
             inner.cq.take(n);
         }
 
-        for cmp in completion_list {
-            let command_id = cmp.command_id();
-
-            if inner.pending.remove(&command_id) {
-                inner.completed.insert(command_id, cmp);
-            }
-        }
-
         if n != 0 {
             self.completion_notify.wake_all();
         }
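A note on the loop this hunk simplifies: NVMe signals new completion queue entries with a phase tag that the controller inverts on every wrap of the queue, which is why the code compares `cmp.phase()` against an expected phase rather than reading a head pointer. A rough sketch of the idea with hypothetical types (the real entry and phase-bit layout live in this driver's queue module):

    // Count consecutive entries whose phase tag matches the expected value;
    // a mismatched tag means the controller has not written that slot yet.
    // Here the low bit of each status word stands in for the phase tag.
    fn count_ready(status_words: &[u16], head: usize, expected_phase: bool) -> usize {
        status_words[head..]
            .iter()
            .take_while(|w| (**w & 1 != 0) == expected_phase)
            .count()
    }

Copying the entry out with `let cmp = *cmp;` also ends the borrow of the completion queue, which is what lets the same pass update the submission queue and the pending/completed maps without the intermediate `completion_list` vector.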

View File

@@ -29,36 +29,11 @@ impl RegularNode {
         Ok(())
     }
 
-    async fn read(&self, mut pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
+    async fn read(&self, pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
         let holder = self.inode.get().await?;
         let inode = holder.read();
 
-        if pos >= inode.size(&self.fs) {
-            return Ok(0);
-        }
-
-        let mut offset = 0;
-        let mut remaining = core::cmp::min(buffer.len(), (inode.size(&self.fs) - pos) as usize);
-
-        while remaining != 0 {
-            let block_index = pos / self.fs.block_size as u64;
-            let block_offset = (pos % self.fs.block_size as u64) as usize;
-            let amount = core::cmp::min(self.fs.block_size - block_offset, remaining);
-
-            self.fs
-                .with_inode_block(&inode, block_index as u32, |block| {
-                    buffer[offset..offset + amount]
-                        .copy_from_slice(&block[block_offset..block_offset + amount]);
-                    Ok(())
-                })
-                .await?;
-
-            pos += amount as u64;
-            offset += amount;
-            remaining -= amount;
-        }
-
-        Ok(offset)
+        self.fs.read_inode_data(&inode, pos, buffer).await
     }
 
     async fn write(&self, mut pos: u64, buffer: &[u8]) -> Result<usize, Error> {

View File

@@ -439,6 +439,41 @@ impl Ext2Fs {
         Ok(())
     }
 
+    pub async fn read_inode_data(
+        &self,
+        inode: &Inode,
+        mut pos: u64,
+        buffer: &mut [u8],
+    ) -> Result<usize, Error> {
+        let size = inode.size(self);
+
+        if pos >= size {
+            return Ok(0);
+        }
+
+        let mut offset = 0;
+        let mut remaining = buffer.len().min((size - pos) as usize);
+
+        while remaining != 0 {
+            let block_index = pos / self.block_size as u64;
+            let block_offset = (pos % self.block_size as u64) as usize;
+            let amount = remaining.min(self.block_size - block_offset);
+
+            self.with_inode_block(&inode, block_index as u32, |block| {
+                buffer[offset..offset + amount]
+                    .copy_from_slice(&block[block_offset..block_offset + amount]);
+                Ok(())
+            })
+            .await?;
+
+            pos += amount as u64;
+            offset += amount;
+            remaining -= amount;
+        }
+
+        Ok(offset)
+    }
+
     pub async fn flush_inode_cache(&self) -> Result<(), Error> {
         log::info!("Flushing inode cache");
         self.inode_cache.get().flush().await
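The arithmetic in `read_inode_data` is easiest to see with concrete numbers. A self-contained trace of the loop for a hypothetical 8192-byte read starting at pos = 5000 with 4096-byte blocks (values chosen for illustration):

    fn main() {
        let block_size = 4096u64;
        let (mut pos, mut remaining) = (5000u64, 8192usize);
        while remaining != 0 {
            let block_index = pos / block_size;
            let block_offset = (pos % block_size) as usize;
            let amount = remaining.min(block_size as usize - block_offset);
            println!("block {block_index}: offset {block_offset}, copy {amount}");
            pos += amount as u64;
            remaining -= amount;
        }
    }

This prints three steps: the tail of block 1 (offset 904, 3192 bytes), all of block 2 (4096 bytes), and the head of block 3 (904 bytes), matching how the real loop walks `with_inode_block`.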

View File

@@ -33,11 +33,12 @@ impl SymlinkNode {
         let inode = inode.read();
         let len = inode.size(&self.fs) as usize;
 
-        if len >= self.fs.block_size {
-            todo!()
+        if len > self.fs.block_size {
+            log::warn!("ext2: symlink size > block size");
+            return Err(Error::InvalidFile);
         }
         if buf.len() < len {
-            todo!();
+            return Err(Error::BufferTooSmall);
         }
 
         let mut write = self.cache.write();

View File

@@ -435,11 +435,13 @@ impl Thread {
     ///
     /// Will panic if no current thread is present. For try-style getter, see
     /// [Thread::get_current].
+    #[inline]
     pub fn current() -> CurrentThread {
         Self::get_current().unwrap()
     }
 
     /// Returns the current thread on the CPU, if any is present
+    #[inline]
     pub fn get_current() -> Option<CurrentThread> {
         // IrqGuard is held throughout
         let cpu = CpuImpl::<CpuQueue>::local();
@@ -449,6 +451,7 @@ impl Thread {
     }
 
     /// Returns a thread for given `id`, if such exists
+    #[inline]
     pub fn get(id: ThreadId) -> Option<Arc<Thread>> {
         THREADS.read().get(id).cloned()
     }

View File

@@ -151,6 +151,7 @@ impl Device for LocalApic {
 }
 
 impl LocalApicInterface for LocalApic {
+    #[inline]
     fn clear_interrupt(&self) {
         self.regs.EndOfInterrupt.set(0);
     }

View File

@@ -140,11 +140,15 @@ impl log::Log for RingLoggerSink {
 }
 
 impl log::Log for KernelLoggerSink {
-    fn enabled(&self, _metadata: &log::Metadata) -> bool {
-        true
+    fn enabled(&self, metadata: &log::Metadata) -> bool {
+        metadata.target() != "io"
     }
 
     fn log(&self, record: &log::Record) {
+        if !self.enabled(record.metadata()) {
+            return;
+        }
+
         if RING_AVAILABLE.load(Ordering::Acquire) {
             RING_LOGGER_SINK.log(record);
         }
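The new `enabled` drops every record whose target is "io", and `log` now checks it up front, so such records are filtered before reaching the ring buffer. Hypothetical call sites using the standard `log` crate macros (not code from this commit):

    fn example() {
        // Filtered out by KernelLoggerSink::enabled (explicit "io" target):
        log::debug!(target: "io", "nvme: completion for command {}", 42);
        // Still logged; the default target is the module path:
        log::info!("thread spawned");
    }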

View File

@@ -7,7 +7,7 @@ use std::{
 use sysutils::Input;
 
 fn cat_file(stdout: &mut Stdout, path: &str) -> io::Result<()> {
-    let mut buf = [0; 4096];
+    let mut buf = [0; 512];
     let mut reader = Input::open_str(path)?;
 
     loop {

View File

@@ -88,7 +88,7 @@ fn run<I: Read + Seek, O: Write>(
     let mut offset = 0;
     let mut bytes_read = 0;
 
-    // input.seek(SeekFrom::Start(src_position * src_block_size))?;
+    input.seek(SeekFrom::Start(src_position * src_block_size))?;
 
     let mut total_delta = Duration::ZERO;