dev/block: rewrite block subsystem

This commit is contained in:
Mark Poliakov 2024-07-29 14:43:37 +03:00
parent 6864447b33
commit 404ea5a75e
16 changed files with 312 additions and 690 deletions

14
Cargo.lock generated
View File

@ -154,6 +154,17 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "async-trait"
version = "0.1.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.66",
]
[[package]]
name = "atomic_enum"
version = "0.3.0"
@ -887,6 +898,7 @@ name = "libk"
version = "0.1.0"
dependencies = [
"abi-lib",
"async-trait",
"atomic_enum",
"bytemuck",
"cfg-if",
@ -1921,6 +1933,7 @@ dependencies = [
name = "ygg_driver_ahci"
version = "0.1.0"
dependencies = [
"async-trait",
"bytemuck",
"device-api",
"futures-util",
@ -1977,6 +1990,7 @@ dependencies = [
name = "ygg_driver_nvme"
version = "0.1.0"
dependencies = [
"async-trait",
"bytemuck",
"device-api",
"futures-util",

View File

@ -20,3 +20,4 @@ static_assertions = "1.1.0"
tock-registers = "0.8.1"
bytemuck = { version = "1.16.1", features = ["derive"] }
memoffset = "0.9.0"
async-trait = "0.1.81"

View File

@ -5,4 +5,17 @@ pub enum AhciError {
MemoryError(#[allow(dead_code)] Error),
RegionTooLarge,
DeviceError,
FeatureNotImplemented,
}
impl From<AhciError> for Error {
fn from(value: AhciError) -> Self {
match value {
// TODO: Error::DeviceError
AhciError::DeviceError => Error::InvalidArgument,
AhciError::RegionTooLarge => Error::InvalidArgument,
AhciError::MemoryError(err) => err,
AhciError::FeatureNotImplemented => Error::NotImplemented,
}
}
}

View File

@ -1,23 +1,24 @@
use core::{
pin::Pin,
future::poll_fn,
mem::MaybeUninit,
sync::atomic::{AtomicU32, Ordering},
task::{Context, Poll},
};
use alloc::{boxed::Box, string::String};
use async_trait::async_trait;
use bytemuck::Zeroable;
use futures_util::{task::AtomicWaker, Future};
use libk::vfs::block::{IoOperation, IoRequest, IoSubmissionId, NgBlockDevice};
use futures_util::task::AtomicWaker;
use libk::vfs::block::NgBlockDevice;
use libk_mm::{address::AsPhysicalAddress, device::DeviceMemoryIo, PageBox};
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker, OneTimeInit};
use tock_registers::interfaces::{Readable, Writeable};
use yggdrasil_abi::error::Error;
use crate::{
command::{AtaCommand, AtaIdentify, AtaReadDmaEx},
data::{CommandListEntry, CommandTable, ReceivedFis, COMMAND_LIST_LENGTH},
error::AhciError,
regs::{CommandState, CommandStatus, PortRegs, IE, TFD},
regs::{PortRegs, CMD_PENDING, CMD_READY, IE, TFD},
AhciController, MAX_COMMANDS, SECTOR_SIZE,
};
@ -54,6 +55,32 @@ pub struct AhciPort {
command_available: QueueWaker,
}
/// Guard for a command that has been written to an HBA command slot.
/// The holder must consume it via [`SubmittedCommand::wait_for_completion`];
/// dropping it while the command is still in flight panics (see `Drop` below).
struct SubmittedCommand<'a> {
// Port the command was issued on.
port: &'a AhciPort,
// Command-slot index within the port's command list.
index: usize,
}
impl<'a> SubmittedCommand<'a> {
/// Waits until the port reports this slot as completed, then releases the
/// slot for reuse and returns the command's result.
pub async fn wait_for_completion(self) -> Result<(), AhciError> {
let result = poll_fn(|cx| self.port.poll_slot(cx, self.index)).await;
// Free the command without dropping it
// (`forget` skips the panicking Drop impl — completion has been handled here).
self.port.free_command(self.index);
core::mem::forget(self);
result
}
}
impl<'a> Drop for SubmittedCommand<'a> {
fn drop(&mut self) {
// Reaching Drop means the command was abandoned while still in flight.
// NOTE(review): presumably this guards against releasing state the device
// may still be using for DMA — confirm against the port/slot lifecycle.
panic!(
"Cannot drop command in flight: port{}, slot{}",
self.port.index, self.index
)
}
}
impl PortInner {
fn submit_command<C: AtaCommand>(
&mut self,
@ -127,7 +154,8 @@ impl AhciPort {
command_list,
received_fis,
};
let command_completion = [const { (AtomicWaker::new(), AtomicU32::new(0)) }; MAX_COMMANDS];
let command_completion =
[const { (AtomicWaker::new(), AtomicU32::new(CMD_READY)) }; MAX_COMMANDS];
let command_available = QueueWaker::new();
let command_allocation = IrqSafeSpinlock::new(0);
@ -167,88 +195,64 @@ impl AhciPort {
self.info.try_get()
}
async fn allocate_command(&self) -> usize {
poll_fn(|cx| {
self.command_available.register(cx.waker());
let mut state = self.command_allocation.lock();
if *state != u32::MAX {
self.command_available.remove(cx.waker());
for i in 0..MAX_COMMANDS {
if *state & (1 << i) == 0 {
*state |= 1 << i;
self.command_completion[i]
.1
.store(CMD_PENDING, Ordering::Release);
return Poll::Ready(i);
}
}
unreachable!()
} else {
Poll::Pending
}
})
.await
}
async fn submit<C: AtaCommand>(&self, command: &C) -> Result<SubmittedCommand, AhciError> {
let index = self.allocate_command().await;
if let Err(error) = self.inner.lock().submit_command(index, command) {
self.free_command(index);
return Err(error);
}
Ok(SubmittedCommand { port: self, index })
}
async fn perform_command<C: AtaCommand>(&self, command: C) -> Result<C::Response, AhciError> {
let slot = self.allocate_command().await?;
log::trace!(
"Submit command on port {}, cmd index = {}",
self.index,
slot
);
self.inner.lock().submit_command(slot, &command)?;
self.wait_for_completion(slot).await?;
self.free_command(slot);
// Run the command
self.submit(&command).await?.wait_for_completion().await?;
Ok(unsafe { command.into_response() })
}
fn allocate_command(&self) -> impl Future<Output = Result<usize, AhciError>> + '_ {
struct F<'f> {
waker: &'f QueueWaker,
state: &'f IrqSafeSpinlock<u32>,
}
impl<'f> Future for F<'f> {
type Output = Result<usize, AhciError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.waker.register(cx.waker());
let mut state = self.state.lock();
if *state != u32::MAX {
self.waker.remove(cx.waker());
for i in 0..MAX_COMMANDS {
if *state & (1 << i) == 0 {
*state |= 1 << i;
return Poll::Ready(Ok(i));
}
}
panic!("Unreachable");
} else {
Poll::Pending
}
}
}
let waker = &self.command_available;
let state = &self.command_allocation;
F { waker, state }
}
fn wait_for_completion(
&self,
index: usize,
) -> impl Future<Output = Result<(), AhciError>> + '_ {
struct F<'f> {
waker: &'f AtomicWaker,
status: &'f AtomicU32,
}
impl<'f> Future for F<'f> {
type Output = Result<(), AhciError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.status.load(Ordering::Acquire) {
0 => (),
1 => return Poll::Ready(Ok(())),
_ => return Poll::Ready(Err(AhciError::DeviceError)),
}
self.waker.register(cx.waker());
match self.status.load(Ordering::Acquire) {
0 => Poll::Pending,
1 => Poll::Ready(Ok(())),
_ => Poll::Ready(Err(AhciError::DeviceError)),
}
}
}
fn poll_slot(&self, cx: &mut Context<'_>, index: usize) -> Poll<Result<(), AhciError>> {
let (waker, status) = &self.command_completion[index];
F { status, waker }
match status.load(Ordering::Acquire) {
CMD_PENDING => (),
CMD_READY => return Poll::Ready(Ok(())),
_ => return Poll::Ready(Err(AhciError::DeviceError)),
}
waker.register(cx.waker());
match status.load(Ordering::Acquire) {
CMD_PENDING => Poll::Pending,
CMD_READY => Poll::Ready(Ok(())),
_ => Poll::Ready(Err(AhciError::DeviceError)),
}
}
fn free_command(&self, index: usize) {
@ -263,139 +267,57 @@ impl AhciPort {
pub fn handle_pending_interrupts(&self) -> bool {
let inner = self.inner.lock();
for i in 0..MAX_COMMANDS {
match inner.regs.clear_state(i) {
CommandState::Pending => (),
CommandState::Ready(status) => {
// TODO better error handling?
let val = match status {
CommandStatus::Success => 1,
_ => 2,
};
let Some(status) = inner.regs.clear_interrupt() else {
return false;
};
self.command_completion[i].1.store(val, Ordering::Release);
let ci = inner.regs.CI.get();
for i in 0..MAX_COMMANDS {
if ci & (1 << i) == 0 {
if self.command_completion[i]
.1
.swap(status.into(), Ordering::Release)
== CMD_PENDING
{
log::info!("port{}: completion on slot {}", self.index, i);
self.command_completion[i].0.wake();
}
}
}
true
}
}
#[async_trait]
impl NgBlockDevice for AhciPort {
type CompletionNotify = AtomicWaker;
type Error = AhciError;
fn bus_id(&self) -> u32 {
0
async fn read(
&self,
lba: u64,
buffer: &mut PageBox<[MaybeUninit<u8>]>,
) -> Result<(), AhciError> {
let command = AtaReadDmaEx::new(lba, buffer.len() / SECTOR_SIZE, &buffer);
self.submit(&command).await?.wait_for_completion().await
}
fn unit_id(&self) -> u32 {
self.index as u32
async fn write(&self, _lba: u64, _buffer: PageBox<[u8]>) -> Result<(), AhciError> {
// TODO AtaDmaWriteEx
Err(AhciError::FeatureNotImplemented)
}
fn block_size(&self) -> u64 {
SECTOR_SIZE as _
fn block_size(&self) -> usize {
SECTOR_SIZE
}
fn block_count(&self) -> u64 {
self.info.get().lba_count
fn block_count(&self) -> usize {
self.info().as_ref().map(|i| i.lba_count).unwrap() as _
}
fn max_blocks_per_request(&self) -> u64 {
fn max_blocks_per_request(&self) -> usize {
// TODO
1
}
async fn submit_request(&self, request: IoRequest<'_>) -> Result<IoSubmissionId, Error> {
// TODO better error handling
let slot = self.allocate_command().await.unwrap();
log::trace!(
"Submit command on port {}, cmd index = {}",
self.index,
slot
);
match request.operation {
IoOperation::Read { lba, count } => {
self.inner
.lock()
.submit_command(slot, &AtaReadDmaEx::new(lba, count, request.data))
.unwrap();
}
IoOperation::Write { .. } => todo!(),
}
Ok(IoSubmissionId {
queue_id: self.index,
command_id: slot,
})
}
fn poll_completion(&self, id: IoSubmissionId) -> Poll<Result<(), Error>> {
let (_, status) = &self.command_completion[id.command_id];
match status.load(Ordering::Acquire) {
0 => Poll::Pending,
1 => {
self.free_command(id.command_id);
log::debug!("COMMAND FINISHED");
Poll::Ready(Ok(()))
}
_ => todo!(), // Poll::Ready(Err(AhciError::DeviceError)),
}
}
fn completion_notify(&self, id: IoSubmissionId) -> &Self::CompletionNotify {
let (notify, _) = &self.command_completion[id.command_id];
notify
}
}
// impl BlockDevice for AhciPort {
// fn read(&'static self, mut pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
// let info = self.info.try_get().ok_or(Error::PermissionDenied)?;
//
// let mut cache = self.cache.lock();
// let mut rem = buf.len();
// let mut off = 0;
//
// while rem != 0 {
// let lba = pos / SECTOR_SIZE as u64;
//
// if lba >= info.lba_count {
// break;
// }
//
// let block_offset = (pos % SECTOR_SIZE as u64) as usize;
// let count = core::cmp::min(SECTOR_SIZE - block_offset, rem);
//
// let block = cache.get_or_fetch_with(lba, |block| {
// block! {
// self.read_block(lba, block).await
// }?
// .map_err(|_| Error::InvalidOperation)
// })?;
//
// buf[off..off + count].copy_from_slice(&block[block_offset..block_offset + count]);
//
// rem -= count;
// off += count;
// pos += count as u64;
// }
//
// Ok(off)
// }
//
// fn write(&'static self, _pos: u64, _buf: &[u8]) -> Result<usize, Error> {
// todo!()
// }
//
// fn size(&self) -> Result<u64, Error> {
// let info = self.info.try_get().ok_or(Error::PermissionDenied)?;
// Ok(info.lba_count * SECTOR_SIZE as u64)
// }
//
// fn device_request(&self, _req: &mut DeviceRequest) -> Result<(), Error> {
// todo!()
// }
// }

View File

@ -112,17 +112,9 @@ pub enum Version {
V1_3_1,
}
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum CommandStatus {
Success,
TaskFileError,
}
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum CommandState {
Pending,
Ready(CommandStatus),
}
// Per-slot completion status values stored in each command's AtomicU32.
pub const CMD_PENDING: u32 = 1; // command submitted, completion not yet reported
pub const CMD_READY: u32 = 0; // slot idle / command completed successfully
pub const CMD_ERROR: u32 = 2; // command completed with an error (task-file/interface/overflow)
impl PortRegs {
pub const SIG_SATA: u32 = 0x101;
@ -161,28 +153,24 @@ impl PortRegs {
self.CLBU.set((address >> 32) as u32);
}
pub fn clear_state(&self, index: usize) -> CommandState {
pub fn clear_interrupt(&self) -> Option<u32> {
let is = self.IS.extract();
let ci = self.CI.get();
if is.get() == 0 {
return CommandState::Pending;
return None;
}
// Clear everything
self.IS.set(0xFFFFFFFF);
self.IS.set(u32::MAX);
if is.matches_any(IS::HBDS::SET + IS::HBFS::SET) {
todo!("Host communication error unhandled");
}
assert_eq!(ci & (1 << index), 0);
if is.matches_any(IS::TFES::SET + IS::IFS::SET + IS::OFS::SET) {
return CommandState::Ready(CommandStatus::TaskFileError);
Some(CMD_ERROR)
} else {
Some(CMD_READY)
}
CommandState::Ready(CommandStatus::Success)
}
}

View File

@ -19,3 +19,4 @@ futures-util = { version = "0.3.30", default-features = false, features = ["allo
static_assertions = "1.1.0"
tock-registers = "0.8.1"
bytemuck = { version = "1.16.1", features = ["derive"] }
async-trait = "0.1.81"

View File

@ -1,16 +1,12 @@
use core::task::Poll;
use core::mem::MaybeUninit;
use alloc::{boxed::Box, format};
use async_trait::async_trait;
use kernel_fs::devfs;
use libk::task::cpu_index;
use libk::vfs::block::{
probe_partitions, IoOperation, IoRequest, IoSubmissionId, NgBlockDevice, NgBlockDeviceWrapper,
};
use libk_mm::address::AsPhysicalAddress;
use libk_util::waker::QueueWaker;
use yggdrasil_abi::error::Error;
use libk::vfs::block::{probe_partitions, NgBlockDevice, NgBlockDeviceWrapper};
use libk_mm::{address::AsPhysicalAddress, PageBox};
use crate::command::{IdentifyNamespaceRequest, IoRead};
use crate::{command::IdentifyNamespaceRequest, IoDirection};
use super::{error::NvmeError, NvmeController};
@ -61,68 +57,48 @@ impl NvmeDrive {
}
}
#[async_trait]
impl NgBlockDevice for NvmeDrive {
type CompletionNotify = QueueWaker;
type Error = NvmeError;
fn bus_id(&self) -> u32 {
(*self.controller.controller_id.get()) as _
async fn read(
&self,
lba: u64,
buffer: &mut PageBox<[MaybeUninit<u8>]>,
) -> Result<(), NvmeError> {
self.controller
.perform_io(
self.nsid,
lba,
unsafe { buffer.as_physical_address() },
IoDirection::Read,
)
.await?;
Ok(())
}
async fn write(&self, lba: u64, buffer: PageBox<[u8]>) -> Result<(), NvmeError> {
self.controller
.perform_io(
self.nsid,
lba,
unsafe { buffer.as_physical_address() },
IoDirection::Write,
)
.await?;
Ok(())
}
fn unit_id(&self) -> u32 {
self.nsid
fn block_size(&self) -> usize {
self.lba_size as _
}
fn block_count(&self) -> usize {
self.total_lba_count as _
}
fn block_size(&self) -> u64 {
self.lba_size
}
fn block_count(&self) -> u64 {
self.total_lba_count
}
fn max_blocks_per_request(&self) -> u64 {
// TODO get from identify
fn max_blocks_per_request(&self) -> usize {
// TODO get from device
8
}
async fn submit_request(&self, request: IoRequest<'_>) -> Result<IoSubmissionId, Error> {
let queue_id = cpu_index();
let ioq = &self.controller.ioqs.get()[queue_id as usize];
let command_id = match request.operation {
IoOperation::Read { lba, count } => {
log::debug!(
"Submit read of {} lbas from ns {} to queue {}",
count,
self.nsid,
queue_id
);
let range = unsafe { request.data.as_physical_address() };
ioq.submit(
IoRead {
lba,
count: count as _,
nsid: self.nsid,
},
&[range],
true,
)
}
IoOperation::Write { .. } => todo!(),
};
Ok(IoSubmissionId {
queue_id: queue_id as _,
command_id: command_id as _,
})
}
fn poll_completion(&self, id: IoSubmissionId) -> Poll<Result<(), Error>> {
let ioq = &self.controller.ioqs.get()[id.queue_id];
ioq.poll_completion(id.command_id as _)
}
fn completion_notify(&self, id: IoSubmissionId) -> &QueueWaker {
&self.controller.ioqs.get()[id.queue_id].completion_notify
}
}

View File

@ -13,3 +13,13 @@ impl From<CommandError> for NvmeError {
Self::CommandError(value)
}
}
impl From<NvmeError> for Error {
fn from(value: NvmeError) -> Self {
match value {
NvmeError::MemoryError(err) => err,
// TODO Error::DeviceError
NvmeError::CommandError(_err) => Error::InvalidArgument,
}
}
}

View File

@ -28,6 +28,7 @@ crossbeam-queue = { version = "0.3.11", default-features = false, features = ["a
serde_json = { version = "1.0.111", default-features = false, features = ["alloc"] }
serde = { version = "1.0.193", features = ["derive"], default-features = false }
bytemuck = { version = "1.16.1", features = ["derive"] }
async-trait = "0.1.81"
[dependencies.elf]
version = "0.7.2"

View File

@ -2,6 +2,8 @@
#![cfg_attr(test, allow(unused_imports))]
#![allow(clippy::new_without_default)]
#![feature(
new_range_api,
associated_type_defaults,
maybe_uninit_slice,
step_trait,
const_trait_impl,

View File

@ -1,12 +1,15 @@
#![allow(unused)]
use core::{
fmt,
mem::MaybeUninit,
ops::Range,
pin::Pin,
task::{Context, Poll},
};
use alloc::boxed::Box;
use async_trait::async_trait;
use futures_util::{task::AtomicWaker, Future};
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageBox, PageProvider};
use libk_util::waker::QueueWaker;
@ -17,363 +20,110 @@ use crate::vfs::block::{
BlockDevice,
};
pub trait CompletionNotify {
fn wait_for_completion<'a, D: NgBlockDevice + 'a>(
&'a self,
device: &'a D,
id: IoSubmissionId,
) -> impl Future<Output = Result<(), Error>> + Send + '_;
}
#[async_trait]
pub trait NgBlockDevice: Sync {
type CompletionNotify: CompletionNotify;
type Error: fmt::Debug + Into<Error> = Error;
fn bus_id(&self) -> u32; // HBA, controller ID, etc.
fn unit_id(&self) -> u32; // Drive, slot, connector ID, etc.
fn block_size(&self) -> u64;
fn block_count(&self) -> u64;
fn max_blocks_per_request(&self) -> u64;
fn submit_request(
async fn read(
&self,
request: IoRequest,
) -> impl Future<Output = Result<IoSubmissionId, Error>> + Send;
lba: u64,
buffer: &mut PageBox<[MaybeUninit<u8>]>,
) -> Result<(), Self::Error>;
async fn write(&self, lba: u64, buffer: PageBox<[u8]>) -> Result<(), Self::Error>;
fn poll_completion(&self, id: IoSubmissionId) -> Poll<Result<(), Error>>;
fn completion_notify(&self, id: IoSubmissionId) -> &Self::CompletionNotify;
fn block_size(&self) -> usize;
fn block_count(&self) -> usize;
fn wait_for_completion(
&self,
id: IoSubmissionId,
) -> impl Future<Output = Result<(), Error>> + Send + '_
where
Self: Sized,
{
self.completion_notify(id).wait_for_completion(self, id)
fn max_blocks_per_request(&self) -> usize {
1
}
}
pub struct NgBlockDeviceWrapper<'a, D: NgBlockDevice + 'a> {
device: &'a D,
pub(crate) block_size: u64,
pub(crate) block_size: usize,
pub(crate) block_count: u64,
#[allow(unused)]
max_blocks_per_request: u64,
}
#[derive(Debug, PartialEq)]
struct BlockChunk {
lba_start: u64,
lba_count: usize,
buffer_offset: usize,
lba_offset: usize,
byte_count: usize,
}
struct BlockChunkIter {
remaining: usize,
buffer_offset: usize,
position: u64,
block_size: u64,
max_blocks_per_request: u64,
}
impl CompletionNotify for QueueWaker {
fn wait_for_completion<'a, D: NgBlockDevice + 'a>(
&'a self,
device: &'a D,
id: IoSubmissionId,
) -> impl Future<Output = Result<(), Error>> + Send + '_ {
struct F<'f, D: NgBlockDevice + 'f> {
device: &'f D,
notify: &'f QueueWaker,
id: IoSubmissionId,
}
impl<'f, D: NgBlockDevice + 'f> Future for F<'f, D> {
type Output = Result<(), Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.notify.register(cx.waker());
match self.device.poll_completion(self.id) {
Poll::Ready(result) => {
self.notify.remove(cx.waker());
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
}
}
F {
notify: self,
device,
id,
}
}
}
impl CompletionNotify for AtomicWaker {
fn wait_for_completion<'a, D: NgBlockDevice + 'a>(
&'a self,
device: &'a D,
id: IoSubmissionId,
) -> impl Future<Output = Result<(), Error>> + Send + '_ {
struct F<'f, D: NgBlockDevice + 'f> {
device: &'f D,
notify: &'f AtomicWaker,
id: IoSubmissionId,
}
impl<'f, D: NgBlockDevice + 'f> Future for F<'f, D> {
type Output = Result<(), Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Poll::Ready(result) = self.device.poll_completion(self.id) {
return Poll::Ready(result);
}
self.notify.register(cx.waker());
self.device.poll_completion(self.id)
}
}
F {
notify: self,
device,
id,
}
}
}
impl BlockChunk {
pub fn block_range(&self) -> Range<usize> {
self.lba_offset..self.lba_offset + self.byte_count
}
pub fn buffer_range(&self) -> Range<usize> {
self.buffer_offset..self.buffer_offset + self.byte_count
}
}
impl BlockChunkIter {
pub fn new(pos: u64, count: usize, lba_size: u64, max_lba_per_request: u64) -> Self {
Self {
remaining: count,
buffer_offset: 0,
position: pos,
block_size: lba_size,
max_blocks_per_request: max_lba_per_request,
}
}
}
impl Iterator for BlockChunkIter {
type Item = BlockChunk;
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let lba_start = self.position / self.block_size;
let lba_end =
(self.position + self.remaining as u64 + self.block_size - 1) / self.block_size;
let lba_count = core::cmp::min(lba_end - lba_start, self.max_blocks_per_request);
let lba_offset = (self.position % self.block_size) as usize;
let byte_count = core::cmp::min(
(lba_count * self.block_size) as usize - lba_offset,
self.remaining,
);
let buffer_offset = self.buffer_offset;
self.position += byte_count as u64;
self.buffer_offset += byte_count;
self.remaining -= byte_count;
Some(BlockChunk {
lba_start,
lba_count: lba_count as usize,
buffer_offset,
lba_offset,
byte_count,
})
}
pub(crate) max_blocks_per_request: usize,
}
impl<'a, D: NgBlockDevice + 'a> NgBlockDeviceWrapper<'a, D> {
pub fn new(device: &'a D) -> &'a Self {
let block_size = device.block_size();
let block_count = device.block_count();
let max_blocks_per_request = device.max_blocks_per_request();
pub fn new(device: &'a D) -> &'static Self {
Box::leak(Box::new(Self {
device,
block_size,
block_count,
max_blocks_per_request,
block_size: device.block_size(),
block_count: device.block_count() as u64,
max_blocks_per_request: device.max_blocks_per_request(),
}))
}
async fn read_range_inner(&self, lba: u64, count: usize) -> Result<PageBox<[u8]>, Error> {
let mut data = PageBox::new_uninit_slice(self.block_size as usize * count)?;
fn lba_range(&self, byte_range: Range<u64>) -> (Range<u64>, usize) {
let lba_start = byte_range.start / self.block_size as u64;
let lba_end = (byte_range.end + self.block_size as u64 - 1) / self.block_size as u64;
let lba_off = (byte_range.start % self.block_size as u64) as usize;
let id = self
.device
.submit_request(IoRequest {
operation: IoOperation::Read { lba, count },
data: &mut data,
})
.await?;
(lba_start..lba_end, lba_off)
}
self.device.wait_for_completion(id).await?;
fn handle_drive_error(e: D::Error) -> Error {
log::error!("Drive error: {:?}", e);
e.into()
}
}
Ok(unsafe { data.assume_init_slice() })
#[async_trait]
impl<'a, D: NgBlockDevice + 'a> BlockDevice for NgBlockDeviceWrapper<'a, D> {
async fn read(&self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
let (lba_range, lba_off) = self.lba_range(pos..pos + buf.len() as u64);
log::info!(
"read({:#x}, {}) x {:#x} -> LBA {:#x?}",
pos,
buf.len(),
self.block_size,
lba_range
);
let mut buffer = PageBox::new_uninit_slice(
(lba_range.end - lba_range.start) as usize * self.block_size,
)?;
self.device
.read(lba_range.start, &mut buffer)
.await
.map_err(Self::handle_drive_error)?;
let buffer = unsafe { buffer.assume_init_slice() };
buf.copy_from_slice(&buffer[lba_off..lba_off + buf.len()]);
Ok(buf.len())
}
async fn write(&self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
todo!()
}
fn size(&self) -> Result<u64, Error> {
Ok(self.block_size as u64 * self.block_count)
}
}
impl<'a, D: NgBlockDevice + 'a> PageProvider for NgBlockDeviceWrapper<'a, D> {
fn get_page(&self, _offset: u64) -> Result<PhysicalAddress, Error> {
todo!()
}
fn release_page(&self, _offset: u64, _phys: PhysicalAddress) -> Result<(), Error> {
fn get_page(&self, offset: u64) -> Result<PhysicalAddress, Error> {
todo!()
}
fn clone_page(
&self,
_offset: u64,
_src_phys: PhysicalAddress,
_src_attrs: MapAttributes,
offset: u64,
src_phys: PhysicalAddress,
src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
todo!()
}
}
impl<'a, D: NgBlockDevice + 'a> BlockDevice for NgBlockDeviceWrapper<'a, D> {
fn poll_read(
&self,
cx: &mut Context<'_>,
pos: u64,
buf: &mut [u8],
) -> Poll<Result<usize, Error>> {
todo!()
}
fn poll_write(&self, cx: &mut Context<'_>, pos: u64, buf: &[u8]) -> Poll<Result<usize, Error>> {
todo!()
}
// fn read(&'static self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
// // TODO block cache
// block! {
// let mut bytes_read = 0;
// for chunk in
// BlockChunkIter::new(pos, buf.len(), self.block_size, self.max_blocks_per_request)
// {
// log::debug!(
// "Read chunk: lba_start={}, lba_count={}",
// chunk.lba_start,
// chunk.lba_count
// );
// let block = self.read_range_inner(chunk.lba_start, chunk.lba_count).await?;
// buf[chunk.buffer_range()].copy_from_slice(&block[chunk.block_range()]);
// bytes_read += chunk.byte_count;
// }
// Ok(bytes_read)
// }?
// }
// fn write(&'static self, _pos: u64, _buf: &[u8]) -> Result<usize, Error> {
// todo!()
// }
fn size(&self) -> Result<u64, Error> {
Ok(self.block_size * self.block_count)
}
fn device_request(&self, _req: &mut DeviceRequest) -> Result<(), Error> {
fn release_page(&self, offset: u64, phys: PhysicalAddress) -> Result<(), Error> {
todo!()
}
}
#[cfg(test)]
mod tests {
use crate::vfs::block::device::BlockChunk;
use super::BlockChunkIter;
#[test]
fn block_chunk_iter() {
let mut it = BlockChunkIter {
remaining: 512 * 9 + 1,
position: 123,
block_size: 512,
buffer_offset: 0,
max_blocks_per_request: 2,
};
assert_eq!(
it.next().unwrap(),
BlockChunk {
lba_start: 0,
lba_count: 2,
buffer_offset: 0,
lba_offset: 123,
byte_count: 901
}
);
assert_eq!(
it.next().unwrap(),
BlockChunk {
lba_start: 2,
lba_count: 2,
buffer_offset: 1024 - 123,
lba_offset: 0,
byte_count: 1024
}
);
assert_eq!(
it.next().unwrap(),
BlockChunk {
lba_start: 4,
lba_count: 2,
buffer_offset: 2 * 1024 - 123,
lba_offset: 0,
byte_count: 1024
}
);
assert_eq!(
it.next().unwrap(),
BlockChunk {
lba_start: 6,
lba_count: 2,
buffer_offset: 3 * 1024 - 123,
lba_offset: 0,
byte_count: 1024
}
);
assert_eq!(
it.next().unwrap(),
BlockChunk {
lba_start: 8,
lba_count: 2,
buffer_offset: 4 * 1024 - 123,
lba_offset: 0,
byte_count: 512 + 123 + 1
}
);
}
}

View File

@ -1,16 +1,14 @@
#![allow(missing_docs)]
use core::task::{Context, Poll};
use alloc::boxed::Box;
use async_trait::async_trait;
use libk_mm::PageProvider;
use yggdrasil_abi::{error::Error, io::DeviceRequest};
pub mod device;
// mod partition;
pub mod request;
pub use device::{NgBlockDevice, NgBlockDeviceWrapper};
pub use request::{IoOperation, IoRequest, IoSubmissionId};
// TODO
pub fn probe_partitions<
@ -20,64 +18,22 @@ pub fn probe_partitions<
_dev: &'static NgBlockDeviceWrapper<D>,
_callback: F,
) -> Result<(), Error> {
log::warn!("TODO: probe partitions");
Ok(())
// async fn probe_table<D: NgBlockDevice + 'static>(
// dev: &'static NgBlockDeviceWrapper<'static, D>,
// ) -> Result<Option<Vec<Partition<'static, D>>>, Error> {
// if let Some(partitions) = partition::probe_gpt(dev)? {
// return Ok(Some(partitions));
// }
// Ok(None)
// }
// runtime::spawn(async move {
// match probe_table(dev).await {
// Ok(Some(partitions)) => {
// // Create block devices for the partitions
// for (i, partition) in partitions.into_iter().enumerate() {
// let partition_blkdev = Box::leak(Box::new(partition));
// if let Err(error) = callback(i, partition_blkdev) {
// log::warn!("Could not add partition {}: {:?}", i, error);
// }
// }
// }
// Ok(None) => {
// log::warn!("Unknown or missing partition table");
// }
// Err(error) => {
// log::warn!("Could not probe partition table: {:?}", error);
// }
// }
// })
}
/// Block device interface
#[allow(unused)]
#[async_trait]
pub trait BlockDevice: PageProvider + Sync {
fn poll_read(
&self,
cx: &mut Context<'_>,
pos: u64,
buf: &mut [u8],
) -> Poll<Result<usize, Error>> {
Poll::Ready(Err(Error::NotImplemented))
async fn read(&self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
Err(Error::NotImplemented)
}
fn poll_write(&self, cx: &mut Context<'_>, pos: u64, buf: &[u8]) -> Poll<Result<usize, Error>> {
Poll::Ready(Err(Error::NotImplemented))
async fn write(&self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
Err(Error::NotImplemented)
}
// /// Reads data from the given offset of the device
// fn read(&'static self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
// Err(Error::NotImplemented)
// }
// /// Writes the data to the given offset of the device
// fn write(&'static self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
// Err(Error::NotImplemented)
// }
/// Returns the size of the block device in bytes
fn size(&self) -> Result<u64, Error> {
Err(Error::NotImplemented)
@ -96,13 +52,4 @@ pub trait BlockDevice: PageProvider + Sync {
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
Err(Error::NotImplemented)
}
// fn read_exact(&'static self, pos: u64, buf: &mut [u8]) -> Result<(), Error> {
// let count = self.read(pos, buf)?;
// if count == buf.len() {
// Ok(())
// } else {
// Err(Error::MissingData)
// }
// }
}

View File

@ -22,15 +22,19 @@ pub struct CharFile {
}
impl BlockFile {
pub fn read(&self, _buf: &mut [u8]) -> Result<usize, Error> {
todo!()
// let mut position = self.position.lock();
// let count = self.device.0.read(*position, buf)?;
// *position += count as u64;
// Ok(count)
pub fn read(&self, buf: &mut [u8]) -> Result<usize, Error> {
block!(self.read_async(buf).await)?
}
async fn read_async(&self, buf: &mut [u8]) -> Result<usize, Error> {
let mut position = self.position.lock();
let count = self.device.0.read(*position, buf).await?;
*position += count as u64;
Ok(count)
}
pub fn write(&self, _buf: &[u8]) -> Result<usize, Error> {
log::info!("BlockFile::write");
todo!()
// let mut position = self.position.lock();
// let count = self.device.0.write(*position, buf)?;

View File

@ -37,6 +37,17 @@ dependencies = [
"zerocopy",
]
[[package]]
name = "async-trait"
version = "0.1.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "atomic_enum"
version = "0.3.0"
@ -266,6 +277,7 @@ name = "libk"
version = "0.1.0"
dependencies = [
"abi-lib",
"async-trait",
"atomic_enum",
"bytemuck",
"cfg-if",

View File

@ -1,9 +1,6 @@
//! Abstract linear framebuffer device implementation
use core::{
ops::{Index, IndexMut},
task::{Context, Poll},
};
use core::ops::{Index, IndexMut};
use abi::{error::Error, io::DeviceRequest, process::ProcessId};
use device_api::Device;
@ -110,24 +107,6 @@ impl BlockDevice for LinearFramebuffer {
Ok(self.size as _)
}
fn poll_read(
&self,
_cx: &mut Context<'_>,
_pos: u64,
_buf: &mut [u8],
) -> Poll<Result<usize, Error>> {
todo!()
}
fn poll_write(
&self,
_cx: &mut Context<'_>,
_pos: u64,
_buf: &[u8],
) -> Poll<Result<usize, Error>> {
todo!()
}
fn is_readable(&self) -> bool {
false
}

View File

@ -45,6 +45,8 @@ pub(crate) fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
static PANIC_HAPPENED: AtomicBool = AtomicBool::new(false);
fatalln!("panic_handler({:?})", pi);
if PANIC_HAPPENED
.compare_exchange(false, true, Ordering::Release, Ordering::Acquire)
.is_ok()