block: make block subsystem use DmaBuffer/DmaSlice

Mark Poliakov 2025-02-07 01:34:36 +02:00
parent e812453a97
commit 7358852f67
14 changed files with 321 additions and 553 deletions
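
In short: instead of each consumer bounce-buffering reads and writes through its own PageBox/DmaBuffer and copying, block devices now hand out buffers from their own DMA allocator (BlockDevice::allocate_buffer) and accept DmaSlice/DmaSliceMut views directly, so drivers DMA straight into the caller's buffer. A minimal sketch of the new calling convention, using only signatures that appear in this diff (the helper itself is illustrative, not part of the commit):

    use libk::{device::block::BlockDevice, dma::DmaBuffer, error::Error};

    async fn read_one_block(dev: &dyn BlockDevice, position: u64) -> Result<DmaBuffer<[u8]>, Error> {
        let bs = dev.block_size();
        // The buffer comes from the device's own DMA allocator...
        let mut buffer = dev.allocate_buffer(bs)?;
        // ...and the device DMAs directly into a view of it, with no bounce copy.
        dev.read_aligned(position, buffer.slice_mut(0..bs)).await?;
        // read_aligned initialized the whole slice, so this is sound.
        Ok(unsafe { DmaBuffer::assume_init_slice(buffer) })
    }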

View File

@@ -1,10 +1,10 @@
use core::mem::{size_of, MaybeUninit};
use device_api::dma::DmaAllocator;
use libk::dma::{BusAddress, DmaBuffer};
use libk::dma::{BusAddress, DmaBuffer, DmaSliceMut};
use tock_registers::register_structs;
use crate::{data::AtaString, error::AhciError, MAX_PRD_SIZE, SECTOR_SIZE};
use crate::{data::AtaString, error::AhciError, MAX_PRD_SIZE};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(u8)]
@@ -85,10 +85,7 @@ impl AtaIdentify {
}
impl AtaReadDmaEx {
pub fn new(lba: u64, sector_count: usize, buffer: &mut DmaBuffer<[MaybeUninit<u8>]>) -> Self {
assert_eq!(buffer.len() % SECTOR_SIZE, 0);
assert_ne!(buffer.len(), 0);
pub fn new(lba: u64, sector_count: usize, buffer: DmaSliceMut<MaybeUninit<u8>>) -> Self {
Self {
lba,
sector_count,

View File

@@ -10,9 +10,13 @@ use async_trait::async_trait;
use bytemuck::Zeroable;
use device_api::{device::Device, dma::DmaAllocator};
use futures_util::task::AtomicWaker;
use libk::{device::block::BlockDevice, dma::DmaBuffer, error::Error};
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::PhysicalAddress, device::DeviceMemoryIo, table::MapAttributes, PageProvider, PageSlice,
address::PhysicalAddress, device::DeviceMemoryIo, table::MapAttributes, PageProvider,
};
use libk_util::{sync::IrqSafeSpinlock, waker::QueueWaker, OneTimeInit};
use tock_registers::interfaces::{Readable, Writeable};
@@ -305,31 +309,38 @@ impl AhciPort {
#[async_trait]
impl BlockDevice for AhciPort {
// TODO read directly into cache
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.ahci.dma, size)
}
async fn read_aligned(
&self,
position: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if position % SECTOR_SIZE as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % SECTOR_SIZE != 0 {
log::warn!("ahci: misaligned buffer size: {}", buffer.len());
return Err(Error::InvalidOperation);
}
if position % SECTOR_SIZE as u64 != 0 {
log::warn!("ahci: misaligned read");
return Err(Error::InvalidOperation);
}
let mut dma_buffer = DmaBuffer::new_uninit_slice(&*self.ahci.dma, buffer.len())?;
let lba = position / SECTOR_SIZE as u64;
let lba_count = buffer.len() / SECTOR_SIZE;
if lba + lba_count as u64 > self.block_count() {
log::warn!("ahci: read crosses medium end");
return Err(Error::InvalidOperation);
}
let command = AtaReadDmaEx::new(lba, buffer.len() / SECTOR_SIZE, &mut dma_buffer);
let command = AtaReadDmaEx::new(lba, lba_count, buffer);
self.submit(&command).await?.wait_for_completion().await?;
buffer.copy_from_slice(&dma_buffer[..]);
Ok(())
}
async fn write_aligned(&self, _position: u64, _buffer: &PageSlice<u8>) -> Result<(), Error> {
async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
// TODO AtaWriteDmaEx
Err(Error::NotImplemented)
}

View File

@@ -3,7 +3,11 @@ use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use device_api::device::Device;
use libk::{device::block::BlockDevice, dma::DmaBuffer, error::Error};
use libk::{
device::block::BlockDevice,
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
};
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
table::MapAttributes,
@@ -78,18 +82,27 @@ impl Device for NvmeNamespace {
#[async_trait]
impl BlockDevice for NvmeNamespace {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
DmaBuffer::new_uninit_slice(&*self.controller.dma, size)
}
// TODO read directly to cache
async fn read_aligned(
&self,
position: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
debug_assert_eq!(position % self.block_size() as u64, 0);
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let lba_count = buffer.len() / self.block_size();
let mut dma_buffer = DmaBuffer::new_uninit_slice(&*self.controller.dma, buffer.len())?;
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
let result = self
.controller
@@ -97,28 +110,29 @@ impl BlockDevice for NvmeNamespace {
self.nsid,
lba,
lba_count,
dma_buffer.bus_address(),
buffer.bus_address(),
buffer.len(),
IoDirection::Read,
)
.await;
log::info!(target: "io", "read #{lba}, {lba_count} blocks -> {result:?} @ {dma_buffer:p}");
if result.is_ok() {
buffer.copy_from_slice(&dma_buffer[..]);
}
log::info!("read #{lba}, {lba_count} blocks -> {result:?} @ {buffer:p}");
result.map_err(NvmeError::into)
}
async fn write_aligned(&self, position: u64, buffer: &PageSlice<u8>) -> Result<(), Error> {
debug_assert_eq!(position % self.block_size() as u64, 0);
async fn write_aligned(&self, position: u64, buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
if position % self.block_size() as u64 != 0 {
return Err(Error::InvalidOperation);
}
if buffer.len() % self.block_size() != 0 {
return Err(Error::InvalidOperation);
}
let lba = position / self.block_size() as u64;
debug_assert_eq!(buffer.len() % self.block_size(), 0);
// let buffer_address = unsafe { buffer.as_physical_address() };
// debug_assert_eq!(buffer_address.into_u64() % self.block_size() as u64, 0);
let lba_count = buffer.len() / self.block_size();
if lba + lba_count as u64 > self.block_count() {
return Err(Error::InvalidOperation);
}
// TODO ArchitectureImpl::flush_data_cache()
#[cfg(target_arch = "x86_64")]
@@ -126,20 +140,19 @@ impl BlockDevice for NvmeNamespace {
core::arch::asm!("wbinvd");
}
let dma_buffer = DmaBuffer::from_slice(&*self.controller.dma, &buffer[..])?;
let result = self
.controller
.perform_io(
self.nsid,
lba,
lba_count,
dma_buffer.bus_address(),
buffer.bus_address(),
buffer.len(),
IoDirection::Write,
)
.await;
log::info!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?} @ {dma_buffer:p}");
log::info!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?} @ {buffer:p}");
result.map_err(NvmeError::into)
}

View File

@@ -12,11 +12,12 @@ use command::{ScsiReadCapacity, ScsiRequestSense, ScsiTestUnitReady};
use device_api::device::Device;
use libk::{
device::{block::BlockDevice, manager::probe_partitions},
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
error::Error,
fs::devfs,
task::runtime,
};
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageProvider, PageSlice};
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageProvider};
use libk_util::{
sync::{spin_rwlock::IrqSafeRwLock, IrqSafeSpinlock},
OneTimeInit,
@@ -105,49 +106,58 @@ impl ScsiDevice {
#[async_trait]
impl BlockDevice for ScsiDevice {
// TODO avoid copies by reading directly into the cache?
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
self.transport.lock().allocate_buffer(size)
}
async fn read_aligned(
&self,
position: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
if buffer.len() % self.lba_size != 0 {
log::warn!("scsi: buffer is not multiple of LBA size");
if position % self.lba_size as u64 != 0 {
log::warn!("scsi: misaligned read");
return Err(Error::InvalidArgument);
}
if buffer.len() % self.lba_size != 0 {
log::warn!("scsi: misaligned buffer size");
return Err(Error::InvalidArgument);
}
let lba_start = position / self.lba_size as u64;
let lba_count = buffer.len() / self.lba_size;
let lba_end = lba_start + lba_count as u64;
if lba_start.saturating_add(lba_count as u64) > self.lba_count {
log::warn!("scsi: read beyond medium end");
return Err(Error::InvalidArgument);
}
let lba_end = lba_start + lba_count as u64;
let mut transport = self.transport.lock();
let mut offset = 0;
// TODO DmaSliceMut subslicing
let (buffer, range) = buffer.into_parts();
let mut offset = range.start;
for i in (0..lba_count).step_by(self.max_lba_per_request) {
let lba = lba_start + i as u64;
let end = (lba + self.max_lba_per_request as u64).min(lba_end);
let count = (end - lba) as usize;
let amount = count * self.lba_size;
let slice =
unsafe { MaybeUninit::slice_assume_init_mut(&mut buffer[offset..offset + amount]) };
let len = transport.read(0, lba, count as _, slice).await?;
let dst_slice = buffer.slice_mut(offset..offset + amount);
let len = transport.read(0, lba, count as u16, dst_slice).await?;
if len != amount {
return Err(Error::InvalidArgument);
}
offset += amount;
}
Ok(())
}
async fn write_aligned(&self, _position: u64, _buffer: &PageSlice<u8>) -> Result<(), Error> {
// TODO AtaWriteDmaEx
async fn write_aligned(&self, _position: u64, _buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
Err(Error::NotImplemented)
}

View File

@@ -1,17 +1,24 @@
use core::{mem::MaybeUninit, ops::Deref};
use alloc::boxed::Box;
use async_trait::async_trait;
use libk::error::Error;
use libk::{
dma::{DmaBuffer, DmaSliceMut},
error::Error,
};
use crate::command::ScsiCommand;
#[async_trait]
pub trait ScsiTransport: Send + Sync {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error>;
/// Perform a no-data request
async fn perform_request_raw(
&mut self,
lun: u8,
request_data: &[u8],
response_buffer: &mut [u8],
response_buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error>;
fn max_bytes_per_request(&self) -> usize;
@@ -33,7 +40,7 @@ impl ScsiTransportWrapper {
lun: u8,
lba: u64,
lba_count: u16,
buffer: &mut [u8],
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error> {
if lba >= u32::MAX as u64 {
return Err(Error::InvalidArgument);
@@ -68,18 +75,28 @@ impl ScsiTransportWrapper {
[u8; R::RESPONSE_LEN]: Sized,
[u8; R::REQUEST_LEN]: Sized,
{
let mut response_buffer = [0; R::RESPONSE_LEN];
let mut response_buffer = self.allocate_buffer(R::RESPONSE_LEN)?;
let request_buffer = request.into_bytes();
let response_len = self
.inner
.perform_request_raw(lun, &request_buffer, &mut response_buffer)
.perform_request_raw(
lun,
&request_buffer,
response_buffer.slice_mut(0..R::RESPONSE_LEN),
)
.await?;
let response_bytes =
unsafe { MaybeUninit::slice_assume_init_ref(&response_buffer[..response_len]) };
R::parse_response(&response_buffer[..response_len])
R::parse_response(response_bytes)
}
}
pub fn max_bytes_per_request(&self) -> usize {
self.inner.max_bytes_per_request()
impl Deref for ScsiTransportWrapper {
type Target = dyn ScsiTransport;
fn deref(&self) -> &Self::Target {
self.inner.as_ref()
}
}

View File

@@ -3,7 +3,10 @@ use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use bytemuck::{Pod, Zeroable};
use libk::{dma::DmaBuffer, error::Error};
use libk::{
dma::{DmaBuffer, DmaSliceMut},
error::Error,
};
use ygg_driver_scsi::{transport::ScsiTransport, ScsiDevice};
use crate::{
@@ -50,7 +53,6 @@ struct Bbb {
device: Arc<UsbDeviceAccess>,
in_pipe: UsbBulkInPipeAccess,
out_pipe: UsbBulkOutPipeAccess,
buffer: DmaBuffer<[MaybeUninit<u8>]>,
last_tag: u32,
}
@@ -62,12 +64,10 @@ impl Bbb {
in_pipe: UsbBulkInPipeAccess,
out_pipe: UsbBulkOutPipeAccess,
) -> Result<Self, UsbError> {
let buffer = in_pipe.allocate_dma_buffer(32768)?;
Ok(Self {
device,
in_pipe,
out_pipe,
buffer,
last_tag: 0,
})
}
@@ -128,34 +128,37 @@ impl Bbb {
Ok(())
}
async fn read_response_data(&mut self, buffer: &mut [u8]) -> Result<usize, Error> {
if buffer.is_empty() {
async fn read_response_data(
&mut self,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error> {
if buffer.is_empty() {
return Ok(0);
}
let len = self
.in_pipe
.read_dma(&mut self.buffer, buffer.len())
.read_dma(buffer)
.await
.inspect_err(|error| log::error!("msc: DMA read error: {error:?}"))?;
let dma_slice = unsafe { MaybeUninit::slice_assume_init_ref(&self.buffer[..len]) };
buffer[..len].copy_from_slice(dma_slice);
Ok(len)
}
}
#[async_trait]
impl ScsiTransport for Bbb {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
Ok(self.in_pipe.allocate_dma_buffer(size)?)
}
// TODO DMA support for SCSI
async fn perform_request_raw(
&mut self,
lun: u8,
request_data: &[u8],
response_buffer: &mut [u8],
response_buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<usize, Error> {
if request_data.len() > 16 || response_buffer.len() > self.buffer.len() {
todo!()
// return Err(Error::InvalidArgument);
if request_data.len() > 16 || response_buffer.len() > self.max_bytes_per_request() {
return Err(Error::InvalidArgument);
}
let tag = self
@@ -167,7 +170,7 @@ impl ScsiTransport for Bbb {
}
fn max_bytes_per_request(&self) -> usize {
self.buffer.len()
32768
}
}

View File

@@ -2,7 +2,7 @@ use core::{mem::MaybeUninit, ops::Deref};
use alloc::boxed::Box;
use async_trait::async_trait;
use libk::dma::DmaBuffer;
use libk::dma::{DmaBuffer, DmaSlice, DmaSliceMut};
use crate::error::{TransferError, UsbError};
@@ -10,15 +10,11 @@ use super::UsbGenericPipe;
#[async_trait]
pub trait UsbNormalPipeIn: UsbGenericPipe {
async fn read_dma(
&self,
buffer: &mut DmaBuffer<[MaybeUninit<u8>]>,
limit: usize,
) -> Result<usize, UsbError>;
async fn read_dma(&self, buffer: DmaSliceMut<'_, MaybeUninit<u8>>) -> Result<usize, UsbError>;
async fn read(&self, buffer: &mut [u8]) -> Result<usize, UsbError> {
let mut dma_buffer = self.allocate_dma_buffer(buffer.len())?;
let len = self.read_dma(&mut dma_buffer, buffer.len()).await?;
let len = self.read_dma(dma_buffer.slice_mut(0..buffer.len())).await?;
let dma_slice = unsafe { MaybeUninit::slice_assume_init_ref(&dma_buffer[..len]) };
buffer[..len].copy_from_slice(dma_slice);
Ok(len)
@@ -35,14 +31,14 @@ pub trait UsbNormalPipeIn: UsbGenericPipe {
#[async_trait]
pub trait UsbNormalPipeOut: UsbGenericPipe {
async fn write_dma(&self, buffer: &DmaBuffer<[u8]>) -> Result<usize, UsbError>;
async fn write_dma(&self, buffer: DmaSlice<'_, u8>) -> Result<usize, UsbError>;
async fn write(&self, buffer: &[u8]) -> Result<usize, UsbError> {
let mut dma_buffer = self.allocate_dma_buffer(buffer.len())?;
MaybeUninit::copy_from_slice(&mut dma_buffer, buffer);
let dma_buffer = unsafe { DmaBuffer::assume_init_slice(dma_buffer) };
self.write_dma(&dma_buffer).await
self.write_dma(dma_buffer.slice(0..buffer.len())).await
}
}

View File

@@ -2,7 +2,7 @@ use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use libk::dma::DmaBuffer;
use libk::dma::{DmaBuffer, DmaSlice, DmaSliceMut};
use ygg_driver_usb::{
error::{TransferError, UsbError},
pipe::{
@@ -164,17 +164,12 @@ impl UsbGenericPipe for NormalInPipe {
#[async_trait]
impl UsbNormalPipeIn for NormalInPipe {
async fn read_dma(
&self,
buffer: &mut DmaBuffer<[MaybeUninit<u8>]>,
limit: usize,
) -> Result<usize, UsbError> {
let len = limit.min(buffer.len());
async fn read_dma(&self, buffer: DmaSliceMut<'_, MaybeUninit<u8>>) -> Result<usize, UsbError> {
let result = self
.ring
.normal_transfer(self.xhci.as_ref(), buffer.bus_address(), len)
.normal_transfer(self.xhci.as_ref(), buffer.bus_address(), buffer.len())
.await;
allow_short_packet(len, result)
allow_short_packet(buffer.len(), result)
}
}
@@ -186,7 +181,7 @@ impl UsbGenericPipe for NormalOutPipe {
#[async_trait]
impl UsbNormalPipeOut for NormalOutPipe {
async fn write_dma(&self, buffer: &DmaBuffer<[u8]>) -> Result<usize, UsbError> {
async fn write_dma(&self, buffer: DmaSlice<'_, u8>) -> Result<usize, UsbError> {
self.ring
.normal_transfer(self.xhci.as_ref(), buffer.bus_address(), buffer.len())
.await

View File

@@ -1,48 +1,37 @@
#![allow(clippy::missing_transmute_annotations)]
use core::{
marker::PhantomData,
mem::MaybeUninit,
ops::{Deref, DerefMut},
};
use alloc::sync::Arc;
use kernel_arch::mem::PhysicalMemoryAllocator;
use libk_mm::{address::PhysicalAddress, phys::GlobalPhysicalAllocator, PageBox};
use libk_util::{lru_hash_table::LruCache, sync::spin_rwlock::IrqSafeRwLock};
use yggdrasil_abi::error::Error;
use crate::task::sync::AsyncMutex;
use crate::{dma::DmaBuffer, task::sync::AsyncMutex};
use super::BlockDevice;
pub struct CachedSegment<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
data: PageBox<[u8], A>,
pub struct CachedSegment {
data: DmaBuffer<[u8]>,
dirty: bool,
}
pub struct UncachedCache<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
pub struct UncachedCache {
device: Arc<dyn BlockDevice>,
block_size: usize,
_pd: PhantomData<A>,
}
pub enum DeviceMapper<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
Uncached(UncachedCache<A>),
Cached(BlockCache<A>),
pub enum DeviceMapper {
Uncached(UncachedCache),
Cached(BlockCache),
}
pub struct BlockCache<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
pub struct BlockCache {
device: Arc<dyn BlockDevice>,
block_size: usize,
segment_size: usize,
cache: AsyncMutex<LruCache<u64, Arc<IrqSafeRwLock<CachedSegment<A>>>>>,
cache: AsyncMutex<LruCache<u64, Arc<IrqSafeRwLock<CachedSegment>>>>,
}
impl DeviceMapper {
@@ -54,7 +43,7 @@ impl DeviceMapper {
bucket_count: usize,
filesystem: &str,
) -> Result<DeviceMapper, Error> {
DeviceMapper::cached_with_capacity_in(
BlockCache::with_capacity(
device,
block_size,
segment_size,
@@ -62,6 +51,7 @@ impl DeviceMapper {
bucket_count,
filesystem,
)
.map(DeviceMapper::Cached)
}
pub fn uncached(
@@ -69,35 +59,6 @@ impl DeviceMapper {
block_size: usize,
filesystem: &str,
) -> Result<DeviceMapper, Error> {
DeviceMapper::uncached_in(device, block_size, filesystem)
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DeviceMapper<A> {
pub fn cached_with_capacity_in(
device: Arc<dyn BlockDevice>,
block_size: usize,
segment_size: usize,
bucket_capacity: usize,
bucket_count: usize,
filesystem: &str,
) -> Result<DeviceMapper<A>, Error> {
BlockCache::<A>::with_capacity_in(
device,
block_size,
segment_size,
bucket_capacity,
bucket_count,
filesystem,
)
.map(DeviceMapper::<A>::Cached)
}
pub fn uncached_in(
device: Arc<dyn BlockDevice>,
block_size: usize,
filesystem: &str,
) -> Result<DeviceMapper<A>, Error> {
if block_size % device.block_size() != 0 {
log::error!(
"Couldn't create block mapper for {filesystem}: \
@@ -105,11 +66,7 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DeviceMapper<A> {
);
return Err(Error::InvalidArgument);
}
let uncache = UncachedCache::<A> {
device,
block_size,
_pd: PhantomData,
};
let uncache = UncachedCache { device, block_size };
Ok(Self::Uncached(uncache))
}
@@ -154,7 +111,7 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DeviceMapper<A> {
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> UncachedCache<A> {
impl UncachedCache {
pub fn device(&self) -> &Arc<dyn BlockDevice> {
&self.device
}
@@ -171,9 +128,11 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> UncachedCache<A> {
);
return Err(Error::InvalidArgument);
}
let mut data = PageBox::<_, A>::new_uninit_slice_in(self.block_size)?;
self.device.read_aligned(pos, data.as_slice_mut()).await?;
let result = mapper(unsafe { data.assume_init_slice_ref() })?;
let mut buffer = self.device.allocate_buffer(self.block_size)?;
self.device
.read_aligned(pos, buffer.slice_mut(0..self.block_size))
.await?;
let result = mapper(unsafe { MaybeUninit::slice_assume_init_ref(&buffer[..]) })?;
Ok(result)
}
@@ -190,27 +149,31 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> UncachedCache<A> {
);
return Err(Error::InvalidArgument);
}
let mut data = PageBox::<_, A>::new_uninit_slice_in(self.block_size)?;
let mut buffer = self.device.allocate_buffer(self.block_size)?;
// No need to read a block only to then fully rewrite it
if size != self.block_size {
self.device.read_aligned(pos, data.as_slice_mut()).await?;
self.device
.read_aligned(pos, buffer.slice_mut(0..self.block_size))
.await?;
}
let mut data = unsafe { data.assume_init_slice() };
let result = mapper(&mut data[..])?;
self.device.write_aligned(pos, data.as_slice()).await?;
let mut buffer = unsafe { DmaBuffer::assume_init_slice(buffer) };
let result = mapper(&mut buffer[..])?;
self.device
.write_aligned(pos, buffer.slice(0..self.block_size))
.await?;
Ok(result)
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> BlockCache<A> {
pub fn with_capacity_in(
impl BlockCache {
pub fn with_capacity(
device: Arc<dyn BlockDevice>,
block_size: usize,
segment_size: usize,
bucket_capacity: usize,
bucket_count: usize,
filesystem: &str,
) -> Result<BlockCache<A>, Error> {
) -> Result<Self, Error> {
if block_size % device.block_size() != 0 {
log::error!(
"Couldn't create block cache for {filesystem}: \
@@ -251,17 +214,13 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> BlockCache<A> {
&self.device
}
async fn evict_block(
&self,
segment_position: u64,
block: Arc<IrqSafeRwLock<CachedSegment<A>>>,
) {
async fn evict_block(&self, segment_position: u64, block: Arc<IrqSafeRwLock<CachedSegment>>) {
let read = block.read();
if read.dirty {
assert_eq!(segment_position % self.segment_size as u64, 0);
if let Err(err) = self
.device
.write_aligned(segment_position, read.data.as_slice())
.write_aligned(segment_position, read.data.slice(0..self.segment_size))
.await
{
log::error!("Disk error: flushing block {}: {:?}", segment_position, err);
@@ -272,12 +231,12 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> BlockCache<A> {
async fn fetch_block(
&self,
segment_position: u64,
) -> Result<Arc<IrqSafeRwLock<CachedSegment<A>>>, Error> {
let mut data = PageBox::new_uninit_slice_in(self.segment_size)?;
) -> Result<Arc<IrqSafeRwLock<CachedSegment>>, Error> {
let mut buffer = self.device.allocate_buffer(self.segment_size)?;
self.device
.read_aligned(segment_position, data.as_slice_mut())
.read_aligned(segment_position, buffer.slice_mut(0..self.segment_size))
.await?;
let data = unsafe { data.assume_init_slice() };
let data = unsafe { DmaBuffer::assume_init_slice(buffer) };
Ok(Arc::new(IrqSafeRwLock::new(CachedSegment {
data,
dirty: false,
@@ -287,7 +246,7 @@ impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> BlockCache<A> {
async fn entry(
&self,
segment_position: u64,
) -> Result<Arc<IrqSafeRwLock<CachedSegment<A>>>, Error> {
) -> Result<Arc<IrqSafeRwLock<CachedSegment>>, Error> {
assert_eq!(segment_position % self.segment_size as u64, 0);
let mut lock = self.cache.lock().await;
let (value, evicted) = lock
@@ -349,354 +308,23 @@
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> CachedSegment<A> {
impl CachedSegment {
pub fn set_dirty(&mut self) {
self.dirty = true;
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Deref for CachedSegment<A> {
type Target = PageBox<[u8], A>;
impl Deref for CachedSegment {
type Target = DmaBuffer<[u8]>;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DerefMut for CachedSegment<A> {
impl DerefMut for CachedSegment {
fn deref_mut(&mut self) -> &mut Self::Target {
self.dirty = true;
&mut self.data
}
}
// #[cfg(test)]
// mod tests {
// use core::{
// ffi::c_void,
// mem::MaybeUninit,
// ptr::null_mut,
// sync::atomic::{AtomicBool, Ordering},
// };
// use std::{io, sync::Mutex};
//
// use async_trait::async_trait;
// use kernel_arch::mem::PhysicalMemoryAllocator;
// use libk_mm::{address::PhysicalAddress, PageBox, PageSlice};
// use yggdrasil_abi::error::Error;
//
// use crate::vfs::block::{BlockDevice, NgBlockDevice, NgBlockDeviceWrapper};
//
// use super::BlockCache;
//
// struct DummyBlock {
// block_size: usize,
// block_count: usize,
// deny_writes: AtomicBool,
// data: Mutex<Vec<u8>>,
// }
//
// struct PA;
//
// impl DummyBlock {
// pub fn new(block_size: usize, block_count: usize) -> Self {
// let mut data = vec![0; block_size * block_count];
// for i in 0..block_count {
// let block = &mut data[block_size * i..block_size * (i + 1)];
// block.fill(i as u8);
// }
// Self {
// data: Mutex::new(data),
// deny_writes: AtomicBool::new(false),
// block_size,
// block_count,
// }
// }
// }
//
// #[async_trait::async_trait]
// impl NgBlockDevice for DummyBlock {
// type Error = Error;
//
// async fn read(
// &self,
// lba: u64,
// buffer: &mut PageSlice<MaybeUninit<u8>>,
// ) -> Result<(), Error> {
// let start = lba as usize * self.block_size;
// let end = start + buffer.len();
//
// if end > self.block_count * self.block_size {
// return Err(Error::InvalidArgument);
// }
//
// let data = self.data.lock().unwrap();
// let buffer = unsafe { MaybeUninit::slice_assume_init_mut(&mut buffer[..]) };
// buffer.copy_from_slice(&data[start..end]);
//
// Ok(())
// }
//
// async fn write(&self, lba: u64, buffer: &PageSlice<u8>) -> Result<(), Error> {
// if self.deny_writes.load(Ordering::Acquire) {
// panic!("write() with deny_writes = true");
// }
//
// let start = lba as usize * self.block_size;
// let end = start + buffer.len();
//
// if end > self.block_count * self.block_size {
// return Err(Error::InvalidArgument);
// }
//
// let mut data = self.data.lock().unwrap();
// data[start..end].copy_from_slice(&buffer[..]);
//
// Ok(())
// }
//
// fn block_size(&self) -> usize {
// self.block_size
// }
//
// fn block_count(&self) -> usize {
// self.block_count
// }
// }
//
// impl PhysicalMemoryAllocator for PA {
// type Address = PhysicalAddress;
//
// unsafe fn free_page(page: Self::Address) {
// let base = page.try_into_usize().unwrap();
// let base = core::ptr::with_exposed_provenance_mut::<c_void>(base);
// if unsafe { libc::munmap(base, 0x1000) } != 0 {
// let err = io::Error::last_os_error();
// panic!("free_page: munmap returned {err}");
// }
// }
//
// fn allocate_page() -> Result<Self::Address, Error> {
// Self::allocate_contiguous_pages(1)
// }
//
// fn allocate_contiguous_pages(count: usize) -> Result<Self::Address, Error> {
// let base = unsafe {
// libc::mmap(
// null_mut(),
// count * 0x1000,
// libc::PROT_READ | libc::PROT_WRITE,
// libc::MAP_ANON | libc::MAP_PRIVATE,
// -1,
// 0,
// )
// };
// if base != libc::MAP_FAILED {
// let base = base.addr();
// Ok(PhysicalAddress::from_usize(base))
// } else {
// Err(Error::OutOfMemory)
// }
// }
// }
//
// const BS: usize = 1024;
//
// // The test must not crash with denied writes
// #[tokio::test]
// async fn test_no_modification() {
// let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
// let wrapper = NgBlockDeviceWrapper::new(device);
// let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, BS, 64, 8);
//
// device.deny_writes.store(true, Ordering::Release);
// cache
// .try_with(1 * BS as u64, |block| {
// assert!(block.iter().all(|x| *x == 1));
// Ok(())
// })
// .await
// .unwrap();
// cache
// .try_with(2 * BS as u64, |block| {
// assert!(block.iter().all(|x| *x == 2));
// Ok(())
// })
// .await
// .unwrap();
//
// cache.flush().await;
// }
//
// #[tokio::test]
// async fn test_partial_modification() {
// let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
// let wrapper = NgBlockDeviceWrapper::new(device);
// // 8 * 8
// let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, BS, 8, 8);
//
// const LBA: u64 = 1;
// cache
// .try_with_mut(LBA * BS as u64, 16, |block| {
// block[0..16].fill(0x12);
// Ok(())
// })
// .await
// .unwrap();
// cache.flush().await;
//
// {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(LBA, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// buffer[0..16].iter().for_each(|&x| {
// assert_eq!(x, 0x12);
// });
// buffer[16..].iter().for_each(|&x| {
// assert_eq!(x, LBA as u8);
// });
// }
//
// cache
// .try_with_mut(LBA * BS as u64, 16, |block| {
// block[16..32].fill(0x23);
// Ok(())
// })
// .await
// .unwrap();
// cache
// .try_with_mut(LBA * BS as u64, 16, |block| {
// block[48..64].fill(0x34);
// Ok(())
// })
// .await
// .unwrap();
// cache
// .try_with_mut(LBA * BS as u64, 128, |block| {
// block[128..256].fill(0xF1);
// Ok(())
// })
// .await
// .unwrap();
// cache.flush().await;
//
// {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(LBA, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// buffer[0..16].iter().for_each(|&x| {
// assert_eq!(x, 0x12);
// });
// buffer[16..32].iter().for_each(|&x| {
// assert_eq!(x, 0x23);
// });
// buffer[48..64].iter().for_each(|&x| {
// assert_eq!(x, 0x34);
// });
// buffer[128..256].iter().for_each(|&x| {
// assert_eq!(x, 0xF1);
// });
// buffer[32..48].iter().for_each(|&x| {
// assert_eq!(x, LBA as u8);
// });
// buffer[64..128].iter().for_each(|&x| {
// assert_eq!(x, LBA as u8);
// });
// buffer[256..].iter().for_each(|&x| {
// assert_eq!(x, LBA as u8);
// });
// }
// }
//
// #[tokio::test]
// async fn test_implicit_eviction() {
// let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
// let wrapper = NgBlockDeviceWrapper::new(device);
// // 8 * 8
// let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, BS, 8, 8);
//
// fn mapper(x: u64) -> u8 {
// (x + 3) as u8
// }
//
// // Go through all blocks, fill those with some values
// for i in 0..1024 {
// cache
// .try_with_mut(i * BS as u64, BS, |block| {
// block.fill(mapper(i));
// Ok(())
// })
// .await
// .unwrap();
// }
// cache.flush().await;
//
// for i in 0..1024 {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(i, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// assert!(buffer.iter().all(|x| *x == mapper(i)));
// }
//
// for i in 0..1023 {
// cache
// .try_with_mut(i * BS as u64, BS, |block| {
// block.fill(0x12);
// Ok(())
// })
// .await
// .unwrap();
// cache
// .try_with_mut((i + 1) * BS as u64, BS, |block| {
// block.fill(0x23);
// Ok(())
// })
// .await
// .unwrap();
// }
//
// for i in 0..1023 {
// cache
// .try_with_mut(i * BS as u64, BS, |block| {
// block.iter_mut().for_each(|x| *x += 1);
// Ok(())
// })
// .await
// .unwrap();
// cache
// .try_with_mut((i + 1) * BS as u64, BS, |block| {
// block.iter_mut().for_each(|x| *x += 2);
// Ok(())
// })
// .await
// .unwrap();
// }
//
// cache.flush().await;
//
// {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(0, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// buffer.iter().for_each(|&x| {
// assert_eq!(x, 0x13, "block 0 mismatch");
// });
// }
// for i in 1..1023 {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(i, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// buffer.iter().for_each(|&x| {
// assert_eq!(x, 0x15, "block {i} mismatch");
// });
// }
// {
// let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
// device.read(1023, buffer.as_slice_mut()).await.unwrap();
// let buffer = unsafe { buffer.assume_init_slice() };
// buffer.iter().for_each(|&x| {
// assert_eq!(x, 0x25, "block 1023 mismatch");
// });
// }
// }
// }

View File

@@ -3,10 +3,13 @@ use core::{any::Any, mem::MaybeUninit, ops::Deref};
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use device_api::device::Device;
use libk_mm::{PageBox, PageProvider, PageSlice};
use libk_mm::PageProvider;
use yggdrasil_abi::error::Error;
use crate::vfs::{CommonImpl, NodeRef};
use crate::{
dma::{DmaBuffer, DmaSlice, DmaSliceMut},
vfs::{CommonImpl, NodeRef},
};
pub mod cache;
pub mod partition;
@@ -90,15 +93,17 @@ impl Iterator for Chunked {
#[async_trait]
pub trait BlockDevice: Device + PageProvider {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error>;
async fn read_aligned(
&self,
position: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
let _ = (position, buffer);
Err(Error::NotImplemented)
}
async fn write_aligned(&self, position: u64, buffer: &PageSlice<u8>) -> Result<(), Error> {
async fn write_aligned(&self, position: u64, buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
let _ = (position, buffer);
Err(Error::NotImplemented)
}
@@ -108,13 +113,13 @@ pub trait BlockDevice: Device + PageProvider {
let Some((iter, max_lba_count)) = Chunked::begin(self, position, buffer.len()) else {
return Ok(0);
};
let mut read_buffer = PageBox::new_uninit_slice(max_lba_count * bs)?;
let mut read_buffer = self.allocate_buffer(max_lba_count * bs)?;
let mut total = 0;
for (lba, block_count, block_offset, offset, amount) in iter {
let read_buffer_slice = read_buffer.as_slice_mut().subslice_mut(..block_count * bs);
self.read_aligned(lba * bs as u64, read_buffer_slice)
.await?;
let slice = read_buffer.slice_mut(0..block_count * bs);
self.read_aligned(lba * bs as u64, slice).await?;
let src = unsafe {
MaybeUninit::slice_assume_init_ref(
@@ -143,19 +148,19 @@ pub trait BlockDevice: Device + PageProvider {
let amount = core::cmp::min(bs - block_offset, buf.len());
let mut block = PageBox::new_uninit_slice(bs)?;
let mut dma_buffer = self.allocate_buffer(bs)?;
if amount != bs {
// Need to read the block first -- it's modified partially
self.read_aligned(lba * bs as u64, block.as_slice_mut())
self.read_aligned(lba * bs as u64, dma_buffer.slice_mut(0..bs))
.await?;
}
let mut block = unsafe { block.assume_init_slice() };
let mut block = unsafe { DmaBuffer::assume_init_slice(dma_buffer) };
block[block_offset..block_offset + amount].copy_from_slice(&buf[..amount]);
// Write the block back
self.write_aligned(lba * bs as u64, block.as_slice())
self.write_aligned(lba * bs as u64, block.slice(0..bs))
.await?;
buf = &buf[amount..];
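
For reference, the trait's default unaligned-read path above follows the same pattern end to end: DMA into a device-allocated buffer, then copy out into ordinary memory. A condensed sketch of that pattern, assuming position and length are already block-aligned (the helper is illustrative and uses this file's imports):

    async fn read_exact_aligned(
        dev: &dyn BlockDevice,
        position: u64,
        out: &mut [u8],
    ) -> Result<(), Error> {
        debug_assert_eq!(out.len() % dev.block_size(), 0);
        let mut dma = dev.allocate_buffer(out.len())?;
        // DMA lands in the device-allocated buffer...
        dev.read_aligned(position, dma.slice_mut(0..out.len())).await?;
        // ...and is then copied out into the caller's ordinary memory.
        let src = unsafe { MaybeUninit::slice_assume_init_ref(&dma[..]) };
        out.copy_from_slice(src);
        Ok(())
    }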

View File

@@ -1,7 +1,6 @@
use alloc::{sync::Arc, vec::Vec};
use bytemuck::{Pod, Zeroable};
use libk_mm::PageBox;
use static_assertions::const_assert_eq;
use uuid::Uuid;
use yggdrasil_abi::error::Error;
@@ -9,7 +8,7 @@ use crate::device::block::BlockDevice;
use super::Partition;
#[derive(Clone, Copy)]
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
struct GptHeader {
signature: [u8; 8],
@@ -26,7 +25,7 @@ struct GptHeader {
partition_table_len: u32,
partition_table_entry_size: u32,
partition_table_crc32: u32,
_1: [u8; 420],
_1: u32,
}
#[derive(Clone, Copy, Zeroable, Pod)]
@@ -39,18 +38,18 @@
attrs: u64,
}
const_assert_eq!(size_of::<GptHeader>(), 512);
async unsafe fn read_struct_lba<T>(dev: &dyn BlockDevice, lba: u64) -> Result<PageBox<T>, Error> {
assert_eq!(size_of::<T>(), 512);
let mut data = PageBox::new_uninit()?;
dev.read_aligned(lba * 512, PageBox::as_bytes_mut(&mut data))
.await?;
Ok(data.assume_init())
async unsafe fn read_struct_lba<T: Pod>(
dev: &dyn BlockDevice,
lba: u64,
buffer: &mut [u8],
) -> Result<T, Error> {
dev.read_exact(lba * 512, buffer).await?;
// pod_read_unaligned avoids from_bytes_mut's alignment panic on a plain byte buffer
Ok(bytemuck::pod_read_unaligned(buffer))
}
pub(crate) async fn probe_gpt(dev: &Arc<dyn BlockDevice>) -> Result<Option<Vec<Partition>>, Error> {
let header = unsafe { read_struct_lba::<GptHeader>(dev.as_ref(), 1) }.await?;
let mut header = [0; size_of::<GptHeader>()];
let header = unsafe { read_struct_lba::<GptHeader>(dev.as_ref(), 1, &mut header) }.await?;
if &header.signature != b"EFI PART" {
// Not a GPT partition table

View File

@@ -3,9 +3,11 @@ use core::mem::MaybeUninit;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use device_api::device::Device;
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageProvider, PageSlice};
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageProvider};
use yggdrasil_abi::error::Error;
use crate::dma::{DmaBuffer, DmaSlice, DmaSliceMut};
use super::BlockDevice;
pub mod gpt;
@@ -56,28 +58,24 @@ impl Device for Partition {
#[async_trait]
impl BlockDevice for Partition {
fn allocate_buffer(&self, size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
self.device.allocate_buffer(size)
}
async fn read_aligned(
&self,
position: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
buffer: DmaSliceMut<'_, MaybeUninit<u8>>,
) -> Result<(), Error> {
// TODO check against partition range
debug_assert_eq!(position % self.device.block_size() as u64, 0);
debug_assert_eq!(buffer.len() % self.device.block_size(), 0);
self.device
.read_aligned(self.lba_start * self.block_size() as u64 + position, buffer)
.await
// TODO check against partition bounds
let lba = self.lba_start * self.block_size() as u64 + position;
self.device.read_aligned(lba, buffer).await
}
async fn write_aligned(&self, position: u64, buffer: &PageSlice<u8>) -> Result<(), Error> {
// TODO check against partition range
debug_assert_eq!(position % self.device.block_size() as u64, 0);
debug_assert_eq!(buffer.len() % self.device.block_size(), 0);
self.device
.write_aligned(self.lba_start * self.block_size() as u64 + position, buffer)
.await
async fn write_aligned(&self, position: u64, buffer: DmaSlice<'_, u8>) -> Result<(), Error> {
// TODO check against partition bounds
let lba = self.lba_start * self.block_size() as u64 + position;
self.device.write_aligned(lba, buffer).await
}
fn block_size(&self) -> usize {

View File

@@ -20,7 +20,7 @@ pub mod font;
pub use color::Color;
use crate::task::thread::Thread;
use crate::{dma::DmaBuffer, task::thread::Thread};
use super::block::BlockDevice;
@@ -151,6 +151,10 @@ impl DisplayWrapper {
#[async_trait]
impl BlockDevice for DisplayWrapper {
fn allocate_buffer(&self, _size: usize) -> Result<DmaBuffer<[MaybeUninit<u8>]>, Error> {
Err(Error::NotImplemented)
}
async fn read(&self, _pos: u64, _buf: &mut [u8]) -> Result<usize, Error> {
Err(Error::InvalidOperation)
}

View File

@@ -2,7 +2,7 @@ use core::{
alloc::Layout,
fmt,
mem::{self, MaybeUninit},
ops::{Deref, DerefMut, Sub},
ops::{Deref, DerefMut, Range, Sub},
ptr::{self, NonNull},
};
@@ -44,6 +44,16 @@ pub struct DmaBuffer<T: ?Sized> {
page_count: usize,
}
pub struct DmaSlice<'a, T> {
buffer: &'a DmaBuffer<[T]>,
range: Range<usize>,
}
pub struct DmaSliceMut<'a, T> {
buffer: &'a mut DmaBuffer<[T]>,
range: Range<usize>,
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord, Pod, Zeroable)]
#[repr(transparent)]
pub struct BusAddress(u64);
@@ -179,6 +189,32 @@ impl<T> DmaBuffer<T> {
}
}
impl<T> DmaBuffer<[T]> {
fn slice_range_check(&self, range: &Range<usize>) {
assert!(
range.end <= self.len() && range.start <= self.len(),
"DMA buffer slice range out of bounds"
);
assert!(range.start <= range.end, "Invalid DMA slice range");
}
pub fn slice(&self, range: Range<usize>) -> DmaSlice<T> {
self.slice_range_check(&range);
DmaSlice {
buffer: self,
range,
}
}
pub fn slice_mut(&mut self, range: Range<usize>) -> DmaSliceMut<T> {
self.slice_range_check(&range);
DmaSliceMut {
buffer: self,
range,
}
}
}
impl<T: ?Sized> DmaBuffer<T> {
#[inline]
pub fn page_count(&self) -> usize {
@@ -262,3 +298,59 @@ impl fmt::LowerHex for BusAddress {
fmt::LowerHex::fmt(&self.0, f)
}
}
impl<'a, T> DmaSlice<'a, T> {
pub fn bus_address(&self) -> BusAddress {
// range.start counts elements; convert to a byte offset for the bus address
self.buffer.bus_address().add(self.range.start * mem::size_of::<T>())
}
// TODO subslicing
pub fn into_parts(self) -> (&'a DmaBuffer<[T]>, Range<usize>) {
(self.buffer, self.range)
}
}
impl<T> Deref for DmaSlice<'_, T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
&self.buffer[self.range.clone()]
}
}
impl<T> fmt::Pointer for DmaSlice<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:p}[{:?}]", *self.buffer, self.range)
}
}
impl<'a, T> DmaSliceMut<'a, T> {
pub fn bus_address(&self) -> BusAddress {
// range.start counts elements; convert to a byte offset for the bus address
self.buffer.bus_address().add(self.range.start * mem::size_of::<T>())
}
// TODO subslicing
pub fn into_parts(self) -> (&'a mut DmaBuffer<[T]>, Range<usize>) {
(self.buffer, self.range)
}
}
impl<T> Deref for DmaSliceMut<'_, T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
&self.buffer[self.range.clone()]
}
}
impl<T> DerefMut for DmaSliceMut<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.buffer[self.range.clone()]
}
}
impl<T> fmt::Pointer for DmaSliceMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:p}[{:?}]", *self.buffer, self.range)
}
}
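
A brief usage sketch for the new slice types (the buffer setup and 1024-byte size are illustrative): a slice borrows its parent DmaBuffer, validates the range, and reports the parent's bus address offset to the slice start, which is what lets a driver program a DMA engine against a sub-range without copying.

    fn dma_windows(buffer: &mut DmaBuffer<[u8]>) {
        // Assumes buffer.len() >= 1024. Immutable window over the first 512 bytes.
        let head = buffer.slice(0..512);
        assert_eq!(head.bus_address(), buffer.bus_address());
        assert_eq!(head.len(), 512);
        drop(head);
        // Mutable window a device could DMA into; its bus address is the
        // parent's, advanced to the slice start.
        let mut tail = buffer.slice_mut(512..1024);
        tail.fill(0);
        let _ = tail.bus_address();
    }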