ext2: better inode cache, mkdir

Mark Poliakov 2024-12-04 18:28:27 +02:00
parent dd542ed176
commit 183ec14544
45 changed files with 2003 additions and 832 deletions

Cargo.lock (generated)
View File

@@ -57,6 +57,21 @@ dependencies = [
"log",
]
[[package]]
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"gimli",
]
[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]]
name = "ahash"
version = "0.8.11"
@@ -188,6 +203,21 @@ version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "backtrace"
version = "0.3.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
"windows-targets",
]
[[package]]
name = "bit-set"
version = "0.5.3"
@@ -519,6 +549,7 @@ dependencies = [
name = "ext2"
version = "0.1.0"
dependencies = [
"async-trait",
"bytemuck",
"libk",
"libk-mm",
@@ -669,6 +700,12 @@ dependencies = [
"wasi",
]
[[package]]
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "git-version"
version = "0.3.9"
@@ -974,6 +1011,7 @@ dependencies = [
"elf 0.7.2",
"futures-util",
"kernel-arch",
"libc",
"libk-device",
"libk-mm",
"libk-util",
@@ -982,6 +1020,7 @@ dependencies = [
"serde",
"serde_json",
"static_assertions",
"tokio",
"uuid",
"yggdrasil-abi",
]
@@ -1166,6 +1205,15 @@ dependencies = [
"bytemuck",
]
[[package]]
name = "miniz_oxide"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
dependencies = [
"adler2",
]
[[package]]
name = "num-derive"
version = "0.3.3"
@@ -1187,6 +1235,15 @@ dependencies = [
"libm 0.2.11",
]
[[package]]
name = "object"
version = "0.36.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.20.2"
@@ -1409,6 +1466,12 @@ dependencies = [
"log",
]
[[package]]
name = "rustc-demangle"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]]
name = "rustc-std-workspace-alloc"
version = "1.0.0"
@@ -1673,6 +1736,28 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b9e2fdb3a1e862c0661768b7ed25390811df1947a8acbfbefe09b47078d93c4"
[[package]]
name = "tokio"
version = "1.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
dependencies = [
"backtrace",
"pin-project-lite",
"tokio-macros",
]
[[package]]
name = "tokio-macros"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.87",
]
[[package]]
name = "toml"
version = "0.8.19"

View File

@@ -72,6 +72,10 @@ aarch64-cpu = "10.0.0"
discrete_range_map = { git = "https://git.alnyan.me/yggdrasil/discrete_range_map.git" }
# Test dependencies
tokio = { version = "1.42.0", default-features = false }
libc = "*"
[workspace.dependencies.elf]
version = "0.7.2"
git = "https://git.alnyan.me/yggdrasil/yggdrasil-elf.git"

View File

@@ -97,15 +97,19 @@ impl Architecture for ArchitectureImpl {
) -> Option<&Self::CpuFeatures> {
unimplemented!()
}
fn ipi_queue(_cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
None
}
}
impl KernelTableManager for KernelTableManagerImpl {
fn virtualize(_phys: u64) -> usize {
unimplemented!()
fn virtualize(phys: u64) -> usize {
phys.try_into().unwrap()
}
fn physicalize(_virt: usize) -> u64 {
unimplemented!()
fn physicalize(virt: usize) -> u64 {
virt.try_into().unwrap()
}
unsafe fn map_device_pages(
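
Note: the hunk above replaces the unimplemented!() stubs with plain integer conversions, which fits a hosted or test build where kernel memory is identity-mapped. A minimal sketch of the same idea, assuming std and a toy trait rather than the kernel's real KernelTableManager (the names here are illustrative):

// Identity-mapped address translation: virtualize/physicalize are plain
// integer conversions that only fail if the value does not fit the
// target width.
trait TableManager {
    fn virtualize(phys: u64) -> usize;
    fn physicalize(virt: usize) -> u64;
}

struct IdentityManager;

impl TableManager for IdentityManager {
    fn virtualize(phys: u64) -> usize {
        // Mirrors the try_into().unwrap() above: on a 32-bit host a
        // physical address past 4 GiB would panic here.
        phys.try_into().unwrap()
    }
    fn physicalize(virt: usize) -> u64 {
        virt as u64 // usize -> u64 never truncates on 64-bit-or-smaller targets
    }
}

fn main() {
    assert_eq!(IdentityManager::virtualize(0x1000), 0x1000usize);
    assert_eq!(IdentityManager::physicalize(0x2000), 0x2000u64);
}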

View File

@@ -463,7 +463,7 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
fn user(context: UserContextInfo) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 8;
const USER_TASK_PAGES: usize = 16;
let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();

View File

@@ -1,10 +1,13 @@
use core::mem::MaybeUninit;
use core::{mem::MaybeUninit, time::Duration};
use alloc::{boxed::Box, format};
use async_trait::async_trait;
use kernel_fs::devfs;
use libk::vfs::block::{probe_partitions, NgBlockDevice, NgBlockDeviceWrapper};
use libk_mm::{address::AsPhysicalAddress, PageSlice};
use libk::{
task::runtime,
vfs::block::{probe_partitions, NgBlockDevice, NgBlockDeviceWrapper},
};
use libk_mm::{address::AsPhysicalAddress, PageBox, PageSlice};
use crate::{command::IdentifyNamespaceRequest, IoDirection};
@@ -67,31 +70,39 @@ impl NgBlockDevice for NvmeDrive {
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let buffer_address = unsafe { buffer.as_physical_address() };
debug_assert_eq!(buffer_address.into_u64() % self.block_size() as u64, 0);
let lba_count = buffer.len() / self.block_size();
self.controller
.perform_io(
self.nsid,
lba,
lba_count,
unsafe { buffer.as_physical_address() },
IoDirection::Read,
)
.await
let result = self
.controller
.perform_io(self.nsid, lba, lba_count, buffer_address, IoDirection::Read)
.await;
log::info!(target: "io", "read #{lba}, {lba_count} blocks -> {result:?} @ {buffer_address:#x}");
result
}
async fn write(&self, lba: u64, buffer: &PageSlice<u8>) -> Result<(), NvmeError> {
debug_assert_eq!(buffer.len() % self.block_size(), 0);
let buffer_address = unsafe { buffer.as_physical_address() };
debug_assert_eq!(buffer_address.into_u64() % self.block_size() as u64, 0);
let lba_count = buffer.len() / self.block_size();
self.controller
let result = self
.controller
.perform_io(
self.nsid,
lba,
lba_count,
unsafe { buffer.as_physical_address() },
buffer_address,
IoDirection::Write,
)
.await
.await;
log::info!(target: "io", "write -> #{lba}, {lba_count} blocks -> {result:?} @ {buffer_address:#x}");
result
}
fn block_size(&self) -> usize {

View File

@@ -1,4 +1,4 @@
#![feature(const_trait_impl, let_chains, if_let_guard)]
#![feature(const_trait_impl, let_chains, if_let_guard, maybe_uninit_slice)]
#![allow(missing_docs)]
#![no_std]

View File

@@ -1,4 +1,5 @@
use core::{
future::poll_fn,
mem::size_of,
pin::Pin,
ptr::null_mut,
@@ -282,55 +283,31 @@ impl QueuePair {
self.cq_base
}
// pub fn poll_completion(&self, command_id: u32) -> Poll<Result<(), Error>> {
// let mut inner = self.inner.lock();
// match inner.completed.remove(&command_id) {
// Some(result) if let Some(_error) = result.error() => todo!(),
// Some(_) => Poll::Ready(Ok(())),
// None => Poll::Pending,
// }
// }
pub fn wait_for_completion<'r, T: Unpin + 'r>(
&'r self,
pub async fn wait_for_completion<T: Unpin>(
&self,
command_id: u32,
result: T,
) -> impl Future<Output = Result<T, CommandError>> + 'r {
struct Fut<'r, R: Unpin + 'r> {
this: &'r QueuePair,
response: Option<R>,
command_id: u32,
}
) -> Result<T, CommandError> {
let mut response = Some(result);
poll_fn(|cx| {
let mut inner = self.inner.lock();
impl<'r, R: Unpin + 'r> Future for Fut<'r, R> {
type Output = Result<R, CommandError>;
if let Some(entry) = inner.completed.remove(&command_id) {
self.completion_notify.remove(cx.waker());
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.this.completion_notify.register(cx.waker());
let mut inner = self.this.inner.lock();
if let Some(entry) = inner.completed.remove(&self.command_id) {
self.this.completion_notify.remove(cx.waker());
let result = if let Some(error) = entry.error() {
Err(error)
} else {
Ok(self.response.take().unwrap())
};
Poll::Ready(result)
let result = if let Some(error) = entry.error() {
Err(error)
} else {
Poll::Pending
}
}
}
Ok(response.take().unwrap())
};
Fut {
this: self,
response: Some(result),
command_id,
}
Poll::Ready(result)
} else {
self.completion_notify.register(cx.waker());
Poll::Pending
}
})
.await
}
pub fn submit<C: Command>(&self, cmd: C, ranges: &[PhysicalAddress], set_pending: bool) -> u32 {
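
Note: the rewrite above drops the hand-written Fut struct with its manual Future impl in favor of core::future::poll_fn, and also tightens the waker handling so registration only happens while the command is still pending. A self-contained sketch of the pattern, assuming std, a toy completion map in place of the driver's queue state, and the futures crate for block_on; the real code parks on completion_notify instead of self-waking:

use std::collections::HashMap;
use std::future::poll_fn;
use std::sync::Mutex;
use std::task::Poll;

struct QueuePair {
    completed: Mutex<HashMap<u32, Result<(), ()>>>,
}

impl QueuePair {
    async fn wait_for_completion<T: Unpin>(&self, command_id: u32, result: T) -> Result<T, ()> {
        let mut response = Some(result);
        poll_fn(|cx| {
            let mut completed = self.completed.lock().unwrap();
            if let Some(entry) = completed.remove(&command_id) {
                // Completion arrived: hand the response back exactly once.
                Poll::Ready(entry.map(|()| response.take().unwrap()))
            } else {
                // Still in flight; a real driver registers cx.waker() with
                // its IRQ-side wait queue here. Self-wake keeps the toy alive.
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        })
        .await
    }
}

fn main() {
    let qp = QueuePair {
        completed: Mutex::new(HashMap::from([(7u32, Ok(()))])),
    };
    // block_on comes from the futures crate; any executor works.
    let out = futures::executor::block_on(qp.wait_for_completion(7, "done"));
    assert_eq!(out, Ok("done"));
}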

View File

@@ -13,3 +13,4 @@ libk-util.workspace = true
bytemuck.workspace = true
static_assertions.workspace = true
log.workspace = true
async-trait.workspace = true

View File

@@ -111,15 +111,10 @@ pub struct BlockGroupDescriptor {
pub inode_table: u32,
pub unallocated_blocks: u16,
pub unallocated_inodes: u16,
pub unallocated_directories: u16,
pub directories: u16,
_0: [u8; 14],
}
pub struct BlockGroupDescriptorTable {
pub(crate) data: Box<[u8]>,
pub(crate) len: usize,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct Inode {
@@ -161,37 +156,6 @@ pub struct Dirent {
pub type_indicator: u8,
}
impl BlockGroupDescriptorTable {
pub fn with_capacity(size: usize, len: usize) -> Self {
let data = vec![0; size].into_boxed_slice();
Self { data, len }
}
pub fn descriptor(&self, index: u32) -> &BlockGroupDescriptor {
let index = index as usize;
if index >= self.len {
panic!();
}
bytemuck::from_bytes(
&self.data[index * size_of::<BlockGroupDescriptor>()
..(index + 1) * size_of::<BlockGroupDescriptor>()],
)
}
pub fn descriptor_mut(&mut self, index: u32) -> &mut BlockGroupDescriptor {
let index = index as usize;
if index >= self.len {
panic!();
}
bytemuck::from_bytes_mut(
&mut self.data[index * size_of::<BlockGroupDescriptor>()
..(index + 1) * size_of::<BlockGroupDescriptor>()],
)
}
}
const_assert_eq!(size_of::<BlockGroupDescriptor>(), 32);
impl ExtendedSuperblock {
pub fn inode_size(&self) -> usize {
if self.base.version_major != 0 {
@@ -271,13 +235,13 @@ impl InodeMode {
0x4000 => Some(FileType::Directory),
0x8000 => Some(FileType::File),
0xA000 => Some(FileType::Symlink),
val => todo!("Unhandled ext2 node type: {:#x}", val),
_ => None,
}
}
pub fn default_for_type(ty: FileType) -> Self {
match ty {
FileType::File => todo!(),
FileType::File => Self(0o644 | 0x8000),
FileType::Directory => Self(0o755 | 0x4000),
FileType::Symlink => todo!(),
_ => todo!(),
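
Note: node_type and default_for_type above follow the standard ext2 i_mode layout: the top four bits select the node type, the low twelve are permissions. A sketch of that encoding, assuming std and a local FileType enum; the 0o777 symlink default is an assumption, the commit itself still leaves symlinks as todo!():

#[derive(Debug, PartialEq)]
enum FileType { File, Directory, Symlink }

fn node_type(mode: u16) -> Option<FileType> {
    match mode & 0xF000 {
        0x4000 => Some(FileType::Directory),
        0x8000 => Some(FileType::File),
        0xA000 => Some(FileType::Symlink),
        _ => None, // unknown types now map to None instead of panicking
    }
}

fn default_mode(ty: &FileType) -> u16 {
    match ty {
        FileType::File => 0o644 | 0x8000,      // rw-r--r--, regular file
        FileType::Directory => 0o755 | 0x4000, // rwxr-xr-x, directory
        FileType::Symlink => 0o777 | 0xA000,   // assumed default; not in the commit
    }
}

fn main() {
    assert_eq!(node_type(default_mode(&FileType::Directory)), Some(FileType::Directory));
    assert_eq!(node_type(0x6000), None); // e.g. a block device: unhandled
}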

View File

@@ -4,19 +4,26 @@ use alloc::sync::Arc;
use libk::{
block,
error::Error,
vfs::{CommonImpl, DirectoryImpl, DirectoryOpenPosition, Metadata, Node, NodeFlags, NodeRef},
vfs::{
CommonImpl, DirectoryImpl, DirectoryOpenPosition, Filesystem, Metadata, Node, NodeFlags,
NodeRef,
},
};
use yggdrasil_abi::{
io::{DirectoryEntry, FileType},
util::FixedString,
};
use crate::{data::FsRequiredFeatures, inode::InodeCache, Dirent, Ext2Fs, Inode};
use crate::{
data::FsRequiredFeatures,
file::RegularNode,
inode::{InodeAccess, InodeCache},
Dirent, Ext2Fs, Inode,
};
pub struct DirectoryNode {
fs: Arc<Ext2Fs>,
inode_cache: Arc<InodeCache>,
ino: u32,
pub(crate) inode: InodeAccess,
}
struct DirentIter<'a> {
@@ -55,10 +62,6 @@ impl<'a> DirentIterMut<'a> {
continue;
}
log::debug!(
"Fit into {:?}",
self.offset..self.offset + dirent.ent_size as usize
);
let extra_space = dirent.ent_size as usize - new_aligned_size;
let aligned_size = if extra_space >= size_of::<Dirent>() {
@@ -75,17 +78,12 @@ impl<'a> DirentIterMut<'a> {
type_indicator: 0, // TODO
ino,
};
log::debug!("Place entry: {:?}", self.offset..self.offset + aligned_size);
self.block[self.offset..self.offset + size_of::<Dirent>()]
.copy_from_slice(bytemuck::bytes_of(&new_dirent));
self.block[self.offset + size_of::<Dirent>()..self.offset + new_total_size]
.copy_from_slice(name);
if extra_space >= size_of::<Dirent>() {
log::debug!(
"Place dummy: {:?}",
self.offset + new_aligned_size..self.offset + new_aligned_size + extra_space
);
// Fit an extra dummy dirent
let dummy = Dirent {
ent_size: extra_space as _,
@@ -152,32 +150,23 @@ impl<'a> Iterator for DirentIter<'a> {
}
impl DirectoryNode {
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
Node::directory(
Self {
fs,
inode_cache,
ino,
},
NodeFlags::empty(),
)
pub fn new(fs: Arc<Ext2Fs>, inode: InodeAccess) -> NodeRef {
Node::directory(Self { fs, inode }, NodeFlags::empty())
}
pub async fn create(
fs: Arc<Ext2Fs>,
inode_cache: Arc<InodeCache>,
inode: InodeAccess,
parent_ino: Option<u32>,
ino: u32,
) -> Result<NodeRef, Error> {
let this = Self {
fs,
inode_cache,
ino,
};
let ino = inode.ino();
let this = Self { fs, inode };
// fsck wants . as first entry, .. as second
this.create_entry(".", ino).await?;
if let Some(parent) = parent_ino {
this.create_entry("..", parent).await?;
}
this.create_entry(".", ino).await?;
Ok(Node::directory(this, NodeFlags::empty()))
}
@@ -186,72 +175,85 @@ impl DirectoryNode {
assert!(name.len() < 255);
{
let mut inode = self.inode_cache.get_mut(ino).await?;
inode.inc_hard_count();
let mut holder = self.inode.cache().get_mut(ino).await?;
{
let mut child = holder.write();
child.inc_hard_count();
}
holder.put().await?;
}
let mut inode = self.inode_cache.get_mut(self.ino).await?;
let mut holder = self.inode.get_mut().await?;
{
let mut inode = holder.write();
let mut fit = false;
let n = inode.blocks(&self.fs) as u32;
for i in 0..n {
let fit_block = self
.fs
.with_inode_block_mut(&inode, i, |block| {
let mut iter = DirentIterMut::new(&self.fs, &mut block[..], 0);
if iter.try_fit(name, ino) {
Ok(true)
} else {
Ok(false)
}
})
.await?;
// Try to fit entry first
let mut fit = false;
let n = inode.blocks(&self.fs);
for i in 0..n {
let mut block = self.fs.inode_block_mut(&inode, i as u32).await?;
let mut iter = DirentIterMut::new(&self.fs, &mut block, 0);
if fit_block {
fit = true;
break;
}
}
if iter.try_fit(name, ino) {
block.set_dirty();
fit = true;
break;
if !fit {
// Allocate a new block
let block_index = inode.blocks(&self.fs) as u32;
// Grow the storage
inode
.reserve(
&self.fs,
(block_index as u64 + 1) * self.fs.block_size as u64,
)
.await?;
self.fs
.with_inode_block_mut(&inode, block_index, |block| {
block.fill(0);
// Place dirent
let total_len = size_of::<Dirent>() + name.len();
let aligned_len = (total_len + 3) & !3;
let dirent = Dirent {
ino,
ent_size: aligned_len as _,
type_indicator: 0, // TODO
name_length_low: name.len() as u8,
};
block[..size_of::<Dirent>()].copy_from_slice(bytemuck::bytes_of(&dirent));
block[size_of::<Dirent>()..total_len].copy_from_slice(name.as_bytes());
// Fill the rest with empty blocks
let dummy = Dirent {
ino: 0,
ent_size: (self.fs.block_size - aligned_len).try_into().unwrap(),
type_indicator: 0,
name_length_low: 0,
};
block[aligned_len..aligned_len + size_of::<Dirent>()]
.copy_from_slice(bytemuck::bytes_of(&dummy));
Ok(())
})
.await?;
}
}
if fit {
return Ok(());
}
// TODO increment child's hard ref count
// Allocate a new block
let block_index = inode.blocks(&self.fs) as u32;
// Grow the storage
inode
.reserve(
&self.fs,
(block_index as u64 + 1) * self.fs.block_size as u64,
)
.await?;
let mut block = self.fs.inode_block_mut(&inode, block_index).await?;
block.fill(0);
// Place dirent
let total_len = size_of::<Dirent>() + name.len();
let aligned_len = (total_len + 3) & !3;
let dirent = Dirent {
ino,
ent_size: aligned_len as _,
type_indicator: 0, // TODO
name_length_low: name.len() as u8,
};
log::debug!("Place dirent {:?}", 0..aligned_len);
block[..size_of::<Dirent>()].copy_from_slice(bytemuck::bytes_of(&dirent));
block[size_of::<Dirent>()..total_len].copy_from_slice(name.as_bytes());
// Fill the rest with empty blocks
let dummy = Dirent {
ino: 0,
ent_size: (self.fs.block_size - aligned_len).try_into().unwrap(),
type_indicator: 0,
name_length_low: 0,
};
log::debug!("Place spacer {:?}", aligned_len..self.fs.block_size);
block[aligned_len..aligned_len + size_of::<Dirent>()]
.copy_from_slice(bytemuck::bytes_of(&dummy));
block.set_dirty();
holder.put().await?;
Ok(())
}
@@ -259,23 +261,32 @@
async fn lookup_entry(&self, search_name: &str) -> Result<NodeRef, Error> {
assert!(search_name.len() < 255);
let inode = self.inode_cache.get(self.ino).await?;
let n = inode.blocks(&self.fs);
let inode = self.inode.get().await?;
let inode = inode.read();
let n = inode.blocks(&self.fs) as u32;
for i in 0..n {
let block = self.fs.inode_block(&inode, i as u32).await?;
let ino = self
.fs
.with_inode_block(&inode, i, |block| {
let iter = DirentIter::new(&self.fs, block, 0);
let iter = DirentIter::new(&self.fs, &block, 0);
for (dirent, name, _) in iter {
let Ok(name) = core::str::from_utf8(name) else {
continue;
};
for (dirent, name, _) in iter {
let Ok(name) = core::str::from_utf8(name) else {
continue;
};
if search_name == name {
return Ok(Some(dirent.ino));
}
}
if search_name == name {
let ino = dirent.ino;
return self.fs.load_node(ino).await;
}
Ok(None)
})
.await?;
if let Some(ino) = ino {
return self.fs.load_node(ino).await;
}
}
@@ -287,7 +298,9 @@
mut pos: u64,
entries: &mut [MaybeUninit<DirectoryEntry>],
) -> Result<(usize, u64), Error> {
let inode = self.inode_cache.get(self.ino).await?;
let inode = self.inode.get().await?;
let inode = inode.read();
let size = inode.size(&self.fs);
if pos >= inode.size(&self.fs) || entries.is_empty() {
return Ok((0, pos));
@@ -301,29 +314,37 @@
let index = pos / self.fs.block_size as u64;
let offset = (pos % self.fs.block_size as u64) as usize;
let block = self.fs.inode_block(&inode, index as u32).await?;
let (entry_count, new_pos) = self
.fs
.with_inode_block(&inode, index as u32, |block| {
let mut pos = pos;
let mut entry_count = 0;
let iter = DirentIter::new(&self.fs, &block, offset);
let iter = DirentIter::new(&self.fs, &block, offset);
let mut entry_count = 0;
for (dirent, name, entry_offset) in iter {
pos = (index * self.fs.block_size as u64) + entry_offset as u64;
for (dirent, name, entry_offset) in iter {
pos = (index * self.fs.block_size as u64) + entry_offset as u64;
if entry_count >= entries.len() {
break;
}
if entry_count >= entries.len() {
break;
}
// Point pos at last entry's end
pos += dirent.ent_size as u64;
pos += dirent.ent_size as u64;
let name = core::str::from_utf8(name).unwrap();
entries[entry_count].write(DirectoryEntry {
ty: None,
name: FixedString::from_str(name)?,
});
let name = core::str::from_utf8(name).unwrap();
entries[entry_count].write(DirectoryEntry {
ty: None,
name: FixedString::from_str(name)?,
});
entry_count += 1;
}
entry_count += 1;
}
Ok((entry_count, pos))
})
.await?;
pos = new_pos;
// If any entries were read from this block, return
if entry_count != 0 {
@@ -337,17 +358,23 @@
}
impl CommonImpl for DirectoryNode {
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.size(&self.fs))
}
fn as_any(&self) -> &dyn Any {
self
}
fn filesystem(&self) -> Option<&dyn Filesystem> {
Some(self.fs.as_ref())
}
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.size(&self.fs))
}
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.metadata())
}
}
@@ -366,17 +393,20 @@ impl DirectoryImpl for DirectoryNode {
}
fn create_node(&self, _parent: &NodeRef, ty: FileType) -> Result<NodeRef, Error> {
let node = block!(self.fs.create_node(Some(self.ino), ty).await)??;
Ok(node)
if self.fs.force_readonly {
return Err(Error::ReadOnly);
}
block!(self.fs.create_node(Some(self.inode.ino()), ty).await)?
}
fn attach_node(&self, _parent: &NodeRef, child: &NodeRef, name: &str) -> Result<(), Error> {
if self.fs.force_readonly {
return Err(Error::ReadOnly);
}
// Check that child is ext2
// if child.data_as_any().is::<DirectoryNode>() {
// log::debug!("attach_node: ext2 dir");
// }
let child_ino = match child.data_as_any() {
data if let Some(dir) = data.downcast_ref::<DirectoryNode>() => dir.ino,
data if let Some(dir) = data.downcast_ref::<DirectoryNode>() => dir.inode.ino(),
data if let Some(file) = data.downcast_ref::<RegularNode>() => file.inode.ino(),
_ => return Err(Error::InvalidOperation),
};
block!(self.create_entry(name, child_ino).await)??;
@@ -385,7 +415,11 @@
}
fn unlink_node(&self, _parent: &NodeRef, name: &str) -> Result<(), Error> {
todo!()
if self.fs.force_readonly {
return Err(Error::ReadOnly);
}
log::error!("ext2: unlink_node not implemented");
Err(Error::NotImplemented)
}
fn read_entries(
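
Note: the dirent placement code above relies on two sizing rules: records are 4-byte aligned, and slack left in a slot becomes a dummy record (ino = 0) only if it can hold at least a dirent header, otherwise the new entry absorbs it. A sketch of just that arithmetic, assuming std and the 8-byte ext2 dirent header (ino 4 + ent_size 2 + name_len 1 + type 1); function names are illustrative:

const DIRENT_HEADER: usize = 8;

fn aligned_dirent_len(name_len: usize) -> usize {
    (DIRENT_HEADER + name_len + 3) & !3 // round up to a 4-byte boundary
}

// Split `avail` bytes of a slot between a new entry and an optional dummy.
fn split_slot(avail: usize, name_len: usize) -> Option<(usize, Option<usize>)> {
    let need = aligned_dirent_len(name_len);
    if avail < need {
        return None; // does not fit; caller tries the next record or block
    }
    let extra = avail - need;
    if extra >= DIRENT_HEADER {
        Some((need, Some(extra))) // new entry plus an ino = 0 spacer
    } else {
        Some((avail, None)) // absorb the slack into the new entry
    }
}

fn main() {
    assert_eq!(aligned_dirent_len(5), 16); // "hello": 8 + 5 = 13, aligned to 16
    assert_eq!(split_slot(40, 5), Some((16, Some(24))));
    assert_eq!(split_slot(18, 5), Some((18, None)));
    assert_eq!(split_slot(12, 5), None);
}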

View File

@@ -10,34 +10,33 @@ use libk::{
use libk_util::sync::LockMethod;
use yggdrasil_abi::io::OpenOptions;
use crate::{inode::InodeCache, Ext2Fs, Inode};
use crate::{
inode::{InodeAccess, InodeCache},
Ext2Fs, Inode,
};
pub struct RegularNode {
fs: Arc<Ext2Fs>,
inode_cache: Arc<InodeCache>,
ino: u32,
pub(crate) inode: InodeAccess,
}
impl RegularNode {
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
Node::regular(
Self {
fs,
inode_cache,
ino,
},
NodeFlags::empty(),
)
pub fn new(fs: Arc<Ext2Fs>, inode: InodeAccess) -> NodeRef {
Node::regular(Self { fs, inode }, NodeFlags::empty())
}
async fn resize(&self, new_size: u64) -> Result<(), Error> {
let mut inode = self.inode_cache.get_mut(self.ino).await?;
inode.resize(&self.fs, new_size).await
let mut holder = self.inode.get_mut().await?;
let mut inode = holder.write();
inode.resize(&self.fs, new_size).await?;
drop(inode);
holder.put().await?;
Ok(())
}
async fn read(&self, mut pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
let inode = self.inode_cache.get(self.ino).await?;
let holder = self.inode.get().await?;
let inode = holder.read();
if pos >= inode.size(&self.fs) {
return Ok(0);
@@ -51,10 +50,13 @@ impl RegularNode {
let block_offset = (pos % self.fs.block_size as u64) as usize;
let amount = core::cmp::min(self.fs.block_size - block_offset, remaining);
let block = self.fs.inode_block(&inode, block_index as u32).await?;
buffer[offset..offset + amount]
.copy_from_slice(&block[block_offset..block_offset + amount]);
self.fs
.with_inode_block(&inode, block_index as u32, |block| {
buffer[offset..offset + amount]
.copy_from_slice(&block[block_offset..block_offset + amount]);
Ok(())
})
.await?;
pos += amount as u64;
offset += amount;
@@ -65,9 +67,11 @@
}
async fn write(&self, mut pos: u64, buffer: &[u8]) -> Result<usize, Error> {
let mut inode = self.inode_cache.get_mut(self.ino).await?;
let mut holder = self.inode.get_mut().await?;
let mut inode = holder.write();
inode.reserve(&self.fs, pos + buffer.len() as u64).await?;
let need_size = pos + buffer.len() as u64;
inode.reserve(&self.fs, need_size).await?;
let mut offset = 0;
let mut remaining = buffer.len();
@@ -77,23 +81,30 @@
let block_offset = (pos % self.fs.block_size as u64) as usize;
let amount = remaining.min(self.fs.block_size - block_offset);
let mut block = self.fs.inode_block_mut(&inode, block_index as u32).await?;
block[block_offset..block_offset + amount]
.copy_from_slice(&buffer[offset..offset + amount]);
self.fs
.with_inode_block_mut(&inode, block_index as u32, |block| {
block[block_offset..block_offset + amount]
.copy_from_slice(&buffer[offset..offset + amount]);
Ok(())
})
.await?;
pos += amount as u64;
offset += amount;
remaining -= amount;
}
drop(inode);
holder.put().await?;
Ok(offset)
}
}
impl CommonImpl for RegularNode {
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.metadata())
}
@@ -102,7 +113,8 @@ impl CommonImpl for RegularNode {
}
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.size(&self.fs))
}
}
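
Note: read and write above both walk the file one block at a time; each iteration derives the block index, the offset inside that block, and the chunk length from the running byte position. That arithmetic in isolation, assuming std (the real code takes block_size from the superblock):

fn block_span(pos: u64, remaining: usize, block_size: usize) -> (u64, usize, usize) {
    let block_index = pos / block_size as u64;
    let block_offset = (pos % block_size as u64) as usize;
    let amount = remaining.min(block_size - block_offset);
    (block_index, block_offset, amount)
}

fn main() {
    // A 100-byte access at pos 1000 with 512-byte blocks splits into 24 + 76.
    let (i0, o0, a0) = block_span(1000, 100, 512);
    assert_eq!((i0, o0, a0), (1, 488, 24));
    let (i1, o1, a1) = block_span(1000 + a0 as u64, 100 - a0, 512);
    assert_eq!((i1, o1, a1), (2, 0, 76));
}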

View File

@@ -1,9 +1,18 @@
use core::{cmp::Ordering, ops::Deref};
use core::{
cmp::Ordering,
future::{AsyncDrop, Future},
ops::{Deref, DerefMut},
pin::Pin,
};
use alloc::sync::{Arc, Weak};
use libk::{
block,
error::Error,
task::sync::{AsyncMutex, MappedAsyncMutexGuard, Mutex},
task::{
runtime::spawn,
sync::{AsyncMutex, MappedAsyncMutexGuard, Mutex},
},
};
use libk_util::{
lru_hash_table::LruCache,
@@ -15,126 +24,197 @@ use crate::{
Ext2Fs, Inode,
};
struct InodeHolder {
pub struct InodeHolder {
inode: Inode,
dirty: bool,
}
pub struct InodeRef {
_entry: Arc<IrqSafeRwLock<InodeHolder>>,
lock: IrqSafeRwLockReadGuard<'static, InodeHolder>,
pub struct CachedInodeRef {
entry: Arc<IrqSafeRwLock<InodeHolder>>,
}
pub struct InodeMut {
_entry: Arc<IrqSafeRwLock<InodeHolder>>,
lock: IrqSafeRwLockWriteGuard<'static, InodeHolder>,
pub struct UncachedInodeRef {
entry: IrqSafeRwLock<InodeHolder>,
}
pub enum InodeRef {
Cached(CachedInodeRef),
Uncached(UncachedInodeRef),
}
pub struct CachedInodeMut {
ino: u32,
cache: Arc<InodeCache>,
entry: Arc<IrqSafeRwLock<InodeHolder>>,
}
pub struct UncachedInodeMut {
ino: u32,
fs: Arc<Ext2Fs>,
put: bool,
data: IrqSafeRwLock<InodeHolder>,
}
pub enum InodeMut {
Cached(CachedInodeMut),
Uncached(UncachedInodeMut),
}
pub struct InodeCache {
fs: Arc<Ext2Fs>,
cache: AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>,
cache: Option<AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>>,
}
pub struct InodeAccess {
inode_cache: Arc<InodeCache>,
ino: u32,
}
impl InodeAccess {
pub fn new(inode_cache: Arc<InodeCache>, ino: u32) -> Self {
Self { inode_cache, ino }
}
pub fn ino(&self) -> u32 {
self.ino
}
pub fn cache(&self) -> &Arc<InodeCache> {
&self.inode_cache
}
pub async fn get(&self) -> Result<InodeRef, Error> {
self.inode_cache.get(self.ino).await
}
pub async fn get_mut(&self) -> Result<InodeMut, Error> {
self.inode_cache.get_mut(self.ino).await
}
}
impl InodeCache {
pub fn with_capacity(fs: Arc<Ext2Fs>, bucket_capacity: usize) -> Self {
Self {
fs,
cache: AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4)),
cache: Some(AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4))),
}
}
async fn evict_inode(&self, ino: u32, inode: Arc<IrqSafeRwLock<InodeHolder>>) {
pub fn uncached(fs: Arc<Ext2Fs>) -> Self {
Self { fs, cache: None }
}
async fn evict_inode(
&self,
ino: u32,
inode: Arc<IrqSafeRwLock<InodeHolder>>,
) -> Result<(), Error> {
let inode = inode.read();
if inode.dirty {
log::debug!("Flush dirty inode {ino}");
todo!();
self.fs.write_inode(ino, &inode.inode).await?;
}
Ok(())
}
async fn fetch_inode(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let inode = self.fs.read_inode(ino).await?;
log::error!("InodeHolder created");
Ok(Arc::new(IrqSafeRwLock::new(InodeHolder {
inode,
dirty: false,
})))
}
async fn entry<'a>(
&'a self,
ino: u32,
) -> Result<
MappedAsyncMutexGuard<
'a,
Arc<IrqSafeRwLock<InodeHolder>>,
LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>,
>,
Error,
> {
async fn entry(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let Some(cache) = self.cache.as_ref() else {
log::warn!("Cannot use InodeCache::entry with no cache");
return Err(Error::InvalidOperation);
};
if ino < 1 || ino > self.fs.total_inodes {
return Err(Error::InvalidFile);
}
let key = ino - 1;
self.cache
.lock()
.await
.try_map_guard_async(|cache: &'a mut LruCache<_, _>| async move {
let (value, evicted) = cache
.try_get_or_insert_with_async(key, || self.fetch_inode(ino))
.await?;
let mut lock = cache.lock().await;
let (value, evicted) = lock
.try_get_or_insert_with_async(ino, || self.fetch_inode(ino))
.await?;
let value = value.clone();
if let Some((ino, holder)) = evicted {
self.evict_inode(ino, holder).await;
}
if let Some((ino, holder)) = evicted {
if let Err(error) = self.evict_inode(ino, holder).await {
log::error!("ext2: inode flush error: ino={ino}, error={error:?}");
}
}
Ok(value)
})
.await
Ok(value)
}
pub async fn get(&self, ino: u32) -> Result<InodeRef, Error> {
self.entry(ino).await.map(|e| InodeRef::new(ino, e.deref()))
if self.cache.is_some() {
let entry = self.entry(ino).await?;
let inode = CachedInodeRef { entry };
Ok(InodeRef::Cached(inode))
} else {
let inode = self.fs.read_inode(ino).await?;
let data = InodeHolder {
inode,
dirty: false,
};
let inode = UncachedInodeRef {
entry: IrqSafeRwLock::new(data),
};
Ok(InodeRef::Uncached(inode))
}
}
pub async fn get_mut(&self, ino: u32) -> Result<InodeMut, Error> {
self.entry(ino).await.map(|e| InodeMut::new(ino, e.deref()))
pub async fn get_mut(self: &Arc<Self>, ino: u32) -> Result<InodeMut, Error> {
if self.cache.is_some() {
let entry = self.entry(ino).await?;
let inode = CachedInodeMut {
entry,
cache: self.clone(),
ino,
};
Ok(InodeMut::Cached(inode))
} else {
let inode = self.fs.read_inode(ino).await?;
let data = InodeHolder {
inode,
dirty: false,
};
let inode = UncachedInodeMut {
fs: self.fs.clone(),
ino,
put: false,
data: IrqSafeRwLock::new(data),
};
Ok(InodeMut::Uncached(inode))
}
}
}
impl InodeRef {
fn new(ino: u32, entry: &Arc<IrqSafeRwLock<InodeHolder>>) -> Self {
let entry = entry.clone();
// Safety: ok, Arc instance is still held
let lock = unsafe { core::mem::transmute(entry.read()) };
Self {
lock,
_entry: entry,
pub async fn flush(&self) -> Result<(), Error> {
if let Some(cache) = self.cache.as_ref() {
let mut last_error = None;
let mut lock = cache.lock().await;
while let Some((ino, inode)) = lock.pop_entry() {
if let Err(error) = self.evict_inode(ino, inode).await {
log::error!("ext2: flush inode cache error: ino={ino}, error={error:?}");
last_error = Some(error);
}
}
match last_error {
None => Ok(()),
Some(error) => Err(error),
}
} else {
Ok(())
}
}
}
impl Deref for InodeRef {
type Target = Inode;
fn deref(&self) -> &Self::Target {
&self.lock.inode
}
}
impl InodeMut {
fn new(ino: u32, entry: &Arc<IrqSafeRwLock<InodeHolder>>) -> Self {
let entry = entry.clone();
// Safety: ok, Arc instance is still held
let lock = unsafe { core::mem::transmute(entry.write()) };
Self {
lock,
_entry: entry,
}
}
}
impl InodeMut {
impl Inode {
async fn grow_direct(
&mut self,
fs: &Ext2Fs,
@@ -149,20 +229,38 @@ impl InodeMut {
for i in old_l0_capacity..new_l0_capacity {
let i = i as usize;
let block = fs.allocate_block().await?;
self.lock.inode.blocks.direct_blocks[i] = block;
self.lock.dirty = true;
self.blocks.direct_blocks[i] = block;
}
Ok(())
}
pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<(), Error> {
fn set_size(&mut self, fs: &Ext2Fs, size: u64) {
let block_count = size.div_ceil(fs.block_size as u64);
if fs
.write_features
.contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
{
self.size_upper = (size >> 32) as u32;
self.size_lower = size as u32;
} else {
if size > u32::MAX as u64 {
todo!("File too large")
}
self.size_lower = size as u32;
}
self.sector_count = block_count as u32 * (fs.block_size / 512) as u32;
}
pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<bool, Error> {
if size == self.size(fs) {
return Ok(());
return Ok(false);
}
let new_blocks = size.div_ceil(fs.block_size as u64);
let old_blocks = self.size(&fs).div_ceil(fs.block_size as u64);
let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);
match old_blocks.cmp(&new_blocks) {
// Grow
@@ -179,46 +277,90 @@ impl InodeMut {
Ordering::Equal => (),
}
if fs
.write_features
.contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
{
self.lock.inode.size_upper = (size >> 32) as u32;
self.lock.inode.size_lower = size as u32;
} else {
if size > u32::MAX as u64 {
todo!("File too large")
}
self.lock.inode.size_lower = size as u32;
}
self.set_size(fs, size);
self.lock.dirty = true;
Ok(())
}
pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<(), Error> {
if capacity > self.size(fs) {
self.resize(fs, capacity).await?;
}
Ok(())
Ok(true)
}
pub fn inc_hard_count(&mut self) {
self.lock.inode.hard_links += 1;
self.lock.dirty = true;
self.hard_links += 1;
}
pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<bool, Error> {
if capacity > self.size(fs) {
self.resize(fs, capacity).await
} else {
Ok(false)
}
}
}
impl Deref for InodeMut {
impl Deref for InodeHolder {
type Target = Inode;
fn deref(&self) -> &Self::Target {
&self.lock.inode
&self.inode
}
}
impl Drop for InodeHolder {
fn drop(&mut self) {
log::error!("InodeHolder dropped");
impl DerefMut for InodeHolder {
fn deref_mut(&mut self) -> &mut Self::Target {
self.dirty = true;
&mut self.inode
}
}
impl InodeMut {
pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.read(),
Self::Uncached(inode) => inode.data.read(),
}
}
pub fn write(&self) -> IrqSafeRwLockWriteGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.write(),
Self::Uncached(inode) => inode.data.write(),
}
}
pub async fn put(&mut self) -> Result<(), Error> {
match self {
Self::Cached(_) => (),
Self::Uncached(inode) => {
log::info!("Write inode #{} back", inode.ino);
inode.put = true;
inode.fs.write_inode(inode.ino, &inode.data.read()).await?;
}
}
Ok(())
}
}
impl InodeRef {
pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.read(),
Self::Uncached(inode) => inode.entry.read(),
}
}
}
impl Drop for InodeMut {
fn drop(&mut self) {
match self {
Self::Uncached(inode) if !inode.put => {
// Do node writeback in background
let ino = inode.ino;
match block!(self.put().await) {
Err(error) | Ok(Err(error)) => {
log::error!("Drop for InodeMut (#{}) failed: {error:?}", ino);
}
Ok(Ok(())) => (),
}
}
_ => (),
}
}
}
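
Note: the interesting piece of the new holder type above is the DerefMut impl: every mutable borrow of the inode flips the dirty flag, so eviction and flush know whether a write-back is due without call sites tracking it. A toy sketch of that scheme, assuming std and a plain struct in place of IrqSafeRwLock and LruCache:

use std::ops::{Deref, DerefMut};

struct Holder<T> {
    value: T,
    dirty: bool,
}

impl<T> Deref for Holder<T> {
    type Target = T;
    fn deref(&self) -> &T { &self.value }
}

impl<T> DerefMut for Holder<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.dirty = true; // conservative: any &mut access marks for write-back
        &mut self.value
    }
}

fn main() {
    let mut h = Holder { value: 42u32, dirty: false };
    let _read: u32 = *h; // shared access leaves the flag untouched
    assert!(!h.dirty);
    *h += 1; // mutable access marks it dirty
    assert!(h.dirty);
}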

View File

@@ -1,24 +1,27 @@
#![feature(if_let_guard)]
#![feature(if_let_guard, async_drop, impl_trait_in_assoc_type)]
#![cfg_attr(not(test), no_std)]
#![allow(clippy::new_ret_no_self)]
extern crate alloc;
use alloc::sync::Arc;
use core::ops::DerefMut;
use alloc::{boxed::Box, sync::Arc};
use async_trait::async_trait;
use bytemuck::Zeroable;
use data::{FsReadonlyFeatures, FsRequiredFeatures, InodeBlockRefs, InodeMode};
use dir::DirectoryNode;
use file::RegularNode;
use inode::{InodeCache, InodeRef};
use inode::{InodeAccess, InodeCache};
use libk::{
error::Error,
task::sync::Mutex,
vfs::{
block::{
cache::{BlockCache, CachedBlockMut, CachedBlockRef},
cache::{BlockCache, CachedBlock, DeviceMapper},
BlockDevice,
},
NodeRef,
Filesystem, FilesystemMountOption, NodeRef,
},
};
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, OneTimeInit};
@@ -29,24 +32,29 @@ pub mod file;
pub mod inode;
pub mod symlink;
pub use data::{
BlockGroupDescriptor, BlockGroupDescriptorTable, Dirent, ExtendedSuperblock, Inode, Superblock,
};
pub use data::{BlockGroupDescriptor, Dirent, ExtendedSuperblock, Inode};
use symlink::SymlinkNode;
use yggdrasil_abi::io::FileType;
struct State {
superblock: ExtendedSuperblock,
bgdt: BlockGroupDescriptorTable,
dirty: bool,
}
struct Bgdt {
base: u32,
entry_count: usize,
block_count: usize,
}
pub struct Ext2Fs {
cache: BlockCache,
mapper: DeviceMapper,
inode_cache: OneTimeInit<Arc<InodeCache>>,
state: IrqSafeRwLock<State>,
bgdt: Bgdt,
total_inodes: u32,
total_blocks: u32,
block_group_inode_count: u32,
block_group_block_count: u32,
@@ -63,14 +71,63 @@ pub struct Ext2Fs {
root: OneTimeInit<NodeRef>,
}
#[async_trait]
impl Filesystem for Ext2Fs {
async fn flush(&self) -> Result<(), Error> {
log::info!("ext2: flushing caches");
let mut last_err = None;
if let Err(error) = self.flush_superblock().await {
log::error!("ext2: superblock flush error: {error:?}");
last_err = Some(error);
}
if let Err(error) = self.flush_inode_cache().await {
log::error!("ext2: inode cache flush error {error:?}");
last_err = Some(error);
}
if let Err(error) = self.mapper.flush().await {
log::error!("ext2: block cache flush error {error:?}");
last_err = Some(error);
}
match last_err {
None => Ok(()),
Some(error) => Err(error),
}
}
fn display_name(&self) -> &'static str {
"ext2"
}
}
impl Ext2Fs {
pub async fn create(device: &'static dyn BlockDevice) -> Result<NodeRef, Error> {
let fs = Self::create_fs(device).await.inspect_err(|e| {
pub async fn create<'a, I: Iterator<Item = FilesystemMountOption<'a>>>(
device: &'static dyn BlockDevice,
options: I,
) -> Result<NodeRef, Error> {
let mut cached = true;
for option in options {
match option {
FilesystemMountOption::Sync => cached = false,
FilesystemMountOption::FsSpecific(opt) => {
log::warn!("ext2: ignoring unrecognized option {opt:?}");
}
}
}
let fs = Self::create_fs(device, cached).await.inspect_err(|e| {
log::error!("Ext2 init error: {:?}", e);
})?;
let fs = Arc::new(fs);
fs.inode_cache
.init(InodeCache::with_capacity(fs.clone(), 64).into());
// let inode_cache = InodeCache::uncached(fs.clone());
let inode_cache = match cached {
false => InodeCache::uncached(fs.clone()),
true => InodeCache::with_capacity(fs.clone(), 64),
};
fs.inode_cache.init(inode_cache.into());
let root = fs.load_node(data::ROOT_INODE).await?;
fs.root.init(root.clone());
@@ -80,7 +137,7 @@ impl Ext2Fs {
fn handle_readonly_features(features: FsReadonlyFeatures) -> (bool, FsReadonlyFeatures) {
// TODO this implementation ignores backups
let mut supported =
let supported =
FsReadonlyFeatures::FILE_SIZE_64_BIT | FsReadonlyFeatures::SPARSE_SUPERBLOCKS;
let mut enabled = FsReadonlyFeatures::empty();
let mut readonly = false;
@@ -106,7 +163,7 @@ }
}
fn handle_required_features(features: FsRequiredFeatures) -> (bool, FsRequiredFeatures) {
let mut supported = FsRequiredFeatures::DIRENT_TYPE_FIELD;
let supported = FsRequiredFeatures::DIRENT_TYPE_FIELD;
let mut enabled = FsRequiredFeatures::empty();
let mut unsupported = false;
@@ -129,10 +186,10 @@ impl Ext2Fs {
(!unsupported, enabled)
}
async fn create_fs(device: &'static dyn BlockDevice) -> Result<Self, Error> {
async fn create_fs(device: &'static dyn BlockDevice, cached: bool) -> Result<Self, Error> {
let mut superblock = ExtendedSuperblock::zeroed();
device
.read(
.read_exact(
data::SUPERBLOCK_OFFSET,
bytemuck::bytes_of_mut(&mut superblock),
)
@@ -162,14 +219,19 @@ }
}
let block_size = 1024usize << superblock.block_size_log2;
let bgdt_block_index = (data::SUPERBLOCK_OFFSET as usize).div_ceil(block_size);
let bgdt_block_index = (data::SUPERBLOCK_OFFSET as usize).div_ceil(block_size) as u32;
let bgdt_entry_count = superblock
.total_blocks
.div_ceil(superblock.block_group_block_count) as usize;
let bgdt_block_count =
(bgdt_entry_count * size_of::<BlockGroupDescriptor>()).div_ceil(block_size);
let bgdt = Bgdt {
base: bgdt_block_index,
block_count: bgdt_block_count,
entry_count: bgdt_entry_count,
};
log::info!(
"ext2 v{}.{}",
superblock.version_major,
@@ -182,26 +244,20 @@ impl Ext2Fs {
bgdt_block_count * block_size
);
let mut bgdt = BlockGroupDescriptorTable::with_capacity(
bgdt_block_count * block_size,
bgdt_entry_count,
);
for i in 0..bgdt_block_count {
let disk_offset = (i + bgdt_block_index) as u64 * block_size as u64;
device
.read_exact(
disk_offset,
&mut bgdt.data[i * block_size..(i + 1) * block_size],
)
.await?;
}
let total_inodes = superblock.total_inodes;
let total_blocks = superblock.total_blocks;
let block_group_inode_count = superblock.block_group_inode_count;
let block_group_block_count = superblock.block_group_block_count;
log::info!("Inode size: {}", superblock.inode_size());
// TODO block cache produces data corruption
let cache = DeviceMapper::uncached(device, block_size);
// let cache = match cached {
// false => MaybeCache::uncached(device, block_size),
// true => MaybeCache::cached_with_capacity(device, block_size, 512),
// };
Ok(Self {
block_size,
inode_size: superblock.inode_size(),
@@ -209,17 +265,18 @@
pointers_per_block: block_size / size_of::<u32>(),
total_inodes,
total_blocks,
block_group_inode_count,
block_group_block_count,
// 128 × 8 cache
cache: BlockCache::with_capacity(device, block_size, 512),
mapper: cache,
inode_cache: OneTimeInit::new(),
state: IrqSafeRwLock::new(State {
superblock,
bgdt,
dirty: false,
}),
bgdt,
required_features,
write_features,
@@ -230,29 +287,108 @@ impl Ext2Fs {
}
pub async fn load_node(self: &Arc<Self>, ino: u32) -> Result<NodeRef, Error> {
let cache = self.inode_cache.get().clone();
let inode = cache.get(ino).await?;
let mode = inode.mode;
drop(inode);
let cache = self.inode_cache.get();
let mode = cache.get(ino).await?.read().mode;
let inode = InodeAccess::new(cache.clone(), ino);
match mode.node_type() {
Some(FileType::Directory) => Ok(DirectoryNode::new(self.clone(), cache, ino)),
Some(FileType::File) => Ok(RegularNode::new(self.clone(), cache, ino)),
Some(FileType::Symlink) => Ok(SymlinkNode::new(self.clone(), cache, ino)),
e => todo!("Unhandled inode type: {e:?} ({mode:#x?})"),
Some(FileType::Directory) => Ok(DirectoryNode::new(self.clone(), inode)),
Some(FileType::File) => Ok(RegularNode::new(self.clone(), inode)),
Some(FileType::Symlink) => Ok(SymlinkNode::new(self.clone(), inode)),
e => {
log::error!("Unhandled inode type: {e:?} ({mode:#x?})");
Err(Error::InvalidArgument)
}
}
}
pub async fn block(&self, index: u32) -> Result<CachedBlockRef, Error> {
let address = index as u64 * self.block_size as u64;
self.cache.get(address).await
#[inline]
fn block_address(&self, index: u32) -> u64 {
index as u64 * self.block_size as u64
}
pub async fn block_mut(&self, index: u32) -> Result<CachedBlockMut, Error> {
let address = index as u64 * self.block_size as u64;
self.cache.get_mut(address).await
pub async fn with_block<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
&self,
index: u32,
mapper: F,
) -> Result<T, Error> {
if index < 1 || index >= self.total_blocks {
return Err(Error::InvalidFile);
}
self.mapper
.try_with(self.block_address(index), mapper)
.await
}
fn inode(&self, ino: u32) -> Result<(u32, usize), Error> {
pub async fn with_block_mut<T, F: FnOnce(&mut [u8]) -> Result<T, Error>>(
&self,
index: u32,
mapper: F,
) -> Result<T, Error> {
if index < 1 || index >= self.total_blocks {
return Err(Error::InvalidFile);
}
self.mapper
.try_with_mut(self.block_address(index), mapper)
.await
}
pub async fn with_inode_block<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
&self,
inode: &Inode,
block: u32,
mapper: F,
) -> Result<T, Error> {
let block_index = self.inode_block_index(inode, block).await?;
self.with_block(block_index, mapper).await
}
pub async fn with_inode_block_mut<T, F: FnOnce(&mut [u8]) -> Result<T, Error>>(
&self,
inode: &Inode,
block: u32,
mapper: F,
) -> Result<T, Error> {
let block_index = self.inode_block_index(inode, block).await?;
self.with_block_mut(block_index, mapper).await
}
async fn with_bgdt_entry<T, F: FnOnce(&BlockGroupDescriptor) -> Result<T, Error>>(
&self,
block_group: u32,
mapper: F,
) -> Result<T, Error> {
let offset = block_group as usize * size_of::<BlockGroupDescriptor>();
let block = offset / self.block_size;
let offset_in_block = offset % self.block_size;
self.with_block(block as u32 + self.bgdt.base, |block| {
let descriptor = bytemuck::from_bytes(
&block[offset_in_block..offset_in_block + size_of::<BlockGroupDescriptor>()],
);
mapper(descriptor)
})
.await
}
async fn with_bgdt_entry_mut<T, F: FnOnce(&mut BlockGroupDescriptor) -> Result<T, Error>>(
&self,
block_group: u32,
mapper: F,
) -> Result<T, Error> {
let offset = block_group as usize * size_of::<BlockGroupDescriptor>();
let block = offset / self.block_size;
let offset_in_block = offset % self.block_size;
self.with_block_mut(block as u32 + self.bgdt.base, |block| {
let descriptor = bytemuck::from_bytes_mut(
&mut block[offset_in_block..offset_in_block + size_of::<BlockGroupDescriptor>()],
);
mapper(descriptor)
})
.await
}
async fn inode(&self, ino: u32) -> Result<(u32, usize), Error> {
if ino < 1 || ino >= self.total_inodes {
return Err(Error::InvalidFile);
}
@@ -260,101 +396,158 @@ impl Ext2Fs {
let ino_group = ino / self.block_group_inode_count;
let ino_in_group = ino % self.block_group_inode_count;
// block where the inode table lives
let inode_table = self.state.read().bgdt.descriptor(ino_group).inode_table;
let ino_block = inode_table + ino_in_group / self.inodes_per_block as u32;
let offset_in_block = (ino_in_group as usize % self.inodes_per_block) * self.inode_size;
let inode_table_block = self
.with_bgdt_entry(ino_group, |entry| Ok(entry.inode_table))
.await?;
let inode_block = inode_table_block + ino_in_group / self.inodes_per_block as u32;
assert!(offset_in_block < self.block_size);
Ok((ino_block, offset_in_block))
Ok((inode_block, offset_in_block))
}
pub async fn read_inode(&self, ino: u32) -> Result<Inode, Error> {
let (ino_block, offset_in_block) = self.inode(ino)?;
let block = self.block(ino_block).await?;
Ok(*bytemuck::from_bytes(
&block[offset_in_block..offset_in_block + size_of::<Inode>()],
))
let (ino_block, offset_in_block) = self.inode(ino).await?;
self.with_block(ino_block, |block| {
let inode =
bytemuck::from_bytes(&block[offset_in_block..offset_in_block + size_of::<Inode>()]);
Ok(*inode)
})
.await
}
pub async fn write_inode(&self, ino: u32, inode: &Inode) -> Result<(), Error> {
let (ino_block, offset_in_block) = self.inode(ino)?;
let mut block = self.block_mut(ino_block).await?;
block[offset_in_block..offset_in_block + size_of::<Inode>()]
.copy_from_slice(bytemuck::bytes_of(inode));
block.set_dirty();
let (ino_block, offset_in_block) = self.inode(ino).await?;
self.with_block_mut(ino_block, |block| {
block[offset_in_block..offset_in_block + size_of::<Inode>()]
.copy_from_slice(bytemuck::bytes_of(inode));
Ok(())
})
.await?;
Ok(())
}
pub async fn inode_block(&self, inode: &Inode, index: u32) -> Result<CachedBlockRef, Error> {
let block_index = self.inode_block_index(inode, index).await?;
self.block(block_index).await
}
pub async fn inode_block_mut(
&self,
inode: &Inode,
index: u32,
) -> Result<CachedBlockMut, Error> {
let block_index = self.inode_block_index(inode, index).await?;
self.block_mut(block_index).await
pub async fn flush_inode_cache(&self) -> Result<(), Error> {
log::info!("Flushing inode cache");
self.inode_cache.get().flush().await
}
pub async fn flush_superblock(&self) -> Result<(), Error> {
todo!()
let state = self.state.read();
if state.dirty {
log::info!("Flushing superblock");
self.mapper
.device()
.write_exact(
data::SUPERBLOCK_OFFSET,
bytemuck::bytes_of(&state.superblock),
)
.await?;
}
Ok(())
}
async fn allocate<
F: Fn(&mut BlockGroupDescriptor) -> Option<(u32, u32)>,
G: Fn(&mut ExtendedSuperblock),
>(
&self,
descriptor_mapper: F,
superblock_mapper: G,
) -> Result<u32, Error> {
let bit_per_block = self.block_size * 8;
for group_index in 0..self.bgdt.entry_count as u32 {
let bitmap = self
.with_bgdt_entry_mut(group_index, |descriptor| {
match descriptor_mapper(descriptor) {
Some(val) => Ok(Some(val)),
_ => Ok(None),
}
})
.await?;
if let Some((bitmap, group_item_count)) = bitmap {
let no = self
.with_block_mut(bitmap, |bitmap| {
for i in 0..bit_per_block.min(group_item_count as usize) {
let index = i / 8;
let bit = 1u8 << (i % 8);
if bitmap[index] & bit == 0 {
bitmap[index] |= bit;
return Ok(Some(i as u32));
}
}
Ok(None)
})
.await?
.expect("TODO: bgdt says there're things, but bitmap says there aren't");
{
let mut state = self.state.write();
superblock_mapper(&mut state.superblock);
state.dirty = true;
}
self.flush_superblock().await?;
return Ok(group_index * group_item_count + no);
}
}
todo!("ENOSPC")
}
async fn allocate_inode(&self, directory: bool) -> Result<u32, Error> {
let ino = self
.allocate(
|descriptor| {
if descriptor.unallocated_inodes == 0
|| (directory && descriptor.directories == u16::MAX)
{
None
} else {
if directory {
descriptor.directories += 1;
}
descriptor.unallocated_inodes -= 1;
Some((descriptor.inode_usage_bitmap, self.block_group_inode_count))
}
},
|superblock| {
superblock.total_unallocated_inodes -= 1;
},
)
.await?;
let ino = ino + 1;
log::debug!("ext2: allocated inode #{ino}");
Ok(ino)
}
async fn allocate_block(&self) -> Result<u32, Error> {
Err(Error::NotImplemented)
// let blocks_per_bitmap_block = self.block_size * 8;
// let index = {
// let mut state = self.state.write();
// let mut block_index = None;
let no = self
.allocate(
|descriptor| {
if descriptor.unallocated_blocks == 0 {
None
} else {
descriptor.unallocated_blocks -= 1;
Some((descriptor.block_usage_bitmap, self.block_group_block_count))
}
},
|superblock| {
superblock.total_unallocated_blocks -= 1;
},
)
.await?;
// for group_index in 0..state.bgdt.len as u32 {
// let descriptor = state.bgdt.descriptor_mut(group_index);
// if descriptor.unallocated_blocks == 0 {
// continue;
// }
log::debug!("ext2: allocated block #{no}");
// // Get block bitmap
// let bitmap_block = descriptor.block_usage_bitmap;
// // TODO handle more than 1 block
// let mut bitmap = self.block_mut(bitmap_block).await?;
// for i in 0..blocks_per_bitmap_block.min(self.block_group_block_count as usize) {
// let index = i / 8;
// let bit = 1u8 << (i % 8);
// if bitmap[index] & bit == 0 {
// let no = group_index * self.block_group_block_count + i as u32;
// bitmap[index] |= bit;
// bitmap.set_dirty();
// descriptor.unallocated_blocks -= 1;
// log::debug!(
// "Allocated block #{no} from bg #{group_index}, free blocks {}",
// descriptor.unallocated_blocks
// );
// block_index = Some(no);
// break;
// }
// }
// if block_index.is_some() {
// break;
// }
// }
// let block_index = block_index.expect("TODO: ENOSPC");
// state.dirty = true;
// block_index
// };
// Ok(index)
Ok(no)
}
async fn create_node(
@@ -362,98 +555,53 @@ impl Ext2Fs {
parent: Option<u32>,
ty: FileType,
) -> Result<NodeRef, Error> {
Err(Error::NotImplemented)
// let inodes_per_bitmap_block = self.block_size * 8;
// let ino = {
// let mut state = self.state.write();
// let mut ino = None;
// for group_index in 0..state.bgdt.len as u32 {
// let descriptor = state.bgdt.descriptor_mut(group_index);
// if descriptor.unallocated_inodes == 0 {
// continue;
// }
let inode_cache = self.inode_cache.get();
let ino = self.allocate_inode(ty == FileType::Directory).await?;
// // Get inode bitmap
// let bitmap_block = descriptor.inode_usage_bitmap;
// // TODO handle more than 1 block
// let mut bitmap = self.block_mut(bitmap_block).await?;
log::info!("Allocated inode #{ino}");
// for i in 0..inodes_per_bitmap_block.min(self.block_group_inode_count as usize) {
// let index = i / 8;
// let bit = 1u8 << (i % 8);
// if bitmap[index] & bit == 0 {
// let no = group_index * self.block_group_inode_count + i as u32;
// bitmap[index] |= bit;
// bitmap.set_dirty();
// descriptor.unallocated_inodes -= 1;
// log::debug!(
// "Allocated inode #{no} from bg #{group_index}, free nodes {}",
// descriptor.unallocated_inodes
// );
// ino = Some(no);
// break;
// }
// }
let access = InodeAccess::new(inode_cache.clone(), ino);
// if ino.is_some() {
// break;
// }
// }
{
// Write initial inode
let mut inode = access.get_mut().await?;
// let ino = ino.expect("TODO: ENOSPC");
{
let mut data = inode.write();
// state.dirty = true;
// Create new inode struct
let mut value = Inode::zeroed();
value.mode = InodeMode::default_for_type(ty);
// ino
// };
// let _guard = DropGuard { ino };
**data = value;
}
// let mut inode = Inode {
// mode: InodeMode::default_for_type(ty),
// uid: 0,
// gid: 0,
// size_lower: 0,
// size_upper: 0,
// atime: 0,
// ctime: 0,
// mtime: 0,
// dtime: 0,
// flags: 0,
// generation: 0,
// facl: 0,
// frag_block_no: 0,
// os_val1: 0,
// os_val2: 0,
// sector_count: 0,
// hard_links: 0,
// blocks: InodeBlockRefs::empty(),
// };
inode.put().await?;
}
// // TODO dealloc ino if failed
// self.write_inode(ino, &inode).await?;
// TODO dealloc ino if failed
// self.write_inode(ino, &inode).await.unwrap();
// let node = match ty {
// FileType::Directory => {
// DirectoryNode::create(self.clone(), self.inode_cache.get().clone(), parent, ino)
// .await?
// }
// _ => todo!(),
// };
let node = match ty {
FileType::Directory => DirectoryNode::create(self.clone(), access, parent)
.await
.unwrap(),
FileType::File => RegularNode::new(self.clone(), access),
_ => todo!(),
};
// if ty == FileType::Directory {
// // TODO Populate the directory with "." and ".."
// }
// Ok(node)
Ok(node)
}
async fn read_index(&self, block_index: u32, index: usize) -> Result<u32, Error> {
let block = self.block(block_index).await?;
let indirect: &[u32] = unsafe {
core::slice::from_raw_parts(&block[0] as *const _ as *const _, self.pointers_per_block)
};
self.with_block(block_index, |block| {
let indirect: &[u32] = unsafe {
core::slice::from_raw_parts(block.as_ptr() as *const u32, self.pointers_per_block)
};
Ok(indirect[index])
Ok(indirect[index])
})
.await
}
async fn inode_block_index(&self, inode: &Inode, index: u32) -> Result<u32, Error> {
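
Note: the shared allocate() above is a first-fit bitmap scan: walk the block group descriptors until one has free items, then find and claim the first clear bit in that group's usage bitmap. The bit-level part in isolation, assuming std and a plain byte slice for the bitmap; limit caps the scan at the group's inode or block count, as in the real code:

fn bitmap_alloc(bitmap: &mut [u8], limit: usize) -> Option<u32> {
    for i in 0..(bitmap.len() * 8).min(limit) {
        let (index, bit) = (i / 8, 1u8 << (i % 8));
        if bitmap[index] & bit == 0 {
            bitmap[index] |= bit; // claim it
            return Some(i as u32);
        }
    }
    None // group is full; the caller moves on to the next descriptor
}

fn main() {
    let mut bitmap = [0b0000_0111u8, 0x00];
    assert_eq!(bitmap_alloc(&mut bitmap, 16), Some(3));
    assert_eq!(bitmap[0], 0b0000_1111);
    // Inode numbers are 1-based, hence the `ino + 1` in allocate_inode.
}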

View File

@@ -8,22 +8,23 @@ use libk::{
};
use libk_util::sync::spin_rwlock::IrqSafeRwLock;
use crate::{inode::InodeCache, Ext2Fs, Inode};
use crate::{
inode::{InodeAccess, InodeCache},
Ext2Fs, Inode,
};
pub struct SymlinkNode {
fs: Arc<Ext2Fs>,
inode_cache: Arc<InodeCache>,
ino: u32,
inode: InodeAccess,
cache: IrqSafeRwLock<Vec<u8>>,
}
impl SymlinkNode {
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
pub fn new(fs: Arc<Ext2Fs>, inode: InodeAccess) -> NodeRef {
Node::symlink(
Self {
fs,
ino,
inode_cache,
inode,
cache: IrqSafeRwLock::new(Vec::new()),
},
NodeFlags::empty(),
@@ -31,31 +32,36 @@ impl SymlinkNode {
}
async fn read(&self, buf: &mut [u8]) -> Result<usize, Error> {
let inode = self.inode_cache.get(self.ino).await?;
let len = inode.size(&self.fs) as usize;
if len >= self.fs.block_size {
todo!()
}
if buf.len() < len {
todo!();
}
todo!()
// let inode = self.inode_cache.get(self.ino).await?;
// let len = inode.size(&self.fs) as usize;
// if len >= self.fs.block_size {
// todo!()
// }
// if buf.len() < len {
// todo!();
// }
let mut write = self.cache.write();
write.clear();
// let mut write = self.cache.write();
// write.clear();
// If length of symlink is lower than 60, data is stored directly in "block address"
// section of the inode
if len < 60 {
let bytes = unsafe { Self::link_from_inode_blocks(&inode, len) };
write.extend_from_slice(bytes);
buf[..len].copy_from_slice(bytes);
} else {
let block = self.fs.inode_block(&inode, 0).await?;
write.extend_from_slice(&block[..len]);
buf[..len].copy_from_slice(&block[..len]);
}
// // If length of symlink is lower than 60, data is stored directly in "block address"
// // section of the inode
// if len < 60 {
// let bytes = unsafe { Self::link_from_inode_blocks(&inode, len) };
// write.extend_from_slice(bytes);
// buf[..len].copy_from_slice(bytes);
// } else {
// self.fs
// .with_inode_block(&inode, 0, |block| {
// write.extend_from_slice(&block[..len]);
// buf[..len].copy_from_slice(&block[..len]);
// Ok(())
// })
// .await?;
// }
Ok(len)
// Ok(len)
}
unsafe fn link_from_inode_blocks(inode: &Inode, len: usize) -> &[u8] {
@@ -66,7 +72,8 @@ impl SymlinkNode {
impl CommonImpl for SymlinkNode {
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.size(&self.fs))
}
@@ -75,7 +82,8 @@ }
}
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
let inode = block!(self.inode_cache.get(self.ino).await)??;
let inode = block!(self.inode.get().await)??;
let inode = inode.read();
Ok(inode.metadata())
}
}
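
Note: the commented-out read path above refers to the ext2 "fast symlink" rule: a target shorter than 60 bytes is stored inline in the inode's block-pointer area (15 u32 pointers = 60 bytes) instead of in a data block. A sketch of that rule, assuming std and modeling the pointer area as a byte array:

fn read_fast_symlink(block_ptrs: &[u8; 60], len: usize) -> Option<&str> {
    if len < 60 {
        std::str::from_utf8(&block_ptrs[..len]).ok() // inline target
    } else {
        None // stored out-of-line: read the inode's block 0 instead
    }
}

fn main() {
    let mut ptrs = [0u8; 60];
    ptrs[..11].copy_from_slice(b"/etc/passwd");
    assert_eq!(read_fast_symlink(&ptrs, 11), Some("/etc/passwd"));
}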

View File

@@ -1,4 +1,4 @@
#![feature(trait_alias, const_trait_impl, effects)]
#![feature(trait_alias)]
#![no_std]
#![allow(clippy::new_without_default)]

View File

@@ -34,5 +34,9 @@ elf.workspace = true
uuid = { version = "1.10.0", features = ["bytemuck"], default-features = false }
lru = "0.12.3"
[dev-dependencies]
tokio = { workspace = true, features = ["rt", "macros"] }
libc.workspace = true
[lints]
workspace = true
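
Note: tokio and libc land here as dev-dependencies only, so async library code can be exercised by host-side tests. A hypothetical shape of such a test (module and function names are illustrative, not from the commit); the "rt" and "macros" features enabled above are exactly what #[tokio::test] requires:

#[cfg(test)]
mod tests {
    #[tokio::test]
    async fn async_code_runs_on_host() {
        let value = async { 2 + 2 }.await;
        assert_eq!(value, 4);
    }
}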

View File

@@ -13,16 +13,19 @@ extern crate alloc;
use core::{
alloc::Layout,
fmt,
marker::PhantomData,
mem::{size_of, MaybeUninit},
ops::{Deref, DerefMut},
slice::SliceIndex,
};
use address::Virtualize;
use kernel_arch::mem::PhysicalMemoryAllocator;
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
table::{MapAttributes, TableAllocator},
};
use phys::GlobalPhysicalAllocator;
use yggdrasil_abi::error::Error;
pub mod address;
@@ -67,22 +70,64 @@ pub trait PageProvider: Send + Sync {
) -> Result<PhysicalAddress, Error>;
}
pub struct PageBox<T: ?Sized> {
pub struct PageBox<
T: ?Sized,
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
value: *mut T,
page_count: usize,
_pd: PhantomData<A>,
}
pub struct PageSlice<T> {
data: [T],
}
impl<T> PageBox<T> {
impl<T> PageBox<T, GlobalPhysicalAllocator> {
pub fn new(init: T) -> Result<PageBox<T>, Error> {
PageBox::new_in(init)
}
pub fn new_slice(item: T, count: usize) -> Result<PageBox<[T]>, Error>
where
T: Copy,
{
PageBox::new_slice_in(item, count)
}
pub fn new_slice_with<F: Fn(usize) -> T>(f: F, count: usize) -> Result<PageBox<[T]>, Error> {
PageBox::new_slice_in_with(f, count)
}
pub fn new_uninit() -> Result<PageBox<MaybeUninit<T>>, Error> {
PageBox::new_uninit_in()
}
pub fn new_uninit_slice(count: usize) -> Result<PageBox<[MaybeUninit<T>]>, Error> {
PageBox::new_uninit_slice_in(count)
}
pub fn new_zeroed_slice(count: usize) -> Result<PageBox<[MaybeUninit<T>]>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>>::alloc_slice(count, true)?;
let base_virt_ptr = base.virtualize() as *mut MaybeUninit<T>;
let value = core::ptr::slice_from_raw_parts_mut(base_virt_ptr, count);
let result = PageBox {
value,
page_count,
_pd: PhantomData,
};
result.trace_created();
Ok(result)
}
}
impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T, A> {
#[inline]
fn alloc_slice(count: usize, zeroed: bool) -> Result<(PhysicalAddress, usize), Error> {
// TODO hardcoded page sizes
let layout = Layout::array::<T>(count).unwrap();
let page_count = layout.size().div_ceil(L3_PAGE_SIZE);
let base = phys::alloc_pages_contiguous(page_count)?;
let base = A::allocate_contiguous_pages(page_count)?;
if zeroed {
let ptr = base.virtualize() as *mut u8;
let slice = unsafe { core::slice::from_raw_parts_mut(ptr, page_count * L3_PAGE_SIZE) };
@ -94,27 +139,32 @@ impl<T> PageBox<T> {
#[inline]
fn alloc() -> Result<(PhysicalAddress, usize), Error> {
let page_count = size_of::<T>().div_ceil(L3_PAGE_SIZE);
Ok((phys::alloc_pages_contiguous(page_count)?, page_count))
let phys = A::allocate_contiguous_pages(page_count)?;
Ok((phys, page_count))
}
pub fn new(init: T) -> Result<PageBox<T>, Error> {
let (base, page_count) = Self::alloc()?;
pub fn new_in(init: T) -> Result<PageBox<T, A>, Error> {
let (base, page_count) = PageBox::<T, A>::alloc()?;
let value = base.virtualize() as *mut T;
unsafe {
value.write(init);
}
let result = PageBox { value, page_count };
let result = PageBox {
value,
page_count,
_pd: PhantomData,
};
result.trace_created();
Ok(result)
}
pub fn new_slice(item: T, count: usize) -> Result<PageBox<[T]>, Error>
pub fn new_slice_in(item: T, count: usize) -> Result<PageBox<[T], A>, Error>
where
T: Copy,
{
let (base, page_count) = Self::alloc_slice(count, false)?;
let (base, page_count) = PageBox::<T, A>::alloc_slice(count, false)?;
let base_virt_ptr = base.virtualize() as *mut T;
let value = core::ptr::slice_from_raw_parts_mut(base_virt_ptr, count);
@ -124,13 +174,20 @@ impl<T> PageBox<T> {
}
}
let result = PageBox { value, page_count };
let result = PageBox {
value,
page_count,
_pd: PhantomData,
};
result.trace_created();
Ok(result)
}
pub fn new_slice_with<F: Fn(usize) -> T>(f: F, count: usize) -> Result<PageBox<[T]>, Error> {
let mut value = Self::new_uninit_slice(count)?;
pub fn new_slice_in_with<F: Fn(usize) -> T>(
f: F,
count: usize,
) -> Result<PageBox<[T], A>, Error> {
let mut value = PageBox::<T, A>::new_uninit_slice_in(count)?;
for i in 0..count {
value[i].write(f(i));
@ -139,34 +196,33 @@ impl<T> PageBox<T> {
Ok(unsafe { value.assume_init_slice() })
}
pub fn new_uninit() -> Result<PageBox<MaybeUninit<T>>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>>::alloc()?;
pub fn new_uninit_in() -> Result<PageBox<MaybeUninit<T>, A>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>, A>::alloc()?;
let value = base.virtualize() as *mut MaybeUninit<T>;
let result = PageBox { value, page_count };
let result = PageBox {
value,
page_count,
_pd: PhantomData,
};
result.trace_created();
Ok(result)
}
pub fn new_uninit_slice(count: usize) -> Result<PageBox<[MaybeUninit<T>]>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>>::alloc_slice(count, false)?;
pub fn new_uninit_slice_in(count: usize) -> Result<PageBox<[MaybeUninit<T>], A>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>, A>::alloc_slice(count, false)?;
let base_virt_ptr = base.virtualize() as *mut MaybeUninit<T>;
let value = core::ptr::slice_from_raw_parts_mut(base_virt_ptr, count);
let result = PageBox { value, page_count };
result.trace_created();
Ok(result)
}
pub fn new_zeroed_slice(count: usize) -> Result<PageBox<[MaybeUninit<T>]>, Error> {
let (base, page_count) = PageBox::<MaybeUninit<T>>::alloc_slice(count, true)?;
let base_virt_ptr = base.virtualize() as *mut MaybeUninit<T>;
let value = core::ptr::slice_from_raw_parts_mut(base_virt_ptr, count);
let result = PageBox { value, page_count };
let result = PageBox {
value,
page_count,
_pd: PhantomData,
};
result.trace_created();
Ok(result)
}
}
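The net effect of the refactor above: the original constructors survive as thin wrappers pinned to GlobalPhysicalAllocator, while the new *_in family takes the allocator as a type parameter, which is what lets the block-cache tests later in this commit run on a host. A hedged usage sketch (PA is the mmap-backed test allocator defined further down):

// Kernel call sites compile unchanged (default allocator):
let table: PageBox<[u64]> = PageBox::new_slice(0u64, 512)?;

// Tests pick an explicit allocator through the `_in` constructors:
let mut buf = PageBox::<u8, PA>::new_uninit_slice_in(4096)?;
for byte in buf.iter_mut() {
    byte.write(0);
}
let buf: PageBox<[u8], PA> = unsafe { buf.assume_init_slice() };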
impl<T: ?Sized> PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T, A> {
#[inline]
pub fn as_ptr(&self) -> *const T {
self.value as _
@ -193,22 +249,24 @@ impl<T: ?Sized> PageBox<T> {
}
}
impl<T> PageBox<[T]> {
pub fn from_iter_exact<I: IntoIterator<Item = T>>(it: I) -> Result<Self, Error>
impl<T> PageBox<T> {
pub fn from_iter_exact<I: IntoIterator<Item = T>>(it: I) -> Result<PageBox<[T]>, Error>
where
I::IntoIter: ExactSizeIterator,
{
let it = it.into_iter();
let mut slice = PageBox::new_uninit_slice(it.len())?;
let mut slice = PageBox::<T>::new_uninit_slice(it.len())?;
for (i, item) in it.enumerate() {
slice[i].write(item);
}
let slice = unsafe { slice.assume_init_slice() };
Ok(slice)
}
}
impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<[T], A> {
pub fn as_slice(&self) -> &PageSlice<T> {
todo!()
unsafe { core::mem::transmute(&self[..]) }
}
pub fn as_slice_mut(&mut self) -> &mut PageSlice<T> {
@ -216,13 +274,13 @@ impl<T> PageBox<[T]> {
}
}
impl<T> PageBox<MaybeUninit<T>> {
impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<MaybeUninit<T>, A> {
/// Consumes the [PageBox], returning a new one with [MaybeUninit] removed.
///
/// # Safety
///
/// See [MaybeUninit::assume_init_mut].
pub unsafe fn assume_init(self) -> PageBox<T> {
pub unsafe fn assume_init(self) -> PageBox<T, A> {
// SAFETY: Memory-safe, as:
// 1. MaybeUninit<T> is transparent
// 2. self.value still points to the same memory and is not deallocated
@ -232,7 +290,11 @@ impl<T> PageBox<MaybeUninit<T>> {
// Prevent deallocation of the PageBox with MaybeUninit
core::mem::forget(self);
PageBox { value, page_count }
PageBox {
value,
page_count,
_pd: PhantomData,
}
}
pub fn as_bytes_mut(p: &mut Self) -> &mut PageSlice<MaybeUninit<u8>> {
@ -240,13 +302,13 @@ impl<T> PageBox<MaybeUninit<T>> {
}
}
impl<T> PageBox<[MaybeUninit<T>]> {
impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<[MaybeUninit<T>], A> {
/// Consumes the [PageBox], returning a new one with [MaybeUninit] removed.
///
/// # Safety
///
/// See [MaybeUninit::slice_assume_init_mut].
pub unsafe fn assume_init_slice(self) -> PageBox<[T]> {
pub unsafe fn assume_init_slice(self) -> PageBox<[T], A> {
// SAFETY: Memory-safe, as:
// 1. MaybeUninit<T> is transparent
// 2. self.value still points to the same memory and is not deallocated
@ -255,7 +317,11 @@ impl<T> PageBox<[MaybeUninit<T>]> {
core::mem::forget(self);
PageBox { value, page_count }
PageBox {
value,
page_count,
_pd: PhantomData,
}
}
/// Returns a reference to the slice data with [MaybeUninit] removed.
@ -289,14 +355,16 @@ impl<T> PageBox<[MaybeUninit<T>]> {
}
}
impl<T: ?Sized> AsPhysicalAddress for PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> AsPhysicalAddress
for PageBox<T, A>
{
#[inline]
unsafe fn as_physical_address(&self) -> PhysicalAddress {
PhysicalAddress::from_virtualized(self.value.addr())
}
}
impl<T: ?Sized> Deref for PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Deref for PageBox<T, A> {
type Target = T;
#[inline(always)]
@ -305,14 +373,14 @@ impl<T: ?Sized> Deref for PageBox<T> {
}
}
impl<T: ?Sized> DerefMut for PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DerefMut for PageBox<T, A> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value }
}
}
impl<T: ?Sized> Drop for PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop for PageBox<T, A> {
fn drop(&mut self) {
self.trace_dropped();
unsafe {
@ -323,35 +391,47 @@ impl<T: ?Sized> Drop for PageBox<T> {
for i in 0..self.page_count {
// SAFETY: Safe, page allocated only by this PageBox
unsafe {
phys::free_page(base.add(L3_PAGE_SIZE * i));
A::free_page(base.add(L3_PAGE_SIZE * i));
}
}
}
}
impl<T: ?Sized> fmt::Pointer for PageBox<T> {
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> fmt::Pointer
for PageBox<T, A>
{
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.value.fmt(f)
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for PageBox<T> {
impl<T: ?Sized + fmt::Debug, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> fmt::Debug
for PageBox<T, A>
{
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self.deref(), f)
}
}
impl<T: ?Sized + fmt::Display> fmt::Display for PageBox<T> {
impl<T: ?Sized + fmt::Display, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> fmt::Display
for PageBox<T, A>
{
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self.deref(), f)
}
}
unsafe impl<T: ?Sized + Send> Send for PageBox<T> {}
unsafe impl<T: ?Sized + Sync> Sync for PageBox<T> {}
unsafe impl<T: ?Sized + Send, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Send
for PageBox<T, A>
{
}
unsafe impl<T: ?Sized + Sync, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Sync
for PageBox<T, A>
{
}
impl<T> PageSlice<T> {
pub fn subslice_mut<R: SliceIndex<[T], Output = [T]>>(

View File

@ -22,7 +22,8 @@
slice_split_once,
arbitrary_self_types_pointers,
result_flattening,
negative_impls
negative_impls,
decl_macro
)]
extern crate alloc;

View File

@ -1,12 +1,22 @@
use core::task::{Context, Poll, Waker};
use core::{
pin::Pin,
task::{Context, Poll, Waker},
};
use alloc::{boxed::Box, format, sync::Arc};
use futures_util::{task::waker_ref, Future};
use alloc::{
boxed::Box,
format,
sync::{Arc, Weak},
};
use futures_util::{pin_mut, task::waker_ref, Future};
use kernel_arch::task::TaskContext;
use libk_util::waker::WakeWeak;
use yggdrasil_abi::error::Error;
use crate::task::{thread::Thread, TaskContextImpl};
use crate::task::{
thread::{CurrentThread, Thread},
TaskContextImpl,
};
use super::{
task::{Task, Termination},
@ -56,12 +66,15 @@ pub fn run_to_completion<'a, T, F: Future<Output = T> + 'a>(future: F) -> Result
let mut future = Box::pin(future);
// Make a weak ref for the waker
let weak = Thread::current().downgrade();
let (entered, weak) = {
let current = Thread::current();
(current.id, current.downgrade())
};
let waker = WakeWeak::weak_waker(weak.clone());
let waker = unsafe { Waker::from_raw(waker) };
loop {
let result = loop {
let context = &mut Context::from_waker(&waker);
match future.as_mut().poll(context) {
@ -69,10 +82,18 @@ pub fn run_to_completion<'a, T, F: Future<Output = T> + 'a>(future: F) -> Result
Poll::Pending => {
if let Some(thread) = unsafe { Thread::upgrade(&weak) } {
if let Err(error) = thread.suspend() {
log::warn!("Future cancelled: {error:?}");
break Err(error);
}
}
}
}
}
};
drop(future);
let current = Thread::current().id;
assert_eq!(current, entered);
result
}
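One consequence of the cancellation path above shows up at every block! call site in this commit: the wait can now fail independently of the awaited operation. Judging by those call sites, block!(expr.await) drives the future to completion on the current thread and yields Result<T, Error>, so when T is itself a Result, both layers get unwrapped:

// Outer `?`: the wait failed (e.g. the future was cancelled mid-suspend).
// Inner `?`: the awaited operation returned its own error.
let inode = block!(self.inode.get().await)??;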

View File

@ -85,10 +85,11 @@ impl<T> AsyncMutex<T> {
}
}
unsafe impl<T: Send> Send for AsyncMutex<T> {}
unsafe impl<T: Send> Sync for AsyncMutex<T> {}
unsafe impl<T: Sync> Send for AsyncMutex<T> {}
unsafe impl<T: Sync> Sync for AsyncMutex<T> {}
impl<T> !Send for AsyncMutexGuard<'_, T> {}
// impl<T> !Send for AsyncMutexGuard<'_, T> {}
unsafe impl<T: Sync> Send for AsyncMutexGuard<'_, T> {}
unsafe impl<T: Sync> Sync for AsyncMutexGuard<'_, T> {}
impl<'a, T> AsyncMutexGuard<'a, T> {

View File

@ -1,8 +1,17 @@
#![allow(clippy::missing_transmute_annotations)]
use core::ops::{Deref, DerefMut};
use core::{
future::Future,
marker::PhantomData,
ops::{Deref, DerefMut},
};
use alloc::sync::Arc;
use libk_mm::PageBox;
use kernel_arch::mem::PhysicalMemoryAllocator;
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
phys::GlobalPhysicalAllocator,
PageBox,
};
use libk_util::{
lru_hash_table::LruCache,
sync::spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard, IrqSafeRwLockWriteGuard},
@ -13,25 +22,140 @@ use crate::task::sync::{AsyncMutex, MappedAsyncMutexGuard};
use super::BlockDevice;
pub struct CachedBlock {
data: PageBox<[u8]>,
pub struct CachedBlock<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
data: PageBox<[u8], A>,
dirty: bool,
}
pub struct CachedBlockRef {
_entry: Arc<IrqSafeRwLock<CachedBlock>>,
lock: IrqSafeRwLockReadGuard<'static, CachedBlock>,
}
pub struct CachedBlockMut {
_entry: Arc<IrqSafeRwLock<CachedBlock>>,
lock: IrqSafeRwLockWriteGuard<'static, CachedBlock>,
}
pub struct BlockCache {
pub struct UncachedCache<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
device: &'static dyn BlockDevice,
block_size: usize,
cache: AsyncMutex<LruCache<u64, Arc<IrqSafeRwLock<CachedBlock>>>>,
_pd: PhantomData<A>,
}
pub enum DeviceMapper<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
Uncached(UncachedCache<A>),
Cached(BlockCache<A>),
}
pub struct BlockCache<
A: PhysicalMemoryAllocator<Address = PhysicalAddress> = GlobalPhysicalAllocator,
> {
device: &'static dyn BlockDevice,
block_size: usize,
cache: AsyncMutex<LruCache<u64, Arc<IrqSafeRwLock<CachedBlock<A>>>>>,
}
impl DeviceMapper {
pub fn cached_with_capacity(
device: &'static dyn BlockDevice,
block_size: usize,
bucket_capacity: usize,
) -> DeviceMapper {
DeviceMapper::cached_with_capacity_in(device, block_size, bucket_capacity)
}
pub fn uncached(device: &'static dyn BlockDevice, block_size: usize) -> DeviceMapper {
DeviceMapper::uncached_in(device, block_size)
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DeviceMapper<A> {
pub fn cached_with_capacity_in(
device: &'static dyn BlockDevice,
block_size: usize,
bucket_capacity: usize,
) -> DeviceMapper<A> {
let cache = BlockCache::<A>::with_capacity_in(device, block_size, bucket_capacity);
DeviceMapper::<A>::Cached(cache)
}
pub fn uncached_in(device: &'static dyn BlockDevice, block_size: usize) -> DeviceMapper<A> {
if block_size % device.block_size() != 0 {
panic!("Cache block size is not multiple of device block size");
}
let uncache = UncachedCache::<A> {
device,
block_size,
_pd: PhantomData,
};
Self::Uncached(uncache)
}
pub fn device(&self) -> &'static dyn BlockDevice {
match self {
Self::Uncached(uncache) => uncache.device(),
Self::Cached(cache) => cache.device(),
}
}
pub async fn try_with<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
match self {
Self::Uncached(uncache) => uncache.try_with(pos, mapper).await,
Self::Cached(cache) => cache.try_with(pos, mapper).await,
}
}
pub async fn try_with_mut<T, F: FnOnce(&mut [u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
match self {
Self::Uncached(uncache) => uncache.try_with_mut(pos, mapper).await,
Self::Cached(cache) => cache.try_with_mut(pos, mapper).await,
}
}
pub async fn flush(&self) -> Result<(), Error> {
match self {
Self::Uncached(_) => Ok(()),
Self::Cached(cache) => {
cache.flush().await;
Ok(())
}
}
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> UncachedCache<A> {
pub fn device(&self) -> &'static dyn BlockDevice {
self.device
}
pub async fn try_with<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
let mut data = PageBox::<_, A>::new_uninit_slice_in(self.block_size)?;
self.device.read_aligned(pos, data.as_slice_mut()).await?;
let result = mapper(unsafe { data.assume_init_slice_ref() })?;
Ok(result)
}
pub async fn try_with_mut<T, F: FnOnce(&mut [u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
let mut data = PageBox::<_, A>::new_uninit_slice_in(self.block_size)?;
self.device.read_aligned(pos, data.as_slice_mut()).await?;
let mut data = unsafe { data.assume_init_slice() };
let result = mapper(&mut data[..])?;
self.device.write_aligned(pos, data.as_slice()).await?;
Ok(result)
}
}
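DeviceMapper gives callers a single entry point whether or not a cache sits in front of the device, and the closure-based try_with/try_with_mut replaces the old guard-returning get/get_mut, so no block lock is held by the caller across an await point. A hedged usage sketch; device is assumed to be some &'static dyn BlockDevice, and the offsets are the ext2 superblock's:

let mapper = DeviceMapper::cached_with_capacity(device, 1024, 64);

// Read-only access: the closure sees the block contents, nothing is marked dirty.
let magic = mapper
    .try_with(1024, |block| Ok(u16::from_le_bytes([block[56], block[57]])))
    .await?;

// Mutable access: the cached variant marks the block dirty for later write-back.
mapper
    .try_with_mut(1024, |block| {
        block[0] = 0xff;
        Ok(())
    })
    .await?;

// No-op for the uncached variant, write-back of dirty blocks for the cached one.
mapper.flush().await?;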
impl BlockCache {
@ -39,36 +163,43 @@ impl BlockCache {
device: &'static dyn BlockDevice,
block_size: usize,
bucket_capacity: usize,
) -> Self {
) -> BlockCache {
BlockCache::with_capacity_in(device, block_size, bucket_capacity)
}
}
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> BlockCache<A> {
pub fn with_capacity_in(
device: &'static dyn BlockDevice,
block_size: usize,
bucket_capacity: usize,
) -> BlockCache<A> {
if block_size % device.block_size() != 0 {
panic!("Cache block size is not multiple of device block size");
}
Self {
BlockCache {
device,
block_size,
cache: AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 8)),
}
}
async fn evict_block(&self, address: u64, block: Arc<IrqSafeRwLock<CachedBlock>>) {
pub fn device(&self) -> &'static dyn BlockDevice {
self.device
}
async fn evict_block(&self, pos: u64, block: Arc<IrqSafeRwLock<CachedBlock<A>>>) {
let read = block.read();
if read.dirty {
log::info!("Evict block {}", address);
if let Err(err) = self
.device
.write_aligned(address, read.data.as_slice())
.await
{
log::error!("Disk error: flushing block {}: {:?}", address, err);
if let Err(err) = self.device.write_aligned(pos, read.data.as_slice()).await {
log::error!("Disk error: flushing block {}: {:?}", pos, err);
}
}
}
async fn fetch_block(&self, address: u64) -> Result<Arc<IrqSafeRwLock<CachedBlock>>, Error> {
let mut data = PageBox::new_uninit_slice(self.block_size)?;
self.device
.read_aligned(address, data.as_slice_mut())
.await?;
async fn fetch_block(&self, pos: u64) -> Result<Arc<IrqSafeRwLock<CachedBlock<A>>>, Error> {
let mut data = PageBox::new_uninit_slice_in(self.block_size)?;
self.device.read_aligned(pos, data.as_slice_mut()).await?;
let data = unsafe { data.assume_init_slice() };
Ok(Arc::new(IrqSafeRwLock::new(CachedBlock {
data,
@ -76,122 +207,396 @@ impl BlockCache {
})))
}
#[allow(clippy::needless_lifetimes)]
async fn entry<'a>(
&'a self,
address: u64,
) -> Result<
MappedAsyncMutexGuard<
'a,
Arc<IrqSafeRwLock<CachedBlock>>,
LruCache<u64, Arc<IrqSafeRwLock<CachedBlock>>>,
>,
Error,
> {
debug_assert_eq!(address % self.block_size as u64, 0);
async fn entry(&self, pos: u64) -> Result<Arc<IrqSafeRwLock<CachedBlock<A>>>, Error> {
let mut lock = self.cache.lock().await;
let (value, evicted) = lock
.try_get_or_insert_with_async(pos, || self.fetch_block(pos))
.await?;
self.cache
.lock()
.await
.try_map_guard_async::<_, Error, _>(|cache: &'a mut LruCache<_, _>| async move {
let (value, evicted) = cache
.try_get_or_insert_with_async(address, || self.fetch_block(address))
.await?;
if let Some((pos, block)) = evicted {
self.evict_block(pos, block).await;
}
if let Some((address, block)) = evicted {
self.evict_block(address, block).await;
}
Ok(value)
})
.await
Ok(value.clone())
}
pub async fn get(&self, address: u64) -> Result<CachedBlockRef, Error> {
self.entry(address)
.await
.map(|e| CachedBlockRef::new(e.deref()))
pub async fn try_with<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
let block = self.entry(pos).await?;
let result = mapper(&block.read()[..])?;
Ok(result)
}
pub async fn get_mut(&self, address: u64) -> Result<CachedBlockMut, Error> {
self.entry(address)
.await
.map(|mut e| CachedBlockMut::new(e.deref_mut()))
pub async fn try_with_mut<T, F: FnOnce(&mut [u8]) -> Result<T, Error>>(
&self,
pos: u64,
mapper: F,
) -> Result<T, Error> {
let block = self.entry(pos).await?;
let mut block = block.write();
let result = mapper(&mut block[..])?;
block.dirty = true;
Ok(result)
}
pub async fn flush(&self) {
for (address, block) in self.cache.lock().await.flush() {
self.evict_block(address, block).await;
for (pos, block) in self.cache.lock().await.flush() {
self.evict_block(pos, block).await;
}
}
}
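To summarize the write-back contract established above:

// try_with     -> read lock; no device I/O beyond the initial fetch_block()
// try_with_mut -> write lock; dirty = true unconditionally
// evict_block  -> write_aligned() only if dirty (runs on LRU eviction and on flush)
// flush()      -> evicts every cached block, awaiting each write-back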
impl CachedBlock {
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> CachedBlock<A> {
pub fn set_dirty(&mut self) {
self.dirty = true;
}
}
impl CachedBlockRef {
pub fn new(entry: &Arc<IrqSafeRwLock<CachedBlock>>) -> Self {
let entry = entry.clone();
// Safety: ok, Arc instance is still held
let lock = unsafe { core::mem::transmute(entry.read()) };
Self {
lock,
_entry: entry,
}
}
}
impl CachedBlockMut {
pub fn new(entry: &Arc<IrqSafeRwLock<CachedBlock>>) -> Self {
let entry = entry.clone();
// Safety: ok, Arc instance is still held
let lock = unsafe { core::mem::transmute(entry.write()) };
Self {
lock,
_entry: entry,
}
}
pub fn set_dirty(&mut self) {
self.lock.set_dirty();
}
}
impl Deref for CachedBlockRef {
type Target = PageBox<[u8]>;
fn deref(&self) -> &Self::Target {
self.lock.deref()
}
}
impl Deref for CachedBlockMut {
type Target = PageBox<[u8]>;
fn deref(&self) -> &Self::Target {
self.lock.deref()
}
}
impl DerefMut for CachedBlockMut {
fn deref_mut(&mut self) -> &mut Self::Target {
self.lock.deref_mut()
}
}
impl Deref for CachedBlock {
type Target = PageBox<[u8]>;
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> Deref for CachedBlock<A> {
type Target = PageBox<[u8], A>;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl DerefMut for CachedBlock {
impl<A: PhysicalMemoryAllocator<Address = PhysicalAddress>> DerefMut for CachedBlock<A> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.dirty = true;
&mut self.data
}
}
#[cfg(test)]
mod tests {
use core::{
ffi::c_void,
mem::MaybeUninit,
ptr::null_mut,
sync::atomic::{AtomicBool, Ordering},
};
use std::{boxed::Box, io, sync::Mutex, vec, vec::Vec};
use async_trait::async_trait;
use kernel_arch::mem::PhysicalMemoryAllocator;
use libk_mm::{address::PhysicalAddress, PageBox, PageSlice};
use yggdrasil_abi::error::Error;
use crate::vfs::block::{BlockDevice, NgBlockDevice, NgBlockDeviceWrapper};
use super::BlockCache;
struct DummyBlock {
block_size: usize,
block_count: usize,
deny_writes: AtomicBool,
data: Mutex<Vec<u8>>,
}
struct PA;
impl DummyBlock {
pub fn new(block_size: usize, block_count: usize) -> Self {
let mut data = vec![0; block_size * block_count];
for i in 0..block_count {
let block = &mut data[block_size * i..block_size * (i + 1)];
block.fill(i as u8);
}
Self {
data: Mutex::new(data),
deny_writes: AtomicBool::new(false),
block_size,
block_count,
}
}
}
#[async_trait::async_trait]
impl NgBlockDevice for DummyBlock {
type Error = Error;
async fn read(
&self,
lba: u64,
buffer: &mut PageSlice<MaybeUninit<u8>>,
) -> Result<(), Error> {
let start = lba as usize * self.block_size;
let end = start + buffer.len();
if end > self.block_count * self.block_size {
return Err(Error::InvalidArgument);
}
let data = self.data.lock().unwrap();
let buffer = unsafe { MaybeUninit::slice_assume_init_mut(&mut buffer[..]) };
buffer.copy_from_slice(&data[start..end]);
Ok(())
}
async fn write(&self, lba: u64, buffer: &PageSlice<u8>) -> Result<(), Error> {
if self.deny_writes.load(Ordering::Acquire) {
panic!("write() with deny_writes = true");
}
let start = lba as usize * self.block_size;
let end = start + buffer.len();
if end > self.block_count * self.block_size {
return Err(Error::InvalidArgument);
}
let mut data = self.data.lock().unwrap();
data[start..end].copy_from_slice(&buffer[..]);
Ok(())
}
fn block_size(&self) -> usize {
self.block_size
}
fn block_count(&self) -> usize {
self.block_count
}
}
impl PhysicalMemoryAllocator for PA {
type Address = PhysicalAddress;
unsafe fn free_page(page: Self::Address) {
let base = page.try_into_usize().unwrap();
let base = core::ptr::with_exposed_provenance_mut::<c_void>(base);
if unsafe { libc::munmap(base, 0x1000) } != 0 {
let err = io::Error::last_os_error();
panic!("free_page: munmap returned {err}");
}
}
fn allocate_page() -> Result<Self::Address, Error> {
Self::allocate_contiguous_pages(1)
}
fn allocate_contiguous_pages(count: usize) -> Result<Self::Address, Error> {
let base = unsafe {
libc::mmap(
null_mut(),
count * 0x1000,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANON | libc::MAP_PRIVATE,
-1,
0,
)
};
if base != libc::MAP_FAILED {
let base = base.addr();
Ok(PhysicalAddress::from_usize(base))
} else {
Err(Error::OutOfMemory)
}
}
}
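// Host-test shim: each "physical" page is an anonymous mmap region, so the
// PhysicalAddress handed back is really a host virtual address. PageBox's Drop
// frees page-by-page through free_page(); unmapping individual 4 KiB chunks of
// a larger anonymous mapping is valid on Linux, so contiguous multi-page
// allocations still free cleanly.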
const BS: usize = 1024;
// The test must not crash with denied writes
#[tokio::test]
async fn test_no_modification() {
let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
let wrapper = NgBlockDeviceWrapper::new(device);
let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, 64);
device.deny_writes.store(true, Ordering::Release);
cache
.try_with(1 * BS as u64, |block| {
assert!(block.iter().all(|x| *x == 1));
Ok(())
})
.await
.unwrap();
cache
.try_with(2 * BS as u64, |block| {
assert!(block.iter().all(|x| *x == 2));
Ok(())
})
.await
.unwrap();
cache.flush().await;
}
#[tokio::test]
async fn test_partial_modification() {
let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
let wrapper = NgBlockDeviceWrapper::new(device);
// 8 * 8
let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, 8);
const LBA: u64 = 1;
cache
.try_with_mut(LBA * BS as u64, |block| {
block[0..16].fill(0x12);
Ok(())
})
.await
.unwrap();
cache.flush().await;
{
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(LBA, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
buffer[0..16].iter().for_each(|&x| {
assert_eq!(x, 0x12);
});
buffer[16..].iter().for_each(|&x| {
assert_eq!(x, LBA as u8);
});
}
cache
.try_with_mut(LBA * BS as u64, |block| {
block[16..32].fill(0x23);
Ok(())
})
.await
.unwrap();
cache
.try_with_mut(LBA * BS as u64, |block| {
block[48..64].fill(0x34);
Ok(())
})
.await
.unwrap();
cache
.try_with_mut(LBA * BS as u64, |block| {
block[128..256].fill(0xF1);
Ok(())
})
.await
.unwrap();
cache.flush().await;
{
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(LBA, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
buffer[0..16].iter().for_each(|&x| {
assert_eq!(x, 0x12);
});
buffer[16..32].iter().for_each(|&x| {
assert_eq!(x, 0x23);
});
buffer[48..64].iter().for_each(|&x| {
assert_eq!(x, 0x34);
});
buffer[128..256].iter().for_each(|&x| {
assert_eq!(x, 0xF1);
});
buffer[32..48].iter().for_each(|&x| {
assert_eq!(x, LBA as u8);
});
buffer[64..128].iter().for_each(|&x| {
assert_eq!(x, LBA as u8);
});
buffer[256..].iter().for_each(|&x| {
assert_eq!(x, LBA as u8);
});
}
}
#[tokio::test]
async fn test_implicit_eviction() {
let device = Box::leak(Box::new(DummyBlock::new(BS, 1024)));
let wrapper = NgBlockDeviceWrapper::new(device);
// 8 * 8
let cache = BlockCache::<PA>::with_capacity_in(wrapper, BS, 8);
fn mapper(x: u64) -> u8 {
(x + 3) as u8
}
// Go through all blocks, filling each with a known value
for i in 0..1024 {
cache
.try_with_mut(i * BS as u64, |block| {
block.fill(mapper(i));
Ok(())
})
.await
.unwrap();
}
cache.flush().await;
for i in 0..1024 {
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(i, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
assert!(buffer.iter().all(|x| *x == mapper(i)));
}
for i in 0..1023 {
cache
.try_with_mut(i * BS as u64, |block| {
block.fill(0x12);
Ok(())
})
.await
.unwrap();
cache
.try_with_mut((i + 1) * BS as u64, |block| {
block.fill(0x23);
Ok(())
})
.await
.unwrap();
}
for i in 0..1023 {
cache
.try_with_mut(i * BS as u64, |block| {
block.iter_mut().for_each(|x| *x += 1);
Ok(())
})
.await
.unwrap();
cache
.try_with_mut((i + 1) * BS as u64, |block| {
block.iter_mut().for_each(|x| *x += 2);
Ok(())
})
.await
.unwrap();
}
cache.flush().await;
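// Expected state: the fill loops above leave blocks 0..=1022 at 0x12 and block
// 1023 at 0x23; the increment loops then add +1 (via the `i` write) and +2 (via
// the `i + 1` write), so block 0 -> 0x12 + 1 = 0x13, blocks 1..=1022 ->
// 0x12 + 2 + 1 = 0x15, and block 1023 -> 0x23 + 2 = 0x25.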
{
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(0, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
buffer.iter().for_each(|&x| {
assert_eq!(x, 0x13, "block 0 mismatch");
});
}
for i in 1..1023 {
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(i, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
buffer.iter().for_each(|&x| {
assert_eq!(x, 0x15, "block {i} mismatch");
});
}
{
let mut buffer = PageBox::<_, PA>::new_uninit_slice_in(BS).unwrap();
device.read(1023, buffer.as_slice_mut()).await.unwrap();
let buffer = unsafe { buffer.assume_init_slice() };
buffer.iter().for_each(|&x| {
assert_eq!(x, 0x25, "block 1023 mismatch");
});
}
}
}

View File

@ -152,8 +152,41 @@ impl<'a, D: NgBlockDevice + 'a> BlockDevice for NgBlockDeviceWrapper<'a, D> {
Ok(len)
}
async fn write(&self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
todo!()
async fn write(&self, mut pos: u64, mut buf: &[u8]) -> Result<usize, Error> {
let len = buf.len();
let mut remaining = buf.len();
while remaining != 0 {
let block_offset = pos as usize % self.block_size;
let lba = pos / self.block_size as u64;
let amount = core::cmp::min(self.block_size - block_offset, buf.len());
let mut block = PageBox::new_uninit_slice(self.block_size)?;
if amount != self.block_size {
// Need to read the block first -- it's modified partially
self.device
.read(lba, block.as_slice_mut())
.await
.map_err(Self::handle_drive_error)?;
}
let mut block = unsafe { block.assume_init_slice() };
block[block_offset..block_offset + amount].copy_from_slice(&buf[..amount]);
// Write the block back
self.device
.write(lba, block.as_slice())
.await
.map_err(Self::handle_drive_error)?;
buf = &buf[amount..];
remaining -= amount;
pos += amount as u64;
}
Ok(len)
}
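The write path above is a per-block read-modify-write: each iteration maps the byte position to an LBA and an in-block offset, pre-reads the block only when it is partially covered, and writes the whole block back. A hedged standalone sketch of just the splitting math:

// split_write(1000, 100, 512) == [(1, 488, 24), (2, 0, 76)]
fn split_write(mut pos: u64, mut len: usize, block_size: usize) -> Vec<(u64, usize, usize)> {
    let mut parts = Vec::new();
    while len != 0 {
        let offset = pos as usize % block_size; // where the write starts in the block
        let lba = pos / block_size as u64; // which block it lands in
        let amount = core::cmp::min(block_size - offset, len); // how much fits in this block
        parts.push((lba, offset, amount));
        pos += amount as u64;
        len -= amount;
    }
    parts
}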
fn size(&self) -> Result<u64, Error> {

View File

@ -89,8 +89,7 @@ impl<'a, D: NgBlockDevice + 'a> BlockDevice for Partition<'a, D> {
|| start + buf.len() as u64 >= self.end_byte()
|| buf.len() % self.device.block_size != 0
{
// TODO fallback to unaligned read
todo!()
return Err(Error::InvalidArgument);
}
self.device.read_aligned(start, buf).await
@ -102,8 +101,7 @@ impl<'a, D: NgBlockDevice + 'a> BlockDevice for Partition<'a, D> {
|| start + buf.len() as u64 >= self.end_byte()
|| buf.len() % self.device.block_size != 0
{
// TODO fallback to unaligned write
todo!()
return Err(Error::InvalidArgument);
}
self.device.write_aligned(start, buf).await

View File

@ -36,9 +36,15 @@ impl BlockFile {
Ok(count)
}
pub fn write(&self, _buf: &[u8]) -> Result<usize, Error> {
log::info!("BlockFile::write");
todo!()
async fn write_async(&self, buf: &[u8]) -> Result<usize, Error> {
let mut position = self.position.lock();
let count = self.device.0.write(*position, buf).await?;
*position += count as u64;
Ok(count)
}
pub fn write(&self, buf: &[u8]) -> Result<usize, Error> {
block!(self.write_async(buf).await)?
// let mut position = self.position.lock();
// let count = self.device.0.write(*position, buf)?;
// *position += count as u64;

View File

@ -0,0 +1,46 @@
use alloc::{boxed::Box, vec::Vec};
use async_trait::async_trait;
use libk_util::sync::spin_rwlock::IrqSafeRwLock;
use yggdrasil_abi::error::Error;
use super::NodeRef;
#[async_trait]
pub trait Filesystem: Sync {
async fn flush(&self) -> Result<(), Error> {
Ok(())
}
fn display_name(&self) -> &'static str;
}
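Filesystems opt in by implementing the trait; flush defaults to a no-op, so read-only and pseudo-filesystems only need a name. A minimal sketch, assuming a hypothetical pseudo-filesystem type:

struct ProcFs;

#[async_trait]
impl Filesystem for ProcFs {
    // flush() keeps its no-op default: there is nothing to write back.
    fn display_name(&self) -> &'static str {
        "procfs"
    }
}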
static FILESYSTEM_ROOTS: IrqSafeRwLock<Vec<NodeRef>> = IrqSafeRwLock::new(Vec::new());
pub enum FilesystemMountOption<'a> {
// Common options
Sync,
// Filesystem-specific options
FsSpecific(&'a str),
}
pub fn parse_mount_options(options: Option<&str>) -> impl Iterator<Item = FilesystemMountOption> {
options
.unwrap_or("")
.split_whitespace() // also drops empty tokens when no options were given
.map(|word| match word {
"sync" => FilesystemMountOption::Sync,
_ => FilesystemMountOption::FsSpecific(word),
})
}
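Options arrive as one space-separated string (the mount tool below joins its -o flags with spaces), and everything unrecognized passes through to the filesystem driver. A usage sketch:

// "sync errors=remount-ro" -> [Sync, FsSpecific("errors=remount-ro")]
for option in parse_mount_options(Some("sync errors=remount-ro")) {
    match option {
        FilesystemMountOption::Sync => { /* force write-through */ }
        FilesystemMountOption::FsSpecific(other) => log::debug!("fs-specific option: {other}"),
    }
}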
pub fn register_root(root: NodeRef) {
if let Some(fs) = root.filesystem() {
log::info!("Register filesystem root: {}", fs.display_name());
FILESYSTEM_ROOTS.write().push(root);
}
}
pub fn roots() -> &'static IrqSafeRwLock<Vec<NodeRef>> {
&FILESYSTEM_ROOTS
}

View File

@ -277,7 +277,7 @@ impl IoContext {
return Err(Error::DoesNotExist);
}
let parent = self.find(at, parent, false, false)?;
let parent = self.find(at, parent, false, true)?;
let access = self.check_access(Action::Write, &parent)?;
if directory {

View File

@ -9,6 +9,7 @@ pub(crate) mod asyncio;
pub(crate) mod channel;
pub(crate) mod device;
pub(crate) mod file;
pub(crate) mod filesystem;
pub(crate) mod ioctx;
pub(crate) mod node;
pub(crate) mod path;
@ -23,6 +24,10 @@ pub(crate) mod traits;
pub use channel::{Channel, ChannelDescriptor, Message, MessagePayload, Subscription};
pub use device::CharDevice;
pub use file::{DirectoryOpenPosition, File, FileRef, FileSet, InstanceData};
pub use filesystem::{
parse_mount_options, register_root, roots as filesystem_roots, Filesystem,
FilesystemMountOption,
};
pub use ioctx::{Action, IoContext};
pub use node::{
impls, AccessToken, CommonImpl, CreateInfo, DirectoryImpl, Metadata, Node, NodeFlags, NodeRef,

View File

@ -30,6 +30,8 @@ use crate::vfs::{
PseudoTerminalMaster, PseudoTerminalSlave,
};
use super::Filesystem;
/// Wrapper type for a [Node] shared reference
pub type NodeRef = Arc<Node>;
@ -209,6 +211,11 @@ impl Node {
)
}
/// Returns a [Filesystem] this node belongs to.
pub fn filesystem(&self) -> Option<&dyn Filesystem> {
self.data_as_common().filesystem()
}
/// Returns the impl data of the node as `dyn Any`
pub fn data_as_any(&self) -> &dyn Any {
match &self.data {

View File

@ -6,7 +6,10 @@ use yggdrasil_abi::{
io::{DirectoryEntry, FileType, OpenOptions},
};
use crate::vfs::file::{DirectoryOpenPosition, InstanceData};
use crate::vfs::{
file::{DirectoryOpenPosition, InstanceData},
Filesystem,
};
use super::{Metadata, NodeRef};
@ -18,6 +21,11 @@ pub trait CommonImpl: Send + Sync {
unimplemented!();
}
/// Returns a [Filesystem] this node belongs to.
fn filesystem(&self) -> Option<&dyn Filesystem> {
None
}
/// Fetches the metadata of the file from underlying storage
fn metadata(&self, node: &NodeRef) -> Result<Metadata, Error> {
Err(Error::NotImplemented)

View File

@ -24,9 +24,16 @@ fn syscall_inner(frame: &mut SyscallFrame) {
}
}
let code = frame.rax;
if code == usize::from(SyscallFunction::CreateDirectory) {
log::debug!("frame in = {frame:#x?}");
}
let result = raw_syscall_handler(frame.rax, &frame.args);
frame.rax = result as _;
if code == usize::from(SyscallFunction::CreateDirectory) {
log::debug!("frame out = {frame:#x?}");
}
}
extern "C" fn __x86_64_syscall_handler(frame: *mut SyscallFrame) {

View File

@ -111,6 +111,9 @@ impl log::Log for DebugSinkWrapper {
"raw" => {
write!(writer, "{prefix}{args}{suffix}").ok();
}
"io" => {
writeln!(writer, "[io] {args}").ok();
}
_ => {
writeln!(writer, "{prefix}{file}:{line}: {args}{suffix}").ok();
}

View File

@ -7,7 +7,7 @@ use ext2::Ext2Fs;
use kernel_fs::devfs;
use libk::{
block, random,
vfs::{impls::read_fn_node, IoContext, NodeRef},
vfs::{self, impls::read_fn_node, register_root, IoContext, NodeRef},
};
use libk_mm::{
address::{PhysicalAddress, Virtualize},
@ -67,16 +67,21 @@ pub fn create_filesystem(ioctx: &mut IoContext, options: &MountOptions) -> Resul
ioctx.find(None, path, true, true)
});
match fs_name {
"devfs" => Ok(devfs::root().clone()),
"sysfs" => Ok(sysfs::root().clone()),
let options = vfs::parse_mount_options(options.options);
let root = match fs_name {
"devfs" => devfs::root().clone(),
"sysfs" => sysfs::root().clone(),
"ext2" if let Some(source) = source => {
let source = source?;
let device = source.as_block_device()?;
block!(Ext2Fs::create(device).await)?
block!(Ext2Fs::create(device, options).await)??
}
_ => Err(Error::InvalidArgument),
}
_ => return Err(Error::InvalidArgument),
};
register_root(root.clone());
Ok(root)
}
/// Adds "pseudo"-devices to the filesystem (i.e. /dev/random)

View File

@ -3,9 +3,10 @@ use core::{mem::MaybeUninit, time::Duration};
use abi::{
error::Error,
io::{
ChannelPublisherId, DeviceRequest, DirectoryEntry, FileAttr, FileMetadataUpdate, FileMode,
MessageDestination, OpenOptions, PipeOptions, PollControl, RawFd, ReceivedMessageMetadata,
SeekFrom, SentMessage, TerminalOptions, TerminalSize, TimerOptions,
ChannelPublisherId, DeviceRequest, DirectoryEntry, FileAttr, FileControl,
FileMetadataUpdate, FileMode, FilesystemControl, MessageDestination, OpenOptions,
PipeOptions, PollControl, RawFd, ReceivedMessageMetadata, SeekFrom, SentMessage,
TerminalOptions, TerminalSize, TimerOptions,
},
process::{ProcessWait, WaitFlags},
};
@ -13,7 +14,7 @@ use alloc::boxed::Box;
use libk::{
block,
task::thread::Thread,
vfs::{self, File, MessagePayload, Read, Seek, Write},
vfs::{self, File, Filesystem, MessagePayload, Read, Seek, Write},
};
use crate::syscall::{run_with_io, run_with_io_at};
@ -432,3 +433,43 @@ pub(crate) fn receive_message(
}
})
}
pub(crate) fn file_control(fd: RawFd, control: &mut FileControl) -> Result<(), Error> {
todo!()
}
pub(crate) fn filesystem_control(
fd: Option<RawFd>,
control: &mut FilesystemControl,
) -> Result<(), Error> {
match (fd, control) {
// Flush all caches
(None, FilesystemControl::FlushCache) => {
block! {
let roots = vfs::filesystem_roots();
for root in roots.read().iter() {
if let Some(fs) = root.filesystem() {
if let Err(error) = fs.flush().await {
log::error!("{:?} flush: {error:?}", fs.display_name());
}
}
}
}?;
Ok(())
}
// Specific filesystem action
(Some(fd), FilesystemControl::FlushCache) => {
let thread = Thread::current();
let process = thread.process();
run_with_io(&process, |io| {
let file = io.files.file(fd)?;
let node = file.node().ok_or(Error::InvalidOperation)?;
if let Some(fs) = node.filesystem() {
block!(fs.flush().await)??;
}
Ok(())
})
}
}
}

View File

@ -74,7 +74,6 @@ pub fn raw_syscall_handler(func: usize, args: &[usize]) -> u64 {
let Ok(func) = SyscallFunction::try_from(func) else {
todo!("Undefined syscall: {}", func);
};
let args = unsafe { core::mem::transmute(args) };
let result = generated::handle_syscall(func, args);

View File

@ -190,6 +190,8 @@ struct MountOptions<'a> {
pub source: Option<&'a str>,
/// Filesystem to use when mounting. Empty means "deduce automatically" and source must be set.
pub filesystem: Option<&'a str>,
/// Extra filesystem options.
pub options: Option<&'a str>,
/// Path to a directory to mount the filesystem to
pub target: &'a str,
}

View File

@ -29,6 +29,8 @@ extern {
type ReceivedMessageMetadata = yggdrasil_abi::io::ReceivedMessageMetadata;
type DeviceRequest = yggdrasil_abi::io::DeviceRequest;
type FileMetadataUpdate = yggdrasil_abi::io::FileMetadataUpdate;
type FileControl = yggdrasil_abi::io::FileControl;
type FilesystemControl = yggdrasil_abi::io::FilesystemControl;
type DebugOperation = yggdrasil_abi::debug::DebugOperation;
type DebugFrame = yggdrasil_abi::debug::DebugFrame;
@ -67,6 +69,8 @@ syscall get_system_info(info: &mut SystemInfo) -> Result<()>;
syscall mount(opts: &MountOptions<'_>) -> Result<()>;
syscall unmount(opts: &UnmountOptions) -> Result<()>;
syscall load_module(path: &str) -> Result<()>;
syscall file_control(fd: RawFd, control: &mut FileControl) -> Result<()>;
syscall filesystem_control(fd: Option<RawFd>, control: &mut FilesystemControl) -> Result<()>;
// Memory management
syscall map_memory(

View File

@ -4,6 +4,10 @@ use abi_lib::SyscallRegister;
use super::FileMode;
/// Describes an action done on a file descriptor
#[derive(Debug)]
pub enum FileControl {}
/// Describes an operation to perform when updating certain file metadata elements
#[derive(Debug, Clone)]
pub enum FileMetadataUpdateMode {

View File

@ -0,0 +1,5 @@
/// Describes an action performed on a filesystem
#[derive(Debug)]
pub enum FilesystemControl {
FlushCache,
}

View File

@ -1,6 +1,7 @@
mod channel;
mod device;
mod file;
mod filesystem;
mod input;
mod terminal;
@ -10,7 +11,8 @@ pub use crate::generated::{
};
pub use channel::{ChannelPublisherId, MessageDestination, ReceivedMessageMetadata, SentMessage};
pub use device::DeviceRequest;
pub use file::{FileMetadataUpdate, FileMetadataUpdateMode, SeekFrom};
pub use file::{FileControl, FileMetadataUpdate, FileMetadataUpdateMode, SeekFrom};
pub use filesystem::FilesystemControl;
pub use input::{KeyboardKey, KeyboardKeyCode, KeyboardKeyEvent};
pub use terminal::{
TerminalControlCharacters, TerminalInputOptions, TerminalLineOptions, TerminalOptions,

View File

@ -100,6 +100,10 @@ path = "src/sysmon.rs"
name = "date"
path = "src/date.rs"
[[bin]]
name = "sync"
path = "src/sync.rs"
[[bin]]
name = "tst"
path = "src/tst.rs"

View File

@ -5,7 +5,7 @@ use std::{
fmt,
fs::{read_dir, FileType, Metadata},
io,
path::Path,
path::{Path, PathBuf},
process::ExitCode,
};
#[cfg(unix)]
@ -24,7 +24,7 @@ pub struct Args {
#[arg(short, long)]
human_readable: bool,
paths: Vec<String>,
paths: Vec<PathBuf>,
}
trait DisplayBit {
@ -191,6 +191,10 @@ fn list_directory(path: &Path) -> io::Result<Vec<Entry>> {
});
}
entries.sort_by(|a, b| Ord::cmp(&a.name, &b.name));
Ok(entries)
}
@ -201,36 +205,58 @@ fn list(opts: &Args, path: &Path) -> io::Result<()> {
for entry in entries {
println!("{}", entry.display_with(opts));
}
Ok(())
} else {
// TODO fetch info
println!("{}", path.display());
}
let attrs = path.symlink_metadata()?;
Ok(())
let target = attrs.is_symlink().then(|| match path.read_link() {
Ok(res) => res.display().to_string(),
Err(_) => "???".into(),
});
let entry = Entry {
name: path.display().to_string(),
ty: Some(attrs.file_type()),
attrs: Some(attrs),
target,
};
println!("{}", entry.display_with(opts));
Ok(())
}
}
fn list_wrap<P: AsRef<Path>>(opts: &Args, path: P) {
let path = path.as_ref();
match list(opts, path) {
Ok(_) => (),
Err(e) => {
eprintln!("{}: {}", path.display(), e);
fn run_inner(opts: &Args, paths: &[PathBuf], print_names: bool) -> Vec<Result<(), io::Error>> {
let mut results = vec![];
for path in paths {
if print_names {
println!("{}: ", path.display());
}
results.push(list(opts, path).inspect_err(|error| {
eprintln!("{}: {error}", path.display());
}));
}
results
}
fn run(opts: &Args) -> Vec<Result<(), io::Error>> {
if opts.paths.is_empty() {
run_inner(opts, &[".".into()], false)
} else {
run_inner(opts, &opts.paths, opts.paths.len() > 1)
}
}
pub fn main() {
pub fn main() -> ExitCode {
let args = Args::parse();
if args.paths.is_empty() {
list_wrap(&args, ".");
} else {
for path in args.paths.iter() {
if args.paths.len() > 1 {
println!("{}:", path);
}
list_wrap(&args, path);
}
}
let results = run(&args);
if results.iter().any(|e| e.is_err()) {
    ExitCode::FAILURE
} else {
    ExitCode::SUCCESS
}
}

View File

@ -10,6 +10,8 @@ use clap::Parser;
struct Args {
#[arg(short)]
ty: Option<String>,
#[arg(short)]
option: Vec<String>,
source: String,
target: Option<String>,
}
@ -24,12 +26,15 @@ fn main() -> ExitCode {
};
let target = args.target.as_deref().unwrap_or(args.source.as_str());
let filesystem = args.ty.as_deref();
let options = (!args.option.is_empty()).then(|| args.option.join(" "));
let options = options.as_deref();
// Permissions are not yet implemented, lol
let result = unsafe {
let options = MountOptions {
source,
filesystem,
options,
target,
};

View File

@ -0,0 +1,17 @@
use std::process::ExitCode;
use yggdrasil_abi::io::FilesystemControl;
fn run() -> Result<(), yggdrasil_rt::Error> {
unsafe { yggdrasil_rt::sys::filesystem_control(None, &mut FilesystemControl::FlushCache) }
}
pub fn main() -> ExitCode {
match run() {
Ok(()) => ExitCode::SUCCESS,
Err(error) => {
eprintln!("Error: {error:?}");
ExitCode::FAILURE
}
}
}

View File

@ -1,2 +1,43 @@
fn main() {
#![feature(yggdrasil_os)]
use std::{
fs::OpenOptions,
io::{self, Read, Seek, SeekFrom, Write},
};
fn perform_test<F: Read + Write + Seek>(file: &mut F, lba: u64) -> Result<(), io::Error> {
let mut write_back = [0; 4096];
let mut read_back = [0; 4096];
file.seek(SeekFrom::Start(lba * 512))?;
file.read_exact(&mut write_back)?;
for i in 0..4096 {
if i % 2 == 0 {
write_back[i] = (i + lba as usize) as u8;
}
}
file.seek(SeekFrom::Start(lba * 512))?;
file.write_all(&write_back)?;
file.seek(SeekFrom::Start(lba * 512))?;
file.read_exact(&mut read_back)?;
assert_eq!(read_back, write_back);
Ok(())
}
fn main() {
let mut file = OpenOptions::new()
.write(true)
.read(true)
.truncate(false)
.open("/dev/nvme0n1")
.unwrap();
for i in 0..256 {
perform_test(&mut file, i).unwrap();
}
}

View File

@ -41,6 +41,7 @@ const PROGRAMS: &[(&str, &str)] = &[
("sha256sum", "bin/sha256sum"),
("sysmon", "bin/sysmon"),
("date", "bin/date"),
("sync", "bin/sync"),
("tst", "bin/tst"),
// netutils
("netconf", "sbin/netconf"),