fs: implement in-memory writable filesystem

Mark Poliakov 2023-07-21 17:09:43 +03:00
parent 69a413f6bb
commit 64233db3df
14 changed files with 1261 additions and 314 deletions


@ -8,6 +8,7 @@ edition = "2021"
[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
vfs = { path = "lib/vfs" }
memfs = { path = "lib/memfs" }
aarch64-cpu = "9.3.1"
atomic_enum = "0.2.0"

lib/memfs/Cargo.toml Normal file

@ -0,0 +1,15 @@
[package]
name = "memfs"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
vfs = { path = "../vfs" }
static_assertions = "1.1.0"
[features]
default = []
test-io = []

lib/memfs/src/block.rs Normal file

@ -0,0 +1,260 @@
//! Block management interfaces and structures
use core::{
marker::PhantomData,
mem::{size_of, MaybeUninit},
ops::{Deref, DerefMut},
ptr::NonNull,
};
use yggdrasil_abi::error::Error;
/// Number of bytes in a block
pub const SIZE: usize = 4096;
/// Maximum number of indirection pointers a block can hold
pub const ENTRY_COUNT: usize = SIZE / size_of::<usize>();
/// Interface for a block allocator
///
/// # Safety
///
/// This trait is unsafe to implement because implementations must provide and accept raw
/// pointers to blocks of exactly [SIZE] bytes.
pub unsafe trait BlockAllocator: 'static {
/// Allocates a contiguous block of size [SIZE]
fn alloc() -> Result<NonNull<u8>, Error>;
/// Deallocates a block.
///
/// # Safety
///
/// Unsafe: accepts arbitrary data pointers; the caller must pass a pointer previously
/// returned by [BlockAllocator::alloc].
unsafe fn dealloc(block: NonNull<u8>);
}
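// Illustration only (hypothetical, not part of this commit's diff): a minimal heap-backed
// allocator in the spirit of the test allocator defined later in lib.rs. `HeapBlockAllocator`
// is a made-up name; the `alloc` crate is assumed available (lib.rs declares it).
#[allow(dead_code)]
struct HeapBlockAllocator;
unsafe impl BlockAllocator for HeapBlockAllocator {
fn alloc() -> Result<NonNull<u8>, Error> {
// Box a zeroed SIZE-byte array and leak it into a raw block pointer
let block = alloc::boxed::Box::into_raw(alloc::boxed::Box::new([0u8; SIZE]));
Ok(unsafe { NonNull::new_unchecked(block as *mut u8) })
}
unsafe fn dealloc(block: NonNull<u8>) {
// Reconstruct the Box so the block is returned to the heap
drop(alloc::boxed::Box::from_raw(block.as_ptr() as *mut [u8; SIZE]));
}
}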
#[repr(transparent)]
struct BlockRaw<'a, A: BlockAllocator> {
inner: Option<&'a mut [u8; SIZE]>,
_pd: PhantomData<A>,
}
/// Block containing file data
#[repr(transparent)]
pub struct BlockData<'a, A: BlockAllocator> {
inner: BlockRaw<'a, A>,
}
/// Block containing indirection pointers to other blocks
#[repr(transparent)]
pub struct BlockIndirect<'a, A: BlockAllocator> {
inner: BlockRaw<'a, A>,
}
impl<'a, A: BlockAllocator> BlockRaw<'a, A> {
const fn null() -> Self {
Self {
inner: None,
_pd: PhantomData,
}
}
fn new() -> Result<Self, Error> {
let ptr = A::alloc()?;
unsafe { Ok(Self::from_raw(ptr)) }
}
unsafe fn from_raw(ptr: NonNull<u8>) -> Self {
Self {
inner: Some(&mut *(ptr.as_ptr() as *mut _)),
_pd: PhantomData,
}
}
unsafe fn as_uninit_indirect_mut(
&mut self,
) -> &'a mut [MaybeUninit<BlockData<'a, A>>; ENTRY_COUNT] {
unsafe { &mut *(self.inner.as_ref().unwrap().as_ptr() as *mut _) }
}
#[inline]
fn is_null(&self) -> bool {
self.inner.is_none()
}
}
impl<A: BlockAllocator> Drop for BlockRaw<'_, A> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
unsafe {
A::dealloc(NonNull::new_unchecked(inner as *mut _));
}
}
}
}
// Data block
impl<'a, A: BlockAllocator> BlockData<'a, A> {
/// Dummy entry representing a missing block
pub const fn null() -> Self {
Self {
inner: BlockRaw::null(),
}
}
/// Allocates a new block for data
pub fn new() -> Result<Self, Error> {
Ok(Self {
inner: BlockRaw::new()?,
})
}
/// Replaces self with a null block and drops any data that might've been allocated
pub fn set_null(&mut self) {
self.inner = BlockRaw::null();
}
/// Returns `true` if the block this structure refers to has not yet been allocated
#[inline]
pub fn is_null(&self) -> bool {
self.inner.is_null()
}
}
impl<A: BlockAllocator> Deref for BlockData<'_, A> {
type Target = [u8; SIZE];
fn deref(&self) -> &Self::Target {
self.inner.inner.as_ref().unwrap()
}
}
impl<A: BlockAllocator> DerefMut for BlockData<'_, A> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.inner.as_mut().unwrap()
}
}
// Indirect block
impl<'a, A: BlockAllocator> BlockIndirect<'a, A> {
/// Dummy entry representing a missing block
pub const fn null() -> Self {
Self {
inner: BlockRaw::null(),
}
}
/// Allocates a new indirection block
pub fn new() -> Result<Self, Error> {
let mut inner = BlockRaw::new()?;
for item in unsafe { inner.as_uninit_indirect_mut() } {
item.write(BlockData::null());
}
Ok(Self { inner })
}
/// Returns `true` if the block this structure refers to has not yet been allocated
#[inline]
pub fn is_null(&self) -> bool {
self.inner.is_null()
}
}
impl<'a, A: BlockAllocator> Deref for BlockIndirect<'a, A> {
type Target = [BlockData<'a, A>; ENTRY_COUNT];
fn deref(&self) -> &Self::Target {
unsafe { &*(self.inner.inner.as_ref().unwrap().as_ptr() as *const _) }
}
}
impl<'a, A: BlockAllocator> DerefMut for BlockIndirect<'a, A> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *(self.inner.inner.as_mut().unwrap().as_mut_ptr() as *mut _) }
}
}
impl<'a, A: BlockAllocator> Drop for BlockIndirect<'a, A> {
fn drop(&mut self) {
if self.is_null() {
return;
}
for item in self.iter_mut() {
item.set_null();
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::Ordering;
use std::vec::Vec;
use crate::block::{BlockData, BlockIndirect};
#[test]
fn block_indirect_allocation() {
test_allocator_with_counter!(A_COUNTER, A);
const N: usize = 7;
const M: usize = 13;
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
{
let mut indirect = Vec::new();
// Allocate indirect blocks
{
for _ in 0..N {
indirect.push(BlockIndirect::<A>::new().unwrap());
}
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), N);
// Fill each indirection block with data blocks
{
for l1_block in indirect.iter_mut() {
for i in 0..M {
let l0_block = BlockData::new().unwrap();
l1_block[i] = l0_block;
}
}
}
// N * M data blocks and N indirection blocks
assert_eq!(A_COUNTER.load(Ordering::Acquire), N * M + N);
// Drop 1 indirect block for test
indirect.pop();
assert_eq!(A_COUNTER.load(Ordering::Acquire), (N - 1) * M + (N - 1));
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
}
#[test]
fn block_allocation() {
test_allocator_with_counter!(A_COUNTER, A);
const N: usize = 13;
{
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
{
let mut s = Vec::new();
for _ in 0..N {
let mut block = BlockData::<A>::new().unwrap();
block.fill(1);
s.push(block);
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), N);
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
}
}
}

lib/memfs/src/bvec.rs Normal file

@ -0,0 +1,459 @@
//! Block vector management structures
use core::{
cmp::Ordering,
mem::MaybeUninit,
ops::{Index, IndexMut},
};
use yggdrasil_abi::error::Error;
use crate::block::{self, BlockAllocator, BlockData, BlockIndirect};
// 16.125M total
const L0_BLOCKS: usize = 32; // 128K in L0
const L1_BLOCKS: usize = 8; // 16M in L1
/// Block vector for efficient in-memory files
pub struct BVec<'a, A: BlockAllocator> {
capacity: usize,
size: usize,
l0: [BlockData<'a, A>; L0_BLOCKS],
l1: [BlockIndirect<'a, A>; L1_BLOCKS],
}
impl<'a, A: BlockAllocator> BVec<'a, A> {
/// Creates an empty block vector.
///
/// # Note
///
/// The function is guaranteed to make no allocations before the vector is actually written to.
pub fn new() -> Self {
let mut l0 = MaybeUninit::uninit_array();
let mut l1 = MaybeUninit::uninit_array();
for it in l0.iter_mut() {
it.write(BlockData::null());
}
for it in l1.iter_mut() {
it.write(BlockIndirect::null());
}
Self {
capacity: 0,
size: 0,
l0: unsafe { MaybeUninit::array_assume_init(l0) },
l1: unsafe { MaybeUninit::array_assume_init(l1) },
}
}
/// Returns the size of the data inside this vector
#[inline]
pub const fn size(&self) -> usize {
self.size
}
fn grow_l1(&mut self, old_l1_cap: usize, new_l1_cap: usize) -> Result<(), Error> {
for i in old_l1_cap..new_l1_cap {
assert!(self.l1[i].is_null());
self.l1[i] = BlockIndirect::new()?;
}
Ok(())
}
fn shrink_l1(&mut self, old_l1_cap: usize, new_l1_cap: usize) {
debug_assert!(new_l1_cap <= old_l1_cap);
for i in new_l1_cap..old_l1_cap {
assert!(!self.l1[i].is_null());
self.l1[i] = BlockIndirect::null();
}
}
#[inline]
fn caps(cap: usize) -> (usize, usize) {
let l0_cap = core::cmp::min(cap, L0_BLOCKS);
let l1_cap = if cap > L0_BLOCKS {
core::cmp::min(
(cap - L0_BLOCKS + block::ENTRY_COUNT - 1) / block::ENTRY_COUNT,
L1_BLOCKS,
)
} else {
0
};
(l0_cap, l1_cap)
}
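// Illustration (not part of this commit): with ENTRY_COUNT == 512 on a 64-bit target,
// the capacity split behaves as follows:
//   caps(10)  == (10, 0)  -- fits entirely in the L0 array
//   caps(40)  == (32, 1)  -- 32 blocks in L0, the remaining 8 go into one L1 block
//   caps(600) == (32, 2)  -- two L1 indirection blocks are needed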
/// Resizes the vector to hold exactly `new_capacity` data blocks
pub fn resize(&mut self, new_capacity: usize) -> Result<(), Error> {
// TODO handle L2 capacity
match new_capacity.cmp(&self.capacity) {
Ordering::Less => {
let (_, new_l1_cap) = Self::caps(new_capacity);
let (_, old_l1_cap) = Self::caps(self.capacity);
// Shrink data blocks
for index in new_capacity..self.capacity {
let block = &mut self[index];
assert!(!block.is_null());
block.set_null();
}
// Shrink L1 blocks
self.shrink_l1(old_l1_cap, new_l1_cap);
}
Ordering::Greater => {
let (_, new_l1_cap) = Self::caps(new_capacity);
let (_, old_l1_cap) = Self::caps(self.capacity);
// Allocate L1 indirection blocks
assert!(new_l1_cap >= old_l1_cap);
if new_l1_cap > old_l1_cap {
self.grow_l1(old_l1_cap, new_l1_cap)?;
}
// Grow data blocks
for index in self.capacity..new_capacity {
let block = unsafe { self.index_unchecked_mut(index) };
assert!(block.is_null());
*block = BlockData::new()?;
}
}
Ordering::Equal => (),
}
self.capacity = new_capacity;
Ok(())
}
fn ensure_write_capacity(&mut self, pos: usize, need_to_write: usize) -> Result<(), Error> {
// Capacities are measured in blocks: compare the required block count against the
// currently allocated block count
let need_capacity =
(core::cmp::max(pos + need_to_write, self.size) + block::SIZE - 1) / block::SIZE;
if need_capacity > self.capacity {
self.resize(need_capacity)
} else {
Ok(())
}
}
/// Writes data to the vector, growing it if needed
pub fn write(&mut self, pos: u64, data: &[u8]) -> Result<usize, Error> {
let mut pos = pos as usize;
// if pos > self.size {
// return Err(Error::InvalidFile);
// }
let mut rem = data.len();
let mut doff = 0usize;
self.ensure_write_capacity(pos, rem)?;
if pos + rem > self.size {
self.size = pos + rem;
}
while rem > 0 {
let index = pos / block::SIZE;
let offset = pos % block::SIZE;
let count = core::cmp::min(rem, block::SIZE - offset);
let block = &mut self[index];
let dst = &mut block[offset..offset + count];
let src = &data[doff..doff + count];
dst.copy_from_slice(src);
doff += count;
pos += count;
rem -= count;
}
Ok(doff)
}
/// Reads data from the vector
pub fn read(&self, pos: u64, data: &mut [u8]) -> Result<usize, Error> {
let mut pos = pos as usize;
if pos > self.size {
return Err(Error::InvalidFile);
}
let mut rem = core::cmp::min(self.size - pos, data.len());
let mut doff = 0usize;
while rem > 0 {
let index = pos / block::SIZE;
let offset = pos % block::SIZE;
let count = core::cmp::min(block::SIZE - offset, rem);
let block = &self[index];
let src = &block[offset..offset + count];
let dst = &mut data[doff..doff + count];
dst.copy_from_slice(src);
doff += count;
pos += count;
rem -= count;
}
Ok(doff)
}
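// Worked example (illustrative): with 4096-byte blocks, reading 10 bytes at pos == 4090
// touches two blocks: block 0 at offset 4090 for 6 bytes, then block 1 at offset 0 for
// the remaining 4 bytes. `write` performs the same position-to-(index, offset) decomposition.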
unsafe fn index_unchecked(&self, mut index: usize) -> &BlockData<'a, A> {
if index < L0_BLOCKS {
return &self.l0[index];
}
index -= L0_BLOCKS;
if index < L1_BLOCKS * block::ENTRY_COUNT {
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l1r = &self.l1[l1i];
assert!(!l1r.is_null());
return &l1r[l0i];
}
todo!();
}
unsafe fn index_unchecked_mut(&mut self, mut index: usize) -> &mut BlockData<'a, A> {
if index < L0_BLOCKS {
return &mut self.l0[index];
}
index -= L0_BLOCKS;
if index < L1_BLOCKS * block::ENTRY_COUNT {
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l1r = &mut self.l1[l1i];
assert!(!l1r.is_null());
return &mut l1r[l0i];
}
todo!()
}
}
impl<'a, A: BlockAllocator> Index<usize> for BVec<'a, A> {
type Output = BlockData<'a, A>;
fn index(&self, index: usize) -> &Self::Output {
if index >= self.capacity {
panic!(
"Block index out of bounds: capacity={}, index={}",
self.capacity, index
);
}
unsafe { self.index_unchecked(index) }
}
}
impl<'a, A: BlockAllocator> IndexMut<usize> for BVec<'a, A> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
if index >= self.capacity {
panic!(
"Block index out of bounds: capacity={}, index={}",
self.capacity, index
);
}
unsafe { self.index_unchecked_mut(index) }
}
}
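// Note: the `TryFrom<&[u8]>` impl below is what `MemoryFilesystem::from_slice_internal`
// (lib.rs) uses to turn a tar entry's data slice into in-memory file contents.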
impl<'a, A: BlockAllocator> TryFrom<&[u8]> for BVec<'a, A> {
type Error = Error;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
let mut res = Self::new();
res.write(0, value)?;
assert_eq!(res.size(), value.len());
Ok(res)
}
}
#[cfg(test)]
mod bvec_allocation {
use core::sync::atomic::Ordering;
use crate::{
block,
bvec::{BVec, L0_BLOCKS, L1_BLOCKS},
};
#[test]
fn bvec_grow_shrink() {
test_allocator_with_counter!(A_COUNTER, A);
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
{
let mut bvec = BVec::<A>::new();
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
0,
"BVec should not allocate on creation"
);
const N: usize = 123;
bvec.resize(N).unwrap();
// N data blocks (32 in L0 + 91 in L1) plus 1 L1 indirection block
assert_eq!(A_COUNTER.load(Ordering::Acquire), N + 1);
// Test the index interface
for i in 0..N {
assert!(!bvec[i].is_null(), "Index {} must be allocated", i);
}
// Test the data structure
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].is_null());
}
assert!(!bvec.l1[0].is_null());
for i in L0_BLOCKS..N {
let l1i = (i - L0_BLOCKS) / block::ENTRY_COUNT;
let l0i = (i - L0_BLOCKS) % block::ENTRY_COUNT;
let l1r = &bvec.l1[l1i];
assert!(!l1r.is_null());
assert!(!l1r[l0i].is_null());
}
for i in 1..L1_BLOCKS {
assert!(bvec.l1[i].is_null());
}
// Shrink to 100 blocks, test if L1 is still allocated
const M: usize = 100;
bvec.resize(M).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), M + 1);
// Test the index interface
for i in 0..M {
assert!(!bvec[i].is_null(), "Index {} must be allocated", i);
}
// Test the data structure
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].is_null());
}
assert!(!bvec.l1[0].is_null());
for i in L0_BLOCKS..M {
let l1i = (i - L0_BLOCKS) / block::ENTRY_COUNT;
let l0i = (i - L0_BLOCKS) % block::ENTRY_COUNT;
let l1r = &bvec.l1[l1i];
assert!(!l1r.is_null());
assert!(!l1r[l0i].is_null());
}
for i in M..N {
let l1i = (i - L0_BLOCKS) / block::ENTRY_COUNT;
let l0i = (i - L0_BLOCKS) % block::ENTRY_COUNT;
let l1r = &bvec.l1[l1i];
assert!(!l1r.is_null());
assert!(l1r[l0i].is_null());
}
for i in 1..L1_BLOCKS {
assert!(bvec.l1[i].is_null());
}
// Shrink to 13 blocks, test if L1 got deallocated
const O: usize = 13;
bvec.resize(O).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), O);
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
}
}
#[cfg(all(test, feature = "test-io"))]
mod bvec_io {
use crate::{block, bvec::L0_BLOCKS};
use super::BVec;
#[test]
fn test_bvec_write() {
test_allocator_with_counter!(A_COUNTER, A);
{
let data = [1, 2, 3, 4, 5];
let mut bvec = BVec::<A>::new();
// Write at 0
assert_eq!(bvec.write(0, &data).unwrap(), data.len());
assert_eq!(bvec.capacity, 1);
assert_eq!(bvec.size(), data.len());
assert_eq!(&bvec[0][..bvec.size()], &data[..]);
// Write at 3
assert_eq!(bvec.write(3, &data).unwrap(), data.len());
assert_eq!(bvec.capacity, 1);
assert_eq!(bvec.size(), 3 + data.len());
assert_eq!(&bvec[0][..bvec.size()], &[1, 2, 3, 1, 2, 3, 4, 5]);
}
{
let data = [5, 4, 3, 2, 1];
let mut bvec = BVec::<A>::new();
// Write at the end of L0-region
assert_eq!(
bvec.write((L0_BLOCKS * block::SIZE) as u64, &data).unwrap(),
data.len()
);
// Capacity: L0_BLOCKS data blocks in L0 plus one data block in L1
assert_eq!(bvec.capacity, L0_BLOCKS + 1);
assert_eq!(bvec.size(), L0_BLOCKS * block::SIZE + data.len());
assert_eq!(&bvec[L0_BLOCKS][..data.len()], &data[..]);
// Write at zero
assert_eq!(bvec.write(0, &data).unwrap(), data.len());
assert_eq!(bvec.capacity, L0_BLOCKS + 1);
assert_eq!(bvec.size(), L0_BLOCKS * block::SIZE + data.len());
assert_eq!(&bvec[0][..data.len()], &data[..]);
// Test write crossing L0 block boundary
assert_eq!(
bvec.write((block::SIZE - 3) as u64, &data).unwrap(),
data.len()
);
assert_eq!(bvec.capacity, L0_BLOCKS + 1);
assert_eq!(bvec.size(), L0_BLOCKS * block::SIZE + data.len());
assert_eq!(&bvec[0][block::SIZE - 3..], &[5, 4, 3]);
assert_eq!(&bvec[1][..2], &[2, 1]);
// Test write crossing L0-L1 boundary
assert_eq!(
bvec.write((L0_BLOCKS * block::SIZE) as u64 - 2, &data)
.unwrap(),
data.len()
);
assert_eq!(bvec.capacity, L0_BLOCKS + 1);
assert_eq!(bvec.size(), L0_BLOCKS * block::SIZE + data.len());
assert_eq!(&bvec[L0_BLOCKS - 1][block::SIZE - 2..], &[5, 4]);
assert_eq!(&bvec[L0_BLOCKS][..data.len()], &[3, 2, 1, 2, 1]);
}
}
}

lib/memfs/src/dir.rs Normal file

@ -0,0 +1,32 @@
use core::marker::PhantomData;
use alloc::boxed::Box;
use vfs::{Vnode, VnodeImpl, VnodeKind, VnodeRef};
use yggdrasil_abi::error::Error;
use crate::{block::BlockAllocator, bvec::BVec, file::FileNode};
pub(crate) struct DirectoryNode<A: BlockAllocator> {
_pd: PhantomData<A>,
}
impl<A: BlockAllocator> VnodeImpl for DirectoryNode<A> {
fn create(&mut self, _at: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error> {
let child = Vnode::new(name, kind);
match kind {
VnodeKind::Directory => child.set_data(Box::new(Self { _pd: PhantomData })),
VnodeKind::Regular => child.set_data(Box::new(FileNode {
data: BVec::<A>::new(),
})),
_ => todo!(),
}
Ok(child)
}
}
impl<A: BlockAllocator> DirectoryNode<A> {
pub fn new() -> Self {
Self { _pd: PhantomData }
}
}

lib/memfs/src/file.rs Normal file

@ -0,0 +1,37 @@
use vfs::{VnodeImpl, VnodeRef};
use yggdrasil_abi::{
error::Error,
io::{FileMode, OpenOptions},
};
use crate::{block::BlockAllocator, bvec::BVec};
pub(crate) struct FileNode<A: BlockAllocator> {
pub(crate) data: BVec<'static, A>,
}
impl<A: BlockAllocator> VnodeImpl for FileNode<A> {
fn open(&mut self, _node: &VnodeRef, opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
if opts.contains(OpenOptions::APPEND) {
Ok(self.data.size() as u64)
} else {
Ok(0)
}
}
fn close(&mut self, _node: &VnodeRef) -> Result<(), Error> {
Ok(())
}
fn read(&mut self, _node: &VnodeRef, pos: u64, data: &mut [u8]) -> Result<usize, Error> {
self.data.read(pos, data)
}
fn write(&mut self, _node: &VnodeRef, pos: u64, data: &[u8]) -> Result<usize, Error> {
self.data.write(pos, data)
}
fn size(&mut self, _node: &VnodeRef) -> Result<u64, Error> {
Ok(self.data.size() as u64)
}
}

lib/memfs/src/lib.rs Normal file

@ -0,0 +1,287 @@
//! In-memory filesystem driver
#![no_std]
#![warn(missing_docs)]
#![allow(clippy::new_without_default)]
#![feature(
const_mut_refs,
maybe_uninit_uninit_array,
const_maybe_uninit_uninit_array,
maybe_uninit_array_assume_init
)]
use core::{
any::Any,
cell::{Ref, RefCell},
marker::PhantomData,
};
use alloc::{boxed::Box, rc::Rc};
use block::BlockAllocator;
use vfs::{BlockDevice, Filesystem, Vnode, VnodeKind, VnodeRef};
use yggdrasil_abi::{error::Error, path};
use crate::{bvec::BVec, dir::DirectoryNode, file::FileNode, tar::TarIterator};
#[cfg(test)]
extern crate std;
extern crate alloc;
#[cfg(test)]
macro_rules! test_allocator_with_counter {
($counter:ident, $allocator:ident) => {
static $counter: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0);
struct $allocator;
unsafe impl $crate::block::BlockAllocator for $allocator {
fn alloc() -> Result<core::ptr::NonNull<u8>, yggdrasil_abi::error::Error> {
let b = std::boxed::Box::into_raw(std::boxed::Box::new([0; $crate::block::SIZE]));
$counter.fetch_add(1, core::sync::atomic::Ordering::Release);
Ok(unsafe { core::ptr::NonNull::new_unchecked(b as _) })
}
unsafe fn dealloc(block: core::ptr::NonNull<u8>) {
$counter.fetch_sub(1, core::sync::atomic::Ordering::Release);
drop(std::boxed::Box::from_raw(
block.as_ptr() as *mut [u8; $crate::block::SIZE]
));
}
}
};
}
pub mod block;
pub mod bvec;
mod dir;
mod file;
mod tar;
/// In-memory read/write filesystem
pub struct MemoryFilesystem<A: BlockAllocator> {
root: RefCell<Option<VnodeRef>>,
_pd: PhantomData<A>,
}
impl<A: BlockAllocator> Filesystem for MemoryFilesystem<A> {
fn dev(self: Rc<Self>) -> Option<&'static dyn BlockDevice> {
todo!()
}
fn data(&self) -> Option<Ref<dyn Any>> {
todo!()
}
fn root(self: Rc<Self>) -> Result<VnodeRef, Error> {
Ok(self.root.borrow().clone().unwrap())
}
}
impl<A: BlockAllocator> MemoryFilesystem<A> {
fn make_path(
self: &Rc<Self>,
at: &VnodeRef,
path: &str,
kind: VnodeKind,
create: bool,
) -> Result<VnodeRef, Error> {
if path.is_empty() {
return Ok(at.clone());
}
let (element, rest) = path::split_left(path);
assert!(!element.is_empty());
let node = at.lookup(element);
let node = match node {
Some(node) => node,
None => {
if !create {
return Err(Error::DoesNotExist);
}
let node = self.create_node_initial(element, kind);
at.add_child(node.clone());
node
}
};
if rest.is_empty() {
Ok(node)
} else {
assert!(node.is_directory());
self.make_path(&node, rest, kind, create)
}
}
fn create_node_initial(self: &Rc<Self>, name: &str, kind: VnodeKind) -> VnodeRef {
assert!(!name.is_empty());
assert!(!name.contains('/'));
let node = Vnode::new(name, kind);
node.set_fs(self.clone());
match kind {
VnodeKind::Directory => node.set_data(Box::new(DirectoryNode::<A>::new())),
VnodeKind::Regular => {}
_ => todo!(),
}
node
}
fn from_slice_internal(self: &Rc<Self>, tar_data: &'static [u8]) -> Result<VnodeRef, Error> {
let root = Vnode::new("", VnodeKind::Directory);
root.set_fs(self.clone());
root.set_data(Box::new(DirectoryNode::<A>::new()));
// 1. Create paths in tar
for item in TarIterator::new(tar_data) {
let Ok((hdr, _)) = item else {
return Err(Error::InvalidArgument);
};
let path = hdr.name.as_str()?.trim_matches('/');
let (dirname, filename) = path::split_right(path);
let parent = self.make_path(&root, dirname, VnodeKind::Directory, true)?;
let node = self.create_node_initial(filename, hdr.node_kind());
parent.add_child(node);
}
// 2. Associate files with their data
for item in TarIterator::new(tar_data) {
let Ok((hdr, data)) = item else {
panic!("Unreachable");
};
if hdr.node_kind() == VnodeKind::Regular {
let data = data.unwrap();
let path = hdr.name.as_str()?.trim_matches('/');
let node = self.make_path(&root, path, VnodeKind::Directory, false)?;
let bvec = BVec::<A>::try_from(data)?;
assert_eq!(bvec.size(), data.len());
node.set_data(Box::new(FileNode { data: bvec }));
}
}
Ok(root)
}
/// Constructs a filesystem tree from a tar image in memory
pub fn from_slice(tar_data: &'static [u8]) -> Result<Rc<Self>, Error> {
let fs = Rc::new(Self {
root: RefCell::new(None),
_pd: PhantomData,
});
let root = fs.from_slice_internal(tar_data)?;
fs.root.replace(Some(root));
Ok(fs)
}
/// Constructs an empty memory filesystem
pub fn empty() -> Rc<Self> {
let fs = Rc::new(Self {
root: RefCell::new(None),
_pd: PhantomData,
});
let root = Vnode::new("", VnodeKind::Directory);
root.set_data(Box::new(DirectoryNode::<A>::new()));
root.set_fs(fs.clone());
fs.root.replace(Some(root));
fs
}
}
#[cfg(test)]
mod tests {
use std::rc::Rc;
use vfs::{Filesystem, IoContext, Read, Seek, SeekFrom, VnodeKind, Write};
use yggdrasil_abi::io::{FileMode, OpenOptions};
use crate::MemoryFilesystem;
#[test]
fn test_memfs_construction() {
fn check_file(ioctx: &IoContext, path: &str, expected_data: &str) {
let node = ioctx.find(None, path, false, false).unwrap();
assert_eq!(node.kind(), VnodeKind::Regular);
assert_eq!(node.size().unwrap(), expected_data.len() as u64);
let file = node.open(OpenOptions::READ, FileMode::empty()).unwrap();
let mut buf = [0; 512];
assert_eq!(
file.borrow_mut().read(&mut buf).unwrap(),
expected_data.len()
);
assert_eq!(&buf[..expected_data.len()], expected_data.as_bytes());
}
static TEST_IMAGE: &[u8] = include_bytes!("../test/test_image.tar");
test_allocator_with_counter!(A_COUNTER, A);
let fs = MemoryFilesystem::<A>::from_slice(TEST_IMAGE).unwrap();
let root = fs.root().unwrap();
let ioctx = IoContext::new(root.clone());
assert!(Rc::ptr_eq(
&root,
&ioctx.find(None, "/", false, false).unwrap()
));
check_file(&ioctx, "/test1.txt", include_str!("../test/test1.txt"));
}
#[test]
fn test_memfs_create_and_write() {
test_allocator_with_counter!(A_COUNTER, A);
let fs = MemoryFilesystem::<A>::empty();
let root = fs.root().unwrap();
let ioctx = IoContext::new(root.clone());
// Create, write, seek and read file
{
// TODO CREATE option handling
root.create("test1.txt", VnodeKind::Regular).unwrap();
let file = ioctx
.open(
None,
"/test1.txt",
OpenOptions::WRITE | OpenOptions::READ,
FileMode::empty(),
)
.unwrap();
let write_data = [1, 2, 3, 4];
let mut read_data = [0; 512];
let mut file = file.borrow_mut();
assert_eq!(file.write(&write_data).unwrap(), write_data.len());
assert_eq!(file.seek(SeekFrom::Start(0)).unwrap(), 0);
assert_eq!(file.read(&mut read_data).unwrap(), write_data.len());
assert_eq!(&read_data[..write_data.len()], &write_data[..]);
}
// Create a directory
{
// TODO read directory
root.create("dir1", VnodeKind::Directory).unwrap();
let dir1 = ioctx.find(None, "/dir1", false, false).unwrap();
let node = dir1.create("file1.txt", VnodeKind::Regular).unwrap();
assert!(Rc::ptr_eq(
&ioctx.find(None, "/dir1/file1.txt", false, false).unwrap(),
&node
));
}
}
}

lib/memfs/src/tar.rs Normal file

@ -0,0 +1,136 @@
use vfs::VnodeKind;
use yggdrasil_abi::error::Error;
#[repr(C)]
pub(crate) struct OctalField<const N: usize> {
data: [u8; N],
}
#[repr(C)]
pub(crate) struct TarString<const N: usize> {
data: [u8; N],
}
pub(crate) struct TarIterator<'a> {
data: &'a [u8],
offset: usize,
zero_blocks: usize,
}
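/// Tar (ustar) header record; the fields below total exactly 512 bytes (one archive block)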
#[repr(packed)]
pub(crate) struct TarEntry {
pub name: TarString<100>,
_mode: OctalField<8>,
_uid: OctalField<8>,
_gid: OctalField<8>,
pub size: OctalField<12>,
_mtime: OctalField<12>,
_checksum: OctalField<8>,
type_: u8,
_link_name: TarString<100>,
_magic: [u8; 8],
_user: TarString<32>,
_group: TarString<32>,
_dev_major: OctalField<8>,
_dev_minor: OctalField<8>,
_prefix: TarString<155>,
__pad: [u8; 12],
}
impl<'a> TarIterator<'a> {
pub const fn new(data: &'a [u8]) -> Self {
Self {
data,
offset: 0,
zero_blocks: 0,
}
}
}
impl<'a> Iterator for TarIterator<'a> {
type Item = Result<(&'a TarEntry, Option<&'a [u8]>), Error>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.offset + 512 > self.data.len() {
break None;
}
let hdr_ptr = &self.data[self.offset..];
let hdr = unsafe { &*(hdr_ptr.as_ptr() as *const TarEntry) };
if hdr.is_empty() {
if self.zero_blocks == 1 {
self.offset = self.data.len();
return None;
}
self.zero_blocks += 1;
continue;
}
let (data, size_aligned) = match hdr.type_ {
0 | b'0' => {
let size = usize::from(&hdr.size);
if self.offset + 512 + size > self.data.len() {
return Some(Err(Error::InvalidArgument));
}
let data = &self.data[self.offset + 512..self.offset + 512 + size];
let size_aligned = (size + 511) & !511;
(Some(data), size_aligned)
}
// Directory
b'5' => (None, 0),
_ => todo!("Unknown node kind: {}", hdr.type_),
};
self.offset += size_aligned + 512;
break Some(Ok((hdr, data)));
}
}
}
impl<const N: usize> From<&OctalField<N>> for usize {
fn from(value: &OctalField<N>) -> Self {
let mut acc = 0;
for i in 0..N {
if value.data[i] == 0 {
break;
}
acc <<= 3;
acc |= (value.data[i] - b'0') as usize;
}
acc
}
}
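// Illustration (not part of this commit): tar numeric fields are NUL-terminated ASCII
// octal, so a 12-byte size field containing "00000001750\0" decodes to 0o1750 == 1000 bytes.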
impl<const N: usize> TarString<N> {
pub fn as_str(&self) -> Result<&str, Error> {
core::str::from_utf8(&self.data[..self.len()]).map_err(|_| Error::InvalidArgument)
}
pub fn len(&self) -> usize {
for i in 0..N {
if self.data[i] == 0 {
return i;
}
}
N
}
}
impl TarEntry {
pub fn is_empty(&self) -> bool {
self.name.data[0] == 0
}
pub fn node_kind(&self) -> VnodeKind {
match self.type_ {
0 | b'0' => VnodeKind::Regular,
b'5' => VnodeKind::Directory,
_ => todo!(),
}
}
}


@ -0,0 +1 @@
This is another test file

lib/memfs/test/test1.txt Normal file

@ -0,0 +1 @@
This is a test file

Binary file not shown.


@ -1,12 +1,21 @@
//! Filesystem implementations
use core::ptr::NonNull;
use memfs::block::{self, BlockAllocator};
use vfs::VnodeRef;
use yggdrasil_abi::{error::Error, io::MountOptions};
use crate::util::OneTimeInit;
use crate::{
mem::{
self,
phys::{self, PageUsage},
ConvertAddress,
},
util::OneTimeInit,
};
pub mod devfs;
pub mod tar;
/// Describes in-memory filesystem image used as initial root
pub struct Initrd {
@ -21,6 +30,24 @@ pub struct Initrd {
/// Holds reference to the data of initrd as well as its page-aligned physical memory region
pub static INITRD_DATA: OneTimeInit<Initrd> = OneTimeInit::new();
/// Implementation of [memfs::block::BlockAllocator] for the kernel
pub struct FileBlockAllocator;
unsafe impl BlockAllocator for FileBlockAllocator {
fn alloc() -> Result<NonNull<u8>, Error> {
// TODO make this a static assertion
assert_eq!(block::SIZE, 4096);
let page = phys::alloc_page(PageUsage::Used)?;
Ok(unsafe { NonNull::new_unchecked(page.virtualize() as *mut _) })
}
unsafe fn dealloc(block: NonNull<u8>) {
let page = block.as_ptr() as usize;
assert!(page > mem::KERNEL_VIRT_OFFSET);
todo!("Release physical memory");
}
}
/// Constructs an instance of a filesystem for given set of [MountOptions]
pub fn create_filesystem(options: &MountOptions) -> Result<VnodeRef, Error> {
let fs_name = options.filesystem.unwrap();


@ -1,310 +0,0 @@
//! In-memory filesystem implementation
use abi::{error::Error, io::FileMode};
use alloc::{boxed::Box, rc::Rc};
use vfs::{Filesystem, Vnode, VnodeImpl, VnodeKind, VnodeRef};
use yggdrasil_abi::io::OpenOptions;
use crate::util::OneTimeInit;
#[repr(C)]
struct OctalField<const N: usize> {
data: [u8; N],
}
#[repr(C)]
struct TarString<const N: usize> {
data: [u8; N],
}
struct TarIterator<'a> {
data: &'a [u8],
offset: usize,
zero_blocks: usize,
}
#[repr(packed)]
struct TarEntry {
name: TarString<100>,
_mode: OctalField<8>,
_uid: OctalField<8>,
_gid: OctalField<8>,
size: OctalField<12>,
_mtime: OctalField<12>,
_checksum: OctalField<8>,
type_: u8,
_link_name: TarString<100>,
_magic: [u8; 8],
_user: TarString<32>,
_group: TarString<32>,
_dev_major: OctalField<8>,
_dev_minor: OctalField<8>,
_prefix: TarString<155>,
__pad: [u8; 12],
}
impl<'a> TarIterator<'a> {
pub const fn new(data: &'a [u8]) -> Self {
Self {
data,
offset: 0,
zero_blocks: 0,
}
}
}
impl<'a> Iterator for TarIterator<'a> {
type Item = Result<(&'a TarEntry, Option<&'a [u8]>), Error>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.offset + 512 > self.data.len() {
break None;
}
let hdr_ptr = &self.data[self.offset..];
let hdr = unsafe { &*(hdr_ptr.as_ptr() as *const TarEntry) };
if hdr.is_empty() {
if self.zero_blocks == 1 {
self.offset = self.data.len();
return None;
}
self.zero_blocks += 1;
continue;
}
let (data, size_aligned) = match hdr.type_ {
0 | b'0' => {
let size = usize::from(&hdr.size);
if self.offset + 512 + size > self.data.len() {
return Some(Err(Error::InvalidArgument));
}
let data = &self.data[self.offset + 512..self.offset + 512 + size];
let size_aligned = (size + 511) & !511;
(Some(data), size_aligned)
}
// Directory
b'5' => (None, 0),
_ => todo!("Unknown node kind: {}", hdr.type_),
};
self.offset += size_aligned + 512;
break Some(Ok((hdr, data)));
}
}
}
impl<const N: usize> From<&OctalField<N>> for usize {
fn from(value: &OctalField<N>) -> Self {
let mut acc = 0;
for i in 0..N {
if value.data[i] == 0 {
break;
}
acc <<= 3;
acc |= (value.data[i] - b'0') as usize;
}
acc
}
}
impl<const N: usize> TarString<N> {
pub fn as_str(&self) -> Result<&str, Error> {
core::str::from_utf8(&self.data[..self.len()]).map_err(|_| Error::InvalidArgument)
}
pub fn len(&self) -> usize {
for i in 0..N {
if self.data[i] == 0 {
return i;
}
}
N
}
}
impl TarEntry {
pub fn is_empty(&self) -> bool {
self.name.data[0] == 0
}
pub fn node_kind(&self) -> VnodeKind {
match self.type_ {
0 | b'0' => VnodeKind::Regular,
b'5' => VnodeKind::Directory,
_ => todo!(),
}
}
}
/// tar-image based in-memory filesystem
pub struct TarFilesystem {
root: OneTimeInit<VnodeRef>,
}
impl Filesystem for TarFilesystem {
fn dev(self: Rc<Self>) -> Option<&'static dyn vfs::BlockDevice> {
todo!()
}
fn root(self: Rc<Self>) -> Result<VnodeRef, Error> {
self.root.try_get().cloned().ok_or(Error::DoesNotExist)
}
fn data(&self) -> Option<core::cell::Ref<dyn core::any::Any>> {
todo!()
}
}
struct DirInode;
struct RegularInode {
data: &'static [u8],
}
impl VnodeImpl for DirInode {
fn create(&mut self, _at: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error> {
let child = Vnode::new(name, kind);
match kind {
VnodeKind::Directory => child.set_data(Box::new(DirInode)),
VnodeKind::Regular => (),
_ => todo!(),
}
Ok(child)
}
}
impl VnodeImpl for RegularInode {
fn open(&mut self, _node: &VnodeRef, opts: OpenOptions, _mode: FileMode) -> Result<u64, Error> {
if opts.contains(OpenOptions::WRITE) {
panic!("TODO: tarfs write");
}
Ok(0)
}
fn close(&mut self, _node: &VnodeRef) -> Result<(), Error> {
Ok(())
}
fn read(&mut self, _node: &VnodeRef, pos: u64, data: &mut [u8]) -> Result<usize, Error> {
let pos = pos as usize;
if pos > self.data.len() {
return Err(Error::InvalidFile);
}
let rem = core::cmp::min(self.data.len() - pos, data.len());
data[..rem].copy_from_slice(&self.data[pos..pos + rem]);
Ok(rem)
}
fn size(&mut self, _node: &VnodeRef) -> Result<u64, Error> {
Ok(self.data.len() as u64)
}
}
impl TarFilesystem {
fn make_path(
self: &Rc<Self>,
at: &VnodeRef,
path: &str,
kind: VnodeKind,
create: bool,
) -> Result<VnodeRef, Error> {
debugln!("make_path {:?}", path);
if path.is_empty() {
return Ok(at.clone());
}
let (element, rest) = abi::path::split_left(path);
assert!(!element.is_empty());
let node = at.lookup(element);
let node = match node {
Some(node) => node,
None => {
if !create {
debugln!("path {:?} does not exist", path);
return Err(Error::DoesNotExist);
}
infoln!("Create {:?}", element);
let node = self.create_node_initial(element, kind);
at.add_child(node.clone());
node
}
};
if rest.is_empty() {
Ok(node)
} else {
assert!(node.is_directory());
self.make_path(&node, rest, kind, create)
}
}
fn create_node_initial(self: &Rc<Self>, name: &str, kind: VnodeKind) -> VnodeRef {
assert!(!name.is_empty());
assert!(!name.contains('/'));
let node = Vnode::new(name, kind);
node.set_fs(self.clone());
match kind {
VnodeKind::Directory => node.set_data(Box::new(DirInode)),
VnodeKind::Regular => {}
_ => todo!(),
}
node
}
fn from_slice_internal(self: &Rc<Self>, tar_data: &'static [u8]) -> Result<VnodeRef, Error> {
let root = Vnode::new("", VnodeKind::Directory);
root.set_fs(self.clone());
root.set_data(Box::new(DirInode));
// 1. Create paths in tar
for item in TarIterator::new(tar_data) {
let Ok((hdr, _)) = item else {
warnln!("Tar image is truncated");
return Err(Error::InvalidArgument);
};
let path = hdr.name.as_str()?.trim_matches('/');
infoln!("path = {:?}", path);
let (dirname, filename) = abi::path::split_right(path);
let parent = self.make_path(&root, dirname, VnodeKind::Directory, true)?;
let node = self.create_node_initial(filename, hdr.node_kind());
parent.add_child(node);
}
// 2. Associate files with their data
for item in TarIterator::new(tar_data) {
let Ok((hdr, data)) = item else {
panic!("Unreachable");
};
if hdr.node_kind() == VnodeKind::Regular {
let data = data.unwrap();
let path = hdr.name.as_str()?.trim_matches('/');
let node = self.make_path(&root, path, VnodeKind::Directory, false)?;
node.set_data(Box::new(RegularInode { data }));
}
}
Ok(root)
}
/// Constructs a filesystem tree from a tar image in memory
pub fn from_slice(tar_data: &'static [u8]) -> Result<Rc<Self>, Error> {
let fs = Rc::new(TarFilesystem {
root: OneTimeInit::new(),
});
let root = fs.from_slice_internal(tar_data)?;
fs.root.init(root);
Ok(fs)
}
}


@ -19,7 +19,8 @@ use abi::{
error::Error,
io::{FileMode, OpenOptions},
};
use fs::{tar::TarFilesystem, INITRD_DATA};
use fs::{FileBlockAllocator, INITRD_DATA};
use memfs::MemoryFilesystem;
use task::process::Process;
use vfs::{Filesystem, IoContext, VnodeRef};
@ -42,7 +43,7 @@ pub mod util;
fn setup_root() -> Result<VnodeRef, Error> {
let initrd_data = INITRD_DATA.get();
let fs = TarFilesystem::from_slice(initrd_data.data)?;
let fs = MemoryFilesystem::<FileBlockAllocator>::from_slice(initrd_data.data).unwrap();
fs.root()
}