feat(vfs): file read from initrd in kernel

commit 6d8f0d01ef (parent 0dbadd52d0)

Cargo.lock (generated, 10 lines changed)

@@ -108,7 +108,9 @@ dependencies = [
 "cortex-a",
 "error",
 "fdt-rs",
 "memfs",
 "tock-registers",
 "vfs",
]

[[package]]

@@ -121,6 +123,14 @@ checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce"
name = "libusr"
version = "0.1.0"

[[package]]
name = "memfs"
version = "0.1.0"
dependencies = [
 "error",
 "vfs",
]

[[package]]
name = "memoffset"
version = "0.5.6"
@ -10,6 +10,7 @@ edition = "2018"
|
||||
[workspace]
|
||||
members = [
|
||||
"fs/vfs",
|
||||
"fs/memfs",
|
||||
"kernel",
|
||||
"libusr",
|
||||
"init",
|
||||
|

Makefile (7 lines changed)

@@ -60,7 +60,8 @@ all: kernel
 kernel:
 	cd kernel && cargo build $(CARGO_BUILD_OPTS)
 	cd init && cargo build --target=../etc/$(ARCH)-osdev5.json -Z build-std=core,alloc,compiler_builtins
-	cp target/$(ARCH)-osdev5/debug/init $(O)/initrd.img
+	echo "This is a test file" >$(O)/test.txt
+	cd $(O) && tar cf initrd.img test.txt
 ifeq ($(ARCH),aarch64)
 	$(LLVM_BASE)/llvm-strip -o $(O)/kernel.strip $(O)/kernel
 	$(LLVM_BASE)/llvm-size $(O)/kernel.strip

@@ -79,6 +80,10 @@ ifeq ($(MACH),orangepi3)
 	$(O)/uImage
 endif

+test:
+	cd fs/vfs && cargo test
+	cd fs/memfs && cargo test
+
 clean:
 	cargo clean
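
Note: the initrd format changes here. Instead of copying the raw init ELF into $(O)/initrd.img, the build now writes a small test.txt and packs it into a plain tar archive, which is the on-disk format the new memfs::tar parser expects. The test target added below the uImage rule runs the host-side unit tests for both filesystem crates.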

fs/memfs/Cargo.toml (new file, 10 lines)

@@ -0,0 +1,10 @@
[package]
name = "memfs"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
vfs = { path = "../vfs" }
error = { path = "../../error" }

fs/memfs/src/block.rs (new file, 132 lines)

@@ -0,0 +1,132 @@
use core::mem::{size_of, MaybeUninit};
use core::ops::{Deref, DerefMut};
use error::Errno;

pub const SIZE: usize = 4096;
pub const ENTRY_COUNT: usize = SIZE / size_of::<usize>();

// Should be the same as "usize" in layout
pub struct BlockRef<'a, A: BlockAllocator + Copy> {
    inner: Option<&'a mut [u8; SIZE]>,
    alloc: MaybeUninit<A>,
}

pub unsafe trait BlockAllocator {
    fn alloc(&self) -> *mut u8;
    unsafe fn dealloc(&self, block: *mut u8);
}

impl<'a, A: BlockAllocator + Copy> BlockRef<'a, A> {
    pub fn new(alloc: A) -> Result<Self, Errno> {
        assert!(size_of::<A>() == 0);
        let ptr = alloc.alloc();
        if ptr.is_null() {
            Err(Errno::OutOfMemory)
        } else {
            Ok(unsafe { Self::from_raw(alloc, ptr) })
        }
    }

    pub fn new_indirect(alloc: A) -> Result<Self, Errno> {
        let mut res = Self::new(alloc)?;
        for it in res.as_mut_ref_array().iter_mut() {
            it.write(BlockRef::null());
        }
        Ok(res)
    }

    pub const fn null() -> Self {
        Self {
            inner: None,
            alloc: MaybeUninit::uninit(),
        }
    }

    pub unsafe fn from_raw(alloc: A, data: *mut u8) -> Self {
        Self {
            inner: Some(&mut *(data as *mut _)),
            alloc: MaybeUninit::new(alloc),
        }
    }

    pub const fn is_null(&self) -> bool {
        self.inner.is_none()
    }

    pub fn as_mut_ref_array(&mut self) -> &mut [MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
        assert_eq!(size_of::<Self>(), 8);
        unsafe { &mut *(self.deref_mut() as *mut _ as *mut _) }
    }

    pub fn as_ref_array(&self) -> &[MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
        assert_eq!(size_of::<Self>(), 8);
        unsafe { &*(self.deref() as *const _ as *const _) }
    }
}

impl<'a, A: BlockAllocator + Copy> Drop for BlockRef<'a, A> {
    fn drop(&mut self) {
        if let Some(inner) = self.inner.take() {
            unsafe {
                self.alloc
                    .assume_init_ref()
                    .dealloc(inner as *mut _ as *mut _);
            }
        }
    }
}

impl<'a, A: BlockAllocator + Copy> Deref for BlockRef<'a, A> {
    type Target = [u8; SIZE];

    fn deref(&self) -> &Self::Target {
        self.inner.as_ref().unwrap()
    }
}

impl<'a, A: BlockAllocator + Copy> DerefMut for BlockRef<'a, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.as_mut().unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::boxed::Box;
    use std::sync::atomic::{AtomicUsize, Ordering};
    static A_COUNTER: AtomicUsize = AtomicUsize::new(0);
    #[test]
    fn block_allocator() {
        #[derive(Clone, Copy)]
        struct A;
        unsafe impl BlockAllocator for A {
            fn alloc(&self) -> *mut u8 {
                let b = Box::leak(Box::new([0; SIZE]));
                A_COUNTER.fetch_add(1, Ordering::SeqCst);
                b.as_mut_ptr() as *mut _
            }
            unsafe fn dealloc(&self, ptr: *mut u8) {
                A_COUNTER.fetch_sub(1, Ordering::SeqCst);
                drop(Box::from_raw(ptr as *mut [u8; SIZE]));
            }
        }
        const N: usize = 13;
        {
            let mut s: [MaybeUninit<BlockRef<A>>; N] = MaybeUninit::uninit_array();
            assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
            for i in 0..N {
                let mut block = BlockRef::new(A {}).unwrap();
                block.fill(1);
                s[i].write(block);
            }
            assert_eq!(A_COUNTER.load(Ordering::Acquire), N);
            for i in 0..N {
                unsafe {
                    s[i].assume_init_drop();
                }
            }
            assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
        }
    }
}
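
Review note: the reinterpretation in as_ref_array/as_mut_ref_array is sound only because BlockRef is exactly one pointer wide: inner is an Option<&mut [u8; SIZE]> (pointer-sized thanks to the null-pointer niche) and alloc is a MaybeUninit of a zero-sized allocator, which BlockRef::new enforces with assert!(size_of::<A>() == 0). A minimal sketch of that layout invariant, assuming a 64-bit target (illustrative, not part of the commit):

    use core::mem::size_of;

    fn layout_invariant() {
        // Option<&mut T> uses the null-pointer niche, so it stays pointer-sized,
        // and one 4096-byte block holds 4096 / 8 = 512 such entries.
        assert_eq!(size_of::<Option<&'static mut [u8; 4096]>>(), 8);
        assert_eq!(4096 / size_of::<usize>(), 512);
    }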

fs/memfs/src/bvec.rs (new file, 611 lines)

@@ -0,0 +1,611 @@
use crate::{block, BlockAllocator, BlockRef};
use core::cmp::{max, min};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Index, IndexMut};
use error::Errno;
const L0_BLOCKS: usize = 32; // 128K
const L1_BLOCKS: usize = 8; // 16M
pub struct Bvec<'a, A: BlockAllocator + Copy> {
    capacity: usize,
    size: usize,
    l0: [MaybeUninit<BlockRef<'a, A>>; L0_BLOCKS],
    l1: [MaybeUninit<BlockRef<'a, A>>; L1_BLOCKS],
    l2: MaybeUninit<BlockRef<'a, A>>,
    alloc: A,
}
impl<'a, A: BlockAllocator + Copy> Bvec<'a, A> {
    pub fn new(alloc: A) -> Self {
        let mut res = Self {
            capacity: 0,
            size: 0,
            l0: MaybeUninit::uninit_array(),
            l1: MaybeUninit::uninit_array(),
            l2: MaybeUninit::uninit(),
            alloc,
        };
        for it in res.l0.iter_mut() {
            it.write(BlockRef::null());
        }
        for it in res.l1.iter_mut() {
            it.write(BlockRef::null());
        }
        res.l2.write(BlockRef::null());
        res
    }
    pub fn resize(&mut self, cap: usize) -> Result<(), Errno> {
        if cap <= self.capacity {
            let mut curr = self.capacity;
            while curr != cap {
                curr -= 1;
                let mut index = curr;
                if index >= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT {
                    index -= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT;
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;
                    let l2r = unsafe { self.l2.assume_init_mut() };
                    assert!(!l2r.is_null());
                    let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
                    assert!(!l1r.is_null());
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(!l0r.is_null());
                    *l0r = BlockRef::null();
                    if l0i == 0 {
                        *l1r = BlockRef::null();
                    }
                    if index == 0 {
                        *l2r = BlockRef::null();
                    }
                    continue;
                }
                if index >= L0_BLOCKS {
                    index -= L0_BLOCKS;
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;
                    let l1r = unsafe { self.l1[l1i].assume_init_mut() };
                    assert!(!l1r.is_null());
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(!l0r.is_null());
                    *l0r = BlockRef::null();
                    if l0i == 0 {
                        *l1r = BlockRef::null();
                    }
                    continue;
                }
                let l0r = unsafe { self.l0[index].assume_init_mut() };
                assert!(!l0r.is_null());
                *l0r = BlockRef::null();
                continue;
                unimplemented!();
            }
        } else {
            for mut index in self.capacity..cap {
                if index < L0_BLOCKS {
                    let l0r = unsafe { self.l0[index].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;
                    continue;
                }
                index -= L0_BLOCKS;
                if index < L1_BLOCKS * block::ENTRY_COUNT {
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;
                    let l1r = unsafe { self.l1[l1i].assume_init_mut() };
                    if l1r.is_null() {
                        *l1r = BlockRef::new_indirect(self.alloc)?;
                    }
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;
                    continue;
                }
                index -= L1_BLOCKS * block::ENTRY_COUNT;
                if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;
                    let l2r = unsafe { self.l2.assume_init_mut() };
                    if l2r.is_null() {
                        *l2r = BlockRef::new_indirect(self.alloc)?;
                    }
                    let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
                    if l1r.is_null() {
                        *l1r = BlockRef::new_indirect(self.alloc)?;
                    }
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;
                    continue;
                }
                unimplemented!();
            }
        }
        self.capacity = cap;
        Ok(())
    }
    pub fn write(&mut self, mut pos: usize, data: &[u8]) -> Result<usize, Errno> {
        if pos > self.size {
            return Err(Errno::InvalidArgument);
        }
        let mut rem = data.len();
        let mut doff = 0usize;
        if pos + rem > self.size {
            self.size = pos + rem;
            self.resize((pos + rem + block::SIZE - 1) / block::SIZE)?;
        }
        while rem > 0 {
            let index = pos / block::SIZE;
            let off = pos % block::SIZE;
            let count = min(block::SIZE - off, rem);
            let block = &mut self[index];
            let dst = &mut block[off..off + count];
            let src = &data[doff..doff + count];
            dst.copy_from_slice(src);
            doff += count;
            pos += count;
            rem -= count;
        }
        Ok(doff)
    }
    pub fn read(&self, mut pos: usize, data: &mut [u8]) -> Result<usize, Errno> {
        if pos > self.size {
            return Err(Errno::InvalidArgument);
        }
        let mut rem = min(self.size - pos, data.len());
        let mut doff = 0usize;
        while rem > 0 {
            let index = pos / block::SIZE;
            let off = pos % block::SIZE;
            let count = min(block::SIZE - off, rem);
            let block = &self[index];
            let src = &block[off..off + count];
            let dst = &mut data[doff..doff + count];
            dst.copy_from_slice(src);
            doff += count;
            pos += count;
            rem -= count;
        }
        Ok(doff)
    }
}
impl<'a, A: BlockAllocator + Copy> Index<usize> for Bvec<'a, A> {
    type Output = BlockRef<'a, A>;
    fn index(&self, mut index: usize) -> &Self::Output {
        if index >= self.capacity {
            panic!(
                "Index exceeds bvec capacity ({} >= {})",
                index, self.capacity
            );
        }
        if index < L0_BLOCKS {
            return unsafe { self.l0[index].assume_init_ref() };
        }
        index -= L0_BLOCKS;
        if index < L1_BLOCKS * block::ENTRY_COUNT {
            return unsafe {
                let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_ref();
                l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
            };
        }
        index -= L1_BLOCKS * block::ENTRY_COUNT;
        if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
            return unsafe {
                let l2 = self.l2.assume_init_ref();
                let l1 = l2.as_ref_array()[index / block::ENTRY_COUNT].assume_init_ref();
                l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
            };
        }
        unimplemented!();
    }
}
impl<'a, A: BlockAllocator + Copy> IndexMut<usize> for Bvec<'a, A> {
    fn index_mut(&mut self, mut index: usize) -> &mut Self::Output {
        if index >= self.capacity {
            panic!(
                "Index exceeds bvec capacity ({} >= {})",
                index, self.capacity
            );
        }
        if index < L0_BLOCKS {
            return unsafe { self.l0[index].assume_init_mut() };
        }
        index -= L0_BLOCKS;
        if index < L1_BLOCKS * block::ENTRY_COUNT {
            return unsafe {
                let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_mut();
                l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
            };
        }
        index -= L1_BLOCKS * block::ENTRY_COUNT;
        if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
            return unsafe {
                let l2 = self.l2.assume_init_mut();
                let l1 = l2.as_mut_ref_array()[index / block::ENTRY_COUNT].assume_init_mut();
                l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
            };
        }
        unimplemented!();
    }
}
impl<'a, A: BlockAllocator + Copy> Drop for Bvec<'a, A> {
    fn drop(&mut self) {
        for i in 0..min(L0_BLOCKS, self.capacity) {
            unsafe {
                self.l0[i].assume_init_drop();
            }
        }
        if self.capacity > L0_BLOCKS {}
    }
}

#[cfg(feature = "test_bvec")]
#[cfg(test)]
mod tests {
    use super::*;
    use std::boxed::Box;
    use std::mem::MaybeUninit;
    use std::sync::atomic::{AtomicUsize, Ordering};
    static A_COUNTER: AtomicUsize = AtomicUsize::new(0);
    #[derive(Clone, Copy)]
    struct TestAlloc;
    unsafe impl BlockAllocator for TestAlloc {
        fn alloc(&self) -> *mut u8 {
            let b = Box::leak(Box::new([0; block::SIZE]));
            eprintln!("alloc {:p}", b);
            b.as_mut_ptr() as *mut _
        }
        unsafe fn dealloc(&self, ptr: *mut u8) {
            eprintln!("drop {:p}", ptr);
            drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
        }
    }
    #[test]
    fn bvec_allocation() {
        #[derive(Clone, Copy)]
        struct A;
        unsafe impl BlockAllocator for A {
            fn alloc(&self) -> *mut u8 {
                let b = Box::leak(Box::new([0; block::SIZE]));
                A_COUNTER.fetch_add(1, Ordering::SeqCst);
                b.as_mut_ptr() as *mut _
            }
            unsafe fn dealloc(&self, ptr: *mut u8) {
                A_COUNTER.fetch_sub(1, Ordering::SeqCst);
                drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
            }
        }
        let mut bvec = Bvec::new(A {});
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
        bvec.resize(123).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            let l1r = bvec.l1[0].assume_init_ref();
            assert!(!l1r.is_null());
            for i in 0..123 - L0_BLOCKS {
                assert!(!l1r.as_ref_array()[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 123 + 1);
        bvec.resize(123 + block::ENTRY_COUNT).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..(123 + block::ENTRY_COUNT) - L0_BLOCKS {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            123 + block::ENTRY_COUNT + 2
        );
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS
        );
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 4)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 4 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 4 + 4 + 1
        );
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 3 + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 3 + 1 + 4 + 1
        );
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 2 + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 2 + 1 + 3 + 1
        );
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            let l1r = l2r.as_ref_array()[0].assume_init_ref();
            assert!(!l1r.is_null());
            assert!(!l1r.as_ref_array()[0].assume_init_ref().is_null());
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            1 + 1 + 1
        );
        bvec.resize(L0_BLOCKS + 3 * block::ENTRY_COUNT + 1).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..3 * block::ENTRY_COUNT + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(l2r.is_null());
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            3 * block::ENTRY_COUNT + 1 + 4
        );
        bvec.resize(L0_BLOCKS).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            assert!(bvec.l1[0].assume_init_ref().is_null());
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), L0_BLOCKS);
        bvec.resize(12).unwrap();
        unsafe {
            for i in 0..12 {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 12);
        bvec.resize(0).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(bvec.l0[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
    }
    #[test]
    fn bvec_index_l0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS).unwrap();
        for i in 0..L0_BLOCKS {
            let block = &bvec[i];
            assert_eq!(block as *const _, bvec.l0[i].as_ptr());
        }
    }
    #[test]
    fn bvec_index_l1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
        for i in 0..block::ENTRY_COUNT * 2 + 3 {
            let l1i = i / block::ENTRY_COUNT;
            let l0i = i % block::ENTRY_COUNT;
            let block = &bvec[i + L0_BLOCKS];
            let l1r = unsafe { bvec.l1[l1i].assume_init_ref() };
            assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
        }
    }
    #[test]
    fn bvec_index_l2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3)
            .unwrap();
        for i in 0..3 {
            let l1i = i / block::ENTRY_COUNT;
            let l0i = i % block::ENTRY_COUNT;
            let block = &bvec[i + L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT];
            let l2r = unsafe { bvec.l2.assume_init_ref() };
            let l1r = unsafe { l2r.as_ref_array()[l1i].assume_init_ref() };
            assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
        }
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l0_0() {
        let bvec = Bvec::new(TestAlloc {});
        let _block = &bvec[0];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l0_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[15];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[L0_BLOCKS + 2];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
        let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 2 + 6];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
        let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 3 + 1];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 3 + 13)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 6)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 8];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_3() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 7)
            .unwrap();
        let _block =
            &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13];
    }
    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_4() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 2];
    }
    #[test]
    fn bvec_write_read() {
        let mut bvec = Bvec::new(TestAlloc {});
        const N: usize = block::SIZE * (L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3);
        let mut data = vec![0u8; N];
        for i in 0..N {
            data[i] = (i & 0xFF) as u8;
        }
        assert_eq!(bvec.write(0, &data[..]), Ok(N));
        let mut buf = vec![0u8; 327];
        let mut off = 0usize;
        let mut rem = N;
        while rem != 0 {
            let count = min(rem, buf.len());
            assert_eq!(bvec.read(off, &mut buf[..]), Ok(count));
            for i in 0..count {
                assert_eq!(buf[i], ((i + off) & 0xFF) as u8);
            }
            rem -= count;
            off += count;
        }
    }
}
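
Review note: Bvec is a direct/indirect block table in the classic Unix inode style. With 4 KiB blocks and 512 pointers per indirect block, the 32 direct L0 slots cover 128 KiB, the 8 singly indirect L1 slots cover 16 MiB, and the single doubly indirect L2 slot covers a further 1 GiB. One caveat visible above: Drop only releases the L0 range (the `if self.capacity > L0_BLOCKS {}` arm is empty), so indirect blocks are not freed when a still-large Bvec is dropped. A sketch of the index decomposition that Index/IndexMut perform (illustrative only, not part of the commit):

    const ENTRY_COUNT: usize = 4096 / 8; // pointers per 4 KiB indirect block
    const L0_BLOCKS: usize = 32;
    const L1_BLOCKS: usize = 8;

    enum Slot {
        L0(usize),                   // direct block
        L1 { l1: usize, l0: usize }, // l1[l1], then entry l0
        L2 { l1: usize, l0: usize }, // l2, then entry l1, then entry l0
    }

    fn locate(mut index: usize) -> Slot {
        if index < L0_BLOCKS {
            return Slot::L0(index);
        }
        index -= L0_BLOCKS;
        if index < L1_BLOCKS * ENTRY_COUNT {
            return Slot::L1 { l1: index / ENTRY_COUNT, l0: index % ENTRY_COUNT };
        }
        index -= L1_BLOCKS * ENTRY_COUNT;
        Slot::L2 { l1: index / ENTRY_COUNT, l0: index % ENTRY_COUNT }
    }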

fs/memfs/src/lib.rs (new file, 237 lines)

@@ -0,0 +1,237 @@
#![feature(
    const_fn_trait_bound,
    const_mut_refs,
    maybe_uninit_extra,
    maybe_uninit_uninit_array
)]
#![no_std]

extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;

use alloc::{boxed::Box, rc::Rc};
use core::cell::RefCell;
use error::Errno;
use vfs::{node::VnodeData, Filesystem, Vnode, VnodeImpl, VnodeKind, VnodeRef};

pub mod block;
pub use block::{BlockAllocator, BlockRef};
pub mod bvec;
use bvec::Bvec;
pub mod tar;
use tar::{Tar, TarIterator};

pub struct Ramfs<A: BlockAllocator + Copy + 'static> {
    root: RefCell<Option<VnodeRef>>,
    alloc: A,
}

pub struct FileInode<'a, A: BlockAllocator + Copy + 'static> {
    data: Bvec<'a, A>,
}

pub struct DirInode;

impl<'a, A: BlockAllocator + Copy + 'static> VnodeImpl for FileInode<'a, A> {
    fn create(&mut self, _parent: VnodeRef, _node: VnodeRef) -> Result<(), Errno> {
        panic!()
    }

    fn remove(&mut self, _parent: VnodeRef, _name: &str) -> Result<(), Errno> {
        panic!()
    }

    fn open(&mut self, node: VnodeRef) -> Result<usize, Errno> {
        Ok(0)
    }

    fn close(&mut self, node: VnodeRef) -> Result<(), Errno> {
        Ok(())
    }

    fn read(&mut self, node: VnodeRef, pos: usize, data: &mut [u8]) -> Result<usize, Errno> {
        self.data.read(pos, data)
    }

    fn write(&mut self, node: VnodeRef, pos: usize, data: &[u8]) -> Result<usize, Errno> {
        self.data.write(pos, data)
    }

    fn truncate(&mut self, _node: VnodeRef, size: usize) -> Result<(), Errno> {
        self.data.resize((size + 4095) / 4096)
    }
}

impl VnodeImpl for DirInode {
    fn create(&mut self, _parent: VnodeRef, _node: VnodeRef) -> Result<(), Errno> {
        Ok(())
    }

    fn remove(&mut self, _parent: VnodeRef, _name: &str) -> Result<(), Errno> {
        Ok(())
    }

    fn open(&mut self, node: VnodeRef) -> Result<usize, Errno> {
        todo!()
    }

    fn close(&mut self, node: VnodeRef) -> Result<(), Errno> {
        todo!()
    }

    fn read(&mut self, node: VnodeRef, pos: usize, data: &mut [u8]) -> Result<usize, Errno> {
        todo!()
    }

    fn write(&mut self, node: VnodeRef, pos: usize, data: &[u8]) -> Result<usize, Errno> {
        todo!()
    }

    fn truncate(&mut self, _node: VnodeRef, _size: usize) -> Result<(), Errno> {
        todo!()
    }
}

impl<A: BlockAllocator + Copy + 'static> Filesystem for Ramfs<A> {
    fn root(self: Rc<Self>) -> Result<VnodeRef, Errno> {
        self.root.borrow().clone().ok_or(Errno::DoesNotExist)
    }

    fn create_node(self: Rc<Self>, name: &str, kind: VnodeKind) -> Result<VnodeRef, Errno> {
        let mut node = Vnode::new(name, kind);
        let data: Box<dyn VnodeImpl> = match kind {
            VnodeKind::Regular => Box::new(FileInode {
                data: Bvec::new(self.alloc),
            }),
            VnodeKind::Directory => Box::new(DirInode {}),
        };
        node.set_data(VnodeData {
            fs: self,
            node: data,
        });
        Ok(node)
    }
}

impl<A: BlockAllocator + Copy + 'static> Ramfs<A> {
    pub fn open(base: *const u8, size: usize, alloc: A) -> Result<Rc<Self>, Errno> {
        let mut res = Rc::new(Self {
            root: RefCell::new(None),
            alloc,
        });
        unsafe {
            *res.root.borrow_mut() = Some(res.clone().load_tar(base, size)?);
        }
        Ok(res)
    }

    fn make_path(
        self: Rc<Self>,
        at: VnodeRef,
        path: &str,
        kind: VnodeKind,
        do_create: bool,
    ) -> Result<VnodeRef, Errno> {
        let (element, rest) = vfs::util::path_component_left(path);
        assert!(!element.is_empty());

        let node_kind = if rest.is_empty() {
            kind
        } else {
            VnodeKind::Directory
        };

        let node = at.lookup(element);
        let node = match node {
            Some(node) => node,
            None => {
                if !do_create {
                    return Err(Errno::DoesNotExist);
                }
                let node = self.clone().create_node(element, node_kind)?;
                at.attach(node.clone());
                node
            }
        };

        if rest.is_empty() {
            Ok(node)
        } else {
            self.make_path(node, rest, kind, do_create)
        }
    }

    unsafe fn load_tar(self: Rc<Self>, base: *const u8, size: usize) -> Result<VnodeRef, Errno> {
        let root = self.clone().create_node("", VnodeKind::Directory)?;

        // 1. Create all the paths in TAR
        for block in TarIterator::new(base, base.add(size)) {
            let node =
                self.clone()
                    .make_path(root.clone(), block.path()?, block.node_kind(), true)?;
            assert_eq!(node.kind(), block.node_kind());
        }

        // 2. Setup data blocks
        for block in TarIterator::new(base, base.add(size)) {
            if block.is_file() {
                let size = block.size();
                let node = self.clone().make_path(
                    root.clone(),
                    block.path()?,
                    block.node_kind(),
                    false,
                )?;

                node.truncate(size).unwrap();
                let res = node.write(0, block.data()).unwrap();
                if res != size {
                    panic!("Expected to write {}B, got {}B", size, res);
                }
            }
        }

        Ok(root)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::boxed::Box;
    use vfs::Ioctx;

    #[test]
    fn ramfs_open() {
        #[derive(Clone, Copy)]
        struct A;
        unsafe impl BlockAllocator for A {
            fn alloc(&self) -> *mut u8 {
                let b = Box::leak(Box::new([0; block::SIZE]));
                b.as_mut_ptr() as *mut _
            }
            unsafe fn dealloc(&self, ptr: *mut u8) {
                drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
            }
        }
        unsafe impl Sync for A {}

        let data = include_str!("../test/test1.tar");
        let fs = Ramfs::open(data.as_ptr(), data.bytes().len(), A {}).unwrap();

        let root = fs.root().unwrap();
        let ioctx = Ioctx::new(root.clone());

        assert!(Rc::ptr_eq(&ioctx.find(None, "/").unwrap(), &root));

        let node = ioctx.find(None, "/test1.txt").unwrap();
        let mut file = node.open().unwrap();
        let mut buf = [0u8; 1024];

        assert_eq!(file.read(&mut buf).unwrap(), 20);
        let s = core::str::from_utf8(&buf[..20]).unwrap();
        assert_eq!(s, "This is a test file\n");
    }
}
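
Design note: load_tar walks the archive twice. The first pass creates every path, so parent directories exist regardless of the order of entries in the archive; the second pass truncates each regular file to its recorded size and copies the data in. make_path resolves one component at a time via vfs::util::path_component_left, creating intermediate components as VnodeKind::Directory when do_create is set.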

fs/memfs/src/tar.rs (new file, 112 lines)

@@ -0,0 +1,112 @@
use vfs::VnodeKind;
use error::Errno;

#[repr(packed)]
#[allow(dead_code)]
pub struct Tar {
    name: [u8; 100],
    mode: [u8; 8],
    uid: [u8; 8],
    gid: [u8; 8],
    size: [u8; 12],
    mtime: [u8; 12],
    checksum: [u8; 8],
    type_: u8,
    link_name: [u8; 100],
    magic: [u8; 8],
    user: [u8; 32],
    group: [u8; 32],
    dev_major: [u8; 8],
    dev_minor: [u8; 8],
    prefix: [u8; 155],
}

pub struct TarIterator {
    address: *const u8,
    limit: *const u8,
    zero_blocks: usize,
}

impl TarIterator {
    pub const fn new(
        address: *const u8,
        limit: *const u8,
    ) -> Self {
        Self {
            address,
            limit,
            zero_blocks: 0,
        }
    }
}

impl Iterator for TarIterator {
    type Item = &'static Tar;

    fn next(&mut self) -> Option<Self::Item> {
        if self.address >= self.limit || self.zero_blocks == 2 {
            return None;
        }

        let bytes: &[u8; 512] = unsafe { (self.address as *const [u8; 512]).as_ref() }.unwrap();
        if bytes.iter().all(|&x| x == 0) {
            self.zero_blocks += 1;
            self.address = unsafe { self.address.add(512) };
            self.next()
        } else {
            let block: &Tar = unsafe { (self.address as *const Tar).as_ref() }.unwrap();
            self.zero_blocks = 0;
            self.address = unsafe { self.address.add(512 + align_up(block.size())) };
            Some(block)
        }
    }
}

impl Tar {
    pub fn is_file(&self) -> bool {
        self.type_ == 0 || self.type_ == b'0'
    }

    pub fn size(&self) -> usize {
        from_octal(&self.size)
    }

    pub fn path(&self) -> Result<&str, Errno> {
        let zero_index = self.name.iter().position(|&c| c == 0).unwrap();
        core::str::from_utf8(&self.name[..zero_index]).map_err(|_| Errno::InvalidArgument)
    }

    pub fn node_kind(&self) -> VnodeKind {
        match self.type_ {
            0 | b'0' => VnodeKind::Regular,
            b'5' => VnodeKind::Directory,
            p => panic!("Unrecognized tar entry type: '{}'", p as char),
        }
    }

    pub fn data(&self) -> &[u8] {
        unsafe {
            core::slice::from_raw_parts(
                ((self as *const _ as usize) + 512) as *const _,
                self.size(),
            )
        }
    }
}

fn from_octal(oct: &[u8]) -> usize {
    let mut res = 0usize;
    for &byte in oct {
        if byte == 0 {
            break;
        }

        res <<= 3;
        res |= (byte - b'0') as usize;
    }
    res
}

const fn align_up(size: usize) -> usize {
    (size + 511) & !511
}
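
Review note: tar headers store sizes as ASCII octal, and file data is padded to whole 512-byte records, which is exactly what from_octal and align_up implement. A worked example (hypothetical test, not part of the commit):

    #[test]
    fn tar_size_math() {
        assert_eq!(from_octal(b"00000000024\0"), 20); // octal 24 = 20 bytes
        assert_eq!(align_up(20), 512); // padded to one 512-byte record
    }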

fs/memfs/test/test1.tar (new binary file; contents not shown)

fs/memfs/test/test1.txt (new file, 1 line)

@@ -0,0 +1 @@
This is a test file

@@ -76,6 +76,10 @@ mod tests {
     fn write(&mut self, _node: VnodeRef, _pos: usize, _data: &[u8]) -> Result<usize, Errno> {
         Err(Errno::NotImplemented)
     }
+
+    fn truncate(&mut self, _node: VnodeRef, _size: usize) -> Result<(), Errno> {
+        Err(Errno::NotImplemented)
+    }
 }

 impl Filesystem for DummyFs {

@@ -102,6 +102,10 @@ mod tests {
     fn write(&mut self, _node: VnodeRef, _pos: usize, _data: &[u8]) -> Result<usize, Errno> {
         Err(Errno::NotImplemented)
     }
+
+    fn truncate(&mut self, _node: VnodeRef, _size: usize) -> Result<(), Errno> {
+        Err(Errno::NotImplemented)
+    }
 }

 impl Filesystem for DummyFs {

fs/vfs/src/node.rs

@@ -45,6 +45,7 @@ pub trait VnodeImpl {
     fn open(&mut self, node: VnodeRef /* TODO open mode */) -> Result<usize, Errno>;
     fn close(&mut self, node: VnodeRef) -> Result<(), Errno>;

+    fn truncate(&mut self, node: VnodeRef, size: usize) -> Result<(), Errno>;
     fn read(&mut self, node: VnodeRef, pos: usize, data: &mut [u8]) -> Result<usize, Errno>;
     fn write(&mut self, node: VnodeRef, pos: usize, data: &[u8]) -> Result<usize, Errno>;
 }

@@ -77,6 +78,11 @@ impl Vnode {
         self.kind == VnodeKind::Directory
     }

+    #[inline(always)]
+    pub const fn kind(&self) -> VnodeKind {
+        self.kind
+    }
+
     // Tree operations

     pub fn attach(self: &VnodeRef, child: VnodeRef) {

@@ -188,6 +194,30 @@ impl Vnode {
             Err(Errno::NotImplemented)
         }
     }
+
+    pub fn write(self: &VnodeRef, pos: usize, buf: &[u8]) -> Result<usize, Errno> {
+        if self.kind != VnodeKind::Regular {
+            return Err(Errno::IsADirectory);
+        }
+
+        if let Some(ref mut data) = *self.data.borrow_mut() {
+            data.node.write(self.clone(), pos, buf)
+        } else {
+            Err(Errno::NotImplemented)
+        }
+    }
+
+    pub fn truncate(self: &VnodeRef, size: usize) -> Result<(), Errno> {
+        if self.kind != VnodeKind::Regular {
+            return Err(Errno::IsADirectory);
+        }
+
+        if let Some(ref mut data) = *self.data.borrow_mut() {
+            data.node.truncate(self.clone(), size)
+        } else {
+            Err(Errno::NotImplemented)
+        }
+    }
 }

 impl fmt::Debug for Vnode {

@@ -228,6 +258,10 @@ mod tests {
     fn write(&mut self, _node: VnodeRef, _pos: usize, _data: &[u8]) -> Result<usize, Errno> {
         Err(Errno::NotImplemented)
     }
+
+    fn truncate(&mut self, _node: VnodeRef, _size: usize) -> Result<(), Errno> {
+        Err(Errno::NotImplemented)
+    }
 }

 impl Filesystem for DummyFs {
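
Note: the new Vnode::write and Vnode::truncate wrappers mirror the existing read path: non-regular nodes are rejected with Errno::IsADirectory, and the call is otherwise delegated to the VnodeImpl boxed inside the node's VnodeData; nodes without attached data report Errno::NotImplemented.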

kernel/Cargo.toml

@@ -11,6 +11,8 @@ test = false

 [dependencies]
 error = { path = "../error" }
+vfs = { path = "../fs/vfs" }
+memfs = { path = "../fs/memfs" }
 cfg-if = "1.x.x"
 tock-registers = "0.7.x"
 fdt-rs = { version = "0.x.x", default-features = false }

kernel/src/fs/mod.rs (new file, 21 lines)

@@ -0,0 +1,21 @@
#![allow(missing_docs)]

use crate::mem::{self, phys::{self, PageUsage}};
use memfs::BlockAllocator;

#[derive(Clone, Copy)]
pub struct MemfsBlockAlloc;

unsafe impl BlockAllocator for MemfsBlockAlloc {
    fn alloc(&self) -> *mut u8 {
        if let Ok(page) = phys::alloc_page(PageUsage::Kernel) {
            mem::virtualize(page) as *mut u8
        } else {
            core::ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, data: *mut u8) {
        todo!()
    }
}
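
Review note: the kernel-side allocator hands out one whole physical page per block and returns its kernel-virtual alias via mem::virtualize, so block::SIZE matches the page size. MemfsBlockAlloc is a unit struct, which satisfies the zero-size requirement that BlockRef::new asserts; and since dealloc is still todo!(), any path that actually frees blocks (shrinking or dropping a file) will panic for now. A quick check of the zero-size property (illustrative, not part of the commit):

    use core::mem::size_of;

    fn memfs_alloc_is_zst() {
        // A ZST allocator keeps BlockRef exactly one pointer wide.
        assert_eq!(size_of::<MemfsBlockAlloc>(), 0);
    }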

kernel/src/lib.rs

@@ -10,7 +10,7 @@
     const_panic,
     panic_info_message,
     alloc_error_handler,
-    const_btree_new,
+    const_btree_new
 )]
 #![no_std]
 #![no_main]

@@ -27,12 +27,13 @@ pub mod debug;

 pub mod arch;
 pub mod dev;
+pub mod fs;
 pub mod mem;
 pub mod proc;
 pub mod sync;
-pub mod util;
 #[allow(missing_docs)]
 pub mod syscall;
+pub mod util;

 #[panic_handler]
 fn panic_handler(pi: &core::panic::PanicInfo) -> ! {

@@ -57,15 +57,31 @@ pub unsafe fn enter(initrd: Option<(usize, usize)>) -> ! {
     if let Some((start, end)) = initrd {
         let initrd = Box::into_raw(Box::new((mem::virtualize(start), mem::virtualize(end))));

-        for _ in 0..4 {
-            spawn!(fn (initrd_ptr: usize) {
-                debugln!("Running kernel init process");
+        spawn!(fn (initrd_ptr: usize) {
+            use memfs::Ramfs;
+            use vfs::{Filesystem, Ioctx};
+            use crate::fs::MemfsBlockAlloc;
+            debugln!("Running kernel init process");

-                let (start, _end) = unsafe { *(initrd_ptr as *const (usize, usize)) };
-                Process::execve(|space| elf::load_elf(space, start as *const u8), 0).unwrap();
-                panic!("This code should not run");
-            }, initrd as usize);
-        }
+            let (start, end) = unsafe { *(initrd_ptr as *const (usize, usize)) };
+            let size = end - start;
+
+            let fs = Ramfs::open(start as *mut u8, size, MemfsBlockAlloc {}).unwrap();
+            let root = fs.root().unwrap();
+            let ioctx = Ioctx::new(root);
+
+            // Open a test file
+            let node = ioctx.find(None, "/test.txt").unwrap();
+            let mut file = node.open().unwrap();
+            let mut buf = [0u8; 16];
+
+            while let Ok(count) = file.read(&mut buf) {
+                if count == 0 {
+                    break;
+                }
+                debugln!("Read {} bytes: {:?}", count, &buf[0..count]);
+            }
+        }, initrd as usize);
     }
     SCHED.enter();
 }
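
Note: the kernel init path changes shape here. Previously it spawned four identical processes that loaded the init ELF from the initrd via Process::execve. Now a single kernel thread mounts the tar initrd with Ramfs::open, resolves /test.txt through a fresh Ioctx, and reads the file 16 bytes at a time, logging each chunk with debugln!; the execve path is dropped from this function entirely.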