ext2: begin ext2 rework

This commit is contained in:
Mark Poliakov 2024-12-02 19:02:18 +02:00
parent 8743124b68
commit dd542ed176
9 changed files with 933 additions and 88 deletions

View File

@ -4,7 +4,10 @@ use alloc::{boxed::Box, vec};
use bytemuck::{Pod, Zeroable};
use libk::vfs::Metadata;
use static_assertions::const_assert_eq;
use yggdrasil_abi::io::{FileMode, FileType, GroupId, UserId};
use yggdrasil_abi::{
bitflags,
io::{FileMode, FileType, GroupId, UserId},
};
use crate::Ext2Fs;
@ -14,6 +17,35 @@ pub const ROOT_INODE: u32 = 2;
pub const DIRECT_BLOCK_COUNT: usize = 12;
bitflags! {
    // Superblock "optional" feature flags (s_feature_compat): a driver may
    // ignore these and still mount read-write safely.
    #[derive(Zeroable, Pod)]
    pub struct FsOptionalFeatures: u32 {
        const PREALLOCATE_DIR_BLOCKS: bit 0;
        const AFS_INODES: bit 1;
        const JOURNAL: bit 2;
        const INODE_EXT_ATTR: bit 3;
        const FS_RESIZE: bit 4;
        const DIR_HASH_INDEX: bit 5;
    }
}
bitflags! {
    // Superblock "required" feature flags (s_feature_incompat): a driver must
    // implement every set flag to mount the filesystem at all.
    #[derive(Zeroable, Pod)]
    pub struct FsRequiredFeatures: u32 {
        const COMPRESSION: bit 0;
        const DIRENT_TYPE_FIELD: bit 1;
    }
}
bitflags! {
    // Superblock read-only feature flags (s_feature_ro_compat): a driver must
    // implement every set flag to mount read-write; otherwise it must fall
    // back to a read-only mount.
    #[derive(Zeroable, Pod)]
    pub struct FsReadonlyFeatures: u32 {
        const SPARSE_SUPERBLOCKS: bit 0;
        const FILE_SIZE_64_BIT: bit 1;
        const DIR_BTREE: bit 2;
    }
}
// Raw ext2 `i_mode` value: file-type bits in the high nibble plus Unix
// permission bits (see `permissions()` / `default_for_type()`).
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(transparent)]
pub struct InodeMode(u16);
@ -55,9 +87,9 @@ pub struct ExtendedSuperblock {
pub first_non_reserved_inode: u32,
pub inode_struct_size: u16,
pub superblock_block_group_number: u16,
pub optional_features: u32,
pub required_features: u32,
pub readonly_features: u32,
pub optional_features: FsOptionalFeatures,
pub required_features: FsRequiredFeatures,
pub readonly_features: FsReadonlyFeatures,
pub filesystem_id: [u8; 16],
pub volume_name: [u8; 16],
pub last_mount_path: [u8; 64],
@ -145,6 +177,17 @@ impl BlockGroupDescriptorTable {
..(index + 1) * size_of::<BlockGroupDescriptor>()],
)
}
/// Returns a mutable view into the block group descriptor at `index`.
///
/// # Panics
/// Panics if `index` is outside the descriptor table, mirroring the bounds
/// policy of `descriptor()` — an out-of-range index is a caller bug.
pub fn descriptor_mut(&mut self, index: u32) -> &mut BlockGroupDescriptor {
    let index = index as usize;
    // Informative panic instead of a bare panic!() so the faulting index is logged
    assert!(
        index < self.len,
        "block group descriptor index out of range: {index} >= {}",
        self.len
    );
    bytemuck::from_bytes_mut(
        &mut self.data[index * size_of::<BlockGroupDescriptor>()
            ..(index + 1) * size_of::<BlockGroupDescriptor>()],
    )
}
}
const_assert_eq!(size_of::<BlockGroupDescriptor>(), 32);
@ -158,13 +201,21 @@ impl ExtendedSuperblock {
}
}
pub fn required_features(&self) -> u32 {
pub fn required_features(&self) -> FsRequiredFeatures {
if self.base.version_major != 0 {
self.required_features
} else {
todo!()
}
}
/// Returns the read-only-compat ("write") feature set from the superblock.
///
/// The extended feature fields are only meaningful when the major version is
/// non-zero; handling of version-0 (pre-1.0) superblocks, which have no
/// feature flags, is not implemented yet.
pub fn readonly_features(&self) -> FsReadonlyFeatures {
    if self.base.version_major != 0 {
        self.readonly_features
    } else {
        todo!()
    }
}
}
impl Deref for ExtendedSuperblock {
@ -183,7 +234,18 @@ impl DerefMut for ExtendedSuperblock {
impl Inode {
pub fn blocks(&self, fs: &Ext2Fs) -> usize {
(self.size_lower as usize).div_ceil(fs.block_size)
self.size(fs).div_ceil(fs.block_size as u64) as usize
}
/// Returns the file size in bytes: the lower 32 bits always, combined with
/// the upper 32 bits when the 64-bit-file-size write feature is enabled.
pub fn size(&self, fs: &Ext2Fs) -> u64 {
    let mut size = self.size_lower as u64;
    if fs
        .write_features
        .contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
    {
        // NOTE(review): in ext2 the upper-size field is reused for other
        // purposes on non-regular files — assumes callers only rely on the
        // 64-bit size for regular files; verify.
        size |= (self.size_upper as u64) << 32;
    }
    size
}
pub fn user_id(&self) -> UserId {
@ -213,6 +275,15 @@ impl InodeMode {
}
}
/// Returns the default `i_mode` for a newly created inode of type `ty`:
/// the ext2 file-type bits in the high nibble plus conventional default
/// permissions.
pub fn default_for_type(ty: FileType) -> Self {
    match ty {
        // 0x8000 = ext2 regular-file type bits
        FileType::File => Self(0o644 | 0x8000),
        // 0x4000 = ext2 directory type bits
        FileType::Directory => Self(0o755 | 0x4000),
        // 0xA000 = ext2 symlink type bits; symlink permissions are
        // conventionally 0777
        FileType::Symlink => Self(0o777 | 0xA000),
        // Devices, FIFOs and sockets are not supported yet
        _ => todo!(),
    }
}
pub fn permissions(&self) -> FileMode {
unsafe { FileMode::from_raw(self.0 as u32 & 0o777) }
}
@ -223,3 +294,14 @@ impl From<InodeMode> for u16 {
value.0
}
}
impl InodeBlockRefs {
    /// Returns a block reference table with no blocks attached: all direct
    /// pointers and all three indirection levels zeroed.
    pub fn empty() -> Self {
        Self {
            direct_blocks: [0; DIRECT_BLOCK_COUNT],
            indirect_block_l1: 0,
            indirect_block_l2: 0,
            indirect_block_l3: 0,
        }
    }
}

View File

@ -11,12 +11,11 @@ use yggdrasil_abi::{
util::FixedString,
};
use crate::{Dirent, Ext2Fs, Inode};
use crate::{data::FsRequiredFeatures, inode::InodeCache, Dirent, Ext2Fs, Inode};
pub struct DirectoryNode {
fs: Arc<Ext2Fs>,
inode: Inode,
#[allow(unused)]
inode_cache: Arc<InodeCache>,
ino: u32,
}
@ -26,6 +25,84 @@ struct DirentIter<'a> {
offset: usize,
}
// Mutable counterpart of `DirentIter`: walks the dirent chain within a
// single directory block looking for free space to place a new entry.
struct DirentIterMut<'a> {
    fs: &'a Ext2Fs,
    block: &'a mut [u8],
    offset: usize,
}
impl<'a> DirentIterMut<'a> {
    pub fn new(fs: &'a Ext2Fs, block: &'a mut [u8], offset: usize) -> Self {
        Self { fs, block, offset }
    }

    /// Tries to place a dirent `name` -> `ino` into this block by reusing a
    /// free (`ino == 0`) entry that is large enough. Returns `false` if no
    /// suitable slot exists in the block.
    pub fn try_fit(&mut self, name: &str, ino: u32) -> bool {
        let name = name.as_bytes();
        // Header + name, rounded up to the 4-byte dirent alignment
        let new_total_size = size_of::<Dirent>() + name.len();
        let new_aligned_size = (new_total_size + 3) & !3;

        loop {
            // `>` (not `>=`): an entry ending exactly at the block boundary is
            // still a valid fit.
            if self.offset + new_aligned_size > self.block.len() {
                return false;
            }
            let entry_end = self.offset + size_of::<Dirent>();
            let dirent: &Dirent = bytemuck::from_bytes(&self.block[self.offset..entry_end]);
            // Guard against a corrupted zero-size dirent, which would make this
            // loop spin forever without advancing.
            if dirent.ent_size == 0 {
                return false;
            }

            if dirent.ino != 0 || (dirent.ent_size as usize) < new_aligned_size {
                // Entry in use or too small — skip to the next one
                self.offset += dirent.ent_size as usize;
                continue;
            }

            log::debug!(
                "Fit into {:?}",
                self.offset..self.offset + dirent.ent_size as usize
            );
            let extra_space = dirent.ent_size as usize - new_aligned_size;
            let aligned_size = if extra_space >= size_of::<Dirent>() {
                // Enough tail space left to carve out a dummy entry after this one
                new_aligned_size
            } else {
                // Too little tail space for a dummy: the new entry claims the
                // whole old slot.
                dirent.ent_size as usize
            };

            let new_dirent = Dirent {
                // Must be `aligned_size`, not `new_aligned_size`: when the slot
                // is consumed whole, a shorter ent_size would leave a tail gap
                // covered by no dirent, corrupting the chain.
                ent_size: aligned_size as _,
                name_length_low: name.len() as _,
                type_indicator: 0, // TODO
                ino,
            };
            log::debug!("Place entry: {:?}", self.offset..self.offset + aligned_size);
            self.block[self.offset..self.offset + size_of::<Dirent>()]
                .copy_from_slice(bytemuck::bytes_of(&new_dirent));
            self.block[self.offset + size_of::<Dirent>()..self.offset + new_total_size]
                .copy_from_slice(name);

            if extra_space >= size_of::<Dirent>() {
                log::debug!(
                    "Place dummy: {:?}",
                    self.offset + new_aligned_size..self.offset + new_aligned_size + extra_space
                );
                // Mark the remaining space as a free (ino == 0) entry spanning
                // the rest of the old slot.
                let dummy = Dirent {
                    ent_size: extra_space as _,
                    name_length_low: 0,
                    type_indicator: 0,
                    ino: 0,
                };
                self.block[self.offset + new_aligned_size
                    ..self.offset + new_aligned_size + size_of::<Dirent>()]
                    .copy_from_slice(bytemuck::bytes_of(&dummy));
            }

            return true;
        }
    }
}
impl<'a> DirentIter<'a> {
pub fn new(fs: &'a Ext2Fs, block: &'a [u8], offset: usize) -> Self {
Self { fs, block, offset }
@ -52,8 +129,12 @@ impl<'a> Iterator for DirentIter<'a> {
let mut name_len = dirent.name_length_low as usize;
if self.fs.superblock.required_features() & (1 << 1) == 0 {
name_len = (dirent.name_length_low as usize) << 8;
if !self
.fs
.required_features
.contains(FsRequiredFeatures::DIRENT_TYPE_FIELD)
{
todo!()
}
// TODO handle broken dirent fields?
@ -71,17 +152,118 @@ impl<'a> Iterator for DirentIter<'a> {
}
impl DirectoryNode {
pub fn new(fs: Arc<Ext2Fs>, inode: Inode, ino: u32) -> NodeRef {
Node::directory(Self { fs, inode, ino }, NodeFlags::empty())
/// Wraps an already-existing on-disk directory inode into a VFS node.
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
    Node::directory(
        Self {
            fs,
            inode_cache,
            ino,
        },
        NodeFlags::empty(),
    )
}
/// Builds VFS node state for a freshly allocated directory inode and
/// populates the mandatory "." entry (and "..", when a parent is given).
pub async fn create(
    fs: Arc<Ext2Fs>,
    inode_cache: Arc<InodeCache>,
    parent_ino: Option<u32>,
    ino: u32,
) -> Result<NodeRef, Error> {
    let this = Self {
        fs,
        inode_cache,
        ino,
    };
    if let Some(parent) = parent_ino {
        this.create_entry("..", parent).await?;
    }
    this.create_entry(".", ino).await?;
    Ok(Node::directory(this, NodeFlags::empty()))
}
/// Inserts a `name` -> `ino` entry into this directory, incrementing the
/// target inode's hard link count.
///
/// First tries to reuse free space in the directory's existing blocks; if
/// no slot fits, grows the directory by one block and places the entry at
/// its start, padding the remainder with a single free spacer dirent.
async fn create_entry(&self, name: &str, ino: u32) -> Result<(), Error> {
    // ext2 dirent name length is one byte (with DIRENT_TYPE_FIELD)
    assert!(name.len() < 255);

    // The new entry is another hard link to the target inode
    {
        let mut inode = self.inode_cache.get_mut(ino).await?;
        inode.inc_hard_count();
    }

    let mut inode = self.inode_cache.get_mut(self.ino).await?;

    // Try to fit the entry into one of the already-allocated blocks first
    let mut fit = false;
    let n = inode.blocks(&self.fs);
    for i in 0..n {
        let mut block = self.fs.inode_block_mut(&inode, i as u32).await?;
        let mut iter = DirentIterMut::new(&self.fs, &mut block, 0);
        if iter.try_fit(name, ino) {
            block.set_dirty();
            fit = true;
            break;
        }
    }
    if fit {
        return Ok(());
    }

    // No free slot in any existing block: allocate a new block
    let block_index = inode.blocks(&self.fs) as u32;

    // Grow the storage to cover the new block
    inode
        .reserve(
            &self.fs,
            (block_index as u64 + 1) * self.fs.block_size as u64,
        )
        .await?;

    let mut block = self.fs.inode_block_mut(&inode, block_index).await?;
    block.fill(0);

    // Place the dirent at the start of the fresh block
    let total_len = size_of::<Dirent>() + name.len();
    let aligned_len = (total_len + 3) & !3;
    let dirent = Dirent {
        ino,
        ent_size: aligned_len as _,
        type_indicator: 0, // TODO
        name_length_low: name.len() as u8,
    };
    log::debug!("Place dirent {:?}", 0..aligned_len);
    block[..size_of::<Dirent>()].copy_from_slice(bytemuck::bytes_of(&dirent));
    block[size_of::<Dirent>()..total_len].copy_from_slice(name.as_bytes());

    // Cover the rest of the block with a single free (ino == 0) spacer entry
    // so the dirent chain spans the whole block.
    let dummy = Dirent {
        ino: 0,
        ent_size: (self.fs.block_size - aligned_len).try_into().unwrap(),
        type_indicator: 0,
        name_length_low: 0,
    };
    log::debug!("Place spacer {:?}", aligned_len..self.fs.block_size);
    block[aligned_len..aligned_len + size_of::<Dirent>()]
        .copy_from_slice(bytemuck::bytes_of(&dummy));
    block.set_dirty();

    Ok(())
}
async fn lookup_entry(&self, search_name: &str) -> Result<NodeRef, Error> {
let n = self.inode.blocks(&self.fs);
assert!(search_name.len() < 255);
let inode = self.inode_cache.get(self.ino).await?;
let n = inode.blocks(&self.fs);
for i in 0..n {
let block = self.fs.inode_block(&self.inode, i as u32).await?;
let block = self.fs.inode_block(&inode, i as u32).await?;
let iter = DirentIter::new(&self.fs, &block, 0);
@ -105,15 +287,21 @@ impl DirectoryNode {
mut pos: u64,
entries: &mut [MaybeUninit<DirectoryEntry>],
) -> Result<(usize, u64), Error> {
if pos >= self.inode.size_lower as u64 || entries.is_empty() {
let inode = self.inode_cache.get(self.ino).await?;
let size = inode.size(&self.fs);
if pos >= inode.size(&self.fs) || entries.is_empty() {
return Ok((0, pos));
}
loop {
if pos >= size {
return Ok((0, pos));
}
let index = pos / self.fs.block_size as u64;
let offset = (pos % self.fs.block_size as u64) as usize;
let block = self.fs.inode_block(&self.inode, index as u32).await?;
let block = self.fs.inode_block(&inode, index as u32).await?;
let iter = DirentIter::new(&self.fs, &block, offset);
@ -150,7 +338,8 @@ impl DirectoryNode {
impl CommonImpl for DirectoryNode {
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
Ok(self.inode.size_lower as _)
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.size(&self.fs))
}
fn as_any(&self) -> &dyn Any {
@ -158,7 +347,8 @@ impl CommonImpl for DirectoryNode {
}
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
Ok(self.inode.metadata())
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.metadata())
}
}
@ -175,16 +365,27 @@ impl DirectoryImpl for DirectoryNode {
block!(self.lookup_entry(search_name).await)?
}
fn create_node(&self, _parent: &NodeRef, _ty: FileType) -> Result<NodeRef, Error> {
Err(Error::ReadOnly)
fn create_node(&self, _parent: &NodeRef, ty: FileType) -> Result<NodeRef, Error> {
let node = block!(self.fs.create_node(Some(self.ino), ty).await)??;
Ok(node)
}
fn attach_node(&self, _parent: &NodeRef, _child: &NodeRef, _name: &str) -> Result<(), Error> {
Err(Error::ReadOnly)
fn attach_node(&self, _parent: &NodeRef, child: &NodeRef, name: &str) -> Result<(), Error> {
// Check that child is ext2
// if child.data_as_any().is::<DirectoryNode>() {
// log::debug!("attach_node: ext2 dir");
// }
let child_ino = match child.data_as_any() {
data if let Some(dir) = data.downcast_ref::<DirectoryNode>() => dir.ino,
_ => return Err(Error::InvalidOperation),
};
block!(self.create_entry(name, child_ino).await)??;
Ok(())
}
fn unlink_node(&self, _parent: &NodeRef, _name: &str) -> Result<(), Error> {
Err(Error::ReadOnly)
fn unlink_node(&self, _parent: &NodeRef, name: &str) -> Result<(), Error> {
    // TODO: remove the dirent for `name` and decrement the target inode's
    // hard link count; not implemented yet in this rework.
    todo!()
}
fn read_entries(

View File

@ -1,45 +1,57 @@
use core::any::Any;
use core::{any::Any, cmp::Ordering, ops::Deref};
use alloc::sync::Arc;
use libk::{
block,
error::Error,
task::sync::Mutex,
vfs::{CommonImpl, InstanceData, Metadata, Node, NodeFlags, NodeRef, RegularImpl},
};
use libk_util::sync::LockMethod;
use yggdrasil_abi::io::OpenOptions;
use crate::{Ext2Fs, Inode};
use crate::{inode::InodeCache, Ext2Fs, Inode};
pub struct RegularNode {
fs: Arc<Ext2Fs>,
inode: Inode,
#[allow(unused)]
inode_cache: Arc<InodeCache>,
ino: u32,
}
impl RegularNode {
pub fn new(fs: Arc<Ext2Fs>, inode: Inode, ino: u32) -> NodeRef {
Node::regular(Self { fs, inode, ino }, NodeFlags::empty())
/// Wraps an existing on-disk regular-file inode into a VFS node.
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
    Node::regular(
        Self {
            fs,
            inode_cache,
            ino,
        },
        NodeFlags::empty(),
    )
}
fn size(&self) -> u64 {
self.inode.size_lower as _
/// Resizes the backing inode to exactly `new_size` bytes, delegating block
/// allocation/deallocation to the cached inode's resize logic.
async fn resize(&self, new_size: u64) -> Result<(), Error> {
    let mut inode = self.inode_cache.get_mut(self.ino).await?;
    inode.resize(&self.fs, new_size).await
}
async fn read(&self, mut pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
if pos >= self.size() {
let inode = self.inode_cache.get(self.ino).await?;
if pos >= inode.size(&self.fs) {
return Ok(0);
}
let mut offset = 0;
let mut remaining = core::cmp::min(buffer.len(), (self.size() - pos) as usize);
let mut remaining = core::cmp::min(buffer.len(), (inode.size(&self.fs) - pos) as usize);
while remaining != 0 {
let block_index = pos / self.fs.block_size as u64;
let block_offset = (pos % self.fs.block_size as u64) as usize;
let amount = core::cmp::min(self.fs.block_size - block_offset, remaining);
let block = self.fs.inode_block(&self.inode, block_index as u32).await?;
let block = self.fs.inode_block(&inode, block_index as u32).await?;
buffer[offset..offset + amount]
.copy_from_slice(&block[block_offset..block_offset + amount]);
@ -51,11 +63,38 @@ impl RegularNode {
Ok(offset)
}
/// Writes `buffer` at byte offset `pos`, growing the file first so the
/// whole range fits. Returns the number of bytes written.
async fn write(&self, mut pos: u64, buffer: &[u8]) -> Result<usize, Error> {
    // An empty write must not grow the file: without this guard, reserve()
    // below would extend the file up to `pos` even though nothing is written.
    if buffer.is_empty() {
        return Ok(0);
    }

    let mut inode = self.inode_cache.get_mut(self.ino).await?;
    // Make sure storage for the full range [pos, pos + len) is allocated
    inode.reserve(&self.fs, pos + buffer.len() as u64).await?;

    let mut offset = 0;
    let mut remaining = buffer.len();

    while remaining != 0 {
        let block_index = pos / self.fs.block_size as u64;
        let block_offset = (pos % self.fs.block_size as u64) as usize;
        let amount = remaining.min(self.fs.block_size - block_offset);

        let mut block = self.fs.inode_block_mut(&inode, block_index as u32).await?;
        block[block_offset..block_offset + amount]
            .copy_from_slice(&buffer[offset..offset + amount]);
        // Mark the block for write-back, consistent with the directory code
        // (DirectoryNode::create_entry). Harmless if get_mut already marks it.
        block.set_dirty();

        pos += amount as u64;
        offset += amount;
        remaining -= amount;
    }

    Ok(offset)
}
}
impl CommonImpl for RegularNode {
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
Ok(self.inode.metadata())
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.metadata())
}
fn as_any(&self) -> &dyn Any {
@ -63,7 +102,8 @@ impl CommonImpl for RegularNode {
}
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
Ok(self.size())
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.size(&self.fs))
}
}
@ -73,10 +113,9 @@ impl RegularImpl for RegularNode {
_node: &NodeRef,
opts: OpenOptions,
) -> Result<(u64, Option<InstanceData>), Error> {
if opts.contains(OpenOptions::WRITE) {
if self.fs.force_readonly && opts.contains(OpenOptions::WRITE) {
return Err(Error::ReadOnly);
}
Ok((0, None))
}
@ -84,18 +123,24 @@ impl RegularImpl for RegularNode {
Ok(())
}
fn truncate(&self, _node: &NodeRef, _new_size: u64) -> Result<(), Error> {
Err(Error::ReadOnly)
/// Truncates (or extends) the file to `new_size` bytes.
fn truncate(&self, _node: &NodeRef, new_size: u64) -> Result<(), Error> {
    if self.fs.force_readonly {
        return Err(Error::ReadOnly);
    }
    // block! bridges the async resize into this synchronous trait method
    block!(self.resize(new_size).await)?
}
fn write(
&self,
_node: &NodeRef,
_instance: Option<&InstanceData>,
_pos: u64,
_buf: &[u8],
pos: u64,
buf: &[u8],
) -> Result<usize, Error> {
Err(Error::ReadOnly)
if self.fs.force_readonly {
return Err(Error::ReadOnly);
}
block!(self.write(pos, buf).await)?
}
fn read(

View File

@ -0,0 +1,224 @@
use core::{cmp::Ordering, ops::Deref};
use alloc::sync::{Arc, Weak};
use libk::{
error::Error,
task::sync::{AsyncMutex, MappedAsyncMutexGuard, Mutex},
};
use libk_util::{
lru_hash_table::LruCache,
sync::spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard, IrqSafeRwLockWriteGuard},
};
use crate::{
data::{FsReadonlyFeatures, DIRECT_BLOCK_COUNT},
Ext2Fs, Inode,
};
// A cached in-memory copy of an on-disk inode, plus a flag tracking whether
// it has diverged from disk and needs write-back on eviction (write-back is
// not implemented yet).
struct InodeHolder {
    inode: Inode,
    dirty: bool,
}

// Shared read access to a cached inode.
//
// SAFETY (field order matters): `lock` borrows from the lock inside `_entry`
// with a fabricated 'static lifetime (see `InodeRef::new`). Rust drops
// struct fields in declaration order, so `lock` MUST be declared before
// `_entry`: the guard is released before the Arc — potentially the last
// reference — frees the lock it points into.
pub struct InodeRef {
    lock: IrqSafeRwLockReadGuard<'static, InodeHolder>,
    _entry: Arc<IrqSafeRwLock<InodeHolder>>,
}

// Exclusive write access to a cached inode; same field-order requirement as
// `InodeRef` above.
pub struct InodeMut {
    lock: IrqSafeRwLockWriteGuard<'static, InodeHolder>,
    _entry: Arc<IrqSafeRwLock<InodeHolder>>,
}

// LRU cache of inode structures keyed by zero-based inode index, sharing one
// holder per inode; dirty inodes are meant to be flushed on eviction (TODO).
pub struct InodeCache {
    fs: Arc<Ext2Fs>,
    cache: AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>,
}
impl InodeCache {
pub fn with_capacity(fs: Arc<Ext2Fs>, bucket_capacity: usize) -> Self {
Self {
fs,
cache: AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4)),
}
}
async fn evict_inode(&self, ino: u32, inode: Arc<IrqSafeRwLock<InodeHolder>>) {
let inode = inode.read();
if inode.dirty {
log::debug!("Flush dirty inode {ino}");
todo!();
}
}
async fn fetch_inode(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let inode = self.fs.read_inode(ino).await?;
log::error!("InodeHolder created");
Ok(Arc::new(IrqSafeRwLock::new(InodeHolder {
inode,
dirty: false,
})))
}
async fn entry<'a>(
&'a self,
ino: u32,
) -> Result<
MappedAsyncMutexGuard<
'a,
Arc<IrqSafeRwLock<InodeHolder>>,
LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>,
>,
Error,
> {
if ino < 1 || ino > self.fs.total_inodes {
return Err(Error::InvalidFile);
}
let key = ino - 1;
self.cache
.lock()
.await
.try_map_guard_async(|cache: &'a mut LruCache<_, _>| async move {
let (value, evicted) = cache
.try_get_or_insert_with_async(key, || self.fetch_inode(ino))
.await?;
if let Some((ino, holder)) = evicted {
self.evict_inode(ino, holder).await;
}
Ok(value)
})
.await
}
pub async fn get(&self, ino: u32) -> Result<InodeRef, Error> {
self.entry(ino).await.map(|e| InodeRef::new(ino, e.deref()))
}
pub async fn get_mut(&self, ino: u32) -> Result<InodeMut, Error> {
self.entry(ino).await.map(|e| InodeMut::new(ino, e.deref()))
}
}
impl InodeRef {
    // `_ino` is currently unused (kept for symmetry with get()/diagnostics);
    // underscore-prefixed to silence the unused-parameter warning.
    fn new(_ino: u32, entry: &Arc<IrqSafeRwLock<InodeHolder>>) -> Self {
        let entry = entry.clone();
        // Safety: the 'static lifetime is fabricated; soundness requires that
        // the guard is released before `_entry` drops the (potentially last)
        // Arc keeping the lock alive. NOTE(review): fields drop in declaration
        // order — verify `lock` is declared before `_entry` in the struct.
        let lock = unsafe { core::mem::transmute(entry.read()) };
        Self {
            lock,
            _entry: entry,
        }
    }
}

impl Deref for InodeRef {
    type Target = Inode;

    fn deref(&self) -> &Self::Target {
        &self.lock.inode
    }
}
impl InodeMut {
    // `_ino` is currently unused; underscore-prefixed to silence the
    // unused-parameter warning.
    fn new(_ino: u32, entry: &Arc<IrqSafeRwLock<InodeHolder>>) -> Self {
        let entry = entry.clone();
        // Safety: the 'static lifetime is fabricated; soundness requires that
        // the guard is released before `_entry` drops the (potentially last)
        // Arc keeping the lock alive. NOTE(review): fields drop in declaration
        // order — verify `lock` is declared before `_entry` in the struct.
        let lock = unsafe { core::mem::transmute(entry.write()) };
        Self {
            lock,
            _entry: entry,
        }
    }
}
impl InodeMut {
    /// Allocates and attaches direct (level-0) blocks, growing the inode from
    /// `old_capacity` to `new_capacity` blocks. Only the first
    /// `DIRECT_BLOCK_COUNT` blocks are handled here; indirect levels are TODO.
    async fn grow_direct(
        &mut self,
        fs: &Ext2Fs,
        old_capacity: u64,
        new_capacity: u64,
    ) -> Result<(), Error> {
        let old_l0_capacity = old_capacity.min(DIRECT_BLOCK_COUNT as u64);
        let new_l0_capacity = new_capacity.min(DIRECT_BLOCK_COUNT as u64);
        debug_assert!(old_l0_capacity <= new_l0_capacity);

        for i in old_l0_capacity..new_l0_capacity {
            let i = i as usize;
            let block = fs.allocate_block().await?;
            self.lock.inode.blocks.direct_blocks[i] = block;
            self.lock.dirty = true;
        }

        Ok(())
    }

    /// Sets the inode's size to exactly `size` bytes, growing (shrinking is
    /// TODO) its block allocation and updating the on-disk size fields.
    pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<(), Error> {
        if size == self.size(fs) {
            return Ok(());
        }

        let new_blocks = size.div_ceil(fs.block_size as u64);
        // (was `self.size(&fs)` — needless double reference)
        let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);

        match old_blocks.cmp(&new_blocks) {
            // Grow
            Ordering::Less => {
                // Indirect block levels are not implemented yet
                if new_blocks > DIRECT_BLOCK_COUNT as u64 {
                    todo!();
                }
                log::debug!("Grow inode: {old_blocks} -> {new_blocks} blocks");
                self.grow_direct(fs, old_blocks, new_blocks).await?;
            }
            // Shrink
            Ordering::Greater => todo!(),
            // Same block count — only the size fields below change
            Ordering::Equal => (),
        }

        if fs
            .write_features
            .contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
        {
            self.lock.inode.size_upper = (size >> 32) as u32;
            self.lock.inode.size_lower = size as u32;
        } else {
            // Without the 64-bit size feature only 32 bits of size exist on disk
            if size > u32::MAX as u64 {
                todo!("File too large")
            }
            self.lock.inode.size_lower = size as u32;
        }
        self.lock.dirty = true;

        Ok(())
    }

    /// Ensures the inode can hold at least `capacity` bytes, growing it when
    /// necessary. Never shrinks.
    pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<(), Error> {
        if capacity > self.size(fs) {
            self.resize(fs, capacity).await?;
        }
        Ok(())
    }

    /// Increments the inode's hard link count and marks it dirty.
    pub fn inc_hard_count(&mut self) {
        self.lock.inode.hard_links += 1;
        self.lock.dirty = true;
    }
}
impl Deref for InodeMut {
    type Target = Inode;

    // Shared access only — mutation goes through InodeMut's methods so the
    // dirty flag stays in sync with changes.
    fn deref(&self) -> &Self::Target {
        &self.lock.inode
    }
}
impl Drop for InodeHolder {
    fn drop(&mut self) {
        // Cache-lifetime tracing; was log::error!, which wrongly reported a
        // routine drop as an error condition.
        log::debug!("InodeHolder dropped");
    }
}

View File

@ -1,3 +1,4 @@
#![feature(if_let_guard)]
#![cfg_attr(not(test), no_std)]
#![allow(clippy::new_ret_no_self)]
@ -5,23 +6,27 @@ extern crate alloc;
use alloc::sync::Arc;
use bytemuck::Zeroable;
use data::{FsReadonlyFeatures, FsRequiredFeatures, InodeBlockRefs, InodeMode};
use dir::DirectoryNode;
use file::RegularNode;
use inode::{InodeCache, InodeRef};
use libk::{
error::Error,
task::sync::Mutex,
vfs::{
block::{
cache::{BlockCache, CachedBlockRef},
cache::{BlockCache, CachedBlockMut, CachedBlockRef},
BlockDevice,
},
NodeRef,
},
};
use libk_util::OneTimeInit;
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, OneTimeInit};
mod data;
pub mod dir;
pub mod file;
pub mod inode;
pub mod symlink;
pub use data::{
@ -30,17 +35,31 @@ pub use data::{
use symlink::SymlinkNode;
use yggdrasil_abi::io::FileType;
// Mutable on-disk filesystem metadata — superblock and block group descriptor
// table — guarded together, with a dirty flag for eventual write-back.
struct State {
    superblock: ExtendedSuperblock,
    bgdt: BlockGroupDescriptorTable,
    dirty: bool,
}
pub struct Ext2Fs {
cache: BlockCache,
superblock: ExtendedSuperblock,
inode_cache: OneTimeInit<Arc<InodeCache>>,
state: IrqSafeRwLock<State>,
bgdt: BlockGroupDescriptorTable,
total_inodes: u32,
block_group_inode_count: u32,
block_group_block_count: u32,
inode_size: usize,
block_size: usize,
inodes_per_block: usize,
pointers_per_block: usize,
required_features: FsRequiredFeatures,
write_features: FsReadonlyFeatures,
force_readonly: bool,
root: OneTimeInit<NodeRef>,
}
@ -50,6 +69,8 @@ impl Ext2Fs {
log::error!("Ext2 init error: {:?}", e);
})?;
let fs = Arc::new(fs);
fs.inode_cache
.init(InodeCache::with_capacity(fs.clone(), 64).into());
let root = fs.load_node(data::ROOT_INODE).await?;
fs.root.init(root.clone());
@ -57,6 +78,57 @@ impl Ext2Fs {
Ok(root)
}
/// Classifies the superblock's read-only-compat ("write") feature set.
///
/// Returns `(force_readonly, enabled)`: whether the filesystem must be
/// mounted read-only because some set feature is unsupported, and the subset
/// of features this driver will honor.
fn handle_readonly_features(features: FsReadonlyFeatures) -> (bool, FsReadonlyFeatures) {
    // TODO this implementation ignores backups
    // (`mut` removed — the supported set is fixed)
    let supported =
        FsReadonlyFeatures::FILE_SIZE_64_BIT | FsReadonlyFeatures::SPARSE_SUPERBLOCKS;
    let mut enabled = FsReadonlyFeatures::empty();
    let mut readonly = false;

    for feature in features.set_fields() {
        let name = match feature {
            FsReadonlyFeatures::FILE_SIZE_64_BIT => "file-size-64-bit",
            FsReadonlyFeatures::DIR_BTREE => "dir-btree",
            FsReadonlyFeatures::SPARSE_SUPERBLOCKS => "sparse-superblock",
            _ => "unknown-feature",
        };

        if !supported.contains(feature) {
            log::warn!("ext2: unsupported write feature {name:?}");
            readonly = true;
        } else {
            log::debug!("ext2: enable write feature: {name:?}");
            enabled |= feature;
        }
    }

    (readonly, enabled)
}
/// Classifies the superblock's required (incompat) feature set.
///
/// Returns `(mountable, enabled)`: whether the filesystem can be mounted at
/// all (every set feature is implemented) and the subset of required features
/// this driver recognizes.
fn handle_required_features(features: FsRequiredFeatures) -> (bool, FsRequiredFeatures) {
    // (`mut` removed — the supported set is fixed)
    let supported = FsRequiredFeatures::DIRENT_TYPE_FIELD;
    let mut enabled = FsRequiredFeatures::empty();
    let mut unsupported = false;

    for feature in features.set_fields() {
        let name = match feature {
            FsRequiredFeatures::DIRENT_TYPE_FIELD => "dirent-type-field",
            FsRequiredFeatures::COMPRESSION => "compression",
            _ => "unknown-feature",
        };

        if !supported.contains(feature) {
            log::warn!("ext2: unsupported required feature: {name:?}");
            unsupported = true;
        } else {
            log::debug!("ext2: enable required feature: {name:?}");
            enabled |= feature;
        }
    }

    (!unsupported, enabled)
}
async fn create_fs(device: &'static dyn BlockDevice) -> Result<Self, Error> {
let mut superblock = ExtendedSuperblock::zeroed();
device
@ -75,6 +147,20 @@ impl Ext2Fs {
return Err(Error::InvalidArgument);
}
let required = superblock.required_features();
let readonly = superblock.readonly_features();
let (supported, required_features) = Self::handle_required_features(required);
if !supported {
log::warn!("ext2: filesystem not supported");
return Err(Error::NotImplemented);
}
let (force_readonly, write_features) = Self::handle_readonly_features(readonly);
if force_readonly {
log::warn!("ext2: cannot mount as read-write, mounting read-only");
}
let block_size = 1024usize << superblock.block_size_log2;
let bgdt_block_index = (data::SUPERBLOCK_OFFSET as usize).div_ceil(block_size);
@ -110,6 +196,10 @@ impl Ext2Fs {
.await?;
}
let total_inodes = superblock.total_inodes;
let block_group_inode_count = superblock.block_group_inode_count;
let block_group_block_count = superblock.block_group_block_count;
log::info!("Inode size: {}", superblock.inode_size());
Ok(Self {
@ -118,27 +208,38 @@ impl Ext2Fs {
inodes_per_block: block_size / superblock.inode_size(),
pointers_per_block: block_size / size_of::<u32>(),
total_inodes,
block_group_inode_count,
block_group_block_count,
// 128 × 8 cache
cache: BlockCache::with_capacity(device, block_size, 512),
superblock,
bgdt,
inode_cache: OneTimeInit::new(),
state: IrqSafeRwLock::new(State {
superblock,
bgdt,
dirty: false,
}),
required_features,
write_features,
force_readonly,
root: OneTimeInit::new(),
})
}
fn create_node(self: &Arc<Self>, inode: Inode, ino: u32) -> Result<NodeRef, Error> {
match inode.mode.node_type() {
Some(FileType::Directory) => Ok(DirectoryNode::new(self.clone(), inode, ino)),
Some(FileType::File) => Ok(RegularNode::new(self.clone(), inode, ino)),
Some(FileType::Symlink) => Ok(SymlinkNode::new(self.clone(), inode, ino)),
e => todo!("Unhandled inode type: {:?} ({:#x?})", e, inode.mode),
}
}
pub async fn load_node(self: &Arc<Self>, ino: u32) -> Result<NodeRef, Error> {
let inode = self.read_inode(ino).await?;
self.create_node(inode, ino)
let cache = self.inode_cache.get().clone();
let inode = cache.get(ino).await?;
let mode = inode.mode;
drop(inode);
match mode.node_type() {
Some(FileType::Directory) => Ok(DirectoryNode::new(self.clone(), cache, ino)),
Some(FileType::File) => Ok(RegularNode::new(self.clone(), cache, ino)),
Some(FileType::Symlink) => Ok(SymlinkNode::new(self.clone(), cache, ino)),
e => todo!("Unhandled inode type: {e:?} ({mode:#x?})"),
}
}
pub async fn block(&self, index: u32) -> Result<CachedBlockRef, Error> {
@ -146,20 +247,32 @@ impl Ext2Fs {
self.cache.get(address).await
}
pub async fn read_inode(&self, ino: u32) -> Result<Inode, Error> {
if ino < 1 || ino >= self.superblock.total_inodes {
todo!()
/// Returns mutable access to filesystem block `index` through the block
/// cache (mirrors `block()` for reads).
pub async fn block_mut(&self, index: u32) -> Result<CachedBlockMut, Error> {
    let address = index as u64 * self.block_size as u64;
    self.cache.get_mut(address).await
}
/// Resolves an inode number to the (block index, byte offset within block)
/// of its slot in the on-disk inode table.
///
/// # Errors
/// Returns `Error::InvalidFile` for inode numbers outside `1..=total_inodes`.
fn inode(&self, ino: u32) -> Result<(u32, usize), Error> {
    // Valid ext2 inode numbers are 1..=total_inodes. `>` (not the previous
    // `>=`) so the last inode is accepted — consistent with the range check
    // in InodeCache::entry.
    if ino < 1 || ino > self.total_inodes {
        return Err(Error::InvalidFile);
    }
    // Inode numbers are 1-based on disk
    let ino = ino - 1;

    let ino_group = ino / self.block_group_inode_count;
    let ino_in_group = ino % self.block_group_inode_count;
    // block where the inode table lives
    let inode_table = self.state.read().bgdt.descriptor(ino_group).inode_table;
    let ino_block = inode_table + ino_in_group / self.inodes_per_block as u32;
    let offset_in_block = (ino_in_group as usize % self.inodes_per_block) * self.inode_size;
    assert!(offset_in_block < self.block_size);

    Ok((ino_block, offset_in_block))
}
pub async fn read_inode(&self, ino: u32) -> Result<Inode, Error> {
let (ino_block, offset_in_block) = self.inode(ino)?;
let block = self.block(ino_block).await?;
Ok(*bytemuck::from_bytes(
@ -167,11 +280,173 @@ impl Ext2Fs {
))
}
/// Writes `inode` back into its slot in the on-disk inode table and marks
/// the containing cache block dirty.
pub async fn write_inode(&self, ino: u32, inode: &Inode) -> Result<(), Error> {
    let (ino_block, offset_in_block) = self.inode(ino)?;
    let mut block = self.block_mut(ino_block).await?;
    block[offset_in_block..offset_in_block + size_of::<Inode>()]
        .copy_from_slice(bytemuck::bytes_of(inode));
    block.set_dirty();
    Ok(())
}
/// Resolves logical block `index` of `inode` to a physical block and returns
/// shared access to the cached block data.
pub async fn inode_block(&self, inode: &Inode, index: u32) -> Result<CachedBlockRef, Error> {
    let block_index = self.inode_block_index(inode, index).await?;
    self.block(block_index).await
}

/// Resolves logical block `index` of `inode` and returns mutable access to
/// the cached block data; callers should `set_dirty()` after modifying.
pub async fn inode_block_mut(
    &self,
    inode: &Inode,
    index: u32,
) -> Result<CachedBlockMut, Error> {
    let block_index = self.inode_block_index(inode, index).await?;
    self.block_mut(block_index).await
}
/// Writes the (possibly modified) superblock back to disk. Not implemented
/// yet in this rework.
pub async fn flush_superblock(&self) -> Result<(), Error> {
    todo!()
}
/// Allocates a free data block: scans block groups for one with free blocks,
/// claims a bit in that group's block bitmap, and returns the block number.
/// Currently stubbed out — the commented draft below is kept as WIP for the
/// ongoing rework.
async fn allocate_block(&self) -> Result<u32, Error> {
    Err(Error::NotImplemented)
    // let blocks_per_bitmap_block = self.block_size * 8;
    // let index = {
    //     let mut state = self.state.write();
    //     let mut block_index = None;
    //     for group_index in 0..state.bgdt.len as u32 {
    //         let descriptor = state.bgdt.descriptor_mut(group_index);
    //         if descriptor.unallocated_blocks == 0 {
    //             continue;
    //         }
    //         // Get block bitmap
    //         let bitmap_block = descriptor.block_usage_bitmap;
    //         // TODO handle more than 1 block
    //         let mut bitmap = self.block_mut(bitmap_block).await?;
    //         for i in 0..blocks_per_bitmap_block.min(self.block_group_block_count as usize) {
    //             let index = i / 8;
    //             let bit = 1u8 << (i % 8);
    //             if bitmap[index] & bit == 0 {
    //                 let no = group_index * self.block_group_block_count + i as u32;
    //                 bitmap[index] |= bit;
    //                 bitmap.set_dirty();
    //                 descriptor.unallocated_blocks -= 1;
    //                 log::debug!(
    //                     "Allocated block #{no} from bg #{group_index}, free blocks {}",
    //                     descriptor.unallocated_blocks
    //                 );
    //                 block_index = Some(no);
    //                 break;
    //             }
    //         }
    //         if block_index.is_some() {
    //             break;
    //         }
    //     }
    //     let block_index = block_index.expect("TODO: ENOSPC");
    //     state.dirty = true;
    //     block_index
    // };
    // Ok(index)
}
/// Allocates a fresh inode of type `ty` (linked under `parent` when given)
/// and returns its VFS node. Currently stubbed out — the commented draft
/// below is kept as WIP for the ongoing rework.
async fn create_node(
    self: &Arc<Self>,
    parent: Option<u32>,
    ty: FileType,
) -> Result<NodeRef, Error> {
    Err(Error::NotImplemented)
    // let inodes_per_bitmap_block = self.block_size * 8;
    // let ino = {
    //     let mut state = self.state.write();
    //     let mut ino = None;
    //     for group_index in 0..state.bgdt.len as u32 {
    //         let descriptor = state.bgdt.descriptor_mut(group_index);
    //         if descriptor.unallocated_inodes == 0 {
    //             continue;
    //         }
    //         // Get inode bitmap
    //         let bitmap_block = descriptor.inode_usage_bitmap;
    //         // TODO handle more than 1 block
    //         let mut bitmap = self.block_mut(bitmap_block).await?;
    //         for i in 0..inodes_per_bitmap_block.min(self.block_group_inode_count as usize) {
    //             let index = i / 8;
    //             let bit = 1u8 << (i % 8);
    //             if bitmap[index] & bit == 0 {
    //                 let no = group_index * self.block_group_inode_count + i as u32;
    //                 bitmap[index] |= bit;
    //                 bitmap.set_dirty();
    //                 descriptor.unallocated_inodes -= 1;
    //                 log::debug!(
    //                     "Allocated inode #{no} from bg #{group_index}, free nodes {}",
    //                     descriptor.unallocated_inodes
    //                 );
    //                 ino = Some(no);
    //                 break;
    //             }
    //         }
    //         if ino.is_some() {
    //             break;
    //         }
    //     }
    //     let ino = ino.expect("TODO: ENOSPC");
    //     state.dirty = true;
    //     ino
    // };
    // let _guard = DropGuard { ino };
    // let mut inode = Inode {
    //     mode: InodeMode::default_for_type(ty),
    //     uid: 0,
    //     gid: 0,
    //     size_lower: 0,
    //     size_upper: 0,
    //     atime: 0,
    //     ctime: 0,
    //     mtime: 0,
    //     dtime: 0,
    //     flags: 0,
    //     generation: 0,
    //     facl: 0,
    //     frag_block_no: 0,
    //     os_val1: 0,
    //     os_val2: 0,
    //     sector_count: 0,
    //     hard_links: 0,
    //     blocks: InodeBlockRefs::empty(),
    // };
    // // TODO dealloc ino if failed
    // self.write_inode(ino, &inode).await?;
    // let node = match ty {
    //     FileType::Directory => {
    //         DirectoryNode::create(self.clone(), self.inode_cache.get().clone(), parent, ino)
    //             .await?
    //     }
    //     _ => todo!(),
    // };
    // if ty == FileType::Directory {
    //     // TODO Populate the directory with "." and ".."
    // }
    // Ok(node)
}
async fn read_index(&self, block_index: u32, index: usize) -> Result<u32, Error> {
let block = self.block(block_index).await?;
let indirect: &[u32] = unsafe {

View File

@ -8,24 +8,22 @@ use libk::{
};
use libk_util::sync::spin_rwlock::IrqSafeRwLock;
use crate::{Ext2Fs, Inode};
use crate::{inode::InodeCache, Ext2Fs, Inode};
pub struct SymlinkNode {
fs: Arc<Ext2Fs>,
inode: Inode,
#[allow(unused)]
inode_cache: Arc<InodeCache>,
ino: u32,
cache: IrqSafeRwLock<Vec<u8>>,
}
impl SymlinkNode {
pub fn new(fs: Arc<Ext2Fs>, inode: Inode, ino: u32) -> NodeRef {
pub fn new(fs: Arc<Ext2Fs>, inode_cache: Arc<InodeCache>, ino: u32) -> NodeRef {
Node::symlink(
Self {
fs,
inode,
ino,
inode_cache,
cache: IrqSafeRwLock::new(Vec::new()),
},
NodeFlags::empty(),
@ -33,10 +31,14 @@ impl SymlinkNode {
}
async fn read(&self, buf: &mut [u8]) -> Result<usize, Error> {
let len = self.inode.size_lower as usize;
let inode = self.inode_cache.get(self.ino).await?;
let len = inode.size(&self.fs) as usize;
if len >= self.fs.block_size {
todo!()
}
if buf.len() < len {
todo!();
}
let mut write = self.cache.write();
write.clear();
@ -44,11 +46,11 @@ impl SymlinkNode {
// If length of symlink is lower than 60, data is stored directly in "block address"
// section of the inode
if len < 60 {
let bytes = unsafe { self.link_from_inode_blocks(len) };
let bytes = unsafe { Self::link_from_inode_blocks(&inode, len) };
write.extend_from_slice(bytes);
buf[..len].copy_from_slice(bytes);
} else {
let block = self.fs.inode_block(&self.inode, 0).await?;
let block = self.fs.inode_block(&inode, 0).await?;
write.extend_from_slice(&block[..len]);
buf[..len].copy_from_slice(&block[..len]);
}
@ -56,15 +58,16 @@ impl SymlinkNode {
Ok(len)
}
unsafe fn link_from_inode_blocks(&self, len: usize) -> &[u8] {
unsafe fn link_from_inode_blocks(inode: &Inode, len: usize) -> &[u8] {
debug_assert!(len < 60);
&bytemuck::bytes_of(&self.inode.blocks)[..len]
&bytemuck::bytes_of(&inode.blocks)[..len]
}
}
impl CommonImpl for SymlinkNode {
fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
Ok(self.inode.size_lower as _)
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.size(&self.fs))
}
fn as_any(&self) -> &dyn Any {
@ -72,18 +75,18 @@ impl CommonImpl for SymlinkNode {
}
fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
Ok(self.inode.metadata())
let inode = block!(self.inode_cache.get(self.ino).await)??;
Ok(inode.metadata())
}
}
impl SymlinkImpl for SymlinkNode {
fn read_link(&self, buf: &mut [u8]) -> Result<usize, Error> {
if buf.len() < self.inode.size_lower as usize {
todo!()
}
{
let read = self.cache.read();
if buf.len() < read.len() {
todo!();
}
if !read.is_empty() {
buf[..read.len()].copy_from_slice(&read[..]);
return Ok(read.len());

View File

@ -154,6 +154,10 @@ impl CachedBlockMut {
_entry: entry,
}
}
/// Marks the underlying cached block as modified so the cache writes it back.
pub fn set_dirty(&mut self) {
    self.lock.set_dirty();
}
}
impl Deref for CachedBlockRef {

View File

@ -181,7 +181,8 @@ impl Node {
if !self.flags.contains(NodeFlags::IN_MEMORY_PROPS) {
// Update permissions in the real node
todo!();
// todo!();
log::error!("TODO: update real node metadata");
}
Ok(())

View File

@ -159,6 +159,7 @@ macro_rules! bitflags_impl_default {
#[macro_export]
macro_rules! bitflags {
(
$(#[derive($($struct_derive:tt)+)])?
$(#[doc = $struct_doc:expr])?
$(#[default = $struct_default:tt])?
$vis:vis struct $name:ident: $repr:ty {
@ -171,6 +172,7 @@ macro_rules! bitflags {
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[repr(transparent)]
$(#[derive($($struct_derive)+)])?
$(#[doc = $struct_doc])?
$vis struct $name($repr);
@ -179,6 +181,14 @@ macro_rules! bitflags {
$(#[doc = $field_doc])?
pub const $field_name: $name = $name(1 << $field_offset);
)+
// Iterates over every flag constant declared for this bitflags type.
pub fn fields() -> impl Iterator<Item = Self> {
    [$(Self::$field_name,)+].into_iter()
}

// Iterates over only the declared flags that are set in `self`.
pub fn set_fields(&self) -> impl Iterator<Item = Self> + '_ {
    Self::fields().filter(|&f| self.contains(f))
}
}
$crate::bitflags_impl_common!($name, $repr);