use core::{
    cmp::Ordering,
    ops::{Deref, DerefMut},
};

use alloc::sync::Arc;
use libk::{block, error::Error, task::sync::AsyncMutex, vfs::Metadata};
use libk_util::{
    lru_hash_table::LruCache,
    sync::spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard, IrqSafeRwLockWriteGuard},
};
use yggdrasil_abi::io::FileType;

use crate::{
    data::{FsReadonlyFeatures, InodeMode, DIRECT_BLOCK_COUNT},
    Ext2Fs, Inode,
};

/// On-disk inode plus a flag tracking whether it needs to be written back.
pub struct InodeHolder {
    inode: Inode,
    dirty: bool,
}

pub struct CachedInodeRef {
    entry: Arc<IrqSafeRwLock<InodeHolder>>,
}

pub struct UncachedInodeRef {
    entry: IrqSafeRwLock<InodeHolder>,
}

/// Read-only handle to an inode, either shared through the cache or fetched directly.
pub enum InodeRef {
    Cached(CachedInodeRef),
    Uncached(UncachedInodeRef),
}

pub struct CachedInodeMut {
    entry: Arc<IrqSafeRwLock<InodeHolder>>,
}

pub struct UncachedInodeMut {
    ino: u32,
    fs: Arc<Ext2Fs>,
    put: bool,
    data: IrqSafeRwLock<InodeHolder>,
}

/// Mutable handle to an inode; uncached handles are written back explicitly with
/// [`InodeMut::put`] or, failing that, on drop.
pub enum InodeMut {
    Cached(CachedInodeMut),
    Uncached(UncachedInodeMut),
}

/// LRU cache of inode structures shared by all [`InodeAccess`] handles of a filesystem.
pub struct InodeCache {
    fs: Arc<Ext2Fs>,
    cache: Option<AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>>,
}

/// Handle through which a single inode is read and modified.
pub struct InodeAccess {
    inode_cache: Arc<InodeCache>,
    ino: u32,
}

impl InodeAccess {
    pub fn new(inode_cache: Arc<InodeCache>, ino: u32) -> Self {
        Self { inode_cache, ino }
    }

    pub fn ino(&self) -> u32 {
        self.ino
    }

    pub fn cache(&self) -> &Arc<InodeCache> {
        &self.inode_cache
    }

    pub async fn get(&self) -> Result<InodeRef, Error> {
        self.inode_cache.get(self.ino).await
    }

    pub async fn get_mut(&self) -> Result<InodeMut, Error> {
        self.inode_cache.get_mut(self.ino).await
    }

    pub async fn update_metadata(&self, metadata: &Metadata) -> Result<(), Error> {
        let uid = metadata
            .uid
            .bits()
            .try_into()
            .map_err(|_| Error::InvalidArgument)?;
        let gid = metadata
            .gid
            .bits()
            .try_into()
            .map_err(|_| Error::InvalidArgument)?;

        let mut holder = self.get_mut().await?;
        {
            let mut inode = holder.write();
            inode.mtime = metadata.mtime as _;
            inode.atime = metadata.mtime as _;
            inode.ctime = metadata.ctime as _;
            inode.mode.update_permissions(metadata.mode);
            inode.uid = uid;
            inode.gid = gid;
        }
        holder.put().await
    }
}

impl InodeCache {
    pub fn with_capacity(fs: Arc<Ext2Fs>, bucket_capacity: usize) -> Self {
        Self {
            fs,
            cache: Some(AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4))),
        }
    }

    pub fn uncached(fs: Arc<Ext2Fs>) -> Self {
        Self { fs, cache: None }
    }

    async fn evict_inode(
        &self,
        ino: u32,
        inode: Arc<IrqSafeRwLock<InodeHolder>>,
    ) -> Result<(), Error> {
        let inode = inode.read();
        if inode.dirty {
            log::debug!("Flush dirty inode {ino}");
            self.fs.write_inode(ino, &inode.inode).await?;
        }
        Ok(())
    }

    async fn fetch_inode(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
        let inode = self.fs.read_inode(ino).await?;
        Ok(Arc::new(IrqSafeRwLock::new(InodeHolder {
            inode,
            dirty: false,
        })))
    }

    async fn entry(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
        let Some(cache) = self.cache.as_ref() else {
            log::warn!("Cannot use InodeCache::entry with no cache");
            return Err(Error::InvalidOperation);
        };

        if ino < 1 || ino > self.fs.total_inodes {
            return Err(Error::InvalidFile);
        }

        let mut lock = cache.lock().await;
        let (value, evicted) = lock
            .try_get_or_insert_with_async(ino, || self.fetch_inode(ino))
            .await?;
        let value = value.clone();

        if let Some((ino, holder)) = evicted {
            if let Err(error) = self.evict_inode(ino, holder).await {
                log::error!("ext2: inode flush error: ino={ino}, error={error:?}");
            }
        }

        Ok(value)
    }

    pub async fn get(&self, ino: u32) -> Result<InodeRef, Error> {
        if self.cache.is_some() {
            let entry = self.entry(ino).await?;
            let inode = CachedInodeRef { entry };
            Ok(InodeRef::Cached(inode))
        } else {
            let inode = self.fs.read_inode(ino).await?;
            let data = InodeHolder {
                inode,
                dirty: false,
            };
            let inode = UncachedInodeRef {
                entry: IrqSafeRwLock::new(data),
            };
            Ok(InodeRef::Uncached(inode))
        }
    }

    pub async fn get_mut(self: &Arc<Self>, ino: u32) -> Result<InodeMut, Error> {
        if self.cache.is_some() {
            let entry = self.entry(ino).await?;
            let inode = CachedInodeMut { entry };
            Ok(InodeMut::Cached(inode))
        } else {
            let inode = self.fs.read_inode(ino).await?;
            let data = InodeHolder {
                inode,
                dirty: false,
            };
            let inode = UncachedInodeMut {
                fs: self.fs.clone(),
                ino,
                put: false,
                data: IrqSafeRwLock::new(data),
            };
            Ok(InodeMut::Uncached(inode))
        }
    }

    pub async fn flush(&self) -> Result<(), Error> {
        if let Some(cache) = self.cache.as_ref() {
            let mut last_error = None;
            let mut lock = cache.lock().await;
            while let Some((ino, inode)) = lock.pop_entry() {
                if let Err(error) = self.evict_inode(ino, inode).await {
                    log::error!("ext2: flush inode cache error: ino={ino}, error={error:?}");
                    last_error = Some(error);
                }
            }
            match last_error {
                None => Ok(()),
                Some(error) => Err(error),
            }
        } else {
            Ok(())
        }
    }
}

impl Inode {
    async fn grow_direct(
        &mut self,
        fs: &Ext2Fs,
        old_capacity: u64,
        new_capacity: u64,
    ) -> Result<(), Error> {
        let old_l0_capacity = old_capacity.min(DIRECT_BLOCK_COUNT as u64);
        let new_l0_capacity = new_capacity.min(DIRECT_BLOCK_COUNT as u64);
        debug_assert!(old_l0_capacity <= new_l0_capacity);
        log::debug!("Grow L0: {old_l0_capacity} -> {new_l0_capacity}");

        for i in old_l0_capacity..new_l0_capacity {
            let i = i as usize;
            let block = fs.allocate_block().await?;
            self.blocks.direct_blocks[i] = block;
        }

        Ok(())
    }

    async fn grow_l1(
        &mut self,
        fs: &Ext2Fs,
        old_capacity: u64,
        new_capacity: u64,
    ) -> Result<(), Error> {
        let old_l1_capacity = old_capacity
            .saturating_sub(DIRECT_BLOCK_COUNT as u64)
            .min(fs.pointers_per_block as u64);
        let new_l1_capacity = new_capacity
            .saturating_sub(DIRECT_BLOCK_COUNT as u64)
            .min(fs.pointers_per_block as u64);
        log::debug!("Grow L1: {old_l1_capacity} -> {new_l1_capacity}");
        debug_assert!(old_l1_capacity <= new_l1_capacity);

        if old_l1_capacity == 0 && new_l1_capacity != 0 {
            // Allocate an indirect block
            let block = fs.allocate_block().await?;
            self.blocks.indirect_block_l1 = block;
        }

        for i in old_l1_capacity..new_l1_capacity {
            // Allocate inner blocks
            debug_assert_ne!(self.blocks.indirect_block_l1, 0);
            let i = i as usize;
            let block = fs.allocate_block().await?;
            fs.write_index(self.blocks.indirect_block_l1, i, block)
                .await?;
        }

        Ok(())
    }

    async fn grow_l2(
        &mut self,
        fs: &Ext2Fs,
        old_capacity: u64,
        new_capacity: u64,
    ) -> Result<(), Error> {
        let old_l2_capacity = (old_capacity as usize)
            .saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
            .min(fs.pointers_per_block * fs.pointers_per_block);
        let new_l2_capacity = (new_capacity as usize)
            .saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
            .min(fs.pointers_per_block * fs.pointers_per_block);
        let old_l2_l0 = old_l2_capacity.div_ceil(fs.pointers_per_block);
        let new_l2_l0 = new_l2_capacity.div_ceil(fs.pointers_per_block);
        log::debug!(
            "Grow L2: {old_l2_capacity} ({old_l2_l0} L2-1) -> {new_l2_capacity} ({new_l2_l0} L2-1)"
        );

        if old_l2_capacity == 0 && new_l2_capacity != 0 {
            // Allocate L2 indirect block
            let block = fs.allocate_block().await?;
            self.blocks.indirect_block_l2 = block;
        }

        // Grow L2 direct-indirect block
        for i in old_l2_l0..new_l2_l0 {
            debug_assert_ne!(self.blocks.indirect_block_l2, 0);
            let block = fs.allocate_block().await?;
            fs.write_index(self.blocks.indirect_block_l2, i, block)
                .await?;
        }

        // Grow L2 indirect-indirect blocks
        for i in old_l2_capacity..new_l2_capacity {
            debug_assert_ne!(self.blocks.indirect_block_l2, 0);
            let l1i = i / fs.pointers_per_block;
            let l0i = i % fs.pointers_per_block;

            let indirect = fs.read_index(self.blocks.indirect_block_l2, l1i).await?;
            debug_assert_ne!(indirect, 0);

            let block = fs.allocate_block().await?;
            fs.write_index(indirect, l0i, block).await?;
        }

        Ok(())
    }

    fn set_size(&mut self, fs: &Ext2Fs, size: u64) {
        let block_count = size.div_ceil(fs.block_size as u64);

        if fs
            .write_features
            .contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
        {
            self.size_upper = (size >> 32) as u32;
            self.size_lower = size as u32;
        } else {
            if size > u32::MAX as u64 {
                todo!("File too large")
            }
            self.size_lower = size as u32;
        }

        self.sector_count = block_count as u32 * (fs.block_size / 512) as u32;
    }

    pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<bool, Error> {
        if size == self.size(fs) {
            return Ok(false);
        }
        // TODO check max inode size
        let new_blocks = size.div_ceil(fs.block_size as u64);
        let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);

        if new_blocks as usize
            > DIRECT_BLOCK_COUNT
                + fs.pointers_per_block
                + fs.pointers_per_block * fs.pointers_per_block
        {
            log::warn!("ext2: only L0/L1/L2 blocks are supported");
            return Err(Error::InvalidArgument);
        }

        match old_blocks.cmp(&new_blocks) {
            // Grow
            Ordering::Less => {
                log::debug!("Grow inode: {old_blocks} -> {new_blocks} blocks");
                self.grow_direct(fs, old_blocks, new_blocks).await?;
                self.grow_l1(fs, old_blocks, new_blocks).await?;
                self.grow_l2(fs, old_blocks, new_blocks).await?;
            }
            // Shrink
            Ordering::Greater => todo!(),
            // No change
            Ordering::Equal => (),
        }

        self.set_size(fs, size);

        Ok(true)
    }

    pub fn inc_hard_count(&mut self) {
        self.hard_links += 1;
    }

    pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<bool, Error> {
        if capacity > self.size(fs) {
            self.resize(fs, capacity).await
        } else {
            Ok(false)
        }
    }
}

impl Deref for InodeHolder {
    type Target = Inode;

    fn deref(&self) -> &Self::Target {
        &self.inode
    }
}

impl DerefMut for InodeHolder {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Any mutable access marks the inode dirty, so eviction/flush will write it back
        self.dirty = true;
        &mut self.inode
    }
}

impl InodeMut {
    pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
        match self {
            Self::Cached(inode) => inode.entry.read(),
            Self::Uncached(inode) => inode.data.read(),
        }
    }

    pub fn write(&self) -> IrqSafeRwLockWriteGuard<InodeHolder> {
        match self {
            Self::Cached(inode) => inode.entry.write(),
            Self::Uncached(inode) => inode.data.write(),
        }
    }

    pub async fn put(&mut self) -> Result<(), Error> {
        match self {
            Self::Cached(_) => (),
            Self::Uncached(inode) => {
                log::info!("Write inode #{} back", inode.ino);
                inode.put = true;
                inode.fs.write_inode(inode.ino, &inode.data.read()).await?;
            }
        }
        Ok(())
    }
}

impl InodeRef {
    pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
        match self {
            Self::Cached(inode) => inode.entry.read(),
            Self::Uncached(inode) => inode.entry.read(),
        }
    }
}

impl Drop for InodeMut {
    fn drop(&mut self) {
        match self {
            Self::Uncached(inode) if !inode.put => {
                // Do node writeback in background
                let ino = inode.ino;
                match block!(self.put().await) {
                    Err(error) | Ok(Err(error)) => {
                        log::error!("Drop for InodeMut (#{}) failed: {error:?}", ino);
                    }
                    Ok(Ok(())) => (),
                }
            }
            _ => (),
        }
    }
}
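
// Illustrative usage sketch (kept as a comment so this file stays valid Rust): how a
// caller might drive the inode cache. The surrounding setup is assumed, not shown here:
// `fs: Arc<Ext2Fs>` and a valid inode number `ino` are hypothetical bindings.
//
//     let cache = Arc::new(InodeCache::with_capacity(fs.clone(), 64));
//     let access = InodeAccess::new(cache.clone(), ino);
//
//     // Modify the inode through a mutable handle; DerefMut on InodeHolder marks it dirty.
//     let mut handle = access.get_mut().await?;
//     handle.write().inc_hard_count();
//     handle.put().await?; // no-op for cached handles, immediate write-back otherwise
//
//     // Dirty cached inodes are written back on LRU eviction or on an explicit flush,
//     // e.g. at unmount:
//     cache.flush().await?;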