ext2: remove useless code from inode cache

Mark Poliakov 2025-01-02 11:46:47 +02:00
parent d597197ca2
commit 68d0568af3
4 changed files with 518 additions and 653 deletions
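
For orientation, a minimal before/after sketch of the constructor change (names taken from the hunks below; the surrounding call site is assumed):

// Old: two construction paths chosen at mount time, plus separate
// cached/uncached wrapper types inside the cache module.
let inode_cache = match cached {
    false => InodeCache::uncached(fs.clone()),
    true => InodeCache::with_capacity(fs.clone(), 64),
};

// New: a single LRU-backed cache; the "uncached" configuration becomes
// synchronous write-through instead of a separate code path.
let inode_cache = InodeCache::with_capacity(fs.clone(), 64, !cached);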

@@ -1,648 +0,0 @@
use core::{
cmp::Ordering,
ops::{AsyncFnOnce, Deref, DerefMut},
};
use alloc::sync::Arc;
use bytemuck::Zeroable;
use libk::{
block,
error::Error,
task::sync::AsyncMutex,
time::real_time,
vfs::{Metadata, NodeRef},
};
use libk_util::{
lru_hash_table::LruCache,
sync::spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard, IrqSafeRwLockWriteGuard},
};
use yggdrasil_abi::io::{FileMode, FileType};
use crate::{
data::{FsReadonlyFeatures, InodeMode, DIRECT_BLOCK_COUNT},
dir::DirectoryNode,
file::RegularNode,
Ext2Fs, Inode,
};
pub struct InodeHolder {
inode: Inode,
dirty: bool,
}
pub struct CachedInodeRef {
entry: Arc<IrqSafeRwLock<InodeHolder>>,
}
pub struct UncachedInodeRef {
entry: IrqSafeRwLock<InodeHolder>,
}
pub enum InodeRef {
Cached(CachedInodeRef),
Uncached(UncachedInodeRef),
}
pub struct CachedInodeMut {
entry: Arc<IrqSafeRwLock<InodeHolder>>,
}
pub struct UncachedInodeMut {
ino: u32,
fs: Arc<Ext2Fs>,
put: bool,
data: IrqSafeRwLock<InodeHolder>,
}
pub enum InodeMut {
Cached(CachedInodeMut),
Uncached(UncachedInodeMut),
}
pub struct InodeCache {
fs: Arc<Ext2Fs>,
cache: Option<AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>>,
}
pub struct InodeAccess {
inode_cache: Arc<InodeCache>,
ino: u32,
}
impl InodeAccess {
pub fn new(inode_cache: Arc<InodeCache>, ino: u32) -> Self {
Self { inode_cache, ino }
}
pub fn ino(&self) -> u32 {
self.ino
}
pub fn cache(&self) -> &Arc<InodeCache> {
&self.inode_cache
}
pub async fn map<T, F: FnOnce(&Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.get(self.ino).await?;
let result = {
let lock = inode.read();
mapper(&lock)
};
result
}
pub async fn map_mut<T, F: FnOnce(&mut Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let mut inode = self.inode_cache.get_mut(self.ino).await?;
let result = {
let mut lock = inode.write();
mapper(&mut lock)
};
inode.put().await?;
result
}
pub async fn amap<T, F: AsyncFnOnce(&Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.get(self.ino).await?;
let result = {
let lock = inode.read();
mapper(&*lock).await
};
result
}
pub async fn amap_mut<T, F: AsyncFnOnce(&mut Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let mut inode = self.inode_cache.get_mut(self.ino).await?;
let result = {
let mut lock = inode.write();
mapper(&mut *lock).await
};
inode.put().await?;
result
}
pub async fn map_blocks<T, F: Fn(&Inode, usize, &[u8]) -> Result<Option<T>, Error>>(
&self,
mapper: F,
) -> Result<Option<T>, Error> {
self.amap(async |inode| {
let block_count = inode.blocks(&self.inode_cache.fs);
for index in 0..block_count {
let result = self
.inode_cache
.fs
.with_inode_block(inode, index as u32, |block| mapper(inode, index, block))
.await?;
if let Some(result) = result {
return Ok(Some(result));
}
}
Ok(None)
})
.await
}
pub async fn map_blocks_mut<T, F: Fn(&Inode, usize, &mut [u8]) -> Result<Option<T>, Error>>(
&self,
mapper: F,
) -> Result<Option<T>, Error> {
self.amap(async |inode| {
let block_count = inode.blocks(&self.inode_cache.fs);
for index in 0..block_count {
let result = self
.inode_cache
.fs
.with_inode_block_mut(inode, index as u32, 0, |block| {
mapper(inode, index, block)
})
.await?;
if let Some(result) = result {
return Ok(Some(result));
}
}
Ok(None)
})
.await
}
pub async fn metadata(&self) -> Result<Metadata, Error> {
self.map(|inode| Ok(inode.metadata(&self.inode_cache.fs, self.ino)))
.await
}
pub async fn size(&self) -> Result<u64, Error> {
self.map(|inode| Ok(inode.size(&self.inode_cache.fs))).await
}
pub async fn update_metadata(&self, metadata: &Metadata) -> Result<(), Error> {
let uid = metadata
.uid
.bits()
.try_into()
.map_err(|_| Error::InvalidArgument)?;
let gid = metadata
.gid
.bits()
.try_into()
.map_err(|_| Error::InvalidArgument)?;
self.map_mut(|inode| {
inode.mtime = metadata.mtime as _;
inode.atime = metadata.mtime as _;
inode.ctime = metadata.ctime as _;
inode.mode.update_permissions(metadata.mode);
inode.uid = uid;
inode.gid = gid;
Ok(())
})
.await
}
async fn populate_inode(
fs: &Arc<Ext2Fs>,
ty: FileType,
mode: FileMode,
parent_ino: Option<u32>,
ino: u32,
) -> Result<NodeRef, Error> {
log::info!("ext2: allocated inode #{ino}");
let now = real_time().seconds as u32;
let mut imode = InodeMode::default_for_type(ty);
imode.update_permissions(mode);
fs.write_inode(
ino,
&Inode {
ctime: now,
mtime: now,
atime: now,
mode: imode,
..Inode::zeroed()
},
)
.await?;
let this = InodeAccess::new(fs.inode_cache.get().clone(), ino);
let fs = fs.clone();
let node = match ty {
FileType::Directory => DirectoryNode::create(fs, this, parent_ino).await?,
FileType::File => RegularNode::new(fs, this),
FileType::Symlink => todo!(),
_ => return Err(Error::NotImplemented),
};
Ok(node)
}
pub async fn allocate(
fs: &Arc<Ext2Fs>,
ty: FileType,
mode: FileMode,
parent_ino: Option<u32>,
) -> Result<NodeRef, Error> {
if parent_ino.is_none() && ty != FileType::Directory {
log::warn!("ext2: cannot allocate non-directory inode without a parent");
return Err(Error::InvalidOperation);
}
if !matches!(ty, FileType::Symlink | FileType::Directory | FileType::File) {
log::warn!("ext2: cannot allocate inode of type {ty:?}");
return Err(Error::InvalidOperation);
}
let ino = fs.allocate_inode(ty == FileType::Directory).await?;
match Self::populate_inode(fs, ty, mode, parent_ino, ino).await {
Ok(node) => Ok(node),
Err(error) => {
log::warn!("ext2: couldn't set up inode #{ino}: {error:?}");
// TODO free the inode and flush it from the cache
Err(error)
}
}
}
}
impl InodeCache {
pub fn with_capacity(fs: Arc<Ext2Fs>, bucket_capacity: usize) -> Self {
Self {
fs,
cache: Some(AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4))),
}
}
pub fn uncached(fs: Arc<Ext2Fs>) -> Self {
Self { fs, cache: None }
}
async fn evict_inode(
&self,
ino: u32,
inode: Arc<IrqSafeRwLock<InodeHolder>>,
) -> Result<(), Error> {
let inode = inode.read();
if inode.dirty {
log::debug!("Flush dirty inode {ino}");
self.fs.write_inode(ino, &inode.inode).await?;
}
Ok(())
}
async fn fetch_inode(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let inode = self.fs.read_inode(ino).await?;
Ok(Arc::new(IrqSafeRwLock::new(InodeHolder {
inode,
dirty: false,
})))
}
async fn entry(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let Some(cache) = self.cache.as_ref() else {
log::warn!("Cannot use InodeCache::entry with no cache");
return Err(Error::InvalidOperation);
};
if ino < 1 || ino > self.fs.total_inodes {
return Err(Error::InvalidFile);
}
let mut lock = cache.lock().await;
let (value, evicted) = lock
.try_get_or_insert_with_async(ino, || self.fetch_inode(ino))
.await?;
let value = value.clone();
if let Some((ino, holder)) = evicted {
if let Err(error) = self.evict_inode(ino, holder).await {
log::error!("ext2: inode flush error: ino={ino}, error={error:?}");
}
}
Ok(value)
}
async fn get(&self, ino: u32) -> Result<InodeRef, Error> {
if self.cache.is_some() {
let entry = self.entry(ino).await?;
let inode = CachedInodeRef { entry };
Ok(InodeRef::Cached(inode))
} else {
let inode = self.fs.read_inode(ino).await?;
let data = InodeHolder {
inode,
dirty: false,
};
let inode = UncachedInodeRef {
entry: IrqSafeRwLock::new(data),
};
Ok(InodeRef::Uncached(inode))
}
}
async fn get_mut(self: &Arc<Self>, ino: u32) -> Result<InodeMut, Error> {
if self.cache.is_some() {
let entry = self.entry(ino).await?;
let inode = CachedInodeMut { entry };
Ok(InodeMut::Cached(inode))
} else {
let inode = self.fs.read_inode(ino).await?;
let data = InodeHolder {
inode,
dirty: false,
};
let inode = UncachedInodeMut {
fs: self.fs.clone(),
ino,
put: false,
data: IrqSafeRwLock::new(data),
};
Ok(InodeMut::Uncached(inode))
}
}
pub async fn flush(&self) -> Result<(), Error> {
if let Some(cache) = self.cache.as_ref() {
let mut last_error = None;
let mut lock = cache.lock().await;
while let Some((ino, inode)) = lock.pop_entry() {
if let Err(error) = self.evict_inode(ino, inode).await {
log::error!("ext2: flush inode cache error: ino={ino}, error={error:?}");
last_error = Some(error);
}
}
match last_error {
None => Ok(()),
Some(error) => Err(error),
}
} else {
Ok(())
}
}
}
impl Inode {
async fn grow_direct(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l0_capacity = old_capacity.min(DIRECT_BLOCK_COUNT as u64);
let new_l0_capacity = new_capacity.min(DIRECT_BLOCK_COUNT as u64);
debug_assert!(old_l0_capacity <= new_l0_capacity);
log::debug!("Grow L0: {old_l0_capacity} -> {new_l0_capacity}");
for i in old_l0_capacity..new_l0_capacity {
let i = i as usize;
let block = fs.allocate_block().await?;
self.blocks.direct_blocks[i] = block;
}
Ok(())
}
async fn grow_l1(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l1_capacity = old_capacity
.saturating_sub(DIRECT_BLOCK_COUNT as u64)
.min(fs.pointers_per_block as u64);
let new_l1_capacity = new_capacity
.saturating_sub(DIRECT_BLOCK_COUNT as u64)
.min(fs.pointers_per_block as u64);
log::debug!("Grow L1: {old_l1_capacity} -> {new_l1_capacity}");
debug_assert!(old_l1_capacity <= new_l1_capacity);
if old_l1_capacity == 0 && new_l1_capacity != 0 {
// Allocate an indirect block
let block = fs.allocate_block().await?;
self.blocks.indirect_block_l1 = block;
}
for i in old_l1_capacity..new_l1_capacity {
// Allocate inner blocks
debug_assert_ne!(self.blocks.indirect_block_l1, 0);
let i = i as usize;
let block = fs.allocate_block().await?;
fs.write_index(self.blocks.indirect_block_l1, i, block)
.await?;
}
Ok(())
}
async fn grow_l2(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l2_capacity = (old_capacity as usize)
.saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
.min(fs.pointers_per_block * fs.pointers_per_block);
let new_l2_capacity = (new_capacity as usize)
.saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
.min(fs.pointers_per_block * fs.pointers_per_block);
let old_l2_l0 = old_l2_capacity.div_ceil(fs.pointers_per_block);
let new_l2_l0 = new_l2_capacity.div_ceil(fs.pointers_per_block);
log::debug!(
"Grow L2: {old_l2_capacity} ({old_l2_l0} L2-1) -> {new_l2_capacity} ({new_l2_l0} L2-1)"
);
if old_l2_capacity == 0 && new_l2_capacity != 0 {
// Allocate L2 indirect block
let block = fs.allocate_block().await?;
self.blocks.indirect_block_l2 = block;
}
// Grow L2 direct-indirect block
for i in old_l2_l0..new_l2_l0 {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let block = fs.allocate_block().await?;
fs.write_index(self.blocks.indirect_block_l2, i, block)
.await?;
}
// Grow L2 indirect-indirect blocks
for i in old_l2_capacity..new_l2_capacity {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let l1i = i / fs.pointers_per_block;
let l0i = i % fs.pointers_per_block;
let indirect = fs.read_index(self.blocks.indirect_block_l2, l1i).await?;
debug_assert_ne!(indirect, 0);
let block = fs.allocate_block().await?;
fs.write_index(indirect, l0i, block).await?;
}
Ok(())
}
fn set_size(&mut self, fs: &Ext2Fs, size: u64) {
let block_count = size.div_ceil(fs.block_size as u64);
if fs
.write_features
.contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
{
self.size_upper = (size >> 32) as u32;
self.size_lower = size as u32;
} else {
if size > u32::MAX as u64 {
todo!("File too large")
}
self.size_lower = size as u32;
}
self.sector_count = block_count as u32 * (fs.block_size / 512) as u32;
}
pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<bool, Error> {
if size == self.size(fs) {
return Ok(false);
}
// TODO check max inode size
let new_blocks = size.div_ceil(fs.block_size as u64);
let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);
if new_blocks as usize
> DIRECT_BLOCK_COUNT
+ fs.pointers_per_block
+ fs.pointers_per_block * fs.pointers_per_block
{
log::warn!("ext2: only L0/L1 are supported");
return Err(Error::InvalidArgument);
}
match old_blocks.cmp(&new_blocks) {
// Grow
Ordering::Less => {
log::debug!("Grow inode: {old_blocks} -> {new_blocks} blocks");
self.grow_direct(fs, old_blocks, new_blocks).await?;
self.grow_l1(fs, old_blocks, new_blocks).await?;
self.grow_l2(fs, old_blocks, new_blocks).await?;
}
// Shrink
Ordering::Greater => todo!(),
// No change
Ordering::Equal => (),
}
self.set_size(fs, size);
Ok(true)
}
pub fn inc_hard_count(&mut self) {
self.hard_links += 1;
}
pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<bool, Error> {
if capacity > self.size(fs) {
self.resize(fs, capacity).await
} else {
Ok(false)
}
}
}
impl Deref for InodeHolder {
type Target = Inode;
fn deref(&self) -> &Self::Target {
&self.inode
}
}
impl DerefMut for InodeHolder {
fn deref_mut(&mut self) -> &mut Self::Target {
self.dirty = true;
&mut self.inode
}
}
impl InodeMut {
pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.read(),
Self::Uncached(inode) => inode.data.read(),
}
}
pub fn write(&self) -> IrqSafeRwLockWriteGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.write(),
Self::Uncached(inode) => inode.data.write(),
}
}
pub async fn put(&mut self) -> Result<(), Error> {
match self {
Self::Cached(_) => (),
Self::Uncached(inode) => {
log::info!("Write inode #{} back", inode.ino);
inode.put = true;
inode.fs.write_inode(inode.ino, &inode.data.read()).await?;
}
}
Ok(())
}
}
impl InodeRef {
pub fn read(&self) -> IrqSafeRwLockReadGuard<InodeHolder> {
match self {
Self::Cached(inode) => inode.entry.read(),
Self::Uncached(inode) => inode.entry.read(),
}
}
}
impl Drop for InodeMut {
fn drop(&mut self) {
match self {
Self::Uncached(inode) if !inode.put => {
// Write the node back, blocking in place (cannot await in Drop)
let ino = inode.ino;
match block!(self.put().await) {
Err(error) | Ok(Err(error)) => {
log::error!("Drop for InodeMut (#{}) failed: {error:?}", ino);
}
Ok(Ok(())) => (),
}
}
_ => (),
}
}
}
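
A short sketch of the contract this file provided before its removal (the call site is assumed for illustration; get_mut() was crate-internal): put() was a no-op for cached entries and an immediate write_inode() for uncached ones, with Drop falling back to a blocking writeback if put() was never awaited.

async fn old_style_update(cache: &Arc<InodeCache>, ino: u32) -> Result<(), Error> {
    let mut guard = cache.get_mut(ino).await?;  // InodeMut::Cached or ::Uncached
    guard.write().inc_hard_count();             // DerefMut on InodeHolder marks it dirty
    guard.put().await                           // no-op when cached, write_inode() when uncached
}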

@@ -0,0 +1,329 @@
use core::ops::{AsyncFnOnce, Deref, DerefMut};
use alloc::sync::Arc;
use bytemuck::Zeroable;
use libk::{
error::Error,
task::sync::AsyncMutex,
time::real_time,
vfs::{Metadata, NodeRef},
};
use libk_util::{lru_hash_table::LruCache, sync::spin_rwlock::IrqSafeRwLock};
use yggdrasil_abi::io::{FileMode, FileType};
use crate::{data::InodeMode, dir::DirectoryNode, file::RegularNode, Ext2Fs, Inode};
pub struct InodeHolder {
inode: Inode,
dirty: bool,
}
pub struct InodeCache {
fs: Arc<Ext2Fs>,
synchronous: bool,
cache: AsyncMutex<LruCache<u32, Arc<IrqSafeRwLock<InodeHolder>>>>,
}
pub struct InodeAccess {
inode_cache: Arc<InodeCache>,
ino: u32,
}
impl InodeAccess {
pub fn new(inode_cache: Arc<InodeCache>, ino: u32) -> Self {
Self { inode_cache, ino }
}
pub fn ino(&self) -> u32 {
self.ino
}
pub fn cache(&self) -> &Arc<InodeCache> {
&self.inode_cache
}
pub async fn map<T, F: FnOnce(&Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.entry(self.ino).await?;
let lock = inode.read();
mapper(&lock.inode)
}
pub async fn map_mut<T, F: FnOnce(&mut Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.entry(self.ino).await?;
let mut lock = inode.write();
let result = mapper(&mut lock.inode);
self.inode_cache.put(self.ino, &mut lock).await?;
result
}
pub async fn amap<T, F: AsyncFnOnce(&Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.entry(self.ino).await?;
let lock = inode.read();
mapper(&lock.inode).await
}
pub async fn amap_mut<T, F: AsyncFnOnce(&mut Inode) -> Result<T, Error>>(
&self,
mapper: F,
) -> Result<T, Error> {
let inode = self.inode_cache.entry(self.ino).await?;
let mut lock = inode.write();
let result = mapper(&mut lock.inode).await;
self.inode_cache.put(self.ino, &mut lock).await?;
result
}
pub async fn map_blocks<T, F: Fn(&Inode, usize, &[u8]) -> Result<Option<T>, Error>>(
&self,
mapper: F,
) -> Result<Option<T>, Error> {
self.amap(async |inode| {
let block_count = inode.blocks(&self.inode_cache.fs);
for index in 0..block_count {
let result = self
.inode_cache
.fs
.with_inode_block(inode, index as u32, |block| mapper(inode, index, block))
.await?;
if let Some(result) = result {
return Ok(Some(result));
}
}
Ok(None)
})
.await
}
pub async fn map_blocks_mut<T, F: Fn(&Inode, usize, &mut [u8]) -> Result<Option<T>, Error>>(
&self,
mapper: F,
) -> Result<Option<T>, Error> {
self.amap(async |inode| {
let block_count = inode.blocks(&self.inode_cache.fs);
for index in 0..block_count {
let result = self
.inode_cache
.fs
.with_inode_block_mut(inode, index as u32, 0, |block| {
mapper(inode, index, block)
})
.await?;
if let Some(result) = result {
return Ok(Some(result));
}
}
Ok(None)
})
.await
}
pub async fn metadata(&self) -> Result<Metadata, Error> {
self.map(|inode| Ok(inode.metadata(&self.inode_cache.fs, self.ino)))
.await
}
pub async fn size(&self) -> Result<u64, Error> {
self.map(|inode| Ok(inode.size(&self.inode_cache.fs))).await
}
pub async fn update_metadata(&self, metadata: &Metadata) -> Result<(), Error> {
let uid = metadata
.uid
.bits()
.try_into()
.map_err(|_| Error::InvalidArgument)?;
let gid = metadata
.gid
.bits()
.try_into()
.map_err(|_| Error::InvalidArgument)?;
self.map_mut(|inode| {
inode.mtime = metadata.mtime as _;
inode.atime = metadata.mtime as _;
inode.ctime = metadata.ctime as _;
inode.mode.update_permissions(metadata.mode);
inode.uid = uid;
inode.gid = gid;
Ok(())
})
.await
}
async fn populate_inode(
fs: &Arc<Ext2Fs>,
ty: FileType,
mode: FileMode,
parent_ino: Option<u32>,
ino: u32,
) -> Result<NodeRef, Error> {
log::info!("ext2: allocated inode #{ino}");
let now = real_time().seconds as u32;
let mut imode = InodeMode::default_for_type(ty);
imode.update_permissions(mode);
fs.write_inode(
ino,
&Inode {
ctime: now,
mtime: now,
atime: now,
mode: imode,
..Inode::zeroed()
},
)
.await?;
let this = InodeAccess::new(fs.inode_cache.get().clone(), ino);
let fs = fs.clone();
let node = match ty {
FileType::Directory => DirectoryNode::create(fs, this, parent_ino).await?,
FileType::File => RegularNode::new(fs, this),
FileType::Symlink => todo!(),
_ => return Err(Error::NotImplemented),
};
Ok(node)
}
pub async fn allocate(
fs: &Arc<Ext2Fs>,
ty: FileType,
mode: FileMode,
parent_ino: Option<u32>,
) -> Result<NodeRef, Error> {
if parent_ino.is_none() && ty != FileType::Directory {
log::warn!("ext2: cannot allocate non-directory inode without a parent");
return Err(Error::InvalidOperation);
}
if !matches!(ty, FileType::Symlink | FileType::Directory | FileType::File) {
log::warn!("ext2: cannot allocate inode of type {ty:?}");
return Err(Error::InvalidOperation);
}
let ino = fs.allocate_inode(ty == FileType::Directory).await?;
match Self::populate_inode(fs, ty, mode, parent_ino, ino).await {
Ok(node) => Ok(node),
Err(error) => {
log::warn!("ext2: couldn't set up inode #{ino}: {error:?}");
// TODO free the inode and flush it from the cache
Err(error)
}
}
}
}
impl InodeCache {
pub fn with_capacity(fs: Arc<Ext2Fs>, bucket_capacity: usize, synchronous: bool) -> Self {
Self {
fs,
synchronous,
cache: AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 4)),
}
}
async fn evict_inode(
&self,
ino: u32,
inode: Arc<IrqSafeRwLock<InodeHolder>>,
) -> Result<(), Error> {
let inode = inode.read();
if inode.dirty {
assert!(!self.synchronous);
log::debug!("Flush dirty inode {ino}");
self.fs.write_inode(ino, &inode.inode).await?;
}
Ok(())
}
async fn fetch_inode(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
let inode = self.fs.read_inode(ino).await?;
Ok(Arc::new(IrqSafeRwLock::new(InodeHolder {
inode,
dirty: false,
})))
}
async fn entry(&self, ino: u32) -> Result<Arc<IrqSafeRwLock<InodeHolder>>, Error> {
if ino < 1 || ino > self.fs.total_inodes {
return Err(Error::InvalidFile);
}
let mut lock = self.cache.lock().await;
let (value, evicted) = lock
.try_get_or_insert_with_async(ino, || self.fetch_inode(ino))
.await?;
let value = value.clone();
if let Some((ino, holder)) = evicted {
if let Err(error) = self.evict_inode(ino, holder).await {
log::error!("ext2: inode flush error: ino={ino}, error={error:?}");
}
}
Ok(value)
}
async fn put(&self, ino: u32, holder: &mut InodeHolder) -> Result<(), Error> {
if self.synchronous {
// Immediately write-back
self.fs.write_inode(ino, &holder.inode).await
} else {
// Mark dirty
holder.dirty = true;
Ok(())
}
}
pub async fn flush(&self) -> Result<(), Error> {
let mut last_error = None;
let mut lock = self.cache.lock().await;
while let Some((ino, inode)) = lock.pop_entry() {
if let Err(error) = self.evict_inode(ino, inode).await {
log::error!("ext2: flush inode #{ino} error: {error:?}");
last_error = Some(error);
}
}
match last_error {
None => Ok(()),
Some(error) => Err(error),
}
}
}
impl Deref for InodeHolder {
type Target = Inode;
fn deref(&self) -> &Self::Target {
&self.inode
}
}
impl DerefMut for InodeHolder {
fn deref_mut(&mut self) -> &mut Self::Target {
self.dirty = true;
&mut self.inode
}
}
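
By contrast, a hedged sketch of how the surviving API is meant to be used (the call site is assumed, not part of this diff): map_mut() runs the mapper under the holder's write lock and then calls put(), which either writes the inode back immediately (synchronous mode) or marks the holder dirty so that a later flush(), e.g. on sync or unmount, performs the writeback.

async fn bump_link_count(node: &InodeAccess) -> Result<(), Error> {
    node.map_mut(|inode| {
        inode.inc_hard_count();
        Ok(())
    })
    .await?;
    // With synchronous = false this is where the dirty holder actually reaches
    // the disk; with synchronous = true it has already been written back.
    node.cache().flush().await
}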

@@ -0,0 +1,188 @@
use core::cmp::Ordering;
use libk::error::Error;
use crate::{
data::{FsReadonlyFeatures, DIRECT_BLOCK_COUNT},
Ext2Fs, Inode,
};
pub mod cache;
pub use cache::{InodeAccess, InodeCache, InodeHolder};
impl Inode {
async fn grow_direct(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l0_capacity = old_capacity.min(DIRECT_BLOCK_COUNT as u64);
let new_l0_capacity = new_capacity.min(DIRECT_BLOCK_COUNT as u64);
debug_assert!(old_l0_capacity <= new_l0_capacity);
log::debug!("Grow L0: {old_l0_capacity} -> {new_l0_capacity}");
for i in old_l0_capacity..new_l0_capacity {
let i = i as usize;
let block = fs.allocate_block().await?;
self.blocks.direct_blocks[i] = block;
}
Ok(())
}
async fn grow_l1(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l1_capacity = old_capacity
.saturating_sub(DIRECT_BLOCK_COUNT as u64)
.min(fs.pointers_per_block as u64);
let new_l1_capacity = new_capacity
.saturating_sub(DIRECT_BLOCK_COUNT as u64)
.min(fs.pointers_per_block as u64);
log::debug!("Grow L1: {old_l1_capacity} -> {new_l1_capacity}");
debug_assert!(old_l1_capacity <= new_l1_capacity);
if old_l1_capacity == 0 && new_l1_capacity != 0 {
// Allocate an indirect block
let block = fs.allocate_block().await?;
self.blocks.indirect_block_l1 = block;
}
for i in old_l1_capacity..new_l1_capacity {
// Allocate inner blocks
debug_assert_ne!(self.blocks.indirect_block_l1, 0);
let i = i as usize;
let block = fs.allocate_block().await?;
fs.write_index(self.blocks.indirect_block_l1, i, block)
.await?;
}
Ok(())
}
async fn grow_l2(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
let old_l2_capacity = (old_capacity as usize)
.saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
.min(fs.pointers_per_block * fs.pointers_per_block);
let new_l2_capacity = (new_capacity as usize)
.saturating_sub(DIRECT_BLOCK_COUNT + fs.pointers_per_block)
.min(fs.pointers_per_block * fs.pointers_per_block);
let old_l2_l0 = old_l2_capacity.div_ceil(fs.pointers_per_block);
let new_l2_l0 = new_l2_capacity.div_ceil(fs.pointers_per_block);
log::debug!(
"Grow L2: {old_l2_capacity} ({old_l2_l0} L2-1) -> {new_l2_capacity} ({new_l2_l0} L2-1)"
);
if old_l2_capacity == 0 && new_l2_capacity != 0 {
// Allocate L2 indirect block
let block = fs.allocate_block().await?;
self.blocks.indirect_block_l2 = block;
}
// Grow L2 direct-indirect block
for i in old_l2_l0..new_l2_l0 {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let block = fs.allocate_block().await?;
fs.write_index(self.blocks.indirect_block_l2, i, block)
.await?;
}
// Grow L2 indirect-indirect blocks
for i in old_l2_capacity..new_l2_capacity {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let l1i = i / fs.pointers_per_block;
let l0i = i % fs.pointers_per_block;
let indirect = fs.read_index(self.blocks.indirect_block_l2, l1i).await?;
debug_assert_ne!(indirect, 0);
let block = fs.allocate_block().await?;
fs.write_index(indirect, l0i, block).await?;
}
Ok(())
}
fn set_size(&mut self, fs: &Ext2Fs, size: u64) {
let block_count = size.div_ceil(fs.block_size as u64);
if fs
.write_features
.contains(FsReadonlyFeatures::FILE_SIZE_64_BIT)
{
self.size_upper = (size >> 32) as u32;
self.size_lower = size as u32;
} else {
if size > u32::MAX as u64 {
todo!("File too large")
}
self.size_lower = size as u32;
}
self.sector_count = block_count as u32 * (fs.block_size / 512) as u32;
}
pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<bool, Error> {
if size == self.size(fs) {
return Ok(false);
}
// TODO check max inode size
let new_blocks = size.div_ceil(fs.block_size as u64);
let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);
if new_blocks as usize
> DIRECT_BLOCK_COUNT
+ fs.pointers_per_block
+ fs.pointers_per_block * fs.pointers_per_block
{
log::warn!("ext2: only L0/L1 are supported");
return Err(Error::InvalidArgument);
}
match old_blocks.cmp(&new_blocks) {
// Grow
Ordering::Less => {
log::debug!("Grow inode: {old_blocks} -> {new_blocks} blocks");
self.grow_direct(fs, old_blocks, new_blocks).await?;
self.grow_l1(fs, old_blocks, new_blocks).await?;
self.grow_l2(fs, old_blocks, new_blocks).await?;
}
// Shrink
Ordering::Greater => todo!(),
// No change
Ordering::Equal => (),
}
self.set_size(fs, size);
Ok(true)
}
pub fn inc_hard_count(&mut self) {
self.hard_links += 1;
}
pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<bool, Error> {
if capacity > self.size(fs) {
self.resize(fs, capacity).await
} else {
Ok(false)
}
}
}
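
As a worked example of the limit checked in resize() (the 1 KiB block size is illustrative, not taken from the diff): pointers_per_block = 1024 / 4 = 256, so the range addressable through the doubly-indirect level is 12 + 256 + 256 * 256 = 65 804 blocks, roughly 64 MiB of file data; anything larger is rejected with InvalidArgument.

fn max_resize_blocks(block_size: usize) -> usize {
    let direct_block_count = 12; // DIRECT_BLOCK_COUNT for ext2
    let pointers_per_block = block_size / core::mem::size_of::<u32>();
    // L0 (direct) + L1 (singly indirect) + L2 (doubly indirect)
    direct_block_count + pointers_per_block + pointers_per_block * pointers_per_block
}
// max_resize_blocks(1024) == 65_804, i.e. about 64 MiB of data blocks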

@@ -107,11 +107,7 @@ impl Ext2Fs {
             log::error!("Ext2 init error: {:?}", e);
         })?;
         let fs = Arc::new(fs);
-        // let inode_cache = InodeCache::uncached(fs.clone());
-        let inode_cache = match cached {
-            false => InodeCache::uncached(fs.clone()),
-            true => InodeCache::with_capacity(fs.clone(), 64),
-        };
+        let inode_cache = InodeCache::with_capacity(fs.clone(), 64, !cached);
         fs.inode_cache.init(inode_cache.into());