// 295 lines
// 9.4 KiB
// Rust

use core::cmp::Ordering;
use libk::error::Error;
use crate::{
data::{FsReadonlyFeatures, DIRECT_BLOCK_COUNT},
Ext2Fs, Inode,
};
pub mod cache;
pub use cache::{InodeAccess, InodeCache, InodeHolder};
impl Inode {
#[inline(always)]
fn l0_capacity(blocks: u64) -> u64 {
    // The direct (L0) pointer array can address at most
    // DIRECT_BLOCK_COUNT blocks; anything beyond that spills to L1/L2.
    u64::min(blocks, DIRECT_BLOCK_COUNT as u64)
}
#[inline(always)]
fn l1_capacity(fs: &Ext2Fs, blocks: u64) -> u64 {
    // Blocks past the direct area are served by the single-indirect (L1)
    // table, which holds at most `pointers_per_block` entries.
    let beyond_direct = blocks.saturating_sub(DIRECT_BLOCK_COUNT as u64);
    u64::min(beyond_direct, fs.pointers_per_block as u64)
}
#[inline(always)]
fn l2_capacity(fs: &Ext2Fs, blocks: u64) -> (u64, u64) {
    let ppb = fs.pointers_per_block as u64;
    // Blocks past the direct + single-indirect areas land in the
    // double-indirect (L2) tree, capped at ppb^2 leaf entries.
    let consumed = (DIRECT_BLOCK_COUNT + fs.pointers_per_block) as u64;
    let l2_l1 = u64::min(blocks.saturating_sub(consumed), ppb * ppb);
    // Each first-level pointer covers `ppb` leaves, so round up.
    let l2_l0 = l2_l1.div_ceil(ppb);
    // (first-level pointers in use, leaf data blocks in use)
    (l2_l0, l2_l1)
}
/// Grows the direct (L0) level: allocates one fresh data block for every
/// newly used direct-pointer slot between the old and new capacities.
async fn grow_direct(
    &mut self,
    fs: &Ext2Fs,
    old_capacity: u64,
    new_capacity: u64,
) -> Result<(), Error> {
    let old_l0 = Self::l0_capacity(old_capacity);
    let new_l0 = Self::l0_capacity(new_capacity);
    log::debug!("Grow L0: {} -> {}", old_l0, new_l0);
    debug_assert!(old_l0 <= new_l0);
    for slot in old_l0..new_l0 {
        // Each new slot gets its own freshly allocated data block.
        self.blocks.direct_blocks[slot as usize] = fs.allocate_block().await?;
    }
    Ok(())
}
/// Shrinks the direct (L0) level: frees the data block behind every slot
/// that falls outside the new capacity and zeroes the freed pointer.
async fn shrink_direct(
    &mut self,
    fs: &Ext2Fs,
    old_capacity: u64,
    new_capacity: u64,
) -> Result<(), Error> {
    let old_l0 = Self::l0_capacity(old_capacity);
    let new_l0 = Self::l0_capacity(new_capacity);
    log::debug!("Shrink L0: {} -> {}", old_l0, new_l0);
    debug_assert!(old_l0 >= new_l0);
    for slot in new_l0..old_l0 {
        let slot = slot as usize;
        // Return the data block to the allocator, then clear the pointer.
        fs.free_block(self.blocks.direct_blocks[slot]).await?;
        self.blocks.direct_blocks[slot] = 0;
    }
    Ok(())
}
/// Grows the single-indirect (L1) level: allocates the L1 table on its
/// first use, then one data block per newly used table entry.
async fn grow_l1(
    &mut self,
    fs: &Ext2Fs,
    old_capacity: u64,
    new_capacity: u64,
) -> Result<(), Error> {
    let old_l1 = Self::l1_capacity(fs, old_capacity);
    let new_l1 = Self::l1_capacity(fs, new_capacity);
    log::debug!("Grow L1: {} -> {}", old_l1, new_l1);
    debug_assert!(old_l1 <= new_l1);
    // The indirect table itself only exists once the first L1 entry is used.
    if old_l1 == 0 && new_l1 != 0 {
        self.blocks.indirect_block_l1 = fs.allocate_block().await?;
    }
    for entry in old_l1..new_l1 {
        debug_assert_ne!(self.blocks.indirect_block_l1, 0);
        // Allocate a data block and record it in the indirect table.
        let block = fs.allocate_block().await?;
        fs.write_index(self.blocks.indirect_block_l1, entry as usize, block)
            .await?;
    }
    Ok(())
}
/// Shrinks the single-indirect (L1) level: frees one data block per removed
/// table entry, then releases the L1 table itself once it is empty.
async fn shrink_l1(
    &mut self,
    fs: &Ext2Fs,
    old_capacity: u64,
    new_capacity: u64,
) -> Result<(), Error> {
    let old_l1 = Self::l1_capacity(fs, old_capacity);
    let new_l1 = Self::l1_capacity(fs, new_capacity);
    log::debug!("Shrink L1: {} -> {}", old_l1, new_l1);
    debug_assert!(old_l1 >= new_l1);
    for entry in new_l1..old_l1 {
        debug_assert_ne!(self.blocks.indirect_block_l1, 0);
        // Clear the table entry and release the data block it pointed at.
        let block = fs
            .remove_index(self.blocks.indirect_block_l1, entry as usize)
            .await?;
        fs.free_block(block).await?;
    }
    if old_l1 != 0 && new_l1 == 0 {
        // No entries remain, so the indirect table itself can be freed too.
        fs.free_block(self.blocks.indirect_block_l1).await?;
        self.blocks.indirect_block_l1 = 0;
    }
    Ok(())
}
/// Grows the double-indirect (L2) level from `old_capacity` to
/// `new_capacity` total blocks.
///
/// Order matters here: the top-level L2 table is allocated first, then the
/// new first-level (direct-indirect) entries, and only then the leaf data
/// blocks — the leaf loop reads back the first-level pointers it relies on.
async fn grow_l2(
&mut self,
fs: &Ext2Fs,
old_capacity: u64,
new_capacity: u64,
) -> Result<(), Error> {
// (l0, l1) = (first-level pointers in use, leaf data blocks in use)
let (old_l2_l0, old_l2_l1) = Self::l2_capacity(fs, old_capacity);
let (new_l2_l0, new_l2_l1) = Self::l2_capacity(fs, new_capacity);
log::debug!("Grow L2: {old_l2_l1} ({old_l2_l0} L2-1) -> {new_l2_l1} ({new_l2_l0} L2-1)");
debug_assert!(old_l2_l1 <= new_l2_l1);
debug_assert!(old_l2_l0 <= new_l2_l0);
if old_l2_l1 == 0 && new_l2_l1 != 0 {
// Allocate L2 indirect block (first use of the double-indirect tree)
let block = fs.allocate_block().await?;
self.blocks.indirect_block_l2 = block;
}
// Grow L2 direct-indirect block: allocate the new first-level tables
for i in old_l2_l0..new_l2_l0 {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let i = i as usize;
let block = fs.allocate_block().await?;
fs.write_index(self.blocks.indirect_block_l2, i, block)
.await?;
}
// Grow L2 indirect-indirect blocks: allocate the leaf data blocks
for i in old_l2_l1..new_l2_l1 {
debug_assert_ne!(self.blocks.indirect_block_l2, 0);
let i = i as usize;
// Split the leaf index into (first-level slot, entry within that table)
let l1i = i / fs.pointers_per_block;
let l0i = i % fs.pointers_per_block;
// First-level pointer must already exist (written above or in a prior grow)
let indirect = fs.read_index(self.blocks.indirect_block_l2, l1i).await?;
debug_assert_ne!(indirect, 0);
let block = fs.allocate_block().await?;
fs.write_index(indirect, l0i, block).await?;
}
Ok(())
}
/// Shrinks the double-indirect (L2) level from `old_capacity` down to
/// `new_capacity` total blocks.
///
/// Teardown is the reverse of `grow_l2`: leaf data blocks are released
/// first, then the now-unused first-level (direct-indirect) tables, and
/// finally the top-level L2 table once the whole tree is empty.
async fn shrink_l2(
    &mut self,
    fs: &Ext2Fs,
    old_capacity: u64,
    new_capacity: u64,
) -> Result<(), Error> {
    let (old_l2_l0, old_l2_l1) = Self::l2_capacity(fs, old_capacity);
    let (new_l2_l0, new_l2_l1) = Self::l2_capacity(fs, new_capacity);
    // Fixed copy-pasted "Grow L2" wording: this is the shrink path.
    log::debug!("Shrink L2: {old_l2_l1} ({old_l2_l0} L2-1) -> {new_l2_l1} ({new_l2_l0} L2-1)");
    debug_assert!(old_l2_l1 >= new_l2_l1);
    debug_assert!(old_l2_l0 >= new_l2_l0);
    // Shrink L2 indirect-indirect blocks: free the leaf data blocks
    for i in new_l2_l1..old_l2_l1 {
        debug_assert_ne!(self.blocks.indirect_block_l2, 0);
        let i = i as usize;
        // Split the leaf index into (first-level slot, entry within that table)
        let l1i = i / fs.pointers_per_block;
        let l0i = i % fs.pointers_per_block;
        let indirect = fs.read_index(self.blocks.indirect_block_l2, l1i).await?;
        debug_assert_ne!(indirect, 0);
        // Clear the leaf pointer and release the data block it referenced
        let block = fs.remove_index(indirect, l0i).await?;
        fs.free_block(block).await?;
    }
    // Shrink L2 direct-indirect block: free unused first-level tables
    for i in new_l2_l0..old_l2_l0 {
        debug_assert_ne!(self.blocks.indirect_block_l2, 0);
        let i = i as usize;
        let block = fs.remove_index(self.blocks.indirect_block_l2, i).await?;
        fs.free_block(block).await?;
    }
    if new_l2_l1 == 0 && old_l2_l1 != 0 {
        // The double-indirect tree is now empty; drop the top-level table
        fs.free_block(self.blocks.indirect_block_l2).await?;
        self.blocks.indirect_block_l2 = 0;
    }
    Ok(())
}
/// Records `size` (in bytes) in the inode's on-disk size fields and
/// refreshes the derived 512-byte sector count.
fn set_size(&mut self, fs: &Ext2Fs, size: u64) {
    let block_count = size.div_ceil(fs.block_size as u64);
    let large_files = fs
        .write_features
        .contains(FsReadonlyFeatures::FILE_SIZE_64_BIT);
    if large_files {
        // 64-bit sizes are split across the two 32-bit on-disk fields.
        self.size_upper = (size >> 32) as u32;
    } else if size > u32::MAX as u64 {
        todo!("File too large")
    }
    self.size_lower = size as u32;
    // The on-disk sector count is expressed in 512-byte units regardless
    // of the filesystem block size.
    self.sector_count = block_count as u32 * (fs.block_size / 512) as u32;
}
/// Resizes the inode's data to `size` bytes, allocating or freeing blocks
/// across the direct (L0), single-indirect (L1) and double-indirect (L2)
/// levels as needed.
///
/// Returns `Ok(true)` if anything changed, `Ok(false)` if `size` already
/// matches the current size. Returns `Err(Error::InvalidArgument)` if the
/// new size would require the (unsupported) triple-indirect level.
pub async fn resize(&mut self, fs: &Ext2Fs, size: u64) -> Result<bool, Error> {
    if size == self.size(fs) {
        return Ok(false);
    }
    // TODO check max inode size
    let new_blocks = size.div_ceil(fs.block_size as u64);
    let old_blocks = self.size(fs).div_ceil(fs.block_size as u64);
    // L0 + L1 + L2 is the maximum addressable without triple indirection.
    if new_blocks as usize
        > DIRECT_BLOCK_COUNT
            + fs.pointers_per_block
            + fs.pointers_per_block * fs.pointers_per_block
    {
        // Fixed misleading message: L0/L1/L2 are all handled below; only
        // the triple-indirect (L3) level is unimplemented.
        log::warn!("ext2: only L0/L1/L2 are supported");
        return Err(Error::InvalidArgument);
    }
    match old_blocks.cmp(&new_blocks) {
        // Grow bottom-up so lower levels fill before higher ones
        Ordering::Less => {
            log::debug!("Grow inode: {old_blocks} -> {new_blocks} blocks");
            self.grow_direct(fs, old_blocks, new_blocks).await?;
            self.grow_l1(fs, old_blocks, new_blocks).await?;
            self.grow_l2(fs, old_blocks, new_blocks).await?;
        }
        // Shrink top-down, the reverse of growth
        Ordering::Greater => {
            log::debug!("Shrink inode: {old_blocks} -> {new_blocks} blocks");
            self.shrink_l2(fs, old_blocks, new_blocks).await?;
            self.shrink_l1(fs, old_blocks, new_blocks).await?;
            self.shrink_direct(fs, old_blocks, new_blocks).await?;
        }
        // Same block count: only the recorded byte size changes
        Ordering::Equal => (),
    }
    self.set_size(fs, size);
    Ok(true)
}
/// Increments the inode's hard link count (one more directory entry now
/// references this inode).
pub fn inc_hard_count(&mut self) {
self.hard_links += 1;
}
/// Ensures the inode can hold at least `capacity` bytes, growing it when
/// needed; never shrinks. Returns `Ok(true)` iff a resize happened.
pub async fn reserve(&mut self, fs: &Ext2Fs, capacity: u64) -> Result<bool, Error> {
    if capacity <= self.size(fs) {
        // Already large enough — nothing to do.
        return Ok(false);
    }
    self.resize(fs, capacity).await
}
}