memfs: add support for L2 blocks in bvec

Mark Poliakov 2024-11-28 09:05:23 +02:00
parent 2e769df015
commit 669a0b7b9c
3 changed files with 236 additions and 42 deletions
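Review note: the headline change is a new double-indirect (L2) level in BVec. A sketch of the capacity math, assuming 4 KiB blocks and 512 pointers per indirect block (figures implied by the "128K in L0" comment and the test expectations below; the crate's real constants are authoritative):

// Assumed constants for illustration, not the crate's actual definitions:
const BLOCK_SIZE: usize = 4096; // bytes per data block (128K / 32 L0 blocks)
const ENTRY_COUNT: usize = 512; // pointers per 4 KiB indirect block
const L0_BLOCKS: usize = 32;    // direct data blocks
const L1_BLOCKS: usize = 16;    // single-indirect blocks

fn main() {
    let l0 = L0_BLOCKS * BLOCK_SIZE;                 // 128 KiB direct
    let l1 = L1_BLOCKS * ENTRY_COUNT * BLOCK_SIZE;   // 32 MiB via L1
    let l2 = ENTRY_COUNT * ENTRY_COUNT * BLOCK_SIZE; // 1 GiB via the new L2 level
    println!("max memfs file: {} bytes", l0 + l1 + l2); // ~1.03 GiB
}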

View File

@@ -38,6 +38,9 @@ static DUMMY_INTERRUPT_MASK: AtomicBool = AtomicBool::new(true);
impl Architecture for ArchitectureImpl {
type PerCpuData = ();
type CpuFeatures = ();
type BreakpointType = u8;
const BREAKPOINT_VALUE: Self::BreakpointType = 0x00;
fn local_cpu() -> *mut Self::PerCpuData {
unimplemented!()
@@ -82,6 +85,18 @@ impl Architecture for ArchitectureImpl {
fn halt() -> ! {
unimplemented!()
}
fn cpu_enabled_features<S: Scheduler>(
_cpu: &kernel_arch_interface::cpu::CpuImpl<Self, S>,
) -> Option<&Self::CpuFeatures> {
unimplemented!()
}
fn cpu_available_features<S: Scheduler>(
_cpu: &kernel_arch_interface::cpu::CpuImpl<Self, S>,
) -> Option<&Self::CpuFeatures> {
unimplemented!()
}
}
impl KernelTableManager for KernelTableManagerImpl {
@@ -169,6 +184,10 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator> TaskContext<K, PA>
fn kernel_closure<F: FnOnce() -> ! + Send + 'static>(_f: F) -> Result<Self, Error> {
unimplemented!()
}
fn set_thread_pointer(&self, _tp: usize) {
unimplemented!()
}
}
#[no_mangle]

View File

@@ -53,6 +53,12 @@ pub struct BlockIndirect<'a, A: BlockAllocator> {
inner: BlockRaw<'a, A>,
}
/// Block containing indirection pointers to indirection pointers to data blocks
#[repr(transparent)]
pub struct BlockIndirectIndirect<'a, A: BlockAllocator> {
inner: BlockRaw<'a, A>,
}
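Review note: this adds one more level of pointer-chasing — the L2 root holds ENTRY_COUNT BlockIndirect entries, each of which addresses ENTRY_COUNT data blocks. A simplified stand-alone model of the three flavors (heap containers standing in for BlockRaw and the allocator; ENTRY_COUNT = 512 assumed):

const ENTRY_COUNT: usize = 512; // assumed

struct Data(Box<[u8; 4096]>);                   // one data block
struct Indirect(Vec<Option<Data>>);             // L1: ENTRY_COUNT slots -> data blocks
struct IndirectIndirect(Vec<Option<Indirect>>); // L2: ENTRY_COUNT slots -> L1 blocks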
impl<'a, A: BlockAllocator> BlockRef<'a, A> {
const fn null() -> Self {
Self {
@@ -248,6 +254,11 @@ impl<A: BlockAllocator> BlockIndirect<'_, A> {
Ok(Self { inner })
}
/// Replaces self with a null block and drops any data that might've been allocated
pub fn set_null(&mut self) {
self.inner = BlockRaw::null();
}
/// Returns `true` if the block this structure refers to has not yet been allocated
#[inline]
pub fn is_null(&self) -> bool {
@@ -281,6 +292,57 @@ impl<A: BlockAllocator> Drop for BlockIndirect<'_, A> {
}
}
// L2 indirect block
impl<A: BlockAllocator> BlockIndirectIndirect<'_, A> {
/// Dummy entry representing a missing block
pub const fn null() -> Self {
Self {
inner: BlockRaw::null(),
}
}
/// Allocates a new indirection block
pub fn new() -> Result<Self, Error> {
let mut inner = BlockRaw::new()?;
for item in unsafe { inner.as_uninit_indirect_mut() } {
item.write(BlockData::null());
}
Ok(Self { inner })
}
/// Returns `true` if the block this structure refers to has not yet been allocated
#[inline]
pub fn is_null(&self) -> bool {
self.inner.is_null()
}
}
impl<'a, A: BlockAllocator> Deref for BlockIndirectIndirect<'a, A> {
type Target = [BlockIndirect<'a, A>; ENTRY_COUNT];
fn deref(&self) -> &Self::Target {
unsafe { &*(self.inner.inner.as_ref() as *const _ as *const _) }
}
}
impl<A: BlockAllocator> DerefMut for BlockIndirectIndirect<'_, A> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *(self.inner.inner.as_mut() as *mut _ as *mut _) }
}
}
impl<A: BlockAllocator> Drop for BlockIndirectIndirect<'_, A> {
fn drop(&mut self) {
if self.is_null() {
return;
}
for item in self.iter_mut() {
item.set_null();
}
}
}
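Review note: a usage sketch for the new type, assuming some BlockAllocator impl A like the tests below construct. Deref/DerefMut expose the root as [BlockIndirect; ENTRY_COUNT], and Drop nulls every slot, so the whole chain is released:

fn l2_roundtrip<A: BlockAllocator>() -> Result<(), Error> {
    let mut l2 = BlockIndirectIndirect::<A>::new()?; // allocates the root, all slots null
    assert!(!l2.is_null());
    l2[0] = BlockIndirect::new()?; // fill one L1 slot through DerefMut
    assert!(!l2[0].is_null());
    Ok(()) // dropping `l2` frees the L1 block, then the root itself
}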
#[cfg(test)]
mod tests {
use core::sync::atomic::Ordering;

View File

@@ -7,11 +7,10 @@ use core::{
use yggdrasil_abi::error::Error;
use crate::block::{self, BlockAllocator, BlockData, BlockIndirect, BlockIndirectIndirect};
// ~1.03G total
const L0_BLOCKS: usize = 32; // 128K in L0
const L1_BLOCKS: usize = 16; // 32M in L1
/// Block vector for efficient in-memory files
pub struct BVec<'a, A: BlockAllocator> {
@@ -19,6 +18,7 @@ pub struct BVec<'a, A: BlockAllocator> {
size: usize,
l0: [BlockData<'a, A>; L0_BLOCKS],
l1: [BlockIndirect<'a, A>; L1_BLOCKS],
l2: BlockIndirectIndirect<'a, A>,
}
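Review note: with the new l2 field, a block index resolves across three levels; assuming ENTRY_COUNT = 512, the ranges below mirror the updated index_unchecked logic further down:

// Illustrative only; assumes block::ENTRY_COUNT = 512.
fn level_of(index: usize) -> &'static str {
    const ENTRY_COUNT: usize = 512;
    if index < L0_BLOCKS {
        "L0 (direct)"          // indices 0..32
    } else if index < L0_BLOCKS + L1_BLOCKS * ENTRY_COUNT {
        "L1 (single indirect)" // indices 32..8224
    } else if index < L0_BLOCKS + (L1_BLOCKS + ENTRY_COUNT) * ENTRY_COUNT {
        "L2 (double indirect)" // indices 8224..270368
    } else {
        "out of range"         // resize() rejects capacities past this
    }
}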
impl<'a, A: BlockAllocator> BVec<'a, A> {
@@ -44,6 +44,7 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
size: 0,
l0: unsafe { MaybeUninit::array_assume_init(l0) },
l1: unsafe { MaybeUninit::array_assume_init(l1) },
l2: BlockIndirectIndirect::null(),
}
}
@@ -71,61 +72,111 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
self.size
}
#[inline]
fn l1_indirect_count(cap: usize) -> usize {
core::cmp::min(
cap.saturating_sub(L0_BLOCKS).div_ceil(block::ENTRY_COUNT),
L1_BLOCKS,
)
}
#[inline]
fn l2_indirect_count(cap: usize) -> usize {
cap.saturating_sub(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT)
.div_ceil(block::ENTRY_COUNT)
}
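Review note: a worked example of the two helpers, assuming ENTRY_COUNT = 512 and using the capacity from bvec_test_capacity below:

// cap = 200000 data blocks (~781 MiB at 4 KiB each):
// l1_indirect_count(200000) = min(ceil((200000 - 32) / 512), 16) = 16  // L1 saturated
// l2_indirect_count(200000) = ceil((200000 - 32 - 16 * 512) / 512)
//                           = ceil(191776 / 512) = 375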
fn grow_l1(&mut self, old: usize, new: usize) -> Result<(), Error> {
let old_l1_cap = Self::l1_indirect_count(old);
let new_l1_cap = Self::l1_indirect_count(new);
debug_assert!(old_l1_cap <= new_l1_cap);
for l1 in &mut self.l1[old_l1_cap..new_l1_cap] {
debug_assert!(l1.is_null());
*l1 = BlockIndirect::new()?;
}
Ok(())
}
fn grow_l2(&mut self, old: usize, new: usize) -> Result<(), Error> {
let old_l2_cap = Self::l2_indirect_count(old);
let new_l2_cap = Self::l2_indirect_count(new);
if old_l2_cap == 0 && new_l2_cap != 0 {
// Allocate the indirect->indirect block
self.l2 = BlockIndirectIndirect::new()?;
}
if old_l2_cap < new_l2_cap {
debug_assert!(!self.l2.is_null());
for l2 in &mut self.l2[old_l2_cap..new_l2_cap] {
*l2 = BlockIndirect::new()?;
}
}
Ok(())
}
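Review note: the 0 -> nonzero transition costs one extra allocation for the L2 root, then one per new L2 indirect block. For example (ENTRY_COUNT = 512 assumed):

// grow from cap = L0 + L1 (no L2) to cap = L0 + L1 + 1000:
//   old_l2_cap = 0, new_l2_cap = ceil(1000 / 512) = 2
//   indirection allocations: 1 (L2 root) + 2 (L2 indirect blocks)
// this is the "+ 1 + 2" term checked by bvec_grow_shrink_l2 below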
fn shrink_l1(&mut self, old: usize, new: usize) {
let old_l1_cap = Self::l1_indirect_count(old);
let new_l1_cap = Self::l1_indirect_count(new);
debug_assert!(old_l1_cap >= new_l1_cap);
for l1 in &mut self.l1[new_l1_cap..old_l1_cap] {
debug_assert!(!l1.is_null());
*l1 = BlockIndirect::null();
}
}
fn shrink_l2(&mut self, old: usize, new: usize) {
let old_l2_cap = Self::l2_indirect_count(old);
let new_l2_cap = Self::l2_indirect_count(new);
debug_assert!(old_l2_cap >= new_l2_cap);
if new_l2_cap < old_l2_cap {
debug_assert!(!self.l2.is_null());
for l2 in &mut self.l2[new_l2_cap..old_l2_cap] {
l2.set_null();
}
}
if new_l2_cap == 0 && old_l2_cap != 0 {
// Free the L2 block itself
self.l2 = BlockIndirectIndirect::null();
}
}
/// Resizes the vector to hold exactly `new_capacity` data blocks
pub fn resize(&mut self, new_capacity: usize) -> Result<(), Error> {
if new_capacity
> L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * block::ENTRY_COUNT
{
log::warn!("Requested file capacity too large: {new_capacity}");
return Err(Error::OutOfMemory);
}
match new_capacity.cmp(&self.capacity) {
Ordering::Less => {
// Shrink data blocks
for index in new_capacity..self.capacity {
let block = &mut self[index];
debug_assert!(!block.is_null());
block.set_null();
}
// Shrink L1 and L2 blocks
self.shrink_l1(self.capacity, new_capacity);
self.shrink_l2(self.capacity, new_capacity);
}
Ordering::Greater => {
// Allocate more L1 and L2 blocks if necessary
self.grow_l1(self.capacity, new_capacity)?;
self.grow_l2(self.capacity, new_capacity)?;
// Grow data blocks
for index in self.capacity..new_capacity {
@@ -238,8 +289,18 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
return &l1r[l0i];
}
index -= L1_BLOCKS * block::ENTRY_COUNT;
if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
let l2_outer = index / block::ENTRY_COUNT;
let l2_inner = index % block::ENTRY_COUNT;
let l2 = &self.l2[l2_outer];
debug_assert!(!l2.is_null());
return &l2[l2_inner];
} else {
panic!("Block index too large");
}
}
unsafe fn index_unchecked_mut(&mut self, mut index: usize) -> &mut BlockData<'a, A> {
@@ -256,8 +317,18 @@ impl<'a, A: BlockAllocator> BVec<'a, A> {
return &mut l1r[l0i];
}
index -= L1_BLOCKS * block::ENTRY_COUNT;
if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
let l2_outer = index / block::ENTRY_COUNT;
let l2_inner = index % block::ENTRY_COUNT;
let l2 = &mut self.l2[l2_outer];
debug_assert!(!l2.is_null());
return &mut l2[l2_inner];
} else {
panic!("Block index too large");
}
}
}
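Review note: tracing a lookup through the new L2 arm, assuming ENTRY_COUNT = 512:

// index = 9000:
//   9000 >= L0_BLOCKS         -> index = 9000 - 32   = 8968
//   8968 >= 16 * 512 = 8192   -> index = 8968 - 8192 = 776
//   l2_outer = 776 / 512 = 1, l2_inner = 776 % 512 = 264
//   => self.l2[1][264]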
@@ -309,6 +380,48 @@ mod bvec_allocation {
bvec::{BVec, L0_BLOCKS, L1_BLOCKS},
};
#[test]
fn bvec_test_capacity() {
test_allocator_with_counter!(A_COUNTER, A);
let capacity = 200000; // ~781MiB file
assert_eq!(BVec::<A>::l1_indirect_count(capacity), L1_BLOCKS);
assert_eq!(BVec::<A>::l2_indirect_count(capacity), 375);
}
#[test]
fn bvec_grow_shrink_l2() {
test_allocator_with_counter!(A_COUNTER, A);
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
{
let mut bvec = BVec::<A>::new();
// L0 + L1 + 1000 L2 blocks
const N: usize = L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 1000;
bvec.resize(N).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), N + L1_BLOCKS + 1 + 2);
// Grow some more
const M: usize = N + 10000;
bvec.resize(M).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), M + L1_BLOCKS + 1 + 22);
// Shrink back to N
bvec.resize(N).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), N + L1_BLOCKS + 1 + 2);
// Shrink to less than L2
const O: usize = L0_BLOCKS + 3;
bvec.resize(O).unwrap();
assert_eq!(A_COUNTER.load(Ordering::Acquire), O + 1);
}
}
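Review note: the expected counter values decompose as data blocks + L1 indirect + L2 root + L2 indirect (ENTRY_COUNT = 512 assumed):

// N: 9224 data + 16 L1 + 1 L2 root + ceil(1000 / 512)  = 2 L2 indirect
// M: 19224 data + 16 L1 + 1 L2 root + ceil(11000 / 512) = 22 L2 indirect
// O: 35 data + 1 L1 indirect (3 blocks spill past L0; L2 fully freed)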
#[test]
fn bvec_grow_shrink() {
test_allocator_with_counter!(A_COUNTER, A);