fs/ext2: read-only basic impl

Mark Poliakov 2024-07-30 17:46:50 +03:00
parent f9b2c64edb
commit c7d5294f86
27 changed files with 1580 additions and 54 deletions

Cargo.lock

@ -78,6 +78,12 @@ dependencies = [
"memchr",
]
[[package]]
name = "allocator-api2"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "aml"
version = "0.16.4"
@ -509,6 +515,19 @@ dependencies = [
"windows-sys",
]
[[package]]
name = "ext2"
version = "0.1.0"
dependencies = [
"bytemuck",
"libk",
"libk-mm",
"libk-util",
"log",
"static_assertions",
"yggdrasil-abi",
]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
@ -684,6 +703,10 @@ name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
"ahash",
"allocator-api2",
]
[[package]]
name = "heck"
@ -911,8 +934,11 @@ dependencies = [
"libk-mm",
"libk-util",
"log",
"lru",
"serde",
"serde_json",
"static_assertions",
"uuid",
"yggdrasil-abi",
]
@ -1034,6 +1060,15 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "lru"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
dependencies = [
"hashbrown",
]
[[package]]
name = "memchr"
version = "2.7.2"
@ -1650,6 +1685,15 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
dependencies = [
"bytemuck",
]
[[package]]
name = "vcpkg"
version = "0.2.15"
@ -2116,6 +2160,7 @@ dependencies = [
"device-api-macros",
"device-tree",
"elf 0.7.2",
"ext2",
"futures-util",
"git-version",
"kernel-arch",


@ -34,6 +34,7 @@ ygg_driver_input = { path = "driver/input" }
kernel-fs = { path = "driver/fs/kernel-fs" }
memfs = { path = "driver/fs/memfs" }
ext2 = { path = "driver/fs/ext2" }
atomic_enum = "0.3.0"
bitflags = "2.6.0"


@ -267,13 +267,13 @@ impl NvmeController {
let cpu_index = cpu_index();
let ioq = &self.ioqs.get()[cpu_index as usize];
log::debug!(
"{:?} ioq #{}, nsid={}, lba={:#x}",
direction,
cpu_index,
nsid,
lba
);
// log::debug!(
// "{:?} ioq #{}, nsid={}, lba={:#x}",
// direction,
// cpu_index,
// nsid,
// lba
// );
let cmd_id = match direction {
IoDirection::Read => ioq.submit(


@ -0,0 +1,15 @@
[package]
name = "ext2"
version = "0.1.0"
edition = "2021"
authors = ["Mark Poliakov <mark@alnyan.me>"]
[dependencies]
yggdrasil-abi = { path = "../../../../lib/abi" }
libk = { path = "../../../libk" }
libk-mm = { path = "../../../libk/libk-mm" }
libk-util = { path = "../../../libk/libk-util" }
bytemuck = { version = "1.16.1", features = ["derive"] }
static_assertions = "1.1.0"
log = "0.4.22"


@ -0,0 +1,214 @@
use core::{any::Any, mem::MaybeUninit, ops::Range, str::FromStr};
use alloc::{sync::Arc, vec};
use libk::{
block,
error::Error,
task::sync::{MappedAsyncMutexGuard, Mutex},
vfs::{
block::cache::{CachedBlock, CachedBlockRef},
CommonImpl, DirectoryImpl, DirectoryOpenPosition, InstanceData, Metadata, Node, NodeFlags,
NodeRef, RegularImpl,
},
};
use libk_util::lru_hash_table::LruCache;
use yggdrasil_abi::{
io::{DirectoryEntry, FileMode, FileType, GroupId, OpenOptions, UserId},
util::FixedString,
};
use crate::{Dirent, Ext2Fs, Inode};
pub struct DirectoryNode {
fs: Arc<Ext2Fs>,
inode: Inode,
ino: u32,
}
struct DirentIterExt<'a> {
fs: &'a Ext2Fs,
inode: &'a Inode,
offset: usize,
current: Option<CachedBlockRef>,
}
struct DirentIter<'a> {
fs: &'a Ext2Fs,
block: &'a [u8],
offset: usize,
}
impl<'a> DirentIter<'a> {
pub fn new(fs: &'a Ext2Fs, block: &'a [u8], offset: usize) -> Self {
Self { fs, block, offset }
}
}
impl<'a> Iterator for DirentIter<'a> {
type Item = (&'a Dirent, &'a [u8], usize);
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.offset + size_of::<Dirent>() >= self.block.len() {
return None;
}
let entry_end = self.offset + size_of::<Dirent>();
let dirent: &Dirent = bytemuck::from_bytes(&self.block[self.offset..entry_end]);
if dirent.ino == 0 {
// TODO skip over empty/padding dirents
self.offset += dirent.ent_size as usize;
continue;
}
let mut name_len = dirent.name_length_low as usize;
if self.fs.superblock.required_features() & (1 << 1) == 0 {
// Without the "directory entry type" required feature, this byte stores the
// upper 8 bits of the name length rather than a file type
name_len |= (dirent.type_indicator as usize) << 8;
}
// TODO handle broken dirent fields?
assert!(entry_end + name_len <= self.block.len());
assert!(dirent.ent_size as usize >= size_of::<Dirent>());
let name = &self.block[entry_end..entry_end + name_len];
let offset = self.offset;
self.offset += dirent.ent_size as usize;
return Some((dirent, name, offset));
}
}
}
impl DirectoryNode {
pub fn new(fs: Arc<Ext2Fs>, inode: Inode, ino: u32) -> NodeRef {
Node::directory(Self { fs, inode, ino }, NodeFlags::empty())
}
async fn lookup_entry(&self, search_name: &str) -> Result<NodeRef, Error> {
let n = self.inode.blocks(&self.fs);
assert!(search_name.len() < 255);
for i in 0..n {
let block = self.fs.inode_block(&self.inode, i as u32).await?;
let iter = DirentIter::new(&self.fs, &block, 0);
for (dirent, name, _) in iter {
let Ok(name) = core::str::from_utf8(name) else {
continue;
};
if search_name == name {
let ino = dirent.ino;
return self.fs.load_node(ino).await;
}
}
}
Err(Error::DoesNotExist)
}
async fn read_entries(
&self,
mut pos: u64,
entries: &mut [MaybeUninit<DirectoryEntry>],
) -> Result<(usize, u64), Error> {
if pos >= self.inode.size_lower as u64 || entries.is_empty() {
return Ok((0, pos));
}
loop {
let index = pos / self.fs.block_size as u64;
let offset = (pos % self.fs.block_size as u64) as usize;
let block = self.fs.inode_block(&self.inode, index as u32).await?;
let iter = DirentIter::new(&self.fs, &block, offset);
let mut entry_count = 0;
for (dirent, name, entry_offset) in iter {
pos = (index * self.fs.block_size as u64) + entry_offset as u64;
if entry_count >= entries.len() {
break;
}
// Point pos at last entry's end
pos += dirent.ent_size as u64;
let name = core::str::from_utf8(name).unwrap();
entries[entry_count].write(DirectoryEntry {
ty: None,
name: FixedString::from_str(name)?,
});
entry_count += 1;
}
// If read any entries from the block, return
if entry_count != 0 {
return Ok((entry_count, pos));
}
// Otherwise go to next block
pos = (index + 1) * self.fs.block_size as u64;
}
}
}
impl CommonImpl for DirectoryNode {
fn size(&self, node: &NodeRef) -> Result<u64, Error> {
Ok(self.inode.size_lower as _)
}
fn as_any(&self) -> &dyn Any {
self
}
fn metadata(&self, node: &NodeRef) -> Result<Metadata, Error> {
Ok(Metadata {
uid: unsafe { UserId::from_raw(self.inode.uid as _) },
gid: unsafe { GroupId::from_raw(self.inode.gid as _) },
mode: unsafe { FileMode::from_raw(self.inode.mode as u32 & 0o777) },
})
}
}
impl DirectoryImpl for DirectoryNode {
fn open(&self, _node: &NodeRef) -> Result<DirectoryOpenPosition, Error> {
Ok(DirectoryOpenPosition::FromPhysical(0))
}
fn len(&self, _node: &NodeRef) -> Result<usize, Error> {
Err(Error::NotImplemented)
}
fn lookup(&self, _node: &NodeRef, search_name: &str) -> Result<NodeRef, Error> {
block!(self.lookup_entry(search_name).await)?
}
fn create_node(&self, _parent: &NodeRef, _ty: FileType) -> Result<NodeRef, Error> {
Err(Error::ReadOnly)
}
fn attach_node(&self, _parent: &NodeRef, _child: &NodeRef, _name: &str) -> Result<(), Error> {
Err(Error::ReadOnly)
}
fn unlink_node(&self, _parent: &NodeRef, _name: &str) -> Result<(), Error> {
Err(Error::ReadOnly)
}
fn read_entries(
&self,
_node: &NodeRef,
pos: u64,
entries: &mut [MaybeUninit<DirectoryEntry>],
) -> Result<(usize, u64), Error> {
block!(self.read_entries(pos, entries).await)?
}
}


@ -0,0 +1,113 @@
use core::any::Any;
use alloc::{sync::Arc, vec};
use libk::{
block,
error::Error,
vfs::{block, CommonImpl, InstanceData, Metadata, Node, NodeFlags, NodeRef, RegularImpl},
};
use yggdrasil_abi::io::{FileMode, GroupId, OpenOptions, UserId};
use crate::{Ext2Fs, Inode};
pub struct RegularNode {
fs: Arc<Ext2Fs>,
inode: Inode,
ino: u32,
}
impl RegularNode {
pub fn new(fs: Arc<Ext2Fs>, inode: Inode, ino: u32) -> NodeRef {
Node::regular(Self { fs, inode, ino }, NodeFlags::empty())
}
fn size(&self) -> u64 {
self.inode.size_lower as _
}
async fn read(&self, mut pos: u64, buffer: &mut [u8]) -> Result<usize, Error> {
if pos >= self.size() {
return Ok(0);
}
let mut offset = 0;
let mut remaining = core::cmp::min(buffer.len(), (self.size() - pos) as usize);
while remaining != 0 {
let block_index = pos / self.fs.block_size as u64;
let block_offset = (pos % self.fs.block_size as u64) as usize;
let amount = core::cmp::min(self.fs.block_size - block_offset, remaining);
let block = self.fs.inode_block(&self.inode, block_index as u32).await?;
buffer[offset..offset + amount]
.copy_from_slice(&block[block_offset..block_offset + amount]);
pos += amount as u64;
offset += amount;
remaining -= amount;
}
Ok(offset)
}
}
impl CommonImpl for RegularNode {
fn metadata(&self, node: &NodeRef) -> Result<Metadata, Error> {
Ok(Metadata {
uid: unsafe { UserId::from_raw(self.inode.uid as _) },
gid: unsafe { GroupId::from_raw(self.inode.gid as _) },
mode: unsafe { FileMode::from_raw(self.inode.mode as u32 & 0o777) },
})
}
fn as_any(&self) -> &dyn Any {
self
}
fn size(&self, node: &NodeRef) -> Result<u64, Error> {
Ok(self.size())
}
}
impl RegularImpl for RegularNode {
fn open(
&self,
_node: &NodeRef,
opts: OpenOptions,
) -> Result<(u64, Option<InstanceData>), Error> {
if opts.contains(OpenOptions::WRITE) {
return Err(Error::ReadOnly);
}
Ok((0, None))
}
fn close(&self, _node: &NodeRef, _instance: Option<&InstanceData>) -> Result<(), Error> {
Ok(())
}
fn truncate(&self, node: &NodeRef, new_size: u64) -> Result<(), Error> {
Err(Error::ReadOnly)
}
fn write(
&self,
node: &NodeRef,
instance: Option<&InstanceData>,
pos: u64,
buf: &[u8],
) -> Result<usize, Error> {
Err(Error::ReadOnly)
}
fn read(
&self,
_node: &NodeRef,
_instance: Option<&InstanceData>,
pos: u64,
buf: &mut [u8],
) -> Result<usize, Error> {
block!(self.read(pos, buf).await)?
}
}
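For clarity, the chunking performed by the read loop above, extracted into a standalone sketch; the 1024-byte block size in the example comment is a hypothetical value, the real one comes from the superblock:
// Returns the (block_index, block_offset, amount) triples a read touches;
// mirrors the arithmetic in RegularNode::read above. Sketch only.
fn read_chunks(mut pos: u64, mut remaining: usize, block_size: usize) -> alloc::vec::Vec<(u64, usize, usize)> {
    let mut chunks = alloc::vec::Vec::new();
    while remaining != 0 {
        let block_index = pos / block_size as u64;
        let block_offset = (pos % block_size as u64) as usize;
        let amount = core::cmp::min(block_size - block_offset, remaining);
        chunks.push((block_index, block_offset, amount));
        pos += amount as u64;
        remaining -= amount;
    }
    chunks
}
// read_chunks(1000, 2000, 1024) == [(0, 1000, 24), (1, 0, 1024), (2, 0, 952)]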


@ -0,0 +1,380 @@
#![cfg_attr(not(test), no_std)]
extern crate alloc;
use core::{
any::Any,
mem::MaybeUninit,
ops::{Deref, DerefMut},
pin::Pin,
sync::atomic::AtomicBool,
task::{Context, Poll},
};
use alloc::{boxed::Box, sync::Arc, vec, vec::Vec};
use bytemuck::{Pod, Zeroable};
use dir::DirectoryNode;
use file::RegularNode;
use libk::{
error::Error,
vfs::{
block::{
cache::{BlockCache, CachedBlock, CachedBlockRef},
BlockDevice,
},
CommonImpl, Metadata, NodeRef,
},
};
use libk_mm::PageBox;
use libk_util::OneTimeInit;
use static_assertions::const_assert_eq;
pub mod dir;
pub mod file;
pub const SUPERBLOCK_OFFSET: u64 = 1024;
pub const EXT2_SIGNATURE: u16 = 0xEF53;
pub const BGDT_BLOCK_NUMBER: u32 = 2;
pub struct Ext2Fs {
cache: BlockCache,
superblock: ExtendedSuperblock,
bgdt: BlockGroupDescriptorTable,
inode_size: usize,
block_size: usize,
inodes_per_block: usize,
root: OneTimeInit<NodeRef>,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct Superblock {
total_inodes: u32,
total_blocks: u32,
root_reserved_blocks: u32,
total_unallocated_blocks: u32,
total_unallocated_inodes: u32,
superblock_number: u32,
block_size_log2: u32,
fragment_size_log2: u32,
block_group_block_count: u32,
block_group_fragment_count: u32,
block_group_inode_count: u32,
last_mount_time: u32,
last_written_time: u32,
mounts_since_fsck: u16,
mounts_allowed_between_fsck: u16,
signature: u16,
state: u16,
error_behavior: u16,
version_minor: u16,
last_fsck_time: u32,
fsck_interval: u32,
creator_os_id: u32,
version_major: u32,
root_user_id: u16,
root_group_id: u16,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct ExtendedSuperblock {
base: Superblock,
first_non_reserved_inode: u32,
inode_struct_size: u16,
superblock_block_group_number: u16,
optional_features: u32,
required_features: u32,
readonly_features: u32,
filesystem_id: [u8; 16],
volume_name: [u8; 16],
last_mount_path: [u8; 64],
compression_algorithms: u32,
file_prealloc_block_count: u8,
directory_prealloc_block_count: u8,
_0: u16,
journal_id: [u8; 16],
journal_inode: u32,
journal_device: u32,
orphan_inode_list_head: u32,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct BlockGroupDescriptor {
block_usage_bitmap: u32,
inode_usage_bitmap: u32,
inode_table: u32,
unallocated_blocks: u16,
unallocated_inodes: u16,
unallocated_directories: u16,
_0: [u8; 14],
}
struct BlockGroupDescriptorTable {
data: Box<[u8]>,
len: usize,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct Inode {
mode: u16,
uid: u16,
size_lower: u32,
atime: u32,
ctime: u32,
mtime: u32,
dtime: u32,
gid: u16,
hard_links: u16,
sector_count: u32,
flags: u32,
os_val1: u32,
direct_blocks: [u32; 12],
indirect_block_l1: u32,
indirect_block_l2: u32,
indirect_block_l3: u32,
generation: u32,
facl: u32,
size_upper: u32,
frag_block_no: u32,
os_val2: u32,
}
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct Dirent {
ino: u32,
ent_size: u16,
name_length_low: u8,
type_indicator: u8,
}
pub struct BlockReader<'a> {
fs: &'a Ext2Fs,
inode: &'a Inode,
len: u32,
index: u32,
}
impl<'a> BlockReader<'a> {
pub fn new(fs: &'a Ext2Fs, inode: &'a Inode) -> Self {
let len = if inode.mode & 0xF000 == 0x4000 {
(inode.size_lower as u64 / fs.block_size as u64) as u32
} else {
todo!()
};
Self {
fs,
inode,
len,
index: 0,
}
}
pub async fn next(&mut self) -> Option<Result<CachedBlockRef, Error>> {
if self.index >= self.len {
return None;
}
let block = self.fs.inode_block(self.inode, self.index).await;
if block.is_ok() {
self.index += 1;
}
Some(block)
}
}
impl BlockGroupDescriptorTable {
pub fn with_capacity(size: usize, len: usize) -> Self {
let data = vec![0; size].into_boxed_slice();
Self { data, len }
}
pub fn descriptor(&self, index: u32) -> &BlockGroupDescriptor {
let index = index as usize;
if index >= self.len {
panic!();
}
bytemuck::from_bytes(
&self.data[index * size_of::<BlockGroupDescriptor>()
..(index + 1) * size_of::<BlockGroupDescriptor>()],
)
}
}
const_assert_eq!(size_of::<BlockGroupDescriptor>(), 32);
impl Ext2Fs {
pub async fn create(device: &'static dyn BlockDevice) -> Result<NodeRef, Error> {
let fs = Self::create_fs(device).await.map_err(|e| {
log::error!("Ext2 init error: {:?}", e);
e
})?;
let fs = Arc::new(fs);
let root = fs.load_node(2).await?;
fs.root.init(root.clone());
Ok(root)
}
async fn create_fs(device: &'static dyn BlockDevice) -> Result<Self, Error> {
let mut superblock = ExtendedSuperblock::zeroed();
device
.read(SUPERBLOCK_OFFSET, bytemuck::bytes_of_mut(&mut superblock))
.await?;
if superblock.signature != EXT2_SIGNATURE {
log::warn!(
"Invalid ext2 signature: expected {:#x}, got {:#x}",
EXT2_SIGNATURE,
superblock.signature
);
return Err(Error::InvalidArgument);
}
let block_size = 1024usize << superblock.block_size_log2;
let bgdt_entry_count = ((superblock.total_blocks + superblock.block_group_block_count - 1)
/ superblock.block_group_block_count) as usize;
let bgdt_block_count =
(bgdt_entry_count * size_of::<BlockGroupDescriptor>() + block_size - 1) / block_size;
log::info!(
"ext2 v{}.{}",
superblock.version_major,
superblock.version_minor
);
log::info!("Block groups: {}", bgdt_entry_count);
log::info!(
"BGDT size: {} blocks ({} bytes)",
bgdt_block_count,
bgdt_block_count * block_size
);
let mut bgdt = BlockGroupDescriptorTable::with_capacity(
bgdt_block_count * block_size,
bgdt_entry_count,
);
for i in 0..bgdt_block_count {
let disk_offset = (i as u64 + 1) * block_size as u64;
device
.read_exact(
disk_offset,
&mut bgdt.data[i * block_size..(i + 1) * block_size],
)
.await?;
}
log::info!("Inode size: {}", superblock.inode_size());
Ok(Self {
block_size,
inode_size: superblock.inode_size(),
inodes_per_block: block_size / superblock.inode_size(),
// 128 cached blocks per bucket × 8 buckets
cache: BlockCache::with_capacity(device, block_size, 128),
superblock,
bgdt,
root: OneTimeInit::new(),
})
}
fn create_node(self: &Arc<Self>, inode: Inode, ino: u32) -> Result<NodeRef, Error> {
match inode.mode & 0xF000 {
// Directory
0x4000 => Ok(DirectoryNode::new(self.clone(), inode, ino)),
// Regular file
0x8000 => Ok(RegularNode::new(self.clone(), inode, ino)),
_ => todo!("Unknown file type: {:#x}", inode.mode),
}
}
pub async fn load_node(self: &Arc<Self>, ino: u32) -> Result<NodeRef, Error> {
let inode = self.read_inode(ino).await?;
self.create_node(inode, ino)
}
pub async fn block(&self, index: u32) -> Result<CachedBlockRef, Error> {
let address = index as u64 * self.block_size as u64;
self.cache.get(address).await
}
pub async fn read_inode(&self, ino: u32) -> Result<Inode, Error> {
if ino < 1 || ino >= self.superblock.total_inodes {
todo!()
}
let ino = ino - 1;
let ino_group = ino / self.superblock.block_group_inode_count;
let ino_in_group = ino % self.superblock.block_group_inode_count;
let ino_block = self.bgdt.descriptor(ino_group).inode_table
+ ino_in_group / self.inodes_per_block as u32;
let offset_in_block = (ino_in_group as usize % self.inodes_per_block) * self.inode_size;
assert!(offset_in_block < self.block_size);
let block = self.block(ino_block).await?;
Ok(*bytemuck::from_bytes(
&block[offset_in_block..offset_in_block + size_of::<Inode>()],
))
}
pub async fn inode_block(&self, inode: &Inode, index: u32) -> Result<CachedBlockRef, Error> {
let block_index = self.inode_block_index(inode, index).await?;
self.block(block_index).await
}
async fn inode_block_index(&self, inode: &Inode, index: u32) -> Result<u32, Error> {
if index < 12 {
Ok(inode.direct_blocks[index as usize])
} else {
todo!()
}
}
}
impl ExtendedSuperblock {
pub fn inode_size(&self) -> usize {
if self.base.version_major != 0 {
self.inode_struct_size as _
} else {
128
}
}
pub fn required_features(&self) -> u32 {
if self.base.version_major != 0 {
self.required_features
} else {
todo!()
}
}
}
impl Deref for ExtendedSuperblock {
type Target = Superblock;
fn deref(&self) -> &Self::Target {
&self.base
}
}
impl DerefMut for ExtendedSuperblock {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.base
}
}
impl Inode {
pub fn blocks(&self, fs: &Ext2Fs) -> usize {
(self.size_lower as usize + fs.block_size - 1) / fs.block_size
}
}
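For reference, the inode-location arithmetic used by read_inode above as a standalone sketch; the block size, inode size and inodes-per-group values are hypothetical examples, the real ones come from the superblock and the block group descriptor table:
// Maps an inode number to (block group, block index within that group's
// inode table, byte offset within that block). Sketch with fixed parameters.
fn locate_inode(ino: u32) -> (u32, u32, usize) {
    let block_group_inode_count = 1712u32; // hypothetical superblock value
    let inode_size = 128usize; // revision-0 default
    let block_size = 1024usize;
    let inodes_per_block = (block_size / inode_size) as u32;

    // On-disk inode numbers are 1-based
    let ino = ino - 1;
    let group = ino / block_group_inode_count;
    let in_group = ino % block_group_inode_count;
    let table_block = in_group / inodes_per_block;
    let offset_in_block = (in_group % inodes_per_block) as usize * inode_size;
    (group, table_block, offset_in_block)
}
// locate_inode(2) == (0, 0, 128): the root directory (inode 2) sits in the
// first block of group 0's inode table, immediately after inode 1.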


@ -29,6 +29,9 @@ serde_json = { version = "1.0.111", default-features = false, features = ["alloc
serde = { version = "1.0.193", features = ["derive"], default-features = false }
bytemuck = { version = "1.16.1", features = ["derive"] }
async-trait = "0.1.81"
static_assertions = "1.1.0"
uuid = { version = "1.10.0", features = ["bytemuck"], default-features = false }
lru = "0.12.3"
[dependencies.elf]
version = "0.7.2"


@ -1,5 +1,8 @@
#![no_std]
#![feature(
linked_list_cursors,
async_fn_traits,
async_closure,
maybe_uninit_slice,
new_uninit,
allocator_api,
@ -20,6 +23,7 @@ use core::{
pub mod event;
pub mod hash_table;
pub mod io;
pub mod lru_hash_table;
pub mod queue;
pub mod ring;
pub mod sync;


@ -0,0 +1,236 @@
use core::{
hash::{BuildHasher, Hash},
ops::AsyncFnOnce,
};
use alloc::{
collections::{linked_list, LinkedList},
vec::Vec,
};
use crate::hash_table::DefaultHashBuilder;
struct LruCacheBucket<K, V> {
data: LinkedList<(K, V)>,
entry_count: usize,
capacity: usize,
}
enum EntryMut<'a, K, V> {
Vacant(&'a mut LruCacheBucket<K, V>),
Occupied(&'a mut V),
}
pub struct LruCache<K, V, H: BuildHasher = DefaultHashBuilder> {
buckets: Vec<LruCacheBucket<K, V>>,
hasher: H,
}
pub struct FlushIter<'a, K, V, H: BuildHasher> {
cache: &'a mut LruCache<K, V, H>,
}
impl<'a, K, V, H: BuildHasher> Iterator for FlushIter<'a, K, V, H> {
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {
self.cache.pop_entry()
}
}
impl<K, V> LruCacheBucket<K, V> {
pub const fn empty(capacity: usize) -> Self {
Self {
data: LinkedList::new(),
capacity,
entry_count: 0,
}
}
fn pop_front(&mut self) -> Option<(K, V)> {
self.data.pop_front()
}
}
impl<K: Eq, V> LruCacheBucket<K, V> {
pub fn entry_mut(&mut self, key: &K) -> EntryMut<K, V> {
let mut cursor = self.data.cursor_front_mut();
loop {
let Some(node) = cursor.current() else {
break;
};
// If entry found corresponding to the key, remove it and reinsert at the front
if &node.0 == key {
// Safety: just checked it's not None above, so safe
let (key, value) = unsafe { cursor.remove_current().unwrap_unchecked() };
drop(cursor);
self.data.push_front((key, value));
// Safety: just pushed here, so safe
return EntryMut::Occupied(
&mut unsafe { self.data.front_mut().unwrap_unchecked() }.1,
);
}
cursor.move_next();
}
EntryMut::Vacant(self)
}
fn insert(&mut self, key: K, value: V) -> (&mut V, Option<(K, V)>) {
let evicted = if self.entry_count + 1 > self.capacity {
self.data.pop_back()
} else {
self.entry_count += 1;
None
};
self.data.push_front((key, value));
(
&mut unsafe { self.data.front_mut().unwrap_unchecked() }.1,
evicted,
)
}
fn pop(&mut self, search_key: &K) -> Option<V> {
let mut cursor = self.data.cursor_front_mut();
while let Some((key, _)) = cursor.current() {
if key == search_key {
return cursor.remove_current().map(|v| v.1);
}
// Advance past non-matching entries; otherwise this would loop forever
cursor.move_next();
}
None
}
}
impl<K, V> LruCache<K, V, DefaultHashBuilder> {
pub fn with_capacity(bucket_capacity: usize, bucket_count: usize) -> Self {
let mut buckets = Vec::new();
for _ in 0..bucket_count {
buckets.push(LruCacheBucket::empty(bucket_capacity));
}
Self {
buckets,
hasher: DefaultHashBuilder::new(),
}
}
}
impl<K, V, H: BuildHasher> LruCache<K, V, H> {
pub fn pop_entry(&mut self) -> Option<(K, V)> {
self.buckets
.iter_mut()
.find_map(|bucket| bucket.pop_front())
}
pub fn flush(&mut self) -> FlushIter<K, V, H> {
FlushIter { cache: self }
}
}
impl<K: Hash + Eq, V, H: BuildHasher> LruCache<K, V, H> {
fn bucket_mut(&mut self, key: &K) -> &mut LruCacheBucket<K, V> {
let h = self.hasher.hash_one(key);
let bucket_count = self.buckets.len();
&mut self.buckets[h as usize % bucket_count]
}
pub async fn try_get_or_insert_with_async<'a, F, E>(
&'a mut self,
key: K,
supplier: F,
) -> Result<(&'a mut V, Option<(K, V)>), E>
where
F: AsyncFnOnce() -> Result<V, E>,
{
let h = self.hasher.hash_one(&key);
let bucket_count = self.buckets.len();
let bucket = &mut self.buckets[h as usize % bucket_count];
match bucket.entry_mut(&key) {
EntryMut::Vacant(bucket) => {
let value = supplier.async_call_once(()).await?;
let (value, evicted) = bucket.insert(key, value);
Ok((value, evicted))
}
EntryMut::Occupied(value) => Ok((value, None)),
}
}
pub fn try_get_or_insert_with<F, E>(
&mut self,
key: K,
supplier: F,
) -> Result<(&mut V, Option<(K, V)>), E>
where
F: FnOnce() -> Result<V, E>,
{
let h = self.hasher.hash_one(&key);
let bucket_count = self.buckets.len();
let bucket = &mut self.buckets[h as usize % bucket_count];
match bucket.entry_mut(&key) {
EntryMut::Vacant(bucket) => {
let value = supplier()?;
let (value, evicted) = bucket.insert(key, value);
Ok((value, evicted))
}
EntryMut::Occupied(value) => Ok((value, None)),
}
}
pub fn pop(&mut self, key: &K) -> Option<V> {
self.bucket_mut(key).pop(key)
}
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
match self.bucket_mut(key).entry_mut(key) {
EntryMut::Occupied(value) => Some(value),
_ => None,
}
}
}
#[cfg(test)]
mod tests {
use super::LruCache;
#[test]
fn lru_test_fill() {
let mut cache = LruCache::with_capacity(4, 1);
let (_, e) = cache
.try_get_or_insert_with(0, || Ok::<i32, i32>(1))
.unwrap();
assert_eq!(e, None);
let (_, e) = cache
.try_get_or_insert_with(1, || Ok::<i32, i32>(2))
.unwrap();
assert_eq!(e, None);
let (_, e) = cache
.try_get_or_insert_with(2, || Ok::<i32, i32>(3))
.unwrap();
assert_eq!(e, None);
let (_, e) = cache
.try_get_or_insert_with(3, || Ok::<i32, i32>(4))
.unwrap();
assert_eq!(e, None);
// Refresh the 0, 1 keys
cache.get_mut(&0);
cache.get_mut(&1);
let (_, e) = cache
.try_get_or_insert_with(4, || Ok::<i32, i32>(5))
.unwrap();
assert_eq!(e, Some((2, 3)));
}
}
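A minimal sketch of the async entry point, using the same closure-returning-a-future pattern the block cache later in this commit relies on; the u64 key, Vec<u8> value and unit error type are placeholders:
// Assumes a caller already executing on the kernel async runtime.
async fn demo(cache: &mut LruCache<u64, alloc::vec::Vec<u8>>) -> Result<usize, ()> {
    let (value, evicted) = cache
        .try_get_or_insert_with_async(0u64, || async move {
            // A fallible async loader (e.g. a disk read) would go here
            Ok::<_, ()>(alloc::vec![0u8; 512])
        })
        .await?;
    // Any (key, value) pair displaced from the bucket is handed back to the
    // caller, which decides whether it must be written back before dropping
    debug_assert!(evicted.is_none());
    Ok(value.len())
}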


@ -35,6 +35,15 @@ impl RwLockInner {
}
}
#[inline]
fn downgrade_write(&self) {
// Acquire read lock
let value = self.acquire_read_raw();
assert_eq!(value & Self::LOCKED_WRITE, Self::LOCKED_WRITE);
// Release write lock
self.value.fetch_and(!Self::LOCKED_WRITE, Ordering::Release);
}
#[inline]
fn acquire_read_raw(&self) -> usize {
let value = self.value.fetch_add(Self::LOCKED_READ, Ordering::Acquire);
@ -119,6 +128,11 @@ impl<T> IrqSafeRwLock<T> {
}
}
#[inline]
unsafe fn downgrade(&self) {
self.inner.downgrade_write();
}
unsafe fn release_read(&self) {
self.inner.release_read();
}
@ -145,6 +159,31 @@ impl<'a, T> Drop for IrqSafeRwLockReadGuard<'a, T> {
}
}
impl<'a, T> IrqSafeRwLockReadGuard<'a, T> {
pub fn get(guard: &Self) -> *const T {
guard.lock.value.get()
}
}
impl<'a, T> IrqSafeRwLockWriteGuard<'a, T> {
pub fn downgrade(guard: IrqSafeRwLockWriteGuard<'a, T>) -> IrqSafeRwLockReadGuard<'a, T> {
let lock = guard.lock;
let irq_guard = IrqGuard::acquire();
// Read lock still held
core::mem::forget(guard);
// Read lock acquired, write lock released
unsafe {
lock.downgrade();
}
IrqSafeRwLockReadGuard {
lock,
_guard: irq_guard,
}
}
}
impl<'a, T> Deref for IrqSafeRwLockWriteGuard<'a, T> {
type Target = T;


@ -2,6 +2,8 @@
#![cfg_attr(test, allow(unused_imports))]
#![allow(clippy::new_without_default)]
#![feature(
async_fn_traits,
async_closure,
new_range_api,
associated_type_defaults,
maybe_uninit_slice,


@ -52,7 +52,7 @@ pub fn spawn<T: Termination, F: Future<Output = T> + Send + 'static>(
}
/// Runs a [Future] to its completion on the current thread
pub fn run_to_completion<'a, T, F: Future<Output = T> + Send + 'a>(future: F) -> Result<T, Error> {
pub fn run_to_completion<'a, T, F: Future<Output = T> + 'a>(future: F) -> Result<T, Error> {
let mut future = Box::pin(future);
// Make a weak ref for the waker


@ -1,7 +1,8 @@
use core::{
cell::UnsafeCell,
future::poll_fn,
ops::{Deref, DerefMut},
marker::PhantomData,
ops::{AsyncFnOnce, Deref, DerefMut},
sync::atomic::{AtomicBool, AtomicU32, Ordering},
task::{Context, Poll},
};
@ -39,6 +40,11 @@ pub struct MutexGuard<'a, T> {
lock: &'a ThreadedMutexInner,
}
pub struct MappedAsyncMutexGuard<'a, U, T> {
mutex: &'a AsyncMutex<T>,
value: *mut U,
}
impl<T> AsyncMutex<T> {
pub fn new(value: T) -> Self {
Self {
@ -85,6 +91,24 @@ unsafe impl<T> Sync for AsyncMutex<T> {}
unsafe impl<'a, T> Send for AsyncMutexGuard<'a, T> {}
unsafe impl<'a, T> Sync for AsyncMutexGuard<'a, T> {}
impl<'a, T> AsyncMutexGuard<'a, T> {
pub async fn try_map_guard_async<
U: 'a,
E,
F: AsyncFnOnce(&'a mut T) -> Result<&'a mut U, E>,
>(
self,
mapper: F,
) -> Result<MappedAsyncMutexGuard<'a, U, T>, E> {
let mutex = self.mutex;
core::mem::forget(self);
let value = mapper
.async_call_once((unsafe { &mut *mutex.get() },))
.await?;
Ok(MappedAsyncMutexGuard { mutex, value })
}
}
impl<'a, T> Deref for AsyncMutexGuard<'a, T> {
type Target = T;
@ -107,6 +131,31 @@ impl<'a, T> Drop for AsyncMutexGuard<'a, T> {
}
}
unsafe impl<'a, U, T> Send for MappedAsyncMutexGuard<'a, U, T> {}
unsafe impl<'a, U, T> Sync for MappedAsyncMutexGuard<'a, U, T> {}
impl<'a, U, T> Deref for MappedAsyncMutexGuard<'a, U, T> {
type Target = U;
fn deref(&self) -> &Self::Target {
unsafe { &*self.value }
}
}
impl<'a, U, T> DerefMut for MappedAsyncMutexGuard<'a, U, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value }
}
}
impl<'a, U, T> Drop for MappedAsyncMutexGuard<'a, U, T> {
fn drop(&mut self) {
unsafe {
self.mutex.force_unlock();
}
}
}
impl ThreadedMutexInner {
const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;


@ -0,0 +1,129 @@
use core::ops::{Deref, DerefMut};
use alloc::sync::Arc;
use libk_mm::PageBox;
use libk_util::{
lru_hash_table::LruCache,
sync::spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard},
};
use yggdrasil_abi::error::Error;
use crate::task::sync::{AsyncMutex, MappedAsyncMutexGuard};
use super::BlockDevice;
pub struct CachedBlock {
data: PageBox<[u8]>,
dirty: bool,
}
pub struct CachedBlockRef {
entry: Arc<IrqSafeRwLock<CachedBlock>>,
lock: IrqSafeRwLockReadGuard<'static, CachedBlock>,
}
pub struct BlockCache {
device: &'static dyn BlockDevice,
block_size: usize,
cache: AsyncMutex<LruCache<u64, Arc<IrqSafeRwLock<CachedBlock>>>>,
}
impl BlockCache {
pub fn with_capacity(
device: &'static dyn BlockDevice,
block_size: usize,
bucket_capacity: usize,
) -> Self {
if block_size % device.block_size() != 0 {
panic!("Cache block size is not multiple of device block size");
}
Self {
device,
block_size,
cache: AsyncMutex::new(LruCache::with_capacity(bucket_capacity, 8)),
}
}
async fn evict_block(&self, address: u64, block: Arc<IrqSafeRwLock<CachedBlock>>) {
let read = block.read();
if read.dirty {
log::info!("Evict block {}", address);
if let Err(err) = self.device.write_exact(address, &read.data).await {
log::error!("Disk error: flushing block {}: {:?}", address, err);
}
}
}
pub async fn get<'a>(&'a self, address: u64) -> Result<CachedBlockRef, Error> {
debug_assert_eq!(address % self.block_size as u64, 0);
let entry: MappedAsyncMutexGuard<_, LruCache<u64, Arc<IrqSafeRwLock<CachedBlock>>>> = self
.cache
.lock()
.await
.try_map_guard_async::<_, Error, _>(|cache: &'a mut LruCache<_, _>| async move {
let (value, evicted) = cache
.try_get_or_insert_with_async(address, || async move {
let mut data = PageBox::new_slice(0, self.block_size)?;
self.device.read_exact(address, &mut data).await?;
Ok(Arc::new(IrqSafeRwLock::new(CachedBlock {
data,
dirty: false,
})))
})
.await?;
if evicted.is_some() {
todo!()
}
Ok(value)
})
.await?;
Ok(CachedBlockRef::new(entry.deref()))
}
pub async fn flush(&self) {
for (address, block) in self.cache.lock().await.flush() {
self.evict_block(address, block).await;
}
}
}
impl CachedBlock {
pub fn set_dirty(&mut self) {
self.dirty = true;
}
}
impl CachedBlockRef {
pub fn new(entry: &Arc<IrqSafeRwLock<CachedBlock>>) -> Self {
let entry = entry.clone();
// Safety: the guard borrows from the lock inside `entry`, and the Arc is kept
// in the same struct, so the guarded data outlives the guard
let lock = unsafe { core::mem::transmute(entry.read()) };
Self { lock, entry }
}
}
impl Deref for CachedBlockRef {
type Target = PageBox<[u8]>;
fn deref(&self) -> &Self::Target {
self.lock.deref()
}
}
impl Deref for CachedBlock {
type Target = PageBox<[u8]>;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl DerefMut for CachedBlock {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
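How a filesystem is expected to drive the cache, mirroring the Ext2Fs setup earlier in this commit; the 1024-byte cache block size and the 128-entry bucket capacity below are illustrative:
// Sketch only: the device reference must be 'static, as in the ext2 driver.
async fn first_byte(device: &'static dyn BlockDevice) -> Result<u8, Error> {
    // 1024-byte cache blocks, 128 entries per bucket (8 buckets internally)
    let cache = BlockCache::with_capacity(device, 1024, 128);
    // `get` takes a byte address aligned to the cache block size; the returned
    // CachedBlockRef derefs down to the cached block data
    let block = cache.get(0).await?;
    Ok(block[0])
}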


@ -75,29 +75,32 @@ impl<'a, D: NgBlockDevice + 'a> NgBlockDeviceWrapper<'a, D> {
#[async_trait]
impl<'a, D: NgBlockDevice + 'a> BlockDevice for NgBlockDeviceWrapper<'a, D> {
async fn read(&self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
let (lba_range, lba_off) = self.lba_range(pos..pos + buf.len() as u64);
async fn read(&self, mut pos: u64, mut buf: &mut [u8]) -> Result<usize, Error> {
let len = buf.len();
let mut remaining = buf.len();
log::info!(
"read({:#x}, {}) x {:#x} -> LBA {:#x?}",
pos,
buf.len(),
self.block_size,
lba_range
);
while remaining != 0 {
let mut block = PageBox::new_uninit_slice(self.block_size)?;
let mut buffer = PageBox::new_uninit_slice(
(lba_range.end - lba_range.start) as usize * self.block_size,
)?;
self.device
.read(lba_range.start, &mut buffer)
.await
.map_err(Self::handle_drive_error)?;
let buffer = unsafe { buffer.assume_init_slice() };
let block_offset = pos as usize % self.block_size;
let lba = pos / self.block_size as u64;
buf.copy_from_slice(&buffer[lba_off..lba_off + buf.len()]);
let amount = core::cmp::min(self.block_size - block_offset, buf.len());
Ok(buf.len())
self.device
.read(lba, &mut block)
.await
.map_err(Self::handle_drive_error)?;
let block = unsafe { block.assume_init_slice() };
buf[..amount].copy_from_slice(&block[block_offset..block_offset + amount]);
buf = &mut buf[amount..];
remaining -= amount;
pos += amount as u64;
}
Ok(len)
}
async fn write(&self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
@ -107,6 +110,10 @@ impl<'a, D: NgBlockDevice + 'a> BlockDevice for NgBlockDeviceWrapper<'a, D> {
fn size(&self) -> Result<u64, Error> {
Ok(self.block_size as u64 * self.block_count)
}
fn block_size(&self) -> usize {
self.block_size
}
}
impl<'a, D: NgBlockDevice + 'a> PageProvider for NgBlockDeviceWrapper<'a, D> {


@ -1,25 +1,67 @@
#![allow(missing_docs)]
use alloc::boxed::Box;
use alloc::{boxed::Box, vec::Vec};
use async_trait::async_trait;
use libk_mm::PageProvider;
use libk_mm::{PageBox, PageProvider};
use partition::Partition;
use yggdrasil_abi::{error::Error, io::DeviceRequest};
pub mod cache;
pub mod device;
pub mod partition;
pub mod request;
pub use device::{NgBlockDevice, NgBlockDeviceWrapper};
// TODO
use crate::task::runtime;
pub async fn probe_partitions_async<
D: NgBlockDevice + 'static,
F: Fn(usize, &'static dyn BlockDevice) -> Result<(), Error> + Send + 'static,
>(
dev: &'static NgBlockDeviceWrapper<'static, D>,
callback: F,
) -> Result<(), Error> {
async fn probe_table<D: NgBlockDevice + 'static>(
dev: &'static NgBlockDeviceWrapper<'static, D>,
) -> Result<Option<Vec<Partition<'static, D>>>, Error> {
if let Some(partitions) = partition::probe_gpt(dev).await? {
return Ok(Some(partitions));
}
Ok(None)
}
match probe_table(dev).await {
Ok(Some(partitions)) => {
// Create block devices for the partitions
for (i, partition) in partitions.into_iter().enumerate() {
let partition_blkdev: &Partition<'static, D> = Box::leak(Box::new(partition));
if let Err(error) = callback(i, partition_blkdev) {
log::warn!("Could not add partition {}: {:?}", i, error);
}
}
}
Ok(None) => {
log::warn!("Unknown or missing partition table");
}
Err(error) => {
log::warn!("Could not probe partition table: {:?}", error);
}
}
Ok(())
}
pub fn probe_partitions<
D: NgBlockDevice + 'static,
F: Fn(usize, &'static dyn BlockDevice) -> Result<(), Error> + Send + 'static,
>(
_dev: &'static NgBlockDeviceWrapper<D>,
_callback: F,
dev: &'static NgBlockDeviceWrapper<'static, D>,
callback: F,
) -> Result<(), Error> {
log::warn!("TODO: probe partitions");
Ok(())
runtime::spawn(probe_partitions_async(dev, callback))
}
/// Block device interface
@ -34,6 +76,24 @@ pub trait BlockDevice: PageProvider + Sync {
Err(Error::NotImplemented)
}
async fn read_exact(&self, pos: u64, buf: &mut [u8]) -> Result<(), Error> {
let count = self.read(pos, buf).await?;
if count == buf.len() {
Ok(())
} else {
Err(Error::InvalidOperation)
}
}
async fn write_exact(&self, pos: u64, buf: &[u8]) -> Result<(), Error> {
let count = self.write(pos, buf).await?;
if count == buf.len() {
Ok(())
} else {
Err(Error::InvalidOperation)
}
}
/// Returns the size of the block device in bytes
fn size(&self) -> Result<u64, Error> {
Err(Error::NotImplemented)
@ -48,6 +108,10 @@ pub trait BlockDevice: PageProvider + Sync {
true
}
fn block_size(&self) -> usize {
512
}
/// Performs a device-specific function
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
Err(Error::NotImplemented)


@ -0,0 +1,165 @@
use core::mem::{size_of, MaybeUninit};
use alloc::{boxed::Box, vec, vec::Vec};
use async_trait::async_trait;
use bytemuck::{Pod, Zeroable};
use libk_mm::{address::PhysicalAddress, table::MapAttributes, PageBox, PageProvider};
use static_assertions::const_assert_eq;
use uuid::Uuid;
use yggdrasil_abi::{error::Error, io::DeviceRequest};
use super::{BlockDevice, NgBlockDevice, NgBlockDeviceWrapper};
pub struct Partition<'a, D: NgBlockDevice + 'a> {
pub device: &'a NgBlockDeviceWrapper<'a, D>,
pub lba_start: u64,
pub lba_end: u64,
}
#[derive(Clone, Copy)]
#[repr(C)]
struct GptHeader {
signature: [u8; 8],
revision: u32,
header_size: u32,
crc32: u32,
_0: u32,
header_lba: u64,
alternate_header_lba: u64,
first_usable_lba: u64,
last_usable_lba: u64,
guid: [u8; 16],
partition_table_lba: u64,
partition_table_len: u32,
partition_table_entry_size: u32,
partition_table_crc32: u32,
_1: [u8; 420],
}
#[derive(Clone, Copy, Zeroable, Pod)]
#[repr(C)]
struct GptEntry {
type_guid: Uuid,
part_guid: Uuid,
lba_start: u64,
lba_end: u64,
attrs: u64,
}
const_assert_eq!(size_of::<GptHeader>(), 512);
impl<'a, D: NgBlockDevice + 'a> Partition<'a, D> {
fn end_byte(&self) -> u64 {
self.lba_end * self.device.block_size as u64
}
fn start_byte(&self) -> u64 {
self.lba_start * self.device.block_size as u64
}
}
impl<'a, D: NgBlockDevice + 'a> PageProvider for Partition<'a, D> {
fn get_page(&self, offset: u64) -> Result<PhysicalAddress, Error> {
todo!()
}
fn clone_page(
&self,
offset: u64,
src_phys: PhysicalAddress,
src_attrs: MapAttributes,
) -> Result<PhysicalAddress, Error> {
todo!()
}
fn release_page(&self, offset: u64, phys: PhysicalAddress) -> Result<(), Error> {
todo!()
}
}
#[async_trait]
impl<'a, D: NgBlockDevice + 'a> BlockDevice for Partition<'a, D> {
async fn read(&self, pos: u64, buf: &mut [u8]) -> Result<usize, Error> {
if pos >= self.end_byte() {
return Ok(0);
}
let start = self.start_byte() + pos;
let end = core::cmp::min(start + buf.len() as u64, self.end_byte());
let count = (end - start) as usize;
if count == 0 {
todo!()
}
self.device.read(start, &mut buf[..count]).await
}
async fn write(&self, pos: u64, buf: &[u8]) -> Result<usize, Error> {
if pos >= self.end_byte() {
return Ok(0);
}
let start = self.start_byte() + pos;
let end = core::cmp::min(start + buf.len() as u64, self.end_byte());
let count = (end - start) as usize;
self.device.write(start, &buf[..count]).await
}
fn size(&self) -> Result<u64, Error> {
Ok((self.lba_end - self.lba_start) * self.device.block_size as u64)
}
fn device_request(&self, req: &mut DeviceRequest) -> Result<(), Error> {
self.device.device_request(req)
}
}
async unsafe fn read_struct_lba<T>(dev: &'static dyn BlockDevice, lba: u64) -> Result<T, Error> {
assert_eq!(size_of::<T>(), 512);
let mut data = MaybeUninit::<T>::uninit();
let buffer = core::slice::from_raw_parts_mut(data.as_mut_ptr() as *mut u8, 512);
dev.read_exact(lba * 512, buffer).await?;
Ok(data.assume_init())
}
pub(crate) async fn probe_gpt<D: NgBlockDevice + 'static>(
dev: &'static NgBlockDeviceWrapper<'static, D>,
) -> Result<Option<Vec<Partition<'static, D>>>, Error> {
let header = unsafe { read_struct_lba::<GptHeader>(dev, 1) }.await?;
if &header.signature != b"EFI PART" {
// Not a GPT partition table
return Ok(None);
}
let pt_entsize = header.partition_table_entry_size as usize;
let pt_len = header.partition_table_len as usize;
let mut pt_data = PageBox::new_slice(0, pt_len * pt_entsize)?;
log::info!("pt_entsize = {}, pt_len = {}", pt_entsize, pt_len);
assert!(size_of::<GptEntry>() <= pt_entsize);
dev.read_exact(header.partition_table_lba * 512, &mut pt_data)
.await?;
let mut partitions = vec![];
for i in 0..pt_len {
let pt_entry_data = &pt_data[i * pt_entsize..i * pt_entsize + size_of::<GptEntry>()];
let pt_entry: &GptEntry = bytemuck::from_bytes(pt_entry_data);
if pt_entry.type_guid.is_nil() {
break;
}
partitions.push(Partition {
device: dev,
lba_start: pt_entry.lba_start,
lba_end: pt_entry.lba_end,
});
}
Ok(Some(partitions))
}
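The LBA-to-byte translation behind start_byte and end_byte is simple but worth spelling out; the LBA range and sector size in the example are illustrative:
// A partition spanning LBAs [2048, 4096) on a 512-byte-sector device covers
// bytes [0x10_0000, 0x20_0000) of the disk; Partition::read above forwards a
// byte offset `pos` to the device at `start_byte + pos`, clamped to end_byte.
fn partition_byte_range(lba_start: u64, lba_end: u64, block_size: u64) -> (u64, u64) {
    (lba_start * block_size, lba_end * block_size)
}
// partition_byte_range(2048, 4096, 512) == (0x10_0000, 0x20_0000)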


@ -59,9 +59,10 @@ impl DirectoryFile {
break;
};
let ty = node.ty();
entries[off].write(DirectoryEntry { name, ty });
entries[off].write(DirectoryEntry {
name,
ty: Some(node.ty()),
});
off += 1;
rem -= 1;


@ -631,7 +631,7 @@ mod tests {
let (name, node) = &self.entries[i];
let entry = DirectoryEntry {
name: FixedString::from_str(name)?,
ty: node.ty(),
ty: Some(node.ty()),
};
entries[i].write(entry);
@ -667,15 +667,15 @@ mod tests {
&[
DirectoryEntry {
name: FixedString::from_str("f1").unwrap(),
ty: FileType::File,
ty: Some(FileType::File),
},
DirectoryEntry {
name: FixedString::from_str("f2").unwrap(),
ty: FileType::File,
ty: Some(FileType::File),
},
DirectoryEntry {
name: FixedString::from_str("f3").unwrap(),
ty: FileType::File
ty: Some(FileType::File)
}
]
);
@ -712,15 +712,15 @@ mod tests {
&[
DirectoryEntry {
name: FixedString::from_str(".").unwrap(),
ty: FileType::Directory
ty: Some(FileType::Directory)
},
DirectoryEntry {
name: FixedString::from_str("..").unwrap(),
ty: FileType::Directory
ty: Some(FileType::Directory)
},
DirectoryEntry {
name: FixedString::from_str("child1").unwrap(),
ty: FileType::Directory
ty: Some(FileType::Directory)
}
]
);


@ -326,6 +326,13 @@ impl Node {
Ok(cache.insert((String::new(), target)).1.clone())
}
pub fn as_block_device(&self) -> Result<&'static dyn BlockDevice, Error> {
match &self.data {
NodeImpl::Block(dev) => Ok(dev.0),
_ => Err(Error::InvalidFile),
}
}
}
impl fmt::Debug for Node {


@ -77,8 +77,10 @@ impl Node {
}
// TODO lookup in real FS
Err(Error::DoesNotExist)
match dir.imp.lookup(self, name) {
Err(Error::NotImplemented) => Err(Error::DoesNotExist),
res => res,
}
}
/// Creates an entry within a directory with given [CreateInfo].


@ -37,6 +37,12 @@ dependencies = [
"zerocopy",
]
[[package]]
name = "allocator-api2"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "async-trait"
version = "0.1.81"
@ -193,6 +199,10 @@ name = "hashbrown"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
"ahash",
"allocator-api2",
]
[[package]]
name = "itertools"
@ -290,8 +300,11 @@ dependencies = [
"libk-mm",
"libk-util",
"log",
"lru",
"serde",
"serde_json",
"static_assertions",
"uuid",
"yggdrasil-abi",
]
@ -354,6 +367,15 @@ version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "lru"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
dependencies = [
"hashbrown",
]
[[package]]
name = "memtables"
version = "0.1.0"
@ -511,6 +533,15 @@ version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "uuid"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
dependencies = [
"bytemuck",
]
[[package]]
name = "version_check"
version = "0.9.4"


@ -2,10 +2,12 @@
use core::ptr::NonNull;
use abi::path::Path;
use ext2::Ext2Fs;
use kernel_fs::devfs;
use libk::{
random,
vfs::{impls::read_fn_node, NodeRef},
block, random,
vfs::{impls::read_fn_node, IoContext, NodeRef},
};
use libk_mm::{
address::{PhysicalAddress, Virtualize},
@ -51,13 +53,29 @@ unsafe impl BlockAllocator for FileBlockAllocator {
}
/// Constructs an instance of a filesystem for given set of [MountOptions]
pub fn create_filesystem(options: &MountOptions) -> Result<NodeRef, Error> {
let fs_name = options.filesystem.unwrap();
pub fn create_filesystem(ioctx: &mut IoContext, options: &MountOptions) -> Result<NodeRef, Error> {
let Some(fs_name) = options.filesystem else {
log::warn!("TODO: mount without filesystem type/fs probing not yet implemented");
return Err(Error::NotImplemented);
};
let source = options.source.map(|path| {
let path = Path::from_str(path);
if !path.is_absolute() {
todo!();
}
ioctx.find(None, path, true, true)
});
match fs_name {
"devfs" => Ok(devfs::root().clone()),
"sysfs" => Ok(sysfs::root().clone()),
_ => todo!(),
"ext2" if let Some(source) = source => {
let source = source?;
let device = source.as_block_device()?;
block!(Ext2Fs::create(device).await)?
}
_ => Err(Error::InvalidArgument),
}
}


@ -1,5 +1,6 @@
//! osdev-x kernel crate
#![feature(
if_let_guard,
step_trait,
decl_macro,
naked_functions,


@ -40,7 +40,7 @@ pub(crate) fn mount(options: &MountOptions<'_>) -> Result<(), Error> {
let process = thread.process();
run_with_io(&process, |mut io| {
let fs_root = fs::create_filesystem(options)?;
let fs_root = fs::create_filesystem(io.ioctx_mut(), options)?;
io.ioctx_mut().mount(options.target, fs_root)?;
Ok(())
})


@ -86,7 +86,7 @@ struct DirectoryEntry {
/// Name of the entry
pub name: FixedString<256>,
/// Type of the entry
pub ty: FileType,
pub ty: Option<FileType>,
}
enum PollControl(u32) {