Compare commits

...

3 Commits

23 changed files with 2223 additions and 609 deletions

14
Cargo.lock generated
View File

@ -2647,6 +2647,19 @@ dependencies = [
"yggdrasil-abi",
]
[[package]]
name = "ygg_driver_fat32"
version = "0.1.0"
dependencies = [
"async-trait",
"bytemuck",
"device-api",
"libk",
"libk-util",
"log",
"yggdrasil-abi",
]
[[package]]
name = "ygg_driver_input"
version = "0.1.0"
@ -2895,6 +2908,7 @@ dependencies = [
"yboot-proto",
"ygg_driver_acpi",
"ygg_driver_ahci",
"ygg_driver_fat32",
"ygg_driver_input",
"ygg_driver_net_core",
"ygg_driver_net_loopback",

View File

@ -38,6 +38,7 @@ ygg_driver_net_rtl81xx.path = "driver/net/rtl81xx"
memfs = { path = "driver/fs/memfs" }
ext2 = { path = "driver/fs/ext2" }
ygg_driver_fat32.path = "driver/fs/fat32"
log.workspace = true
bitflags.workspace = true

View File

@ -5,7 +5,7 @@ use std::{
};
use kernel_arch_interface::{
cpu::IpiQueue,
cpu::{CpuData, IpiQueue},
mem::{
DeviceMemoryAttributes, KernelTableManager, PhysicalMemoryAllocator, RawDeviceMemoryMapping,
},
@ -36,17 +36,21 @@ pub struct TaskContextImpl<K: KernelTableManager, PA: PhysicalMemoryAllocator>(
static DUMMY_INTERRUPT_MASK: AtomicBool = AtomicBool::new(true);
pub struct DummyCpuData;
impl CpuData for DummyCpuData {}
impl Architecture for ArchitectureImpl {
type PerCpuData = ();
type PerCpuData = DummyCpuData;
type CpuFeatures = ();
type BreakpointType = u8;
const BREAKPOINT_VALUE: Self::BreakpointType = 0x00;
fn local_cpu() -> *mut Self::PerCpuData {
fn local_cpu() -> *mut () {
unimplemented!()
}
unsafe fn set_local_cpu(_cpu: *mut Self::PerCpuData) {
unsafe fn set_local_cpu(_cpu: *mut ()) {
unimplemented!()
}
@ -154,7 +158,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
unimplemented!()
}
fn as_address_with_asid(&self) -> u64 {
fn as_address_with_asid(&self) -> (u64, u64) {
unimplemented!()
}
}

View File

@ -32,6 +32,7 @@ use libk_util::{
OneTimeInit,
};
use queue::PrpList;
use regs::{CAP, CC};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
@ -53,83 +54,19 @@ use self::{
command::{CreateIoCompletionQueue, CreateIoSubmissionQueue, SetFeatureRequest},
error::NvmeError,
queue::QueuePair,
regs::Regs,
};
mod command;
mod drive;
mod error;
mod queue;
mod regs;
pub const MAX_PAGES_PER_REQUEST: usize = 256;
// Use host page
pub const PAGE_SIZE: usize = L3_PAGE_SIZE;
register_bitfields! {
u32,
CC [
IOCQES OFFSET(20) NUMBITS(4) [],
IOSQES OFFSET(16) NUMBITS(4) [],
AMS OFFSET(11) NUMBITS(3) [],
MPS OFFSET(7) NUMBITS(4) [],
CSS OFFSET(4) NUMBITS(3) [
NvmCommandSet = 0
],
ENABLE OFFSET(0) NUMBITS(1) [],
],
CSTS [
CFS OFFSET(1) NUMBITS(1) [],
RDY OFFSET(0) NUMBITS(1) [],
],
AQA [
/// Admin Completion Queue Size in entries - 1
ACQS OFFSET(16) NUMBITS(12) [],
/// Admin Submission Queue Size in entries - 1
ASQS OFFSET(0) NUMBITS(12) [],
]
}
register_bitfields! {
u64,
CAP [
/// Maximum Queue Entries Supported - 1. i.e., 0 means maximum queue len of 1, 1 = 2 etc.
MQES OFFSET(0) NUMBITS(16) [],
/// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
/// change its state.
TO OFFSET(24) NUMBITS(8) [],
/// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
DSTRD OFFSET(32) NUMBITS(4) [],
/// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
NSSRS OFFSET(36) NUMBITS(1) [],
/// Controller supports one or more I/O command sets
CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
/// Controller only supports admin commands and no I/O commands
CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
/// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
MPSMIN OFFSET(48) NUMBITS(4) [],
/// Memory page size maximum -|-
MPSMAX OFFSET(52) NUMBITS(4) [],
]
}
register_structs! {
#[allow(non_snake_case)]
Regs {
(0x00 => CAP: ReadOnly<u64, CAP::Register>),
(0x08 => VS: ReadOnly<u32>),
(0x0C => INTMS: WriteOnly<u32>),
(0x10 => INTMC: WriteOnly<u32>),
(0x14 => CC: ReadWrite<u32, CC::Register>),
(0x18 => _0),
(0x1C => CSTS: ReadOnly<u32, CSTS::Register>),
(0x20 => _1),
(0x24 => AQA: ReadWrite<u32, AQA::Register>),
(0x28 => ASQ: ReadWrite<u64>),
(0x30 => ACQ: ReadWrite<u64>),
(0x38 => _2),
(0x2000 => @END),
}
}
pub struct NvmeController {
regs: IrqSafeSpinlock<DeviceMemoryIo<'static, Regs>>,
admin_q: OneTimeInit<QueuePair>,
@ -366,21 +303,9 @@ impl Device for NvmeController {
let timeout = Duration::from_millis(regs.CAP.read(CAP::TO) * 500);
log::debug!("Worst-case timeout: {:?}", timeout);
while regs.CSTS.matches_all(CSTS::RDY::SET) {
core::hint::spin_loop();
}
if Self::ADMIN_QUEUE_SIZE as u64 > regs.CAP.read(CAP::MQES) + 1 {
todo!(
"queue_slots too big, max = {}",
regs.CAP.read(CAP::MQES) + 1
);
}
// Setup the admin queue (index 0)
let admin_sq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, false, 0) };
let admin_cq_doorbell = unsafe { regs.doorbell_ptr(self.doorbell_shift, true, 0) };
log::debug!("sq_doorbell for adminq = {:p}", admin_sq_doorbell);
let admin_q = QueuePair::new(
&*self.dma,
0,
@ -388,39 +313,18 @@ impl Device for NvmeController {
Self::ADMIN_QUEUE_SIZE,
admin_sq_doorbell,
admin_cq_doorbell,
)
.unwrap();
)?;
regs.AQA.modify(
AQA::ASQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1)
+ AQA::ACQS.val(Self::ADMIN_QUEUE_SIZE as u32 - 1),
);
regs.ASQ.set(admin_q.sq_bus_pointer().into_u64());
regs.ACQ.set(admin_q.cq_bus_pointer().into_u64());
regs.configure_admin_queue(
admin_q.sq_bus_pointer(),
admin_q.cq_bus_pointer(),
Self::ADMIN_QUEUE_SIZE,
Self::ADMIN_QUEUE_SIZE,
)?;
// Configure the controller
const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();
regs.CC.modify(
CC::IOCQES.val(IOCQES)
+ CC::IOSQES.val(IOSQES)
+ CC::MPS.val(0)
+ CC::CSS::NvmCommandSet,
);
// Enable the controller
regs.CC.modify(CC::ENABLE::SET);
log::debug!("Reset the controller");
while !regs.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
core::hint::spin_loop();
}
if regs.CSTS.matches_all(CSTS::CFS::SET) {
todo!("CFS set after reset!");
}
regs.configure_controller();
regs.enable_controller(10000000)?;
self.admin_q.init(admin_q);
@ -498,7 +402,7 @@ pci_driver! {
let regs = unsafe { DeviceMemoryIo::<Regs>::map(bar0, Default::default()) }?;
// Disable the controller
regs.CC.modify(CC::ENABLE::CLEAR);
regs.disable_controller(10000000)?;
let doorbell_shift = regs.CAP.read(CAP::DSTRD) as usize + 1;
let min_page_size = 1 << (regs.CAP.read(CAP::MPSMIN) + 12);

View File

@ -0,0 +1,150 @@
use libk::{dma::BusAddress, error::Error};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
use crate::queue::{CompletionQueueEntry, SubmissionQueueEntry};
// Controller register field layouts. Offsets and widths follow the NVMe Base
// Specification's "Controller Registers" section.
register_bitfields! {
    u32,
    pub CC [
        IOCQES OFFSET(20) NUMBITS(4) [],
        IOSQES OFFSET(16) NUMBITS(4) [],
        AMS OFFSET(11) NUMBITS(3) [],
        MPS OFFSET(7) NUMBITS(4) [],
        CSS OFFSET(4) NUMBITS(3) [
            NvmCommandSet = 0
        ],
        ENABLE OFFSET(0) NUMBITS(1) [],
    ],
    pub CSTS [
        CFS OFFSET(1) NUMBITS(1) [],
        RDY OFFSET(0) NUMBITS(1) [],
    ],
    pub AQA [
        /// Admin Completion Queue Size in entries - 1
        ACQS OFFSET(16) NUMBITS(12) [],
        /// Admin Submission Queue Size in entries - 1
        ASQS OFFSET(0) NUMBITS(12) [],
    ]
}

register_bitfields! {
    u64,
    pub CAP [
        /// Maximum Queue Entries Supported - 1. i.e., 0 means maximum queue len of 1, 1 = 2 etc.
        MQES OFFSET(0) NUMBITS(16) [],
        /// Timeout. Represents the worst-case time the host software should wait for CSTS.RDY to
        /// change its state.
        TO OFFSET(24) NUMBITS(8) [],
        /// Doorbell stride. Stride in bytes = pow(2, 2 + DSTRD).
        DSTRD OFFSET(32) NUMBITS(4) [],
        /// NVM Subsystem Reset Supported (see NVMe BS Section 3.7.1)
        NSSRS OFFSET(36) NUMBITS(1) [],
        /// Controller supports one or more I/O command sets
        CSS_IO_COMMANDS OFFSET(43) NUMBITS(1) [],
        /// Controller only supports admin commands and no I/O commands
        CSS_ADMIN_ONLY OFFSET(44) NUMBITS(1) [],
        /// Memory page size minimum (bytes = pow(2, 12 + MPSMIN))
        MPSMIN OFFSET(48) NUMBITS(4) [],
        /// Memory page size maximum -|-
        MPSMAX OFFSET(52) NUMBITS(4) [],
    ]
}

// Memory-mapped controller register block. The mapping extends to 0x2000 to
// also cover the doorbell area (doorbell pointers are computed from this base
// elsewhere in the driver — see `doorbell_ptr` usage).
register_structs! {
    #[allow(non_snake_case)]
    pub Regs {
        (0x00 => pub CAP: ReadOnly<u64, CAP::Register>),
        (0x08 => pub VS: ReadOnly<u32>),
        (0x0C => pub INTMS: WriteOnly<u32>),
        (0x10 => pub INTMC: WriteOnly<u32>),
        (0x14 => pub CC: ReadWrite<u32, CC::Register>),
        (0x18 => _0),
        (0x1C => pub CSTS: ReadOnly<u32, CSTS::Register>),
        (0x20 => _1),
        (0x24 => AQA: ReadWrite<u32, AQA::Register>),
        (0x28 => ASQ: ReadWrite<u64>),
        (0x30 => ACQ: ReadWrite<u64>),
        (0x38 => _2),
        (0x2000 => @END),
    }
}
impl Regs {
pub fn configure_admin_queue(
&self,
submission_queue_pointer: BusAddress,
completion_queue_pointer: BusAddress,
submission_queue_size: usize,
completion_queue_size: usize,
) -> Result<(), Error> {
let max_queue_size = self.CAP.read(CAP::MQES) + 1;
if submission_queue_size as u64 > max_queue_size {
log::error!("admin submission queue too large");
return Err(Error::InvalidArgument);
}
if completion_queue_size as u64 > max_queue_size {
log::error!("admin completion queue too large");
return Err(Error::InvalidArgument);
}
self.AQA.write(
AQA::ASQS.val(submission_queue_size as u32 - 1)
+ AQA::ACQS.val(completion_queue_size as u32 - 1),
);
self.ASQ.set(submission_queue_pointer.into_u64());
self.ACQ.set(completion_queue_pointer.into_u64());
Ok(())
}
pub fn configure_controller(&self) {
const IOSQES: u32 = size_of::<SubmissionQueueEntry>().ilog2();
const IOCQES: u32 = size_of::<CompletionQueueEntry>().ilog2();
self.CC.modify(
CC::IOCQES.val(IOCQES)
+ CC::IOSQES.val(IOSQES)
+ CC::MPS.val(0)
+ CC::CSS::NvmCommandSet,
);
}
pub fn enable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
self.CC.modify(CC::ENABLE::SET);
while timeout_cycles > 0 && !self.CSTS.matches_any(&[CSTS::RDY::SET, CSTS::CFS::SET]) {
timeout_cycles -= 1;
core::hint::spin_loop();
}
if timeout_cycles == 0 {
return Err(Error::TimedOut);
}
if self.CSTS.matches_all(CSTS::CFS::SET) {
log::error!("nvme: controller fatal status after enable");
return Err(Error::InvalidArgument);
}
Ok(())
}
pub fn disable_controller(&self, mut timeout_cycles: u64) -> Result<(), Error> {
self.CC.modify(CC::ENABLE::CLEAR);
while timeout_cycles > 0 && self.CSTS.matches_all(CSTS::RDY::SET) {
timeout_cycles -= 1;
core::hint::spin_loop();
}
if timeout_cycles > 0 {
Ok(())
} else {
Err(Error::TimedOut)
}
}
}

View File

@ -0,0 +1,14 @@
[package]
name = "ygg_driver_fat32"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
device-api.workspace = true
libk.workspace = true
libk-util.workspace = true
bytemuck.workspace = true
log.workspace = true
async-trait.workspace = true

View File

@ -0,0 +1,176 @@
use core::fmt;
use libk_util::{get_le_u16, get_le_u32};
use crate::FsLayout;
/// Borrowed view over a 512-byte FAT boot sector (BIOS Parameter Block).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct Bpb<'a> {
    bytes: &'a [u8],
}

/// Borrowed view over the FAT32 Extended BPB portion of the boot sector.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct Fat32Ebpb<'a> {
    bytes: &'a [u8],
}

/// Owned copy of the FAT32 FSInfo sector.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct Fat32FsInfo {
    bytes: [u8; 512],
}

/// FAT cluster number. Data clusters start at number 2 (see
/// `ClusterNumber::first_sector`, which subtracts 2).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct ClusterNumber(pub u32);

/// Byte string as stored in FAT short-name fields, with trailing space/NUL
/// padding trimmed on construction; compared case-insensitively via
/// [FatStr::eq_ignore_case].
#[derive(PartialEq, Eq)]
#[repr(transparent)]
pub struct FatStr([u8]);
impl<'a> Bpb<'a> {
    /// Wraps a boot sector; panics unless `bytes` is exactly 512 bytes long.
    pub fn from_bytes(bytes: &'a [u8]) -> Self {
        assert_eq!(bytes.len(), 512);
        Self { bytes }
    }

    /// Bytes per logical sector (BPB offset 0x0B).
    pub fn bytes_per_sector(&self) -> usize {
        get_le_u16(&self.bytes[0x0B..]) as usize
    }

    /// Sectors per allocation cluster (offset 0x0D).
    pub fn sectors_per_cluster(&self) -> usize {
        self.bytes[0x0D] as usize
    }

    /// Reserved sectors preceding the first FAT (offset 0x0E).
    pub fn reserved_sectors(&self) -> usize {
        get_le_u16(&self.bytes[0x0E..]) as usize
    }

    /// Number of FAT copies (offset 0x10).
    pub fn fat_count(&self) -> usize {
        self.bytes[0x10] as usize
    }

    /// Root directory entry count (offset 0x11). Per the FAT specification
    /// this field is 0 on FAT32 volumes.
    pub fn root_directory_entries(&self) -> usize {
        get_le_u16(&self.bytes[0x11..]) as usize
    }

    /// 16-bit total sector count (offset 0x13); 0 when the 32-bit field at
    /// 0x20 is in use.
    pub fn total_sectors_16(&self) -> u16 {
        get_le_u16(&self.bytes[0x13..])
    }

    /// 16-bit FAT size in sectors (offset 0x16); 0 on FAT32, which stores the
    /// FAT size in the EBPB instead.
    pub fn fat_size_16(&self) -> u16 {
        get_le_u16(&self.bytes[0x16..])
    }

    /// Total sector count: the 16-bit field, or the 32-bit field at offset
    /// 0x20 when the former is 0.
    pub fn total_sectors(&self) -> u64 {
        let small = self.total_sectors_16() as u64;
        if small == 0 {
            get_le_u32(&self.bytes[0x20..]) as u64
        } else {
            small
        }
    }
}
impl<'a> Fat32Ebpb<'a> {
    /// Wraps a boot sector; panics unless `bytes` is exactly 512 bytes long.
    pub fn from_bytes(bytes: &'a [u8]) -> Self {
        assert_eq!(bytes.len(), 512);
        Self { bytes }
    }

    /// 32-bit FAT size in sectors (EBPB offset 0x24).
    pub fn sectors_per_fat(&self) -> usize {
        get_le_u32(&self.bytes[0x24..]) as usize
    }

    /// First cluster of the root directory (offset 0x2C).
    pub fn root_directory_cluster(&self) -> ClusterNumber {
        ClusterNumber(get_le_u32(&self.bytes[0x2C..]))
    }

    /// Sector number of the FSInfo structure (offset 0x30).
    pub fn fsinfo_sector(&self) -> u64 {
        get_le_u16(&self.bytes[0x30..]) as u64
    }

    /// Volume label: 11 bytes at offset 0x47, trailing padding trimmed.
    pub fn volume_label(&self) -> &FatStr {
        FatStr::new(&self.bytes[0x47..0x52])
    }
}
impl FatStr {
    /// Wraps `bytes`, trimming trailing spaces and NUL bytes (FAT name fields
    /// are space-padded to their full width).
    pub fn new(bytes: &[u8]) -> &Self {
        let len = bytes
            .iter()
            .rposition(|&c| c != b' ' && c != 0)
            .map(|len| len + 1)
            .unwrap_or(0);
        // SAFETY: FatStr is #[repr(transparent)] over [u8], so &[u8] and
        // &FatStr have identical representation.
        unsafe { core::mem::transmute(&bytes[..len]) }
    }

    /// Wraps a `str`, trimming trailing spaces only (unlike [FatStr::new],
    /// trailing NULs are kept).
    #[inline]
    pub fn from_str(str: &str) -> &Self {
        // SAFETY: same #[repr(transparent)] representation argument as `new`
        unsafe { core::mem::transmute(str.trim_end_matches(' ').as_bytes()) }
    }

    /// Length in bytes after trimming.
    #[inline]
    pub fn len(&self) -> usize {
        self.0.len()
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Raw (already trimmed) bytes of the name.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: #[repr(transparent)] over [u8]
        unsafe { core::mem::transmute(self) }
    }

    /// ASCII case-insensitive comparison.
    #[inline]
    pub fn eq_ignore_case(&self, other: &Self) -> bool {
        self.0.eq_ignore_ascii_case(&other.0)
    }
}
// Renders the name uppercased, one byte per character (each byte interpreted
// as its one-byte char value).
impl fmt::Display for FatStr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0
            .iter()
            .try_for_each(|&b| write!(f, "{}", b.to_ascii_uppercase() as char))
    }
}

// Same rendering as Display, wrapped in double quotes.
impl fmt::Debug for FatStr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "\"{}\"", self)
    }
}
impl Fat32FsInfo {
    const SIGNATURE0: u32 = 0x41615252;
    const SIGNATURE1: u32 = 0x61417272;

    /// Takes ownership of a raw 512-byte FSInfo sector.
    pub fn from_bytes(bytes: [u8; 512]) -> Self {
        Self { bytes }
    }

    /// Checks the lead signature (offset 0) and structure signature
    /// (offset 484) that identify a valid FAT32 FSInfo sector.
    pub fn is_valid_fat32(&self) -> bool {
        let lead = u32::from_le_bytes(self.bytes[0..4].try_into().unwrap());
        let structure = u32::from_le_bytes(self.bytes[484..488].try_into().unwrap());
        lead == Self::SIGNATURE0 && structure == Self::SIGNATURE1
    }
}
impl ClusterNumber {
    /// Returns the first device sector holding this cluster's data.
    ///
    /// NOTE(review): assumes a valid data cluster (number >= 2); cluster
    /// numbers 0/1 would underflow the subtraction — confirm callers never
    /// pass them.
    pub fn first_sector(&self, layout: &FsLayout) -> u64 {
        layout.first_data_sector + (self.0 as u64 - 2) * layout.sectors_per_cluster as u64
    }
}

View File

@ -0,0 +1,441 @@
use core::{any::Any, fmt, mem::MaybeUninit};
use alloc::sync::Arc;
use libk::{
block,
error::Error,
vfs::{
CommonImpl, CreateInfo, DirectoryImpl, DirectoryOpenPosition, Filename, Metadata, Node,
NodeFlags, NodeRef,
},
};
use libk_util::{
get_le_u16, get_le_u32,
string::{chars_equal_ignore_case, Utf16LeStr},
};
use yggdrasil_abi::{
io::{DirectoryEntry, FileMode, GroupId, UserId},
util::FixedString,
};
use crate::{
data::{ClusterNumber, FatStr},
file::FileNode,
Fat32Fs,
};
/// On-disk size of a single directory entry, in bytes.
pub const DIRENT_SIZE: usize = 32;

// Directory entry attribute bits (byte 11 of the entry)
const ATTR_READ_ONLY: u8 = 0x01;
const ATTR_HIDDEN: u8 = 0x02;
const ATTR_SYSTEM: u8 = 0x04;
const ATTR_VOLUME_ID: u8 = 0x08;
const ATTR_DIRECTORY: u8 = 0x10;
const ATTR_ARCHIVE: u8 = 0x20;

// A long-filename entry is identified by this exact attribute combination,
// tested under ATTR_LFN_MASK
const ATTR_LFN: u8 = ATTR_READ_ONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME_ID;
const ATTR_LFN_MASK: u8 =
    ATTR_READ_ONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME_ID | ATTR_DIRECTORY | ATTR_ARCHIVE;

/// Set in the LFN sequence byte to mark the last (highest-ordinal) LFN entry
const LFN_LAST: u8 = 0x40;
/// Decoded view of one real (non-free, non-LFN) directory entry.
#[derive(Debug)]
struct FatDirectoryEntry<'a> {
    /// Accumulated long filename, if LFN entries preceded this one
    long_filename: Option<&'a Utf16LeStr>,
    /// The 8.3 short name, split into base name and extension
    short_filename: ShortFilename<'a>,
    is_directory: bool,
    /// First data cluster of the file or subdirectory
    first_cluster: ClusterNumber,
    size_bytes: u64,
}

/// VFS node backing a FAT32 directory.
pub struct DirectoryNode {
    pub(crate) fs: Arc<Fat32Fs>,
    /// First cluster of the directory's entry list
    pub(crate) cluster: ClusterNumber,
    pub(crate) size_bytes: u64,
    pub(crate) metadata: Metadata,
    // Will be used when metadata needs to be updated
    #[allow(unused)]
    pub(crate) parent: Option<ClusterNumber>,
}

/// 8.3 short filename split at the implicit dot.
#[derive(PartialEq, Eq)]
struct ShortFilename<'a> {
    filename: &'a FatStr,
    extension: &'a FatStr,
}
impl DirectoryNode {
    /// Walks raw directory entries starting at byte offset `start_position`,
    /// accumulating long-filename (LFN) runs and invoking `predicate` for each
    /// real entry decoded.
    ///
    /// `predicate` receives the running index of the entry and the decoded
    /// entry; returning `Ok(false)` stops the walk early. Returns the number
    /// of entries visited and the byte position reached.
    async fn iterate<F: FnMut(usize, FatDirectoryEntry) -> Result<bool, Error>>(
        &self,
        mut predicate: F,
        start_position: u64,
        limit: usize,
    ) -> Result<(usize, u64), Error> {
        assert_eq!(start_position % DIRENT_SIZE as u64, 0);
        // A long filename can span up to 20 LFN entries of 13 UTF-16 code
        // units each: 20 * 13 * 2 = 520 bytes. A 512-byte buffer would panic
        // on the slice copy for a sequence-20 entry.
        let mut lfn_buffer = [0; 520];
        let mut remaining = limit;
        let mut offset = 0;
        let mut position = start_position;
        let entries_per_sector = self.fs.layout.bytes_per_sector / DIRENT_SIZE;
        let mut end_of_dir = false;
        let mut entry_count = 0;

        while remaining != 0 && !end_of_dir {
            let sector_in_cluster = position / self.fs.layout.bytes_per_sector as u64;
            if sector_in_cluster >= self.fs.layout.sectors_per_cluster as u64 {
                todo!("TODO: handle multi-cluster directories");
            }
            let offset_in_sector = (position % self.fs.layout.bytes_per_sector as u64) as usize;
            let max_entries = core::cmp::min(
                limit - offset,
                entries_per_sector - offset_in_sector / DIRENT_SIZE,
            );

            self.fs
                .with_cluster_sector(self.cluster, sector_in_cluster, |buffer| {
                    for i in 0..max_entries {
                        let offset = offset_in_sector + i * DIRENT_SIZE;
                        let entry = &buffer[offset..offset + DIRENT_SIZE];

                        // name[0] == 0x00 marks the end of the directory
                        if entry[0] == 0 {
                            end_of_dir = true;
                            break;
                        }
                        // If name[0] == 0xE5, the entry is free
                        if entry[0] == 0xE5 {
                            continue;
                        }

                        let attr = entry[11];

                        if attr & ATTR_LFN_MASK == ATTR_LFN {
                            // LFN entry: 13 UTF-16 chars split across three
                            // byte ranges, placed by sequence number
                            let ord = ((entry[0] & !LFN_LAST) as usize - 1) * 2;
                            lfn_buffer[ord * 13..ord * 13 + 10].copy_from_slice(&entry[1..11]);
                            lfn_buffer[ord * 13 + 10..ord * 13 + 22]
                                .copy_from_slice(&entry[14..26]);
                            lfn_buffer[ord * 13 + 22..ord * 13 + 26]
                                .copy_from_slice(&entry[28..32]);
                        } else {
                            // Regular 8.3 entry: terminate the accumulated
                            // LFN. Default to the full buffer so a name that
                            // exactly fills it (and thus has no NUL/0xFFFF
                            // terminator) is not silently dropped.
                            let mut lfn_length = lfn_buffer.len();
                            for i in (0..lfn_buffer.len()).step_by(2) {
                                let word = get_le_u16(&lfn_buffer[i..]);
                                if word == 0 || word == 0xFFFF {
                                    lfn_length = i;
                                    break;
                                }
                            }
                            let long_filename = if lfn_length > 0 {
                                Utf16LeStr::from_utf16le(&lfn_buffer[..lfn_length]).ok()
                            } else {
                                None
                            };
                            let short_filename = ShortFilename {
                                filename: FatStr::new(&entry[..8]),
                                extension: FatStr::new(&entry[8..11]),
                            };
                            let is_directory = match attr & (ATTR_DIRECTORY | ATTR_VOLUME_ID) {
                                0x00 => false,
                                ATTR_DIRECTORY => true,
                                // Volume label pseudo-entries are skipped
                                ATTR_VOLUME_ID => continue,
                                _ => continue,
                            };
                            let first_cluster_hi = get_le_u16(&entry[20..]);
                            let first_cluster_lo = get_le_u16(&entry[26..]);
                            let size_bytes = get_le_u32(&entry[28..]) as u64;
                            let first_cluster =
                                ((first_cluster_hi as u32) << 16) | (first_cluster_lo as u32);
                            let first_cluster = ClusterNumber(first_cluster);

                            let entry = FatDirectoryEntry {
                                long_filename,
                                short_filename,
                                is_directory,
                                first_cluster,
                                size_bytes,
                            };

                            if !predicate(entry_count, entry)? {
                                end_of_dir = true;
                                break;
                            }
                            entry_count += 1;
                            lfn_buffer.fill(0);
                        }
                    }
                    Ok(())
                })
                .await?;

            position += (max_entries * DIRENT_SIZE) as u64;
            offset += max_entries;
            remaining -= max_entries;
        }

        Ok((entry_count, position))
    }

    /// Fills `buffer` with directory entries starting at byte `position`,
    /// preferring the long filename when one exists. Returns the number of
    /// entries written and the new position.
    async fn read_entries_inner(
        &self,
        position: u64,
        buffer: &mut [MaybeUninit<DirectoryEntry>],
    ) -> Result<(usize, u64), Error> {
        if position >= self.size_bytes {
            return Ok((0, position));
        }
        let limit = core::cmp::min(
            ((self.size_bytes - position) / DIRENT_SIZE as u64) as usize,
            buffer.len(),
        );
        let (count, position) = self
            .iterate(
                |index, entry| {
                    let mut name = FixedString::empty();
                    if let Some(lfn) = entry.long_filename {
                        name.append_from_chars(lfn.chars()).unwrap();
                    } else {
                        // Reconstruct NAME.EXT from the 8.3 components
                        name.append_from_bytes(entry.short_filename.filename.as_bytes());
                        if !entry.short_filename.extension.is_empty() {
                            name.append_from_bytes(b".");
                            name.append_from_bytes(entry.short_filename.extension.as_bytes());
                        }
                    }
                    let dirent = DirectoryEntry { ty: None, name };
                    buffer[index].write(dirent);
                    Ok(true)
                },
                position,
                limit,
            )
            .await?;
        assert_eq!(position % DIRENT_SIZE as u64, 0);
        Ok((count, position))
    }

    /// Scans the whole directory for `name` (case-insensitive) and constructs
    /// a VFS node for the match, or fails with [Error::DoesNotExist].
    async fn lookup_inner(&self, name: &Filename) -> Result<NodeRef, Error> {
        let mut found = None;
        self.iterate(
            |_, entry| {
                if entry.name_equals(name) {
                    found = Some((entry.is_directory, entry.first_cluster, entry.size_bytes));
                    // Stop at the first match
                    Ok(false)
                } else {
                    Ok(true)
                }
            },
            0,
            (self.size_bytes / DIRENT_SIZE as u64) as usize,
        )
        .await?;
        let (is_directory, cluster, size) = found.ok_or(Error::DoesNotExist)?;

        // FAT stores no ownership/permissions/times; synthesize defaults
        let metadata = Metadata {
            uid: UserId::root(),
            gid: GroupId::root(),
            mode: if is_directory {
                FileMode::default_dir()
            } else {
                FileMode::default_file()
            },
            inode: Some(cluster.0),
            ctime: 0,
            mtime: 0,
            block_count: size.div_ceil(self.fs.layout.bytes_per_sector as u64),
            block_size: self.fs.layout.bytes_per_sector as u64,
        };

        if is_directory {
            let directory = DirectoryNode {
                fs: self.fs.clone(),
                cluster,
                size_bytes: size,
                parent: Some(self.cluster),
                metadata,
            };
            Ok(Node::directory(
                directory,
                NodeFlags::empty(),
                Some(metadata),
                Some(self.fs.clone()),
            ))
        } else {
            let file = FileNode {
                fs: self.fs.clone(),
                cluster,
                size_bytes: size,
                parent: Some(self.cluster),
                metadata,
            };
            Ok(Node::regular(
                file,
                NodeFlags::empty(),
                Some(metadata),
                Some(self.fs.clone()),
            ))
        }
    }
}
impl CommonImpl for DirectoryNode {
    fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
        Ok(self.size_bytes)
    }

    fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
        Ok(self.metadata.clone())
    }

    // The driver is read-only: metadata updates are rejected
    fn set_metadata(&self, _node: &NodeRef, _metadata: &Metadata) -> Result<(), Error> {
        Err(Error::ReadOnly)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
impl DirectoryImpl for DirectoryNode {
    // All mutating operations are rejected: the driver is read-only
    fn create_node(&self, _parent: &NodeRef, _info: &CreateInfo) -> Result<NodeRef, Error> {
        Err(Error::ReadOnly)
    }

    fn attach_node(
        &self,
        _parent: &NodeRef,
        _child: &NodeRef,
        _name: &Filename,
    ) -> Result<(), Error> {
        Err(Error::ReadOnly)
    }

    fn unlink_node(
        &self,
        _parent: &NodeRef,
        _child: &NodeRef,
        _name: &Filename,
    ) -> Result<(), Error> {
        Err(Error::ReadOnly)
    }

    // Directory positions are raw byte offsets into the entry stream
    fn open(&self, _node: &NodeRef) -> Result<DirectoryOpenPosition, Error> {
        Ok(DirectoryOpenPosition::FromPhysical(0))
    }

    fn lookup(&self, _node: &NodeRef, name: &Filename) -> Result<NodeRef, Error> {
        // TODO validate FAT32 filename
        block!(self.lookup_inner(name).await)?
    }

    fn read_entries(
        &self,
        _node: &NodeRef,
        pos: u64,
        entries: &mut [MaybeUninit<DirectoryEntry>],
    ) -> Result<(usize, u64), Error> {
        block!(self.read_entries_inner(pos, entries).await)?
    }

    // Number of 32-byte entry slots, not the number of live entries
    fn len(&self, _node: &NodeRef) -> Result<usize, Error> {
        Ok(self.size_bytes as usize / DIRENT_SIZE)
    }
}
impl FatDirectoryEntry<'_> {
    /// Case-insensitive name match: against the long filename when one was
    /// decoded, otherwise by parsing `predicate` as an 8.3 short name.
    pub fn name_equals(&self, predicate: &str) -> bool {
        if let Some(lfn) = self.long_filename {
            // Match by LFN
            chars_equal_ignore_case(lfn.chars(), predicate.chars())
        } else {
            // Match by SFN; a name that cannot be expressed as 8.3 never
            // matches an SFN-only entry
            let Some(predicate) = ShortFilename::from_str(predicate) else {
                return false;
            };
            predicate.eq_ignore_case(&self.short_filename)
        }
    }
}
impl<'a> ShortFilename<'a> {
    /// Parses `s` as an 8.3 name, splitting at the first dot.
    ///
    /// Returns [None] when the base name is empty or longer than 8 bytes, or
    /// the extension is longer than 3 bytes (after space-trimming).
    pub fn from_str(s: &'a str) -> Option<Self> {
        let (filename, extension) = match s.split_once('.') {
            Some(xs) => xs,
            None => (s, ""),
        };
        let filename = FatStr::from_str(filename);
        let extension = FatStr::from_str(extension);
        if filename.is_empty() || filename.len() > 8 || extension.len() > 3 {
            return None;
        }
        Some(Self {
            filename,
            extension,
        })
    }

    /// ASCII case-insensitive comparison of both components.
    pub fn eq_ignore_case(&self, other: &Self) -> bool {
        self.filename.eq_ignore_case(other.filename)
            && self.extension.eq_ignore_case(other.extension)
    }
}
// Quoted `NAME` or `NAME.EXT`, matching how the name would be typed.
impl fmt::Debug for ShortFilename<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.extension.is_empty() {
            write!(f, "\"{}\"", self.filename)
        } else {
            write!(f, "\"{}.{}\"", self.filename, self.extension)
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{data::FatStr, directory::ShortFilename};

    // Valid and invalid 8.3 name parsing, including length limits and
    // degenerate inputs
    #[test]
    fn test_short_filename_from_str() {
        assert_eq!(
            ShortFilename::from_str("a1b2c3d4.txt"),
            Some(ShortFilename {
                filename: FatStr::new(b"a1b2c3d4"),
                extension: FatStr::new(b"txt")
            })
        );
        assert_eq!(
            ShortFilename::from_str("a1b2c3d4"),
            Some(ShortFilename {
                filename: FatStr::new(b"a1b2c3d4"),
                extension: FatStr::new(b"")
            })
        );
        assert_eq!(ShortFilename::from_str("a1b2c3d4e5.txt"), None);
        assert_eq!(ShortFilename::from_str("a1b2c3d4.long"), None);
        assert_eq!(ShortFilename::from_str("."), None);
        assert_eq!(ShortFilename::from_str(""), None);
        assert_eq!(ShortFilename::from_str(".ext"), None)
    }

    // 8.3 comparison is ASCII case-insensitive in both components
    #[test]
    fn test_short_filename_ignore_case_eq() {
        assert!(ShortFilename::eq_ignore_case(
            &ShortFilename::from_str("abcdefgh.txt").unwrap(),
            &ShortFilename::from_str("AbCdEfGh.txT").unwrap()
        ));
    }
}

View File

@ -0,0 +1,156 @@
use core::any::Any;
use alloc::{sync::Arc, vec, vec::Vec};
use libk::{
block,
error::Error,
task::sync::AsyncMutex,
vfs::{CommonImpl, InstanceData, Metadata, NodeRef, RegularImpl},
};
use yggdrasil_abi::io::OpenOptions;
use crate::{data::ClusterNumber, Fat32Fs};
/// VFS node backing a FAT32 regular file.
pub struct FileNode {
    pub(crate) fs: Arc<Fat32Fs>,
    /// First data cluster of the file
    pub(crate) cluster: ClusterNumber,
    pub(crate) size_bytes: u64,
    pub(crate) metadata: Metadata,
    // Will be used when metadata needs to be updated
    #[allow(unused)]
    pub(crate) parent: Option<ClusterNumber>,
}

// TODO use a "sliding window" to minimize memory usage when working with large files?
/// Per-open-handle state: a lazily extended cache of the file's cluster chain.
struct OpenedFile {
    cluster_chain: AsyncMutex<Vec<ClusterNumber>>,
}
impl OpenedFile {
    /// Starts a new chain cache seeded with the file's first cluster.
    fn new(first_cluster: ClusterNumber) -> Self {
        Self {
            cluster_chain: AsyncMutex::new(vec![first_cluster]),
        }
    }

    /// Returns the cluster at index `cluster_index` within the file,
    /// following (and caching) the FAT chain from the last known cluster as
    /// needed.
    async fn seek(&self, file: &FileNode, cluster_index: usize) -> Result<ClusterNumber, Error> {
        let mut chain = self.cluster_chain.lock().await;
        if cluster_index >= chain.len() {
            // Chain always holds at least the first cluster, so last() is Some
            let last = *chain.last().unwrap();
            file.fs
                .iterate_clusters(last, cluster_index + 1 - chain.len(), |cluster| {
                    chain.push(cluster);
                })
                .await?;
            assert_eq!(chain.len(), cluster_index + 1);
        }
        Ok(chain[cluster_index])
    }
}
impl FileNode {
    /// Reads up to `buffer.len()` bytes starting at byte offset `pos`,
    /// clamped to the end of the file. Returns the number of bytes read
    /// (0 when `pos` is at or past EOF).
    async fn read_inner(
        &self,
        instance: &OpenedFile,
        mut pos: u64,
        buffer: &mut [u8],
    ) -> Result<usize, Error> {
        if pos >= self.size_bytes {
            return Ok(0);
        }
        let len = buffer.len().min((self.size_bytes - pos) as usize);
        let mut rem = len;
        let mut offset = 0;

        let bps = self.fs.layout.bytes_per_sector as u64;
        let spc = self.fs.layout.sectors_per_cluster as u64;

        while rem != 0 {
            // Decompose the byte position into cluster index, sector within
            // the cluster, and byte offset within the sector
            let cluster_index = pos / (bps * spc);
            let sector_in_cluster = (pos / bps) % spc;
            let offset_in_sector = (pos % bps) as usize;
            // Copy at most up to the end of the current sector per iteration
            let amount = rem.min(bps as usize - offset_in_sector);

            let cluster = instance.seek(self, cluster_index as usize).await?;
            self.fs
                .with_cluster_sector(cluster, sector_in_cluster, |data| {
                    buffer[offset..offset + amount]
                        .copy_from_slice(&data[offset_in_sector..offset_in_sector + amount]);
                    Ok(())
                })
                .await?;

            rem -= amount;
            offset += amount;
            pos += amount as u64;
        }

        Ok(offset)
    }
}
impl CommonImpl for FileNode {
    fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
        Ok(self.metadata)
    }

    fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
        Ok(self.size_bytes)
    }

    // The driver is read-only: metadata updates are rejected
    fn set_metadata(&self, _node: &NodeRef, _metadata: &Metadata) -> Result<(), Error> {
        Err(Error::ReadOnly)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
impl RegularImpl for FileNode {
    /// Opens the file read-only; any write-implying option is rejected.
    fn open(
        &self,
        _node: &NodeRef,
        opts: OpenOptions,
    ) -> Result<(u64, Option<InstanceData>), Error> {
        if opts.contains_any(OpenOptions::TRUNCATE | OpenOptions::APPEND | OpenOptions::WRITE) {
            return Err(Error::ReadOnly);
        }
        // Each open handle keeps its own cluster-chain cache
        let instance = Arc::new(OpenedFile::new(self.cluster));
        Ok((0, Some(instance)))
    }

    fn close(&self, _node: &NodeRef, _instance: Option<&InstanceData>) -> Result<(), Error> {
        Ok(())
    }

    fn read(
        &self,
        _node: &NodeRef,
        instance: Option<&InstanceData>,
        pos: u64,
        buf: &mut [u8],
    ) -> Result<usize, Error> {
        // The instance must be the OpenedFile created by open()
        let instance = instance
            .and_then(|p| p.downcast_ref::<OpenedFile>())
            .ok_or(Error::InvalidFile)?;
        block!(self.read_inner(instance, pos, buf).await)?
    }

    // Writes and truncation are rejected: the driver is read-only
    fn write(
        &self,
        _node: &NodeRef,
        _instance: Option<&InstanceData>,
        _pos: u64,
        _buf: &[u8],
    ) -> Result<usize, Error> {
        Err(Error::ReadOnly)
    }

    fn truncate(&self, _node: &NodeRef, _new_size: u64) -> Result<(), Error> {
        Err(Error::ReadOnly)
    }
}

View File

@ -0,0 +1,225 @@
#![no_std]
use alloc::sync::Arc;
use async_trait::async_trait;
use data::{Bpb, ClusterNumber, Fat32Ebpb, Fat32FsInfo};
use directory::{DirectoryNode, DIRENT_SIZE};
use libk::{
device::block::{cache::DeviceMapper, BlockDevice},
error::Error,
vfs::{Filesystem, FilesystemMountOption, Metadata, Node, NodeFlags, NodeRef},
};
use libk_util::get_le_u32;
use yggdrasil_abi::io::{FileMode, GroupId, UserId};
extern crate alloc;
mod data;
mod directory;
mod file;
/// Geometry of a mounted FAT32 volume, derived from the BPB/EBPB at mount
/// time. Sizes are in the volume's own units (sectors, bytes).
pub struct FsLayout {
    // From BPB/EBPB
    bytes_per_sector: usize,
    total_sectors: u64,
    sectors_per_fat: usize,
    sectors_per_cluster: usize,
    root_directory_cluster: ClusterNumber,
    root_directory_entries: usize,
    first_fat_sector: u64,
    // Computed
    first_data_sector: u64,
}

/// Read-only FAT32 filesystem over a block device.
pub struct Fat32Fs {
    /// Sector-granular access to the device, optionally cached
    mapper: DeviceMapper,
    pub layout: FsLayout,
}
#[async_trait]
impl Filesystem for Fat32Fs {
    // Name shown in mount listings / diagnostics
    fn display_name(&self) -> &'static str {
        "fat32"
    }
}
impl Fat32Fs {
    /// Mounts a FAT32 filesystem from `device` and returns its root directory
    /// node. The `Sync` mount option disables the block cache.
    pub async fn create<'a, I: IntoIterator<Item = FilesystemMountOption<'a>>>(
        device: Arc<dyn BlockDevice>,
        options: I,
    ) -> Result<NodeRef, Error> {
        let mut cached = true;
        for option in options {
            match option {
                FilesystemMountOption::Sync => cached = false,
                _ => (),
            }
        }

        let fs = Arc::new(Self::create_fs(device, cached).await?);

        // Setup root node. FAT stores no ownership/permissions, so defaults
        // are synthesized.
        let root_metadata = Metadata {
            uid: UserId::root(),
            gid: GroupId::root(),
            mode: FileMode::default_dir(),
            ctime: 0,
            mtime: 0,
            inode: Some(fs.layout.root_directory_cluster.0),
            block_size: fs.layout.bytes_per_sector as u64,
            block_count: 0,
        };
        // NOTE(review): on spec-conforming FAT32 volumes the BPB root-entry
        // count field is 0, which would make this size 0 and the root listing
        // empty — confirm how the root directory size is intended to be
        // derived (e.g. from its cluster chain).
        let root_directory = DirectoryNode {
            fs: fs.clone(),
            cluster: fs.layout.root_directory_cluster,
            parent: None,
            size_bytes: (fs.layout.root_directory_entries * DIRENT_SIZE) as u64,
            metadata: root_metadata,
        };
        let root_node = Node::directory(root_directory, NodeFlags::empty(), None, Some(fs.clone()));

        Ok(root_node)
    }

    /// Validates the boot sector and FSInfo, computes the volume layout and
    /// sets up the (optionally cached) device mapper.
    async fn create_fs(device: Arc<dyn BlockDevice>, cached: bool) -> Result<Fat32Fs, Error> {
        let mut boot_sector = [0; 512];
        let mut fsinfo = [0; 512];

        device.read_exact(0, &mut boot_sector).await?;
        let bpb = Bpb::from_bytes(&boot_sector);
        let ebpb = Fat32Ebpb::from_bytes(&boot_sector);

        let fsinfo_sector = ebpb.fsinfo_sector();
        let bytes_per_sector = bpb.bytes_per_sector();
        let total_sectors = bpb.total_sectors();

        // A non-zero 16-bit FAT size means FAT12/FAT16, not FAT32
        if bpb.fat_size_16() != 0 {
            log::warn!("fat32: not a FAT32 filesystem");
            return Err(Error::InvalidArgument);
        }
        if fsinfo_sector >= total_sectors {
            log::warn!("fat32: FSInfo sector is beyond filesystem end");
            return Err(Error::InvalidArgument);
        }
        device
            .read_exact(bytes_per_sector as u64 * fsinfo_sector, &mut fsinfo)
            .await?;
        let fsinfo = Fat32FsInfo::from_bytes(fsinfo);
        if !fsinfo.is_valid_fat32() {
            log::warn!("fat32: not a FAT32 filesystem");
            return Err(Error::InvalidArgument);
        }

        let sectors_per_fat = ebpb.sectors_per_fat();
        let sectors_per_cluster = bpb.sectors_per_cluster();
        let root_directory_cluster = ebpb.root_directory_cluster();
        let root_directory_entries = bpb.root_directory_entries();
        // + RootDirSectors, but RootDirSectors = 0 on FAT32
        let first_data_sector = bpb.reserved_sectors() + sectors_per_fat * bpb.fat_count();
        let first_fat_sector = bpb.reserved_sectors();

        log::info!("fat32: mounted {:?}", ebpb.volume_label());
        log::info!(
            "fat32: sector {}B, cluster {}B",
            bytes_per_sector,
            sectors_per_cluster * bytes_per_sector
        );

        let layout = FsLayout {
            bytes_per_sector,
            total_sectors,
            sectors_per_fat,
            sectors_per_cluster,
            root_directory_cluster,
            root_directory_entries,
            first_fat_sector: first_fat_sector as u64,
            first_data_sector: first_data_sector as u64,
        };

        let mapper = if cached {
            DeviceMapper::cached_with_capacity(
                device,
                // Most often 512
                layout.bytes_per_sector,
                // 512 * 16 * 4 = 32768
                (layout.bytes_per_sector * layout.sectors_per_cluster) * 4,
                128,
                64,
                "fat32",
            )?
        } else {
            DeviceMapper::uncached(device, layout.bytes_per_sector, "fat32")?
        };

        Ok(Self { layout, mapper })
    }

    /// Runs `mapper` over one sector of the given data cluster, bounds-checked
    /// against the total volume size.
    async fn with_cluster_sector<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
        &self,
        cluster: ClusterNumber,
        sector_in_cluster: u64,
        mapper: F,
    ) -> Result<T, Error> {
        let sector = cluster.first_sector(&self.layout) + sector_in_cluster;
        if sector >= self.layout.total_sectors {
            log::warn!("fat32: sector {sector} beyond filesystem end");
            return Err(Error::InvalidArgument);
        }
        self.mapper
            .try_with(sector * self.layout.bytes_per_sector as u64, mapper)
            .await
    }

    /// Runs `mapper` over one sector of the first FAT, bounds-checked against
    /// the FAT size.
    async fn with_fat_sector<T, F: FnOnce(&[u8]) -> Result<T, Error>>(
        &self,
        fat_sector: u64,
        mapper: F,
    ) -> Result<T, Error> {
        if fat_sector >= self.layout.sectors_per_fat as u64 {
            log::warn!("fat32: FAT sector {fat_sector} outside of FAT size");
            return Err(Error::InvalidArgument);
        }
        let sector = self.layout.first_fat_sector + fat_sector;
        self.mapper
            .try_with(sector * self.layout.bytes_per_sector as u64, mapper)
            .await
    }

    /// Follows the FAT chain for `count` steps after `start`, invoking
    /// `mapper` with each successive cluster. Fails with [Error::InvalidFile]
    /// if the chain ends (or hits a bad-cluster marker) early.
    async fn iterate_clusters<F: FnMut(ClusterNumber)>(
        &self,
        start: ClusterNumber,
        count: usize,
        mut mapper: F,
    ) -> Result<(), Error> {
        let mut current = start;
        for _ in 0..count {
            // Each FAT32 entry is a 32-bit LE word; the top 4 bits are
            // reserved and masked off
            let offset_in_fat = current.0 as usize * size_of::<u32>();
            let sector_in_fat = (offset_in_fat / self.layout.bytes_per_sector) as u64;
            let offset_in_sector = offset_in_fat % self.layout.bytes_per_sector;
            let cluster = self
                .with_fat_sector(sector_in_fat, |buffer| {
                    let number = get_le_u32(&buffer[offset_in_sector..]) & 0x0FFFFFFF;
                    Ok(number)
                })
                .await?;
            // Values >= 0x0FFFFFF7 are bad-cluster or end-of-chain markers
            if cluster >= 0x0FFFFFF7 {
                return Err(Error::InvalidFile);
            }
            let cluster = ClusterNumber(cluster);
            mapper(cluster);
            current = cluster;
        }
        Ok(())
    }
}

View File

@ -5,7 +5,8 @@
maybe_uninit_slice,
allocator_api,
let_chains,
const_trait_impl
const_trait_impl,
str_from_utf16_endian
)]
#![allow(clippy::new_without_default)]
@ -24,6 +25,7 @@ pub mod io;
pub mod lru_hash_table;
pub mod queue;
pub mod ring;
pub mod string;
pub mod sync;
pub mod waker;
@ -152,6 +154,14 @@ impl<T, const N: usize> DerefMut for StaticVector<T, N> {
}
}
/// Decodes the first two bytes of `bytes` as a little-endian `u16`.
/// Panics when `bytes` is shorter than two bytes, like direct indexing.
pub fn get_le_u16(bytes: &[u8]) -> u16 {
    let word: [u8; 2] = bytes[..2].try_into().unwrap();
    u16::from_le_bytes(word)
}
/// Decodes the first four bytes of `bytes` as a little-endian `u32`.
/// Panics when `bytes` is shorter than four bytes, like direct indexing.
pub fn get_le_u32(bytes: &[u8]) -> u32 {
    let word: [u8; 4] = bytes[..4].try_into().unwrap();
    u32::from_le_bytes(word)
}
#[cfg(test)]
mod tests {
use crate::RangeExt;

View File

@ -0,0 +1,194 @@
use core::{fmt, slice::ChunksExact};
use crate::get_le_u16;
/// Errors returned when validating a raw little-endian UTF-16 byte slice.
#[derive(Debug)]
pub enum Utf16Error {
    /// The byte slice length is not a multiple of two.
    InvalidLength,
    /// A malformed surrogate: a low surrogate with no preceding high
    /// surrogate, or a high surrogate not followed by a low one.
    TrailingSurrogates,
}
/// A borrowed, validated UTF-16LE string slice, analogous to `str` for UTF-8.
///
/// `#[repr(transparent)]` over the raw bytes so a validated `&[u8]` can be
/// reinterpreted as `&Utf16LeStr`.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Utf16LeStr {
    // Invariant: even length, all surrogates correctly paired.
    raw: [u8],
}
/// Iterator over the decoded `char`s of a [Utf16LeStr].
pub struct Utf16LeIter<'a> {
    // Yields the underlying bytes two at a time (one UTF-16 code unit).
    chars: ChunksExact<'a, u8>,
}
impl Utf16LeStr {
    /// Validates `raw` as well-formed UTF-16LE and borrows it as a string.
    pub fn from_utf16le(raw: &[u8]) -> Result<&Self, Utf16Error> {
        Self::validate(raw)?;
        // SAFETY: `raw` was just validated above.
        Ok(unsafe { Self::from_utf16le_unchecked(raw) })
    }

    /// Reinterprets `raw` as a [Utf16LeStr] without validation.
    ///
    /// # Safety
    /// `raw` must be valid UTF-16LE: even length, every surrogate correctly
    /// paired. The iterator relies on this for its unchecked decoding.
    pub unsafe fn from_utf16le_unchecked(raw: &[u8]) -> &Self {
        // SAFETY: the type is `#[repr(transparent)]` over `[u8]`, so the
        // reference layouts match; validity is the caller's obligation.
        core::mem::transmute(raw)
    }

    /// Returns an iterator over the decoded characters.
    pub fn chars(&self) -> Utf16LeIter {
        Utf16LeIter {
            chars: self.raw.chunks_exact(size_of::<u16>()),
        }
    }

    /// Checks that `raw` is well-formed UTF-16LE.
    fn validate(raw: &[u8]) -> Result<(), Utf16Error> {
        // UTF-16 code units are two bytes each.
        if raw.len() % 2 != 0 {
            return Err(Utf16Error::InvalidLength);
        }
        let mut iter = raw.chunks_exact(size_of::<u16>());
        while let Some(chunk) = iter.next() {
            let word = get_le_u16(chunk);
            // A low surrogate may only appear directly after a high one;
            // in that case it is consumed by the inner `iter.next()` below,
            // so seeing one here means it is unpaired.
            if is_low_surrogate(word) {
                return Err(Utf16Error::TrailingSurrogates);
            }
            if is_high_surrogate(word) {
                // A high surrogate must be immediately followed by a low one.
                let Some(chunk) = iter.next() else {
                    return Err(Utf16Error::TrailingSurrogates);
                };
                let word = get_le_u16(chunk);
                if !is_low_surrogate(word) {
                    return Err(Utf16Error::TrailingSurrogates);
                }
            }
        }
        Ok(())
    }
}
impl Iterator for Utf16LeIter<'_> {
    type Item = char;

    fn next(&mut self) -> Option<Self::Item> {
        let chunk = self.chars.next()?;
        let word = get_le_u16(chunk);
        if !is_high_surrogate(word) {
            // SAFETY: validation rejects unpaired low surrogates, so any
            // non-high-surrogate unit is a valid Unicode scalar value.
            Some(unsafe { core::char::from_u32_unchecked(word as u32) })
        } else {
            // SAFETY: validation guarantees every high surrogate is followed
            // by a low surrogate, so the next code unit exists.
            let chunk = unsafe { self.chars.next().unwrap_unchecked() };
            let next = get_le_u16(chunk);
            // Standard UTF-16 surrogate-pair decoding.
            let ch = (((word as u32 - 0xD800) << 10) | (next as u32 - 0xDC00)) + 0x10000;
            // SAFETY: a well-formed surrogate pair always decodes to a valid
            // code point in 0x10000..=0x10FFFF.
            Some(unsafe { core::char::from_u32_unchecked(ch) })
        }
    }
}
impl fmt::Debug for Utf16LeStr {
    // Renders the string decoded and double-quoted, like `str`'s Debug
    // (but without escaping control/quote characters yet).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "\"")?;
        for ch in self.chars() {
            // TODO escaping
            write!(f, "{ch}")?;
        }
        write!(f, "\"")
    }
}
/// Returns `true` when `word` is a UTF-16 high (leading) surrogate,
/// i.e. lies in `0xD800..=0xDBFF`.
fn is_high_surrogate(word: u16) -> bool {
    (0xD800..=0xDBFF).contains(&word)
}
/// Returns `true` when `word` is a UTF-16 low (trailing) surrogate,
/// i.e. lies in `0xDC00..=0xDFFF`.
fn is_low_surrogate(word: u16) -> bool {
    (0xDC00..=0xDFFF).contains(&word)
}
/// Compares two characters case-insensitively using their full Unicode
/// lowercase expansions (some characters lowercase to multiple code points,
/// so a plain `==` on single lowered chars would be insufficient).
pub fn char_equal_ignore_case(c0: char, c1: char) -> bool {
    // `to_lowercase` yields an iterator of chars; `Iterator::eq` compares
    // both expansions element-wise, including their lengths.
    c0.to_lowercase().eq(c1.to_lowercase())
}
/// Case-insensitively compares two character streams; `true` only when both
/// have the same length and match pairwise under [char_equal_ignore_case].
pub fn chars_equal_ignore_case<I0: Iterator<Item = char>, I1: Iterator<Item = char>>(
    mut i0: I0,
    mut i1: I1,
) -> bool {
    loop {
        break match (i0.next(), i1.next()) {
            // Both exhausted at the same time: streams are equal.
            (None, None) => true,
            // Matching pair: keep walking.
            (Some(c0), Some(c1)) if char_equal_ignore_case(c0, c1) => continue,
            // Length mismatch or a differing pair.
            _ => false,
        };
    }
}
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use crate::string::chars_equal_ignore_case;

    use super::Utf16LeStr;

    // Encodes `s` as a little-endian UTF-16 byte vector.
    fn encode_utf16le<S: AsRef<str> + ?Sized>(s: &S) -> Vec<u8> {
        s.as_ref()
            .encode_utf16()
            .flat_map(|word| word.to_le_bytes())
            .collect()
    }

    // Exact (case-sensitive) element-wise comparison of two char streams,
    // including a length check.
    fn chars_equal<I0: Iterator<Item = char>, I1: Iterator<Item = char>>(
        mut i0: I0,
        mut i1: I1,
    ) -> bool {
        loop {
            match (i0.next(), i1.next()) {
                (Some(c0), Some(c1)) if c0 == c1 => (),
                (None, None) => return true,
                (_, _) => return false,
            }
        }
    }

    #[test]
    fn test_chars_equal_ignore_case() {
        assert!(!chars_equal_ignore_case("abc".chars(), "def".chars()));
        assert!(!chars_equal_ignore_case("abc".chars(), "ab".chars()));
        assert!(!chars_equal_ignore_case("abc".chars(), "abcd".chars()));
        assert!(chars_equal_ignore_case("abc".chars(), "abc".chars()));
        assert!(chars_equal_ignore_case("aBc".chars(), "ABc".chars()));
        // Case folding must work outside ASCII as well.
        assert!(chars_equal_ignore_case(
            "Україна".chars(),
            "УКРАЇНА".chars()
        ));
        assert!(chars_equal_ignore_case("日本".chars(), "日本".chars()));
    }

    #[test]
    fn test_utf16le_str() {
        // Single-unit (BMP) characters round-trip through validation and
        // decoding.
        let input = "abcdef";
        let bytes = encode_utf16le(input);
        let utf16 = Utf16LeStr::from_utf16le(&bytes).unwrap();
        assert!(chars_equal(utf16.chars(), input.chars()));

        let input = "юнікод";
        let bytes = encode_utf16le(input);
        let utf16 = Utf16LeStr::from_utf16le(&bytes).unwrap();
        assert!(chars_equal(utf16.chars(), input.chars()));

        let input = "世界";
        let bytes = encode_utf16le(input);
        let utf16 = Utf16LeStr::from_utf16le(&bytes).unwrap();
        assert!(chars_equal(utf16.chars(), input.chars()));

        // Supplementary-plane character: exercises surrogate-pair decoding.
        let input = "\u{01F995}";
        let bytes = encode_utf16le(input);
        let utf16 = Utf16LeStr::from_utf16le(&bytes).unwrap();
        let codepoint = utf16.chars().next().unwrap();
        assert_eq!(codepoint, '\u{01F995}');
    }
}

View File

@ -18,7 +18,7 @@ pub struct DebugOptions {
impl Default for DebugOptions {
fn default() -> Self {
Self {
serial_level: LogLevel::Debug, // LogLevel::Info,
serial_level: LogLevel::Info,
display_level: LogLevel::Info,
disable_program_trace: false,
}

View File

@ -1,492 +0,0 @@
//! Utilities for debug information logging
// TODO
#![allow(missing_docs)]
use core::{
fmt::{self, Arguments},
str::FromStr,
sync::atomic::{AtomicBool, Ordering},
};
use alloc::{
format,
string::{String, ToString},
sync::Arc,
};
use libk_util::{
ring::RingBuffer,
sync::{
spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard},
IrqSafeSpinlock,
},
OneTimeInit, StaticVector,
};
use yggdrasil_abi::error::Error;
use crate::{
arch::Cpu,
config,
fs::sysfs::{
self,
attribute::{StringAttribute, StringAttributeOps},
object::KObject,
},
task::{process::Process, thread::Thread},
time::monotonic_time,
};
const MAX_DEBUG_SINKS: usize = 8;
const RING_LOGGER_CAPACITY: usize = 65536;
static RING_AVAILABLE: AtomicBool = AtomicBool::new(false);
static SERIAL_SINK_SET_UP: AtomicBool = AtomicBool::new(false);
static DEBUG_LOCK: IrqSafeSpinlock<()> = IrqSafeSpinlock::new(());
struct KernelLoggerSink;
/// Locking log sink for dumping panic info
pub struct PanicLoggerSink<'a> {
lock: IrqSafeRwLockReadGuard<'a, StaticVector<DebugSinkWrapper, MAX_DEBUG_SINKS>>,
}
struct RingLoggerInner {
data: RingBuffer<u8>,
}
/// Logger sink which collects output to an internal ring buffer
pub struct RingLoggerSink {
inner: IrqSafeSpinlock<RingLoggerInner>,
}
/// Defines the severity of the message
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LogLevel {
/// Very verbose low-level debugging information
Trace,
/// Debugging and verbose information
Debug,
/// General information about transitions in the system state
Info,
/// Non-critical abnormalities or notices
Warning,
/// Failures of non-essential components
Error,
/// Irrecoverable errors which result in kernel panic
Fatal,
}
/// Generic interface for debug output
pub trait DebugSink: Sync {
/// Sends a single byte to the output
fn putc(&self, c: u8) -> Result<(), Error>;
/// Sends a string of bytes to the output
fn puts(&self, s: &str) -> Result<(), Error> {
for &byte in s.as_bytes() {
self.putc(byte)?;
}
Ok(())
}
/// Returns `true` if the device supports vt100-like control sequences
fn supports_control_sequences(&self) -> bool {
false
}
}
#[derive(Clone)]
pub enum DebugSinkWrapper {
Arc(LogLevel, Arc<dyn DebugSink>),
Static(LogLevel, &'static dyn DebugSink),
}
unsafe impl Send for DebugSinkWrapper {}
unsafe impl Sync for DebugSinkWrapper {}
// #[derive(Clone, Copy)]
// struct DebugSinkWrapper {
// // inner: &'static dyn DebugSink,
// level: LogLevel,
// }
struct SinkWriter<'a> {
sink: &'a dyn DebugSink,
}
impl fmt::Write for SinkWriter<'_> {
fn write_str(&mut self, s: &str) -> fmt::Result {
self.sink.puts(s).map_err(|_| fmt::Error)
}
}
impl DebugSinkWrapper {
#[inline]
pub fn sink(&self) -> &dyn DebugSink {
match self {
Self::Arc(_, arc) => arc.as_ref(),
Self::Static(_, sink) => *sink,
}
}
#[inline]
pub fn level(&self) -> LogLevel {
match self {
Self::Arc(level, _) => *level,
Self::Static(level, _) => *level,
}
}
pub fn set_level(&mut self, target: LogLevel) {
match self {
Self::Arc(level, _) => *level = target,
Self::Static(level, _) => *level = target,
}
}
}
impl log::Log for DebugSinkWrapper {
fn enabled(&self, metadata: &log::Metadata) -> bool {
if LogLevel::from(metadata.level()) < self.level() {
return false;
}
if metadata.target() == "program" && config::get().debug.disable_program_trace {
return false;
}
true
}
fn log(&self, record: &log::Record) {
use core::fmt::Write;
let level = LogLevel::from(record.level());
let sink = self.sink();
let cpu = Cpu::try_local().map(|c| c.id());
let line = record.line().unwrap_or(0);
let args = record.args();
let (prefix, suffix) = if sink.supports_control_sequences() {
(level.log_prefix(), level.log_suffix())
} else {
("", "")
};
let mut writer = SinkWriter { sink };
let now = monotonic_time();
let s = now.seconds();
match record.target() {
":program" => {
write!(writer, "{prefix}{s:06}:").ok();
if let Some(cpu) = cpu {
write!(writer, "{cpu}:").ok();
} else {
write!(writer, "?:").ok();
}
write!(writer, "ptrace:{args}").ok();
}
":raw" => {
write!(writer, "{prefix}{args}{suffix}").ok();
}
target => {
write!(writer, "{prefix}{s:06}:").ok();
if let Some(cpu) = cpu {
write!(writer, "{cpu}:").ok();
} else {
write!(writer, "?:").ok();
}
writeln!(writer, "{target}:{line}: {args}{suffix}").ok();
}
}
}
fn flush(&self) {}
}
impl log::Log for RingLoggerSink {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.target() != "program"
}
fn log(&self, record: &log::Record) {
use fmt::Write;
let mut lock = self.inner.lock();
let file = record.file().unwrap_or("<???>");
let line = record.line().unwrap_or(0);
writeln!(lock, "{}:{}: {}", file, line, record.args()).ok();
}
fn flush(&self) {}
}
impl log::Log for KernelLoggerSink {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.target() != "io"
}
fn log(&self, record: &log::Record) {
if !self.enabled(record.metadata()) {
return;
}
let _guard = DEBUG_LOCK.lock();
if RING_AVAILABLE.load(Ordering::Acquire) {
RING_LOGGER_SINK.log(record);
}
for sink in DEBUG_SINKS.read().iter() {
if sink.enabled(record.metadata()) {
sink.log(record);
}
}
}
fn flush(&self) {}
}
impl LogLevel {
fn log_prefix(self) -> &'static str {
match self {
LogLevel::Trace => "",
LogLevel::Debug => "",
LogLevel::Info => "\x1b[36m\x1b[1m",
LogLevel::Warning => "\x1b[33m\x1b[1m",
LogLevel::Error => "\x1b[31m\x1b[1m",
LogLevel::Fatal => "\x1b[38;2;255;0;0m\x1b[1m",
}
}
fn log_suffix(self) -> &'static str {
match self {
LogLevel::Trace => "",
LogLevel::Debug => "",
LogLevel::Info => "\x1b[0m",
LogLevel::Warning => "\x1b[0m",
LogLevel::Error => "\x1b[0m",
LogLevel::Fatal => "\x1b[0m",
}
}
}
impl RingLoggerSink {
const fn new() -> Self {
Self {
inner: IrqSafeSpinlock::new(RingLoggerInner {
data: RingBuffer::with_capacity(RING_LOGGER_CAPACITY),
}),
}
}
/// Reads data from the sink without blocking and waiting for more to arrive
pub fn read(&self, pos: usize, buffer: &mut [u8]) -> usize {
unsafe { self.inner.lock().data.read_all_static(pos, buffer) }
}
fn init_buffer(&self) -> Result<(), Error> {
self.inner.lock().data.reserve()
}
}
impl fmt::Write for RingLoggerInner {
fn write_str(&mut self, s: &str) -> fmt::Result {
for ch in s.bytes() {
self.data.write(ch);
}
Ok(())
}
}
impl From<log::Level> for LogLevel {
fn from(value: log::Level) -> Self {
match value {
log::Level::Trace => Self::Trace,
log::Level::Debug => Self::Debug,
log::Level::Info => Self::Info,
log::Level::Warn => Self::Warning,
log::Level::Error => Self::Error,
}
}
}
impl PanicLoggerSink<'_> {
/// Locks the global logger, allowing the panicking CPU to dump its panic info without being
/// interrupted
pub fn lock() -> Self {
Self {
lock: DEBUG_SINKS.read(),
}
}
/// Prints a formatted message to the panic log
pub fn log_fmt(&self, args: Arguments) {
use log::Log;
self.log(
&log::Record::builder()
.level(log::Level::Error)
.target(":raw")
.args(args)
.build(),
)
}
}
impl log::Log for PanicLoggerSink<'_> {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}
fn log(&self, record: &log::Record) {
for sink in self.lock.iter() {
if sink.enabled(record.metadata()) {
sink.log(record);
}
}
}
fn flush(&self) {}
}
/// Logs a formatted message to the panic output
pub macro panic_log($sink:expr, $($args:tt)+) {
$sink.log_fmt(format_args!($($args)+))
}
static LOGGER: KernelLoggerSink = KernelLoggerSink;
static DEBUG_SINKS: IrqSafeRwLock<StaticVector<DebugSinkWrapper, MAX_DEBUG_SINKS>> =
IrqSafeRwLock::new(StaticVector::new());
/// See [RingLoggerSink]
pub static RING_LOGGER_SINK: RingLoggerSink = RingLoggerSink::new();
fn make_sysfs_sink_object(index: usize) -> Arc<KObject<usize>> {
struct Level;
impl StringAttributeOps for Level {
type Data = usize;
const NAME: &'static str = "level";
const LIMIT: usize = 16;
const WRITEABLE: bool = true;
fn read(state: &Self::Data) -> Result<String, Error> {
let sinks = DEBUG_SINKS.read();
let sink = sinks.get(*state).ok_or(Error::InvalidFile)?;
Ok(sink.level().to_string())
}
fn write(state: &Self::Data, value: &str) -> Result<(), Error> {
let level = LogLevel::from_str(value)?;
let mut sinks = DEBUG_SINKS.write();
let sink = sinks.get_mut(*state).ok_or(Error::InvalidFile)?;
sink.set_level(level);
Ok(())
}
}
let object = KObject::new(index);
object.add_attribute(StringAttribute::from(Level)).ok();
object
}
fn add_sink_inner(sink: DebugSinkWrapper) {
let index = {
let mut sinks = DEBUG_SINKS.write();
let index = sinks.len();
sinks.push(sink);
index
};
if let Some(debug) = sysfs::debug() {
debug
.add_object(format!("{index}"), make_sysfs_sink_object(index))
.ok();
}
}
/// Adds a debugging output sink
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Arc(level, sink.clone()));
}
pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
if SERIAL_SINK_SET_UP.swap(true, Ordering::Acquire) {
return;
}
add_sink(sink, level);
}
pub fn add_early_sink(sink: &'static dyn DebugSink, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Static(level, sink));
}
pub fn disable_early_sinks() {
let mut sinks = DEBUG_SINKS.write();
// TODO proper sink storage/manipulation
for sink in sinks.iter_mut() {
if let DebugSinkWrapper::Static(level, _) = sink {
*level = LogLevel::Fatal;
}
}
}
/// Print a trace message coming from a process
pub fn program_trace(process: &Process, _thread: &Thread, message: &str) {
log::debug!(
target: ":program",
"{} ({}) {message}\n",
process.name,
process.id,
);
}
pub fn init_logger() {
static LOGGER_SET_UP: OneTimeInit<()> = OneTimeInit::new();
LOGGER_SET_UP.or_init_with(|| {
log::set_logger(&LOGGER)
.map(|_| log::set_max_level(log::LevelFilter::Trace))
.ok();
});
}
/// Resets the debugging terminal by clearing it
pub fn init() {
if RING_LOGGER_SINK.init_buffer().is_ok() {
RING_AVAILABLE.store(true, Ordering::Release);
}
init_logger();
}
impl fmt::Display for LogLevel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let level = match self {
Self::Trace => "trace",
Self::Debug => "debug",
Self::Info => "info",
Self::Warning => "warn",
Self::Error => "error",
Self::Fatal => "fatal",
};
f.write_str(level)
}
}
impl FromStr for LogLevel {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"trace" | "t" => Ok(Self::Trace),
"debug" | "d" => Ok(Self::Debug),
"info" | "i" => Ok(Self::Info),
"warn" | "w" => Ok(Self::Warning),
"error" | "e" => Ok(Self::Error),
"fatal" | "f" => Ok(Self::Fatal),
_ => Err(Error::InvalidArgument),
}
}
}

View File

@ -0,0 +1,240 @@
//! Utilities for debug information logging
use alloc::{
string::{String, ToString},
sync::Arc,
};
use core::{fmt, str::FromStr};
use ring::RingLoggerSink;
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use sink::DEBUG_SINKS;
use yggdrasil_abi::error::Error;
use crate::{
fs::sysfs::{
attribute::{StringAttribute, StringAttributeOps},
object::KObject,
},
task::{process::Process, thread::Thread},
};
mod panic;
mod ring;
mod sink;
pub use panic::{panic_log, PanicLoggerSink};
pub use ring::add_kernel_log_file;
pub use sink::{add_early_sink, add_serial_sink, add_sink, DebugSink};
static DEBUG_LOCK: IrqSafeSpinlock<()> = IrqSafeSpinlock::new(());
// Root logger registered with the `log` crate: fans each record out to the
// in-memory ring sink and to every registered debug sink.
struct KernelLoggerSink;
/// Defines the severity of the message
///
/// Variants are declared in increasing severity; the derived `Ord` is what
/// sinks use to filter out records below their configured level.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LogLevel {
    /// Very verbose low-level debugging information
    Trace,
    /// Debugging and verbose information
    Debug,
    /// General information about transitions in the system state
    Info,
    /// Non-critical abnormalities or notices
    Warning,
    /// Failures of non-essential components
    Error,
    /// Irrecoverable errors which result in kernel panic
    Fatal,
}
impl LogLevel {
    /// ANSI escape sequence emitted before a message of this level, on sinks
    /// that support vt100-like control sequences.
    fn log_prefix(self) -> &'static str {
        match self {
            LogLevel::Trace => "",
            LogLevel::Debug => "",
            LogLevel::Info => "\x1b[36m\x1b[1m",
            LogLevel::Warning => "\x1b[33m\x1b[1m",
            LogLevel::Error => "\x1b[31m\x1b[1m",
            LogLevel::Fatal => "\x1b[38;2;255;0;0m\x1b[1m",
        }
    }

    /// Matching attribute-reset sequence for [Self::log_prefix].
    fn log_suffix(self) -> &'static str {
        match self {
            LogLevel::Trace => "",
            LogLevel::Debug => "",
            LogLevel::Info => "\x1b[0m",
            LogLevel::Warning => "\x1b[0m",
            LogLevel::Error => "\x1b[0m",
            LogLevel::Fatal => "\x1b[0m",
        }
    }
}
impl From<log::Level> for LogLevel {
    // Maps the `log` crate's five levels onto the kernel's scale; `Fatal`
    // has no `log`-crate counterpart and is only produced internally.
    fn from(value: log::Level) -> Self {
        match value {
            log::Level::Trace => Self::Trace,
            log::Level::Debug => Self::Debug,
            log::Level::Info => Self::Info,
            log::Level::Warn => Self::Warning,
            log::Level::Error => Self::Error,
        }
    }
}
impl log::Log for KernelLoggerSink {
    fn enabled(&self, metadata: &log::Metadata) -> bool {
        // NOTE(review): the "io" target is excluded entirely — presumably to
        // avoid feedback loops from the I/O path; confirm the intent.
        metadata.target() != "io"
    }

    fn log(&self, record: &log::Record) {
        if !self.enabled(record.metadata()) {
            return;
        }
        // The ring sink has its own interior lock, so it is fed before
        // taking DEBUG_LOCK.
        RingLoggerSink.log(record);
        // Serialize sink output across CPUs so lines do not interleave.
        let _guard = DEBUG_LOCK.lock();
        for sink in DEBUG_SINKS.read().iter() {
            if sink.enabled(record.metadata()) {
                sink.log(record);
            }
        }
    }

    fn flush(&self) {}
}
// The single global logger instance handed to `log::set_logger`.
static LOGGER: KernelLoggerSink = KernelLoggerSink;
/// Builds the sysfs object for sink number `index`, exposing a writable
/// `level` attribute that reads and changes that sink's minimum log level.
fn make_sysfs_sink_object(index: usize) -> Arc<KObject<usize>> {
    struct Level;

    impl StringAttributeOps for Level {
        type Data = usize;
        const NAME: &'static str = "level";
        const LIMIT: usize = 16;
        const WRITEABLE: bool = true;

        fn read(state: &Self::Data) -> Result<String, Error> {
            let sinks = DEBUG_SINKS.read();
            // Index may be stale if the sink list ever shrinks.
            let sink = sinks.get(*state).ok_or(Error::InvalidFile)?;
            Ok(sink.level().to_string())
        }

        fn write(state: &Self::Data, value: &str) -> Result<(), Error> {
            // Accepts the same spellings as `LogLevel::from_str`.
            let level = LogLevel::from_str(value)?;
            let mut sinks = DEBUG_SINKS.write();
            let sink = sinks.get_mut(*state).ok_or(Error::InvalidFile)?;
            sink.set_level(level);
            Ok(())
        }
    }

    let object = KObject::new(index);
    object.add_attribute(StringAttribute::from(Level)).ok();
    object
}
/// Print a trace message coming from a process
///
/// The ":program" target routes the record through the ptrace formatting
/// path and lets it be suppressed via `debug.disable_program_trace`.
pub fn program_trace(process: &Process, _thread: &Thread, message: &str) {
    log::debug!(
        target: ":program",
        "{} ({}) {message}\n",
        process.name,
        process.id,
    );
}
/// Initializes kernel logging output. Prior to this call, no log entries are recorded.
pub fn init_logger() {
    static LOGGER_SET_UP: OneTimeInit<()> = OneTimeInit::new();
    // Idempotent: only the first call installs the logger.
    LOGGER_SET_UP.or_init_with(|| {
        // Filtering happens per-sink, so the global max level stays at Trace.
        log::set_logger(&LOGGER)
            .map(|_| log::set_max_level(log::LevelFilter::Trace))
            .ok();
    });
}
/// Initializes the debug subsystem (currently just the global logger).
pub fn init() {
    init_logger();
}
/// Print a hex dump into the kernel log with given level
///
/// Each line shows up to 16 bytes: the hexadecimal address, the byte values
/// grouped in pairs and, when `with_chars` is set, a printable-ASCII
/// rendering of the same bytes.
pub fn hex_dump(level: log::Level, bytes: &[u8], start_address: u64, with_chars: bool) {
    if bytes.is_empty() {
        return;
    }
    let line_width = 16;
    // Pad addresses to the width of the largest address printed. Addresses
    // are formatted in hex, so use the hex digit count; the previous
    // `ilog10`-based width counted decimal digits and over-padded.
    let address_width = (start_address + bytes.len() as u64).ilog(16) as usize + 1;
    for i in (0..bytes.len()).step_by(line_width) {
        log::log!(target: ":raw", level, "{:0width$X}: ", start_address + i as u64, width = address_width);
        for j in 0..line_width {
            if i + j < bytes.len() {
                log::log!(target: ":raw", level, "{:02X}", bytes[i + j]);
            } else {
                // Keep columns aligned on the final, partial line.
                log::log!(target: ":raw", level, "  ");
            }
            // Group the bytes in pairs for readability.
            if j % 2 != 0 {
                log::log!(target: ":raw", level, " ");
            }
        }
        if with_chars {
            for j in 0..line_width {
                if i + j >= bytes.len() {
                    break;
                }
                // Non-printable bytes are rendered as '.'.
                let ch = if bytes[i + j].is_ascii_graphic() {
                    bytes[i + j]
                } else {
                    b'.'
                };
                log::log!(target: ":raw", level, "{}", ch as char);
            }
        }
        log::log!(target: ":raw", level, "\n");
    }
}
impl fmt::Display for LogLevel {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Spellings match those accepted by the `FromStr` implementation.
        f.write_str(match self {
            Self::Trace => "trace",
            Self::Debug => "debug",
            Self::Info => "info",
            Self::Warning => "warn",
            Self::Error => "error",
            Self::Fatal => "fatal",
        })
    }
}
impl FromStr for LogLevel {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"trace" | "t" => Ok(Self::Trace),
"debug" | "d" => Ok(Self::Debug),
"info" | "i" => Ok(Self::Info),
"warn" | "w" => Ok(Self::Warning),
"error" | "e" => Ok(Self::Error),
"fatal" | "f" => Ok(Self::Fatal),
_ => Err(Error::InvalidArgument),
}
}
}

View File

@ -0,0 +1,54 @@
use core::fmt::Arguments;
use libk_util::{sync::spin_rwlock::IrqSafeRwLockReadGuard, StaticVector};
use super::sink::{DebugSinkWrapper, DEBUG_SINKS, MAX_DEBUG_SINKS};
/// Locking log sink for dumping panic info
pub struct PanicLoggerSink<'a> {
    // The read guard is held for the sink's entire lifetime so the sink list
    // cannot change while panic output is being written.
    lock: IrqSafeRwLockReadGuard<'a, StaticVector<DebugSinkWrapper, MAX_DEBUG_SINKS>>,
}
impl PanicLoggerSink<'_> {
    /// Locks the global logger, allowing the panicking CPU to dump its panic info without being
    /// interrupted
    pub fn lock() -> Self {
        Self {
            lock: DEBUG_SINKS.read(),
        }
    }

    /// Prints a formatted message to the panic log
    pub fn log_fmt(&self, args: Arguments) {
        use log::Log;
        // The ":raw" target suppresses the timestamp/CPU/target prefix so
        // panic output is emitted verbatim.
        self.log(
            &log::Record::builder()
                .level(log::Level::Error)
                .target(":raw")
                .args(args)
                .build(),
        )
    }
}
impl log::Log for PanicLoggerSink<'_> {
    fn enabled(&self, _metadata: &log::Metadata) -> bool {
        // The panic sink itself never filters; individual sinks still apply
        // their own level checks in `log` below.
        true
    }

    fn log(&self, record: &log::Record) {
        for sink in self.lock.iter() {
            if sink.enabled(record.metadata()) {
                sink.log(record);
            }
        }
    }

    fn flush(&self) {}
}
/// Logs a formatted message to the panic output
///
/// Usage: `panic_log!(sink, "format {}", value)` where `sink` is a locked
/// [PanicLoggerSink].
pub macro panic_log($sink:expr, $($args:tt)+) {
    $sink.log_fmt(format_args!($($args)+))
}

View File

@ -0,0 +1,270 @@
use core::{any::Any, fmt, future::poll_fn, task::Poll};
use alloc::sync::Arc;
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, waker::QueueWaker};
use yggdrasil_abi::{
error::Error,
io::{FileMode, OpenOptions},
};
use crate::{
task::sync::AsyncMutex,
vfs::{CommonImpl, Filename, InstanceData, Metadata, Node, NodeFlags, NodeRef, RegularImpl},
};
/// Maximum stored length of a single log record, in bytes.
const RECORD_SIZE: usize = 128;
/// Number of record slots in the fixed in-memory log buffer.
const FIXED_LOG_SIZE: usize = 256;

// VFS node implementation backing the read-only kernel log file.
struct LogFile;
// `log::Log` implementor that feeds records into the in-memory ring buffer.
pub(super) struct RingLoggerSink;

// Fixed-size staging buffer used to format one record before insertion.
struct WriteBuffer {
    text: [u8; 255],
    len: usize,
}

// Per-slot metadata: how many bytes of the slot are valid plus the record's
// severity.
struct Record {
    len: usize,
    level: log::Level,
}
impl fmt::Write for WriteBuffer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let bytes = s.as_bytes();
        let available = self.text.len() - self.len;
        // Silently truncate once the buffer is full; formatting still
        // reports success so the caller keeps the partial record.
        let amount = available.min(bytes.len());
        self.text[self.len..self.len + amount].copy_from_slice(&bytes[..amount]);
        self.len += amount;
        Ok(())
    }
}
// Backing storage for the fixed in-memory log: FIXED_LOG_SIZE records of up
// to RECORD_SIZE bytes each, plus per-record metadata.
static mut FIXED_LOG_BUFFER: [u8; FIXED_LOG_SIZE * RECORD_SIZE] = [0; FIXED_LOG_SIZE * RECORD_SIZE];
static mut FIXED_LOG_RECORDS: [Record; FIXED_LOG_SIZE] = [const {
    Record {
        len: 0,
        level: log::Level::Trace,
    }
}; FIXED_LOG_SIZE];

// SAFETY(review): the exclusive references to the `static mut` arrays are
// taken exactly once here and then guarded by the `IrqSafeRwLock` — confirm
// no other code path touches FIXED_LOG_* directly.
#[allow(static_mut_refs)]
static LOG_BUFFER: IrqSafeRwLock<LogBuffer> = IrqSafeRwLock::new(unsafe {
    LogBuffer::new(LogBufferStorage::Fixed {
        meta: &mut FIXED_LOG_RECORDS,
        data: &mut FIXED_LOG_BUFFER,
    })
});

// Wakes readers blocked in `read_log_line` when a new record is written.
static LOG_NOTIFY: QueueWaker = QueueWaker::new();
// Storage backing the log ring. Only a statically allocated variant exists
// today; the enum leaves room for other backings later.
enum LogBufferStorage {
    Fixed {
        meta: &'static mut [Record; FIXED_LOG_SIZE],
        data: &'static mut [u8; FIXED_LOG_SIZE * RECORD_SIZE],
    },
}
// Ring of log records. `head` and `tail` are free-running sequence numbers
// (reduced modulo capacity on access); `filled` flips once the ring has
// wrapped for the first time.
struct LogBuffer {
    storage: LogBufferStorage,
    head: usize,
    tail: usize,
    filled: bool,
}
// Per-open-file state: the sequence number of the next record this reader
// will receive, guarded for async access.
struct OpenLogFile {
    seq: AsyncMutex<usize>,
}
impl LogBufferStorage {
    // Returns the metadata and data slice for slot `index` (reduced modulo
    // the storage capacity); each slot owns RECORD_SIZE bytes of `data`.
    fn record_mut(&mut self, index: usize) -> (&mut Record, &mut [u8]) {
        match self {
            Self::Fixed { meta, data } => {
                let index = index % meta.len();
                (&mut meta[index], &mut data[index * RECORD_SIZE..])
            }
        }
    }

    // Read-only counterpart of `record_mut`.
    fn record(&self, index: usize) -> (&Record, &[u8]) {
        match self {
            Self::Fixed { meta, data } => {
                let index = index % meta.len();
                (&meta[index], &data[index * RECORD_SIZE..])
            }
        }
    }

    // Number of record slots.
    fn len(&self) -> usize {
        match self {
            Self::Fixed { meta, .. } => meta.len(),
        }
    }
}
impl LogBuffer {
    const fn new(storage: LogBufferStorage) -> Self {
        Self {
            storage,
            head: 0,
            tail: 0,
            filled: false,
        }
    }

    // Appends one record, overwriting the oldest once the ring is full.
    // `text` longer than RECORD_SIZE is silently truncated.
    fn write(&mut self, level: log::Level, text: &[u8]) {
        let len = text.len().min(RECORD_SIZE);
        let (meta, data) = self.storage.record_mut(self.head);
        meta.len = len;
        meta.level = level;
        data[..len].copy_from_slice(&text[..len]);
        self.head = self.head.wrapping_add(1);
        if self.filled {
            // NOTE(review): this keeps `len - 1` readable records rather
            // than `len` — apparently so the slot about to be overwritten
            // next is never handed to a reader; confirm this is intentional.
            self.tail = self.head.wrapping_sub(self.storage.len() - 1);
        } else if self.head == self.storage.len() {
            self.filled = true;
        }
    }
}
impl CommonImpl for LogFile {
    fn metadata(&self, _node: &NodeRef) -> Result<Metadata, Error> {
        // Presumably never reached because the node is created with
        // IN_MEMORY_PROPS — confirm against the VFS dispatch.
        unreachable!()
    }

    fn size(&self, _node: &NodeRef) -> Result<u64, Error> {
        // The log behaves like a stream and reports no size.
        Ok(0)
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
impl RegularImpl for LogFile {
    fn open(
        &self,
        _node: &NodeRef,
        opts: OpenOptions,
    ) -> Result<(u64, Option<InstanceData>), Error> {
        // The log file is strictly read-only.
        if opts.contains_any(OpenOptions::WRITE | OpenOptions::APPEND | OpenOptions::TRUNCATE) {
            return Err(Error::ReadOnly);
        }
        // Each reader starts at the oldest record currently in the ring.
        let instance = Arc::new(OpenLogFile {
            seq: AsyncMutex::new(LOG_BUFFER.read().tail),
        });
        Ok((0, Some(instance)))
    }

    fn read(
        &self,
        _node: &NodeRef,
        instance: Option<&InstanceData>,
        _pos: u64,
        buf: &mut [u8],
    ) -> Result<usize, Error> {
        // `try_read_log_line` needs 2 bytes for the "<level>:" prefix plus
        // room for at least some payload.
        if buf.len() < 4 {
            return Err(Error::InvalidArgument);
        }
        let instance: &OpenLogFile = instance
            .and_then(|t| t.downcast_ref())
            .ok_or(Error::InvalidFile)?;
        // Each read returns exactly one record and advances this reader's
        // sequence number.
        block! {
            let mut seq = instance.seq.lock().await;
            let (len, new_seq) = read_log_line(*seq, buf).await;
            *seq = new_seq;
            Ok(len)
        }?
    }

    fn close(&self, _node: &NodeRef, _instance: Option<&InstanceData>) -> Result<(), Error> {
        Ok(())
    }
}
impl log::Log for RingLoggerSink {
    fn enabled(&self, metadata: &log::Metadata) -> bool {
        // Trace output is too chatty to keep in the in-memory log.
        metadata.level() != log::Level::Trace
    }

    fn log(&self, record: &log::Record) {
        use core::fmt::Write;
        if !self.enabled(record.metadata()) {
            return;
        }
        let mut write = WriteBuffer {
            text: [0; 255],
            len: 0,
        };
        // Raw/program records already carry their own line endings.
        let result = if record.target() == ":raw" || record.target() == ":program" {
            write!(write, "{}", record.args())
        } else {
            write!(write, "{}\n", record.args())
        };
        // NOTE(review): the staging buffer holds 255 bytes but LogBuffer
        // stores at most RECORD_SIZE (128) — long lines are truncated again
        // on insertion.
        if let Ok(()) = result {
            LOG_BUFFER
                .write()
                .write(record.level(), &write.text[..write.len]);
            // Wake any readers blocked on the log file.
            LOG_NOTIFY.wake_all();
        }
    }

    fn flush(&self) {}
}
/// Adds a kernel log file to the parent node
pub fn add_kernel_log_file(parent: &NodeRef) {
    let node = Node::regular(
        LogFile,
        NodeFlags::IN_MEMORY_SIZE | NodeFlags::IN_MEMORY_PROPS,
        // Readable by root only.
        Some(Metadata::now_root(FileMode::new(0o400))),
        None,
    );
    // SAFETY(review): "log" is a short literal without path separators —
    // assumed to satisfy Filename's validity requirements; confirm.
    let filename = unsafe { Filename::from_str_unchecked("log") };
    parent.add_child(filename, node).ok();
}
// Waits until a record newer than `seq` is available and copies it into
// `buffer`; returns `(bytes_written, next_sequence_number)`.
async fn read_log_line(seq: usize, buffer: &mut [u8]) -> (usize, usize) {
    poll_fn(|cx| {
        if let Some(out) = try_read_log_line(seq, buffer) {
            LOG_NOTIFY.remove(cx.waker());
            Poll::Ready(out)
        } else {
            // Park until `RingLoggerSink::log` wakes the queue.
            LOG_NOTIFY.register(cx.waker());
            Poll::Pending
        }
    })
    .await
}
// Non-blocking read of the record at sequence `seq`.
//
// Returns `None` when no record newer than `seq` exists yet. On success,
// yields the number of bytes written into `buffer` and the next sequence
// number to read from. Callers must pass a buffer of at least 4 bytes.
fn try_read_log_line(seq: usize, buffer: &mut [u8]) -> Option<(usize, usize)> {
    let lock = LOG_BUFFER.read();
    if seq == lock.head {
        return None;
    }
    let available = lock.head.wrapping_sub(seq);
    // If the writer lapped this reader, resume from the oldest record still
    // present instead of rereading overwritten slots.
    let ridx = if available < lock.storage.len() {
        seq
    } else {
        lock.tail
    };
    let (meta, data) = lock.storage.record(ridx);
    let new_seq = ridx.wrapping_add(1);
    // Two bytes are reserved for the "<level>:" prefix below.
    let len = core::cmp::min(meta.len, buffer.len() - 2);
    buffer[0] = match meta.level {
        log::Level::Trace => b't',
        log::Level::Debug => b'd',
        log::Level::Info => b'i',
        log::Level::Warn => b'w',
        log::Level::Error => b'e',
    };
    buffer[1] = b':';
    buffer[2..len + 2].copy_from_slice(&data[..len]);
    Some((len + 2, new_seq))
}

View File

@ -0,0 +1,186 @@
use core::{
fmt,
sync::atomic::{AtomicBool, Ordering},
};
use alloc::{format, sync::Arc};
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, StaticVector};
use yggdrasil_abi::error::Error;
use crate::{arch::Cpu, config, fs::sysfs, time::monotonic_time};
use super::{make_sysfs_sink_object, LogLevel};
/// Generic interface for debug output
pub trait DebugSink: Sync {
    /// Sends a single byte to the output
    fn putc(&self, c: u8) -> Result<(), Error>;

    /// Sends a string of bytes to the output
    ///
    /// The default implementation forwards byte-by-byte to [Self::putc] and
    /// stops at the first error.
    fn puts(&self, s: &str) -> Result<(), Error> {
        for &byte in s.as_bytes() {
            self.putc(byte)?;
        }
        Ok(())
    }

    /// Returns `true` if the device supports vt100-like control sequences
    fn supports_control_sequences(&self) -> bool {
        false
    }
}
// A registered sink paired with its minimum log level. `Static` is used for
// early-boot sinks that exist before the allocator is available.
#[derive(Clone)]
pub enum DebugSinkWrapper {
    Arc(LogLevel, Arc<dyn DebugSink>),
    Static(LogLevel, &'static dyn DebugSink),
}

// Adapter so `core::fmt` machinery can write into a `DebugSink`.
struct SinkWriter<'a> {
    sink: &'a dyn DebugSink,
}

// SAFETY(review): relies on every `DebugSink` being safe to share across
// CPUs (the trait already requires `Sync`) — confirm no implementation
// carries non-Send state behind the `Arc`/`&'static`.
unsafe impl Send for DebugSinkWrapper {}
unsafe impl Sync for DebugSinkWrapper {}
impl fmt::Write for SinkWriter<'_> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        // `fmt::Error` carries no detail; the underlying error is dropped.
        self.sink.puts(s).map_err(|_| fmt::Error)
    }
}
impl DebugSinkWrapper {
    /// Borrows the underlying sink regardless of ownership flavor.
    #[inline]
    pub fn sink(&self) -> &dyn DebugSink {
        match self {
            Self::Arc(_, arc) => arc.as_ref(),
            Self::Static(_, sink) => *sink,
        }
    }

    /// Minimum level this sink accepts.
    #[inline]
    pub fn level(&self) -> LogLevel {
        match self {
            Self::Arc(level, _) => *level,
            Self::Static(level, _) => *level,
        }
    }

    /// Changes the sink's minimum accepted level.
    pub fn set_level(&mut self, target: LogLevel) {
        match self {
            Self::Arc(level, _) => *level = target,
            Self::Static(level, _) => *level = target,
        }
    }
}
impl log::Log for DebugSinkWrapper {
    fn enabled(&self, metadata: &log::Metadata) -> bool {
        // Per-sink level filter.
        if LogLevel::from(metadata.level()) < self.level() {
            return false;
        }
        // Program (ptrace) output can be globally disabled via kernel config.
        if metadata.target() == "program" && config::get().debug.disable_program_trace {
            return false;
        }
        true
    }

    fn log(&self, record: &log::Record) {
        use core::fmt::Write;
        let level = LogLevel::from(record.level());
        let sink = self.sink();
        // CPU id may be unavailable very early in boot; printed as '?' then.
        let cpu = Cpu::try_local().map(|c| c.id());
        let line = record.line().unwrap_or(0);
        let args = record.args();
        // Colorize only on sinks that understand vt100 escapes.
        let (prefix, suffix) = if sink.supports_control_sequences() {
            (level.log_prefix(), level.log_suffix())
        } else {
            ("", "")
        };
        let mut writer = SinkWriter { sink };
        let now = monotonic_time();
        let s = now.seconds();
        match record.target() {
            // Userspace trace output: "<time>:<cpu>:ptrace:<message>".
            ":program" => {
                write!(writer, "{prefix}{s:06}:").ok();
                if let Some(cpu) = cpu {
                    write!(writer, "{cpu}:").ok();
                } else {
                    write!(writer, "?:").ok();
                }
                write!(writer, "ptrace:{args}").ok();
            }
            // Raw records are emitted verbatim, without any prefix.
            ":raw" => {
                write!(writer, "{prefix}{args}{suffix}").ok();
            }
            // Ordinary records: "<time>:<cpu>:<target>:<line>: <message>".
            target => {
                write!(writer, "{prefix}{s:06}:").ok();
                if let Some(cpu) = cpu {
                    write!(writer, "{cpu}:").ok();
                } else {
                    write!(writer, "?:").ok();
                }
                writeln!(writer, "{target}:{line}: {args}{suffix}").ok();
            }
        }
    }

    fn flush(&self) {}
}
/// Maximum number of simultaneously registered debug sinks.
pub(super) const MAX_DEBUG_SINKS: usize = 8;
/// Global registry of debug sinks, shared with the panic logger.
pub(super) static DEBUG_SINKS: IrqSafeRwLock<StaticVector<DebugSinkWrapper, MAX_DEBUG_SINKS>> =
    IrqSafeRwLock::new(StaticVector::new());
// Ensures only the first serial sink registration takes effect.
static SERIAL_SINK_SET_UP: AtomicBool = AtomicBool::new(false);
/// Adds a debugging output sink
///
/// The sink receives all kernel log records at or above `level`; the level
/// can later be adjusted through the sysfs `level` attribute.
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
    // `sink` is owned here, so it can be moved into the wrapper directly;
    // the previous `sink.clone()` was a redundant refcount bump.
    add_sink_inner(DebugSinkWrapper::Arc(level, sink));
}
/// Adds a serial debug output sink. Only one will be added, rest are ignored.
pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
    // NOTE(review): `swap` with `Ordering::Acquire` — `AcqRel`/`SeqCst`
    // would be the conventional ordering for a set-once flag; confirm
    // Acquire is sufficient here.
    if SERIAL_SINK_SET_UP.swap(true, Ordering::Acquire) {
        return;
    }
    add_sink(sink, level);
}
/// Adds an early output sink, which is disabled by a call to [disable_early_sinks].
///
/// Early sinks are `'static` because they are registered before the
/// allocator is available.
pub fn add_early_sink(sink: &'static dyn DebugSink, level: LogLevel) {
    add_sink_inner(DebugSinkWrapper::Static(level, sink));
}
/// Disables early output sinks
///
/// Early (static) sinks are not removed; their level is raised to `Fatal`
/// so they only ever emit panic output.
pub fn disable_early_sinks() {
    let mut sinks = DEBUG_SINKS.write();
    // TODO proper sink storage/manipulation
    for sink in sinks.iter_mut() {
        if let DebugSinkWrapper::Static(level, _) = sink {
            *level = LogLevel::Fatal;
        }
    }
}
// Registers `sink` in the global list and, when the sysfs debug directory
// already exists, exposes it there as object "<index>".
fn add_sink_inner(sink: DebugSinkWrapper) {
    let index = {
        let mut sinks = DEBUG_SINKS.write();
        let index = sinks.len();
        sinks.push(sink);
        index
    };
    if let Some(debug) = sysfs::debug() {
        debug
            .add_object(format!("{index}"), make_sysfs_sink_object(index))
            .ok();
    }
}

View File

@ -4,7 +4,7 @@ use alloc::sync::Arc;
use libk_util::OneTimeInit;
use yggdrasil_abi::{error::Error, io::FileMode};
use crate::vfs::{impls::MemoryDirectory, path::OwnedFilename, Metadata, Node, NodeFlags};
use crate::vfs::{impls::MemoryDirectory, path::OwnedFilename, Metadata, Node, NodeFlags, NodeRef};
use super::attribute::Attribute;
@ -39,6 +39,10 @@ impl<D> KObject<D> {
self.node.add_child(filename, child.node.clone())
}
/// Returns a reference to the filesystem node backing this object.
pub fn node(&self) -> &NodeRef {
    &self.node
}
/// Returns the data payload carried by this object (module-internal).
pub(super) fn data(&self) -> &D {
    &self.data
}

View File

@ -18,6 +18,7 @@ use libk_util::OneTimeInit;
use memfs::block::{self, BlockAllocator};
// use memfs::block::{self, BlockAllocator};
use static_assertions::const_assert_eq;
use ygg_driver_fat32::Fat32Fs;
use yggdrasil_abi::{error::Error, io::MountOptions};
pub use pseudo::add_pseudo_devices;
@ -89,6 +90,11 @@ fn create_filesystem<'a, I: IntoIterator<Item = FilesystemMountOption<'a>>>(
let device = source.as_block_device()?;
block!(Ext2Fs::create(device.clone(), options).await)??
}
"fat32" if let Some(source) = source => {
let source = source?;
let device = source.as_block_device()?;
block!(Fat32Fs::create(device.clone(), options).await)??
}
_ => return Err(Error::InvalidArgument),
};

View File

@ -43,6 +43,7 @@ use git_version::git_version;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::{
arch::Cpu,
debug,
fs::{
devfs,
sysfs::{
@ -115,9 +116,11 @@ fn register_sysfs_attributes() {
}
let kernel = sysfs::kernel().unwrap();
let kernel_node = kernel.node();
kernel.add_attribute(StringAttribute::from(Version)).ok();
kernel.add_attribute(StringAttribute::from(Arch)).ok();
debug::add_kernel_log_file(kernel_node);
}
/// Common kernel main function. Must be called for BSP processor only.

View File

@ -19,6 +19,32 @@ impl<const N: usize> FixedString<N> {
}
}
/// Copies character codepoints, encoding them in UTF-8, into this [FixedString].
///
/// Returns `Ok(n)` with the number of bytes appended when every character fits.
/// If a character's UTF-8 encoding would overflow the capacity `N`, stops and
/// returns `Err(n)` with the number of bytes appended before that character;
/// characters written so far are kept.
pub fn append_from_chars<I: Iterator<Item = char>>(&mut self, it: I) -> Result<usize, usize> {
    let start = self.len;
    for ch in it {
        let ch_len = ch.len_utf8();
        // Check capacity before encoding: encode_utf8 would panic on a
        // too-small slice.
        if self.len + ch_len > N {
            return Err(self.len - start);
        }
        ch.encode_utf8(&mut self.data[self.len..]);
        self.len += ch_len;
    }
    Ok(self.len - start)
}
/// Appends raw bytes to the string. When the whole `bytes` slice does not fit
/// into the remaining capacity, the string is left unchanged and `false` is
/// returned; otherwise the bytes are appended and `true` is returned.
pub fn append_from_bytes(&mut self, bytes: &[u8]) -> bool {
    let end = self.len + bytes.len();
    if end > N {
        return false;
    }
    self.data[self.len..end].copy_from_slice(bytes);
    self.len = end;
    true
}
/// Returns the length of the string
pub const fn len(&self) -> usize {
self.len
@ -93,3 +119,30 @@ pub fn from_bytes<T>(bytes: &[u8]) -> &T {
assert_eq!(bytes.len(), core::mem::size_of::<T>());
unsafe { &*(&bytes[0] as *const u8 as *const T) }
}
#[cfg(test)]
mod tests {
    use super::FixedString;

    #[test]
    fn test_fixed_string_append() {
        // Byte-wise appends into a 16-byte buffer.
        let mut fixed = FixedString::<16>::empty();
        for chunk in [&b"test"[..], &b"2test"[..], &b"3test"[..]] {
            assert!(fixed.append_from_bytes(chunk));
        }
        assert_eq!(fixed, "test2test3test");
        // 14 bytes are used; 5 more do not fit and must leave the string intact.
        assert!(!fixed.append_from_bytes(b"4test"));
        assert_eq!(fixed, "test2test3test");

        // Char-wise appends, including multi-byte UTF-8 codepoints.
        let mut fixed = FixedString::<256>::empty();
        assert_eq!(fixed.append_from_chars("test".chars()), Ok(4));
        assert_eq!(fixed, "test");
        assert_eq!(
            fixed.append_from_chars("very0very1very2very3very4longfilename.txt".chars()),
            Ok(41)
        );
        assert_eq!(fixed, "testvery0very1very2very3very4longfilename.txt");
        // Six Cyrillic characters encode to 12 UTF-8 bytes.
        assert_eq!(fixed.append_from_chars("юнікод".chars()), Ok(12));
        assert_eq!(fixed, "testvery0very1very2very3very4longfilename.txtюнікод");
    }
}

View File

@ -158,6 +158,7 @@ pub fn check_all(env: BuildEnv, action: CheckAction) -> Result<(), Error> {
pub fn test_all(env: BuildEnv) -> Result<(), Error> {
for path in [
"kernel/driver/fs/memfs",
"kernel/driver/fs/fat32",
"lib/abi",
"kernel/libk",
"kernel/libk/libk-util",