Compare commits
3 Commits
74eedfe540 ... 605ab4fe09
Author | SHA1 | Date
---|---|---
 | 605ab4fe09 |
 | 7140af8ccf |
 | efdf8b2c0b |
17 Cargo.lock (generated)
@@ -124,6 +124,15 @@ dependencies = [
  "proc-macro2",
 ]
 
+[[package]]
+name = "ramfs"
+version = "0.1.0"
+dependencies = [
+ "error",
+ "spin",
+ "vfs",
+]
+
 [[package]]
 name = "rustc_version"
 version = "0.2.3"
@@ -191,3 +200,11 @@ name = "unsafe_unwrap"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1230ec65f13e0f9b28d789da20d2d419511893ea9dac2c1f4ef67b8b14e5da80"
+
+[[package]]
+name = "vfs"
+version = "0.1.0"
+dependencies = [
+ "error",
+ "spin",
+]
@ -7,5 +7,7 @@ edition = "2018"
|
||||
members = [
|
||||
"kernel",
|
||||
"error",
|
||||
"address"
|
||||
"address",
|
||||
"fs/vfs",
|
||||
"fs/ramfs"
|
||||
]
|
||||
|
14 build.sh
@ -4,10 +4,19 @@ if [ -z "${MACH}" ]; then
|
||||
MACH=rpi3b
|
||||
fi
|
||||
|
||||
if [ -z "${PROFILE}" ]; then
|
||||
PROFILE=debug
|
||||
fi
|
||||
|
||||
LLVM_DIR=$(llvm-config --bindir)
|
||||
ARCH=aarch64-unknown-none-${MACH}
|
||||
CARGO_FEATURES="mach_$MACH"
|
||||
CARGO_ARGS="--target ../etc/aarch64-unknown-none-$MACH.json \
|
||||
--features mach_$MACH,fdt-rs"
|
||||
--features $CARGO_FEATURES"
|
||||
|
||||
if [ "$PROFILE" = "release" ]; then
|
||||
CARGO_ARGS="$CARGO_ARGS --release"
|
||||
fi
|
||||
|
||||
set -e
|
||||
|
||||
@ -21,4 +30,5 @@ case $1 in
|
||||
;;
|
||||
esac
|
||||
cd ..
|
||||
${LLVM_DIR}/llvm-objcopy -O binary target/${ARCH}/debug/kernel target/${ARCH}/debug/kernel.bin
|
||||
${LLVM_DIR}/llvm-objcopy -O binary target/${ARCH}/${PROFILE}/kernel \
|
||||
target/${ARCH}/${PROFILE}/kernel.bin
|
||||
|
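Note: after this change the build profile is chosen by the PROFILE environment variable (defaulting to debug), and the objcopy step follows the same profile directory, so a release image would presumably be produced with something like `PROFILE=release ./build.sh ...` — the subcommands accepted via $1 are not visible in this hunk, so the exact invocation is an assumption.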
@@ -2,5 +2,8 @@
 
 #[derive(PartialEq, Debug, Clone, Copy)]
 pub enum Errno {
-    OutOfMemory
+    InvalidArgument,
+    DoesNotExist,
+    NotADirectory,
+    OutOfMemory,
 }
11 fs/ramfs/Cargo.toml (new file)
@@ -0,0 +1,11 @@
[package]
name = "ramfs"
version = "0.1.0"
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
spin = "*"
vfs = { path = "../vfs" }
error = { path = "../../error" }
146 fs/ramfs/src/block.rs (new file)
@@ -0,0 +1,146 @@
use core::mem::{size_of, MaybeUninit};
use core::ops::{Deref, DerefMut};
use error::Errno;

pub const SIZE: usize = 4096;
pub const ENTRY_COUNT: usize = SIZE / size_of::<usize>();

pub trait BlockAllocator {
    fn alloc(&self) -> *mut u8;
    unsafe fn free(&self, ptr: *mut u8);
}

pub struct BlockRef<'a, A: BlockAllocator + Copy> {
    inner: Option<&'a mut [u8; SIZE]>,
    alloc: MaybeUninit<A>,
}

impl<'a, A: BlockAllocator + Copy> BlockRef<'a, A> {
    pub fn new(alloc: A) -> Result<Self, Errno> {
        assert!(size_of::<A>() == 0);
        let ptr = alloc.alloc();
        if ptr.is_null() {
            Err(Errno::OutOfMemory)
        } else {
            Ok(unsafe { Self::from_raw(alloc, ptr) })
        }
    }

    pub fn new_indirect(alloc: A) -> Result<Self, Errno> {
        let mut res = Self::new(alloc)?;
        for it in res.as_mut_ref_array().iter_mut() {
            it.write(BlockRef::null());
        }
        Ok(res)
    }

    pub fn null() -> Self {
        Self {
            inner: None,
            alloc: MaybeUninit::uninit(),
        }
    }

    pub unsafe fn from_raw(alloc: A, data: *mut u8) -> Self {
        Self {
            inner: Some(&mut *(data as *mut _)),
            alloc: MaybeUninit::new(alloc),
        }
    }

    pub fn is_null(&self) -> bool {
        self.inner.is_none()
    }

    pub fn as_mut_ref_array(&mut self) -> &mut [MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
        assert_eq!(size_of::<Self>(), 8);
        unsafe { &mut *(self.deref_mut() as *mut _ as *mut _) }
    }

    pub fn as_ref_array(&self) -> &[MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
        assert_eq!(size_of::<Self>(), 8);
        unsafe { &*(self.deref() as *const _ as *const _) }
    }

    pub fn zero(&mut self) {
        if let Some(inner) = self.inner.as_mut() {
            inner.fill(0);
        } else {
            panic!("Tried to fill a NULL blockref");
        }
    }
}

impl<'a, A: BlockAllocator + Copy> Drop for BlockRef<'a, A> {
    fn drop(&mut self) {
        if let Some(inner) = self.inner.take() {
            unsafe {
                self.alloc.assume_init_ref().free(inner as *mut _ as *mut _);
            }
        }
    }
}

impl<'a, A: BlockAllocator + Copy> Deref for BlockRef<'a, A> {
    type Target = [u8; SIZE];

    fn deref(&self) -> &Self::Target {
        self.inner.as_ref().unwrap()
    }
}

impl<'a, A: BlockAllocator + Copy> DerefMut for BlockRef<'a, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.as_mut().unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::boxed::Box;
    use std::sync::atomic::{AtomicUsize, Ordering};

    static A_COUNTER: AtomicUsize = AtomicUsize::new(0);

    #[test]
    fn block_allocator() {
        #[derive(Clone, Copy)]
        struct A;
        impl BlockAllocator for A {
            fn alloc(&self) -> *mut u8 {
                let b = Box::leak(Box::<[u8; SIZE]>::new_uninit());
                A_COUNTER.fetch_add(1, Ordering::SeqCst);
                b.as_mut_ptr() as *mut _
            }

            unsafe fn free(&self, ptr: *mut u8) {
                A_COUNTER.fetch_sub(1, Ordering::SeqCst);
                drop(Box::from_raw(ptr as *mut [u8; SIZE]));
            }
        }

        const N: usize = 13;
        {
            let mut s: [MaybeUninit<BlockRef<A>>; N] = MaybeUninit::uninit_array();

            assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);

            for i in 0..N {
                let mut block = BlockRef::new(A {}).unwrap();
                block.fill(1);
                s[i].write(block);
            }

            assert_eq!(A_COUNTER.load(Ordering::Acquire), N);

            for i in 0..N {
                unsafe {
                    s[i].assume_init_drop();
                }
            }

            assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
        }
    }
}
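Note: BlockRef is an owning handle to a single 4 KiB block. The allocator is kept inline as MaybeUninit<A> and asserted to be zero-sized, so the handle itself stays one pointer wide (hence the size_of::<Self>() == 8 assertions) — that is what lets an indirect block be reinterpreted as an array of ENTRY_COUNT child handles. A minimal stand-alone sketch of the intended usage, assuming a hosted (std) test environment and a hypothetical HeapAlloc type that is not part of this commit:

    // Zero-sized allocator backed by the global heap, mirroring the unit tests.
    #[derive(Clone, Copy)]
    struct HeapAlloc;

    impl BlockAllocator for HeapAlloc {
        fn alloc(&self) -> *mut u8 {
            // Leak a 4 KiB box; BlockRef's Drop hands it back through free().
            Box::leak(Box::new([0u8; SIZE])).as_mut_ptr()
        }
        unsafe fn free(&self, ptr: *mut u8) {
            drop(Box::from_raw(ptr as *mut [u8; SIZE]));
        }
    }

    fn demo() -> Result<(), Errno> {
        let mut block = BlockRef::new(HeapAlloc)?; // allocates one block
        block.zero();                              // Deref{,Mut} expose [u8; SIZE]
        block[0] = 0xAA;
        assert!(!block.is_null());
        Ok(())                                     // block is freed here by Drop
    }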
691 fs/ramfs/src/bvec.rs (new file)
@@ -0,0 +1,691 @@
use crate::{block, BlockAllocator, BlockRef};
use core::cmp::{max, min};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Index, IndexMut};
use error::Errno;

const L0_BLOCKS: usize = 32; // 128K
const L1_BLOCKS: usize = 8; // 16M

pub struct Bvec<'a, A: BlockAllocator + Copy> {
    capacity: usize,
    size: usize,
    l0: [MaybeUninit<BlockRef<'a, A>>; L0_BLOCKS],
    l1: [MaybeUninit<BlockRef<'a, A>>; L1_BLOCKS],
    l2: MaybeUninit<BlockRef<'a, A>>,
    alloc: A,
}

impl<'a, A: BlockAllocator + Copy> Bvec<'a, A> {
    pub fn new(alloc: A) -> Self {
        let mut res = Self {
            capacity: 0,
            size: 0,
            l0: MaybeUninit::uninit_array(),
            l1: MaybeUninit::uninit_array(),
            l2: MaybeUninit::uninit(),
            alloc,
        };
        for it in res.l0.iter_mut() {
            it.write(BlockRef::null());
        }
        for it in res.l1.iter_mut() {
            it.write(BlockRef::null());
        }
        res.l2.write(BlockRef::null());
        res
    }

    pub fn resize(&mut self, cap: usize) -> Result<(), Errno> {
        if cap <= self.capacity {
            let mut curr = self.capacity;
            while curr != cap {
                curr -= 1;
                let mut index = curr;

                if index >= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT {
                    index -= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT;

                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;

                    let l2r = unsafe { self.l2.assume_init_mut() };
                    assert!(!l2r.is_null());
                    let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
                    assert!(!l1r.is_null());
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(!l0r.is_null());

                    *l0r = BlockRef::null();
                    if l0i == 0 {
                        *l1r = BlockRef::null();
                    }
                    if index == 0 {
                        *l2r = BlockRef::null();
                    }
                    continue;
                }

                if index >= L0_BLOCKS {
                    index -= L0_BLOCKS;

                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;

                    let l1r = unsafe { self.l1[l1i].assume_init_mut() };
                    assert!(!l1r.is_null());
                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(!l0r.is_null());

                    *l0r = BlockRef::null();
                    if l0i == 0 {
                        *l1r = BlockRef::null();
                    }

                    continue;
                }

                let l0r = unsafe { self.l0[index].assume_init_mut() };
                assert!(!l0r.is_null());
                *l0r = BlockRef::null();
                continue;
            }
        } else {
            for mut index in self.capacity..cap {
                if index < L0_BLOCKS {
                    let l0r = unsafe { self.l0[index].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;
                    continue;
                }
                index -= L0_BLOCKS;
                if index < L1_BLOCKS * block::ENTRY_COUNT {
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;

                    let l1r = unsafe { self.l1[l1i].assume_init_mut() };
                    if l1r.is_null() {
                        *l1r = BlockRef::new_indirect(self.alloc)?;
                    }

                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;

                    continue;
                }
                index -= L1_BLOCKS * block::ENTRY_COUNT;
                if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
                    let l1i = index / block::ENTRY_COUNT;
                    let l0i = index % block::ENTRY_COUNT;

                    let l2r = unsafe { self.l2.assume_init_mut() };
                    if l2r.is_null() {
                        *l2r = BlockRef::new_indirect(self.alloc)?;
                    }

                    let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
                    if l1r.is_null() {
                        *l1r = BlockRef::new_indirect(self.alloc)?;
                    }

                    let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
                    assert!(l0r.is_null());
                    *l0r = BlockRef::new(self.alloc)?;

                    continue;
                }
                unimplemented!();
            }
        }

        self.capacity = cap;
        Ok(())
    }

    pub fn write(&mut self, mut pos: usize, data: &[u8]) -> Result<usize, Errno> {
        if pos > self.size {
            return Err(Errno::InvalidArgument);
        }

        let mut rem = data.len();
        let mut doff = 0usize;

        if pos + rem > self.size {
            self.size = pos + rem;
            self.resize((pos + rem + block::SIZE - 1) / block::SIZE)?;
        }

        while rem > 0 {
            let index = pos / block::SIZE;
            let off = pos % block::SIZE;
            let count = min(block::SIZE - off, rem);

            let block = &mut self[index];

            let dst = &mut block[off..off + count];
            let src = &data[doff..doff + count];
            dst.copy_from_slice(src);

            doff += count;
            pos += count;
            rem -= count;
        }

        Ok(doff)
    }

    pub fn read(&self, mut pos: usize, data: &mut [u8]) -> Result<usize, Errno> {
        if pos > self.size {
            return Err(Errno::InvalidArgument);
        }

        let mut rem = min(self.size - pos, data.len());
        let mut doff = 0usize;

        while rem > 0 {
            let index = pos / block::SIZE;
            let off = pos % block::SIZE;
            let count = min(block::SIZE - off, rem);

            let block = &self[index];

            let src = &block[off..off + count];
            let dst = &mut data[doff..doff + count];
            dst.copy_from_slice(src);

            doff += count;
            pos += count;
            rem -= count;
        }

        Ok(doff)
    }
}

impl<'a, A: BlockAllocator + Copy> Index<usize> for Bvec<'a, A> {
    type Output = BlockRef<'a, A>;
    fn index(&self, mut index: usize) -> &Self::Output {
        if index >= self.capacity {
            panic!(
                "Index exceeds bvec capacity ({} >= {})",
                index, self.capacity
            );
        }

        if index < L0_BLOCKS {
            return unsafe { self.l0[index].assume_init_ref() };
        }
        index -= L0_BLOCKS;
        if index < L1_BLOCKS * block::ENTRY_COUNT {
            return unsafe {
                let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_ref();
                l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
            };
        }
        index -= L1_BLOCKS * block::ENTRY_COUNT;
        if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
            return unsafe {
                let l2 = self.l2.assume_init_ref();
                let l1 = l2.as_ref_array()[index / block::ENTRY_COUNT].assume_init_ref();
                l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
            };
        }
        unimplemented!();
    }
}

impl<'a, A: BlockAllocator + Copy> IndexMut<usize> for Bvec<'a, A> {
    fn index_mut(&mut self, mut index: usize) -> &mut Self::Output {
        if index >= self.capacity {
            panic!(
                "Index exceeds bvec capacity ({} >= {})",
                index, self.capacity
            );
        }

        if index < L0_BLOCKS {
            return unsafe { self.l0[index].assume_init_mut() };
        }
        index -= L0_BLOCKS;
        if index < L1_BLOCKS * block::ENTRY_COUNT {
            return unsafe {
                let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_mut();
                l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
            };
        }
        index -= L1_BLOCKS * block::ENTRY_COUNT;
        if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
            return unsafe {
                let l2 = self.l2.assume_init_mut();
                let l1 = l2.as_mut_ref_array()[index / block::ENTRY_COUNT].assume_init_mut();
                l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
            };
        }
        unimplemented!();
    }
}

impl<'a, A: BlockAllocator + Copy> Drop for Bvec<'a, A> {
    fn drop(&mut self) {
        for i in 0..min(L0_BLOCKS, self.capacity) {
            unsafe {
                self.l0[i].assume_init_drop();
            }
        }
        // TODO: only the direct (L0) blocks are released on drop so far; the
        // L1/L2 indirect chains are not yet freed here.
        if self.capacity > L0_BLOCKS {}
    }
}

#[cfg(feature = "test_bvec")]
#[cfg(test)]
mod tests {
    use super::*;
    use std::boxed::Box;
    use std::mem::MaybeUninit;
    use std::sync::atomic::{AtomicUsize, Ordering};

    static A_COUNTER: AtomicUsize = AtomicUsize::new(0);

    #[derive(Clone, Copy)]
    struct TestAlloc;
    impl BlockAllocator for TestAlloc {
        fn alloc(&self) -> *mut u8 {
            let b = Box::leak(Box::<[u8; block::SIZE]>::new_uninit());
            eprintln!("alloc {:p}", b);
            b.as_mut_ptr() as *mut _
        }

        unsafe fn free(&self, ptr: *mut u8) {
            eprintln!("drop {:p}", ptr);
            drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
        }
    }

    #[test]
    fn bvec_allocation() {
        #[derive(Clone, Copy)]
        struct A;
        impl BlockAllocator for A {
            fn alloc(&self) -> *mut u8 {
                let b = Box::leak(Box::<[u8; block::SIZE]>::new_uninit());
                A_COUNTER.fetch_add(1, Ordering::SeqCst);
                b.as_mut_ptr() as *mut _
            }

            unsafe fn free(&self, ptr: *mut u8) {
                A_COUNTER.fetch_sub(1, Ordering::SeqCst);
                drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
            }
        }

        let mut bvec = Bvec::new(A {});
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);

        bvec.resize(123).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            let l1r = bvec.l1[0].assume_init_ref();
            assert!(!l1r.is_null());
            for i in 0..123 - L0_BLOCKS {
                assert!(!l1r.as_ref_array()[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 123 + 1);

        bvec.resize(123 + block::ENTRY_COUNT).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..(123 + block::ENTRY_COUNT) - L0_BLOCKS {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            123 + block::ENTRY_COUNT + 2
        );

        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS
        );

        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 4)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 4 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 4 + 4 + 1
        );

        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 3 + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 3 + 1 + 4 + 1
        );

        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            for i in 0..block::ENTRY_COUNT * 2 + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            block::ENTRY_COUNT * 2 + 1 + 3 + 1
        );

        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 1)
            .unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(!l2r.is_null());
            let l1r = l2r.as_ref_array()[0].assume_init_ref();
            assert!(!l1r.is_null());
            assert!(!l1r.as_ref_array()[0].assume_init_ref().is_null());
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
            1 + 1 + 1
        );

        bvec.resize(L0_BLOCKS + 3 * block::ENTRY_COUNT + 1).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            for i in 0..3 * block::ENTRY_COUNT + 1 {
                let l1i = i / block::ENTRY_COUNT;
                let l0i = i % block::ENTRY_COUNT;
                let l1r = bvec.l1[l1i].assume_init_ref();
                assert!(!l1r.is_null());
                assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
            }
            let l2r = bvec.l2.assume_init_ref();
            assert!(l2r.is_null());
        }
        assert_eq!(
            A_COUNTER.load(Ordering::Acquire),
            L0_BLOCKS + // L0
            3 * block::ENTRY_COUNT + 1 + 4
        );

        bvec.resize(L0_BLOCKS).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
            assert!(bvec.l1[0].assume_init_ref().is_null());
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), L0_BLOCKS);

        bvec.resize(12).unwrap();
        unsafe {
            for i in 0..12 {
                assert!(!bvec.l0[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 12);

        bvec.resize(0).unwrap();
        unsafe {
            for i in 0..L0_BLOCKS {
                assert!(bvec.l0[i].assume_init_ref().is_null());
            }
        }
        assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
    }

    #[test]
    fn bvec_index_l0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS).unwrap();

        for i in 0..L0_BLOCKS {
            let block = &bvec[i];
            assert_eq!(block as *const _, bvec.l0[i].as_ptr());
        }
    }

    #[test]
    fn bvec_index_l1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();

        for i in 0..block::ENTRY_COUNT * 2 + 3 {
            let l1i = i / block::ENTRY_COUNT;
            let l0i = i % block::ENTRY_COUNT;
            let block = &bvec[i + L0_BLOCKS];
            let l1r = unsafe { bvec.l1[l1i].assume_init_ref() };
            assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
        }
    }

    #[test]
    fn bvec_index_l2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3)
            .unwrap();

        for i in 0..3 {
            let l1i = i / block::ENTRY_COUNT;
            let l0i = i % block::ENTRY_COUNT;
            let block = &bvec[i + L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT];
            let l2r = unsafe { bvec.l2.assume_init_ref() };
            let l1r = unsafe { l2r.as_ref_array()[l1i].assume_init_ref() };
            assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
        }
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l0_0() {
        let bvec = Bvec::new(TestAlloc {});
        let _block = &bvec[0];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l0_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[15];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[L0_BLOCKS + 2];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
        let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 2 + 6];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l1_2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
        let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 3 + 1];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_0() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(13).unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_1() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 3 + 13)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_2() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 6)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 8];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_3() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 7)
            .unwrap();
        let _block =
            &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13];
    }

    #[test]
    #[should_panic]
    fn bvec_index_invalid_l2_4() {
        let mut bvec = Bvec::new(TestAlloc {});
        bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13)
            .unwrap();
        let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 2];
    }

    #[test]
    fn bvec_write_read() {
        let mut bvec = Bvec::new(TestAlloc {});
        const N: usize = block::SIZE * (L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3);
        let mut data = vec![0u8; N];
        for i in 0..N {
            data[i] = (i & 0xFF) as u8;
        }
        assert_eq!(bvec.write(0, &data[..]), Ok(N));

        let mut buf = vec![0u8; 327];
        let mut off = 0usize;
        let mut rem = N;
        while rem != 0 {
            let count = min(rem, buf.len());
            assert_eq!(bvec.read(off, &mut buf[..]), Ok(count));

            for i in 0..count {
                assert_eq!(buf[i], ((i + off) & 0xFF) as u8);
            }

            rem -= count;
            off += count;
        }
    }
}
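Note: Bvec mirrors the classic direct/indirect block layout of a Unix inode — 32 direct (L0) slots, 8 singly-indirect (L1) slots, and one doubly-indirect (L2) slot. With block::SIZE = 4096 and block::ENTRY_COUNT = 512, the per-level capacities work out as follows (my own arithmetic, consistent with the // 128K and // 16M comments in the source):

    const BLOCK: usize = 4096;                         // block::SIZE
    const ENTRIES: usize = 512;                        // block::ENTRY_COUNT = 4096 / 8
    const L0_BYTES: usize = 32 * BLOCK;                // 131_072       = 128 KiB
    const L1_BYTES: usize = 8 * ENTRIES * BLOCK;       // 16_777_216    = 16 MiB
    const L2_BYTES: usize = ENTRIES * ENTRIES * BLOCK; // 1_073_741_824 = 1 GiB

A byte offset pos therefore lives in block pos / BLOCK, and Index/IndexMut walk L0, then L1, then L2, subtracting each level's block count as they go.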
53 fs/ramfs/src/lib.rs (new file)
@@ -0,0 +1,53 @@
#![no_std]
#![feature(new_uninit, maybe_uninit_uninit_array, maybe_uninit_extra)]

extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;

use alloc::rc::Rc;
use core::cell::RefCell;
use error::Errno;
use spin::Mutex;
use vfs::{Node, NodeRef, NodeType};

mod block;
pub use block::{BlockAllocator, BlockRef};
mod bvec;
use bvec::Bvec;

pub struct Ramfs<'a> {
    root: NodeRef,
    allocator: &'a Mutex<dyn BlockAllocator>,
}

pub struct NodeData {
}

fn load_tar(base: *const u8, size: usize) -> Result<NodeRef, Errno> {
    let root = Node::directory(b"");
    // TODO
    Ok(root)
}

pub fn open(
    base: *const u8,
    size: usize,
    allocator: &Mutex<dyn BlockAllocator>,
) -> Result<Ramfs, Errno> {
    let root = load_tar(base, size)?;

    Ok(Ramfs { root, allocator })
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ramfs_open() {
        let data = include_str!("../test/test0.tar");
        //let fs = open(data.as_ptr(), data.bytes().len()).unwrap();
    }
}
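Note: open borrows the allocator for the lifetime of the returned Ramfs. A sketch of wiring it up, where PageAlloc is a made-up stand-in for the kernel's real page allocator (and the unsized coercion from &Mutex<PageAlloc> to &Mutex<dyn BlockAllocator> is assumed to apply, as it does for spin's Mutex):

    struct PageAlloc;
    impl BlockAllocator for PageAlloc {
        fn alloc(&self) -> *mut u8 { core::ptr::null_mut() } // placeholder body
        unsafe fn free(&self, _ptr: *mut u8) {}
    }

    static ALLOC: Mutex<PageAlloc> = Mutex::new(PageAlloc);

    fn mount(tar: &[u8]) -> Result<Ramfs<'static>, Errno> {
        // load_tar is still a TODO, so this only builds an empty root today.
        open(tar.as_ptr(), tar.len(), &ALLOC)
    }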
2 fs/ramfs/test/file0.txt (new file)
@@ -0,0 +1,2 @@
This is a file
File0
BIN fs/ramfs/test/test0.tar (new file)
Binary file not shown.
10 fs/vfs/Cargo.toml (new file)
@@ -0,0 +1,10 @@
[package]
name = "vfs"
version = "0.1.0"
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
error = { path = "../../error" }
spin = "*"
187 fs/vfs/src/ioctx.rs (new file)
@@ -0,0 +1,187 @@
use crate::{path_element, Node, NodeRef};
use alloc::rc::Rc;
use error::Errno;

pub struct Ioctx {
    pub root: NodeRef,
    pub cwd: NodeRef,
}

impl Ioctx {
    fn lookup_or_load(parent: NodeRef, name: &[u8]) -> Result<NodeRef, Errno> {
        let p = parent.borrow_mut();

        if !p.is_directory() {
            return Err(Errno::NotADirectory);
        }

        if let Some(node) = p.children().find(|&node| node.borrow().name() == name) {
            return Ok(node.clone());
        }

        if let Some(ops) = p.ops.as_ref() {
            todo!();
        }

        Err(Errno::DoesNotExist)
    }

    fn _find(&self, mut at: NodeRef, path: &[u8]) -> Result<NodeRef, Errno> {
        let mut child_path: &[u8];
        let mut element_name: &[u8];

        if path.is_empty() {
            return Ok(at);
        }

        child_path = path;
        loop {
            let r = path_element(child_path);
            element_name = r.0;
            child_path = r.1;

            match element_name {
                b"." => {
                    if child_path.is_empty() {
                        return Ok(at);
                    }
                }
                b".." => {
                    let parent = at.borrow().parent().unwrap_or(at.clone());
                    at = parent;
                    if child_path.is_empty() {
                        return Ok(at);
                    }
                }
                _ => break,
            }
        }

        if element_name.is_empty() && child_path.is_empty() {
            return Ok(at);
        }
        assert!(!element_name.is_empty());
        let child = Self::lookup_or_load(at, element_name)?;

        if child_path.is_empty() {
            Ok(child)
        } else {
            self._find(child, child_path)
        }
    }

    pub fn find(&self, at: Option<NodeRef>, mut path: &[u8]) -> Result<NodeRef, Errno> {
        if path.is_empty() {
            return at.ok_or(Errno::DoesNotExist);
        }

        let at = if path[0] == b'/' {
            let index = path
                .iter()
                .position(|&x| x != b'/')
                .unwrap_or(path.len() - 1);
            path = &path[index..];
            self.root.clone()
        } else if let Some(node) = at {
            node
        } else {
            self.cwd.clone()
        };

        self._find(at, path)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::Node;

    #[test]
    fn ioctx_find_at() {
        let r = Node::directory(b"");
        let d0 = Node::directory(b"dir0");
        let d1 = Node::directory(b"dir1");
        let d0d0 = Node::directory(b"dir0");
        let d0f0 = Node::file(b"file0");

        Node::attach(r.clone(), d0.clone());
        Node::attach(r.clone(), d1.clone());
        Node::attach(d0.clone(), d0d0.clone());
        Node::attach(d0.clone(), d0f0.clone());

        let ioctx = Ioctx {
            root: r.clone(),
            cwd: r.clone(),
        };

        assert!(Rc::ptr_eq(
            ioctx.find(Some(r.clone()), b"dir0").as_ref().unwrap(),
            &d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"dir0").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(r.clone()), b"dir0/dir0").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"file0").as_ref().unwrap(),
            &d0f0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(r.clone()), b"dir0/file0").as_ref().unwrap(),
            &d0f0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"../dir1").as_ref().unwrap(),
            &d1
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"./dir0").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/.").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/./").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/").as_ref().unwrap(),
            &d0d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/..").as_ref().unwrap(),
            &d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/../..").as_ref().unwrap(),
            &r
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././dir0/../../../").as_ref().unwrap(),
            &r
        ));

        // TODO make these illegal
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././file0/..").as_ref().unwrap(),
            &d0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././file0/").as_ref().unwrap(),
            &d0f0
        ));
        assert!(Rc::ptr_eq(
            ioctx.find(Some(d0.clone()), b"././file0/.").as_ref().unwrap(),
            &d0f0
        ));

        ioctx.find(Some(r.clone()), b"dir0/dir1").expect_err("");
    }
}
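Note: resolution here is purely lexical — "." and ".." are folded by _find before any lookup happens, which is why paths like "file0/.." currently resolve through a regular file (the cases under "TODO make these illegal" above). A sketch of the call pattern, reusing the node names from the test:

    fn example(ioctx: &Ioctx) -> Result<(), Errno> {
        // Absolute paths resolve from ioctx.root regardless of `at`.
        let dir = ioctx.find(None, b"/dir0")?;
        // Relative paths resolve from the node passed as `at`.
        let file = ioctx.find(Some(dir.clone()), b"file0")?;
        let _ = file;
        Ok(())
    }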
28 fs/vfs/src/lib.rs (new file)
@@ -0,0 +1,28 @@
#![no_std]

#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;

use error::Errno;

pub mod node;
mod util;
pub use node::{Node, NodeOperations, NodeRef, NodeType};
mod ioctx;
pub use ioctx::Ioctx;

pub fn path_element(path: &[u8]) -> (&[u8], &[u8]) {
    if let Some(mut index) = path.iter().position(|&x| x == b'/') {
        let elem = &path[..index];
        while index < path.len() && path[index] == b'/' {
            index += 1;
        }
        (elem, &path[index..])
    } else {
        (path, b"")
    }
}
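Note: path_element splits off the first component and swallows the whole run of separators that follows it, so repeated slashes collapse. Illustrative values of the contract (my own examples, not from the source):

    assert_eq!(path_element(b"dir0//dir1/x"), (&b"dir0"[..], &b"dir1/x"[..]));
    assert_eq!(path_element(b"file0"), (&b"file0"[..], &b""[..]));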
181 fs/vfs/src/node.rs (new file)
@@ -0,0 +1,181 @@
use crate::util::{iter::LockedIterator, FixedStr};
use alloc::{boxed::Box, rc::Rc, vec::Vec};
use core::any::Any;
use core::cell::RefCell;
use core::ffi::c_void;
use core::fmt;
use core::ptr::null_mut;
use error::Errno;
use spin::Mutex;

pub type NodeRef = Rc<RefCell<Node>>;

pub enum NodeType {
    Regular,
    Directory { children: Vec<NodeRef> },
}

pub const NODE_MEMORY: u32 = 1 << 0;

pub struct NodeOperations {
    lookup: Option<fn(parent: NodeRef, name: &[u8]) -> Result<NodeRef, Errno>>,
    drop: Option<fn(node: &mut Node)>,
}

pub struct Node {
    name: FixedStr<64>,
    typ: NodeType,
    flags: u32,

    pub(crate) ops: Option<&'static NodeOperations>,
    pub data: *mut c_void,

    target: Option<NodeRef>,
    parent: Option<NodeRef>,
}

impl Node {
    pub fn new(name: &[u8], typ: NodeType) -> Self {
        let mut r = Self {
            name: FixedStr::empty(),
            typ,
            flags: 0,

            ops: None,
            data: null_mut(),

            parent: None,
            target: None,
        };
        r.name.copy_from_slice(name);
        r
    }

    pub fn directory(name: &[u8]) -> Rc<RefCell<Self>> {
        Rc::new(RefCell::new(Self::new(
            name,
            NodeType::Directory { children: vec![] },
        )))
    }

    pub fn file(name: &[u8]) -> Rc<RefCell<Self>> {
        Rc::new(RefCell::new(Self::new(name, NodeType::Regular)))
    }

    pub fn name(&self) -> &FixedStr<64> {
        &self.name
    }

    pub fn parent(&self) -> Option<NodeRef> {
        self.parent.clone()
    }

    pub fn is_directory(&self) -> bool {
        matches!(&self.typ, NodeType::Directory { .. })
    }

    pub fn children(&self) -> impl Iterator<Item = &NodeRef> {
        let lock = TREE_MUTEX.lock();
        match &self.typ {
            NodeType::Directory { children } => LockedIterator::new(children.iter(), lock),
            _ => panic!("Not a directory"),
        }
    }

    pub fn children_mut(&mut self) -> impl Iterator<Item = &mut NodeRef> {
        let lock = TREE_MUTEX.lock();
        match &mut self.typ {
            NodeType::Directory { children } => LockedIterator::new(children.iter_mut(), lock),
            _ => panic!("Not a directory"),
        }
    }

    pub fn attach(parent: NodeRef, child: NodeRef) {
        let _lock = TREE_MUTEX.lock();
        assert!(child.borrow().parent.is_none());
        match &mut parent.borrow_mut().typ {
            NodeType::Directory { children } => children.push(child.clone()),
            _ => panic!("Not a directory"),
        }
        child.borrow_mut().parent.replace(parent.clone());
    }

    pub fn detach(child: NodeRef) {
        let _lock = TREE_MUTEX.lock();
        assert!(child.borrow().parent.is_some());
        let parent = child.borrow_mut().parent.take().unwrap();
        match &mut parent.borrow_mut().typ {
            NodeType::Directory { children } => {
                children.remove(children.iter().position(|x| Rc::ptr_eq(x, &child)).unwrap());
            }
            _ => panic!("Not a directory"),
        };
    }
}

impl Drop for Node {
    fn drop(&mut self) {
        if let Some(ops) = self.ops.as_ref() {
            if let Some(do_drop) = ops.drop.as_ref() {
                do_drop(self);
            }
        }
    }
}

impl fmt::Debug for Node {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Node {{ name={:?}, typ=??? }}", self.name)
    }
}

static TREE_MUTEX: Mutex<()> = Mutex::new(());

#[cfg(test)]
mod tests {
    use super::*;
    use core::any::{type_name, TypeId};
    use core::sync::atomic::{AtomicBool, Ordering};

    #[test]
    fn node_new() {
        let r = Node::directory(b"");
        let n = r.borrow();
        assert_eq!(n.name, b""[..]);
        assert!(matches!(n.typ, NodeType::Directory { .. }));
        assert!(n.ops.is_none());
        assert!(n.target.is_none());
        assert!(n.parent.is_none());

        let r = Node::file(b"file1");
        let n = r.borrow();
        assert_eq!(n.name, b"file1"[..]);
        assert!(matches!(n.typ, NodeType::Regular));
        assert!(n.ops.is_none());
        assert!(n.target.is_none());
        assert!(n.parent.is_none());
    }

    #[test]
    fn node_attach() {
        let r0 = Node::directory(b"");
        let r1 = Node::directory(b"1234");

        // Attach
        Node::attach(r0.clone(), r1.clone());

        assert!(Rc::ptr_eq(r1.borrow().parent.as_ref().unwrap(), &r0));
        {
            let n0 = r0.borrow();
            let mut it = n0.children();
            assert!(Rc::ptr_eq(&it.next().unwrap(), &r1));
            assert!(it.next().is_none());
        }

        // Detach
        Node::detach(r1.clone());

        assert!(r1.borrow().parent.is_none());
        assert_eq!(r0.borrow().children().count(), 0);
    }
}
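Note: every structural operation (attach, detach, children, children_mut) serializes on the global TREE_MUTEX, and children() moves the guard into the returned LockedIterator, so the lock is held for as long as the caller iterates — calling attach or detach from inside such a loop would self-deadlock on this spinlock. Usage follows the node_attach test:

    let root = Node::directory(b"");
    let child = Node::directory(b"dir0");
    Node::attach(root.clone(), child.clone());
    assert_eq!(root.borrow().children().count(), 1); // guard dropped after count()
    Node::detach(child.clone());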
22 fs/vfs/src/util/iter.rs (new file)
@@ -0,0 +1,22 @@
use spin::MutexGuard;

pub struct LockedIterator<'a, T: ?Sized, I: Iterator<Item = T>> {
    inner: I,
    #[allow(dead_code)]
    lock: MutexGuard<'a, ()>,
}

impl<'a, T, I: Iterator<Item = T>> Iterator for LockedIterator<'a, T, I> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }
}

impl<'a, T, I: Iterator<Item = T>> LockedIterator<'a, T, I> {
    #[inline]
    pub fn new(inner: I, lock: MutexGuard<'a, ()>) -> Self {
        Self { inner, lock }
    }
}
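Note: LockedIterator is a small RAII adapter — the MutexGuard rides inside the iterator and is released only when the iterator is dropped, which is what makes returning impl Iterator from Node::children() sound. A minimal sketch in isolation, with a hypothetical static lock:

    use spin::Mutex;

    static LOCK: Mutex<()> = Mutex::new(());

    fn locked_range() -> impl Iterator<Item = u32> {
        // The guard is moved into the iterator; the lock stays held until the
        // caller finishes iterating and drops the iterator.
        LockedIterator::new(1u32..4, LOCK.lock())
    }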
94 fs/vfs/src/util/mod.rs (new file)
@@ -0,0 +1,94 @@
use alloc::boxed::Box;
use core::alloc::Layout;
use core::ffi::c_void;
use core::fmt;

pub mod iter;

#[repr(transparent)]
pub struct FixedStr<const N: usize> {
    inner: [u8; N],
}

impl<const N: usize> FixedStr<N> {
    pub fn new(s: &[u8]) -> Self {
        let mut r = Self::empty();
        r.copy_from_slice(s);
        r
    }

    pub const fn empty() -> Self {
        Self { inner: [0; N] }
    }

    pub fn len(&self) -> usize {
        self.inner.iter().position(|&n| n == 0).unwrap_or(N)
    }

    pub fn copy_from_slice(&mut self, src: &[u8]) {
        let src_len = src.len();
        if src_len == 0 {
            self.inner[0] = 0;
            return;
        }
        if src_len >= N {
            panic!("String buffer overflow");
        }
        let dst = &mut self.inner[..src_len];
        dst.copy_from_slice(src);
        self.inner[src_len] = 0;
    }
}

impl<const N: usize> PartialEq<[u8]> for FixedStr<N> {
    fn eq(&self, other: &[u8]) -> bool {
        let self_len = self.len();
        if self_len != other.len() {
            return false;
        }
        &self.inner[..self_len] == other
    }
}

impl<const N: usize> fmt::Display for FixedStr<N> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.inner {
            if byte == 0 {
                break;
            }
            write!(f, "{}", byte as char)?;
        }
        Ok(())
    }
}

impl<const N: usize> fmt::Debug for FixedStr<N> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn fixed_str_display() {
        let s = FixedStr::<64>::new(b"test");
        assert_eq!(format!("{}", s), "test");
    }

    #[test]
    fn fixed_str_length() {
        assert_eq!(FixedStr::<64>::empty().len(), 0);
        assert_eq!(FixedStr::<64>::new(b"test1").len(), 5);
        assert_eq!(FixedStr::<6>::new(b"test1").len(), 5);
    }

    #[test]
    fn fixed_str_eq_slice() {
        assert_eq!(FixedStr::<64>::empty(), b""[..]);
        assert_eq!(FixedStr::<64>::new(b"1234"), b"1234"[..]);
    }
}
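Note: FixedStr is a fixed-capacity, NUL-terminated byte string; copy_from_slice panics unless the source leaves room for the terminator (src_len >= N overflows). The boundary behaviour, with my own example values:

    let name = FixedStr::<8>::new(b"kernel"); // 6 bytes + NUL terminator: fits
    assert_eq!(name.len(), 6);
    assert_eq!(name, b"kernel"[..]);
    // FixedStr::<8>::new(b"12345678") would panic: 8 bytes leave no room
    // for the terminator.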
@@ -1,4 +1,4 @@
-use crate::arch::{intrin, smp};
+use crate::arch::intrin;
 use core::mem::MaybeUninit;
 use core::ops::{Deref, DerefMut};
 use core::sync::atomic::AtomicUsize;
@@ -8,19 +8,20 @@ pub struct Cpu {
     index: u32, // 0x00
 }
 
 #[repr(transparent)]
 pub struct CpuRef {
     inner: &'static mut Cpu,
 }
 
 impl Cpu {
     fn new(index: u32) -> Self {
-        Self { index }
+        Self {
+            index,
+        }
     }
 
     #[inline(always)]
     pub unsafe fn get_raw() -> &'static mut Cpu {
-        &mut *(intrin::read_tpidr_el1() as *mut _)
+        CPU.assume_init_mut()
     }
 
     #[inline(always)]
@@ -30,16 +31,13 @@ impl Cpu {
 
     #[inline]
     pub fn get() -> CpuRef {
         // TODO lock
         CpuRef {
             inner: unsafe { Self::get_raw() },
         }
     }
-}
-
-impl Drop for CpuRef {
-    fn drop(&mut self) {
-        // TODO release
+
+    pub unsafe fn init(index: u32) {
+        CPU.write(Self::new(index));
     }
 }
-
@@ -57,14 +55,7 @@ impl DerefMut for CpuRef {
     }
 }
 
-pub static mut CPUS: [MaybeUninit<Cpu>; smp::MAX_CPU] = MaybeUninit::uninit_array();
-pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
-
-fn set_cpu(cpu: *mut Cpu) {
-    unsafe {
-        intrin::write_tpidr_el1(cpu as usize);
-    }
-}
+static mut CPU: MaybeUninit<Cpu> = MaybeUninit::uninit();
 
 #[inline(always)]
 pub fn get_phys_id() -> usize {
@@ -73,7 +64,6 @@ pub fn get_phys_id() {
 
 pub fn init(index: usize) {
     unsafe {
-        CPUS[index].write(Cpu::new(index as u32));
-        set_cpu(CPUS[index].as_mut_ptr());
+        Cpu::init(index as u32);
     }
 }
@@ -28,9 +28,20 @@ struct ExceptionContext {
     far: usize,
 }
 
+impl ExceptionContext {
+    pub fn dump(&self) {
+        debugln!(" x0 = {:#018x}, x1 = {:#018x}", self.x0, self.x1);
+        debugln!(" x2 = {:#018x}, x3 = {:#018x}", self.x2, self.x3);
+        debugln!(" x4 = {:#018x}, x5 = {:#018x}", self.x4, self.x5);
+        debugln!(" x6 = {:#018x}, x7 = {:#018x}", self.x6, self.x7);
+        debugln!(" x8 = {:#018x}, x9 = {:#018x}", self.x8, self.x9);
+    }
+}
+
 #[no_mangle]
-extern "C" fn exc_handler(_context: ExceptionContext) -> ! {
-    debug!("Unhandled exception\n");
+extern "C" fn exc_handler(ctx: ExceptionContext) -> ! {
+    debugln!("Unhandled exception");
+    debugln!("FAR = {:#018x}, ESR = {:#018x}", ctx.far, ctx.esr);
 
     loop {}
 }
@@ -1,3 +1,10 @@
+pub fn delay(mut p: usize) {
+    while p != 0 {
+        nop();
+        p -= 1;
+    }
+}
+
 #[inline(always)]
 pub unsafe fn disable_irq() {
     llvm_asm!("msr daifset, #0xF");
@@ -8,6 +15,21 @@ pub unsafe fn enable_irq() {
     llvm_asm!("msr daifclr, #0xF");
 }
 
+#[inline(always)]
+pub unsafe fn save_irq() -> usize {
+    let mut out: usize;
+    llvm_asm!(r#"
+        mrs $0, daif
+        msr daifset, #0xF
+    "#:"=r"(out));
+    out
+}
+
+#[inline(always)]
+pub unsafe fn restore_irq(state: usize) {
+    llvm_asm!("msr daif, $0"::"r"(state));
+}
+
 #[inline(always)]
 pub fn nop() {
     unsafe {
@@ -12,7 +12,7 @@ impl Aux {
     const AUX_ENABLES_MUART: u32 = 1 << 0;
 
     pub unsafe fn enable_uart(&self) {
-        let tmp = mmio_read(Self::REG_AUX_ENABLES);
+        let tmp: u32 = mmio_read(Self::REG_AUX_ENABLES);
         mmio_write(Self::REG_AUX_ENABLES, tmp | Self::AUX_ENABLES_MUART);
     }
 }
@@ -61,8 +61,7 @@ impl SerialDevice for AuxUart {
 
 impl InterruptHandler for AuxUart {
     fn do_irq(&self, _irq: u32) {
-        let byte = unsafe { mmio_read(Self::REG_AUX_MU_IO) } as u8;
-        debugln!("{}", byte as char);
+        let _byte = unsafe { mmio_read::<u32>(Self::REG_AUX_MU_IO) } as u8;
     }
 }
@@ -59,7 +59,7 @@ impl InterruptController for Qa7Intc {
         match irq {
             super::IRQ_LOCAL_TIMER => {
                 let phys_core_id = cpu::get_phys_id();
-                let tmp = mmio_read(Self::REG_TIMER_INTC + 4 * phys_core_id);
+                let tmp: u32 = mmio_read(Self::REG_TIMER_INTC + 4 * phys_core_id);
                 mmio_write(
                     Self::REG_TIMER_INTC + 4 * phys_core_id,
                     tmp | Self::INTC_CNTPNSIRQ_IRQ,
@@ -74,7 +74,7 @@ impl InterruptController for Qa7Intc {
     }
 
     fn is_irq_pending(&self, irq: u32) -> bool {
-        unsafe { mmio_read(Self::REG_INT_SRC) & (1 << irq) != 0 }
+        unsafe { mmio_read::<u32>(Self::REG_INT_SRC) & (1 << irq) != 0 }
     }
 
     unsafe fn clear_irq(&self, _irq: u32) {}
@@ -95,9 +95,9 @@ impl InterruptController for Bcm2837Intc {
 
     fn is_irq_pending(&self, irq: u32) -> bool {
         if irq < 32 {
-            unsafe { mmio_read(Self::REG_PENDING_IRQ1) & (1 << irq) != 0 }
+            unsafe { mmio_read::<u32>(Self::REG_PENDING_IRQ1) & (1 << irq) != 0 }
         } else if irq < 64 {
-            unsafe { mmio_read(Self::REG_PENDING_IRQ2) & (1 << (irq - 32)) != 0 }
+            unsafe { mmio_read::<u32>(Self::REG_PENDING_IRQ2) & (1 << (irq - 32)) != 0 }
         } else {
             false
         }
@@ -1,5 +1,5 @@
 use crate::{
-    arch::{cpu, intrin, mmio_read, mmio_write, smp::{self, IpiDelivery, IpiMessage}},
+    arch::{cpu, intrin, mmio_read, mmio_write},
     KernelSpace,
 };
 use address::{PhysicalAddress, VirtualAddress};
@@ -28,56 +28,59 @@ struct Message {
 }
 
 static mut MESSAGE: Message = Message { data: [0; 36] };
-pub static CORE_MBOX0: CoreMailbox = CoreMailbox { index: 0 };
-
-pub struct CoreMailbox {
-    index: usize,
-}
-
-impl IpiDelivery for CoreMailbox {
-    fn enable(&self) {
-        let phys_core_id = cpu::get_phys_id();
-        unsafe {
-            mmio_write(Self::REG_INTC + phys_core_id * 4, 1 << self.index);
-        }
-    }
-
-    fn send_ipi(target_id: u32, message: IpiMessage) {
-        unsafe {
-            mmio_write(Self::REG_SET + target_id as usize * 16, 1 << (message as u32));
-        }
-    }
-}
-
-impl CoreMailbox {
-    const REG_INTC: PhysicalAddress = PhysicalAddress::new(0x40000050);
-    const REG_SET: PhysicalAddress = PhysicalAddress::new(0x40000080);
-    const REG_RDCLR: PhysicalAddress = PhysicalAddress::new(0x400000C0);
-
-    pub fn do_irq(&self) {
-        let phys_core_id = cpu::get_phys_id();
-        let value = unsafe { mmio_read(Self::REG_RDCLR + phys_core_id * 16 + self.index * 4) };
-        if value != 0 {
-            macro_rules! test_ipi {
-                ($value:expr, $msg:expr) => {
-                    if $value & (1 << ($msg as u32)) != 0 {
-                        smp::handle_ipi($msg);
-                    }
-                }
-            }
-
-            test_ipi!(value, IpiMessage::Halt);
-            test_ipi!(value, IpiMessage::Tick);
-
-            unsafe {
-                mmio_write(
-                    Self::REG_RDCLR + phys_core_id * 16 + self.index * 4,
-                    0xFFFFFFFF,
-                );
-            }
-        }
-    }
-}
+//pub static CORE_MBOX0: CoreMailbox = CoreMailbox { index: 0 };
+//
+//pub struct CoreMailbox {
+//    index: usize,
+//}
+//
+//impl IpiDelivery for CoreMailbox {
+//    fn enable(&self) {
+//        let phys_core_id = cpu::get_phys_id();
+//        unsafe {
+//            mmio_write(Self::REG_INTC + phys_core_id * 4, 1 << self.index);
+//        }
+//    }
+//
+//    fn send_ipi(target_id: u32, message: IpiMessage) {
+//        unsafe {
+//            mmio_write(
+//                Self::REG_SET + target_id as usize * 16,
+//                1 << (message as u32),
+//            );
+//        }
+//    }
+//}
+//
+//impl CoreMailbox {
+//    const REG_INTC: PhysicalAddress = PhysicalAddress::new(0x40000050);
+//    const REG_SET: PhysicalAddress = PhysicalAddress::new(0x40000080);
+//    const REG_RDCLR: PhysicalAddress = PhysicalAddress::new(0x400000C0);
+//
+//    pub fn do_irq(&self) {
+//        let phys_core_id = cpu::get_phys_id();
+//        let value: u32 = unsafe { mmio_read(Self::REG_RDCLR + phys_core_id * 16 + self.index * 4) };
+//        if value != 0 {
+//            macro_rules! test_ipi {
+//                ($value:expr, $msg:expr) => {
+//                    if $value & (1 << ($msg as u32)) != 0 {
+//                        smp::handle_ipi($msg);
+//                    }
+//                };
+//            }
+//
+//            test_ipi!(value, IpiMessage::Halt);
+//            test_ipi!(value, IpiMessage::Tick);
+//
+//            unsafe {
+//                mmio_write::<u32>(
+//                    Self::REG_RDCLR + phys_core_id * 16 + self.index * 4,
+//                    0xFFFFFFFF,
+//                );
+//            }
+//        }
+//    }
+//}
 
 unsafe fn call(ch: u32) -> Result<(), ()> {
     let value = (usize::from(PhysicalAddress::from(
@@ -85,7 +88,7 @@ unsafe fn call(ch: u32) -> Result<(), ()> {
     )) as u32)
         | (ch & 0xF);
 
-    while mmio_read(MBOX_STATUS) & MBOX_STATUS_FULL != 0 {
+    while mmio_read::<u32>(MBOX_STATUS) & MBOX_STATUS_FULL != 0 {
         llvm_asm!("nop");
     }
 
@@ -93,11 +96,11 @@ unsafe fn call(ch: u32) -> Result<(), ()> {
     mmio_write(MBOX_WRITE, value);
 
     loop {
-        while mmio_read(MBOX_STATUS) & MBOX_STATUS_EMPTY != 0 {
+        while mmio_read::<u32>(MBOX_STATUS) & MBOX_STATUS_EMPTY != 0 {
             llvm_asm!("nop");
         }
 
-        if mmio_read(MBOX_READ) == value {
+        if mmio_read::<u32>(MBOX_READ) == value {
             if MESSAGE.data[1] == MBOX_RESPONSE {
                 return Ok(());
             } else {
@@ -4,7 +4,6 @@ use address::PhysicalAddress;
 pub mod aux;
 pub mod intc;
 pub mod mbox;
-pub mod smp;
 pub mod timer;
 
 pub const IRQ_LOCAL_TIMER: u32 = 1;
@@ -1,42 +0,0 @@
use crate::{
    arch::{cpu::CPU_COUNT, intrin},
    mem::phys::{self, PageUsage},
    KernelSpace,
};
use address::VirtualAddress;
use core::hint;
use core::sync::atomic::Ordering;

pub const MAX_CPU: usize = 4;

pub fn cpu_ready(_index: usize) {
    CPU_COUNT.fetch_add(1, Ordering::SeqCst);
}

fn wakeup_single_ap() {
    extern "C" {
        static mut ap_wakeup_lock: u64;
        static mut ap_init_value: u64;
    }

    let stack_bottom_phys = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
    let stack_bottom = VirtualAddress::<KernelSpace>::from(stack_bottom_phys);

    let old_count = CPU_COUNT.load(Ordering::SeqCst);

    unsafe {
        core::ptr::write_volatile(&mut ap_init_value, stack_bottom.into());
        intrin::dsb_sy();
        core::ptr::write_volatile(&mut ap_wakeup_lock, 0);
    }

    while CPU_COUNT.load(Ordering::SeqCst) == old_count {
        hint::spin_loop();
    }
}

pub fn wakeup_ap_cpus() {
    for _ in 1..4 {
        wakeup_single_ap();
    }
}
@@ -28,62 +28,4 @@
 .set PAGE_ATTR_SHIFT, 2
 
 .set KERNEL_OFFSET, 0xFFFFFF8000000000
-
-.macro __exc_save_ctx
-    sub sp, sp, #192
-
-    stp x0, x1, [sp, #0]
-    stp x2, x3, [sp, #16]
-    stp x4, x5, [sp, #32]
-    stp x6, x7, [sp, #48]
-    stp x8, x9, [sp, #64]
-    stp x10, x11, [sp, #80]
-    stp x12, x13, [sp, #96]
-    stp x14, x15, [sp, #112]
-    stp x16, x17, [sp, #128]
-    stp x18, x29, [sp, #144]
-
-    mrs x0, elr_el1
-    stp x30, x0, [sp, #160]
-.endm
-
-.macro __exc_restore_ctx
-    ldp x30, x0, [sp, #160]
-    msr elr_el1, x0
-
-    ldp x0, x1, [sp, #0]
-    ldp x2, x3, [sp, #16]
-    ldp x4, x5, [sp, #32]
-    ldp x6, x7, [sp, #48]
-    ldp x8, x9, [sp, #64]
-    ldp x10, x11, [sp, #80]
-    ldp x12, x13, [sp, #96]
-    ldp x14, x15, [sp, #112]
-    ldp x16, x17, [sp, #128]
-    ldp x18, x29, [sp, #144]
-
-    add sp, sp, #192
-.endm
-
-.macro __callee_save_ctx
-    sub sp, sp, #96
-
-    stp x19, x20, [sp, #0]
-    stp x21, x22, [sp, #16]
-    stp x23, x24, [sp, #32]
-    stp x25, x26, [sp, #48]
-    stp x27, x29, [sp, #64]
-    stp xzr, lr, [sp, #80]
-.endm
-
-.macro __callee_restore_ctx
-    ldp x19, x20, [sp, #0]
-    ldp x21, x22, [sp, #16]
-    ldp x23, x24, [sp, #32]
-    ldp x25, x26, [sp, #48]
-    ldp x27, x29, [sp, #64]
-    ldp xzr, lr, [sp, #80]
-
-    add sp, sp, #96
-.endm
 .endif
@ -2,7 +2,6 @@ use crate::KernelSpace;
use address::{PhysicalAddress, VirtualAddress};

pub mod cpu;
pub mod smp;
pub mod exception;
pub mod intrin;
pub mod timer;
@ -17,14 +16,21 @@ cfg_if! {
    }
}

pub unsafe fn mmio_write(addr: PhysicalAddress, value: u32) {
pub trait MmioSize {}
impl MmioSize for u32 {}
impl MmioSize for i32 {}
impl MmioSize for u8 {}

#[inline]
pub unsafe fn mmio_write<T: MmioSize>(addr: PhysicalAddress, value: T) {
    core::ptr::write_volatile(
        VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
        value,
    );
}

pub unsafe fn mmio_read(addr: PhysicalAddress) -> u32 {
#[inline]
pub unsafe fn mmio_read<T: MmioSize>(addr: PhysicalAddress) -> T {
    core::ptr::read_volatile(
        VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
    )

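The `MmioSize` marker trait above restricts the generic volatile accessors to register-sized types, so callers now state the access width explicitly with a turbofish. A self-contained miniature of the same pattern, using a raw pointer instead of the kernel's PhysicalAddress type (UART_BASE is a made-up example address, not one of the kernel's constants):

trait MmioSize {}
impl MmioSize for u32 {}
impl MmioSize for u8 {}

unsafe fn mmio_read<T: MmioSize>(addr: *const T) -> T {
    core::ptr::read_volatile(addr)
}

unsafe fn example() -> u8 {
    const UART_BASE: usize = 0x0900_0000;
    // The turbofish picks the access width, so a 32-bit register read
    // can no longer be silently truncated or widened at the call site.
    let flags = mmio_read::<u32>(UART_BASE as *const u32);
    (flags & 0xFF) as u8
}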
@ -1,99 +0,0 @@
use crate::{
    arch::{
        cpu::{self, Cpu, CPU_COUNT},
        intrin,
    },
    entry_common,
};
use core::sync::atomic::Ordering;

pub trait IpiDelivery {
    fn enable(&self);
    fn send_ipi(target_id: u32, message: IpiMessage);
}

cfg_if! {
    if #[cfg(feature = "mach_rpi3b")] {
        use super::mach_bcm283x::{mbox, smp as smp_impl};

        pub type IpiDeliveryImpl = mbox::CoreMailbox;
        use mbox::CORE_MBOX0 as IPI_IMPL;
    } else {
        // Dummy SMP implementation
        mod smp_impl {
            use super::{IpiDelivery, IpiMessage};
            pub const MAX_CPU: usize = 1;

            pub struct IpiDeliveryImpl;

            impl IpiDelivery for IpiDeliveryImpl {
                fn enable(&self) {}
                fn send_ipi(_target_id: u32, _message: IpiMessage) {}
            }

            pub(super) fn cpu_ready(_index: usize) {}
            pub(super) fn wakeup_ap_cpus() {}

            pub static IPI_IMPL: IpiDeliveryImpl = IpiDeliveryImpl;
        }

        pub use smp_impl::{IpiDeliveryImpl, IPI_IMPL};
    }
}

pub use smp_impl::MAX_CPU;
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(u32)]
pub enum IpiMessage {
    Halt = 0,
    Tick = 1,
}

#[no_mangle]
extern "C" fn kernel_ap_main() -> ! {
    let index = cpu::get_phys_id();
    debugln!("cpu{}: ap wake up", index);
    cpu::init(index);

    smp_impl::cpu_ready(index);

    entry_common();
}

pub unsafe fn send_ipi(mask: usize, message: IpiMessage) {
    let self_index = Cpu::get().index();

    for index in 0..CPU_COUNT.load(Ordering::Relaxed) {
        if (1 << index) & mask != 0 && self_index != index as u32 {
            // TODO cpus must be in phys order?
            IpiDeliveryImpl::send_ipi(index as u32, message);
        }
    }
}

pub fn handle_ipi(message: IpiMessage) {
    debugln!("cpu{} received ipi: {:?}", Cpu::get().index(), message);

    match message {
        IpiMessage::Halt => {
            unsafe {
                intrin::disable_irq();
            }
            loop {
                unsafe {
                    intrin::disable_irq();
                }
                intrin::nop();
            }
        }
        IpiMessage::Tick => {}
    }
}

pub fn wakeup_ap_cpus() {
    smp_impl::wakeup_ap_cpus();
}

pub fn init_ipi_delivery() {
    IPI_IMPL.enable();
}
@ -1,5 +1,6 @@
use crate::{
    arch::{intrin, machine},
    arch::{intrin, machine, cpu::Cpu},
    proc::sched,
    dev::{
        irq::{self, InterruptController, InterruptHandler},
        Device,
@ -24,9 +25,9 @@ impl Device for ArmTimer {

impl InterruptHandler for ArmTimer {
    fn do_irq(&self, _irq: u32) {
        debugln!("T");
        unsafe {
            intrin::write_cntp_tval_el0(10000000);
            sched::sched_yield();
            intrin::write_cntp_tval_el0(100000);
        }
    }
}

@ -1,5 +1,42 @@
// vi:ft=asm :
.include "kernel/src/arch/macros.S"

.macro __exc_save_ctx
    sub sp, sp, #192

    stp x0, x1, [sp, #0]
    stp x2, x3, [sp, #16]
    stp x4, x5, [sp, #32]
    stp x6, x7, [sp, #48]
    stp x8, x9, [sp, #64]
    stp x10, x11, [sp, #80]
    stp x12, x13, [sp, #96]
    stp x14, x15, [sp, #112]
    stp x16, x17, [sp, #128]
    stp x18, x29, [sp, #144]

    mrs x0, elr_el1
    stp x30, x0, [sp, #160]
.endm

.macro __exc_restore_ctx
    ldp x30, x0, [sp, #160]
    msr elr_el1, x0

    ldp x0, x1, [sp, #0]
    ldp x2, x3, [sp, #16]
    ldp x4, x5, [sp, #32]
    ldp x6, x7, [sp, #48]
    ldp x8, x9, [sp, #64]
    ldp x10, x11, [sp, #80]
    ldp x12, x13, [sp, #96]
    ldp x14, x15, [sp, #112]
    ldp x16, x17, [sp, #128]
    ldp x18, x29, [sp, #144]

    add sp, sp, #192
.endm

.section .rodata
.global el1_vectors
.p2align 7

@ -8,32 +8,9 @@ _entry:
    ands x1, x1, #3
    beq _entry_bsp

1:
    adr x8, ap_wakeup_lock
    mov x9, #1
_ap_loop:
    // Will acquire exclusive access to [x8]
    ldaxr x0, [x8]
    // Will try to write 1 into [x8], failing if
    // any other PE has acquired exclusive access at this point
    stxr w1, x9, [x8]
    // Store failed, jump back
    cbnz w1, _ap_loop
    // [x8] data wasn't zero, jump back
    cbnz x0, _ap_loop
_ap_wakeup:
    adr x0, ap_init_value

    mov x1, #KERNEL_OFFSET

    // Kernel stack
    ldr x10, [x0]

    // Entry
    adr x11, kernel_ap_main
    add x11, x11, x1

    b _entry_ap
1:
    b 1b

.section .text
_entry_bsp:
@ -44,14 +21,21 @@ _entry_bsp:
    // Setup paging tables
    // This is done once for all PEs
    adr x0, kernel_l1
    mov x1, #PAGE_PRESENT
    orr x1, x1, #PAGE_ACCESSED
    orr x1, x1, #PAGE_ISH

    mov x2, #PAGE_PRESENT
    orr x2, x2, #PAGE_ACCESSED
    orr x2, x2, #PAGE_ISH

    mov x1, x2
    str x1, [x0]

    orr x1, x1, #1 << 30
    orr x1, x2, #1 << 30
    str x1, [x0, #8]

    ldr x1, =0x4010000000
    orr x1, x2, x1
    str x1, [x0, #16]

    // Load BSP stack
    mov x0, #KERNEL_OFFSET
    adr x10, bsp_stack_top

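The `_ap_loop` removed above is a hand-rolled acquire spinlock built from AArch64's exclusive pair (ldaxr/stxr), which is exactly what an acquire compare-exchange loop compiles down to on this architecture. A rough Rust analogue, offered purely as illustration rather than kernel code:

use core::sync::atomic::{AtomicU64, Ordering};

fn ap_wait(lock: &AtomicU64) {
    // Spin until we observe 0 and manage to store 1 within one
    // exclusive load/store pair (the stxr failure path is the Err case).
    while lock
        .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        core::hint::spin_loop();
    }
}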
@ -1,6 +1,9 @@
use crate::dev::serial::{SerialDevice, SERIAL0};
use crate::{
    arch::intrin,
    dev::serial::{SerialDevice, SERIAL0},
    sync::Spin as Mutex,
};
use core::fmt;
use spin::Mutex;

struct Debug;

@ -33,5 +36,9 @@ macro_rules! debugln {

pub fn debug_fmt(args: fmt::Arguments<'_>) {
    use fmt::Write;
    let u = unsafe { intrin::save_irq() };
    write!(DEBUG.lock(), "{}", args).unwrap();
    unsafe {
        intrin::restore_irq(u);
    }
}

@ -74,7 +74,7 @@ cfg_if! {
        pub fn get_intc() -> &'static impl InterruptController {
            &INTC
        }
    } else {
    } else if #[cfg(feature = "mach_virt")] {
        pub mod gic;
        use gic::Gic;
        use crate::arch::machine;

@ -37,7 +37,7 @@ impl InterruptController for Gic {

    fn is_irq_pending(&self, irq: u32) -> bool {
        unsafe {
            mmio_read(self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize)
            mmio_read::<u32>(self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize)
                & (1 << (irq & 0x1F))
                != 0
        }

@ -3,6 +3,8 @@ pub mod serial;

pub mod pl011;
pub mod pl031;
pub mod virtio;
pub mod pcie;

pub trait Device {
    fn name(&self) -> &'static str;

58
kernel/src/dev/pcie.rs
Normal file
@ -0,0 +1,58 @@
use crate::{arch::mmio_read, dev::Device, mem::EcamSpace};
use address::{PhysicalAddress, VirtualAddress};
use core::ops::Index;

pub struct HostPci {
    base: PhysicalAddress,
}

pub struct PcieFunctionConfig {
    base: PhysicalAddress,
}

impl Device for HostPci {
    fn name(&self) -> &'static str {
        "Host PCI(e) Controller"
    }

    unsafe fn enable(&self) {
        let func = PcieFunctionConfig { base: self.base };
        debugln!("{:#010x}", func.readl(0));
        debugln!(":");
    }

    unsafe fn disable(&self) {}
}

impl HostPci {
    pub const fn new(base: PhysicalAddress) -> Self {
        Self { base }
    }
}

impl PcieFunctionConfig {
    #[inline(always)]
    pub unsafe fn readl(&self, off: usize) -> u32 {
        let addr = VirtualAddress::<EcamSpace>::from(self.base + off);
        core::ptr::read_volatile(addr.as_ptr())
    }

    #[inline(always)]
    pub unsafe fn readw(&self, off: usize) -> u16 {
        // Select the addressed 16-bit half of the aligned 32-bit read by
        // shifting it down; OR-ing a mask in would corrupt the value.
        (self.readl(off & !0x3) >> ((off & 0x3) * 8)) as u16
    }

    #[inline(always)]
    pub fn device_id(&self) -> u16 {
        unsafe { self.readw(2) }
    }

    #[inline(always)]
    pub fn vendor_id(&self) -> u16 {
        unsafe { self.readw(0) }
    }
}

pub const fn func_offset(bus: u8, dev: u8, func: u8) -> usize {
    ((bus as usize) << 20) | ((dev as usize) << 15) | ((func as usize) << 12)
}
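`func_offset` implements the standard PCIe ECAM layout: bus number in bits 20 and up, device in bits 15..19, function in bits 12..14, with the low 12 bits addressing the function's 4 KiB configuration space. A worked example (the concrete numbers are illustrative only):

fn ecam_example() {
    // Bus 0, device 1, function 0 sits one device slot in: 1 << 15 = 0x8000.
    assert_eq!(func_offset(0, 1, 0), 0x8000);
    // Bus 1, device 0, function 2: (1 << 20) | (2 << 12) = 0x102000.
    assert_eq!(func_offset(1, 0, 2), 0x102000);
}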
@ -10,10 +10,9 @@ pub struct Pl011 {

impl InterruptHandler for Pl011 {
    fn do_irq(&self, _irq: u32) {
        let tmp = unsafe { mmio_read(self.base + Self::UARTRIS) };
        let tmp: u32 = unsafe { mmio_read(self.base + Self::UARTRIS) };
        if tmp & Self::UARTRIS_RXRIS != 0 {
            let ch = unsafe { mmio_read(self.base + Self::UARTDR) } as u8;
            debugln!("{}", ch as char);
            let _ch = unsafe { mmio_read::<u32>(self.base + Self::UARTDR) } as u8;
            unsafe {
                mmio_write(self.base + Self::UARTICR, Self::UARTICR_RXIC);
            }
@ -45,7 +44,7 @@ impl Device for Pl011 {
impl SerialDevice for Pl011 {
    fn send(&self, ch: u8) {
        unsafe {
            while mmio_read(self.base + Self::UARTFR) & Self::UARTFR_BUSY != 0 {}
            while mmio_read::<u32>(self.base + Self::UARTFR) & Self::UARTFR_BUSY != 0 {}
            mmio_write(self.base + Self::UARTDR, ch as u32);
        }
    }

@ -26,7 +26,7 @@ impl Device for Pl031 {
    }

    unsafe fn enable(&self) {
        let tmp = mmio_read(self.base + Self::RTCDR);
        let tmp: u32 = mmio_read(self.base + Self::RTCDR);
        mmio_write(self.base + Self::RTCMR, tmp + 1);

        mmio_write(self.base + Self::RTCIMSC, 1);
@ -38,7 +38,7 @@ impl Device for Pl031 {

impl InterruptHandler for Pl031 {
    fn do_irq(&self, _irq: u32) {
        let time_int = unsafe { mmio_read(self.base + Self::RTCDR) };
        let time_int: u32 = unsafe { mmio_read(self.base + Self::RTCDR) };
        unsafe {
            mmio_write(self.base + Self::RTCICR, 1);
            mmio_write(self.base + Self::RTCMR, time_int + 1);

25
kernel/src/dev/virtio.rs
Normal file
@ -0,0 +1,25 @@
use crate::dev::Device;
use address::PhysicalAddress;

pub struct Display {
    base: PhysicalAddress,
}

impl Device for Display {
    fn name(&self) -> &'static str {
        "VirtIO GPU"
    }

    unsafe fn enable(&self) {}

    unsafe fn disable(&self) {}
}

impl Display {
    pub const fn new(base: PhysicalAddress) -> Self {
        Self { base }
    }
}
@ -9,7 +9,7 @@ use address::{PhysicalAddress, VirtualAddress};
use core::mem::MaybeUninit;
use fdt_rs::{
    base::{DevTree, DevTreeProp},
    index::DevTreeIndex,
    index::{DevTreeIndex, DevTreeIndexNode, DevTreeIndexProp},
    prelude::*,
};

@ -17,10 +17,72 @@
struct FdtManager {
    fdt: DevTree<'static>,
    index: DevTreeIndex<'static, 'static>,
    address_cells: u32,
    size_cells: u32,
}

impl FdtManager {
    fn dump(&self) {
        self.dump_node(&self.index.root(), 0);
    }

    fn dump_node(&self, node: &DevTreeIndexNode, depth: usize) {
        for _ in 0..depth {
            debug!("  ");
        }
        debugln!("\"{}\" {{", node.name().unwrap());

        for prop in node.props() {
            for _ in 0..=depth {
                debug!("  ");
            }
            let name = prop.name().unwrap();
            debug!("\"{}\" = ", name);
            match name {
                "compatible" => debug!("\"{}\"", prop.str().unwrap()),
                "#size-cells" | "#address-cells" => debug!("{}", prop.u32(0).unwrap()),
                "reg" => {
                    debug!("<");
                    for i in 0..prop.length() / 4 {
                        debug!("{:#010x}", prop.u32(i).unwrap());
                        if i != prop.length() / 4 - 1 {
                            debug!(", ");
                        }
                    }
                    debug!(">");
                }
                _ => debug!("..."),
            }
            debugln!(";");
        }

        for child in node.children() {
            self.dump_node(&child, depth + 1);
        }

        for _ in 0..depth {
            debug!("  ");
        }
        debugln!("}}");
    }
}

static mut FDT_MANAGER: MaybeUninit<FdtManager> = MaybeUninit::uninit();

fn read_cell_index_prop(prop: &DevTreeIndexProp, offset: usize, cells: u32) -> Option<usize> {
    if cells == 2 {
        // Read as two u32s
        let high = prop.u32(offset).ok()? as usize;
        let low = prop.u32(offset + 1).ok()? as usize;

        Some(low | (high << 32))
    } else {
        let val = prop.u32(offset).ok()?;

        Some(val as usize)
    }
}

fn read_cell_prop(prop: &DevTreeProp, offset: usize, cells: u32) -> Option<usize> {
    if cells == 2 {
        // Read as two u32s
@ -47,8 +109,25 @@ pub fn init(fdt_base_phys: PhysicalAddress) {
    let index =
        DevTreeIndex::new(fdt, unsafe { pages_virt.as_slice_mut(page_count * 0x1000) }).unwrap();

    let root = index.root();
    let mut address_cells = None;
    let mut size_cells = None;
    for prop in root.props() {
        match prop.name().unwrap() {
            "#address-cells" => address_cells = Some(prop.u32(0).unwrap()),
            "#size-cells" => size_cells = Some(prop.u32(0).unwrap()),
            _ => {}
        }
    }

    unsafe {
        FDT_MANAGER.write(FdtManager { fdt, index });
        FDT_MANAGER.write(FdtManager {
            address_cells: address_cells.unwrap(),
            size_cells: size_cells.unwrap(),
            fdt,
            index,
        });
        FDT_MANAGER.assume_init_ref().dump();
    }
}

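Device-tree `reg` values are arrays of big-endian u32 cells, and `#address-cells`/`#size-cells` say how many cells make up one value; `read_cell_index_prop` above merges a two-cell pair into one 64-bit integer. A worked example of that combination (the values are illustrative):

fn cell_example() {
    // With #address-cells = 2, the address 0x0000_0040_1000_0000 is stored
    // as the cell pair (0x00000040, 0x10000000).
    let high: usize = 0x0000_0040;
    let low: usize = 0x1000_0000;
    assert_eq!(low | (high << 32), 0x0000_0040_1000_0000usize);
}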
@ -3,7 +3,8 @@
    llvm_asm,
    const_panic,
    maybe_uninit_uninit_array,
    alloc_error_handler
    alloc_error_handler,
    const_fn_trait_bound
)]
#![no_std]
#![no_main]
@ -22,24 +23,16 @@ pub mod dev;
pub mod fdt;
pub mod mem;
pub mod time;
pub mod proc;
pub mod sync;

pub use mem::KernelSpace;

use address::PhysicalAddress;
use arch::{timer, cpu, intrin, smp};
use arch::{timer, cpu, intrin};
use dev::irq::{self, InterruptController};

pub fn entry_common() -> ! {
    smp::init_ipi_delivery();

    unsafe {
        irq::init();
        timer::enable_local_timer();
        intrin::enable_irq();
    }

    loop {}
}
use dev::virtio::Display;
use dev::pcie::HostPci;

#[no_mangle]
extern "C" fn kernel_bsp_main(fdt_base: PhysicalAddress) -> ! {
@ -67,8 +60,20 @@ extern "C" fn kernel_bsp_main(fdt_base: PhysicalAddress) -> ! {
    }

    debug!("BSP init finished\n");
    //smp::wakeup_ap_cpus();
    entry_common();
    unsafe {
        irq::init();
        timer::enable_local_timer();
    }

    let mut bus = HostPci::new(PhysicalAddress::new(0x10000000));
    unsafe {
        use dev::Device;
        bus.enable();
    }
    //let mut display = Display::new();

    loop {}
    //proc::enter();
}

use core::panic::PanicInfo;
@ -78,8 +83,5 @@ fn panic_handler(pi: &PanicInfo) -> ! {
        intrin::disable_irq();
    }
    debug!("PANIC: {:?}\n", pi);
    unsafe {
        smp::send_ipi(usize::MAX, smp::IpiMessage::Halt);
    }
    loop {}
}

@ -10,7 +10,7 @@ use crate::{
use address::VirtualAddress;
use core::alloc::{GlobalAlloc, Layout};
use core::convert::TryFrom;
use core::mem::{size_of, MaybeUninit};
use core::mem::size_of;
use core::ptr::null_mut;
use spin::Mutex;

@ -19,7 +19,7 @@ const HEAP_SIZE: usize = 16 << 20; // 16MiB

struct Heap {
    start: VirtualAddress<KernelSpace>,
    mutex: MaybeUninit<Mutex<()>>,
    mutex: Option<Mutex<()>>,
}

#[derive(Debug)]
@ -37,7 +37,7 @@ unsafe impl GlobalAlloc for Heap {
    let count = ((layout.size() + 15) & !15) as u32;

    // NOTE: this lock must not be optimized away
    let _lock = self.mutex.assume_init_ref().lock();
    let _lock = self.mutex.as_ref().unwrap().lock();

    // Check if the memory is corrupted
    let mut block_it = self.first();
@ -71,7 +71,7 @@ unsafe impl GlobalAlloc for Heap {

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // NOTE: this lock must not be optimized away
        let _lock = self.mutex.assume_init_ref().lock();
        let _lock = self.mutex.as_ref().unwrap().lock();
        let address = VirtualAddress::from_ptr(ptr);

        // Check heap boundaries
@ -124,7 +124,7 @@ impl Heap {
        block.next = null_mut();
        Heap {
            start: addr,
            mutex: MaybeUninit::new(Mutex::new(())),
            mutex: Some(Mutex::new(())),
        }
    }

@ -135,7 +135,7 @@ impl Heap {
    #[allow(dead_code)]
    fn dump(&self) {
        // NOTE: this lock must not be optimized away
        let _lock = unsafe { self.mutex.assume_init_ref().lock() };
        let _lock = self.mutex.as_ref().unwrap().lock();

        let mut block_it = self.first();
        while let Some(ref mut block) = block_it {
@ -187,7 +187,7 @@ impl HeapBlock {
#[global_allocator]
static mut KERNEL_HEAP: Heap = Heap {
    start: VirtualAddress::null(),
    mutex: MaybeUninit::uninit(),
    mutex: None,
};

#[alloc_error_handler]
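The switch from `MaybeUninit<Mutex<()>>` to `Option<Mutex<()>>` above trades an unsafe `assume_init_ref` for a checked `unwrap` while keeping the static const-initializable. A minimal sketch of the pattern, with simplified stand-in types rather than the kernel's Heap:

use spin::Mutex;

struct Guarded {
    mutex: Option<Mutex<()>>,
}

impl Guarded {
    // Const-constructible before runtime init has produced the real lock.
    const fn empty() -> Self {
        Self { mutex: None }
    }

    fn init(&mut self) {
        self.mutex = Some(Mutex::new(()));
    }

    fn with_lock(&self) {
        // Panics, rather than invoking undefined behavior, if init() was
        // skipped; that is the safety win over MaybeUninit::assume_init_ref().
        let _lock = self.mutex.as_ref().unwrap().lock();
        // ... critical section ...
    }
}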
@ -12,6 +12,15 @@ impl AddressSpace for KernelSpace {
}
impl TrivialConvert for KernelSpace {}

#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct EcamSpace;
impl AddressSpace for EcamSpace {
    const NAME: &'static str = "ecam";
    const OFFSET: usize = 0xFFFFFF8080000000;
    const LIMIT: usize = 0xFFFFFF8080000000 + (1 << 30);
}
impl TrivialConvert for EcamSpace {}

pub const PAGE_SIZE: usize = 0x1000;

pub fn kernel_end_phys() -> PhysicalAddress {

@ -2,8 +2,8 @@ use super::{PageInfo, PageUsage};
use crate::{mem::PAGE_SIZE, KernelSpace};
use address::{PhysicalAddress, VirtualAddress};
use core::mem;
use error::Errno;
use spin::Mutex;
use error::Errno;

pub unsafe trait Manager {
    fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno>;
@ -18,7 +18,7 @@ pub unsafe trait Manager {
}

pub struct SimpleManager {
    pages: &'static mut [Mutex<PageInfo>],
    pages: &'static mut [PageInfo],
    base_index: usize,
}

@ -28,17 +28,17 @@ impl SimpleManager {
        at: PhysicalAddress,
        count: usize,
    ) -> Self {
        let pages: &'static mut [Mutex<PageInfo>] =
        let pages: &'static mut [PageInfo] =
            VirtualAddress::<KernelSpace>::from(at).as_slice_mut(count);

        // Initialize uninit pages
        for index in 0..count {
            mem::forget(mem::replace(
                &mut pages[index],
                Mutex::new(PageInfo {
                PageInfo {
                    refcount: 0,
                    usage: PageUsage::Reserved,
                }),
                },
            ));
        }

@ -49,7 +49,7 @@ impl SimpleManager {
    }

    pub(super) unsafe fn add_page(&mut self, addr: PhysicalAddress) {
        let mut page = self.pages[addr.page_index() - self.base_index].lock();
        let page = &mut self.pages[addr.page_index() - self.base_index];
        assert!(page.refcount == 0 && page.usage == PageUsage::Reserved);
        page.usage = PageUsage::Available;

@ -62,7 +62,7 @@ impl SimpleManager {
unsafe impl Manager for SimpleManager {
    fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno> {
        for index in 0..self.pages.len() {
            let mut page = self.pages[index].lock();
            let page = &mut self.pages[index];

            if page.usage == PageUsage::Available {
                page.usage = pu;
@ -79,13 +79,13 @@ unsafe impl Manager for SimpleManager {
    ) -> Result<PhysicalAddress, Errno> {
        'l0: for i in 0..self.pages.len() {
            for j in 0..count {
                if self.pages[i + j].lock().usage != PageUsage::Available {
                if self.pages[i + j].usage != PageUsage::Available {
                    continue 'l0;
                }
            }

            for j in 0..count {
                let mut page = self.pages[i + j].lock();
                let page = &mut self.pages[i + j];
                assert!(page.usage == PageUsage::Available);
                page.usage = pu;
                page.refcount = 1;

@ -1,4 +1,5 @@
use super::PAGE_SIZE;
use core::panic::Location;
use address::PhysicalAddress;
use core::convert::TryFrom;
use core::mem::size_of;

@ -1,9 +1,30 @@
.include "kernel/src/arch/macros.S"

.section .text
.global context_enter_kernel
.global context_switch_to
.global context_switch

.macro __callee_save_ctx
    sub sp, sp, #96

    stp x19, x20, [sp, #0]
    stp x21, x22, [sp, #16]
    stp x23, x24, [sp, #32]
    stp x25, x26, [sp, #48]
    stp x27, x29, [sp, #64]
    stp xzr, lr, [sp, #80]
.endm

.macro __callee_restore_ctx
    ldp x19, x20, [sp, #0]
    ldp x21, x22, [sp, #16]
    ldp x23, x24, [sp, #32]
    ldp x25, x26, [sp, #48]
    ldp x27, x29, [sp, #64]
    ldp xzr, lr, [sp, #80]

    add sp, sp, #96
.endm

context_enter_kernel:
    mov x0, #5
    msr spsr_el1, x0
@ -14,10 +35,10 @@ context_enter_kernel:
context_switch:
    msr daifset, #0xF

    // Store old callee-saved regs on stack
    // Store old callee-saved regs
    __callee_save_ctx

    // Store old stack pointer
    // Store old stack
    mov x19, sp
    str x19, [x1]
context_switch_to:
@ -27,8 +48,7 @@ context_switch_to:
    ldr x0, [x0]
    mov sp, x0

    // Load new callee-saved regs on stack
    // Load new callee-saved regs from stack
    __callee_restore_ctx

    // Simulate/perform a return
    ret

@ -1,72 +1,68 @@
use crate::{
    mem::phys::{self, PageUsage},
    KernelSpace,
};
use address::VirtualAddress;
use core::mem::size_of;
use crate::{KernelSpace, mem::phys::{self, PageUsage}};
use address::{VirtualAddress};

global_asm!(include_str!("context.S"));

#[repr(C)]
pub(super) struct Context {
    pub kernel_sp: VirtualAddress<KernelSpace>, // 0x00
    cpu_id: u32,                                // 0x08
pub struct Context {
    sp: VirtualAddress<KernelSpace>, // 0x00
}

struct StackBuilder {
struct WriteStack {
    bp: VirtualAddress<KernelSpace>,
    sp: VirtualAddress<KernelSpace>,
}

impl Context {
    pub fn new_kernel(entry: usize, arg: usize) -> Context {
        let kstack_phys = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
        let mut stack = unsafe { StackBuilder::new(kstack_phys.into(), 4096 * 4) };

        debug!("Stack bounds: {:?}..{:?}\n", stack.sp, stack.bp);

        stack.push(entry); // ELR before ERET
        stack.push(arg);

        stack.push(context_enter_kernel as usize); // x30 LR
        stack.push(0usize); // padding
        stack.push(0usize); // x29
        stack.push(0usize); // x27
        stack.push(0usize); // x26
        stack.push(0usize); // x25
        stack.push(0usize); // x24
        stack.push(0usize); // x23
        stack.push(0usize); // x22
        stack.push(0usize); // x21
        stack.push(0usize); // x20
        stack.push(0usize); // x19

        Context {
            kernel_sp: stack.sp,
            cpu_id: u32::MAX,
        }
    }
}

impl StackBuilder {
    pub unsafe fn new(bp: VirtualAddress<KernelSpace>, size: usize) -> Self {
        Self { bp, sp: bp + size }
    }

    pub fn push<A: Into<usize>>(&mut self, value: A) {
        if self.sp == self.bp {
            panic!("Stack overflow");
        }
        self.sp -= size_of::<usize>();
        unsafe {
            core::ptr::write(self.sp.as_mut_ptr(), value.into());
        }
    }
    sp: VirtualAddress<KernelSpace>
}

extern "C" {
    fn context_enter_kernel();

    pub(super) fn context_switch_to(dst: *mut Context);
    pub(super) fn context_switch(dst: *mut Context, src: *mut Context);

    fn context_enter_kernel();
}

impl Context {
    pub fn new_kernel(entry: usize, arg: usize) -> Self {
        let pages = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
        let stack_bottom = VirtualAddress::<KernelSpace>::from(pages);
        let mut stack = WriteStack::new(stack_bottom, 4 * 4096);

        stack.push(entry);
        stack.push(arg);

        stack.push(context_enter_kernel as usize); // x30 LR
        stack.push(0); // xzr
        stack.push(0); // x29
        stack.push(0); // x27
        stack.push(0); // x26
        stack.push(0); // x25
        stack.push(0); // x24
        stack.push(0); // x23
        stack.push(0); // x22
        stack.push(0); // x21
        stack.push(0); // x20
        stack.push(0); // x19

        Context {
            sp: stack.sp,
        }
    }
}

impl WriteStack {
    pub fn new(bottom: VirtualAddress<KernelSpace>, size: usize) -> Self {
        Self {
            bp: bottom,
            sp: bottom + size,
        }
    }

    pub fn push(&mut self, value: usize) {
        if self.sp == self.bp {
            panic!("Stack overflow");
        }
        self.sp -= 8;
        unsafe {
            core::ptr::write(self.sp.as_mut_ptr(), value);
        }
    }
}

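`new_kernel` fabricates a stack that looks like a suspended `context_switch` frame: the callee-saved register slots at the top, an LR pointing at `context_enter_kernel`, and `entry`/`arg` above that for the enter stub to pop. A hedged usage sketch of how such a context is brought to life; `demo_task` and `spawn_demo` are illustrative names, not kernel functions:

extern "C" fn demo_task(arg: usize) {
    loop {
        // ... task body using `arg` ...
    }
}

fn spawn_demo() {
    // The first context_switch_to "returns" into context_enter_kernel,
    // which pops arg and entry off the fabricated stack and jumps to
    // demo_task.
    let mut ctx = Context::new_kernel(demo_task as usize, 42);
    unsafe { context_switch_to(&mut ctx) };
}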
@ -1,265 +1,72 @@
use crate::arch::{
    cpu::{self, get_cpu},
    intrin,
use crate::{
    arch::{cpu::Cpu, intrin},
};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use core::sync::atomic::{AtomicU32, Ordering};
use spin::Mutex;

pub mod context;
use context::{context_switch, context_switch_to, Context};
pub mod sched;

pub use context::Context;
pub use sched::Scheduler;

#[repr(C)]
pub struct Process {
    context: Context,
    cpu: Option<u32>,

    sched_prev: *mut Process,
    sched_next: *mut Process,
}
    id: u32,
    cpu_id: Option<u32>,

pub struct Queue {
    head: *mut Process,
    current: *mut Process,
    size: usize,
}

pub struct Scheduler {
    queue: Mutex<Queue>,
    ready: AtomicBool,
    idle: MaybeUninit<Process>,
    queue_prev: *mut Process,
    queue_next: *mut Process,
}

impl Process {
    fn new_kernel(entry: usize, arg: usize) -> Self {
    pub fn new_kernel(entry: usize, arg: usize) -> Self {
        Self {
            context: Context::new_kernel(entry, arg),
            cpu: None,

            sched_prev: null_mut(),
            sched_next: null_mut(),
        }
    }
            id: new_pid(),
            cpu_id: None,

    pub fn exit(&mut self) -> ! {
        unsafe {
            get_cpu().scheduler.unqueue(self);
        }
        panic!("This code should not run");
    }

    pub fn this() -> &'static mut Process {
        // TODO Process can be rescheduled to some other CPU after/during this call
        unsafe { &mut *get_cpu().scheduler.queue.lock().current }
    }
}

impl Scheduler {
    pub fn new() -> Self {
        Self {
            queue: Mutex::new(Queue {
                head: null_mut(),
                current: null_mut(),
                size: 0,
            }),
            ready: AtomicBool::new(false),
            idle: MaybeUninit::uninit(),
        }
    }

    unsafe fn queue(&mut self, proc: *mut Process) {
        let mut lock = self.queue.lock();

        if !lock.head.is_null() {
            let queue_tail = (*lock.head).sched_prev;
            (*queue_tail).sched_next = proc;
            (*proc).sched_prev = queue_tail;
            (*lock.head).sched_prev = proc;
            (*proc).sched_next = lock.head;
        } else {
            (*proc).sched_prev = proc;
            (*proc).sched_next = proc;
            lock.head = proc;
        }

        lock.size += 1;
    }

    unsafe fn unqueue(&mut self, proc: *mut Process) {
        intrin::disable_irq();
        let mut lock = self.queue.lock();
        assert!((*proc).cpu.unwrap() == get_cpu().cpu_id);

        // Can only unqueue current task
        let sched_prev = (*proc).sched_prev;
        let sched_next = (*proc).sched_next;

        (*proc).sched_next = null_mut();
        (*proc).sched_prev = null_mut();

        if sched_next == proc {
            lock.head = null_mut();
            drop(lock);
            let ptr = self.idle.as_mut_ptr();
            self.switch_to(ptr);
            panic!("This code should not run (yet)");
        }

        if proc == lock.head {
            lock.head = sched_next;
        }

        (*sched_next).sched_prev = sched_prev;
        (*sched_prev).sched_next = sched_next;

        drop(lock);
        self.switch_to(sched_next);
    }

    unsafe fn switch_to(&mut self, proc: *mut Process) {
        intrin::disable_irq();
        let mut lock = self.queue.lock();
        let from = lock.current;
        lock.current = proc;
        (*proc).cpu = Some(get_cpu().cpu_id);

        if from.is_null() {
            drop(lock);
            context_switch_to(&mut (*proc).context);
        } else {
            drop(lock);
            context_switch(&mut (*proc).context, &mut (*from).context);
        }
    }

    unsafe fn enter(&mut self) -> ! {
        let lock = self.queue.lock();
        self.ready.store(true, Ordering::Release);

        let proc = if let Some(first) = lock.head.as_mut() {
            first
        } else {
            self.idle.as_mut_ptr()
        };
        drop(lock);
        self.switch_to(proc);

        panic!("This code should not run");
    }

    unsafe fn init_idle(&mut self) {
        self.idle.write(Process::new_kernel(idle_fn as usize, 0));
    }

    unsafe fn sched(&mut self) {
        let mut lock = self.queue.lock();

        let from = lock.current;
        assert!(!from.is_null());
        let from = &mut *from;

        let to = if !from.sched_next.is_null() {
            from.sched_next
        } else if !lock.head.is_null() {
            lock.head
        } else {
            self.idle.as_mut_ptr()
        };

        assert!(!to.is_null());

        drop(lock);

        self.switch_to(to);
    }
}

fn idle_fn(_arg: usize) {
    loop {
        unsafe {
            intrin::enable_irq();
            intrin::wfi();
            queue_prev: null_mut(),
            queue_next: null_mut(),
        }
    }
}

pub fn sched_yield() {
    let cpu = get_cpu();
    if cpu.scheduler.ready.load(Ordering::Acquire) {
        unsafe {
            cpu.scheduler.sched();
        }
    }
}

pub fn sched_queue(proc: *mut Process) {
    let mut min_idx = 0;
    let mut min_val = usize::MAX;
    for index in 0..cpu::CPU_COUNT.load(Ordering::Relaxed) {
        let lock = unsafe { cpu::CPUS[index].assume_init_ref().scheduler.queue.lock() };
        if lock.size < min_val {
            min_idx = index;
            min_val = lock.size;
        }
    }

    debugln!("Queue to cpu{}", min_idx);
    unsafe {
        cpu::CPUS[min_idx].assume_init_mut().scheduler.queue(proc);
    };
}

#[inline(never)]
fn make_delay(d: usize) {
    for _ in 0..d {
        crate::arch::intrin::nop();
    }
pub fn new_pid() -> u32 {
    LAST_PID.fetch_add(1, Ordering::SeqCst)
}

extern "C" fn f0(arg: usize) {
    for _ in 0..600 + arg * 600 {
        make_delay(10000);

        unsafe {
            COUNTERS[arg]
                .assume_init_mut()
                .fetch_add(1, Ordering::Relaxed);
        }
    }

    debugln!("Exit task #{}", arg);
    Process::this().exit();
}

pub static mut COUNTERS: [MaybeUninit<AtomicUsize>; cpu::MAX_CPU * 2] = MaybeUninit::uninit_array();
static TASK_COUNT: AtomicUsize = AtomicUsize::new(0);
static mut C: [MaybeUninit<Process>; cpu::MAX_CPU * 2] = MaybeUninit::uninit_array();

pub fn spawn_task() {
    let c = TASK_COUNT.load(Ordering::Acquire);
    if c >= unsafe { COUNTERS.len() } {
        return;
    }
    TASK_COUNT.fetch_add(1, Ordering::Release);
    unsafe {
        debugln!("Start task #{}", c);
        COUNTERS[c].write(AtomicUsize::new(0));
        C[c].write(Process::new_kernel(f0 as usize, c));
        sched_queue(C[c].assume_init_mut());
    loop {
        debug!("{}", arg);
    }
}

static LAST_PID: AtomicU32 = AtomicU32::new(1);
static mut S: [MaybeUninit<Process>; 2] = MaybeUninit::uninit_array();

pub fn enter() -> ! {
    unsafe {
        let cpu = get_cpu();
        //let mut cpu = Cpu::get();
        //cpu.scheduler.init();
        let mut sched = Scheduler::get();

        debug!("Setting up a task for cpu{}\n", cpu.cpu_id);
        let id = cpu.cpu_id as usize;
        S[0].write(Process::new_kernel(f0 as usize, 0));
        S[1].write(Process::new_kernel(f0 as usize, 1));

        // Initialize the idle task
        cpu.scheduler.init_idle();
        sched.queue(S[0].as_mut_ptr());
        //cpu.scheduler.queue(S[0].as_mut_ptr());
        intrin::delay(4000);
        sched.queue(S[1].as_mut_ptr());
        //cpu.scheduler.queue(S[1].as_mut_ptr());

        debug!("Entering scheduler on cpu{}\n", cpu.cpu_id);
        cpu.scheduler.enter();
        sched.enter();
        //cpu.scheduler.enter();
    }
    loop {}
}

174
kernel/src/proc/sched.rs
Normal file
@ -0,0 +1,174 @@
use crate::{
    arch::{cpu, intrin},
    proc::{
        context::{context_switch, context_switch_to},
        Process,
    },
    sync::Spin as Mutex,
};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, Ordering};

pub struct Queue {
    head: *mut Process,
    current: *mut Process,
    size: usize,
}

pub struct Scheduler {
    queue: Mutex<Queue>,
    ready: AtomicBool,
    idle: MaybeUninit<Process>,
    cpu_index: usize,
}

impl Queue {
    pub const fn new() -> Self {
        Self {
            head: null_mut(),
            current: null_mut(),
            size: 0,
        }
    }
}

impl Scheduler {
    pub const fn new() -> Self {
        Self {
            queue: Mutex::new(Queue::new()),
            ready: AtomicBool::new(false),
            idle: MaybeUninit::uninit(),
            cpu_index: 0,
        }
    }

    pub unsafe fn init(&mut self) {
        self.idle.write(Process::new_kernel(idle_fn as usize, 0));
    }

    pub fn is_ready(&self) -> bool {
        self.ready.load(Ordering::Acquire)
    }

    pub fn queue(&mut self, proc: *mut Process) {
        let irq_state = unsafe { intrin::save_irq() };
        let mut queue = self.queue.lock();

        unsafe {
            if !queue.head.is_null() {
                let queue_tail = (*queue.head).queue_prev;
                (*queue_tail).queue_next = proc;
                (*proc).queue_prev = queue_tail;
                (*queue.head).queue_prev = proc;
                (*proc).queue_next = queue.head;
            } else {
                (*proc).queue_prev = proc;
                (*proc).queue_next = proc;
                queue.head = proc;
            }
        }

        queue.size += 1;
        unsafe {
            intrin::restore_irq(irq_state);
        }
    }

    pub unsafe fn switch_to(&mut self, dst: *mut Process) {
        intrin::disable_irq();

        let mut queue = self.queue.lock();
        let src = queue.current;

        if src == dst {
            return;
        }
        assert!((*dst).cpu_id.is_none());

        queue.current = dst;
        (*dst).cpu_id = Some(self.cpu_index as u32);

        if src.is_null() {
            drop(queue);
            context_switch_to(&mut (*dst).context);
        } else {
            (*src).cpu_id = None;
            drop(queue);
            context_switch(&mut (*dst).context, &mut (*src).context);
        }
    }

    pub unsafe fn enter(&mut self) -> ! {
        intrin::disable_irq();

        let queue = self.queue.lock();
        self.ready.store(true, Ordering::Release);

        let initial = if let Some(first) = queue.head.as_mut() {
            first
        } else {
            self.idle.as_mut_ptr()
        };

        drop(queue);
        debugln!("cpu{}: N -> {:p}", self.cpu_index, initial);
        self.switch_to(initial);
        panic!("This code should not run");
    }

    pub unsafe fn switch(&mut self) {
        intrin::disable_irq();

        let mut queue = self.queue.lock();

        let src = queue.current;
        assert!(!src.is_null());
        let src = &mut *src;

        let dst = if !src.queue_next.is_null() {
            src.queue_next
        } else if !queue.head.is_null() {
            queue.head
        } else {
            self.idle.as_mut_ptr()
        };

        assert!(!dst.is_null());
        //debugln!("cpu{}: {:p} -> {:p}", self.cpu_index, src, dst);
        drop(queue);

        self.switch_to(dst);
    }

    pub unsafe fn get() -> &'static mut Self {
        &mut SCHEDULER
    }
}

unsafe impl Send for Queue {}

extern "C" fn idle_fn(_arg: usize) {
    loop {}
}

static mut SCHEDULER: Scheduler = Scheduler::new();

pub fn queue(proc: *mut Process) {
    unsafe {
        Scheduler::get().queue(proc);
    }
}

pub fn sched_yield() {
    unsafe {
        let sched = Scheduler::get();
        if sched.is_ready() {
            sched.switch();
        }
    }
}

pub fn unqueue(_proc: *mut Process) {
    todo!()
}
78
kernel/src/sync.rs
Normal file
@ -0,0 +1,78 @@
use crate::arch::{intrin, cpu::{self, Cpu}};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicUsize, Ordering};

pub struct Spin<T: ?Sized + Send> {
    inner: AtomicUsize,
    value: UnsafeCell<T>,
}

pub struct SpinGuard<'a, T: ?Sized + Send> {
    inner: &'a AtomicUsize,
    value: &'a mut T,
    irq_state: usize,
}

impl<T: Send> Spin<T> {
    #[inline(always)]
    pub const fn new(value: T) -> Self {
        Self {
            inner: AtomicUsize::new(usize::MAX),
            value: UnsafeCell::new(value),
        }
    }
}

impl<T: ?Sized + Send> Spin<T> {
    #[inline]
    pub fn lock(&self) -> SpinGuard<T> {
        let irq_state = unsafe { intrin::save_irq() };
        let cpu_id = cpu::get_phys_id();

        while self
            .inner
            .compare_exchange_weak(usize::MAX, cpu_id as usize, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            while self.inner.load(Ordering::Relaxed) != usize::MAX {
                intrin::nop();
            }
        }

        SpinGuard {
            inner: &self.inner,
            value: unsafe { &mut *self.value.get() },
            irq_state,
        }
    }
}

unsafe impl<T: ?Sized + Send> Sync for Spin<T> {}
unsafe impl<T: ?Sized + Send> Send for Spin<T> {}

impl<'a, T: ?Sized + Send> Drop for SpinGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.inner.store(usize::MAX, Ordering::Release);
        unsafe {
            intrin::restore_irq(self.irq_state);
        }
    }
}

impl<'a, T: ?Sized + Send> Deref for SpinGuard<'a, T> {
    type Target = T;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        self.value
    }
}

impl<'a, T: ?Sized + Send> DerefMut for SpinGuard<'a, T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.value
    }
}
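`Spin` stores the owner's physical CPU id in the lock word (usize::MAX meaning unlocked) and saves/disables IRQs for the whole critical section, so a holder cannot be preempted by its own interrupt handler. A hedged usage sketch; TICKS is an illustrative static, not one of the kernel's:

static TICKS: Spin<u64> = Spin::new(0);

fn on_timer_tick() {
    // Acquiring the guard saves and disables IRQs; both the lock word and
    // the IRQ state are restored when the guard drops at end of scope.
    let mut ticks = TICKS.lock();
    *ticks += 1;
}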
9
qemu.sh
@ -6,14 +6,18 @@ if [ -z "${MACH}" ]; then
    MACH=rpi3b
fi

if [ -z "${PROFILE}" ]; then
    PROFILE=debug
fi

if [ -z "$QEMU_BIN" ]; then
    QEMU_BIN=qemu-system-aarch64
fi

ARCH=aarch64-unknown-none-${MACH}
KERNEL=target/${ARCH}/debug/kernel
KERNEL=target/${ARCH}/${PROFILE}/kernel

QEMU_OPTS="-chardev stdio,nowait,id=char0,mux=on \
QEMU_OPTS="-chardev stdio,wait=off,id=char0,mux=on \
           -mon chardev=char0"

if [ "$QEMU_DINT" = 1 ]; then
@ -39,6 +43,7 @@ esac
QEMU_OPTS="$QEMU_OPTS \
    -kernel ${KERNEL} \
    -display none \
    -device rtl8139 \
    -s"

./build.sh