Compare commits


No commits in common. "master" and "060cac1565b9c3ad5ea00fed1d0b0fbc58b652e2" have entirely different histories.

76 changed files with 298 additions and 5232 deletions

.cargo/config.toml

@@ -0,0 +1,5 @@
[unstable]
build-std = ["core", "compiler_builtins"]
[build]
target = "etc/aarch64-unknown-none-rpi3b.json"

Cargo.lock (generated)

@@ -2,209 +2,6 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "address"
version = "0.1.0"
dependencies = [
"error",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "endian-type-rs"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6419a5c75e40011b9fe0174db3fe24006ab122fbe1b7e9cc5974b338a755c76"
[[package]]
name = "error"
version = "0.1.0"
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fdt-rs"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99a40cabc11c8258822a593f5c51f2d9f4923e715ca9e2a0630cf77ae15f390b"
dependencies = [
"endian-type-rs",
"fallible-iterator",
"memoffset",
"num-derive",
"num-traits",
"rustc_version",
"static_assertions",
"unsafe_unwrap",
]
[[package]]
name = "kernel"
version = "0.1.0"
dependencies = [
"address",
"cfg-if",
"error",
"fdt-rs",
"spin",
]
[[package]]
name = "lock_api"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb"
dependencies = [
"scopeguard",
]
[[package]]
name = "memoffset"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg",
]
[[package]]
name = "num-derive"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg",
]
[[package]]
name = "osdev4"
version = "0.1.0"
[[package]]
name = "proc-macro2"
version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "ramfs"
version = "0.1.0"
dependencies = [
"error",
"spin",
"vfs",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "spin"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5"
dependencies = [
"lock_api",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "syn"
version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "unsafe_unwrap"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1230ec65f13e0f9b28d789da20d2d419511893ea9dac2c1f4ef67b8b14e5da80"
[[package]]
name = "vfs"
version = "0.1.0"
dependencies = [
"error",
"spin",
]

@@ -3,11 +3,6 @@ name = "osdev4"
version = "0.1.0"
edition = "2018"
[workspace]
members = [
"kernel",
"error",
"address",
"fs/vfs",
"fs/ramfs"
]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

@@ -1,9 +0,0 @@
[package]
name = "address"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
error = { path = "../error" }

@@ -1,19 +0,0 @@
//! Type-safe wrappers for different address kinds
#![no_std]
#![feature(
step_trait,
const_fn_trait_bound
)]
// #![warn(missing_docs)]
#[cfg(test)]
#[macro_use]
extern crate std;
pub mod virt;
pub mod phys;
trait Address {}
pub use phys::PhysicalAddress;
pub use virt::{AddressSpace, NoTrivialConvert, TrivialConvert, VirtualAddress};

@@ -1,177 +0,0 @@
use crate::{AddressSpace, TrivialConvert, VirtualAddress};
use core::convert::TryFrom;
use core::fmt;
use core::iter::Step;
use core::ops::{Add, AddAssign, Sub, SubAssign};
#[repr(transparent)]
#[derive(PartialEq, PartialOrd, Copy, Clone)]
pub struct PhysicalAddress(usize);
// Arithmetic
impl<A: Into<usize>> Add<A> for PhysicalAddress {
type Output = Self;
#[inline(always)]
fn add(self, rhs: A) -> Self {
// Will panic on overflow
Self::from(self.0 + rhs.into())
}
}
impl<A: Into<usize>> AddAssign<A> for PhysicalAddress {
#[inline(always)]
fn add_assign(&mut self, rhs: A) {
// Will panic on overflow
*self = Self::from(self.0 + rhs.into());
}
}
impl Sub<usize> for PhysicalAddress {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: usize) -> Self {
Self::from(self.0 - rhs)
}
}
impl SubAssign<usize> for PhysicalAddress {
#[inline(always)]
fn sub_assign(&mut self, rhs: usize) {
*self = Self::from(self.0 - rhs);
}
}
// Construction
impl From<usize> for PhysicalAddress {
fn from(p: usize) -> Self {
Self(p)
}
}
#[cfg(target_pointer_width = "64")]
impl From<u64> for PhysicalAddress {
fn from(p: u64) -> Self {
Self(p as usize)
}
}
impl PhysicalAddress {
pub const fn new(value: usize) -> Self {
Self(value)
}
pub const fn add(self, value: usize) -> Self {
Self(self.0 + value)
}
#[inline(always)]
pub fn diff(start: PhysicalAddress, end: PhysicalAddress) -> isize {
if end >= start {
isize::try_from(end.0 - start.0).expect("Address subtraction overflowed")
} else {
-isize::try_from(start.0 - end.0).expect("Address subtraction overflowed")
}
}
#[inline(always)]
pub fn diff_unchecked(start: PhysicalAddress, end: PhysicalAddress) -> isize {
end.0 as isize - start.0 as isize
}
#[inline(always)]
pub const fn is_paligned(self) -> bool {
// "p-aligned": the address lies on a 4 KiB page boundary
self.0 & 0xFFF == 0
}
#[inline(always)]
pub const fn page_index(self) -> usize {
self.0 >> 12
}
}
// Trivial conversion PhysicalAddress -> VirtualAddress
impl<T: AddressSpace + TrivialConvert> From<PhysicalAddress> for VirtualAddress<T> {
fn from(p: PhysicalAddress) -> Self {
VirtualAddress::from(p.0 + T::OFFSET)
}
}
impl From<PhysicalAddress> for usize {
#[inline(always)]
fn from(p: PhysicalAddress) -> Self {
p.0 as usize
}
}
#[cfg(target_pointer_width = "64")]
impl From<PhysicalAddress> for u64 {
#[inline(always)]
fn from(p: PhysicalAddress) -> Self {
p.0 as u64
}
}
// Formatting
impl fmt::Debug for PhysicalAddress {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<phys {:#018x}>", self.0)
}
}
// Step
impl Step for PhysicalAddress {
#[inline]
fn steps_between(_p0: &Self, _p1: &Self) -> Option<usize> {
todo!()
}
#[inline]
fn forward_checked(p: Self, steps: usize) -> Option<Self> {
p.0.checked_add(steps).map(Self::from)
}
#[inline]
fn backward_checked(p: Self, steps: usize) -> Option<Self> {
p.0.checked_sub(steps).map(Self::from)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{AddressSpace, NoTrivialConvert, TrivialConvert, VirtualAddress};
#[derive(Copy, Clone, PartialEq, PartialOrd)]
struct S0;
impl AddressSpace for S0 {
const NAME: &'static str = "S0";
const OFFSET: usize = 0x8000;
const LIMIT: usize = Self::OFFSET + 0x4000;
}
impl TrivialConvert for S0 {}
#[derive(Copy, Clone, PartialEq, PartialOrd)]
struct S1;
impl AddressSpace for S1 {
const NAME: &'static str = "S1";
const OFFSET: usize = 0;
const LIMIT: usize = 0;
}
impl NoTrivialConvert for S1 {}
#[test]
fn test_virt_convert_valid() {
let p0 = PhysicalAddress::from(0x1234usize);
assert_eq!(
VirtualAddress::<S0>::from(p0),
VirtualAddress::<S0>::from(0x9234usize)
);
}
#[test]
#[should_panic]
fn test_virt_convert_invalid() {
let p0 = PhysicalAddress::from(0x4321usize);
let _v = VirtualAddress::<S0>::from(p0);
}
}

@@ -1,364 +0,0 @@
use super::PhysicalAddress;
use core::convert::TryFrom;
use core::fmt;
use core::iter::Step;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Neg, Sub, SubAssign};
pub trait AddressSpace: Copy + Clone + PartialEq + PartialOrd {
const NAME: &'static str;
const OFFSET: usize;
const LIMIT: usize;
}
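// OFFSET is added to a physical address on trivial conversion into this space;
// LIMIT is the exclusive upper bound of valid addresses (a LIMIT of 0 disables
// the range check in the From<usize> impl below).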
pub trait NoTrivialConvert {}
pub trait TrivialConvert {}
#[repr(transparent)]
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub struct VirtualAddress<Kind: AddressSpace>(usize, PhantomData<Kind>);
// Arithmetic
impl<T: AddressSpace> Add<usize> for VirtualAddress<T> {
type Output = Self;
#[inline(always)]
fn add(self, rhs: usize) -> Self {
// Will panic on overflow
Self::from(self.0 + rhs)
}
}
impl<T: AddressSpace> AddAssign<usize> for VirtualAddress<T> {
#[inline(always)]
fn add_assign(&mut self, rhs: usize) {
// Will panic on overflow
*self = Self::from(self.0 + rhs);
}
}
impl<T: AddressSpace> Sub<usize> for VirtualAddress<T> {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: usize) -> Self {
// Will panic on underflow
Self::from(self.0 - rhs)
}
}
impl<T: AddressSpace> SubAssign<usize> for VirtualAddress<T> {
#[inline(always)]
fn sub_assign(&mut self, rhs: usize) {
// Will panic on underflow
*self = Self::from(self.0 - rhs);
}
}
// Trivial conversion VirtualAddress -> PhysicalAddress
impl<T: AddressSpace + TrivialConvert> From<VirtualAddress<T>> for PhysicalAddress {
#[inline(always)]
fn from(virt: VirtualAddress<T>) -> Self {
assert!(virt.0 < T::LIMIT);
PhysicalAddress::from(virt.0 - T::OFFSET)
}
}
// Formatting
impl<T: AddressSpace> fmt::Debug for VirtualAddress<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<{} {:#018x}>", T::NAME, self.0)
}
}
impl<T: AddressSpace> VirtualAddress<T> {
#[inline(always)]
pub const fn null() -> Self {
Self(0, PhantomData)
}
pub fn try_subtract(self, p: usize) -> Option<Self> {
let (res, overflow) = self.0.overflowing_sub(p);
if overflow || res < T::OFFSET || res >= T::LIMIT {
None
} else {
Some(Self(res, PhantomData))
}
}
#[inline]
pub fn diff(start: Self, end: Self) -> isize {
if end >= start {
isize::try_from(end.0 - start.0).expect("Address subtraction overflowed")
} else {
-isize::try_from(start.0 - end.0).expect("Address subtraction overflowed")
}
}
#[inline(always)]
pub fn try_diff(start: Self, end: Self) -> Option<isize> {
if end >= start {
isize::try_from(end.0 - start.0).ok()
} else {
isize::try_from(start.0 - end.0).map(Neg::neg).ok()
}
}
#[inline(always)]
pub unsafe fn as_slice_mut<U>(self, count: usize) -> &'static mut [U] {
core::slice::from_raw_parts_mut(self.0 as *mut _, count)
}
#[inline(always)]
pub fn as_mut_ptr<U>(self) -> *mut U {
self.0 as *mut U
}
#[inline(always)]
pub fn as_ptr<U>(self) -> *const U {
self.0 as *const U
}
#[inline(always)]
pub unsafe fn as_mut<U>(self) -> Option<&'static mut U> {
(self.0 as *mut U).as_mut()
}
#[inline(always)]
pub unsafe fn from_ptr<U>(r: *const U) -> Self {
Self::from(r as usize)
}
#[inline(always)]
pub unsafe fn from_ref<U>(r: &U) -> Self {
Self(r as *const U as usize, PhantomData)
}
}
// Step
impl<T: AddressSpace> Step for VirtualAddress<T> {
#[inline]
fn steps_between(_p0: &Self, _p1: &Self) -> Option<usize> {
todo!()
}
#[inline]
fn forward_checked(p: Self, steps: usize) -> Option<Self> {
p.0.checked_add(steps).map(Self::from)
}
#[inline]
fn backward_checked(p: Self, steps: usize) -> Option<Self> {
p.0.checked_sub(steps).map(Self::from)
}
}
// Conversion into VirtualAddress
impl<T: AddressSpace> From<usize> for VirtualAddress<T> {
#[inline(always)]
fn from(p: usize) -> Self {
if T::LIMIT > 0 {
assert!(p >= T::OFFSET && p < T::LIMIT);
}
Self(p, PhantomData)
}
}
#[cfg(target_pointer_width = "64")]
impl<T: AddressSpace> From<u64> for VirtualAddress<T> {
#[inline(always)]
fn from(p: u64) -> Self {
Self::from(p as usize)
}
}
// Conversion from VirtualAddress
impl<T: AddressSpace> From<VirtualAddress<T>> for usize {
#[inline(always)]
fn from(p: VirtualAddress<T>) -> Self {
p.0
}
}
#[cfg(target_pointer_width = "64")]
impl<T: AddressSpace> From<VirtualAddress<T>> for u64 {
#[inline(always)]
fn from(p: VirtualAddress<T>) -> Self {
p.0 as u64
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::PhysicalAddress;
#[derive(Copy, Clone, PartialEq, PartialOrd)]
struct S0;
impl AddressSpace for S0 {
const NAME: &'static str = "S0";
const OFFSET: usize = 0x8000;
const LIMIT: usize = Self::OFFSET + 0x4000;
}
impl TrivialConvert for S0 {}
#[derive(Copy, Clone, PartialEq, PartialOrd)]
struct S1;
impl AddressSpace for S1 {
const NAME: &'static str = "S1";
const OFFSET: usize = 0;
const LIMIT: usize = 0;
}
impl NoTrivialConvert for S1 {}
#[test]
fn test_trivial_construct_valid() {
for i in 0x8000usize..0xC000 {
VirtualAddress::<S0>::from(i);
}
}
#[test]
#[should_panic]
fn test_trivial_construct_invalid_0() {
let _v = VirtualAddress::<S0>::from(0x1234usize);
}
#[test]
#[should_panic]
fn test_trivial_construct_invalid_1() {
let _v = VirtualAddress::<S0>::from(0xD123usize);
}
#[test]
fn test_trivial_convert() {
let v0 = VirtualAddress::<S0>::from(0x8123usize);
assert_eq!(
PhysicalAddress::from(v0),
PhysicalAddress::from(0x123usize)
);
}
#[test]
fn test_add_valid() {
let v0 = VirtualAddress::<S0>::from(0x8100usize);
assert_eq!(VirtualAddress::<S0>::from(0x8223usize), v0 + 0x123usize);
}
#[test]
#[should_panic]
fn test_add_overflow() {
let v0 = VirtualAddress::<S0>::from(0x8100usize);
let _v = v0 - 0xF123usize;
}
#[test]
fn test_subtract_valid() {
let v0 = VirtualAddress::<S0>::from(0x8100usize);
assert_eq!(VirtualAddress::<S0>::from(0x8023usize), v0 - 0xDDusize);
}
#[test]
#[should_panic]
fn test_subtract_overflow() {
let v0 = VirtualAddress::<S0>::from(0x8100usize);
let _v = v0 - 0x1234usize;
}
#[test]
fn test_try_subtract() {
let v0 = VirtualAddress::<S0>::from(0x8100usize);
assert_eq!(v0.try_subtract(0x1234usize), None);
assert_eq!(
v0.try_subtract(0x12usize),
Some(VirtualAddress::<S0>::from(0x80EEusize))
);
}
#[test]
fn test_add_assign_valid() {
let mut v0 = VirtualAddress::<S0>::from(0x8100usize);
v0 += 0x123usize;
assert_eq!(v0, VirtualAddress::<S0>::from(0x8223usize));
}
#[test]
fn test_sub_assign_valid() {
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
v0 -= 0x123usize;
assert_eq!(v0, VirtualAddress::<S0>::from(0x81FEusize));
}
#[test]
#[should_panic]
fn test_sub_assign_overflow() {
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
v0 -= 0x1234usize;
}
#[test]
#[should_panic]
fn test_add_assign_overflow() {
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
v0 += 0xF234usize;
}
#[test]
fn test_format() {
let v0 = VirtualAddress::<S0>::from(0x8123usize);
assert_eq!(&format!("{:?}", v0), "<S0 0x0000000000008123>");
}
#[test]
fn test_diff() {
let v0 = VirtualAddress::<S0>::from(0x8123usize);
let v1 = VirtualAddress::<S0>::from(0x8321usize);
// Ok
assert_eq!(VirtualAddress::diff(v0, v1), 510);
assert_eq!(VirtualAddress::diff(v1, v0), -510);
assert_eq!(VirtualAddress::diff(v0, v0), 0);
assert_eq!(VirtualAddress::diff(v1, v1), 0);
}
#[test]
#[should_panic]
fn test_diff_overflow() {
let v0 = VirtualAddress::<S1>::from(0usize);
let v1 = VirtualAddress::<S1>::from(usize::MAX);
let _v = VirtualAddress::diff(v0, v1);
}
#[test]
fn test_step() {
let mut count = 0;
for _ in VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize) {
count += 1;
}
assert_eq!(count, 0x300);
let mut count = 0;
for _ in (VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize))
.step_by(0x100)
{
count += 1;
}
assert_eq!(count, 3);
let mut count = 0;
for _ in
(VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize)).rev()
{
count += 1;
}
assert_eq!(count, 0x300);
let mut count = 0;
for _ in (VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize))
.rev()
.step_by(0x100)
{
count += 1;
}
assert_eq!(count, 3);
}
}

Binary file not shown.

@@ -1,34 +0,0 @@
#!/bin/sh
if [ -z "${MACH}" ]; then
MACH=rpi3b
fi
if [ -z "${PROFILE}" ]; then
PROFILE=debug
fi
LLVM_DIR=$(llvm-config --bindir)
ARCH=aarch64-unknown-none-${MACH}
CARGO_FEATURES="mach_$MACH"
CARGO_ARGS="--target ../etc/aarch64-unknown-none-$MACH.json \
--features $CARGO_FEATURES"
if [ "$PROFILE" = "release" ]; then
CARGO_ARGS="$CARGO_ARGS --release"
fi
set -e
cd kernel
case $1 in
check)
cargo check $CARGO_ARGS
;;
build|"")
cargo build $CARGO_ARGS
;;
esac
cd ..
${LLVM_DIR}/llvm-objcopy -O binary target/${ARCH}/${PROFILE}/kernel \
target/${ARCH}/${PROFILE}/kernel.bin
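# -O binary strips the ELF container, leaving a raw image the firmware/QEMU can load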

@@ -1,8 +0,0 @@
[package]
name = "error"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

@@ -1,9 +0,0 @@
#![no_std]
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum Errno {
InvalidArgument,
DoesNotExist,
NotADirectory,
OutOfMemory,
}

@@ -7,23 +7,18 @@ SECTIONS {
.text : AT(. - KERNEL_OFFSET) ALIGN(4K) {
KEEP(*(.text.boot))
-*(.text*)
+*(.text)
}
.rodata : AT(. - KERNEL_OFFSET) ALIGN(4K) {
-*(.rodata*)
+*(.rodata)
}
.data : AT(. - KERNEL_OFFSET) ALIGN(4K) {
-*(.data*)
+*(.data)
}
.bss : AT(. - KERNEL_OFFSET) ALIGN(4K) {
*(COMMON)
-*(.bss*)
-. = ALIGN(4K);
-PROVIDE(__kernel_end = .);
-PROVIDE(__kernel_end_phys = . - KERNEL_OFFSET);
+*(.bss)
}
}

@@ -1,5 +0,0 @@
symbol-file target/aarch64-unknown-none-virt/debug/kernel
target remote :1234
layout asm
layout regs
set scheduler-locking on

@@ -1,23 +0,0 @@
{
"arch": "aarch64",
"data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
"executables": true,
"linker": "rust-lld",
"linker-flavor": "ld.lld",
"linker-is-gnu": true,
"llvm-target": "aarch64-unknown-none",
"features": "+a53,+strict-align",
"max-atomic-width": 128,
"os": "none",
"panic-strategy": "abort",
"position-independent-executables": false,
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "64",
"disable-redzone": true,
"pre-link-args": {
"ld.lld": [
"-Tetc/aarch64-unknown-none-virt.ld"
]
}
}

@@ -1,29 +0,0 @@
ENTRY(_entry);
KERNEL_OFFSET = 0xFFFFFF8000000000;
SECTIONS {
. = 0x40080000 + KERNEL_OFFSET;
.text : AT(. - KERNEL_OFFSET) ALIGN(4K) {
KEEP(*(.text.boot))
*(.text*)
}
.rodata : AT(. - KERNEL_OFFSET) ALIGN(4K) {
*(.rodata*)
}
.data : AT(. - KERNEL_OFFSET) ALIGN(4K) {
*(.data*)
}
.bss : AT(. - KERNEL_OFFSET) ALIGN(4K) {
*(COMMON)
*(.bss*)
. = ALIGN(4K);
PROVIDE(__kernel_end = .);
PROVIDE(__kernel_end_phys = . - KERNEL_OFFSET);
}
}

@@ -1,4 +1,4 @@
-symbol-file target/aarch64-unknown-none-rpi3b/debug/kernel
+symbol-file target/aarch64-unknown-none-rpi3b/debug/osdev4
target remote :1234
layout asm
layout regs

@@ -1,11 +0,0 @@
[package]
name = "ramfs"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spin = "*"
vfs = { path = "../vfs" }
error = { path = "../../error" }

@@ -1,146 +0,0 @@
use core::mem::{size_of, MaybeUninit};
use core::ops::{Deref, DerefMut};
use error::Errno;
pub const SIZE: usize = 4096;
pub const ENTRY_COUNT: usize = SIZE / size_of::<usize>();
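// On 64-bit targets an indirect block holds 4096 / 8 = 512 pointer-sized entries.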
pub trait BlockAllocator {
fn alloc(&self) -> *mut u8;
unsafe fn free(&self, ptr: *mut u8);
}
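// Implementations hand out SIZE-byte blocks; free must only be given pointers
// previously returned by alloc.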
pub struct BlockRef<'a, A: BlockAllocator + Copy> {
inner: Option<&'a mut [u8; SIZE]>,
alloc: MaybeUninit<A>,
}
impl<'a, A: BlockAllocator + Copy> BlockRef<'a, A> {
pub fn new(alloc: A) -> Result<Self, Errno> {
// A must be a zero-sized type: a copy is stored in every BlockRef
assert!(size_of::<A>() == 0);
let ptr = alloc.alloc();
if ptr.is_null() {
Err(Errno::OutOfMemory)
} else {
Ok(unsafe { Self::from_raw(alloc, ptr) })
}
}
pub fn new_indirect(alloc: A) -> Result<Self, Errno> {
let mut res = Self::new(alloc)?;
for it in res.as_mut_ref_array().iter_mut() {
it.write(BlockRef::null());
}
Ok(res)
}
pub fn null() -> Self {
Self {
inner: None,
alloc: MaybeUninit::uninit(),
}
}
pub unsafe fn from_raw(alloc: A, data: *mut u8) -> Self {
Self {
inner: Some(&mut *(data as *mut _)),
alloc: MaybeUninit::new(alloc),
}
}
pub fn is_null(&self) -> bool {
self.inner.is_none()
}
pub fn as_mut_ref_array(&mut self) -> &mut [MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
assert_eq!(size_of::<Self>(), 8);
unsafe { &mut *(self.deref_mut() as *mut _ as *mut _) }
}
pub fn as_ref_array(&self) -> &[MaybeUninit<BlockRef<'a, A>>; ENTRY_COUNT] {
assert_eq!(size_of::<Self>(), 8);
unsafe { &*(self.deref() as *const _ as *const _) }
}
pub fn zero(&mut self) {
if let Some(inner) = self.inner.as_mut() {
inner.fill(0);
} else {
panic!("Tried to fill a NULL blockref");
}
}
}
impl<'a, A: BlockAllocator + Copy> Drop for BlockRef<'a, A> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
unsafe {
self.alloc.assume_init_ref().free(inner as *mut _ as *mut _);
}
}
}
}
impl<'a, A: BlockAllocator + Copy> Deref for BlockRef<'a, A> {
type Target = [u8; SIZE];
fn deref(&self) -> &Self::Target {
self.inner.as_ref().unwrap()
}
}
impl<'a, A: BlockAllocator + Copy> DerefMut for BlockRef<'a, A> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.as_mut().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::boxed::Box;
use std::sync::atomic::{AtomicUsize, Ordering};
static A_COUNTER: AtomicUsize = AtomicUsize::new(0);
#[test]
fn block_allocator() {
#[derive(Clone, Copy)]
struct A;
impl BlockAllocator for A {
fn alloc(&self) -> *mut u8 {
let b = Box::leak(Box::<[u8; SIZE]>::new_uninit());
A_COUNTER.fetch_add(1, Ordering::SeqCst);
b.as_mut_ptr() as *mut _
}
unsafe fn free(&self, ptr: *mut u8) {
A_COUNTER.fetch_sub(1, Ordering::SeqCst);
drop(Box::from_raw(ptr as *mut [u8; SIZE]));
}
}
const N: usize = 13;
{
let mut s: [MaybeUninit<BlockRef<A>>; N] = MaybeUninit::uninit_array();
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
for i in 0..N {
let mut block = BlockRef::new(A {}).unwrap();
block.fill(1);
s[i].write(block);
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), N);
for i in 0..N {
unsafe {
s[i].assume_init_drop();
}
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
}
}
}

@@ -1,691 +0,0 @@
use crate::{block, BlockAllocator, BlockRef};
use core::cmp::{max, min};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Index, IndexMut};
use error::Errno;
const L0_BLOCKS: usize = 32; // 128K
const L1_BLOCKS: usize = 8; // 16M
pub struct Bvec<'a, A: BlockAllocator + Copy> {
capacity: usize,
size: usize,
l0: [MaybeUninit<BlockRef<'a, A>>; L0_BLOCKS],
l1: [MaybeUninit<BlockRef<'a, A>>; L1_BLOCKS],
l2: MaybeUninit<BlockRef<'a, A>>,
alloc: A,
}
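// Index-to-block mapping, assuming 4 KiB blocks (ENTRY_COUNT = 512 on 64-bit):
// [0, 32): direct refs in l0 (128 KiB)
// [32, 32 + 8 * 512): singly-indirect refs via l1 (16 MiB)
// beyond that: doubly-indirect refs via l2 (up to 512 * 512 blocks, 1 GiB)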
impl<'a, A: BlockAllocator + Copy> Bvec<'a, A> {
pub fn new(alloc: A) -> Self {
let mut res = Self {
capacity: 0,
size: 0,
l0: MaybeUninit::uninit_array(),
l1: MaybeUninit::uninit_array(),
l2: MaybeUninit::uninit(),
alloc,
};
for it in res.l0.iter_mut() {
it.write(BlockRef::null());
}
for it in res.l1.iter_mut() {
it.write(BlockRef::null());
}
res.l2.write(BlockRef::null());
res
}
pub fn resize(&mut self, cap: usize) -> Result<(), Errno> {
if cap <= self.capacity {
let mut curr = self.capacity;
while curr != cap {
curr -= 1;
let mut index = curr;
if index >= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT {
index -= L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT;
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l2r = unsafe { self.l2.assume_init_mut() };
assert!(!l2r.is_null());
let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
assert!(!l1r.is_null());
let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
assert!(!l0r.is_null());
*l0r = BlockRef::null();
if l0i == 0 {
*l1r = BlockRef::null();
}
if index == 0 {
*l2r = BlockRef::null();
}
continue;
}
if index >= L0_BLOCKS {
index -= L0_BLOCKS;
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l1r = unsafe { self.l1[l1i].assume_init_mut() };
assert!(!l1r.is_null());
let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
assert!(!l0r.is_null());
*l0r = BlockRef::null();
if l0i == 0 {
*l1r = BlockRef::null();
}
continue;
}
let l0r = unsafe { self.l0[index].assume_init_mut() };
assert!(!l0r.is_null());
*l0r = BlockRef::null();
continue;
}
} else {
for mut index in self.capacity..cap {
if index < L0_BLOCKS {
let l0r = unsafe { self.l0[index].assume_init_mut() };
assert!(l0r.is_null());
*l0r = BlockRef::new(self.alloc)?;
continue;
}
index -= L0_BLOCKS;
if index < L1_BLOCKS * block::ENTRY_COUNT {
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l1r = unsafe { self.l1[l1i].assume_init_mut() };
if l1r.is_null() {
*l1r = BlockRef::new_indirect(self.alloc)?;
}
let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
assert!(l0r.is_null());
*l0r = BlockRef::new(self.alloc)?;
continue;
}
index -= L1_BLOCKS * block::ENTRY_COUNT;
if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
let l1i = index / block::ENTRY_COUNT;
let l0i = index % block::ENTRY_COUNT;
let l2r = unsafe { self.l2.assume_init_mut() };
if l2r.is_null() {
*l2r = BlockRef::new_indirect(self.alloc)?;
}
let l1r = unsafe { l2r.as_mut_ref_array()[l1i].assume_init_mut() };
if l1r.is_null() {
*l1r = BlockRef::new_indirect(self.alloc)?;
}
let l0r = unsafe { l1r.as_mut_ref_array()[l0i].assume_init_mut() };
assert!(l0r.is_null());
*l0r = BlockRef::new(self.alloc)?;
continue;
}
unimplemented!();
}
}
self.capacity = cap;
Ok(())
}
pub fn write(&mut self, mut pos: usize, data: &[u8]) -> Result<usize, Errno> {
if pos > self.size {
return Err(Errno::InvalidArgument);
}
let mut rem = data.len();
let mut doff = 0usize;
if pos + rem > self.size {
self.size = pos + rem;
self.resize((pos + rem + block::SIZE - 1) / block::SIZE)?;
}
while rem > 0 {
let index = pos / block::SIZE;
let off = pos % block::SIZE;
let count = min(block::SIZE - off, rem);
let block = &mut self[index];
let dst = &mut block[off..off + count];
let src = &data[doff..doff + count];
dst.copy_from_slice(src);
doff += count;
pos += count;
rem -= count;
}
Ok(doff)
}
pub fn read(&self, mut pos: usize, data: &mut [u8]) -> Result<usize, Errno> {
if pos > self.size {
return Err(Errno::InvalidArgument);
}
let mut rem = min(self.size - pos, data.len());
let mut doff = 0usize;
while rem > 0 {
let index = pos / block::SIZE;
let off = pos % block::SIZE;
let count = min(block::SIZE - off, rem);
let block = &self[index];
let src = &block[off..off + count];
let dst = &mut data[doff..doff + count];
dst.copy_from_slice(src);
doff += count;
pos += count;
rem -= count;
}
Ok(doff)
}
}
impl<'a, A: BlockAllocator + Copy> Index<usize> for Bvec<'a, A> {
type Output = BlockRef<'a, A>;
fn index(&self, mut index: usize) -> &Self::Output {
if index >= self.capacity {
panic!(
"Index exceeds bvec capacity ({} >= {})",
index, self.capacity
);
}
if index < L0_BLOCKS {
return unsafe { self.l0[index].assume_init_ref() };
}
index -= L0_BLOCKS;
if index < L1_BLOCKS * block::ENTRY_COUNT {
return unsafe {
let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_ref();
l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
};
}
index -= L1_BLOCKS * block::ENTRY_COUNT;
if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
return unsafe {
let l2 = self.l2.assume_init_ref();
let l1 = l2.as_ref_array()[index / block::ENTRY_COUNT].assume_init_ref();
l1.as_ref_array()[index % block::ENTRY_COUNT].assume_init_ref()
};
}
unimplemented!();
}
}
impl<'a, A: BlockAllocator + Copy> IndexMut<usize> for Bvec<'a, A> {
fn index_mut(&mut self, mut index: usize) -> &mut Self::Output {
if index >= self.capacity {
panic!(
"Index exceeds bvec capacity ({} >= {})",
index, self.capacity
);
}
if index < L0_BLOCKS {
return unsafe { self.l0[index].assume_init_mut() };
}
index -= L0_BLOCKS;
if index < L1_BLOCKS * block::ENTRY_COUNT {
return unsafe {
let l1 = self.l1[index / block::ENTRY_COUNT].assume_init_mut();
l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
};
}
index -= L1_BLOCKS * block::ENTRY_COUNT;
if index < block::ENTRY_COUNT * block::ENTRY_COUNT {
return unsafe {
let l2 = self.l2.assume_init_mut();
let l1 = l2.as_mut_ref_array()[index / block::ENTRY_COUNT].assume_init_mut();
l1.as_mut_ref_array()[index % block::ENTRY_COUNT].assume_init_mut()
};
}
unimplemented!();
}
}
impl<'a, A: BlockAllocator + Copy> Drop for Bvec<'a, A> {
fn drop(&mut self) {
for i in 0..min(L0_BLOCKS, self.capacity) {
unsafe {
self.l0[i].assume_init_drop();
}
}
if self.capacity > L0_BLOCKS {
// TODO: release the l1/l2 indirect blocks too; only l0 entries are freed above
}
}
}
#[cfg(feature = "test_bvec")]
#[cfg(test)]
mod tests {
use super::*;
use std::boxed::Box;
use std::mem::MaybeUninit;
use std::sync::atomic::{AtomicUsize, Ordering};
static A_COUNTER: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, Copy)]
struct TestAlloc;
impl BlockAllocator for TestAlloc {
fn alloc(&self) -> *mut u8 {
let b = Box::leak(Box::<[u8; block::SIZE]>::new_uninit());
eprintln!("alloc {:p}", b);
b.as_mut_ptr() as *mut _
}
unsafe fn free(&self, ptr: *mut u8) {
eprintln!("drop {:p}", ptr);
drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
}
}
#[test]
fn bvec_allocation() {
#[derive(Clone, Copy)]
struct A;
impl BlockAllocator for A {
fn alloc(&self) -> *mut u8 {
let b = Box::leak(Box::<[u8; block::SIZE]>::new_uninit());
A_COUNTER.fetch_add(1, Ordering::SeqCst);
b.as_mut_ptr() as *mut _
}
unsafe fn free(&self, ptr: *mut u8) {
A_COUNTER.fetch_sub(1, Ordering::SeqCst);
drop(Box::from_raw(ptr as *mut [u8; block::SIZE]));
}
}
let mut bvec = Bvec::new(A {});
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
bvec.resize(123).unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
let l1r = bvec.l1[0].assume_init_ref();
assert!(!l1r.is_null());
for i in 0..123 - L0_BLOCKS {
assert!(!l1r.as_ref_array()[i].assume_init_ref().is_null());
}
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 123 + 1);
bvec.resize(123 + block::ENTRY_COUNT).unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..(123 + block::ENTRY_COUNT) - L0_BLOCKS {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
123 + block::ENTRY_COUNT + 2
);
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT)
.unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS
);
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 4)
.unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
let l2r = bvec.l2.assume_init_ref();
assert!(!l2r.is_null());
for i in 0..block::ENTRY_COUNT * 4 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + // L0
L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
block::ENTRY_COUNT * 4 + 4 + 1
);
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 1)
.unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
let l2r = bvec.l2.assume_init_ref();
assert!(!l2r.is_null());
for i in 0..block::ENTRY_COUNT * 3 + 1 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + // L0
L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
block::ENTRY_COUNT * 3 + 1 + 4 + 1
);
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 1)
.unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
let l2r = bvec.l2.assume_init_ref();
assert!(!l2r.is_null());
for i in 0..block::ENTRY_COUNT * 2 + 1 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = l2r.as_ref_array()[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + // L0
L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
block::ENTRY_COUNT * 2 + 1 + 3 + 1
);
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 1)
.unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..L1_BLOCKS * block::ENTRY_COUNT {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
let l2r = bvec.l2.assume_init_ref();
assert!(!l2r.is_null());
let l1r = l2r.as_ref_array()[0].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[0].assume_init_ref().is_null());
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + // L0
L1_BLOCKS * block::ENTRY_COUNT + L1_BLOCKS + // L1
1 + 1 + 1
);
bvec.resize(L0_BLOCKS + 3 * block::ENTRY_COUNT + 1).unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
for i in 0..3 * block::ENTRY_COUNT + 1 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let l1r = bvec.l1[l1i].assume_init_ref();
assert!(!l1r.is_null());
assert!(!l1r.as_ref_array()[l0i].assume_init_ref().is_null());
}
let l2r = bvec.l2.assume_init_ref();
assert!(l2r.is_null());
}
assert_eq!(
A_COUNTER.load(Ordering::Acquire),
L0_BLOCKS + // L0
3 * block::ENTRY_COUNT + 1 + 4
);
bvec.resize(L0_BLOCKS).unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
assert!(bvec.l1[0].assume_init_ref().is_null());
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), L0_BLOCKS);
bvec.resize(12).unwrap();
unsafe {
for i in 0..12 {
assert!(!bvec.l0[i].assume_init_ref().is_null());
}
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 12);
bvec.resize(0).unwrap();
unsafe {
for i in 0..L0_BLOCKS {
assert!(bvec.l0[i].assume_init_ref().is_null());
}
}
assert_eq!(A_COUNTER.load(Ordering::Acquire), 0);
}
#[test]
fn bvec_index_l0() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS).unwrap();
for i in 0..L0_BLOCKS {
let block = &bvec[i];
assert_eq!(block as *const _, bvec.l0[i].as_ptr());
}
}
#[test]
fn bvec_index_l1() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
for i in 0..block::ENTRY_COUNT * 2 + 3 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let block = &bvec[i + L0_BLOCKS];
let l1r = unsafe { bvec.l1[l1i].assume_init_ref() };
assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
}
}
#[test]
fn bvec_index_l2() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3)
.unwrap();
for i in 0..3 {
let l1i = i / block::ENTRY_COUNT;
let l0i = i % block::ENTRY_COUNT;
let block = &bvec[i + L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT];
let l2r = unsafe { bvec.l2.assume_init_ref() };
let l1r = unsafe { l2r.as_ref_array()[l1i].assume_init_ref() };
assert_eq!(block as *const _, l1r.as_ref_array()[l0i].as_ptr());
}
}
#[test]
#[should_panic]
fn bvec_index_invalid_l0_0() {
let bvec = Bvec::new(TestAlloc {});
let _block = &bvec[0];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l0_1() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(13).unwrap();
let _block = &bvec[15];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l1_0() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(13).unwrap();
let _block = &bvec[L0_BLOCKS + 2];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l1_1() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 2 + 6];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l1_2() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 2 + 3).unwrap();
let _block = &bvec[L0_BLOCKS + block::ENTRY_COUNT * 3 + 1];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l2_0() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(13).unwrap();
let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l2_1() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + block::ENTRY_COUNT * 3 + 13)
.unwrap();
let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l2_2() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 6)
.unwrap();
let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 8];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l2_3() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 7)
.unwrap();
let _block =
&bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13];
}
#[test]
#[should_panic]
fn bvec_index_invalid_l2_4() {
let mut bvec = Bvec::new(TestAlloc {});
bvec.resize(L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 2 + 13)
.unwrap();
let _block = &bvec[L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + block::ENTRY_COUNT * 3 + 2];
}
#[test]
fn bvec_write_read() {
let mut bvec = Bvec::new(TestAlloc {});
const N: usize = block::SIZE * (L0_BLOCKS + L1_BLOCKS * block::ENTRY_COUNT + 3);
let mut data = vec![0u8; N];
for i in 0..N {
data[i] = (i & 0xFF) as u8;
}
assert_eq!(bvec.write(0, &data[..]), Ok(N));
let mut buf = vec![0u8; 327];
let mut off = 0usize;
let mut rem = N;
while rem != 0 {
let count = min(rem, buf.len());
assert_eq!(bvec.read(off, &mut buf[..]), Ok(count));
for i in 0..count {
assert_eq!(buf[i], ((i + off) & 0xFF) as u8);
}
rem -= count;
off += count;
}
}
}

@@ -1,53 +0,0 @@
#![no_std]
#![feature(new_uninit, maybe_uninit_uninit_array, maybe_uninit_extra)]
extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;
use alloc::rc::Rc;
use core::cell::RefCell;
use error::Errno;
use spin::Mutex;
use vfs::{Node, NodeRef, NodeType};
mod block;
pub use block::{BlockAllocator, BlockRef};
mod bvec;
use bvec::Bvec;
pub struct Ramfs<'a> {
root: NodeRef,
allocator: &'a Mutex<dyn BlockAllocator>,
}
pub struct NodeData {
}
fn load_tar(base: *const u8, size: usize) -> Result<NodeRef, Errno> {
let root = Node::directory(b"");
// TODO
Ok(root)
}
pub fn open(
base: *const u8,
size: usize,
allocator: &Mutex<dyn BlockAllocator>,
) -> Result<Ramfs, Errno> {
let root = load_tar(base, size)?;
Ok(Ramfs { root, allocator })
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ramfs_open() {
let data = include_str!("../test/test0.tar");
//let fs = open(data.as_ptr(), data.bytes().len()).unwrap();
}
}

@@ -1,2 +0,0 @@
This is a file
File0

Binary file not shown.

@@ -1,10 +0,0 @@
[package]
name = "vfs"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
error = { path = "../../error" }
spin = "*"

@@ -1,187 +0,0 @@
use crate::{path_element, Node, NodeRef};
use alloc::rc::Rc;
use error::Errno;
pub struct Ioctx {
pub root: NodeRef,
pub cwd: NodeRef,
}
impl Ioctx {
fn lookup_or_load(parent: NodeRef, name: &[u8]) -> Result<NodeRef, Errno> {
let p = parent.borrow_mut();
if !p.is_directory() {
return Err(Errno::NotADirectory);
}
if let Some(node) = p.children().find(|&node| node.borrow().name() == name) {
return Ok(node.clone());
}
if let Some(ops) = p.ops.as_ref() {
todo!();
}
Err(Errno::DoesNotExist)
}
fn _find(&self, mut at: NodeRef, path: &[u8]) -> Result<NodeRef, Errno> {
let mut child_path: &[u8];
let mut element_name: &[u8];
if path.is_empty() {
return Ok(at);
}
child_path = path;
loop {
let r = path_element(child_path);
element_name = r.0;
child_path = r.1;
match element_name {
b"." => {
if child_path.is_empty() {
return Ok(at);
}
}
b".." => {
let parent = at.borrow().parent().unwrap_or(at.clone());
at = parent;
if child_path.is_empty() {
return Ok(at);
}
}
_ => break,
}
}
if element_name.is_empty() && child_path.is_empty() {
return Ok(at);
}
assert!(!element_name.is_empty());
let child = Self::lookup_or_load(at, element_name)?;
if child_path.is_empty() {
Ok(child)
} else {
self._find(child, child_path)
}
}
pub fn find(&self, at: Option<NodeRef>, mut path: &[u8]) -> Result<NodeRef, Errno> {
if path.is_empty() {
return at.ok_or(Errno::DoesNotExist);
}
let at = if path[0] == b'/' {
let index = path
.iter()
.position(|&x| x != b'/')
.unwrap_or(path.len() - 1);
path = &path[index..];
self.root.clone()
} else if let Some(node) = at {
node
} else {
self.cwd.clone()
};
self._find(at, path)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Node;
#[test]
fn ioctx_find_at() {
let r = Node::directory(b"");
let d0 = Node::directory(b"dir0");
let d1 = Node::directory(b"dir1");
let d0d0 = Node::directory(b"dir0");
let d0f0 = Node::file(b"file0");
Node::attach(r.clone(), d0.clone());
Node::attach(r.clone(), d1.clone());
Node::attach(d0.clone(), d0d0.clone());
Node::attach(d0.clone(), d0f0.clone());
let ioctx = Ioctx {
root: r.clone(),
cwd: r.clone(),
};
assert!(Rc::ptr_eq(
ioctx.find(Some(r.clone()), b"dir0").as_ref().unwrap(),
&d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"dir0").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(r.clone()), b"dir0/dir0").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"file0").as_ref().unwrap(),
&d0f0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(r.clone()), b"dir0/file0").as_ref().unwrap(),
&d0f0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"../dir1").as_ref().unwrap(),
&d1
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"./dir0").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/.").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/./").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/").as_ref().unwrap(),
&d0d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/..").as_ref().unwrap(),
&d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/../..").as_ref().unwrap(),
&r
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././dir0/../../../").as_ref().unwrap(),
&r
));
// TODO make these illegal
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././file0/..").as_ref().unwrap(),
&d0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././file0/").as_ref().unwrap(),
&d0f0
));
assert!(Rc::ptr_eq(
ioctx.find(Some(d0.clone()), b"././file0/.").as_ref().unwrap(),
&d0f0
));
ioctx.find(Some(r.clone()), b"dir0/dir1").expect_err("");
}
}

@@ -1,28 +0,0 @@
#![no_std]
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;
use error::Errno;
pub mod node;
mod util;
pub use node::{Node, NodeOperations, NodeRef, NodeType};
mod ioctx;
pub use ioctx::Ioctx;
pub fn path_element(path: &[u8]) -> (&[u8], &[u8]) {
if let Some(mut index) = path.iter().position(|&x| x == b'/') {
let elem = &path[..index];
while index < path.len() && path[index] == b'/' {
index += 1;
}
(elem, &path[index..])
} else {
(path, b"")
}
}
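// A sketch of the expected splits:
// path_element(b"usr/bin/ls") == (b"usr", b"bin/ls") (split at the first '/')
// path_element(b"usr///bin") == (b"usr", b"bin") (runs of '/' are skipped)
// path_element(b"file") == (b"file", b"") (no separator left)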

@@ -1,181 +0,0 @@
use crate::util::{iter::LockedIterator, FixedStr};
use alloc::{boxed::Box, rc::Rc, vec::Vec};
use core::any::Any;
use core::cell::RefCell;
use core::ffi::c_void;
use core::fmt;
use core::ptr::null_mut;
use error::Errno;
use spin::Mutex;
pub type NodeRef = Rc<RefCell<Node>>;
pub enum NodeType {
Regular,
Directory { children: Vec<NodeRef> },
}
pub const NODE_MEMORY: u32 = 1 << 0;
pub struct NodeOperations {
lookup: Option<fn(parent: NodeRef, name: &[u8]) -> Result<NodeRef, Errno>>,
drop: Option<fn(node: &mut Node)>,
}
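// Both hooks are optional: lookup is intended for loading children on demand
// (still a todo!() in Ioctx::lookup_or_load), and drop is invoked from Node's
// Drop impl.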
pub struct Node {
name: FixedStr<64>,
typ: NodeType,
flags: u32,
pub(crate) ops: Option<&'static NodeOperations>,
pub data: *mut c_void,
target: Option<NodeRef>,
parent: Option<NodeRef>,
}
impl Node {
pub fn new(name: &[u8], typ: NodeType) -> Self {
let mut r = Self {
name: FixedStr::empty(),
typ,
flags: 0,
ops: None,
data: null_mut(),
parent: None,
target: None,
};
r.name.copy_from_slice(name);
r
}
pub fn directory(name: &[u8]) -> Rc<RefCell<Self>> {
Rc::new(RefCell::new(Self::new(
name,
NodeType::Directory { children: vec![] },
)))
}
pub fn file(name: &[u8]) -> Rc<RefCell<Self>> {
Rc::new(RefCell::new(Self::new(name, NodeType::Regular)))
}
pub fn name(&self) -> &FixedStr<64> {
&self.name
}
pub fn parent(&self) -> Option<NodeRef> {
self.parent.clone()
}
pub fn is_directory(&self) -> bool {
matches!(&self.typ, NodeType::Directory { .. })
}
pub fn children(&self) -> impl Iterator<Item = &NodeRef> {
let lock = TREE_MUTEX.lock();
match &self.typ {
NodeType::Directory { children } => LockedIterator::new(children.iter(), lock),
_ => panic!("Not a directory"),
}
}
pub fn children_mut(&mut self) -> impl Iterator<Item = &mut NodeRef> {
let lock = TREE_MUTEX.lock();
match &mut self.typ {
NodeType::Directory { children } => LockedIterator::new(children.iter_mut(), lock),
_ => panic!("Not a directory"),
}
}
pub fn attach(parent: NodeRef, child: NodeRef) {
let _lock = TREE_MUTEX.lock();
assert!(child.borrow().parent.is_none());
match &mut parent.borrow_mut().typ {
NodeType::Directory { children } => children.push(child.clone()),
_ => panic!("Not a directory"),
}
child.borrow_mut().parent.replace(parent.clone());
}
pub fn detach(child: NodeRef) {
let _lock = TREE_MUTEX.lock();
assert!(child.borrow().parent.is_some());
let parent = child.borrow_mut().parent.take().unwrap();
match &mut parent.borrow_mut().typ {
NodeType::Directory { children } => {
children.remove(children.iter().position(|x| Rc::ptr_eq(x, &child)).unwrap());
}
_ => panic!("Not a directory"),
};
}
}
impl Drop for Node {
fn drop(&mut self) {
if let Some(ops) = self.ops.as_ref() {
if let Some(do_drop) = ops.drop.as_ref() {
do_drop(self);
}
}
}
}
impl fmt::Debug for Node {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Node {{ name={:?}, typ=??? }}", self.name)
}
}
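// A single global lock serializes tree mutation and child iteration.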
static TREE_MUTEX: Mutex<()> = Mutex::new(());
#[cfg(test)]
mod tests {
use super::*;
use core::any::{type_name, TypeId};
use core::sync::atomic::{AtomicBool, Ordering};
#[test]
fn node_new() {
let r = Node::directory(b"");
let n = r.borrow();
assert_eq!(n.name, b""[..]);
assert!(matches!(n.typ, NodeType::Directory { .. }));
assert!(n.ops.is_none());
assert!(n.target.is_none());
assert!(n.parent.is_none());
let r = Node::file(b"file1");
let n = r.borrow();
assert_eq!(n.name, b"file1"[..]);
assert!(matches!(n.typ, NodeType::Regular));
assert!(n.ops.is_none());
assert!(n.target.is_none());
assert!(n.parent.is_none());
}
#[test]
fn node_attach() {
let r0 = Node::directory(b"");
let r1 = Node::directory(b"1234");
// Attach
Node::attach(r0.clone(), r1.clone());
assert!(Rc::ptr_eq(r1.borrow().parent.as_ref().unwrap(), &r0));
{
let n0 = r0.borrow();
let mut it = n0.children();
assert!(Rc::ptr_eq(&it.next().unwrap(), &r1));
assert!(it.next().is_none());
}
// Detach
Node::detach(r1.clone());
assert!(r1.borrow().parent.is_none());
assert_eq!(r0.borrow().children().count(), 0);
}
}

@@ -1,22 +0,0 @@
use spin::MutexGuard;
pub struct LockedIterator<'a, T: ?Sized, I: Iterator<Item = T>> {
inner: I,
#[allow(dead_code)]
lock: MutexGuard<'a, ()>,
}
impl<'a, T, I: Iterator<Item = T>> Iterator for LockedIterator<'a, T, I> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
}
impl<'a, T, I: Iterator<Item = T>> LockedIterator<'a, T, I> {
#[inline]
pub fn new(inner: I, lock: MutexGuard<'a, ()>) -> Self {
Self { inner, lock }
}
}

@@ -1,94 +0,0 @@
use alloc::boxed::Box;
use core::alloc::Layout;
use core::ffi::c_void;
use core::fmt;
pub mod iter;
#[repr(transparent)]
pub struct FixedStr<const N: usize> {
inner: [u8; N],
}
impl<const N: usize> FixedStr<N> {
pub fn new(s: &[u8]) -> Self {
let mut r = Self::empty();
r.copy_from_slice(s);
r
}
pub const fn empty() -> Self {
Self { inner: [0; N] }
}
pub fn len(&self) -> usize {
self.inner.iter().position(|&n| n == 0).unwrap_or(N)
}
pub fn copy_from_slice(&mut self, src: &[u8]) {
let src_len = src.len();
if src_len == 0 {
self.inner[0] = 0;
return;
}
if src_len >= N {
panic!("String buffer overflow");
}
let dst = &mut self.inner[..src_len];
dst.copy_from_slice(src);
self.inner[src_len] = 0;
}
}
impl<const N: usize> PartialEq<[u8]> for FixedStr<N> {
fn eq(&self, other: &[u8]) -> bool {
let self_len = self.len();
if self_len != other.len() {
return false;
}
&self.inner[..self_len] == other
}
}
impl<const N: usize> fmt::Display for FixedStr<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for byte in self.inner {
if byte == 0 {
break;
}
write!(f, "{}", byte as char)?;
}
Ok(())
}
}
impl<const N: usize> fmt::Debug for FixedStr<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, f)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fixed_str_display() {
let s = FixedStr::<64>::new(b"test");
assert_eq!(format!("{}", s), "test");
}
#[test]
fn fixed_str_length() {
assert_eq!(FixedStr::<64>::empty().len(), 0);
assert_eq!(FixedStr::<64>::new(b"test1").len(), 5);
assert_eq!(FixedStr::<6>::new(b"test1").len(), 5);
}
#[test]
fn fixed_str_eq_slice() {
assert_eq!(FixedStr::<64>::empty(), b""[..]);
assert_eq!(FixedStr::<64>::new(b"1234"), b"1234"[..]);
}
}

@@ -1,5 +0,0 @@
[unstable]
build-std = ["core", "compiler_builtins", "alloc"]
[build]
target = "../etc/aarch64-unknown-none-rpi3b.json"

@@ -1,17 +0,0 @@
[package]
name = "kernel"
version = "0.1.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
address = { path = "../address" }
error = { path = "../error" }
spin = "0.9.2"
cfg-if = "*"
fdt-rs = { version = "*", optional = true, default-features = false }
[features]
mach_rpi3b = []
mach_virt = ["fdt-rs"]
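# the build script selects exactly one mach_* feature via CARGO_FEATURES="mach_$MACH"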

@@ -1,69 +0,0 @@
use crate::arch::intrin;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::AtomicUsize;
#[repr(C)]
pub struct Cpu {
index: u32, // 0x00
}
pub struct CpuRef {
inner: &'static mut Cpu,
}
impl Cpu {
fn new(index: u32) -> Self {
Self {
index,
}
}
#[inline(always)]
pub unsafe fn get_raw() -> &'static mut Cpu {
CPU.assume_init_mut()
}
#[inline(always)]
pub const fn index(&self) -> u32 {
self.index
}
#[inline]
pub fn get() -> CpuRef {
CpuRef {
inner: unsafe { Self::get_raw() },
}
}
pub unsafe fn init(index: u32) {
CPU.write(Self::new(index));
}
}
impl Deref for CpuRef {
type Target = Cpu;
fn deref(&self) -> &Self::Target {
self.inner
}
}
impl DerefMut for CpuRef {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner
}
}
static mut CPU: MaybeUninit<Cpu> = MaybeUninit::uninit();
#[inline(always)]
pub fn get_phys_id() -> usize {
// Aff0 of MPIDR_EL1 holds the core number; the 0x3 mask supports up to 4 cores
intrin::read_mpidr_el1() & 0x3
}
pub fn init(index: usize) {
unsafe {
Cpu::init(index as u32);
}
}

@@ -1,47 +0,0 @@
global_asm!(include_str!("vectors.S"));
#[repr(C)]
struct ExceptionContext {
x0: usize,
x1: usize,
x2: usize,
x3: usize,
x4: usize,
x5: usize,
x6: usize,
x7: usize,
x8: usize,
x9: usize,
x10: usize,
x11: usize,
x12: usize,
x13: usize,
x14: usize,
x15: usize,
x16: usize,
x17: usize,
x18: usize,
fp: usize,
lp: usize,
_r0: usize,
esr: usize,
far: usize,
}
impl ExceptionContext {
pub fn dump(&self) {
debugln!(" x0 = {:#018x}, x1 = {:#018x}", self.x0, self.x1);
debugln!(" x2 = {:#018x}, x3 = {:#018x}", self.x2, self.x3);
debugln!(" x4 = {:#018x}, x5 = {:#018x}", self.x4, self.x5);
debugln!(" x6 = {:#018x}, x7 = {:#018x}", self.x6, self.x7);
debugln!(" x8 = {:#018x}, x9 = {:#018x}", self.x8, self.x9);
}
}
#[no_mangle]
extern "C" fn exc_handler(ctx: ExceptionContext) -> ! {
debugln!("Unhandled exception");
debugln!("FAR = {:#018x}, ESR = {:#018x}", ctx.far, ctx.esr);
loop {}
}

@@ -1,107 +0,0 @@
pub fn delay(mut p: usize) {
while p != 0 {
nop();
p -= 1;
}
}
#[inline(always)]
pub unsafe fn disable_irq() {
llvm_asm!("msr daifset, #0xF");
}
#[inline(always)]
pub unsafe fn enable_irq() {
llvm_asm!("msr daifclr, #0xF");
}
#[inline(always)]
pub unsafe fn save_irq() -> usize {
let mut out: usize;
llvm_asm!(r#"
mrs $0, daif
msr daifset, #0xF
"#:"=r"(out));
out
}
#[inline(always)]
pub unsafe fn restore_irq(state: usize) {
llvm_asm!("msr daif, $0"::"r"(state));
}
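// Typical pairing (sketch): let state = save_irq(); /* critical section */ restore_irq(state);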
#[inline(always)]
pub fn nop() {
unsafe {
llvm_asm!("nop");
}
}
#[inline(always)]
pub fn wfe() {
unsafe {
llvm_asm!("wfe");
}
}
#[inline(always)]
pub fn wfi() {
unsafe {
llvm_asm!("wfi");
}
}
// TPIDR
#[inline(always)]
pub fn read_tpidr_el1() -> usize {
let mut out: usize;
unsafe {
llvm_asm!("mrs $0, tpidr_el1":"=r"(out));
}
out
}
#[inline(always)]
pub unsafe fn write_tpidr_el1(value: usize) {
llvm_asm!("msr tpidr_el1, $0"::"r"(value));
}
// MPIDR
#[inline(always)]
pub fn read_mpidr_el1() -> usize {
let mut out: usize;
unsafe {
llvm_asm!("mrs $0, mpidr_el1":"=r"(out));
}
out
}
// CNTP_CTL
#[inline(always)]
pub unsafe fn write_cntp_ctl_el0(value: usize) {
llvm_asm!("msr cntp_ctl_el0, $0"::"r"(value));
}
// CNTP_CVAL
pub fn read_cntp_cval_el0() -> usize {
let mut out: usize;
unsafe {
llvm_asm!("mrs $0, cntp_cval_el0":"=r"(out));
}
out
}
pub unsafe fn write_cntp_cval_el0(value: usize) {
llvm_asm!("msr cntp_cval_el0, $0"::"r"(value));
}
pub unsafe fn write_cntp_tval_el0(value: usize) {
llvm_asm!("msr cntp_tval_el0, $0"::"r"(value));
}
#[inline(always)]
pub fn dsb_sy() {
unsafe {
llvm_asm!("dsb sy");
}
}

@@ -1,69 +0,0 @@
use crate::{
arch::{mmio_read, mmio_write},
dev::{irq::InterruptHandler, serial::SerialDevice, Device},
};
use address::PhysicalAddress;
pub struct AuxUart;
pub struct Aux;
impl Aux {
const REG_AUX_ENABLES: PhysicalAddress = PhysicalAddress::new(0x3F215004);
const AUX_ENABLES_MUART: u32 = 1 << 0;
pub unsafe fn enable_uart(&self) {
let tmp: u32 = mmio_read(Self::REG_AUX_ENABLES);
mmio_write(Self::REG_AUX_ENABLES, tmp | Self::AUX_ENABLES_MUART);
}
}
impl AuxUart {
const AUX_MU_BASE: PhysicalAddress = PhysicalAddress::new(0x3F215000);
const REG_AUX_MU_IO: PhysicalAddress = Self::AUX_MU_BASE.add(0x40);
const REG_AUX_MU_IER: PhysicalAddress = Self::AUX_MU_BASE.add(0x44);
const REG_AUX_MU_CNTL: PhysicalAddress = Self::AUX_MU_BASE.add(0x60);
const AUX_MU_CNTL_TE: u32 = 1 << 1;
const AUX_MU_CNTL_RE: u32 = 1 << 0;
const AUX_MU_IER_RIE: u32 = 1 << 0;
}
impl InterruptHandler for Aux {
fn do_irq(&self, _irq: u32) {}
}
impl Device for AuxUart {
fn name(&self) -> &'static str {
"BCM283x Mini-UART"
}
unsafe fn enable(&self) {
AUX.enable_uart();
mmio_write(Self::REG_AUX_MU_IER, Self::AUX_MU_IER_RIE);
mmio_write(
Self::REG_AUX_MU_CNTL,
Self::AUX_MU_CNTL_TE | Self::AUX_MU_CNTL_RE,
);
}
unsafe fn disable(&self) {}
}
impl SerialDevice for AuxUart {
fn send(&self, ch: u8) {
unsafe {
mmio_write(Self::REG_AUX_MU_IO, ch as u32);
}
}
}
impl InterruptHandler for AuxUart {
fn do_irq(&self, _irq: u32) {
let _byte = unsafe { mmio_read::<u32>(Self::REG_AUX_MU_IO) } as u8;
}
}
pub static AUX: Aux = Aux;
pub static UART: AuxUart = AuxUart;

View File

@ -1,138 +0,0 @@
use crate::{
arch::{cpu, mmio_read, mmio_write},
dev::{irq::InterruptController, Device},
};
use address::PhysicalAddress;
pub struct Qa7Intc;
pub struct Bcm2837Intc;
pub struct Intc {
qa7_intc: Qa7Intc,
bcm2837_intc: Bcm2837Intc,
}
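// Combined controller: IRQ numbers 0..15 are routed to the per-core QA7
// block, 16 and up (minus the offset) to the BCM2837 peripheral controller.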
impl Bcm2837Intc {
const REG_PENDING_IRQ1: PhysicalAddress = PhysicalAddress::new(0x3F00B204);
const REG_PENDING_IRQ2: PhysicalAddress = PhysicalAddress::new(0x3F00B208);
const REG_ENABLE_IRQ1: PhysicalAddress = PhysicalAddress::new(0x3F00B210);
const REG_ENABLE_IRQ2: PhysicalAddress = PhysicalAddress::new(0x3F00B214);
}
impl Qa7Intc {
const REG_TIMER_INTC: PhysicalAddress = PhysicalAddress::new(0x40000040);
const REG_INT_SRC: PhysicalAddress = PhysicalAddress::new(0x40000060);
const INTC_CNTPNSIRQ_IRQ: u32 = 1 << 1;
}
impl Device for Intc {
fn name(&self) -> &'static str {
"BCM283x Interrupt Controller"
}
unsafe fn enable(&self) {}
unsafe fn disable(&self) {}
}
impl Device for Qa7Intc {
fn name(&self) -> &'static str {
"Broadcom QA7 Interrupt Controller"
}
unsafe fn enable(&self) {}
unsafe fn disable(&self) {}
}
impl Device for Bcm2837Intc {
fn name(&self) -> &'static str {
"BCM2837 Interrupt Controller"
}
unsafe fn enable(&self) {}
unsafe fn disable(&self) {}
}
impl InterruptController for Qa7Intc {
unsafe fn enable_irq(&self, irq: u32) {
match irq {
super::IRQ_LOCAL_TIMER => {
let phys_core_id = cpu::get_phys_id();
let tmp: u32 = mmio_read(Self::REG_TIMER_INTC + 4 * phys_core_id);
mmio_write(
Self::REG_TIMER_INTC + 4 * phys_core_id,
tmp | Self::INTC_CNTPNSIRQ_IRQ,
);
}
_ => panic!("Unhandled IRQ number: {}", irq),
}
}
unsafe fn disable_irq(&self, _irq: u32) {
todo!();
}
fn is_irq_pending(&self, irq: u32) -> bool {
// The interrupt source register is banked per core, 4 bytes apart
let phys_core_id = cpu::get_phys_id();
unsafe { mmio_read::<u32>(Self::REG_INT_SRC + 4 * phys_core_id) & (1 << irq) != 0 }
}
unsafe fn clear_irq(&self, _irq: u32) {}
}
impl InterruptController for Bcm2837Intc {
unsafe fn enable_irq(&self, irq: u32) {
if irq < 32 {
mmio_write(Self::REG_ENABLE_IRQ1, 1 << irq);
} else if irq < 64 {
mmio_write(Self::REG_ENABLE_IRQ2, 1 << (irq - 32));
}
}
unsafe fn disable_irq(&self, _irq: u32) {
todo!();
}
fn is_irq_pending(&self, irq: u32) -> bool {
if irq < 32 {
unsafe { mmio_read::<u32>(Self::REG_PENDING_IRQ1) & (1 << irq) != 0 }
} else if irq < 64 {
unsafe { mmio_read::<u32>(Self::REG_PENDING_IRQ2) & (1 << (irq - 32)) != 0 }
} else {
false
}
}
unsafe fn clear_irq(&self, _irq: u32) {
todo!();
}
}
impl InterruptController for Intc {
unsafe fn enable_irq(&self, irq: u32) {
if irq < 16 {
self.qa7_intc.enable_irq(irq);
} else {
self.bcm2837_intc.enable_irq(irq - 16);
}
}
unsafe fn disable_irq(&self, _irq: u32) {
todo!();
}
fn is_irq_pending(&self, irq: u32) -> bool {
if irq < 16 {
self.qa7_intc.is_irq_pending(irq)
} else {
self.bcm2837_intc.is_irq_pending(irq - 16)
}
}
unsafe fn clear_irq(&self, _irq: u32) {}
}
pub static INTC: Intc = Intc {
qa7_intc: Qa7Intc,
bcm2837_intc: Bcm2837Intc,
};

View File

@ -1,133 +0,0 @@
use crate::{
arch::{cpu, intrin, mmio_read, mmio_write},
KernelSpace,
};
use address::{PhysicalAddress, VirtualAddress};
const MBOX_BASE: PhysicalAddress = PhysicalAddress::new(0x3F00B880);
const MBOX_READ: PhysicalAddress = MBOX_BASE.add(0x00);
//const MBOX_POLL: usize = MBOX_BASE + 0x10;
const MBOX_STATUS: PhysicalAddress = MBOX_BASE.add(0x18);
const MBOX_WRITE: PhysicalAddress = MBOX_BASE.add(0x20);
const MBOX_STATUS_FULL: u32 = 1 << 31;
const MBOX_STATUS_EMPTY: u32 = 1 << 30;
const MBOX_RESPONSE: u32 = 1 << 31;
const MBOX_REQUEST: u32 = 0;
const MBOX_CHAR_PROPERTY: u32 = 8;
const MBOX_TAG_GET_ARM_MEMORY: u32 = 0x10005;
const MBOX_TAG_LAST: u32 = 0;
#[repr(C, align(16))]
#[derive(Debug)]
struct Message {
data: [u32; 36],
}
static mut MESSAGE: Message = Message { data: [0; 36] };
//pub static CORE_MBOX0: CoreMailbox = CoreMailbox { index: 0 };
//
//pub struct CoreMailbox {
// index: usize,
//}
//
//impl IpiDelivery for CoreMailbox {
// fn enable(&self) {
// let phys_core_id = cpu::get_phys_id();
// unsafe {
// mmio_write(Self::REG_INTC + phys_core_id * 4, 1 << self.index);
// }
// }
//
// fn send_ipi(target_id: u32, message: IpiMessage) {
// unsafe {
// mmio_write(
// Self::REG_SET + target_id as usize * 16,
// 1 << (message as u32),
// );
// }
// }
//}
//
//impl CoreMailbox {
// const REG_INTC: PhysicalAddress = PhysicalAddress::new(0x40000050);
// const REG_SET: PhysicalAddress = PhysicalAddress::new(0x40000080);
// const REG_RDCLR: PhysicalAddress = PhysicalAddress::new(0x400000C0);
//
// pub fn do_irq(&self) {
// let phys_core_id = cpu::get_phys_id();
// let value: u32 = unsafe { mmio_read(Self::REG_RDCLR + phys_core_id * 16 + self.index * 4) };
// if value != 0 {
// macro_rules! test_ipi {
// ($value:expr, $msg:expr) => {
// if $value & (1 << ($msg as u32)) != 0 {
// smp::handle_ipi($msg);
// }
// };
// }
//
// test_ipi!(value, IpiMessage::Halt);
// test_ipi!(value, IpiMessage::Tick);
//
// unsafe {
// mmio_write::<u32>(
// Self::REG_RDCLR + phys_core_id * 16 + self.index * 4,
// 0xFFFFFFFF,
// );
// }
// }
// }
//}
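// Property-channel call: the 16-byte-aligned buffer's physical address is
// OR'd with the channel number and written to MBOX_WRITE; the call completes
// when the same value appears on MBOX_READ and data[1] carries the response code.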
unsafe fn call(ch: u32) -> Result<(), ()> {
let value = (usize::from(PhysicalAddress::from(
VirtualAddress::<KernelSpace>::from_ref(&MESSAGE),
)) as u32)
| (ch & 0xF);
while mmio_read::<u32>(MBOX_STATUS) & MBOX_STATUS_FULL != 0 {
llvm_asm!("nop");
}
intrin::dsb_sy();
mmio_write(MBOX_WRITE, value);
loop {
while mmio_read::<u32>(MBOX_STATUS) & MBOX_STATUS_EMPTY != 0 {
llvm_asm!("nop");
}
if mmio_read::<u32>(MBOX_READ) == value {
if MESSAGE.data[1] == MBOX_RESPONSE {
return Ok(());
} else {
return Err(());
}
}
}
}
pub fn get_arm_memory() -> Result<usize, ()> {
unsafe {
MESSAGE.data[0] = 8 * 4; // Total buffer size in bytes
MESSAGE.data[1] = MBOX_REQUEST; // Buffer request code
MESSAGE.data[2] = MBOX_TAG_GET_ARM_MEMORY; // Tag id
MESSAGE.data[3] = 8; // Tag value buffer size
MESSAGE.data[4] = 0; // Tag request code
MESSAGE.data[5] = 0; // Value: base address (response)
MESSAGE.data[6] = 0; // Value: size in bytes (response)
MESSAGE.data[7] = MBOX_TAG_LAST;
call(MBOX_CHAR_PROPERTY)?;
assert!(MESSAGE.data[4] == 0x80000008); // Response of 8 bytes
assert!(MESSAGE.data[5] == 0x00000000); // Base address of RAM is zero
Ok(MESSAGE.data[6] as usize)
}
}

View File

@ -1,35 +0,0 @@
use crate::mem::phys::{init_from_iter, SimpleMemoryIterator, UsableMemory};
use address::PhysicalAddress;
pub mod aux;
pub mod intc;
pub mod mbox;
pub mod timer;
pub const IRQ_LOCAL_TIMER: u32 = 1;
pub const IRQ_AUX: u32 = 16 + 29;
// TODO valid only while the mini UART is the AUX block's sole user (it also hosts SPI1/SPI2)
pub const IRQ_UART: u32 = IRQ_AUX;
pub use aux::UART as AUX_UART;
pub use intc::INTC;
// Configured as primary UART
pub use AUX_UART as UART;
// pub const INT_SRC_TIMER: u32 = 1 << 11;
// pub const INT_SRC_MBOX0: u32 = 1 << 4;
pub fn init_phys_memory() {
let arm_memory = mbox::get_arm_memory().unwrap();
let iter = SimpleMemoryIterator::new(UsableMemory {
start: PhysicalAddress::from(0usize),
end: PhysicalAddress::from(arm_memory),
});
unsafe {
init_from_iter(iter);
}
}
pub fn init() {}

View File

@ -1,21 +0,0 @@
use crate::arch;
pub mod timer;
use address::PhysicalAddress;
pub const IRQ_LOCAL_TIMER: u32 = 30;
pub const IRQ_UART: u32 = 32 + 1;
pub const IRQ_RTC: u32 = 32 + 2;
pub const GICD_BASE: PhysicalAddress = PhysicalAddress::new(0x08000000usize);
pub const GICC_BASE: PhysicalAddress = PhysicalAddress::new(0x08010000usize);
pub const PL031_BASE: PhysicalAddress = PhysicalAddress::new(0x09010000usize);
pub const PL011_BASE: PhysicalAddress = PhysicalAddress::new(0x09000000usize);
pub fn init() {
unsafe {
arch::timer::enable_rtc();
}
}

View File

@ -1,31 +0,0 @@
.ifndef __ASM_MACROS
.set SPSR_EL3_EL2t, 0x8
.set SPSR_EL3_EL2h, 0x9
.set SPSR_EL3_EL1t, 0x4
.set SPSR_EL3_EL1h, 0x5
.set SCR_EL3_RW, (1 << 10)
.set SCR_EL3_SMD, (1 << 7)
.set SCR_EL3_RES1, (3 << 4)
.set SCR_EL3_NS, (1 << 0)
.set SPSR_EL2_EL1h, 0x5
.set HCR_EL2_RW, (1 << 31)
.set HCR_EL2_HCD, (1 << 29)
.set HCR_EL2_C, (1 << 2)
.set HCR_EL2_A, (1 << 1)
.set MAIR_EL1_OUTER_NC, 4
.set MAIR_EL1_INNER_NC, (4 << 4)
.set MAIR_EL1_DEVICE_nGRE, 0
.set MAIR_EL1_DEVICE, 0
.set TCR_EL1_IPS_48, (5 << 32)
.set TCR_EL1_TG1_4K, (2 << 30)
.set CPACR_EL1_FPEN_TRAP_NONE, (3 << 20)
.set PAGE_ACCESSED, 1 << 10
.set PAGE_PRESENT, 1 << 0
.set PAGE_ISH, 3 << 8
.set PAGE_ATTR_SHIFT, 2
.set KERNEL_OFFSET, 0xFFFFFF8000000000
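// Base of the TTBR1 half for T1SZ = 25: a 512GiB window at the top of the address space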
.endif

View File

@ -1,37 +0,0 @@
use crate::KernelSpace;
use address::{PhysicalAddress, VirtualAddress};
pub mod cpu;
pub mod exception;
pub mod intrin;
pub mod timer;
cfg_if! {
if #[cfg(feature = "mach_rpi3b")] {
pub mod mach_bcm283x;
pub use mach_bcm283x as machine;
} else if #[cfg(feature = "mach_virt")] {
pub mod mach_virt;
pub use mach_virt as machine;
}
}
pub trait MmioSize {}
impl MmioSize for u32 {}
impl MmioSize for i32 {}
impl MmioSize for u8 {}
#[inline]
pub unsafe fn mmio_write<T: MmioSize>(addr: PhysicalAddress, value: T) {
core::ptr::write_volatile(
VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
value,
);
}
#[inline]
pub unsafe fn mmio_read<T: MmioSize>(addr: PhysicalAddress) -> T {
core::ptr::read_volatile(
VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
)
}

View File

@ -1,3 +0,0 @@
pub fn init() {
}

View File

@ -1,62 +0,0 @@
use crate::{
arch::{intrin, machine, cpu::Cpu},
proc::sched,
dev::{
irq::{self, InterruptController, InterruptHandler},
Device,
},
};
struct ArmTimer;
impl Device for ArmTimer {
fn name(&self) -> &'static str {
"ARM Generic Timer"
}
unsafe fn enable(&self) {
intrin::write_cntp_ctl_el0(1);
}
unsafe fn disable(&self) {
intrin::write_cntp_ctl_el0(0);
}
}
impl InterruptHandler for ArmTimer {
fn do_irq(&self, _irq: u32) {
unsafe {
// Rearm the timer before yielding, so the next tick is scheduled even
// if this context is not resumed right away
intrin::write_cntp_tval_el0(100000);
sched::sched_yield();
}
}
}
pub unsafe fn enable_local_timer() {
LOCAL_TIMER.enable();
let intc = irq::get_intc();
irq::set_irq_handler(machine::IRQ_LOCAL_TIMER, &LOCAL_TIMER);
intc.enable_irq(machine::IRQ_LOCAL_TIMER);
}
// TODO bcm283x RTC?
cfg_if! {
if #[cfg(feature = "mach_virt")] {
use crate::dev::pl031::Pl031;
static PL031: Pl031 = Pl031::new(machine::PL031_BASE);
use PL031 as RTC;
pub unsafe fn enable_rtc() {
RTC.enable();
let intc = irq::get_intc();
irq::set_irq_handler(machine::IRQ_RTC, &RTC);
intc.enable_irq(machine::IRQ_RTC);
}
}
}
static LOCAL_TIMER: ArmTimer = ArmTimer;

View File

@ -1,119 +0,0 @@
// vi:ft=asm :
.include "kernel/src/arch/macros.S"
.macro __exc_save_ctx
sub sp, sp, #192
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
mrs x0, elr_el1
stp x30, x0, [sp, #160]
// ExceptionContext/IrqRegisters also expect ESR_EL1 and FAR_EL1 at the end of the frame
mrs x0, esr_el1
mrs x1, far_el1
stp x0, x1, [sp, #176]
.endm
.macro __exc_restore_ctx
ldp x30, x0, [sp, #160]
msr elr_el1, x0
ldp x0, x1, [sp, #0]
ldp x2, x3, [sp, #16]
ldp x4, x5, [sp, #32]
ldp x6, x7, [sp, #48]
ldp x8, x9, [sp, #64]
ldp x10, x11, [sp, #80]
ldp x12, x13, [sp, #96]
ldp x14, x15, [sp, #112]
ldp x16, x17, [sp, #128]
ldp x18, x29, [sp, #144]
add sp, sp, #192
.endm
.section .rodata
.global el1_vectors
.p2align 7
el1_vectors:
// Current level with SP_EL0
vec_el1_sp_el0_sync:
b .
.p2align 7
vec_el1_sp_el0_irq:
b .
.p2align 7
vec_el1_sp_el0_fiq:
b .
.p2align 7
vec_el1_sp_el0_serror:
b .
// Current level with SL_ELx, x > 0
.p2align 7
vec_el1_sp_elx_sync:
__exc_save_ctx
mov x0, sp
bl exc_handler
__exc_restore_ctx
b .
.p2align 7
vec_el1_sp_elx_irq:
__exc_save_ctx
mov x0, sp
bl do_irq
__exc_restore_ctx
eret
.p2align 7
vec_el1_sp_elx_fiq:
__exc_save_ctx
mov x0, sp
b .
__exc_restore_ctx
eret
.p2align 7
vec_el1_sp_elx_serror:
b .
// Lower EL, AArch64
.p2align 7
vec_el0_aa64_sync:
b .
.p2align 7
vec_el0_aa64_irq:
b .
.p2align 7
vec_el0_aa64_fiq:
b .
.p2align 7
vec_el0_aa64_serror:
b .
// Lower EL, AArch32
.p2align 7
vec_el0_aa32_sync:
b .
.p2align 7
vec_el0_aa32_irq:
b .
.p2align 7
vec_el0_aa32_fiq:
b .
.p2align 7
vec_el0_aa32_serror:
b .

View File

@ -1,167 +0,0 @@
.include "kernel/src/arch/macros.S"
.cpu cortex-a57
.section .text.boot
.global _entry
_entry:
mrs x1, mpidr_el1
ands x1, x1, #3
beq _entry_bsp
_ap_loop:
1:
b 1b
.section .text
_entry_bsp:
// Store FDT address
adr x1, fdt_addr
str x0, [x1]
// Setup paging tables
// This is done once for all PEs
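// Three 1GiB L1 block entries: [0] identity-maps the first gigabyte,
// [1] the second, and [2] points at 0x40_1000_0000 (assumed here to be the
// QEMU virt ECAM window backing EcamSpace)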
adr x0, kernel_l1
mov x2, #PAGE_PRESENT
orr x2, x2, #PAGE_ACCESSED
orr x2, x2, #PAGE_ISH
mov x1, x2
str x1, [x0]
orr x1, x2, #1 << 30
str x1, [x0, #8]
ldr x1, =0x4010000000
orr x1, x2, x1
str x1, [x0, #16]
// Load BSP stack
mov x0, #KERNEL_OFFSET
adr x10, bsp_stack_top
adr x11, kernel_bsp_main
add x10, x10, x0
add x11, x11, x0
_entry_ap:
// NOTE the following code must not clobber: x10, x11
// EL3 check
mrs x0, CurrentEL
lsr x0, x0, #2
cmp x0, #3
bne 1f
// Leave EL3
adr x0, 1f
msr elr_el3, x0
mov x0, #SPSR_EL3_EL2h
// TODO mask DAIF?
msr spsr_el3, x0
mov x0, #(SCR_EL3_RW | SCR_EL3_SMD | SCR_EL3_RES1 | SCR_EL3_NS)
msr scr_el3, x0
eret
1:
// EL2 check
mrs x0, CurrentEL
lsr x0, x0, #2
cmp x0, #2
bne 1f
// Leave EL2
mrs x0, cnthctl_el2
orr x0, x0, #((1 << 1) | (1 << 0))
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
adr x0, 1f
msr elr_el2, x0
// TODO mask DAIF?
mov x0, #SPSR_EL2_EL1h
msr spsr_el2, x0
mov x0, #(HCR_EL2_RW | HCR_EL2_HCD)
orr x0, x0, #HCR_EL2_A
msr hcr_el2, x0
eret
1:
// Setup the MMU
mov x0, #(MAIR_EL1_INNER_NC | MAIR_EL1_OUTER_NC)
msr mair_el1, x0
// TCR_EL1: 48-bit intermediate physical addresses, 4K granule for TTBR1,
// and 39-bit (512GiB) virtual address spaces in both halves
mov x0, #TCR_EL1_IPS_48
orr x0, x0, #TCR_EL1_TG1_4K
ldr x1, =(25 << 16) // T1SZ = 25
orr x0, x0, x1
ldr x1, =25 // T0SZ = 25
orr x0, x0, x1
msr tcr_el1, x0
isb
adr x0, kernel_l1
msr ttbr1_el1, x0
msr ttbr0_el1, x0
dsb ish
isb
mrs x0, sctlr_el1
// Clear EE/E0E (endianness), WXN, I-cache, SA0, D-cache and alignment checks,
// then set M below to enable the MMU with caches still off
ldr x1, =~((1 << 25) | (1 << 24) | (1 << 19) | (1 << 12) | (1 << 4) | (1 << 2) | (1 << 0) | (1 << 1))
and x0, x0, x1
orr x0, x0, #1
msr sctlr_el1, x0
isb
adr x0, upper_half
ldr x1, =KERNEL_OFFSET
add x0, x0, x1
br x0
upper_half:
// Drop the identity mapping now that we run in the upper half
msr ttbr0_el1, xzr
// Disable trapping for FP instructions
mrs x0, cpacr_el1
orr x0, x0, CPACR_EL1_FPEN_TRAP_NONE
msr cpacr_el1, x0
mov sp, x10
adr x0, el1_vectors
msr vbar_el1, x0
msr daifset, #0xF
adr x1, fdt_addr
ldr x0, [x1]
br x11
.section .bss
.p2align 4
bsp_stack_bottom:
.skip 32768
bsp_stack_top:
.p2align 3
fdt_addr:
.skip 8
.p2align 12
kernel_l1:
.skip 4096
.p2align 4
.global ap_init_value
ap_init_value:
// AP stack pointer
.skip 8
.section .data
.p2align 4
.global ap_wakeup_lock
ap_wakeup_lock:
// Locked by default
.quad 1

View File

@ -1,44 +0,0 @@
use crate::{
arch::intrin,
dev::serial::{SerialDevice, SERIAL0},
sync::Spin as Mutex,
};
use core::fmt;
struct Debug;
impl Debug {
fn putc(&mut self, ch: u8) {
SERIAL0.send(ch);
}
}
impl fmt::Write for Debug {
fn write_str(&mut self, s: &str) -> fmt::Result {
for ch in s.bytes() {
self.putc(ch);
}
Ok(())
}
}
static DEBUG: Mutex<Debug> = Mutex::new(Debug {});
#[macro_export]
macro_rules! debug {
($($args:tt)+) => ($crate::debug::debug_fmt(format_args!($($args)+)))
}
#[macro_export]
macro_rules! debugln {
($($args:tt)+) => (debug!("{}\n", format_args!($($args)+)))
}
pub fn debug_fmt(args: fmt::Arguments<'_>) {
use fmt::Write;
let u = unsafe { intrin::save_irq() };
write!(DEBUG.lock(), "{}", args).unwrap();
unsafe {
intrin::restore_irq(u);
}
}

View File

@ -1,94 +0,0 @@
use crate::dev::Device;
use alloc::collections::LinkedList;
#[repr(C)]
pub struct IrqRegisters {
x0: usize,
x1: usize,
x2: usize,
x3: usize,
x4: usize,
x5: usize,
x6: usize,
x7: usize,
x8: usize,
x9: usize,
x10: usize,
x11: usize,
x12: usize,
x13: usize,
x14: usize,
x15: usize,
x16: usize,
x17: usize,
x18: usize,
x29: usize,
x30: usize,
elr: usize,
esr: usize,
far: usize,
}
pub trait InterruptController: Device {
unsafe fn enable_irq(&self, irq: u32);
unsafe fn disable_irq(&self, irq: u32);
fn is_irq_pending(&self, irq: u32) -> bool;
unsafe fn clear_irq(&self, irq: u32);
}
pub trait InterruptHandler {
fn do_irq(&self, irq: u32);
}
pub struct InterruptEntry {
irq: u32,
handler: &'static dyn InterruptHandler,
}
static mut IRQ_HANDLERS: LinkedList<InterruptEntry> = LinkedList::new();
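// Dispatch by polling each registered handler's pending bit; linear in the
// number of handlers, which is fine for the few devices used here.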
#[no_mangle]
extern "C" fn do_irq(_regs: &mut IrqRegisters) {
let intc = get_intc();
for entry in unsafe { IRQ_HANDLERS.iter() } {
if intc.is_irq_pending(entry.irq) {
entry.handler.do_irq(entry.irq);
unsafe {
intc.clear_irq(entry.irq);
}
}
}
}
pub fn set_irq_handler(irq: u32, handler: &'static dyn InterruptHandler) {
unsafe {
IRQ_HANDLERS.push_front(InterruptEntry { irq, handler });
}
}
cfg_if! {
if #[cfg(feature = "mach_rpi3b")] {
use crate::arch::mach_bcm283x::INTC;
pub fn get_intc() -> &'static impl InterruptController {
&INTC
}
} else if #[cfg(feature = "mach_virt")] {
pub mod gic;
use gic::Gic;
use crate::arch::machine;
pub static GIC: Gic = Gic::new(
machine::GICD_BASE, machine::GICC_BASE
);
pub fn get_intc() -> &'static impl InterruptController {
&GIC
}
}
}
pub unsafe fn init() {
get_intc().enable();
}

View File

@ -1,87 +0,0 @@
use crate::{
arch::{mmio_read, mmio_write},
dev::{irq::InterruptController, Device},
};
use address::PhysicalAddress;
pub struct Gic {
gicd_base: PhysicalAddress,
gicc_base: PhysicalAddress,
}
impl Device for Gic {
fn name(&self) -> &'static str {
"ARM Generic Interrupt Controller"
}
unsafe fn enable(&self) {
mmio_write(self.gicd_base + Self::GICD_CTLR, Self::GICD_CTLR_ENABLE);
mmio_write(self.gicc_base + Self::GICC_CTLR, Self::GICC_CTLR_ENABLE);
mmio_write(self.gicc_base + Self::GICC_PMR, 0xFF);
}
unsafe fn disable(&self) {
todo!()
}
}
impl InterruptController for Gic {
unsafe fn enable_irq(&self, irq: u32) {
self.set_irq_config(irq, 1);
self.unmask_irq(irq);
}
unsafe fn disable_irq(&self, _irq: u32) {
todo!();
}
fn is_irq_pending(&self, irq: u32) -> bool {
unsafe {
mmio_read::<u32>(self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize)
& (1 << (irq & 0x1F))
!= 0
}
}
unsafe fn clear_irq(&self, irq: u32) {
mmio_write(
self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize,
1 << (irq & 0x1F),
);
}
}
impl Gic {
const GICD_CTLR: usize = 0;
const GICD_ISENABLER: usize = 0x100;
const GICD_ICFGR: usize = 0xC00;
const GICD_ICPENDR: usize = 0x280;
const GICD_CTLR_ENABLE: u32 = 1;
const GICC_CTLR: usize = 0;
const GICC_PMR: usize = 4;
const GICC_CTLR_ENABLE: u32 = 1;
pub const fn new(gicd_base: PhysicalAddress, gicc_base: PhysicalAddress) -> Self {
Self {
gicd_base,
gicc_base,
}
}
unsafe fn set_irq_config(&self, irq: u32, value: u32) {
// GICD_ICFGR keeps 2 configuration bits per interrupt (16 per register);
// the edge/level bit is the high bit of each 2-bit field
mmio_write(
self.gicd_base + Self::GICD_ICFGR + ((irq >> 2) & !0x3) as usize,
value << (((irq & 0xF) * 2) + 1),
);
}
unsafe fn unmask_irq(&self, irq: u32) {
mmio_write(
self.gicd_base + Self::GICD_ISENABLER + ((irq >> 3) & !0x3) as usize,
1 << (irq & 0x1F),
);
}
}

View File

@ -1,13 +0,0 @@
pub mod irq;
pub mod serial;
pub mod pl011;
pub mod pl031;
pub mod virtio;
pub mod pcie;
pub trait Device {
fn name(&self) -> &'static str;
unsafe fn enable(&self);
unsafe fn disable(&self);
}

View File

@ -1,58 +0,0 @@
use crate::{arch::mmio_read, dev::Device, mem::EcamSpace};
use address::{PhysicalAddress, VirtualAddress};
use core::ops::Index;
pub struct HostPci {
base: PhysicalAddress,
}
pub struct PcieFunctionConfig {
base: PhysicalAddress,
}
impl Device for HostPci {
fn name(&self) -> &'static str {
"Host PCI(e) Controller"
}
unsafe fn enable(&self) {
let func = PcieFunctionConfig { base: self.base };
debugln!("{:#010x}", func.readl(0));
debugln!(":");
}
unsafe fn disable(&self) {}
}
impl HostPci {
pub const fn new(base: PhysicalAddress) -> Self {
Self { base }
}
}
impl PcieFunctionConfig {
#[inline(always)]
pub unsafe fn readl(&self, off: usize) -> u32 {
let addr = VirtualAddress::<EcamSpace>::from(self.base + off);
core::ptr::read_volatile(addr.as_ptr())
}
#[inline(always)]
pub unsafe fn readw(&self, off: usize) -> u16 {
// Shift the containing aligned 32-bit word so the addressed halfword
// lands in the low 16 bits
(self.readl(off & !0x3) >> ((off & 0x3) * 8)) as u16
}
#[inline(always)]
pub fn device_id(&self) -> u16 {
unsafe { self.readw(2) }
}
#[inline(always)]
pub fn vendor_id(&self) -> u16 {
unsafe { self.readw(0) }
}
}
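/// ECAM offset for a function: bus in bits [27:20], device in [19:15],
/// function in [14:12].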
pub const fn func_offset(bus: u8, dev: u8, func: u8) -> usize {
((bus as usize) << 20) | ((dev as usize) << 15) | ((func as usize) << 12)
}

View File

@ -1,77 +0,0 @@
use crate::{
arch::{mmio_read, mmio_write},
dev::{irq::InterruptHandler, serial::SerialDevice, Device},
};
use address::PhysicalAddress;
pub struct Pl011 {
base: PhysicalAddress,
}
impl InterruptHandler for Pl011 {
fn do_irq(&self, _irq: u32) {
let tmp: u32 = unsafe { mmio_read(self.base + Self::UARTRIS) };
if tmp & Self::UARTRIS_RXRIS != 0 {
let _ch = unsafe { mmio_read::<u32>(self.base + Self::UARTDR) } as u8;
unsafe {
mmio_write(self.base + Self::UARTICR, Self::UARTICR_RXIC);
}
}
}
}
impl Device for Pl011 {
fn name(&self) -> &'static str {
"PL011 UART"
}
unsafe fn enable(&self) {
mmio_write(self.base + Self::UARTCR, 0);
// WLEN = 0b11: 8 data bits
mmio_write(self.base + Self::UARTLCR_H, 3 << 5);
mmio_write(
self.base + Self::UARTIMSC,
Self::UARTIMSC_RXIM,
);
mmio_write(
self.base + Self::UARTCR,
Self::UARTCR_TXE | Self::UARTCR_RXE | Self::UARTCR_UARTEN,
);
}
unsafe fn disable(&self) {}
}
impl SerialDevice for Pl011 {
fn send(&self, ch: u8) {
unsafe {
while mmio_read::<u32>(self.base + Self::UARTFR) & Self::UARTFR_BUSY != 0 {}
mmio_write(self.base + Self::UARTDR, ch as u32);
}
}
}
impl Pl011 {
const UARTDR: usize = 0x00;
const UARTFR: usize = 0x18;
const UARTLCR_H: usize = 0x2C;
const UARTCR: usize = 0x30;
const UARTIMSC: usize = 0x38;
const UARTRIS: usize = 0x3C;
const UARTICR: usize = 0x44;
const UARTCR_UARTEN: u32 = 1 << 0;
const UARTCR_TXE: u32 = 1 << 8;
const UARTCR_RXE: u32 = 1 << 9;
const UARTFR_BUSY: u32 = 1 << 3;
const UARTIMSC_RXIM: u32 = 1 << 4;
const UARTRIS_RXRIS: u32 = 1 << 4;
const UARTICR_RXIC: u32 = 1 << 4;
pub const fn new(base: PhysicalAddress) -> Self {
Self { base }
}
}

View File

@ -1,47 +0,0 @@
use crate::{
arch::{mmio_read, mmio_write},
dev::{irq::InterruptHandler, Device},
};
use address::PhysicalAddress;
pub struct Pl031 {
base: PhysicalAddress,
}
impl Pl031 {
const RTCDR: usize = 0x00;
const RTCMR: usize = 0x04;
const RTCCR: usize = 0x0C;
const RTCIMSC: usize = 0x10;
const RTCICR: usize = 0x1C;
pub const fn new(base: PhysicalAddress) -> Self {
Self { base }
}
}
impl Device for Pl031 {
fn name(&self) -> &'static str {
"ARM PL031 RTC"
}
unsafe fn enable(&self) {
let tmp: u32 = mmio_read(self.base + Self::RTCDR);
mmio_write(self.base + Self::RTCMR, tmp + 1);
mmio_write(self.base + Self::RTCIMSC, 1);
mmio_write(self.base + Self::RTCCR, 1);
}
unsafe fn disable(&self) {}
}
impl InterruptHandler for Pl031 {
fn do_irq(&self, _irq: u32) {
let time_int: u32 = unsafe { mmio_read(self.base + Self::RTCDR) };
unsafe {
mmio_write(self.base + Self::RTCICR, 1);
mmio_write(self.base + Self::RTCMR, time_int + 1);
}
}
}

View File

@ -1,23 +0,0 @@
use crate::dev::Device;
pub trait SerialDevice: Device {
fn send(&self, ch: u8);
}
cfg_if! {
if #[cfg(feature = "mach_rpi3b")] {
use crate::arch::mach_bcm283x;
pub use mach_bcm283x::UART as SERIAL0;
} else {
use crate::{dev::pl011::Pl011, arch::machine};
pub static SERIAL0: Pl011 = Pl011::new(machine::PL011_BASE);
}
}
pub fn init() {
unsafe {
SERIAL0.enable();
}
}

View File

@ -1,25 +0,0 @@
use crate::dev::Device;
use address::PhysicalAddress;
pub struct Display {
base: PhysicalAddress
}
impl Device for Display {
fn name(&self) -> &'static str {
"VirtIO GPU"
}
unsafe fn enable(&self) {
}
unsafe fn disable(&self) {
}
}
impl Display {
pub const fn new(base: PhysicalAddress) -> Self {
Self { base }
}
}

View File

@ -1,186 +0,0 @@
use crate::{
mem::{
self,
phys::{PageUsage, SimpleMemoryIterator, UsableMemory},
},
KernelSpace,
};
use address::{PhysicalAddress, VirtualAddress};
use core::mem::MaybeUninit;
use fdt_rs::{
base::{DevTree, DevTreeProp},
index::{DevTreeIndex, DevTreeIndexNode, DevTreeIndexProp},
prelude::*,
};
#[allow(dead_code)]
struct FdtManager {
fdt: DevTree<'static>,
index: DevTreeIndex<'static, 'static>,
address_cells: u32,
size_cells: u32,
}
impl FdtManager {
fn dump(&self) {
self.dump_node(&self.index.root(), 0);
}
fn dump_node(&self, node: &DevTreeIndexNode, depth: usize) {
for _ in 0..depth {
debug!(" ");
}
debugln!("\"{}\" {{", node.name().unwrap());
for prop in node.props() {
for _ in 0..=depth {
debug!(" ");
}
let name = prop.name().unwrap();
debug!("\"{}\" = ", name);
match name {
"compatible" => debug!("\"{}\"", prop.str().unwrap()),
"#size-cells" | "#address-cells" => debug!("{}", prop.u32(0).unwrap()),
"reg" => {
debug!("<");
for i in 0..prop.length() / 4 {
debug!("{:#010x}", prop.u32(i).unwrap());
if i != prop.length() / 4 - 1 {
debug!(", ");
}
}
debug!(">");
}
_ => debug!("..."),
}
debugln!(";");
}
for child in node.children() {
self.dump_node(&child, depth + 1);
}
for _ in 0..depth {
debug!(" ");
}
debugln!("}}");
}
}
static mut FDT_MANAGER: MaybeUninit<FdtManager> = MaybeUninit::uninit();
fn read_cell_index_prop(prop: &DevTreeIndexProp, offset: usize, cells: u32) -> Option<usize> {
if cells == 2 {
// Read as two u32s
let high = prop.u32(offset).ok()? as usize;
let low = prop.u32(offset + 1).ok()? as usize;
Some(low | (high << 32))
} else {
let val = prop.u32(offset).ok()?;
Some(val as usize)
}
}
fn read_cell_prop(prop: &DevTreeProp, offset: usize, cells: u32) -> Option<usize> {
if cells == 2 {
// Read as two u32s
let high = prop.u32(offset).ok()? as usize;
let low = prop.u32(offset + 1).ok()? as usize;
Some(low | (high << 32))
} else {
let val = prop.u32(offset).ok()?;
Some(val as usize)
}
}
pub fn init(fdt_base_phys: PhysicalAddress) {
let fdt_base = VirtualAddress::<KernelSpace>::from(fdt_base_phys);
let fdt = unsafe { DevTree::from_raw_pointer(fdt_base.as_ptr()) }.unwrap();
let layout = DevTreeIndex::get_layout(&fdt).unwrap();
assert!(layout.align() <= 0x1000);
let page_count = (layout.size() + 0xFFF) / 0x1000;
debugln!("Allocating {} pages for fdt index", page_count);
let pages = mem::phys::alloc_contiguous_pages(PageUsage::Kernel, page_count).unwrap();
let pages_virt = VirtualAddress::<KernelSpace>::from(pages);
let index =
DevTreeIndex::new(fdt, unsafe { pages_virt.as_slice_mut(page_count * 0x1000) }).unwrap();
let root = index.root();
let mut address_cells = None;
let mut size_cells = None;
for prop in root.props() {
match prop.name().unwrap() {
"#address-cells" => address_cells = Some(prop.u32(0).unwrap()),
"#size-cells" => size_cells = Some(prop.u32(0).unwrap()),
_ => {}
}
}
unsafe {
FDT_MANAGER.write(FdtManager {
address_cells: address_cells.unwrap(),
size_cells: size_cells.unwrap(),
fdt,
index,
});
FDT_MANAGER.assume_init_ref().dump();
}
}
pub fn init_phys_memory(fdt_base_phys: PhysicalAddress) {
let fdt_base = VirtualAddress::<KernelSpace>::from(fdt_base_phys);
let fdt = unsafe { DevTree::from_raw_pointer(fdt_base.as_ptr()) }.unwrap();
let mut address_cells = None;
let mut size_cells = None;
let mut nodes = fdt.nodes();
while let Ok(Some(node)) = nodes.next() {
if node.name().unwrap().is_empty() {
let mut props = node.props();
while let Ok(Some(prop)) = props.next() {
let name = prop.name().unwrap();
match name {
"#address-cells" => address_cells = prop.u32(0).ok(),
"#size-cells" => size_cells = prop.u32(0).ok(),
_ => {}
}
}
}
}
// Find out system address and size cells
let address_cells = address_cells.expect("Failed to find out system's #address-cells");
let size_cells = size_cells.expect("Failed to find out system's #size-cells");
// TODO support multiple memory regions
let mut region_start = None;
let mut region_size = None;
let mut nodes = fdt.nodes();
while let Ok(Some(node)) = nodes.next() {
if node.name().unwrap().starts_with("memory@") {
let mut props = node.props();
while let Ok(Some(prop)) = props.next() {
if prop.name().unwrap() == "reg" {
region_start = read_cell_prop(&prop, 0, address_cells);
region_size = read_cell_prop(&prop, address_cells as usize, size_cells);
}
}
}
}
let region_start = region_start.unwrap();
let region_end = region_start + region_size.unwrap();
let iter = SimpleMemoryIterator::new(UsableMemory {
start: PhysicalAddress::from((region_start + 0xFFF) & !0xFFF),
end: PhysicalAddress::from(region_end & !0xFFF),
});
unsafe {
mem::phys::init_from_iter(iter);
}
}

View File

@ -1,87 +0,0 @@
#![feature(
global_asm,
llvm_asm,
const_panic,
maybe_uninit_uninit_array,
alloc_error_handler,
const_fn_trait_bound
)]
#![no_std]
#![no_main]
#[macro_use]
extern crate cfg_if;
extern crate alloc;
#[macro_use]
pub mod debug;
pub mod arch;
pub mod boot;
pub mod dev;
#[cfg(feature = "fdt-rs")]
pub mod fdt;
pub mod mem;
pub mod time;
pub mod proc;
pub mod sync;
pub use mem::KernelSpace;
use address::PhysicalAddress;
use arch::{timer, cpu, intrin};
use dev::irq::{self, InterruptController};
use dev::virtio::Display;
use dev::pcie::HostPci;
#[no_mangle]
extern "C" fn kernel_bsp_main(fdt_base: PhysicalAddress) -> ! {
cpu::init(0);
dev::serial::init();
cfg_if! {
if #[cfg(feature = "fdt-rs")] {
// Initialize memory from FDT information
fdt::init_phys_memory(fdt_base);
fdt::init(fdt_base);
} else {
// Platform-specific memory init
mem::phys::init_from_platform();
}
}
mem::heap::init();
arch::machine::init();
// Enable IRQs for SERIAL0
let intc = irq::get_intc();
irq::set_irq_handler(arch::machine::IRQ_UART, &dev::serial::SERIAL0);
unsafe {
intc.enable_irq(arch::machine::IRQ_UART);
}
debug!("BSP init finished\n");
unsafe {
irq::init();
timer::enable_local_timer();
}
let bus = HostPci::new(PhysicalAddress::new(0x10000000));
unsafe {
use dev::Device;
bus.enable();
}
//let mut display = Display::new();
loop {}
//proc::enter();
}
use core::panic::PanicInfo;
#[panic_handler]
fn panic_handler(pi: &PanicInfo) -> ! {
unsafe {
intrin::disable_irq();
}
debug!("PANIC: {:?}\n", pi);
loop {}
}

View File

@ -1,209 +0,0 @@
// TODO document this module
#![allow(missing_docs)]
use crate::{
mem::{
self,
phys::{self, PageUsage},
},
KernelSpace,
};
use address::VirtualAddress;
use core::alloc::{GlobalAlloc, Layout};
use core::convert::TryFrom;
use core::mem::size_of;
use core::ptr::null_mut;
use spin::Mutex;
const HEAP_MAGIC: u32 = 0xB0000BA0;
const HEAP_SIZE: usize = 16 << 20; // 16MiB
struct Heap {
start: VirtualAddress<KernelSpace>,
mutex: Option<Mutex<()>>,
}
#[derive(Debug)]
struct HeapBlock {
magic: u32,
size: u32,
previous: *mut HeapBlock,
next: *mut HeapBlock,
}
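// Heap blocks form an in-place doubly-linked list: `size` counts the data
// bytes following the header, and bit 0 of `magic` marks a block as taken.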
unsafe impl GlobalAlloc for Heap {
#[track_caller]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Round the requested size up to a 16-byte multiple
// NOTE: alignment requests above 16 bytes are not honored by this allocator
let count = ((layout.size() + 15) & !15) as u32;
// NOTE: that shouldn't be optimized away
let _lock = self.mutex.as_ref().unwrap().lock();
// Check if the memory is corrupted
let mut block_it = self.first();
while let Some(ref mut block) = block_it {
if (block.magic & HEAP_MAGIC) != HEAP_MAGIC {
panic!("Heap is out: block {:?}", block);
}
block_it = block.next_mut();
}
let mut block_it = self.first();
while let Some(ref mut block) = block_it {
if !block.is_available() {
block_it = block.next_mut();
continue;
}
if count == block.size {
block.take();
return block.data();
} else if count + size_of::<HeapBlock>() as u32 <= block.size {
block.split(count);
block.take();
return block.data();
}
block_it = block.next_mut();
}
null_mut()
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// NOTE: that shouldn't be optimized away
let _lock = self.mutex.as_ref().unwrap().lock();
let address = VirtualAddress::from_ptr(ptr);
// Check heap boundaries
if address < self.start
|| address > self.start + HEAP_SIZE
|| address + layout.size() > self.start + HEAP_SIZE
{
panic!("Deallocating out of heap");
};
// Get the block
let block: &mut HeapBlock = (address - size_of::<HeapBlock>()).as_mut().unwrap();
// Validate the block header magic (ignoring the "taken" bit)
if (block.magic & !1) != HEAP_MAGIC {
panic!(
"Heap block is corrupted: magic {:#018x} {:?}",
block.magic, block
);
}
// Mark the block as free
block.free();
// Merge with adjacent free blocks; an absorbed block's header becomes part
// of the merged block, so account for its size as well, and fix up the
// back-pointer of the block that follows the merged region
if let Some(next) = block.next.as_mut() {
if next.is_available() {
block.size += next.size + size_of::<HeapBlock>() as u32;
block.next = next.next;
if let Some(after) = block.next.as_mut() {
after.previous = block as *mut _;
}
}
}
if let Some(previous) = block.previous.as_mut() {
if previous.is_available() {
previous.size += block.size + size_of::<HeapBlock>() as u32;
previous.next = block.next;
if let Some(after) = previous.next.as_mut() {
after.previous = previous as *mut _;
}
}
}
// TODO: cfg debug turn the memory into junk
}
}
impl Heap {
pub unsafe fn place(addr: VirtualAddress<KernelSpace>) -> Self {
let block: &mut HeapBlock = addr.as_mut().unwrap();
block.magic = HEAP_MAGIC;
block.size = (HEAP_SIZE - size_of::<Heap>()) as u32;
block.previous = null_mut();
block.next = null_mut();
Heap {
start: addr,
mutex: Some(Mutex::new(())),
}
}
fn first(&self) -> Option<&'static mut HeapBlock> {
unsafe { self.start.as_mut::<HeapBlock>() }
}
#[allow(dead_code)]
fn dump(&self) {
// NOTE: that shouldn't be optimized away
let _lock = self.mutex.as_ref().unwrap().lock();
let mut block_it = self.first();
while let Some(ref mut block) = block_it {
debugln!("{:p}: {:?}", *block, block);
block_it = block.next_mut();
}
}
}
impl HeapBlock {
fn next_mut(&mut self) -> Option<&'static mut HeapBlock> {
unsafe { self.next.as_mut() }
}
fn is_available(&self) -> bool {
self.magic & 1 == 0
}
fn free(&mut self) {
self.magic = HEAP_MAGIC;
}
fn take(&mut self) {
self.magic |= 1;
}
fn split(&mut self, size: u32) -> &'static mut HeapBlock {
assert!(size + size_of::<HeapBlock>() as u32 <= self.size);
unsafe {
let mut new_block = &mut *(self.data().add(size as usize) as *mut HeapBlock);
new_block.previous = self;
new_block.next = self.next;
if let Some(ref mut p) = self.next_mut() {
p.previous = new_block;
};
new_block.magic = HEAP_MAGIC;
new_block.size = self.size - size - size_of::<HeapBlock>() as u32;
self.next = new_block;
self.size = size;
new_block
}
}
fn data(&mut self) -> *mut u8 {
unsafe { (self as *mut _ as *mut u8).add(size_of::<HeapBlock>()) }
}
}
#[global_allocator]
static mut KERNEL_HEAP: Heap = Heap {
start: VirtualAddress::null(),
mutex: None,
};
#[alloc_error_handler]
fn alloc_error_handler(layout: Layout) -> ! {
panic!("Failed to allocate {:?}", layout);
}
/// Initializes the kernel heap: reserves HEAP_SIZE bytes of contiguous
/// physical pages and places the initial free block there.
pub fn init() {
let base_addr_phys =
phys::alloc_contiguous_pages(PageUsage::Kernel, HEAP_SIZE / mem::PAGE_SIZE).unwrap();
let base_addr = VirtualAddress::try_from(base_addr_phys).unwrap();
unsafe {
KERNEL_HEAP = Heap::place(base_addr);
};
debugln!("Heap: {:?}", base_addr);
}

View File

@ -1,31 +0,0 @@
use address::{AddressSpace, PhysicalAddress, TrivialConvert};
pub mod phys;
pub mod heap;
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct KernelSpace;
impl AddressSpace for KernelSpace {
const NAME: &'static str = "kernel";
const OFFSET: usize = 0xFFFFFF8000000000;
const LIMIT: usize = 0xFFFFFF8000000000 + (2 << 30);
}
impl TrivialConvert for KernelSpace {}
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct EcamSpace;
impl AddressSpace for EcamSpace {
const NAME: &'static str = "ecam";
const OFFSET: usize = 0xFFFFFF8080000000;
const LIMIT: usize = 0xFFFFFF8080000000 + (1 << 30);
}
impl TrivialConvert for EcamSpace {}
pub const PAGE_SIZE: usize = 0x1000;
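/// Physical address of the end of the kernel image, taken from the
/// `__kernel_end_phys` symbol provided by the linker script.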
pub fn kernel_end_phys() -> PhysicalAddress {
extern "C" {
static __kernel_end_phys: u8;
}
PhysicalAddress::from(unsafe { &__kernel_end_phys } as *const _ as usize)
}

View File

@ -1,103 +0,0 @@
use super::{PageInfo, PageUsage};
use crate::{mem::PAGE_SIZE, KernelSpace};
use address::{PhysicalAddress, VirtualAddress};
use core::mem;
use spin::Mutex;
use error::Errno;
pub unsafe trait Manager {
fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno>;
fn alloc_contiguous_pages(
&mut self,
pu: PageUsage,
count: usize,
) -> Result<PhysicalAddress, Errno>;
fn free_page(&mut self, page: PhysicalAddress) -> Result<(), Errno>;
// TODO status()
}
pub struct SimpleManager {
pages: &'static mut [PageInfo],
base_index: usize,
}
impl SimpleManager {
pub(super) unsafe fn initialize(
base: PhysicalAddress,
at: PhysicalAddress,
count: usize,
) -> Self {
let pages: &'static mut [PageInfo] =
VirtualAddress::<KernelSpace>::from(at).as_slice_mut(count);
// Initialize uninit pages
for index in 0..count {
mem::forget(mem::replace(
&mut pages[index],
PageInfo {
refcount: 0,
usage: PageUsage::Reserved,
},
));
}
Self {
base_index: base.page_index(),
pages,
}
}
pub(super) unsafe fn add_page(&mut self, addr: PhysicalAddress) {
let page = &mut self.pages[addr.page_index() - self.base_index];
assert!(page.refcount == 0 && page.usage == PageUsage::Reserved);
page.usage = PageUsage::Available;
// Zero-fill the page before handing it out
let slice: &mut [u8; 4096] = VirtualAddress::<KernelSpace>::from(addr).as_mut().unwrap();
slice.fill(0);
}
}
unsafe impl Manager for SimpleManager {
fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno> {
for index in 0..self.pages.len() {
let page = &mut self.pages[index];
if page.usage == PageUsage::Available {
page.usage = pu;
page.refcount = 1;
return Ok(PhysicalAddress::from((self.base_index + index) * PAGE_SIZE));
}
}
Err(Errno::OutOfMemory)
}
fn alloc_contiguous_pages(
&mut self,
pu: PageUsage,
count: usize,
) -> Result<PhysicalAddress, Errno> {
// Only scan windows that fit entirely within the page array
if count == 0 || count > self.pages.len() {
return Err(Errno::OutOfMemory);
}
'l0: for i in 0..=(self.pages.len() - count) {
for j in 0..count {
if self.pages[i + j].usage != PageUsage::Available {
continue 'l0;
}
}
for j in 0..count {
let page = &mut self.pages[i + j];
assert!(page.usage == PageUsage::Available);
page.usage = pu;
page.refcount = 1;
}
return Ok(PhysicalAddress::from((self.base_index + i) * PAGE_SIZE));
}
Err(Errno::OutOfMemory)
}
fn free_page(&mut self, _page: PhysicalAddress) -> Result<(), Errno> {
todo!()
}
}
pub(super) static MANAGER: Mutex<Option<SimpleManager>> = Mutex::new(None);

View File

@ -1,158 +0,0 @@
use super::PAGE_SIZE;
use core::panic::Location;
use address::PhysicalAddress;
use core::convert::TryFrom;
use core::mem::size_of;
use error::Errno;
mod manager;
use manager::{Manager, SimpleManager, MANAGER};
mod reserved;
pub use reserved::ReservedRegion;
mod pbox;
pub use pbox::PhysBox;
type ManagerImpl = SimpleManager;
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum PageUsage {
Reserved,
Available,
Kernel,
}
pub struct PageInfo {
refcount: usize,
usage: PageUsage,
}
#[derive(Clone)]
pub struct UsableMemory {
pub start: PhysicalAddress,
pub end: PhysicalAddress,
}
#[repr(transparent)]
#[derive(Clone)]
pub struct SimpleMemoryIterator {
inner: Option<UsableMemory>,
}
impl SimpleMemoryIterator {
pub const fn new(reg: UsableMemory) -> Self {
Self { inner: Some(reg) }
}
}
impl Iterator for SimpleMemoryIterator {
type Item = UsableMemory;
fn next(&mut self) -> Option<Self::Item> {
self.inner.take()
}
}
const MAX_PAGES: usize = 16 * 1024;
pub fn alloc_page(pu: PageUsage) -> Result<PhysicalAddress, Errno> {
MANAGER.lock().as_mut().unwrap().alloc_page(pu)
}
pub fn alloc_contiguous_pages(pu: PageUsage, count: usize) -> Result<PhysicalAddress, Errno> {
MANAGER
.lock()
.as_mut()
.unwrap()
.alloc_contiguous_pages(pu, count)
}
pub fn free_page(page: PhysicalAddress) -> Result<(), Errno> {
MANAGER.lock().as_mut().unwrap().free_page(page)
}
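// First-fit scan for `count` consecutive non-reserved pages across the given
// usable regions.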
fn find_contiguous<T: Iterator<Item = UsableMemory>>(
iter: T,
count: usize,
) -> Option<PhysicalAddress> {
for region in iter {
let mut collected = 0;
let mut base_addr = None;
for addr in (region.start..region.end).step_by(PAGE_SIZE) {
if reserved::is_reserved(addr) {
collected = 0;
base_addr = None;
continue;
}
if base_addr.is_none() {
base_addr = Some(addr);
}
collected += 1;
if collected == count {
return base_addr;
}
}
}
None
}
pub unsafe fn init_from_iter<T: Iterator<Item = UsableMemory> + Clone>(iter: T) {
let mut mem_base = PhysicalAddress::from(usize::MAX);
for reg in iter.clone() {
if reg.start < mem_base {
mem_base = reg.start;
}
}
debugln!("Memory base is {:?}", mem_base);
// Step 1. Count available memory
let mut total_pages = 0usize;
for reg in iter.clone() {
total_pages +=
usize::try_from(PhysicalAddress::diff(reg.start, reg.end)).unwrap() / PAGE_SIZE;
}
// TODO maybe instead of size_of::<...> use Layout?
// The page array stores plain PageInfo entries, so size it accordingly
let need_pages = ((total_pages * size_of::<PageInfo>()) + 0xFFF) / 0x1000;
reserved::reserve_kernel();
// Step 2. Allocate memory for page array
let pages_base =
find_contiguous(iter.clone(), need_pages).expect("Failed to allocate memory for page info");
reserved::reserve_pages(pages_base, need_pages);
// Step 3. Initialize the memory manager with available pages
let mut manager = ManagerImpl::initialize(mem_base, pages_base, total_pages);
let mut usable_pages = 0usize;
'l0: for region in iter {
for addr in (region.start..region.end).step_by(PAGE_SIZE) {
if !reserved::is_reserved(addr) {
manager.add_page(addr);
usable_pages += 1;
if usable_pages == MAX_PAGES {
break 'l0;
}
}
}
}
debug!("{}K of usable physical memory\n", usable_pages * 4);
*MANAGER.lock() = Some(manager);
}
pub fn init_from_platform() {
cfg_if! {
if #[cfg(feature = "mach_rpi3b")] {
use crate::arch::mach_bcm283x;
mach_bcm283x::init_phys_memory();
}
}
}

View File

@ -1,25 +0,0 @@
use super::PageUsage;
use address::PhysicalAddress;
use error::Errno;
pub struct PhysBox {
base: PhysicalAddress,
count: usize,
}
impl PhysBox {
pub fn new(pu: PageUsage) -> Result<Self, Errno> {
Ok(Self {
base: super::alloc_page(pu)?,
count: 1,
})
}
}
impl Drop for PhysBox {
fn drop(&mut self) {
for p in 0..self.count {
super::free_page(self.base + p * 0x1000).unwrap();
}
}
}

View File

@ -1,76 +0,0 @@
use crate::mem::{kernel_end_phys, PAGE_SIZE};
use address::PhysicalAddress;
use core::mem::MaybeUninit;
use core::ptr::null_mut;
pub struct ReservedRegion {
pub start: PhysicalAddress,
pub end: PhysicalAddress,
next: *mut ReservedRegion,
}
pub struct ReservedRegionIterator {
ptr: *mut ReservedRegion,
}
impl Iterator for ReservedRegionIterator {
type Item = &'static mut ReservedRegion;
fn next(&mut self) -> Option<Self::Item> {
if let Some(item) = unsafe { self.ptr.as_mut() } {
self.ptr = item.next;
Some(item)
} else {
None
}
}
}
impl ReservedRegion {
pub const fn new(start: PhysicalAddress, end: PhysicalAddress) -> ReservedRegion {
assert!(start.is_paligned() && end.is_paligned());
ReservedRegion {
start,
end,
next: null_mut(),
}
}
}
static mut RESERVED_REGIONS_HEAD: *mut ReservedRegion = null_mut();
static mut RESERVED_REGION_KERNEL: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
static mut RESERVED_REGION_PAGES: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
pub unsafe fn reserve(region: *mut ReservedRegion) {
(*region).next = RESERVED_REGIONS_HEAD;
RESERVED_REGIONS_HEAD = region;
}
pub(super) unsafe fn reserve_kernel() {
RESERVED_REGION_KERNEL.write(ReservedRegion::new(
PhysicalAddress::from(0usize),
kernel_end_phys(),
));
reserve(RESERVED_REGION_KERNEL.as_mut_ptr());
}
pub(super) unsafe fn reserve_pages(base: PhysicalAddress, count: usize) {
RESERVED_REGION_PAGES.write(ReservedRegion::new(base, base + count * PAGE_SIZE));
reserve(RESERVED_REGION_PAGES.as_mut_ptr());
}
pub fn is_reserved(page: PhysicalAddress) -> bool {
unsafe {
let mut iter = RESERVED_REGIONS_HEAD;
while !iter.is_null() {
let region = &*iter;
if page >= region.start && page < region.end {
return true;
}
iter = region.next;
}
}
false
}

View File

@ -1,54 +0,0 @@
.section .text
.global context_enter_kernel
.global context_switch_to
.global context_switch
.macro __callee_save_ctx
sub sp, sp, #96
stp x19, x20, [sp, #0]
stp x21, x22, [sp, #16]
stp x23, x24, [sp, #32]
stp x25, x26, [sp, #48]
stp x27, x29, [sp, #64]
stp xzr, lr, [sp, #80]
.endm
.macro __callee_restore_ctx
ldp x19, x20, [sp, #0]
ldp x21, x22, [sp, #16]
ldp x23, x24, [sp, #32]
ldp x25, x26, [sp, #48]
ldp x27, x29, [sp, #64]
ldp xzr, lr, [sp, #80]
add sp, sp, #96
.endm
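// context_switch saves the outgoing task's callee-saved registers and stack
// pointer, then falls through into context_switch_to, which installs the new
// task's stack and restores its registers; fresh tasks begin at
// context_enter_kernel, which erets into the entry point.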
context_enter_kernel:
mov x0, #5
msr spsr_el1, x0
ldp x0, x1, [sp]
msr elr_el1, x1
eret
context_switch:
msr daifset, #0xF
// Store old callee-saved regs
__callee_save_ctx
// Store old stack
mov x19, sp
str x19, [x1]
context_switch_to:
msr daifset, #0xF
// Load new stack
ldr x0, [x0]
mov sp, x0
// Load new callee-saved regs from stack
__callee_restore_ctx
ret

View File

@ -1,68 +0,0 @@
use crate::{KernelSpace, mem::phys::{self, PageUsage}};
use address::{VirtualAddress};
global_asm!(include_str!("context.S"));
#[repr(C)]
pub struct Context {
sp: VirtualAddress<KernelSpace>, // 0x00
}
struct WriteStack {
bp: VirtualAddress<KernelSpace>,
sp: VirtualAddress<KernelSpace>
}
extern "C" {
fn context_enter_kernel();
pub(super) fn context_switch_to(dst: *mut Context);
pub(super) fn context_switch(dst: *mut Context, src: *mut Context);
}
impl Context {
pub fn new_kernel(entry: usize, arg: usize) -> Self {
let pages = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
let stack_bottom = VirtualAddress::<KernelSpace>::from(pages);
let mut stack = WriteStack::new(stack_bottom, 4 * 4096);
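// Build the initial frame in the exact shape __callee_restore_ctx pops:
// x19..x27, x29, xzr/lr, with entry and arg left on top for
// context_enter_kernel to consume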
stack.push(entry);
stack.push(arg);
stack.push(context_enter_kernel as usize); // x30 LR
stack.push(0); // xzr
stack.push(0); // x29
stack.push(0); // x27
stack.push(0); // x26
stack.push(0); // x25
stack.push(0); // x24
stack.push(0); // x23
stack.push(0); // x22
stack.push(0); // x21
stack.push(0); // x20
stack.push(0); // x19
Context {
sp: stack.sp,
}
}
}
impl WriteStack {
pub fn new(bottom: VirtualAddress<KernelSpace>, size: usize) -> Self {
Self {
bp: bottom,
sp: bottom + size,
}
}
pub fn push(&mut self, value: usize) {
if self.sp == self.bp {
panic!("Stack overflow");
}
self.sp -= 8;
unsafe {
core::ptr::write(self.sp.as_mut_ptr(), value);
}
}
}

View File

@ -1,72 +0,0 @@
use crate::{
arch::{cpu::Cpu, intrin},
};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicU32, Ordering};
use spin::Mutex;
pub mod context;
pub mod sched;
pub use context::Context;
pub use sched::Scheduler;
#[repr(C)]
pub struct Process {
context: Context,
id: u32,
cpu_id: Option<u32>,
queue_prev: *mut Process,
queue_next: *mut Process,
}
impl Process {
pub fn new_kernel(entry: usize, arg: usize) -> Self {
Self {
context: Context::new_kernel(entry, arg),
id: new_pid(),
cpu_id: None,
queue_prev: null_mut(),
queue_next: null_mut(),
}
}
}
pub fn new_pid() -> u32 {
LAST_PID.fetch_add(1, Ordering::SeqCst)
}
extern "C" fn f0(arg: usize) {
loop {
debug!("{}", arg);
}
}
static LAST_PID: AtomicU32 = AtomicU32::new(1);
static mut S: [MaybeUninit<Process>; 2] = MaybeUninit::uninit_array();
pub fn enter() -> ! {
unsafe {
//let mut cpu = Cpu::get();
//cpu.scheduler.init();
let sched = Scheduler::get();
S[0].write(Process::new_kernel(f0 as usize, 0));
S[1].write(Process::new_kernel(f0 as usize, 1));
sched.queue(S[0].as_mut_ptr());
//cpu.scheduler.queue(S[0].as_mut_ptr());
intrin::delay(4000);
sched.queue(S[1].as_mut_ptr());
//cpu.scheduler.queue(S[1].as_mut_ptr());
sched.enter();
//cpu.scheduler.enter();
}
loop {}
}

View File

@ -1,174 +0,0 @@
use crate::{
arch::{cpu, intrin},
proc::{
context::{context_switch, context_switch_to},
Process,
},
sync::Spin as Mutex
};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, Ordering};
pub struct Queue {
head: *mut Process,
current: *mut Process,
size: usize,
}
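// The run queue is a circular doubly-linked list of Process nodes; `current`
// is the process executing on the CPU that owns this queue.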
pub struct Scheduler {
queue: Mutex<Queue>,
ready: AtomicBool,
idle: MaybeUninit<Process>,
cpu_index: usize,
}
impl Queue {
pub const fn new() -> Self {
Self {
head: null_mut(),
current: null_mut(),
size: 0,
}
}
}
impl Scheduler {
pub const fn new() -> Self {
Self {
queue: Mutex::new(Queue::new()),
ready: AtomicBool::new(false),
idle: MaybeUninit::uninit(),
cpu_index: 0,
}
}
pub unsafe fn init(&mut self) {
self.idle.write(Process::new_kernel(idle_fn as usize, 0));
}
pub fn is_ready(&self) -> bool {
self.ready.load(Ordering::Acquire)
}
pub fn queue(&mut self, proc: *mut Process) {
let irq_state = unsafe { intrin::save_irq() };
let mut queue = self.queue.lock();
unsafe {
if !queue.head.is_null() {
let queue_tail = (*queue.head).queue_prev;
(*queue_tail).queue_next = proc;
(*proc).queue_prev = queue_tail;
(*queue.head).queue_prev = proc;
(*proc).queue_next = queue.head;
} else {
(*proc).queue_prev = proc;
(*proc).queue_next = proc;
queue.head = proc;
}
}
queue.size += 1;
unsafe {
intrin::restore_irq(irq_state);
}
}
pub unsafe fn switch_to(&mut self, dst: *mut Process) {
intrin::disable_irq();
let mut queue = self.queue.lock();
let src = queue.current;
if src == dst {
return;
}
assert!((*dst).cpu_id.is_none());
queue.current = dst;
(*dst).cpu_id = Some(self.cpu_index as u32);
if src.is_null() {
drop(queue);
context_switch_to(&mut (*dst).context);
} else {
(*src).cpu_id = None;
drop(queue);
context_switch(&mut (*dst).context, &mut (*src).context);
}
}
pub unsafe fn enter(&mut self) -> ! {
intrin::disable_irq();
let queue = self.queue.lock();
self.ready.store(true, Ordering::Release);
let initial = if let Some(first) = queue.head.as_mut() {
first
} else {
self.idle.as_mut_ptr()
};
drop(queue);
debugln!("cpu{}: N -> {:p}", self.cpu_index, initial);
self.switch_to(initial);
panic!("This code should not run");
}
pub unsafe fn switch(&mut self) {
intrin::disable_irq();
let mut queue = self.queue.lock();
let src = queue.current;
assert!(!src.is_null());
let src = &mut *src;
let dst = if !src.queue_next.is_null() {
src.queue_next
} else if !queue.head.is_null() {
queue.head
} else {
self.idle.as_mut_ptr()
};
assert!(!dst.is_null());
//debugln!("cpu{}: {:p} -> {:p}", self.cpu_index, src, dst);
drop(queue);
self.switch_to(dst);
}
pub unsafe fn get() -> &'static mut Self {
&mut SCHEDULER
}
}
unsafe impl Send for Queue {}
extern "C" fn idle_fn(_arg: usize) {
loop {}
}
static mut SCHEDULER: Scheduler = Scheduler::new();
pub fn queue(proc: *mut Process) {
unsafe {
Scheduler::get().queue(proc);
}
}
pub fn sched_yield() {
unsafe {
let sched = Scheduler::get();
if sched.is_ready() {
sched.switch();
}
}
}
pub fn unqueue(_proc: *mut Process) {
todo!()
}

View File

@ -1,78 +0,0 @@
use crate::arch::{intrin, cpu::{self, Cpu}};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicUsize, Ordering};
pub struct Spin<T: ?Sized + Send> {
inner: AtomicUsize,
value: UnsafeCell<T>,
}
pub struct SpinGuard<'a, T: ?Sized + Send> {
inner: &'a AtomicUsize,
value: &'a mut T,
irq_state: usize,
}
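// `inner` is usize::MAX when the lock is free, otherwise the physical id of
// the owning core; IRQs stay masked for the guard's lifetime.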
impl<T: Send> Spin<T> {
#[inline(always)]
pub const fn new(value: T) -> Self {
Self {
inner: AtomicUsize::new(usize::MAX),
value: UnsafeCell::new(value),
}
}
}
impl<T: ?Sized + Send> Spin<T> {
#[inline]
pub fn lock(&self) -> SpinGuard<T> {
let irq_state = unsafe { intrin::save_irq() };
let cpu_id = cpu::get_phys_id();
while self
.inner
.compare_exchange_weak(usize::MAX, cpu_id as usize, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
while self.inner.load(Ordering::Relaxed) != usize::MAX {
intrin::nop();
}
}
SpinGuard {
inner: &self.inner,
value: unsafe { &mut *self.value.get() },
irq_state,
}
}
}
unsafe impl<T: ?Sized + Send> Sync for Spin<T> {}
unsafe impl<T: ?Sized + Send> Send for Spin<T> {}
impl<'a, T: ?Sized + Send> Drop for SpinGuard<'a, T> {
#[inline]
fn drop(&mut self) {
self.inner.store(usize::MAX, Ordering::Release);
unsafe {
intrin::restore_irq(self.irq_state);
}
}
}
impl<'a, T: ?Sized + Send> Deref for SpinGuard<'a, T> {
type Target = T;
#[inline(always)]
fn deref(&self) -> &Self::Target {
self.value
}
}
impl<'a, T: ?Sized + Send> DerefMut for SpinGuard<'a, T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
self.value
}
}

View File

@ -1,9 +0,0 @@
#[derive(Debug)]
pub struct Time {
pub year: u32,
pub mday: u8,
pub mon: u8,
pub hour: u8,
pub min: u8,
pub sec: u8
}

51
qemu.sh
View File

@ -1,51 +0,0 @@
#!/bin/sh
set -e
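# MACH selects the QEMU machine (rpi3b or virt), PROFILE the cargo build
# profile; QEMU_DINT=1 enables interrupt tracing.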
if [ -z "${MACH}" ]; then
MACH=rpi3b
fi
if [ -z "${PROFILE}" ]; then
PROFILE=debug
fi
if [ -z "$QEMU_BIN" ]; then
QEMU_BIN=qemu-system-aarch64
fi
ARCH=aarch64-unknown-none-${MACH}
KERNEL=target/${ARCH}/${PROFILE}/kernel
QEMU_OPTS="-chardev stdio,wait=off,id=char0,mux=on \
-mon chardev=char0"
if [ "$QEMU_DINT" = 1 ]; then
QEMU_OPTS="$QEMU_OPTS -d int"
fi
case ${MACH} in
rpi3b)
QEMU_OPTS="$QEMU_OPTS \
-serial null \
-serial chardev:char0 \
-dtb bcm2837-rpi-3-b.dtb \
-M raspi3b"
;;
virt)
KERNEL=target/${ARCH}/${PROFILE}/kernel.bin
QEMU_OPTS="$QEMU_OPTS \
-serial chardev:char0 \
-M virt,virtualization=on \
-cpu cortex-a57 \
-m 256"
esac
QEMU_OPTS="$QEMU_OPTS \
-kernel ${KERNEL} \
-display none \
-device rtl8139 \
-s"
./build.sh
${QEMU_BIN} ${QEMU_OPTS}

228
src/boot/entry.S Normal file
View File

@ -0,0 +1,228 @@
.set SPSR_EL3_EL2t, 0x8
.set SPSR_EL3_EL2h, 0x9
.set SPSR_EL3_EL1t, 0x4
.set SPSR_EL3_EL1h, 0x5
.set SCR_EL3_RW, (1 << 10)
.set SCR_EL3_SMD, (1 << 7)
.set SCR_EL3_RES1, (3 << 4)
.set SCR_EL3_NS, (1 << 0)
.set SPSR_EL2_EL1h, 0x5
.set HCR_EL2_RW, (1 << 31)
.set HCR_EL2_HCD, (1 << 29)
.set HCR_EL2_C, (1 << 2)
.set HCR_EL2_A, (1 << 1)
.set MAIR_EL1_OUTER_NC, 4
.set MAIR_EL1_INNER_NC, (4 << 4)
.set MAIR_EL1_DEVICE_nGRE, 0
.set MAIR_EL1_DEVICE, 0
.set TCR_EL1_IPS_48, (5 << 32)
.set TCR_EL1_TG1_4K, (2 << 30)
.cpu cortex-a57
.section .text.boot
.global _entry
_entry:
mrs x0, mpidr_el1
ands x0, x0, #3
beq _entry_bsp
1:
b 1b
.section .text
_entry_bsp:
// EL3 check
mrs x0, CurrentEL
lsr x0, x0, #2
cmp x0, #3
bne 1f
// Leave EL3
adr x0, 1f
msr elr_el3, x0
mov x0, #SPSR_EL3_EL2h
// TODO mask DAIF?
msr spsr_el3, x0
mov x0, #(SCR_EL3_RW | SCR_EL3_SMD | SCR_EL3_RES1 | SCR_EL3_NS)
msr scr_el3, x0
eret
1:
// EL2 check
mrs x0, CurrentEL
lsr x0, x0, #2
cmp x0, #2
bne 1f
// Leave EL2
adr x0, 1f
msr elr_el2, x0
// TODO mask DAIF?
mov x0, #SPSR_EL2_EL1h
msr spsr_el2, x0
mov x0, #(HCR_EL2_RW | HCR_EL2_HCD)
orr x0, x0, #HCR_EL2_A
msr hcr_el2, x0
eret
1:
// Setup paging tables
adr x8, kernel_l1
mov x2, #(1 << 0) // Present
orr x2, x2, #(1 << 10) // Accessed
orr x2, x2, #(3 << 8) // Inner shareable
// orr x2, x2, #(0 << 2) // MAIR[0]
str x2, [x8]
mov x2, #(1 << 0) // Present
orr x2, x2, #(1 << 10) // Accessed
orr x2, x2, #(3 << 8) // Inner shareable
orr x2, x2, #(1 << 2) // MAIR[1]
str x2, [x8, #8]
// mov x1, #512
//1:
// sub x1, x1, #1
//
// lsl x0, x1, #30
// orr x0, x0, x2
//
// str x0, [x8, x1, lsl #3]
//
// cmp x1, xzr
// bne 1b
// Setup the MMU
mov x0, #(MAIR_EL1_INNER_NC | MAIR_EL1_OUTER_NC)
msr mair_el1, x0
// TCR_EL1: 48-bit IPA, 4K granule for TTBR1, T0SZ = T1SZ = 25 (39-bit VA)
mov x0, #TCR_EL1_IPS_48
orr x0, x0, #TCR_EL1_TG1_4K
ldr x1, =(25 << 16)
orr x0, x0, x1
ldr x1, =25
orr x0, x0, x1
msr tcr_el1, x0
isb
adr x0, kernel_l1
msr ttbr1_el1, x0
msr ttbr0_el1, x0
dsb ish
isb
mrs x0, sctlr_el1
// Clear endianness, WXN, cache and alignment-check bits; M is set below to
// enable the MMU with caches still off
ldr x1, =~((1 << 25) | (1 << 24) | (1 << 19) | (1 << 12) | (1 << 4) | (1 << 2) | (1 << 0) | (1 << 1))
and x0, x0, x1
orr x0, x0, #1
msr sctlr_el1, x0
isb
adr x0, upper_half
ldr x1, =0xFFFFFF8000000000
add x0, x0, x1
br x0
upper_half:
// Drop the identity mapping now that we run in the upper half
msr ttbr0_el1, xzr
adr x0, el1_vectors
msr vbar_el1, x0
adr x0, bsp_stack_top
mov sp, x0
bl kernel_main
b .
.section .rodata
.p2align 7
el1_vectors:
// Current level with SP_EL0
vec_el1_sp_el0_sync:
b .
.p2align 7
vec_el1_sp_el0_irq:
b .
.p2align 7
vec_el1_sp_el0_fiq:
b .
.p2align 7
vec_el1_sp_el0_serror:
b .
// Current level with SL_ELx, x > 0
.p2align 7
vec_el1_sp_elx_sync:
sub sp, sp, #192
stp x0, x1, [sp, #0]
stp x2, x3, [sp, #16]
stp x4, x5, [sp, #32]
stp x6, x7, [sp, #48]
stp x8, x9, [sp, #64]
stp x10, x11, [sp, #80]
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
stp x18, x29, [sp, #144]
stp x30, xzr, [sp, #160]
mrs x0, esr_el1
mrs x1, far_el1
stp x0, x1, [sp, #176]
mov x0, sp
bl exc_handler
.p2align 7
vec_el1_sp_elx_irq:
b .
.p2align 7
vec_el1_sp_elx_fiq:
b .
.p2align 7
vec_el1_sp_elx_serror:
b .
// Lower EL, AArch64
.p2align 7
vec_el0_aa64_sync:
b .
.p2align 7
vec_el0_aa64_irq:
b .
.p2align 7
vec_el0_aa64_fiq:
b .
.p2align 7
vec_el0_aa64_serror:
b .
// Lower EL, AArch32
.p2align 7
vec_el0_aa32_sync:
b .
.p2align 7
vec_el0_aa32_irq:
b .
.p2align 7
vec_el0_aa32_fiq:
b .
.p2align 7
vec_el0_aa32_serror:
b .
.section .bss
.p2align 4
bsp_stack_bottom:
.skip 32768
bsp_stack_top:
.p2align 12
kernel_l1:
.skip 4096

View File

57
src/main.rs Normal file
View File

@ -0,0 +1,57 @@
#![feature(global_asm, llvm_asm)]
#![no_std]
#![no_main]
pub mod boot;
#[repr(C)]
struct ExceptionContext {
x0: usize,
x1: usize,
x2: usize,
x3: usize,
x4: usize,
x5: usize,
x6: usize,
x7: usize,
x8: usize,
x9: usize,
x10: usize,
x11: usize,
x12: usize,
x13: usize,
x14: usize,
x15: usize,
x16: usize,
x17: usize,
x18: usize,
fp: usize,  // x29
lr: usize,  // x30
_r0: usize, // padding slot (xzr)
esr: usize, // ESR_EL1
far: usize, // FAR_EL1
}
#[no_mangle]
extern "C" fn exc_handler(context: ExceptionContext) -> ! {
loop {}
}
#[no_mangle]
extern "C" fn kernel_main() -> ! {
unsafe {
// Deliberately read an unmapped address to trigger a synchronous
// exception and exercise exc_handler
let _v = *(0x1234 as *mut u64);
}
loop {
unsafe {
llvm_asm!("wfe");
}
}
}
use core::panic::PanicInfo;
#[panic_handler]
fn panic_handler(_pi: &PanicInfo) -> ! {
loop {}
}