Compare commits
3 Commits
060cac1565
...
4134f7ac02
Author | SHA1 | Date | |
---|---|---|---|
4134f7ac02 | |||
f60c3ac644 | |||
fe4c2f7c77 |
44
Cargo.lock
generated
44
Cargo.lock
generated
@ -2,6 +2,50 @@
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "address"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"error",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "error"
|
||||
version = "0.1.0"
|
||||
|
||||
[[package]]
|
||||
name = "kernel"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"address",
|
||||
"error",
|
||||
"spin",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb"
|
||||
dependencies = [
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "osdev4"
|
||||
version = "0.1.0"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.9.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5"
|
||||
dependencies = [
|
||||
"lock_api",
|
||||
]
|
||||
|
@ -3,6 +3,9 @@ name = "osdev4"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
[workspace]
|
||||
members = [
|
||||
"kernel",
|
||||
"error",
|
||||
"address"
|
||||
]
|
||||
|
9
address/Cargo.toml
Normal file
9
address/Cargo.toml
Normal file
@ -0,0 +1,9 @@
|
||||
[package]
|
||||
name = "address"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
error = { path = "../error" }
|
0
address/src/base/mod.rs
Normal file
0
address/src/base/mod.rs
Normal file
19
address/src/lib.rs
Normal file
19
address/src/lib.rs
Normal file
@ -0,0 +1,19 @@
|
||||
//! Type-safe wrappers for different address kinds
|
||||
#![no_std]
|
||||
#![feature(
|
||||
step_trait,
|
||||
const_fn_trait_bound
|
||||
)]
|
||||
// #![warn(missing_docs)]
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate std;
|
||||
|
||||
pub mod virt;
|
||||
pub mod phys;
|
||||
|
||||
trait Address {}
|
||||
|
||||
pub use phys::PhysicalAddress;
|
||||
pub use virt::{AddressSpace, NoTrivialConvert, TrivialConvert, VirtualAddress};
|
162
address/src/phys.rs
Normal file
162
address/src/phys.rs
Normal file
@ -0,0 +1,162 @@
|
||||
use crate::{AddressSpace, TrivialConvert, VirtualAddress};
|
||||
use core::convert::TryFrom;
|
||||
use core::fmt;
|
||||
use core::iter::Step;
|
||||
use core::ops::{Add, AddAssign, Neg, Sub, SubAssign};
|
||||
|
||||
#[repr(transparent)]
|
||||
#[derive(PartialEq, PartialOrd, Copy, Clone)]
|
||||
pub struct PhysicalAddress(usize);
|
||||
|
||||
// Arithmetic
|
||||
impl<A: Into<usize>> Add<A> for PhysicalAddress {
|
||||
type Output = Self;
|
||||
|
||||
#[inline(always)]
|
||||
fn add(self, rhs: A) -> Self {
|
||||
// Will panic on overflow
|
||||
Self::from(self.0 + rhs.into())
|
||||
}
|
||||
}
|
||||
impl<A: Into<usize>> AddAssign<A> for PhysicalAddress {
|
||||
#[inline(always)]
|
||||
fn add_assign(&mut self, rhs: A) {
|
||||
// Will panic on overflow
|
||||
*self = Self::from(self.0 + rhs.into());
|
||||
}
|
||||
}
|
||||
impl Sub<usize> for PhysicalAddress {
|
||||
type Output = Self;
|
||||
|
||||
#[inline(always)]
|
||||
fn sub(self, rhs: usize) -> Self {
|
||||
Self::from(self.0 - rhs)
|
||||
}
|
||||
}
|
||||
impl SubAssign<usize> for PhysicalAddress {
|
||||
#[inline(always)]
|
||||
fn sub_assign(&mut self, rhs: usize) {
|
||||
*self = Self::from(self.0 - rhs);
|
||||
}
|
||||
}
|
||||
|
||||
// Construction
|
||||
impl From<usize> for PhysicalAddress {
|
||||
fn from(p: usize) -> Self {
|
||||
Self(p)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
impl From<u64> for PhysicalAddress {
|
||||
fn from(p: u64) -> Self {
|
||||
Self(p as usize)
|
||||
}
|
||||
}
|
||||
|
||||
impl PhysicalAddress {
|
||||
#[inline(always)]
|
||||
pub fn diff(start: PhysicalAddress, end: PhysicalAddress) -> isize {
|
||||
if end >= start {
|
||||
isize::try_from(end.0 - start.0).expect("Address subtraction overflowed")
|
||||
} else {
|
||||
-isize::try_from(start.0 - end.0).expect("Address subtraction overflowed")
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn diff_unchecked(start: PhysicalAddress, end: PhysicalAddress) -> isize {
|
||||
end.0 as isize - start.0 as isize
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub const fn is_paligned(self) -> bool {
|
||||
return self.0 & 0xFFF == 0
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub const fn page_index(self) -> usize {
|
||||
self.0 >> 12
|
||||
}
|
||||
}
|
||||
|
||||
// Trivial conversion PhysicalAddress -> VirtualAddress
|
||||
impl<T: AddressSpace + TrivialConvert> From<PhysicalAddress> for VirtualAddress<T> {
|
||||
fn from(p: PhysicalAddress) -> Self {
|
||||
VirtualAddress::from(p.0 + T::OFFSET)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
impl From<PhysicalAddress> for u64 {
|
||||
#[inline(always)]
|
||||
fn from(p: PhysicalAddress) -> Self {
|
||||
p.0 as u64
|
||||
}
|
||||
}
|
||||
|
||||
// Formatting
|
||||
impl fmt::Debug for PhysicalAddress {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "<phys {:#018x}>", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
// Step
|
||||
impl Step for PhysicalAddress {
|
||||
#[inline]
|
||||
fn steps_between(_p0: &Self, _p1: &Self) -> Option<usize> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn forward_checked(p: Self, steps: usize) -> Option<Self> {
|
||||
p.0.checked_add(steps).map(Self::from)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn backward_checked(p: Self, steps: usize) -> Option<Self> {
|
||||
p.0.checked_sub(steps).map(Self::from)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{AddressSpace, NoTrivialConvert, TrivialConvert, VirtualAddress};
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, PartialOrd)]
|
||||
struct S0;
|
||||
impl AddressSpace for S0 {
|
||||
const NAME: &'static str = "S0";
|
||||
const OFFSET: usize = 0x8000;
|
||||
const LIMIT: usize = Self::OFFSET + 0x4000;
|
||||
}
|
||||
impl TrivialConvert for S0 {}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, PartialOrd)]
|
||||
struct S1;
|
||||
impl AddressSpace for S1 {
|
||||
const NAME: &'static str = "S1";
|
||||
const OFFSET: usize = 0;
|
||||
const LIMIT: usize = 0;
|
||||
}
|
||||
impl NoTrivialConvert for S1 {}
|
||||
|
||||
#[test]
|
||||
fn test_virt_convert_valid() {
|
||||
let p0 = PhysicalAddress::from(0x1234usize);
|
||||
assert_eq!(
|
||||
VirtualAddress::<S0>::from(p0),
|
||||
VirtualAddress::<S0>::from(0x9234usize)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_virt_convert_invalid() {
|
||||
let p0 = PhysicalAddress::from(0x4321usize);
|
||||
let _v = VirtualAddress::<S0>::from(p0);
|
||||
}
|
||||
}
|
349
address/src/virt.rs
Normal file
349
address/src/virt.rs
Normal file
@ -0,0 +1,349 @@
|
||||
use super::PhysicalAddress;
|
||||
use core::convert::TryFrom;
|
||||
use core::fmt;
|
||||
use core::iter::Step;
|
||||
use core::marker::PhantomData;
|
||||
use core::ops::{Add, AddAssign, Neg, Sub, SubAssign};
|
||||
|
||||
pub trait AddressSpace: Copy + Clone + PartialEq + PartialOrd {
|
||||
const NAME: &'static str;
|
||||
const OFFSET: usize;
|
||||
const LIMIT: usize;
|
||||
}
|
||||
|
||||
pub trait NoTrivialConvert {}
|
||||
pub trait TrivialConvert {}
|
||||
|
||||
#[repr(transparent)]
|
||||
#[derive(Copy, Clone, PartialOrd, PartialEq)]
|
||||
pub struct VirtualAddress<Kind: AddressSpace>(usize, PhantomData<Kind>);
|
||||
|
||||
// Arithmetic
|
||||
impl<T: AddressSpace> Add<usize> for VirtualAddress<T> {
|
||||
type Output = Self;
|
||||
|
||||
#[inline(always)]
|
||||
fn add(self, rhs: usize) -> Self {
|
||||
// Will panic on overflow
|
||||
Self::from(self.0 + rhs)
|
||||
}
|
||||
}
|
||||
impl<T: AddressSpace> AddAssign<usize> for VirtualAddress<T> {
|
||||
#[inline(always)]
|
||||
fn add_assign(&mut self, rhs: usize) {
|
||||
// Will panic on overflow
|
||||
*self = Self::from(self.0 + rhs);
|
||||
}
|
||||
}
|
||||
impl<T: AddressSpace> Sub<usize> for VirtualAddress<T> {
|
||||
type Output = Self;
|
||||
|
||||
#[inline(always)]
|
||||
fn sub(self, rhs: usize) -> Self {
|
||||
// Will panic on underflow
|
||||
Self::from(self.0 - rhs)
|
||||
}
|
||||
}
|
||||
impl<T: AddressSpace> SubAssign<usize> for VirtualAddress<T> {
|
||||
#[inline(always)]
|
||||
fn sub_assign(&mut self, rhs: usize) {
|
||||
// Will panic on underflow
|
||||
*self = Self::from(self.0 - rhs);
|
||||
}
|
||||
}
|
||||
|
||||
// Trivial conversion VirtualAddress -> PhysicalAddress
|
||||
impl<T: AddressSpace + TrivialConvert> From<VirtualAddress<T>> for PhysicalAddress {
|
||||
#[inline(always)]
|
||||
fn from(virt: VirtualAddress<T>) -> Self {
|
||||
assert!(virt.0 < T::LIMIT);
|
||||
PhysicalAddress::from(virt.0 - T::OFFSET)
|
||||
}
|
||||
}
|
||||
|
||||
// Formatting
|
||||
impl<T: AddressSpace> fmt::Debug for VirtualAddress<T> {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "<{} {:#018x}>", T::NAME, self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AddressSpace> VirtualAddress<T> {
|
||||
#[inline(always)]
|
||||
pub const fn null() -> Self {
|
||||
Self(0, PhantomData)
|
||||
}
|
||||
|
||||
pub fn try_subtract(self, p: usize) -> Option<Self> {
|
||||
let (res, overflow) = self.0.overflowing_sub(p);
|
||||
if overflow || res < T::OFFSET || res >= T::LIMIT {
|
||||
None
|
||||
} else {
|
||||
Some(Self(res, PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn diff(start: Self, end: Self) -> isize {
|
||||
if end >= start {
|
||||
isize::try_from(end.0 - start.0).expect("Address subtraction overflowed")
|
||||
} else {
|
||||
-isize::try_from(start.0 - end.0).expect("Address subtraction overflowed")
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn try_diff(start: Self, end: Self) -> Option<isize> {
|
||||
if end >= start {
|
||||
isize::try_from(end.0 - start.0).ok()
|
||||
} else {
|
||||
isize::try_from(start.0 - end.0).map(Neg::neg).ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub unsafe fn as_slice_mut<U>(self, count: usize) -> &'static mut [U] {
|
||||
core::slice::from_raw_parts_mut(self.0 as *mut _, count)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn as_mut_ptr<U>(self) -> *mut U {
|
||||
self.0 as *mut U
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub unsafe fn as_mut<U>(self) -> &'static mut U {
|
||||
&mut *(self.0 as *mut U)
|
||||
}
|
||||
}
|
||||
|
||||
// Step
|
||||
impl<T: AddressSpace> Step for VirtualAddress<T> {
|
||||
#[inline]
|
||||
fn steps_between(_p0: &Self, _p1: &Self) -> Option<usize> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn forward_checked(p: Self, steps: usize) -> Option<Self> {
|
||||
p.0.checked_add(steps).map(Self::from)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn backward_checked(p: Self, steps: usize) -> Option<Self> {
|
||||
p.0.checked_sub(steps).map(Self::from)
|
||||
}
|
||||
}
|
||||
|
||||
// Conversion into VirtualAddress
|
||||
impl<T: AddressSpace> From<usize> for VirtualAddress<T> {
|
||||
#[inline(always)]
|
||||
fn from(p: usize) -> Self {
|
||||
if T::LIMIT > 0 {
|
||||
assert!(p >= T::OFFSET && p < T::LIMIT);
|
||||
}
|
||||
Self(p, PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
impl<T: AddressSpace> From<u64> for VirtualAddress<T> {
|
||||
#[inline(always)]
|
||||
fn from(p: u64) -> Self {
|
||||
Self::from(p as usize)
|
||||
}
|
||||
}
|
||||
|
||||
// Conversion from VirtualAddress
|
||||
impl<T: AddressSpace> From<VirtualAddress<T>> for usize {
|
||||
#[inline(always)]
|
||||
fn from(p: VirtualAddress<T>) -> Self {
|
||||
p.0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
impl<T: AddressSpace> From<VirtualAddress<T>> for u64 {
|
||||
#[inline(always)]
|
||||
fn from(p: VirtualAddress<T>) -> Self {
|
||||
p.0 as u64
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::PhysicalAddress;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, PartialOrd)]
|
||||
struct S0;
|
||||
impl AddressSpace for S0 {
|
||||
const NAME: &'static str = "S0";
|
||||
const OFFSET: usize = 0x8000;
|
||||
const LIMIT: usize = Self::OFFSET + 0x4000;
|
||||
}
|
||||
impl TrivialConvert for S0 {}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, PartialOrd)]
|
||||
struct S1;
|
||||
impl AddressSpace for S1 {
|
||||
const NAME: &'static str = "S1";
|
||||
const OFFSET: usize = 0;
|
||||
const LIMIT: usize = 0;
|
||||
}
|
||||
impl NoTrivialConvert for S1 {}
|
||||
|
||||
#[test]
|
||||
fn test_trivial_construct_valid() {
|
||||
for i in 0x8000usize..0xC000 {
|
||||
VirtualAddress::<S0>::from(i);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_trivial_construct_invalid_0() {
|
||||
let _v = VirtualAddress::<S0>::from(0x1234usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_trivial_construct_invalid_1() {
|
||||
let _v = VirtualAddress::<S0>::from(0xD123usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_trivial_convert() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8123usize);
|
||||
assert_eq!(
|
||||
PhysicalAddress::from(v0),
|
||||
PhysicalAddress::from(0x123usize)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_valid() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
assert_eq!(VirtualAddress::<S0>::from(0x8223usize), v0 + 0x123usize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_add_overflow() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
let _v = v0 - 0xF123usize;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_subtract_valid() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
assert_eq!(VirtualAddress::<S0>::from(0x8023usize), v0 - 0xDDusize);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_subtract_overflow() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
let _v = v0 - 0x1234usize;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_subtract() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
assert_eq!(v0.try_subtract(0x1234usize), None);
|
||||
assert_eq!(
|
||||
v0.try_subtract(0x12usize),
|
||||
Some(VirtualAddress::<S0>::from(0x80EEusize))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_assign_valid() {
|
||||
let mut v0 = VirtualAddress::<S0>::from(0x8100usize);
|
||||
v0 += 0x123usize;
|
||||
assert_eq!(v0, VirtualAddress::<S0>::from(0x8223usize));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sub_assign_valid() {
|
||||
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
|
||||
v0 -= 0x123usize;
|
||||
assert_eq!(v0, VirtualAddress::<S0>::from(0x81FEusize));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_sub_assign_overflow() {
|
||||
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
|
||||
v0 -= 0x1234usize;
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_add_assign_overflow() {
|
||||
let mut v0 = VirtualAddress::<S0>::from(0x8321usize);
|
||||
v0 += 0xF234usize;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8123usize);
|
||||
assert_eq!(&format!("{:?}", v0), "<S0 0x0000000000008123>");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_diff() {
|
||||
let v0 = VirtualAddress::<S0>::from(0x8123usize);
|
||||
let v1 = VirtualAddress::<S0>::from(0x8321usize);
|
||||
|
||||
// Ok
|
||||
assert_eq!(VirtualAddress::diff(v0, v1), 510);
|
||||
assert_eq!(VirtualAddress::diff(v1, v0), -510);
|
||||
assert_eq!(VirtualAddress::diff(v0, v0), 0);
|
||||
assert_eq!(VirtualAddress::diff(v1, v1), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_diff_overflow() {
|
||||
let v0 = VirtualAddress::<S1>::from(0usize);
|
||||
let v1 = VirtualAddress::<S1>::from(usize::MAX);
|
||||
|
||||
let _v = VirtualAddress::diff(v0, v1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_step() {
|
||||
let mut count = 0;
|
||||
for _ in VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize) {
|
||||
count += 1;
|
||||
}
|
||||
assert_eq!(count, 0x300);
|
||||
|
||||
let mut count = 0;
|
||||
for _ in (VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize))
|
||||
.step_by(0x100)
|
||||
{
|
||||
count += 1;
|
||||
}
|
||||
assert_eq!(count, 3);
|
||||
|
||||
let mut count = 0;
|
||||
for _ in
|
||||
(VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize)).rev()
|
||||
{
|
||||
count += 1;
|
||||
}
|
||||
assert_eq!(count, 0x300);
|
||||
|
||||
let mut count = 0;
|
||||
for _ in (VirtualAddress::<S0>::from(0x8000usize)..VirtualAddress::<S0>::from(0x8300usize))
|
||||
.rev()
|
||||
.step_by(0x100)
|
||||
{
|
||||
count += 1;
|
||||
}
|
||||
assert_eq!(count, 3);
|
||||
}
|
||||
}
|
8
error/Cargo.toml
Normal file
8
error/Cargo.toml
Normal file
@ -0,0 +1,8 @@
|
||||
[package]
|
||||
name = "error"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
6
error/src/lib.rs
Normal file
6
error/src/lib.rs
Normal file
@ -0,0 +1,6 @@
|
||||
#![no_std]
|
||||
|
||||
#[derive(PartialEq, Debug, Clone, Copy)]
|
||||
pub enum Errno {
|
||||
OutOfMemory
|
||||
}
|
@ -7,18 +7,23 @@ SECTIONS {
|
||||
|
||||
.text : AT(. - KERNEL_OFFSET) ALIGN(4K) {
|
||||
KEEP(*(.text.boot))
|
||||
*(.text)
|
||||
*(.text*)
|
||||
}
|
||||
|
||||
.rodata : AT(. - KERNEL_OFFSET) ALIGN(4K) {
|
||||
*(.rodata)
|
||||
*(.rodata*)
|
||||
}
|
||||
|
||||
.data : AT(. - KERNEL_OFFSET) ALIGN(4K) {
|
||||
*(.data)
|
||||
*(.data*)
|
||||
}
|
||||
|
||||
.bss : AT(. - KERNEL_OFFSET) ALIGN(4K) {
|
||||
*(.bss)
|
||||
*(COMMON)
|
||||
*(.bss*)
|
||||
|
||||
. = ALIGN(4K);
|
||||
PROVIDE(__kernel_end = .);
|
||||
PROVIDE(__kernel_end_phys = . - KERNEL_OFFSET);
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
symbol-file target/aarch64-unknown-none-rpi3b/debug/osdev4
|
||||
symbol-file target/aarch64-unknown-none-rpi3b/debug/kernel
|
||||
target remote :1234
|
||||
layout asm
|
||||
layout regs
|
||||
|
@ -2,4 +2,4 @@
|
||||
build-std = ["core", "compiler_builtins"]
|
||||
|
||||
[build]
|
||||
target = "etc/aarch64-unknown-none-rpi3b.json"
|
||||
target = "../etc/aarch64-unknown-none-rpi3b.json"
|
11
kernel/Cargo.toml
Normal file
11
kernel/Cargo.toml
Normal file
@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "kernel"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
address = { path = "../address" }
|
||||
error = { path = "../error" }
|
||||
spin = "0.9.2"
|
@ -1,8 +1,4 @@
|
||||
#![feature(global_asm, llvm_asm)]
|
||||
#![no_std]
|
||||
#![no_main]
|
||||
|
||||
pub mod boot;
|
||||
global_asm!(include_str!("vectors.S"));
|
||||
|
||||
#[repr(C)]
|
||||
struct ExceptionContext {
|
||||
@ -34,24 +30,7 @@ struct ExceptionContext {
|
||||
|
||||
#[no_mangle]
|
||||
extern "C" fn exc_handler(context: ExceptionContext) -> ! {
|
||||
loop {}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
extern "C" fn kernel_main() -> ! {
|
||||
unsafe {
|
||||
let v = *(0x1234 as *mut u64);
|
||||
}
|
||||
|
||||
loop {
|
||||
unsafe {
|
||||
llvm_asm!("wfe");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use core::panic::PanicInfo;
|
||||
#[panic_handler]
|
||||
fn panic_handler(_pi: &PanicInfo) -> ! {
|
||||
debug!("Unhandled exception");
|
||||
|
||||
loop {}
|
||||
}
|
1
kernel/src/arch/mod.rs
Normal file
1
kernel/src/arch/mod.rs
Normal file
@ -0,0 +1 @@
|
||||
pub mod exception;
|
75
kernel/src/arch/vectors.S
Normal file
75
kernel/src/arch/vectors.S
Normal file
@ -0,0 +1,75 @@
|
||||
.section .rodata
|
||||
.global el1_vectors
|
||||
.p2align 7
|
||||
el1_vectors:
|
||||
// Current level with SP_EL0
|
||||
vec_el1_sp_el0_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_serror:
|
||||
b .
|
||||
// Current level with SL_ELx, x > 0
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_sync:
|
||||
sub sp, sp, #192
|
||||
stp x0, x1, [sp, #0]
|
||||
stp x2, x3, [sp, #16]
|
||||
stp x4, x5, [sp, #32]
|
||||
stp x6, x7, [sp, #48]
|
||||
stp x8, x9, [sp, #64]
|
||||
stp x10, x11, [sp, #80]
|
||||
stp x12, x13, [sp, #96]
|
||||
stp x14, x15, [sp, #112]
|
||||
stp x16, x17, [sp, #128]
|
||||
stp x18, x29, [sp, #144]
|
||||
stp x30, xzr, [sp, #160]
|
||||
mrs x0, esr_el1
|
||||
mrs x1, far_el1
|
||||
stp x0, x1, [sp, #176]
|
||||
|
||||
mov x0, sp
|
||||
|
||||
bl exc_handler
|
||||
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_serror:
|
||||
b .
|
||||
// Lower EL, AArch64
|
||||
.p2align 7
|
||||
vec_el0_aa64_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_serror:
|
||||
b .
|
||||
// Lower EL, AArch32
|
||||
.p2align 7
|
||||
vec_el0_aa32_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_serror:
|
||||
b .
|
||||
|
@ -17,9 +17,9 @@
|
||||
.set MAIR_EL1_INNER_NC, (4 << 4)
|
||||
.set MAIR_EL1_DEVICE_nGRE, 0
|
||||
.set MAIR_EL1_DEVICE, 0
|
||||
|
||||
.set TCR_EL1_IPS_48, (5 << 32)
|
||||
.set TCR_EL1_TG1_4K, (2 << 30)
|
||||
.set CPACR_EL1_FPEN_TRAP_NONE, (3 << 20)
|
||||
|
||||
.cpu cortex-a57
|
||||
.section .text.boot
|
||||
@ -28,11 +28,58 @@ _entry:
|
||||
mrs x0, mpidr_el1
|
||||
ands x0, x0, #3
|
||||
beq _entry_bsp
|
||||
|
||||
1:
|
||||
b 1b
|
||||
adr x8, ap_wakeup_lock
|
||||
mov x9, #1
|
||||
_ap_loop:
|
||||
// Will acquire exclusive access to [x8]
|
||||
ldaxr x0, [x8]
|
||||
// Will try to write 1 into [x8], failing if
|
||||
// any other PE has acquired exclusive access at this point
|
||||
stxr w1, x9, [x8]
|
||||
// Store failed, jump back
|
||||
cbnz w1, _ap_loop
|
||||
// [x8] data wasn't zero, jump back
|
||||
cbnz x0, _ap_loop
|
||||
_ap_wakeup:
|
||||
adr x0, ap_init_value
|
||||
ldr x0, [x0]
|
||||
|
||||
mov x1, #0xFFFFFF8000000000
|
||||
ldr x10, [x0, #8]
|
||||
add x0, x0, x1
|
||||
msr tpidr_el1, x0
|
||||
adr x11, kernel_ap_main
|
||||
add x11, x11, x1
|
||||
|
||||
b _entry_ap
|
||||
|
||||
.section .text
|
||||
_entry_bsp:
|
||||
// Setup paging tables
|
||||
// This is done once for all PEs
|
||||
adr x0, kernel_l1
|
||||
mov x1, #(1 << 0) // Present
|
||||
orr x1, x1, #(1 << 10) // Accessed
|
||||
orr x1, x1, #(3 << 8) // Inner shareable
|
||||
// orr x2, x2, #(0 << 2) // MAIR[0]
|
||||
str x1, [x0]
|
||||
|
||||
mov x1, #(1 << 0) // Present
|
||||
orr x1, x1, #(1 << 10) // Accessed
|
||||
orr x1, x1, #(3 << 8) // Inner shareable
|
||||
orr x1, x1, #(1 << 2) // MAIR[1]
|
||||
str x1, [x0, #8]
|
||||
|
||||
// Load BSP stack
|
||||
mov x0, #0xFFFFFF8000000000
|
||||
adr x10, bsp_stack_top
|
||||
adr x11, kernel_bsp_main
|
||||
add x10, x10, x0
|
||||
add x11, x11, x0
|
||||
_entry_ap:
|
||||
// NOTE the following code must not clobber: x10, x11
|
||||
// EL3 check
|
||||
mrs x0, CurrentEL
|
||||
lsr x0, x0, #2
|
||||
@ -68,32 +115,6 @@ _entry_bsp:
|
||||
|
||||
eret
|
||||
1:
|
||||
// Setup paging tables
|
||||
adr x8, kernel_l1
|
||||
mov x2, #(1 << 0) // Present
|
||||
orr x2, x2, #(1 << 10) // Accessed
|
||||
orr x2, x2, #(3 << 8) // Inner shareable
|
||||
// orr x2, x2, #(0 << 2) // MAIR[0]
|
||||
str x2, [x8]
|
||||
|
||||
mov x2, #(1 << 0) // Present
|
||||
orr x2, x2, #(1 << 10) // Accessed
|
||||
orr x2, x2, #(3 << 8) // Inner shareable
|
||||
orr x2, x2, #(1 << 2) // MAIR[1]
|
||||
str x2, [x8, #8]
|
||||
|
||||
// mov x1, #512
|
||||
//1:
|
||||
// sub x1, x1, #1
|
||||
//
|
||||
// lsl x0, x1, #30
|
||||
// orr x0, x0, x2
|
||||
//
|
||||
// str x0, [x8, x1, lsl #3]
|
||||
//
|
||||
// cmp x1, xzr
|
||||
// bne 1b
|
||||
|
||||
// Setup the MMU
|
||||
mov x0, #(MAIR_EL1_INNER_NC | MAIR_EL1_OUTER_NC)
|
||||
msr mair_el1, x0
|
||||
@ -134,89 +155,18 @@ upper_half:
|
||||
// Shoot off the legs
|
||||
msr ttbr0_el1, xzr
|
||||
|
||||
// Disable trapping for FP instructions
|
||||
mrs x0, cpacr_el1
|
||||
orr x0, x0, CPACR_EL1_FPEN_TRAP_NONE
|
||||
msr cpacr_el1, x0
|
||||
|
||||
mov sp, x10
|
||||
|
||||
adr x0, el1_vectors
|
||||
msr vbar_el1, x0
|
||||
|
||||
adr x0, bsp_stack_top
|
||||
mov sp, x0
|
||||
br x11
|
||||
|
||||
bl kernel_main
|
||||
|
||||
b .
|
||||
|
||||
.section .rodata
|
||||
.p2align 7
|
||||
el1_vectors:
|
||||
// Current level with SP_EL0
|
||||
vec_el1_sp_el0_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_el0_serror:
|
||||
b .
|
||||
// Current level with SL_ELx, x > 0
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_sync:
|
||||
sub sp, sp, #192
|
||||
stp x0, x1, [sp, #0]
|
||||
stp x2, x3, [sp, #16]
|
||||
stp x4, x5, [sp, #32]
|
||||
stp x6, x7, [sp, #48]
|
||||
stp x8, x9, [sp, #64]
|
||||
stp x10, x11, [sp, #80]
|
||||
stp x12, x13, [sp, #96]
|
||||
stp x14, x15, [sp, #112]
|
||||
stp x16, x17, [sp, #128]
|
||||
stp x18, x29, [sp, #144]
|
||||
stp x30, xzr, [sp, #160]
|
||||
mrs x0, esr_el1
|
||||
mrs x1, far_el1
|
||||
stp x0, x1, [sp, #176]
|
||||
|
||||
mov x0, sp
|
||||
|
||||
bl exc_handler
|
||||
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el1_sp_elx_serror:
|
||||
b .
|
||||
// Lower EL, AArch64
|
||||
.p2align 7
|
||||
vec_el0_aa64_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa64_serror:
|
||||
b .
|
||||
// Lower EL, AArch32
|
||||
.p2align 7
|
||||
vec_el0_aa32_sync:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_irq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_fiq:
|
||||
b .
|
||||
.p2align 7
|
||||
vec_el0_aa32_serror:
|
||||
b .
|
||||
|
||||
.section .bss
|
||||
.p2align 4
|
||||
@ -226,3 +176,16 @@ bsp_stack_top:
|
||||
.p2align 12
|
||||
kernel_l1:
|
||||
.skip 4096
|
||||
|
||||
.p2align 4
|
||||
.global ap_init_value
|
||||
ap_init_value:
|
||||
// AP stack pointer
|
||||
.skip 8
|
||||
|
||||
.section .data
|
||||
.p2align 4
|
||||
.global ap_wakeup_lock
|
||||
ap_wakeup_lock:
|
||||
// Locked by default
|
||||
.quad 1
|
105
kernel/src/cpu.rs
Normal file
105
kernel/src/cpu.rs
Normal file
@ -0,0 +1,105 @@
|
||||
use crate::{
|
||||
mem::phys::{self, PageUsage},
|
||||
proc::{self, Scheduler},
|
||||
KernelSpace,
|
||||
};
|
||||
use address::{PhysicalAddress, VirtualAddress};
|
||||
use core::mem::MaybeUninit;
|
||||
use core::sync::atomic::{Ordering, AtomicUsize};
|
||||
|
||||
#[repr(C)]
|
||||
pub struct Cpu {
|
||||
pub cpu_id: u32, // 0x00
|
||||
_pad0: u32, // 0x04
|
||||
stack_top: usize, // 0x08
|
||||
//
|
||||
pub scheduler: Scheduler
|
||||
}
|
||||
|
||||
const MAX_CPU: usize = 4;
|
||||
static mut CPUS: [MaybeUninit<Cpu>; MAX_CPU] = MaybeUninit::uninit_array();
|
||||
static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
|
||||
|
||||
#[inline(always)]
|
||||
pub fn get_cpu() -> &'static mut Cpu {
|
||||
unsafe {
|
||||
let mut out: usize;
|
||||
llvm_asm!("mrs $0, tpidr_el1":"=r"(out));
|
||||
&mut *(out as *mut _)
|
||||
}
|
||||
}
|
||||
|
||||
fn set_cpu(cpu: *mut Cpu) {
|
||||
unsafe {
|
||||
llvm_asm!("msr tpidr_el1, $0"::"r"(cpu));
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
extern "C" fn kernel_ap_main() -> ! {
|
||||
let cpu = get_cpu();
|
||||
debug!("cpu{} awakened\n", cpu.cpu_id);
|
||||
|
||||
CPU_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
wakeup_single_ap();
|
||||
|
||||
proc::enter();
|
||||
}
|
||||
|
||||
pub fn wakeup_single_ap() {
|
||||
extern "C" {
|
||||
static mut ap_wakeup_lock: u64;
|
||||
static mut ap_init_value: u64;
|
||||
}
|
||||
|
||||
let this_cpu = get_cpu();
|
||||
|
||||
// Allocate a stack for the AP
|
||||
let stack_bottom_phys = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
|
||||
let stack_bottom = VirtualAddress::<KernelSpace>::from(stack_bottom_phys);
|
||||
|
||||
// Allocate a new CPU struct
|
||||
let index = CPU_COUNT.load(Ordering::Acquire);
|
||||
|
||||
if index == MAX_CPU {
|
||||
debug!("cpu{}: reached cpu limit\n", this_cpu.cpu_id);
|
||||
return;
|
||||
}
|
||||
|
||||
debug!("cpu{}: waking up cpu{}\n", this_cpu.cpu_id, index);
|
||||
|
||||
unsafe {
|
||||
CPUS[index].write(Cpu {
|
||||
cpu_id: index as u32,
|
||||
_pad0: 0,
|
||||
stack_top: (stack_bottom + 4 * 4096).into(),
|
||||
scheduler: Scheduler::new()
|
||||
});
|
||||
|
||||
let cpu_addr = VirtualAddress::<KernelSpace>::from(CPUS[index].as_mut_ptr() as u64);
|
||||
let cpu_addr_phys = PhysicalAddress::from(cpu_addr);
|
||||
|
||||
// Wakeup a single AP for test
|
||||
core::ptr::write_volatile(&mut ap_init_value, cpu_addr_phys.into());
|
||||
// Ensure all writes are complete before waking up an AP
|
||||
llvm_asm!("dsb sy");
|
||||
core::ptr::write_volatile(&mut ap_wakeup_lock, 0);
|
||||
}
|
||||
|
||||
// Any further CPUs will be waken up by the newly started AP
|
||||
}
|
||||
|
||||
pub fn bsp_init() {
|
||||
unsafe {
|
||||
CPUS[0].write(Cpu {
|
||||
cpu_id: 0,
|
||||
_pad0: 0,
|
||||
stack_top: 0,
|
||||
scheduler: Scheduler::new()
|
||||
});
|
||||
set_cpu(CPUS[0].as_mut_ptr());
|
||||
|
||||
debug!("BSP cpu: {:p}\n", get_cpu());
|
||||
}
|
||||
}
|
28
kernel/src/debug.rs
Normal file
28
kernel/src/debug.rs
Normal file
@ -0,0 +1,28 @@
|
||||
use core::fmt;
|
||||
|
||||
fn uart_send(ch: u8) {
|
||||
unsafe {
|
||||
core::ptr::write_volatile(0xFFFFFF803F215040 as *mut u32, ch as u32);
|
||||
}
|
||||
}
|
||||
|
||||
struct Uart;
|
||||
|
||||
impl fmt::Write for Uart {
|
||||
fn write_str(&mut self, s: &str) -> fmt::Result {
|
||||
for ch in s.bytes() {
|
||||
uart_send(ch);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! debug {
|
||||
($($args:tt)+) => ($crate::debug::debug_fmt(format_args!($($args)+)))
|
||||
}
|
||||
|
||||
pub fn debug_fmt(args: fmt::Arguments<'_>) {
|
||||
use fmt::Write;
|
||||
write!(Uart {}, "{}", args).unwrap();
|
||||
}
|
63
kernel/src/main.rs
Normal file
63
kernel/src/main.rs
Normal file
@ -0,0 +1,63 @@
|
||||
#![feature(global_asm, llvm_asm, const_panic, maybe_uninit_uninit_array)]
|
||||
#![no_std]
|
||||
#![no_main]
|
||||
|
||||
#[macro_use]
|
||||
pub mod debug;
|
||||
|
||||
pub mod arch;
|
||||
pub mod boot;
|
||||
pub mod mem;
|
||||
pub mod proc;
|
||||
// XXX XXX
|
||||
pub mod cpu;
|
||||
|
||||
pub use mem::KernelSpace;
|
||||
|
||||
use address::PhysicalAddress;
|
||||
use mem::phys::UsableMemory;
|
||||
#[derive(Clone)]
|
||||
struct SimpleMemoryIterator<'a> {
|
||||
inner: Option<&'a UsableMemory>,
|
||||
}
|
||||
impl Iterator for SimpleMemoryIterator<'_> {
|
||||
type Item = UsableMemory;
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if let Some(item) = self.inner {
|
||||
self.inner = None;
|
||||
Some(item.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
extern "C" fn kernel_bsp_main() -> ! {
    // Entry point for the bootstrap processor, reached from early boot code.
    // Sets up per-CPU state and the physical memory manager, wakes one
    // secondary core, then enters the scheduler. Never returns.

    // TODO determine VC/ARM split
    cpu::bsp_init();

    // Hard-coded memory map: [0, 0x30000000) = 768 MiB.
    // NOTE(review): presumably the remainder of RAM belongs to the
    // VideoCore — confirm once the ARM/VC split is actually queried.
    let memory = UsableMemory {
        start: PhysicalAddress::from(0usize),
        end: PhysicalAddress::from(0x30000000usize),
    };
    let iter = SimpleMemoryIterator {
        inner: Some(&memory),
    };
    unsafe {
        mem::phys::initialize(iter);
    }

    debug!("BSP init finished\n");

    cpu::wakeup_single_ap();
    proc::enter();
}
|
||||
|
||||
use core::panic::PanicInfo;

/// Kernel panic handler: dumps the panic info over the debug UART, then
/// parks this CPU in a busy loop (no unwinding in a freestanding kernel).
#[panic_handler]
fn panic_handler(pi: &PanicInfo) -> ! {
    debug!("PANIC: {:?}\n", pi);

    loop {}
}
|
0
kernel/src/mem/address.rs
Normal file
0
kernel/src/mem/address.rs
Normal file
21
kernel/src/mem/mod.rs
Normal file
21
kernel/src/mem/mod.rs
Normal file
@ -0,0 +1,21 @@
|
||||
use address::{AddressSpace, PhysicalAddress, TrivialConvert};
|
||||
|
||||
pub mod phys;
|
||||
|
||||
/// Address-space descriptor for the higher-half kernel mapping.
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct KernelSpace;
impl AddressSpace for KernelSpace {
    const NAME: &'static str = "kernel";
    // Virtual base of the kernel's linear mapping of physical memory.
    const OFFSET: usize = 0xFFFFFF8000000000;
    // One past the last valid kernel virtual address: base + 2 GiB.
    const LIMIT: usize = 0xFFFFFF8000000000 + (2 << 30);
}
// Physical <-> kernel-virtual conversion is a trivial offset add/subtract.
impl TrivialConvert for KernelSpace {}
|
||||
|
||||
/// Size of one physical/virtual memory page in bytes (4 KiB).
pub const PAGE_SIZE: usize = 0x1000;
|
||||
|
||||
/// Returns the physical address of the first byte after the kernel image.
///
/// `__kernel_end_phys` is a symbol provided by the linker script; only its
/// *address* is meaningful — its contents are never read.
pub fn kernel_end_phys() -> PhysicalAddress {
    extern "C" {
        static __kernel_end_phys: u8;
    }
    PhysicalAddress::from(unsafe { &__kernel_end_phys } as *const _ as usize)
}
|
95
kernel/src/mem/phys/manager.rs
Normal file
95
kernel/src/mem/phys/manager.rs
Normal file
@ -0,0 +1,95 @@
|
||||
use super::{PageInfo, PageUsage};
|
||||
use crate::{KernelSpace, mem::PAGE_SIZE};
|
||||
use address::{PhysicalAddress, VirtualAddress};
|
||||
use core::mem::{self, MaybeUninit};
|
||||
use error::Errno;
|
||||
use spin::Mutex;
|
||||
|
||||
/// Interface of a physical page allocator.
///
/// # Safety
///
/// NOTE(review): the trait is `unsafe` but its contract is unstated —
/// presumably implementors must return only page-aligned, otherwise-unused
/// physical pages. Confirm and document the exact invariant.
pub unsafe trait Manager {
    /// Allocates a single page, marking it with usage `pu`.
    fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno>;
    /// Allocates `count` physically contiguous pages, each marked with `pu`.
    fn alloc_contiguous_pages(
        &mut self,
        pu: PageUsage,
        count: usize,
    ) -> Result<PhysicalAddress, Errno>;
    /// Releases a previously allocated page.
    fn free_page(&mut self, page: PhysicalAddress) -> Result<(), Errno>;

    // TODO status()
}
|
||||
|
||||
/// First-fit physical page allocator backed by a flat array of per-page
/// descriptors (one `Mutex<PageInfo>` per physical page).
pub struct SimpleManager {
    // Lives in memory carved out by `phys::initialize`; `'static` because
    // the page array is never deallocated.
    pages: &'static mut [Mutex<PageInfo>],
}
|
||||
|
||||
impl SimpleManager {
    /// Builds the manager over `count` page descriptors stored at physical
    /// address `at`; every page starts `Reserved` until `add_page` is called.
    ///
    /// # Safety
    /// `at` must point to `count * size_of::<Mutex<PageInfo>>()` bytes of
    /// otherwise-unused memory that is mapped in kernel space and never
    /// aliased elsewhere.
    pub(super) unsafe fn initialize(at: PhysicalAddress, count: usize) -> Self {
        let pages: &'static mut [Mutex<PageInfo>] =
            VirtualAddress::<KernelSpace>::from(at).as_slice_mut(count);

        // The backing memory is uninitialized: `mem::replace` + `mem::forget`
        // installs each descriptor without running `Drop` on the old garbage.
        for index in 0..count {
            mem::forget(mem::replace(
                &mut pages[index],
                Mutex::new(PageInfo {
                    refcount: 0,
                    usage: PageUsage::Reserved,
                }),
            ));
        }

        Self { pages }
    }

    /// Marks the page at `addr` as available for allocation.
    ///
    /// # Safety
    /// `addr` must be a page covered by this manager's descriptor array and
    /// not currently in use anywhere.
    pub(super) unsafe fn add_page(&mut self, addr: PhysicalAddress) {
        let mut page = self.pages[addr.page_index()].lock();
        assert!(page.refcount == 0 && page.usage == PageUsage::Reserved);
        page.usage = PageUsage::Available;

        // Scrub the page so no stale data leaks to its next owner.
        // (The page is zeroed — the old "fill with trash" comment was wrong.)
        let slice: &mut [u8; 4096] = VirtualAddress::<KernelSpace>::from(addr).as_mut();
        slice.fill(0);
    }
}
|
||||
|
||||
unsafe impl Manager for SimpleManager {
|
||||
fn alloc_page(&mut self, pu: PageUsage) -> Result<PhysicalAddress, Errno> {
|
||||
for index in 0..self.pages.len() {
|
||||
let mut page = self.pages[index].lock();
|
||||
|
||||
if page.usage == PageUsage::Available {
|
||||
page.usage = pu;
|
||||
page.refcount = 1;
|
||||
return Ok(PhysicalAddress::from(index * PAGE_SIZE));
|
||||
}
|
||||
}
|
||||
Err(Errno::OutOfMemory)
|
||||
}
|
||||
fn alloc_contiguous_pages(
|
||||
&mut self,
|
||||
pu: PageUsage,
|
||||
count: usize,
|
||||
) -> Result<PhysicalAddress, Errno> {
|
||||
'l0: for i in 0..self.pages.len() {
|
||||
for j in 0..count {
|
||||
if self.pages[i + j].lock().usage != PageUsage::Available {
|
||||
continue 'l0;
|
||||
}
|
||||
}
|
||||
|
||||
for j in 0..count {
|
||||
let mut page = self.pages[i + j].lock();
|
||||
assert!(page.usage == PageUsage::Available);
|
||||
page.usage = pu;
|
||||
page.refcount = 1;
|
||||
}
|
||||
|
||||
return Ok(PhysicalAddress::from(i * PAGE_SIZE));
|
||||
}
|
||||
Err(Errno::OutOfMemory)
|
||||
}
|
||||
fn free_page(&mut self, _page: PhysicalAddress) -> Result<(), Errno> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
// Global physical memory manager; `None` until `phys::initialize` runs.
pub(super) static MANAGER: Mutex<Option<SimpleManager>> = Mutex::new(None);
|
118
kernel/src/mem/phys/mod.rs
Normal file
118
kernel/src/mem/phys/mod.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use super::PAGE_SIZE;
|
||||
use address::PhysicalAddress;
|
||||
use core::convert::TryFrom;
|
||||
use core::mem::size_of;
|
||||
use error::Errno;
|
||||
use spin::Mutex;
|
||||
|
||||
mod manager;
|
||||
use manager::{Manager, SimpleManager, MANAGER};
|
||||
mod reserved;
|
||||
pub use reserved::ReservedRegion;
|
||||
mod pbox;
|
||||
pub use pbox::PhysBox;
|
||||
|
||||
type ManagerImpl = SimpleManager;
|
||||
|
||||
/// What a physical page is currently used for.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum PageUsage {
    /// Not managed by the allocator (kernel image, page array, firmware…).
    Reserved,
    /// Free and allocatable.
    Available,
    /// Allocated for kernel use.
    Kernel,
}
|
||||
|
||||
/// Allocator bookkeeping; one instance per physical page.
pub struct PageInfo {
    // Number of owners; 0 while the page is Reserved or Available.
    refcount: usize,
    usage: PageUsage,
}
|
||||
|
||||
/// A half-open physical address range `[start, end)` usable as general RAM.
#[derive(Clone)]
pub struct UsableMemory {
    pub start: PhysicalAddress,
    pub end: PhysicalAddress,
}
|
||||
|
||||
// Cap on pages handed to the manager (1024 pages = 4 MiB).
// NOTE(review): looks like a temporary debug limit — pages beyond it stay
// Reserved, so only the first 4 MiB of free RAM is allocatable. Confirm.
const MAX_PAGES: usize = 1024;
|
||||
|
||||
pub fn alloc_page(pu: PageUsage) -> Result<PhysicalAddress, Errno> {
|
||||
MANAGER.lock().as_mut().unwrap().alloc_page(pu)
|
||||
}
|
||||
|
||||
pub fn alloc_contiguous_pages(pu: PageUsage, count: usize) -> Result<PhysicalAddress, Errno> {
|
||||
MANAGER
|
||||
.lock()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.alloc_contiguous_pages(pu, count)
|
||||
}
|
||||
|
||||
pub fn free_page(page: PhysicalAddress) -> Result<(), Errno> {
|
||||
MANAGER.lock().as_mut().unwrap().free_page(page)
|
||||
}
|
||||
|
||||
fn find_contiguous<T: Iterator<Item = UsableMemory>>(
|
||||
iter: T,
|
||||
count: usize,
|
||||
) -> Option<PhysicalAddress> {
|
||||
for region in iter {
|
||||
let mut collected = 0;
|
||||
let mut base_addr = None;
|
||||
|
||||
for addr in (region.start..region.end).step_by(PAGE_SIZE) {
|
||||
if reserved::is_reserved(addr) {
|
||||
collected = 0;
|
||||
base_addr = None;
|
||||
continue;
|
||||
}
|
||||
|
||||
if base_addr.is_none() {
|
||||
base_addr = Some(addr);
|
||||
}
|
||||
collected += 1;
|
||||
if collected == count {
|
||||
return base_addr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Bootstraps the global physical memory manager from a memory map.
///
/// # Safety
/// Must be called exactly once, before any page allocation, with `iter`
/// describing RAM that is not otherwise in use.
pub unsafe fn initialize<T: Iterator<Item = UsableMemory> + Clone>(iter: T) {
    // Step 1. Count available memory
    let mut total_pages = 0usize;
    for reg in iter.clone() {
        total_pages +=
            usize::try_from(PhysicalAddress::diff(reg.start, reg.end)).unwrap() / PAGE_SIZE;
    }
    // Pages needed to hold one descriptor per physical page, rounded up.
    // TODO maybe instead of size_of::<...> use Layout?
    let need_pages = ((total_pages * size_of::<Mutex<PageInfo>>()) + 0xFFF) / 0x1000;

    // Keep the kernel image itself out of the allocator.
    reserved::reserve_kernel();

    // Step 2. Allocate memory for page array
    let pages_base =
        find_contiguous(iter.clone(), need_pages).expect("Failed to allocate memory for page info");

    // ...and also reserve the pages backing the array itself.
    reserved::reserve_pages(pages_base, need_pages);

    // Step 3. Initialize the memory manager with available pages
    let mut manager = ManagerImpl::initialize(pages_base, total_pages);

    let mut usable_pages = 0usize;
    // NOTE(review): pages past the MAX_PAGES cap are never added and remain
    // Reserved — confirm this cap is intentional.
    'l0: for region in iter {
        for addr in (region.start..region.end).step_by(PAGE_SIZE) {
            if !reserved::is_reserved(addr) {
                manager.add_page(addr);
                usable_pages += 1;
                if usable_pages == MAX_PAGES {
                    break 'l0;
                }
            }
        }
    }

    debug!("{}K of usable physical memory\n", usable_pages * 4);

    // Publish the manager; the alloc_/free_ wrappers work from here on.
    *MANAGER.lock() = Some(manager);
}
|
25
kernel/src/mem/phys/pbox.rs
Normal file
25
kernel/src/mem/phys/pbox.rs
Normal file
@ -0,0 +1,25 @@
|
||||
use super::PageUsage;
|
||||
use address::PhysicalAddress;
|
||||
use error::Errno;
|
||||
|
||||
/// Owning handle for `count` contiguous physical pages starting at `base`;
/// the pages are returned to the allocator when the handle is dropped.
pub struct PhysBox {
    base: PhysicalAddress,
    count: usize,
}
|
||||
|
||||
impl PhysBox {
|
||||
pub fn new(pu: PageUsage) -> Result<Self, Errno> {
|
||||
Ok(Self {
|
||||
base: super::alloc_page(pu)?,
|
||||
count: 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PhysBox {
|
||||
fn drop(&mut self) {
|
||||
for p in 0..self.count {
|
||||
super::free_page(self.base + p * 0x1000).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
79
kernel/src/mem/phys/reserved.rs
Normal file
79
kernel/src/mem/phys/reserved.rs
Normal file
@ -0,0 +1,79 @@
|
||||
use crate::mem::{kernel_end_phys, PAGE_SIZE};
|
||||
use address::PhysicalAddress;
|
||||
use core::mem::MaybeUninit;
|
||||
use core::ptr::null_mut;
|
||||
|
||||
/// A physical address range `[start, end)` excluded from page allocation,
/// linked into a global intrusive singly-linked list of reservations.
pub struct ReservedRegion {
    pub start: PhysicalAddress,
    pub end: PhysicalAddress,
    // Next region in the list, or null at the tail.
    next: *mut ReservedRegion,
}
|
||||
|
||||
/// Iterator over the global reserved-region list.
// NOTE(review): declared but never constructed in this file — dead code
// unless used elsewhere in the crate.
pub struct ReservedRegionIterator {
    ptr: *mut ReservedRegion,
}
|
||||
|
||||
impl Iterator for ReservedRegionIterator {
|
||||
type Item = &'static mut ReservedRegion;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if let Some(item) = unsafe { self.ptr.as_mut() } {
|
||||
self.ptr = item.next;
|
||||
Some(item)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReservedRegion {
    /// Creates an unlinked reservation covering `[start, end)`.
    ///
    /// `const` so regions can live in statics; the alignment `assert!` in a
    /// const fn relies on the `const_panic` feature enabled in main.rs.
    pub const fn new(start: PhysicalAddress, end: PhysicalAddress) -> ReservedRegion {
        assert!(start.is_paligned() && end.is_paligned());
        ReservedRegion {
            start,
            end,
            next: null_mut()
        }
    }
}
|
||||
|
||||
// Head of the global reservation list (intrusive, push-front only).
static mut RESERVED_REGIONS_HEAD: *mut ReservedRegion = null_mut();
// Static storage for the two fixed reservations made during boot.
static mut RESERVED_REGION_KERNEL: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
static mut RESERVED_REGION_PAGES: MaybeUninit<ReservedRegion> = MaybeUninit::uninit();
|
||||
|
||||
/// Links `region` at the head of the global reservation list.
///
/// # Safety
/// `region` must point to a valid, initialized `ReservedRegion` that lives
/// for the rest of the kernel's lifetime. The list head is an
/// unsynchronized static, so callers must not race this function.
pub unsafe fn reserve(region: *mut ReservedRegion) {
    (*region).next = RESERVED_REGIONS_HEAD;
    RESERVED_REGIONS_HEAD = region;
}
|
||||
|
||||
/// Reserves everything from physical address 0 up to the end of the kernel
/// image, so boot memory and the kernel are never handed out as free pages.
pub(super) unsafe fn reserve_kernel() {
    RESERVED_REGION_KERNEL.write(ReservedRegion::new(
        PhysicalAddress::from(0usize),
        kernel_end_phys(),
    ));
    reserve(RESERVED_REGION_KERNEL.as_mut_ptr());
}
|
||||
|
||||
/// Reserves the `count` pages at `base` that hold the page-info array itself.
pub(super) unsafe fn reserve_pages(base: PhysicalAddress, count: usize) {
    RESERVED_REGION_PAGES.write(ReservedRegion::new(
        base,
        base + count * PAGE_SIZE
    ));
    reserve(RESERVED_REGION_PAGES.as_mut_ptr());
}
|
||||
|
||||
pub fn is_reserved(page: PhysicalAddress) -> bool {
|
||||
unsafe {
|
||||
let mut iter = RESERVED_REGIONS_HEAD;
|
||||
while !iter.is_null() {
|
||||
let region = &*iter;
|
||||
|
||||
if page >= region.start && page < region.end {
|
||||
return true;
|
||||
}
|
||||
|
||||
iter = region.next;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
41
kernel/src/proc/context.S
Normal file
41
kernel/src/proc/context.S
Normal file
@ -0,0 +1,41 @@
|
||||
.section .text
.global context_switch_to
.global context_switch

// First code executed by a fresh kernel task: Context::new_kernel pushes
// this symbol as the saved LR, so context_switch_to's final `ret` lands
// here. Stack layout on entry: [arg, pad, entry, stack_top].
context_enter_kernel:
    ldp x0, xzr, [sp, #0]       // x0 = task argument (padding discarded)
    ldp lr, x1, [sp, #16]       // lr = task entry point, x1 = task stack top
    mov sp, x1                  // switch to the task's own stack
    ret                         // "return" into the entry point

// void context_switch(Context *dst /* x0 */, Context *src /* x1 */)
context_switch:
    // Store old callee-saved regs on stack
    sub sp, sp, #96

    stp x19, x20, [sp, #0]
    stp x21, x22, [sp, #16]
    stp x23, x24, [sp, #32]
    stp x25, x26, [sp, #48]
    stp x27, x29, [sp, #64]
    stp xzr, lr, [sp, #80]      // xzr slot is padding (16-byte alignment)

    // Store old stack pointer into src->kernel_sp (offset 0)
    mov x19, sp
    str x19, [x1]
// Fallthrough entry: restores dst only, without saving the caller.
// void context_switch_to(Context *dst /* x0 */)
context_switch_to:
    // Load new stack from dst->kernel_sp (offset 0)
    ldr x0, [x0]
    mov sp, x0

    // Load new callee-saved regs from stack
    ldp x19, x20, [sp, #0]
    ldp x21, x22, [sp, #16]
    ldp x23, x24, [sp, #32]
    ldp x25, x26, [sp, #48]
    ldp x27, x29, [sp, #64]
    ldp xzr, lr, [sp, #80]

    add sp, sp, #96

    // Simulate/perform a return into the restored LR
    ret
|
77
kernel/src/proc/context.rs
Normal file
77
kernel/src/proc/context.rs
Normal file
@ -0,0 +1,77 @@
|
||||
use crate::{
|
||||
mem::phys::{self, PageUsage},
|
||||
KernelSpace,
|
||||
};
|
||||
use address::VirtualAddress;
|
||||
use core::mem::size_of;
|
||||
|
||||
global_asm!(include_str!("context.S"));
|
||||
|
||||
/// Saved execution state of a task. `repr(C)` keeps the field offsets
/// stable, since context.S reads/writes `kernel_sp` at offset 0.
#[repr(C)]
pub(super) struct Context {
    // Saved kernel stack pointer; loaded/stored by context_switch(_to).
    kernel_sp: VirtualAddress<KernelSpace>, // 0x00
    cpu_id: u32, // 0x08
}
|
||||
|
||||
/// Helper that pushes machine words downward onto a freshly allocated
/// kernel stack, building the frame the context-switch asm expects to pop.
struct StackBuilder {
    // Lowest valid address of the stack region (overflow guard).
    bp: VirtualAddress<KernelSpace>,
    // Current stack pointer; starts at bp + size and moves down.
    sp: VirtualAddress<KernelSpace>,
}
|
||||
|
||||
impl Context {
    /// Builds the initial saved context for a new kernel task that will
    /// start at `entry` with `arg` as its first argument (x0).
    ///
    /// The layout must mirror what context.S pops: 96 bytes of callee-saved
    /// registers consumed by context_switch_to, then the 32-byte frame
    /// consumed by context_enter_kernel.
    pub fn new_kernel(entry: usize, arg: usize) -> Context {
        // 4 pages (16 KiB) of kernel stack.
        // NOTE(review): never freed — free_page is still todo!().
        let kstack_phys = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
        let mut stack = unsafe { StackBuilder::new(kstack_phys.into(), 4096 * 4) };
        let stack_top = stack.sp;

        // Frame consumed by context_enter_kernel (plain `ret`, no ERET):
        stack.push(stack_top); // moved into SP before the task starts
        stack.push(entry); // loaded into LR: the task entry point
        stack.push(0usize); // padding
        stack.push(arg); // loaded into x0: the task argument

        // Frame consumed by context_switch_to:
        stack.push(context_enter_kernel as usize); // x30 LR
        stack.push(0usize); // padding
        stack.push(0usize); // x29
        stack.push(0usize); // x27
        stack.push(0usize); // x26
        stack.push(0usize); // x25
        stack.push(0usize); // x24
        stack.push(0usize); // x23
        stack.push(0usize); // x22
        stack.push(0usize); // x21
        stack.push(0usize); // x20
        stack.push(0usize); // x19

        Context {
            kernel_sp: stack.sp,
            // Not yet bound to any CPU.
            cpu_id: u32::MAX,
        }
    }
}
|
||||
|
||||
impl StackBuilder {
|
||||
pub unsafe fn new(bp: VirtualAddress<KernelSpace>, size: usize) -> Self {
|
||||
Self {
|
||||
bp,
|
||||
sp: bp + size,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push<A: Into<usize>>(&mut self, value: A) {
|
||||
if self.sp == self.bp {
|
||||
panic!("Stack overflow");
|
||||
}
|
||||
self.sp -= size_of::<usize>();
|
||||
unsafe {
|
||||
core::ptr::write(self.sp.as_mut_ptr(), value.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" {
    /// Loads `dst`'s saved context and jumps into it without saving the
    /// caller's state. Used to enter the very first task on a CPU.
    pub(super) fn context_switch_to(dst: *mut Context);
    /// Saves the current context into `src`, then resumes `dst`.
    pub(super) fn context_switch(dst: *mut Context, src: *mut Context);

    // Assembly trampoline used as the initial LR of new kernel tasks.
    fn context_enter_kernel();
}
|
||||
|
162
kernel/src/proc/mod.rs
Normal file
162
kernel/src/proc/mod.rs
Normal file
@ -0,0 +1,162 @@
|
||||
use crate::cpu::get_cpu;
|
||||
use core::mem::MaybeUninit;
|
||||
use core::ptr::null_mut;
|
||||
use spin::Mutex;
|
||||
|
||||
pub mod context;
|
||||
use context::{context_switch, context_switch_to, Context};
|
||||
|
||||
/// A schedulable kernel task.
struct Process {
    // Saved CPU state, handed to context_switch / context_switch_to.
    context: Context,

    // Intrusive circular run-queue links; null while the task is unqueued.
    sched_prev: *mut Process,
    sched_next: *mut Process,
}
|
||||
|
||||
/// Per-CPU round-robin scheduler over an intrusive circular run queue.
pub struct Scheduler {
    // Head of the circular queue of runnable processes (null when empty).
    queue_head: Mutex<*mut Process>,
    // Process currently executing on this CPU.
    current: Mutex<*mut Process>,
    // Built-in idle task, run when the queue is empty; written by init_idle.
    idle: MaybeUninit<Process>,
}
|
||||
|
||||
impl Process {
    /// Creates a kernel-mode process that will start at `entry` with `arg`,
    /// not yet linked into any run queue.
    fn new_kernel(entry: usize, arg: usize) -> Self {
        Self {
            context: Context::new_kernel(entry, arg),

            sched_prev: null_mut(),
            sched_next: null_mut(),
        }
    }

    // (Dead commented-out enter_initial/switch_to removed — superseded by
    // Scheduler::enter_process and Scheduler::switch_process.)
}
|
||||
|
||||
impl Scheduler {
    /// Creates an empty scheduler; `init_idle` must run before `enter`.
    pub fn new() -> Self {
        Self {
            queue_head: Mutex::new(null_mut()),
            current: Mutex::new(null_mut()),
            idle: MaybeUninit::uninit(),
        }
    }

    /// Appends `proc` to the tail of the circular run queue.
    ///
    /// # Safety
    /// `proc` must point to a valid, initialized process that is not already
    /// queued and that outlives its time in the queue.
    unsafe fn queue(&mut self, proc: *mut Process) {
        let mut lock = self.queue_head.lock();

        if !(*lock).is_null() {
            // Non-empty ring: splice in before the head (head->prev is the
            // tail of a circular list).
            let queue_tail = (**lock).sched_prev;
            (*queue_tail).sched_next = proc;
            (*proc).sched_prev = queue_tail;
            (**lock).sched_prev = proc;
            (*proc).sched_next = *lock;
        } else {
            // Empty: proc becomes a single-element ring.
            (*proc).sched_prev = proc;
            (*proc).sched_next = proc;
            *lock = proc;
        }
    }

    /// Marks `proc` current and jumps into it without saving any context.
    /// Only used for the very first task on a CPU.
    unsafe fn enter_process(&mut self, proc: *mut Process) -> ! {
        // The lock guard is a temporary, so `current` is unlocked again
        // before the switch.
        *self.current.lock() = proc;
        context_switch_to(&mut (*proc).context);
        panic!("This code should not run");
    }

    /// Saves `from`'s context, marks `to` current, and resumes `to`.
    unsafe fn switch_process(&mut self, from: *mut Process, to: *mut Process) {
        *self.current.lock() = to;
        context_switch(&mut (*to).context, &mut (*from).context);
    }

    /// Enters the first runnable process, or the idle task if the queue is
    /// empty. Never returns.
    unsafe fn enter(&mut self) -> ! {
        let mut lock = self.queue_head.lock();

        let proc = if let Some(first) = (*lock).as_mut() {
            first
        } else {
            self.idle.as_mut_ptr()
        };
        // Release the queue lock before switching; enter_process takes the
        // `current` lock itself.
        drop(lock);
        self.enter_process(proc);
    }

    /// Constructs the idle task. Must be called before enter()/sched().
    unsafe fn init_idle(&mut self) {
        self.idle.write(Process::new_kernel(idle_fn as usize, 0));
    }

    /// Round-robin reschedule: switches from the current process to its ring
    /// successor, falling back to the queue head, then the idle task.
    unsafe fn sched(&mut self) {
        let queue_lock = self.queue_head.lock();
        let current_lock = self.current.lock();
        let from = *current_lock;
        assert!(!from.is_null());
        let from = &mut *from;

        let to = if !from.sched_next.is_null() {
            from.sched_next
        } else if !(*queue_lock).is_null() {
            *queue_lock
        } else {
            self.idle.as_mut_ptr()
        };

        assert!(!to.is_null());
        // Both locks must be released before the context switch: the
        // switched-to task may call sched() itself and would deadlock.
        drop(queue_lock);
        drop(current_lock);

        self.switch_process(from, to);
    }
}
|
||||
|
||||
/// Idle task body: spins forever. Runs whenever the run queue is empty.
fn idle_fn(_arg: usize) {
    // `_arg` (underscore silences the unused-variable warning) is kept
    // because every task entry point receives the argument pushed by
    // Context::new_kernel in x0.
    loop {}
}
|
||||
|
||||
/// Voluntarily yields the current CPU to the next runnable process.
///
/// `#[no_mangle]` + `extern "C"` so assembly and other compilation units
/// can call it by name.
#[no_mangle]
pub extern "C" fn sched_yield() {
    let cpu = get_cpu();
    unsafe {
        cpu.scheduler.sched();
    }
}
|
||||
|
||||
fn f0(arg: usize) {
|
||||
loop {
|
||||
debug!("{}", arg);
|
||||
unsafe {
|
||||
sched_yield();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Static storage for the demo task(s) spawned by enter().
static mut C0: MaybeUninit<Process> = MaybeUninit::uninit();
// NOTE(review): C1 is declared but never initialized or queued — dead code?
static mut C1: MaybeUninit<Process> = MaybeUninit::uninit();
|
||||
|
||||
/// Final stage of CPU bring-up: seeds the run queue with a demo task (BSP
/// only), builds the idle task, and enters this CPU's scheduler.
/// Never returns.
pub fn enter() -> ! {
    unsafe {
        let cpu = get_cpu();

        if cpu.cpu_id == 0 {
            debug!("Setting up a task for cpu0\n");
            C0.write(Process::new_kernel(f0 as usize, 0));
            cpu.scheduler.queue(C0.assume_init_mut());
        }

        // Initialize the idle task
        cpu.scheduler.init_idle();

        debug!("Entering scheduler on cpu{}\n", cpu.cpu_id);
        cpu.scheduler.enter();
    }
}
|
0
src/lib.rs
Normal file
0
src/lib.rs
Normal file
Loading…
x
Reference in New Issue
Block a user