Initial commit
commit 6510e0674c
.gitignore (vendored), new file: 1 line
@@ -0,0 +1 @@
/target
Cargo.toml, new file: 21 lines
@@ -0,0 +1,21 @@
[package]
name = "yggdrasil-kernel"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
vfs = { path = "lib/vfs" }

aarch64-cpu = "9.3.1"
atomic_enum = "0.2.0"
bitflags = "2.3.3"
fdt-rs = { version = "0.4.3", default-features = false }
linked_list_allocator = "0.10.5"
spinning_top = "0.2.5"
static_assertions = "1.1.0"
tock-registers = "0.8.1"

elf = { version = "0.7.2", default-features = false }
lib/vfs/Cargo.toml, new file: 10 lines
@@ -0,0 +1,10 @@
[package]
name = "vfs"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
yggdrasil-abi = { git = "https://git.alnyan.me/yggdrasil/yggdrasil-abi.git" }
bitflags = "2.3.3"
lib/vfs/src/block.rs, new file: 6 lines
@@ -0,0 +1,6 @@
use yggdrasil_abi::error::Error;

pub trait BlockDevice {
    fn read(&self, pos: usize, buf: &mut [u8]) -> Result<(), Error>;
    fn write(&self, pos: usize, buf: &[u8]) -> Result<(), Error>;
}
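Anything exposing these two methods can back a filesystem. A minimal sketch of an implementor, a RAM-backed disk, is below; RamDisk and its fixed 512-byte capacity are made up for illustration, and Spinlock comes from spinning_top, which is already in the kernel's dependency list:

use spinning_top::Spinlock;
use vfs::BlockDevice;
use yggdrasil_abi::error::Error;

struct RamDisk {
    data: Spinlock<[u8; 512]>,
}

impl BlockDevice for RamDisk {
    fn read(&self, pos: usize, buf: &mut [u8]) -> Result<(), Error> {
        let data = self.data.lock();
        // Illustration only: a real driver would return an error on an
        // out-of-range access instead of asserting
        assert!(pos + buf.len() <= data.len());
        buf.copy_from_slice(&data[pos..pos + buf.len()]);
        Ok(())
    }

    fn write(&self, pos: usize, buf: &[u8]) -> Result<(), Error> {
        let mut data = self.data.lock();
        assert!(pos + buf.len() <= data.len());
        data[pos..pos + buf.len()].copy_from_slice(buf);
        Ok(())
    }
}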
lib/vfs/src/char.rs, new file: 49 lines
@@ -0,0 +1,49 @@
use yggdrasil_abi::error::Error;

use crate::node::{VnodeImpl, VnodeRef};

pub trait CharDevice {
    fn read(&'static self, blocking: bool, data: &mut [u8]) -> Result<usize, Error>;
    fn write(&self, blocking: bool, data: &[u8]) -> Result<usize, Error>;
}

pub struct CharDeviceWrapper {
    device: &'static dyn CharDevice,
}

impl CharDeviceWrapper {
    pub const fn new(device: &'static dyn CharDevice) -> Self {
        Self { device }
    }
}

impl VnodeImpl for CharDeviceWrapper {
    fn open(
        &mut self,
        _node: &VnodeRef,
        _opts: yggdrasil_abi::io::OpenFlags,
    ) -> Result<usize, Error> {
        Ok(0)
    }

    fn close(&mut self, _node: &VnodeRef) -> Result<(), Error> {
        Ok(())
    }

    fn read(&mut self, _node: &VnodeRef, _pos: usize, data: &mut [u8]) -> Result<usize, Error> {
        self.device.read(true, data)
    }

    fn write(&mut self, _node: &VnodeRef, _pos: usize, data: &[u8]) -> Result<usize, Error> {
        self.device.write(true, data)
    }

    fn create(
        &mut self,
        _at: &VnodeRef,
        _name: &str,
        _kind: crate::node::VnodeKind,
    ) -> Result<VnodeRef, Error> {
        todo!()
    }
}
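For a concrete picture of the trait, here is a hypothetical "null" character device; NullDevice and the static wiring are illustration only, everything else is the API introduced above:

use vfs::{CharDevice, CharDeviceWrapper};
use yggdrasil_abi::error::Error;

struct NullDevice;

impl CharDevice for NullDevice {
    fn read(&'static self, _blocking: bool, _data: &mut [u8]) -> Result<usize, Error> {
        Ok(0) // always EOF
    }

    fn write(&self, _blocking: bool, data: &[u8]) -> Result<usize, Error> {
        Ok(data.len()) // discard the data, report it all as written
    }
}

static NULL: NullDevice = NullDevice;

// CharDeviceWrapper adapts the device to the VnodeImpl interface, so it can
// be attached to a vnode:
//     node.set_data(Box::new(CharDeviceWrapper::new(&NULL)));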
lib/vfs/src/file.rs, new file: 88 lines
@@ -0,0 +1,88 @@
use core::cell::RefCell;

use alloc::rc::Rc;
use bitflags::bitflags;
use yggdrasil_abi::error::Error;

use crate::{
    node::{VnodeKind, VnodeRef},
    Read, Write,
};

bitflags! {
    pub struct FileFlags: u32 {
        const READ = 1 << 0;
        const WRITE = 1 << 1;
    }
}

pub type FileRef = Rc<RefCell<File>>;

pub struct NormalFile {
    vnode: VnodeRef,
    pos: usize,
}

pub enum FileInner {
    Normal(NormalFile),
}

pub struct File {
    inner: FileInner,
    flags: FileFlags,
}

impl File {
    pub fn normal(vnode: VnodeRef, pos: usize, flags: FileFlags) -> FileRef {
        Rc::new(RefCell::new(Self {
            inner: FileInner::Normal(NormalFile { vnode, pos }),
            flags,
        }))
    }
}

impl Write for File {
    fn write(&mut self, data: &[u8]) -> Result<usize, Error> {
        if !self.flags.contains(FileFlags::WRITE) {
            panic!();
        }

        match &mut self.inner {
            FileInner::Normal(inner) => {
                let count = inner.vnode.write(inner.pos, data)?;
                if inner.vnode.kind() != VnodeKind::Char {
                    inner.pos += count;
                }
                Ok(count)
            }
        }
    }
}

impl Read for File {
    fn read(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        if !self.flags.contains(FileFlags::READ) {
            panic!();
        }

        match &mut self.inner {
            FileInner::Normal(inner) => {
                let count = inner.vnode.read(inner.pos, data)?;
                if inner.vnode.kind() != VnodeKind::Char {
                    inner.pos += count;
                }
                Ok(count)
            }
        }
    }
}

impl Drop for File {
    fn drop(&mut self) {
        match &mut self.inner {
            FileInner::Normal(inner) => {
                inner.vnode.close().ok();
            }
        }
    }
}
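Note that only non-character files advance the position cursor; character devices are streams. Since FileRef wraps File in Rc<RefCell<...>>, callers reach the Read/Write impls through borrow_mut(); a small usage sketch (read_some is hypothetical, and the FileRef is assumed to come from Vnode::open or IoContext::open):

use vfs::{FileRef, Read};
use yggdrasil_abi::error::Error;

fn read_some(file: &FileRef) -> Result<usize, Error> {
    let mut buf = [0u8; 64];
    // File sits behind Rc<RefCell<...>>, so the Read impl is reached
    // through a RefCell borrow
    file.borrow_mut().read(&mut buf)
}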
lib/vfs/src/fs.rs, new file: 15 lines
@@ -0,0 +1,15 @@
use core::{any::Any, cell::Ref};

use alloc::rc::Rc;

use yggdrasil_abi::error::Error;

use crate::{block::BlockDevice, node::VnodeRef};

pub trait Filesystem {
    fn root(self: Rc<Self>) -> Result<VnodeRef, Error>;

    fn dev(self: Rc<Self>) -> Option<&'static dyn BlockDevice>;

    fn data(&self) -> Option<Ref<dyn Any>>;
}
lib/vfs/src/ioctx.rs, new file: 216 lines
@@ -0,0 +1,216 @@
use yggdrasil_abi::{error::Error, io::OpenFlags, path};

use crate::{file::FileRef, node::VnodeRef};

pub struct IoContext {
    root: VnodeRef,
    cwd: VnodeRef,
}

impl IoContext {
    pub fn new(root: VnodeRef) -> Self {
        Self {
            cwd: root.clone(),
            root,
        }
    }

    fn _find(&self, mut at: VnodeRef, path: &str, follow: bool) -> Result<VnodeRef, Error> {
        let mut element;
        let mut rest = path;

        loop {
            (element, rest) = path::split_left(rest);

            if !at.is_directory() {
                todo!();
            }

            match element {
                path::PARENT_NAME => {
                    at = at.parent();
                }
                path::SELF_NAME => {}
                _ => break,
            }
        }

        // TODO resolve link target

        if element.is_empty() && rest.is_empty() {
            return Ok(at);
        }

        let node = at.lookup_or_load(element)?;

        if rest.is_empty() {
            Ok(node)
        } else {
            self._find(node, rest, follow)
        }
    }

    pub fn find(
        &self,
        at: Option<VnodeRef>,
        mut path: &str,
        follow: bool,
    ) -> Result<VnodeRef, Error> {
        let at = if path.starts_with('/') {
            path = path.trim_start_matches('/');
            self.root.clone()
        } else if let Some(at) = at {
            at
        } else {
            self.cwd.clone()
        };

        self._find(at, path, follow)
    }

    pub fn open(
        &self,
        at: Option<VnodeRef>,
        path: &str,
        opts: OpenFlags,
    ) -> Result<FileRef, Error> {
        let node = match self.find(at.clone(), path, true) {
            Err(Error::DoesNotExist) => {
                // TODO check for create option
                return Err(Error::DoesNotExist);
            }
            o => o,
        }?;

        node.open(opts)
    }
}

#[cfg(test)]
mod tests {
    use abi::error::Error;

    use crate::{node::VnodeRef, IoContext};
    use std::fmt;

    macro_rules! node {
        ($name:literal) => {{
            $crate::node::Vnode::new($name, $crate::node::VnodeKind::Regular)
        }};

        ($name:literal [ $($child:expr),* ]) => {{
            let _node = $crate::node::Vnode::new($name, $crate::node::VnodeKind::Directory);

            $(
                _node.add_child($child);
            )*

            _node
        }};
    }

    struct DumpNode<'a> {
        node: &'a VnodeRef,
    }

    impl fmt::Debug for DumpNode<'_> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            self.node.dump(f, 0)
        }
    }

    #[test]
    fn test_vnode_find() {
        let t = node! {
            "" [
                node!("file1.txt"),
                node!("file2.txt"),
                node! {
                    "dir1" [
                        node!("file3.txt")
                    ]
                }
            ]
        };

        let ctx = IoContext::new(t);

        // Absolute lookups
        assert_eq!(
            ctx.find(None, "/file1.txt", false).unwrap().name(),
            "file1.txt"
        );
        assert_eq!(
            ctx.find(None, "/file3.txt", false).unwrap_err(),
            Error::DoesNotExist
        );
        assert_eq!(
            ctx.find(None, "/dir1/file3.txt", false).unwrap().name(),
            "file3.txt"
        );

        // Non-absolute lookups from root
        assert_eq!(
            ctx.find(None, "file1.txt", false).unwrap().name(),
            "file1.txt"
        );
        assert_eq!(
            ctx.find(None, "dir1/file3.txt", false).unwrap().name(),
            "file3.txt"
        );

        // Absolute lookups from non-root
        let cwd = ctx.find(None, "/dir1", false).unwrap();

        assert_eq!(
            ctx.find(Some(cwd.clone()), "/file1.txt", false)
                .unwrap()
                .name(),
            "file1.txt"
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), "/dir1/file3.txt", false)
                .unwrap()
                .name(),
            "file3.txt"
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), "/file3.txt", false)
                .unwrap_err(),
            Error::DoesNotExist
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), "/dir2", false).unwrap_err(),
            Error::DoesNotExist
        );

        // Non-absolute lookups in non-root
        assert_eq!(
            ctx.find(Some(cwd.clone()), "file3.txt", false)
                .unwrap()
                .name(),
            "file3.txt"
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), "././file3.txt", false)
                .unwrap()
                .name(),
            "file3.txt"
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), "../dir1/file3.txt", false)
                .unwrap()
                .name(),
            "file3.txt"
        );
        assert_eq!(
            ctx.find(Some(cwd.clone()), ".", false).unwrap().name(),
            "dir1"
        );
        assert_eq!(ctx.find(Some(cwd.clone()), "..", false).unwrap().name(), "");
        assert_eq!(
            ctx.find(Some(cwd.clone()), "../..", false).unwrap().name(),
            ""
        );
    }
}
lib/vfs/src/lib.rs, new file: 29 lines
@@ -0,0 +1,29 @@
#![no_std]

use yggdrasil_abi::error::Error;

extern crate alloc;

#[cfg(test)]
extern crate std;

pub(crate) mod block;
pub(crate) mod char;
pub(crate) mod file;
pub(crate) mod fs;
pub(crate) mod ioctx;
pub(crate) mod node;

pub use self::block::BlockDevice;
pub use self::char::{CharDevice, CharDeviceWrapper};
pub use file::{File, FileFlags, FileRef};
pub use ioctx::IoContext;
pub use node::{Vnode, VnodeImpl, VnodeKind, VnodeRef, VnodeWeak};

pub trait Write {
    fn write(&mut self, data: &[u8]) -> Result<usize, Error>;
}

pub trait Read {
    fn read(&mut self, data: &mut [u8]) -> Result<usize, Error>;
}
lib/vfs/src/node.rs, new file: 225 lines
@@ -0,0 +1,225 @@
use core::{
    cell::{RefCell, RefMut},
    fmt,
};

use alloc::{
    boxed::Box,
    rc::{Rc, Weak},
    string::String,
    vec::Vec,
};
use yggdrasil_abi::{error::Error, io::OpenFlags};

use crate::file::{File, FileFlags, FileRef};

pub type VnodeRef = Rc<Vnode>;
pub type VnodeWeak = Weak<Vnode>;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum VnodeKind {
    Directory,
    Regular,
    Char,
    Block,
}

pub(crate) struct TreeNode {
    parent: Option<VnodeWeak>,
    children: Vec<VnodeRef>,
}

pub struct Vnode {
    name: String,
    tree: RefCell<TreeNode>,
    kind: VnodeKind,
    data: RefCell<Option<Box<dyn VnodeImpl>>>,
}

pub trait VnodeImpl {
    fn create(&mut self, at: &VnodeRef, name: &str, kind: VnodeKind) -> Result<VnodeRef, Error>;

    fn open(&mut self, node: &VnodeRef, opts: OpenFlags) -> Result<usize, Error>;
    fn close(&mut self, node: &VnodeRef) -> Result<(), Error>;

    fn read(&mut self, node: &VnodeRef, pos: usize, data: &mut [u8]) -> Result<usize, Error>;
    fn write(&mut self, node: &VnodeRef, pos: usize, data: &[u8]) -> Result<usize, Error>;
}

impl Vnode {
    pub fn new<S: Into<String>>(name: S, kind: VnodeKind) -> VnodeRef {
        Rc::new(Self {
            name: name.into(),
            tree: RefCell::new(TreeNode {
                parent: None,
                children: Vec::new(),
            }),
            kind,
            data: RefCell::new(None),
        })
    }

    #[inline]
    pub fn name(&self) -> &str {
        &self.name
    }

    #[inline]
    pub fn kind(&self) -> VnodeKind {
        self.kind
    }

    #[inline]
    pub fn data(&self) -> RefMut<Option<Box<dyn VnodeImpl>>> {
        self.data.borrow_mut()
    }

    pub fn parent(self: &VnodeRef) -> VnodeRef {
        match &self.tree.borrow().parent {
            Some(parent) => parent.upgrade().unwrap(),
            None => self.clone(),
        }
    }

    pub fn set_data(&self, data: Box<dyn VnodeImpl>) {
        self.data.borrow_mut().replace(data);
    }

    #[inline]
    pub fn is_directory(&self) -> bool {
        self.kind == VnodeKind::Directory
    }

    // Cache tree operations
    pub fn add_child(self: &VnodeRef, child: VnodeRef) {
        let parent_weak = Rc::downgrade(self);
        let mut parent_borrow = self.tree.borrow_mut();

        assert!(child
            .tree
            .borrow_mut()
            .parent
            .replace(parent_weak)
            .is_none());
        parent_borrow.children.push(child);
    }

    pub fn dump(&self, f: &mut fmt::Formatter<'_>, depth: usize) -> fmt::Result {
        for _ in 0..depth {
            f.write_str("  ")?;
        }

        write!(f, "{:?}", self.name)?;

        match self.kind {
            VnodeKind::Directory => {
                let tree = self.tree.borrow();
                if tree.children.is_empty() {
                    f.write_str(" []")?;
                } else {
                    f.write_str(" [\n")?;
                    for child in tree.children.iter() {
                        child.dump(f, depth + 1)?;
                        f.write_str("\n")?;
                    }
                    for _ in 0..depth {
                        f.write_str("  ")?;
                    }
                    f.write_str("]")?;
                }
            }
            _ => (),
        }

        Ok(())
    }

    pub fn lookup(self: &VnodeRef, name: &str) -> Option<VnodeRef> {
        assert!(self.is_directory());
        self.tree
            .borrow()
            .children
            .iter()
            .find(|e| e.name == name)
            .cloned()
    }

    pub fn lookup_or_load(self: &VnodeRef, name: &str) -> Result<VnodeRef, Error> {
        // Lookup in cache
        if let Some(node) = self.lookup(name) {
            return Ok(node);
        }

        // TODO load from FS
        Err(Error::DoesNotExist)
    }

    // Node operations
    pub fn open(self: &VnodeRef, flags: OpenFlags) -> Result<FileRef, Error> {
        let mut open_flags = FileFlags::empty();

        if flags.is_read() {
            open_flags |= FileFlags::READ;
        }
        if flags.is_write() {
            open_flags |= FileFlags::WRITE;
        }

        if self.kind == VnodeKind::Directory {
            return Err(Error::IsADirectory);
        }

        if let Some(ref mut data) = *self.data() {
            let pos = data.open(self, flags)?;
            Ok(File::normal(self.clone(), pos, open_flags))
        } else {
            todo!()
        }
    }

    pub fn close(self: &VnodeRef) -> Result<(), Error> {
        if let Some(ref mut data) = *self.data() {
            data.close(self)
        } else {
            todo!()
        }
    }

    pub fn write(self: &VnodeRef, pos: usize, buf: &[u8]) -> Result<usize, Error> {
        if self.kind == VnodeKind::Directory {
            todo!();
        }

        if let Some(ref mut data) = *self.data() {
            data.write(self, pos, buf)
        } else {
            todo!()
        }
    }

    pub fn read(self: &VnodeRef, pos: usize, buf: &mut [u8]) -> Result<usize, Error> {
        if self.kind == VnodeKind::Directory {
            todo!();
        }

        if let Some(ref mut data) = *self.data() {
            data.read(self, pos, buf)
        } else {
            todo!()
        }
    }
}

impl fmt::Debug for Vnode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = match self.kind {
            VnodeKind::Directory => "DIR ",
            VnodeKind::Regular => "REG ",
            VnodeKind::Char => "CHR ",
            VnodeKind::Block => "BLK ",
        };

        write!(f, "[{} {}]", prefix, self.name)
    }
}
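Putting the node API together: a tiny tree can be built and queried with only the functions above. This sketch mirrors what the ioctx tests do with their node! macro:

use vfs::{Vnode, VnodeKind};

#[test]
fn vnode_tree_sketch() {
    let root = Vnode::new("", VnodeKind::Directory);
    let dev = Vnode::new("dev", VnodeKind::Directory);
    root.add_child(dev.clone());
    dev.add_child(Vnode::new("ttyS0", VnodeKind::Char));

    // lookup() only consults the in-memory child list; lookup_or_load()
    // falls back to the filesystem (currently a TODO) on a cache miss
    assert!(dev.lookup("ttyS0").is_some());
    assert!(root.lookup("ttyS0").is_none());
}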
src/arch/aarch64/boot/mod.rs, new file: 128 lines
@@ -0,0 +1,128 @@
//! Main entry point for the AArch64 platforms
use core::{arch::asm, sync::atomic::Ordering};

use aarch64_cpu::registers::{CurrentEL, CPACR_EL1};
use tock_registers::interfaces::{ReadWriteable, Readable};

use super::{
    cpu::Cpu, exception, kernel_main, smp::CPU_COUNT, AArch64, KernelStack, ARCHITECTURE,
    BOOT_STACK_SIZE,
};
use crate::{
    absolute_address,
    arch::{Architecture, PLATFORM},
    device::platform::Platform,
    mem::{ConvertAddress, KERNEL_VIRT_OFFSET},
    sync::SpinFence,
    task,
};

pub(super) static CPU_INIT_FENCE: SpinFence = SpinFence::new();

fn __aarch64_common_lower_entry() {
    // Unmask FP operations
    CPACR_EL1.modify(CPACR_EL1::FPEN::TrapNothing);

    if CurrentEL.read(CurrentEL::EL) != 1 {
        panic!("Only EL1 is supported for now");
    }
}

fn enter_higher_half(sp: usize, elr: usize, arg: usize) -> ! {
    unsafe {
        asm!(r#"
            mov sp, {sp}
            mov x0, {arg}
            br {entry}
        "#, entry = in(reg) elr, arg = in(reg) arg, sp = in(reg) sp, options(noreturn));
    }
}

pub(super) extern "C" fn __aarch64_ap_lower_entry(sp: usize) -> ! {
    __aarch64_common_lower_entry();

    unsafe {
        ARCHITECTURE.init_mmu(false);
    }

    let sp = unsafe { sp.virtualize() };
    let elr = absolute_address!(__aarch64_ap_upper_entry);
    enter_higher_half(sp, elr, 0);
}

extern "C" fn __aarch64_bsp_lower_entry(dtb_phys: usize) -> ! {
    __aarch64_common_lower_entry();

    unsafe {
        ARCHITECTURE.init_mmu(true);
    }

    let sp = unsafe { BSP_STACK.data.as_ptr().add(BOOT_STACK_SIZE).virtualize() };
    let elr = absolute_address!(__aarch64_bsp_upper_entry);
    enter_higher_half(sp as usize, elr, dtb_phys);
}

extern "C" fn __aarch64_bsp_upper_entry(dtb_phys: usize) -> ! {
    kernel_main(dtb_phys);
}

extern "C" fn __aarch64_ap_upper_entry(_x0: usize) -> ! {
    unsafe {
        AArch64::set_interrupt_mask(true);
    }

    // Signal to BSP that we're up
    CPU_COUNT.fetch_add(1, Ordering::SeqCst);
    aarch64_cpu::asm::sev();

    exception::init_exceptions();

    // Initialize CPU-local GIC and timer
    unsafe {
        PLATFORM.init(false).expect("AP platform init failed");

        Cpu::init_local();

        // Synchronize the CPUs to this point
        CPU_INIT_FENCE.signal();
        CPU_INIT_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));

        task::enter();
    }
}

#[link_section = ".text.entry"]
#[no_mangle]
#[naked]
unsafe extern "C" fn __aarch64_entry() -> ! {
    // Setup the stack and pass on to a proper function
    asm!(
        r#"
        // Multiple processor cores may or may not be running at this point
        mrs x1, mpidr_el1
        ands x1, x1, #0xF
        bne 1f

        // BSP in SMP or uniprocessor
        ldr x1, ={stack_bottom} + {stack_size} - {kernel_virt_offset}
        mov sp, x1

        bl {kernel_lower_entry} - {kernel_virt_offset}

        // AP in an SMP system
        // TODO spin loop for this method of init
    1:
        b .
        "#,
        kernel_lower_entry = sym __aarch64_bsp_lower_entry,
        stack_bottom = sym BSP_STACK,
        stack_size = const BOOT_STACK_SIZE,
        kernel_virt_offset = const KERNEL_VIRT_OFFSET,
        options(noreturn)
    );
}

#[link_section = ".bss"]
static BSP_STACK: KernelStack = KernelStack {
    data: [0; BOOT_STACK_SIZE],
};
src/arch/aarch64/context.S, new file: 103 lines
@@ -0,0 +1,103 @@
.global __aarch64_enter_task
.global __aarch64_switch_task

.section .text

.macro SAVE_TASK_STATE
    sub sp, sp, #{context_size}

    stp x19, x20, [sp, #16 * 0]
    stp x21, x22, [sp, #16 * 1]
    stp x23, x24, [sp, #16 * 2]
    stp x25, x26, [sp, #16 * 3]
    stp x27, x28, [sp, #16 * 4]
    stp x29, x30, [sp, #16 * 5]

    mrs x19, tpidr_el0
    mrs x20, ttbr0_el1
    stp x19, x20, [sp, #16 * 6]
.endm

.macro LOAD_TASK_STATE
    // x19 == tpidr_el0, x20 == ttbr0_el1
    ldp x19, x20, [sp, #16 * 6]
    msr tpidr_el0, x19
    msr ttbr0_el1, x20

    ldp x19, x20, [sp, #16 * 0]
    ldp x21, x22, [sp, #16 * 1]
    ldp x23, x24, [sp, #16 * 2]
    ldp x25, x26, [sp, #16 * 3]
    ldp x27, x28, [sp, #16 * 4]
    ldp x29, x30, [sp, #16 * 5]

    add sp, sp, #{context_size}
.endm

__aarch64_task_enter_kernel:
    // EL1h, IRQs unmasked
    mov x0, #5
    msr spsr_el1, x0

    // x0 == argument, x1 == entry point
    ldp x0, x1, [sp, #0]
    msr elr_el1, x1

    add sp, sp, #16

    eret

__aarch64_task_enter_user:
    // x0 == sp, x1 == ignored
    ldp x0, x1, [sp, #16 * 0]
    msr sp_el0, x0

    // EL0t, IRQs unmasked
    msr spsr_el1, xzr

    // x0 == arg, x1 == entry
    ldp x0, x1, [sp, #16 * 1]
    msr elr_el1, x1
    add sp, sp, #32

    // Zero the registers
    mov x1, xzr
    mov x2, xzr
    mov x3, xzr
    mov x4, xzr
    mov x5, xzr
    mov x6, xzr
    mov x7, xzr
    mov x8, xzr
    mov x9, xzr
    mov x10, xzr
    mov x11, xzr
    mov x12, xzr
    mov x13, xzr
    mov x14, xzr
    mov x15, xzr
    mov x16, xzr
    mov x17, xzr
    mov x18, xzr

    mov lr, xzr

    eret

__aarch64_switch_task:
    SAVE_TASK_STATE
    mov x19, sp
    str x19, [x1]

    ldr x0, [x0]
    mov sp, x0
    LOAD_TASK_STATE

    ret

__aarch64_enter_task:
    ldr x0, [x0]
    mov sp, x0
    LOAD_TASK_STATE

    ret
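For reference, the saved-context frame that SAVE_TASK_STATE, LOAD_TASK_STATE and StackBuilder::init_common (in context.rs below) all agree on, with offsets relative to the stored sp:

    [sp + 16 * 0]  x19, x20
    [sp + 16 * 1]  x21, x22
    [sp + 16 * 2]  x23, x24
    [sp + 16 * 3]  x25, x26
    [sp + 16 * 4]  x27, x28
    [sp + 16 * 5]  x29, x30 (return address: one of the enter trampolines)
    [sp + 16 * 6]  tpidr_el0, ttbr0_el1

The argument/entry words that TaskContext::kernel() and TaskContext::user() push before init_common() sit just above this frame and are popped by the __aarch64_task_enter_* trampolines.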
src/arch/aarch64/context.rs, new file: 172 lines
@@ -0,0 +1,172 @@
//! AArch64-specific task context implementation
use core::{arch::global_asm, cell::UnsafeCell};

use abi::error::Error;
use alloc::boxed::Box;

use crate::mem::{
    phys::{self, PageUsage},
    ConvertAddress,
};

struct StackBuilder {
    base: usize,
    sp: usize,
}

#[repr(C, align(0x10))]
struct TaskContextInner {
    // 0x00
    sp: usize,
}

/// AArch64 implementation of a task context
pub struct TaskContext {
    inner: UnsafeCell<TaskContextInner>,
}

const COMMON_CONTEXT_SIZE: usize = 8 * 14;

unsafe impl Sync for TaskContext {}

impl StackBuilder {
    fn new(base: usize, size: usize) -> Self {
        Self {
            base,
            sp: base + size,
        }
    }

    fn push(&mut self, value: usize) {
        if self.sp == self.base {
            panic!();
        }
        self.sp -= 8;
        unsafe {
            (self.sp as *mut usize).write_volatile(value);
        }
    }

    fn _skip(&mut self, count: usize) {
        self.sp -= count * 8;
        if self.sp < self.base {
            panic!();
        }
    }

    fn build(self) -> usize {
        self.sp
    }

    fn init_common(&mut self, entry: usize, ttbr0: usize) {
        self.push(ttbr0); // ttbr0_el1
        self.push(0); // tpidr_el0

        self.push(entry); // x30/lr
        self.push(0); // x29
        self.push(0); // x28
        self.push(0); // x27
        self.push(0); // x26
        self.push(0); // x25
        self.push(0); // x24
        self.push(0); // x23
        self.push(0); // x22
        self.push(0); // x21
        self.push(0); // x20
        self.push(0); // x19
    }
}

impl TaskContext {
    /// Constructs a kernel thread context. For a more convenient way of constructing kernel
    /// processes, see [TaskContext::kernel_closure()].
    pub fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
        const KERNEL_TASK_PAGES: usize = 4;
        let stack_base = unsafe {
            phys::alloc_pages_contiguous(KERNEL_TASK_PAGES, PageUsage::Used)?.virtualize()
        };

        let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);

        // Entry and argument
        stack.push(entry as _);
        stack.push(arg);

        stack.init_common(__aarch64_task_enter_kernel as _, 0);

        let sp = stack.build();

        // TODO stack is leaked

        Ok(Self {
            inner: UnsafeCell::new(TaskContextInner { sp }),
        })
    }

    /// Constructs a safe wrapper process to execute a kernel-space closure
    pub fn kernel_closure<F: FnOnce() + Send + 'static>(f: F) -> Result<Self, Error> {
        extern "C" fn closure_wrapper<F: FnOnce() + Send + 'static>(closure_addr: usize) -> ! {
            let closure = unsafe { Box::from_raw(closure_addr as *mut F) };
            closure();
            todo!("Process termination");
        }

        let closure = Box::new(f);
        Self::kernel(closure_wrapper::<F>, Box::into_raw(closure) as usize)
    }

    /// Constructs a user thread context. The caller is responsible for allocating the userspace
    /// stack and setting up a valid address space for the context.
    pub fn user(
        entry: usize,
        arg: usize,
        ttbr0: usize,
        user_stack_sp: usize,
    ) -> Result<Self, Error> {
        const USER_TASK_PAGES: usize = 8;
        let stack_base =
            unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES, PageUsage::Used)?.virtualize() };

        let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);

        stack.push(entry as _);
        stack.push(arg);
        stack.push(0);
        stack.push(user_stack_sp);

        stack.init_common(__aarch64_task_enter_user as _, ttbr0);

        let sp = stack.build();

        Ok(Self {
            inner: UnsafeCell::new(TaskContextInner { sp }),
        })
    }

    /// Starts execution of `self` task on local CPU.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the scheduler code.
    pub unsafe fn enter(&self) -> ! {
        __aarch64_enter_task(self.inner.get())
    }

    /// Switches from `from` task to `self` task.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the scheduler code.
    pub unsafe fn switch(&self, from: &Self) {
        __aarch64_switch_task(self.inner.get(), from.inner.get())
    }
}

extern "C" {
    fn __aarch64_enter_task(to: *mut TaskContextInner) -> !;
    fn __aarch64_switch_task(to: *mut TaskContextInner, from: *mut TaskContextInner);
    fn __aarch64_task_enter_kernel();
    fn __aarch64_task_enter_user();
}

global_asm!(include_str!("context.S"), context_size = const COMMON_CONTEXT_SIZE);
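Usage sketch for spawning a kernel thread through kernel_closure(); the function names below exist in this commit, while spawn_example() itself and the scheduler hand-off described in the comments are assumed context:

fn spawn_example() -> Result<TaskContext, abi::error::Error> {
    // The closure is boxed and its raw address becomes the task argument;
    // closure_wrapper() turns it back into a Box and calls it on first entry
    TaskContext::kernel_closure(|| {
        debugln!("Hello from a kernel thread");
    })
    // The returned context is then handed to the scheduler, which runs it
    // via ctx.enter() (first run) or ctx.switch(&previous) (later runs)
}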
src/arch/aarch64/cpu.rs, new file: 111 lines
@@ -0,0 +1,111 @@
//! Per-CPU data structures
use core::sync::atomic::Ordering;

use aarch64_cpu::registers::{MPIDR_EL1, TPIDR_EL1};
use alloc::{boxed::Box, vec::Vec};
use tock_registers::interfaces::{Readable, Writeable};

use crate::{arch::CpuMessage, sync::IrqSafeSpinlock, task::sched::CpuQueue, util::OneTimeInit};

use super::smp::CPU_COUNT;

/// Per-CPU private data structure
#[repr(C, align(0x10))]
pub struct Cpu {
    id: u32,

    queue: OneTimeInit<&'static CpuQueue>,
}

struct IpiQueue {
    data: IrqSafeSpinlock<Option<CpuMessage>>,
}

static IPI_QUEUES: OneTimeInit<Vec<IpiQueue>> = OneTimeInit::new();

impl IpiQueue {
    pub const fn new() -> Self {
        Self {
            data: IrqSafeSpinlock::new(None),
        }
    }

    pub fn push(&self, msg: CpuMessage) {
        let mut lock = self.data.lock();

        assert!(lock.is_none());
        lock.replace(msg);
    }

    pub fn pop(&self) -> Option<CpuMessage> {
        let mut lock = self.data.lock();
        lock.take()
    }
}

impl Cpu {
    /// Returns a safe reference to the local CPU's private data structure
    #[inline(always)]
    pub fn local<'a>() -> &'a Self {
        Self::get_local().unwrap()
    }

    /// Returns the local CPU data structure reference, if it was set up
    #[inline(always)]
    pub fn get_local<'a>() -> Option<&'a Self> {
        let tpidr = TPIDR_EL1.get() as *mut Cpu;
        unsafe { tpidr.as_ref() }
    }

    /// Sets up the local CPU's private data structure.
    ///
    /// # Safety
    ///
    /// The function is only meant to be called once during the early init process.
    pub unsafe fn init_local() {
        let this = Box::new(Cpu {
            id: Self::local_id(),
            queue: OneTimeInit::new(),
        });
        TPIDR_EL1.set(Box::into_raw(this) as _);
    }

    /// Sets up the local CPU's execution queue.
    pub fn init_queue(&self, queue: &'static CpuQueue) {
        self.queue.init(queue);
    }

    /// Returns the local CPU's execution queue.
    pub fn queue(&self) -> &'static CpuQueue {
        self.queue.get()
    }

    /// Returns the index of the local CPU
    #[inline(always)]
    pub fn local_id() -> u32 {
        (MPIDR_EL1.get() & 0xFF) as _
    }

    /// Inserts an IPI message to the back of the target CPU's message queue
    pub fn push_ipi_queue(cpu_id: u32, msg: CpuMessage) {
        let ipi_queue = &IPI_QUEUES.get()[cpu_id as usize];
        ipi_queue.push(msg);
    }

    /// Pops the first IPI message received for this CPU.
    ///
    /// # Note
    ///
    /// Currently the queue consists of only one entry, so the CPU will only receive the last one.
    pub fn get_ipi(&self) -> Option<CpuMessage> {
        let ipi_queue = &IPI_QUEUES.get()[self.id as usize];
        ipi_queue.pop()
    }

    /// Sets up global list of interprocessor message queues
    pub fn init_ipi_queues() {
        IPI_QUEUES.init(Vec::from_iter(
            (0..CPU_COUNT.load(Ordering::Acquire)).map(|_| IpiQueue::new()),
        ));
    }
}
src/arch/aarch64/devtree.rs, new file: 161 lines
@@ -0,0 +1,161 @@
//! ARM device tree utilities
use fdt_rs::{
    base::DevTree,
    index::{iters::DevTreeIndexNodeSiblingIter, DevTreeIndex, DevTreeIndexNode, DevTreeIndexProp},
    prelude::PropReader,
};

use crate::{debug::LogLevel, mem::phys::PhysicalMemoryRegion};

#[repr(C, align(0x10))]
struct FdtIndexBuffer([u8; 32768]);

static mut FDT_INDEX_BUFFER: FdtIndexBuffer = FdtIndexBuffer::zeroed();

impl FdtIndexBuffer {
    const fn zeroed() -> Self {
        Self([0; 32768])
    }
}

/// Device tree node
pub type TNode<'a> = DevTreeIndexNode<'a, 'a, 'a>;
/// Device tree property
pub type TProp<'a> = DevTreeIndexProp<'a, 'a, 'a>;

/// Iterator for physical memory regions present in the device tree
#[derive(Clone)]
pub struct FdtMemoryRegionIter<'a> {
    inner: DevTreeIndexNodeSiblingIter<'a, 'a, 'a>,
}

/// Device tree wrapper struct
pub struct DeviceTree<'a> {
    tree: DevTree<'a>,
    index: DevTreeIndex<'a, 'a>,
}

impl<'a> DeviceTree<'a> {
    /// Constructs a device tree wrapper from the DTB virtual address.
    ///
    /// # Safety
    ///
    /// The caller must ensure the validity of the address.
    pub unsafe fn from_addr(virt: usize) -> Self {
        let tree = DevTree::from_raw_pointer(virt as _).unwrap();
        let index = DevTreeIndex::new(tree, &mut FDT_INDEX_BUFFER.0).unwrap();
        Self { tree, index }
    }

    /// Looks up a node for a given path
    pub fn node_by_path(&self, path: &str) -> Option<TNode> {
        find_node(self.index.root(), path.trim_start_matches('/'))
    }

    /// Prints the device tree to log output
    pub fn dump(&self, level: LogLevel) {
        dump_node(&self.index.root(), 0, level)
    }

    /// Returns the total size of the device tree in memory
    pub fn size(&self) -> usize {
        self.tree.totalsize()
    }
}

impl<'a> FdtMemoryRegionIter<'a> {
    /// Constructs a memory region iterator for given device tree
    pub fn new(dt: &'a DeviceTree) -> Self {
        let inner = dt.index.root().children();
        Self { inner }
    }
}

impl Iterator for FdtMemoryRegionIter<'_> {
    type Item = PhysicalMemoryRegion;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let Some(item) = self.inner.next() else {
                break None;
            };

            if item.name().unwrap_or("").starts_with("memory@") {
                let reg = item
                    .props()
                    .find(|p| p.name().unwrap_or("") == "reg")
                    .unwrap();

                break Some(PhysicalMemoryRegion {
                    base: reg.u64(0).unwrap() as usize,
                    size: reg.u64(1).unwrap() as usize,
                });
            }
        }
    }
}

/// Looks up a property with given name in the node
pub fn find_prop<'a>(node: &TNode<'a>, name: &str) -> Option<TProp<'a>> {
    node.props().find(|p| p.name().unwrap_or("") == name)
}

fn path_component_left(path: &str) -> (&str, &str) {
    if let Some((left, right)) = path.split_once('/') {
        (left, right.trim_start_matches('/'))
    } else {
        (path, "")
    }
}

fn find_node<'a>(at: TNode<'a>, path: &str) -> Option<TNode<'a>> {
    let (item, path) = path_component_left(path);
    if item.is_empty() {
        assert_eq!(path, "");
        Some(at)
    } else {
        let child = at.children().find(|c| c.name().unwrap() == item)?;
        if path.is_empty() {
            Some(child)
        } else {
            find_node(child, path)
        }
    }
}

fn dump_node(node: &TNode, depth: usize, level: LogLevel) {
    fn indent(level: LogLevel, depth: usize) {
        for _ in 0..depth {
            log_print!(level, "  ");
        }
    }

    let node_name = node.name().unwrap();

    // Don't dump these
    if node_name.starts_with("virtio_mmio@") {
        return;
    }

    indent(level, depth);
    log_print!(level, "{:?} {{\n", node_name);
    for prop in node.props() {
        indent(level, depth + 1);
        let name = prop.name().unwrap();
        log_print!(level, "{name:?} = ");

        match name {
            "compatible" | "stdout-path" => log_print!(level, "{:?}", prop.str().unwrap()),
            _ => log_print!(level, "{:x?}", prop.raw()),
        }

        log_print!(level, "\n");
    }

    for child in node.children() {
        dump_node(&child, depth + 1, level);
    }

    indent(level, depth);
    log_print!(level, "}}\n");
}
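Usage sketch for the lookup helpers, reading the standard /chosen node's stdout-path property; log_stdout_path() is hypothetical, while node_by_path(), find_prop() and the fdt-rs str() accessor are used exactly as above:

fn log_stdout_path(dt: &DeviceTree) {
    if let Some(chosen) = dt.node_by_path("/chosen") {
        if let Some(prop) = find_prop(&chosen, "stdout-path") {
            // str() fails if the property is not valid UTF-8/NUL-terminated
            debugln!("stdout-path = {:?}", prop.str().unwrap_or("<invalid>"));
        }
    }
}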
src/arch/aarch64/exception.rs, new file: 173 lines
@@ -0,0 +1,173 @@
//! Exception and interrupt management functions
use core::{arch::global_asm, fmt};

use aarch64_cpu::registers::{ELR_EL1, ESR_EL1, FAR_EL1, TTBR0_EL1, TTBR1_EL1, VBAR_EL1};
use tock_registers::interfaces::{Readable, Writeable};

use crate::{
    arch::{aarch64::cpu::Cpu, CpuMessage, PLATFORM},
    debug::LogLevel,
    device::{interrupt::IrqContext, platform::Platform},
    panic::panic_secondary,
    syscall::raw_syscall_handler,
    task::process::Process,
};

/// Struct for register values saved when taking an exception
#[repr(C)]
pub struct ExceptionFrame {
    r: [u64; 32],
    c: [u64; 4],
    // ...
}

impl fmt::Debug for ExceptionFrame {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for i in (0..32).step_by(2) {
            write!(
                f,
                "x{:<2} = {:#020x}\tx{:<2} = {:#020x}",
                i,
                self.r[i],
                i + 1,
                self.r[i + 1]
            )?;
            if i != 30 {
                f.write_str("\n")?;
            }
        }

        Ok(())
    }
}

/// Initializes the exception/interrupt vectors. May be called repeatedly (though that makes no
/// sense).
pub fn init_exceptions() {
    extern "C" {
        static __aarch64_el1_vectors: u8;
    }
    let vbar = unsafe { &__aarch64_el1_vectors as *const _ };
    VBAR_EL1.set(vbar as u64);
}

fn dump_irrecoverable_exception(frame: &ExceptionFrame, ec: u64, iss: u64) {
    let cpu = Cpu::get_local();

    log_print_raw!(LogLevel::Fatal, "SYNC exception:\n");
    log_print_raw!(LogLevel::Fatal, "FAR: {:#x}\n", FAR_EL1.get());
    log_print_raw!(LogLevel::Fatal, "ELR: {:#x}\n", ELR_EL1.get());
    log_print_raw!(LogLevel::Fatal, "ESR: {:#x}\n", ESR_EL1.get());
    log_print_raw!(LogLevel::Fatal, "TTBR0_EL1: {:#x}\n", TTBR0_EL1.get());
    log_print_raw!(LogLevel::Fatal, "TTBR1_EL1: {:#x}\n", TTBR1_EL1.get());
    log_print_raw!(LogLevel::Fatal, "Register dump:\n");
    log_print_raw!(LogLevel::Fatal, "{:?}\n", frame);

    if let Some(cpu) = cpu {
        let current = cpu.queue().current_process();

        if let Some(current) = current {
            log_print_raw!(LogLevel::Fatal, "In process {}\n", current.id());
        }
    }

    match ec {
        // Data abort from lower level
        0b100100 => {
            log_print_raw!(LogLevel::Fatal, "Exception kind: Data Abort from EL0\n");
            let dfsc = iss & 0x3F;

            if iss & (1 << 24) != 0 {
                let access_size_str = match (iss >> 22) & 0x3 {
                    0 => "i8",
                    1 => "i16",
                    2 => "i32",
                    3 => "i64",
                    _ => unreachable!(),
                };
                let access_type_str = if iss & (1 << 6) != 0 { "write" } else { "read" };

                log_print_raw!(
                    LogLevel::Fatal,
                    "Invalid {} of a {} to/from {:#x}\n",
                    access_type_str,
                    access_size_str,
                    FAR_EL1.get()
                );
            }

            log_print_raw!(LogLevel::Fatal, "DFSC = {:#x}\n", dfsc);
        }
        // Instruction abort from lower level
        0b100000 => {
            log_print_raw!(
                LogLevel::Fatal,
                "Exception kind: Instruction Abort from EL0\n"
            );
            let ifsc = iss & 0x3F;
            log_print_raw!(LogLevel::Fatal, "IFSC = {:#x}\n", ifsc);
        }

        _ => (),
    }
}

#[no_mangle]
extern "C" fn __aa64_exc_sync_handler(frame: *mut ExceptionFrame) {
    let frame = unsafe { &mut *frame };

    let esr_el1 = ESR_EL1.get();
    let ec = (esr_el1 >> 26) & 0x3F;

    match ec {
        // SVC in AArch64
        0b010101 => {
            let func = frame.r[8];
            let args = &frame.r[0..6];
            let result = raw_syscall_handler(func, args);
            frame.r[0] = result;
        }
        // BRK in AArch64
        0b111100 => {
            Process::current().exit(1);
            panic!("Cannot return here");
        }
        _ => {
            let iss = esr_el1 & 0x1FFFFFF;
            dump_irrecoverable_exception(frame, ec, iss);

            panic!("Irrecoverable exception");
        }
    }
}

#[no_mangle]
extern "C" fn __aa64_exc_irq_handler(_frame: *mut ExceptionFrame) {
    unsafe {
        let ic = IrqContext::new();
        PLATFORM.interrupt_controller().handle_pending_irqs(&ic);
    }
}

#[no_mangle]
extern "C" fn __aa64_exc_fiq_handler() {
    todo!();
}

#[no_mangle]
extern "C" fn __aa64_exc_serror_handler() {
    todo!();
}

pub(super) fn ipi_handler(msg: Option<CpuMessage>) {
    if let Some(msg) = msg {
        match msg {
            CpuMessage::Panic => panic_secondary(),
        }
    } else {
        warnln!("Spurious IPI received by cpu{}", Cpu::local_id());
        todo!();
    }
}

global_asm!(include_str!("vectors.S"));
src/arch/aarch64/gic/gicc.rs, new file: 60 lines
@@ -0,0 +1,60 @@
//! ARM GICv2 CPU registers
use tock_registers::{
    interfaces::{Readable, Writeable},
    register_bitfields, register_structs,
    registers::ReadWrite,
};

use crate::{device::interrupt::IrqContext, mem::device::DeviceMemoryIo};

register_bitfields! {
    u32,
    CTLR [
        Enable OFFSET(0) NUMBITS(1) []
    ],
    PMR [
        Priority OFFSET(0) NUMBITS(8) []
    ],
    IAR [
        InterruptID OFFSET(0) NUMBITS(10) []
    ],
    EOIR [
        EOINTID OFFSET(0) NUMBITS(10) []
    ]
}

register_structs! {
    #[allow(non_snake_case)]
    pub(super) GiccRegs {
        (0x00 => CTLR: ReadWrite<u32, CTLR::Register>),
        (0x04 => PMR: ReadWrite<u32, PMR::Register>),
        (0x08 => _0),
        (0x0C => IAR: ReadWrite<u32, IAR::Register>),
        (0x10 => EOIR: ReadWrite<u32, EOIR::Register>),
        (0x14 => @END),
    }
}

pub(super) struct Gicc {
    regs: DeviceMemoryIo<GiccRegs>,
}

impl Gicc {
    pub const fn new(regs: DeviceMemoryIo<GiccRegs>) -> Self {
        Self { regs }
    }

    pub unsafe fn init(&self) {
        debugln!("Enabling GICv2 GICC");
        self.regs.CTLR.write(CTLR::Enable::SET);
        self.regs.PMR.write(PMR::Priority.val(0xFF));
    }

    pub fn pending_irq_number<'irq>(&'irq self, _ic: &IrqContext<'irq>) -> usize {
        self.regs.IAR.read(IAR::InterruptID) as usize
    }

    pub fn clear_irq<'irq>(&'irq self, irq: usize, _ic: &IrqContext<'irq>) {
        self.regs.EOIR.write(EOIR::EOINTID.val(irq as u32));
    }
}
src/arch/aarch64/gic/gicd.rs, new file: 161 lines
@@ -0,0 +1,161 @@
//! ARM GICv2 Distributor registers
use spinning_top::Spinlock;
use tock_registers::{
    interfaces::{ReadWriteable, Readable, Writeable},
    register_bitfields, register_structs,
    registers::{ReadOnly, ReadWrite, WriteOnly},
};

use crate::{device::interrupt::IpiDeliveryTarget, mem::device::DeviceMemoryIo};

use super::IrqNumber;

register_bitfields! {
    u32,
    CTLR [
        Enable OFFSET(0) NUMBITS(1) []
    ],
    TYPER [
        ITLinesNumber OFFSET(0) NUMBITS(5) []
    ],
    ITARGETSR [
        Offset3 OFFSET(24) NUMBITS(8) [],
        Offset2 OFFSET(16) NUMBITS(8) [],
        Offset1 OFFSET(8) NUMBITS(8) [],
        Offset0 OFFSET(0) NUMBITS(8) []
    ],
    SGIR [
        TargetListFilter OFFSET(24) NUMBITS(2) [
            SpecifiedOnly = 0,
            AllExceptLocal = 1,
            LocalOnly = 2,
        ],
        CPUTargetList OFFSET(16) NUMBITS(8) [],
        INTID OFFSET(0) NUMBITS(4) []
    ],
}

register_structs! {
    #[allow(non_snake_case)]
    pub(super) GicdSharedRegs {
        (0x000 => CTLR: ReadWrite<u32, CTLR::Register>),
        (0x004 => TYPER: ReadWrite<u32, TYPER::Register>),
        (0x008 => _0),
        (0x104 => ISENABLER: [ReadWrite<u32>; 31]),
        (0x180 => _1),
        (0x820 => ITARGETSR: [ReadWrite<u32, ITARGETSR::Register>; 248]),
        (0xC00 => _2),
        (0xC08 => ICFGR: [ReadWrite<u32>; 62]),
        (0xD00 => _3),
        (0xF00 => SGIR: WriteOnly<u32, SGIR::Register>),
        (0xF04 => @END),
    }
}

register_structs! {
    #[allow(non_snake_case)]
    pub(super) GicdBankedRegs {
        (0x000 => _0),
        (0x100 => ISENABLER: ReadWrite<u32>),
        (0x104 => _1),
        (0x800 => ITARGETSR: [ReadOnly<u32, ITARGETSR::Register>; 8]),
        (0x820 => _2),
        (0xC00 => ICFGR: [ReadWrite<u32>; 2]),
        (0xC08 => @END),
    }
}

pub(super) struct Gicd {
    shared_regs: Spinlock<DeviceMemoryIo<GicdSharedRegs>>,
    banked_regs: DeviceMemoryIo<GicdBankedRegs>,
}

impl GicdSharedRegs {
    #[inline(always)]
    fn num_irqs(&self) -> usize {
        ((self.TYPER.read(TYPER::ITLinesNumber) as usize) + 1) * 32
    }

    #[inline(always)]
    fn itargets_slice(&self) -> &[ReadWrite<u32, ITARGETSR::Register>] {
        assert!(self.num_irqs() >= 36);
        let itargetsr_max_index = ((self.num_irqs() - 32) >> 2) - 1;
        &self.ITARGETSR[0..itargetsr_max_index]
    }
}

impl Gicd {
    pub const fn new(
        shared_regs: DeviceMemoryIo<GicdSharedRegs>,
        banked_regs: DeviceMemoryIo<GicdBankedRegs>,
    ) -> Self {
        let shared_regs = Spinlock::new(shared_regs);
        Self {
            shared_regs,
            banked_regs,
        }
    }

    pub unsafe fn set_sgir(&self, target: IpiDeliveryTarget, interrupt_id: u64) {
        assert_eq!(interrupt_id & !0xF, 0);
        let value = match target {
            IpiDeliveryTarget::AllExceptLocal => SGIR::TargetListFilter::AllExceptLocal,
            IpiDeliveryTarget::Specified(_mask) => {
                // TODO: need to handle self-ipi case, releasing the lock somehow
                todo!();
            }
        } + SGIR::INTID.val(interrupt_id as u32);

        self.shared_regs.lock().SGIR.write(value);
    }

    fn local_gic_target_mask(&self) -> u32 {
        self.banked_regs.ITARGETSR[0].read(ITARGETSR::Offset0)
    }

    fn enable_irq_inner(&self, irq: usize) {
        let reg = irq >> 5;
        let bit = 1u32 << (irq & 0x1F);

        match reg {
            // Private IRQs
            0 => {
                let reg = &self.banked_regs.ISENABLER;

                reg.set(reg.get() | bit);
            }
            // Shared IRQs
            _ => {
                let regs = self.shared_regs.lock();
                let reg = &regs.ISENABLER[reg - 1];

                reg.set(reg.get() | bit);
            }
        }
    }

    pub fn enable_irq(&self, irq: IrqNumber) {
        let irq = irq.get();

        self.enable_irq_inner(irq);
    }

    pub unsafe fn init(&self) {
        let mask = self.local_gic_target_mask();
        let regs = self.shared_regs.lock();

        debugln!("Enabling GICv2 GICD, max IRQ number: {}", regs.num_irqs());

        regs.CTLR.modify(CTLR::Enable::SET);

        for reg in regs.itargets_slice().iter() {
            // Redirect all IRQs to cpu0 (this CPU)
            reg.write(
                ITARGETSR::Offset0.val(mask)
                    + ITARGETSR::Offset1.val(mask)
                    + ITARGETSR::Offset2.val(mask)
                    + ITARGETSR::Offset3.val(mask),
            );
        }
    }
}
src/arch/aarch64/gic/mod.rs, new file: 187 lines
@@ -0,0 +1,187 @@
//! ARM Generic Interrupt Controller v2 driver
use core::sync::atomic::Ordering;

use aarch64_cpu::asm::barrier;
use abi::error::Error;
use spinning_top::Spinlock;

use crate::{
    arch::CpuMessage,
    device::{
        interrupt::{InterruptController, InterruptSource, IpiDeliveryTarget},
        Device,
    },
    mem::device::{DeviceMemory, DeviceMemoryIo},
    util::OneTimeInit,
};

use self::{gicc::Gicc, gicd::Gicd};

use super::{cpu::Cpu, exception::ipi_handler, smp::CPU_COUNT};

const MAX_IRQ: usize = 300;
const IPI_VECTOR: u64 = 1;

pub mod gicc;
pub mod gicd;

/// Wrapper type for ARM interrupt vector
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct IrqNumber(usize);

/// ARM Generic Interrupt Controller v2
pub struct Gic {
    gicc: OneTimeInit<Gicc>,
    gicd: OneTimeInit<Gicd>,
    gicd_base: usize,
    gicc_base: usize,
    irq_table: Spinlock<[Option<&'static (dyn InterruptSource + Sync)>; MAX_IRQ]>,
}

impl IrqNumber {
    /// Returns the underlying vector number
    #[inline(always)]
    pub const fn get(self) -> usize {
        self.0
    }

    /// Wraps the interrupt vector value in the [IrqNumber] type.
    ///
    /// # Panics
    ///
    /// Will panic if `v` is not a valid interrupt number.
    #[inline(always)]
    pub const fn new(v: usize) -> Self {
        assert!(v < MAX_IRQ);
        Self(v)
    }
}

impl Device for Gic {
    fn name(&self) -> &'static str {
        "ARM Generic Interrupt Controller v2"
    }

    unsafe fn init(&self) -> Result<(), Error> {
        let gicd_mmio = DeviceMemory::map("GICv2 Distributor registers", self.gicd_base, 0x1000)?;
        let gicd_mmio_shared = DeviceMemoryIo::new(gicd_mmio.clone());
        let gicd_mmio_banked = DeviceMemoryIo::new(gicd_mmio);
        let gicc_mmio = DeviceMemoryIo::map("GICv2 CPU registers", self.gicc_base)?;

        let gicd = Gicd::new(gicd_mmio_shared, gicd_mmio_banked);
        let gicc = Gicc::new(gicc_mmio);

        gicd.init();
        gicc.init();

        self.gicd.init(gicd);
        self.gicc.init(gicc);

        Ok(())
    }
}

impl InterruptController for Gic {
    type IrqNumber = IrqNumber;

    fn enable_irq(&self, irq: Self::IrqNumber) -> Result<(), Error> {
        self.gicd.get().enable_irq(irq);
        Ok(())
    }

    fn handle_pending_irqs<'irq>(&'irq self, ic: &crate::device::interrupt::IrqContext<'irq>) {
        let gicc = self.gicc.get();
        let irq_number = gicc.pending_irq_number(ic);
        if irq_number >= MAX_IRQ {
            return;
        }

        gicc.clear_irq(irq_number, ic);

        if irq_number as u64 == IPI_VECTOR {
            // IPI received
            let msg = Cpu::local().get_ipi();
            ipi_handler(msg);
            return;
        }

        {
            let table = self.irq_table.lock();
            match table[irq_number] {
                None => panic!("No IRQ handler registered for irq{}", irq_number),
                Some(handler) => {
                    drop(table);
                    handler.handle_irq().expect("IRQ handler failed");
                }
            }
        }
    }

    fn register_handler(
        &self,
        irq: Self::IrqNumber,
        handler: &'static (dyn InterruptSource + Sync),
    ) -> Result<(), Error> {
        let mut table = self.irq_table.lock();
        let irq = irq.get();
        if table[irq].is_some() {
            return Err(Error::AlreadyExists);
        }

        debugln!("Bound irq{} to {:?}", irq, Device::name(handler));
        table[irq] = Some(handler);

        Ok(())
    }

    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error> {
        // TODO message queue insertion should be moved
        match target {
            IpiDeliveryTarget::AllExceptLocal => {
                let local = Cpu::local_id();
                for i in 0..CPU_COUNT.load(Ordering::Acquire) {
                    if i != local as usize {
                        Cpu::push_ipi_queue(i as u32, msg);
                    }
                }
            }
            IpiDeliveryTarget::Specified(_) => todo!(),
        }

        // Issue a memory barrier
        barrier::dsb(barrier::ISH);
        barrier::isb(barrier::SY);

        self.gicd.get().set_sgir(target, IPI_VECTOR);

        Ok(())
    }
}

impl Gic {
    /// Constructs an instance of GICv2.
    ///
    /// # Safety
    ///
    /// The caller must ensure the addresses actually point to the GIC components.
    pub const unsafe fn new(gicd_base: usize, gicc_base: usize) -> Self {
        Self {
            gicc: OneTimeInit::new(),
            gicd: OneTimeInit::new(),
            gicd_base,
            gicc_base,
            irq_table: Spinlock::new([None; MAX_IRQ]),
        }
    }

    /// Initializes GICv2 for an application processor.
    ///
    /// # Safety
    ///
    /// Must not be called more than once per each AP. Must not be called from BSP.
    pub unsafe fn init_smp_ap(&self) -> Result<(), Error> {
        self.gicc.get().init();
        Ok(())
    }
}
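A minimal sketch of how a platform might wire the GIC up. The MMIO base addresses below are QEMU virt machine values used purely as an example (a real platform would take them from the device tree), and platform_irq_init() plus the INTID choice are assumptions, not part of this commit:

use crate::device::{interrupt::InterruptController, Device};

// Hypothetical wiring for QEMU's virt machine GICv2
static GIC: Gic = unsafe { Gic::new(0x0800_0000, 0x0801_0000) };

unsafe fn platform_irq_init() -> Result<(), Error> {
    GIC.init()?;
    // INTID 30 is the EL1 physical timer PPI on typical AArch64 systems;
    // a handler would be attached with GIC.register_handler(...) first
    GIC.enable_irq(IrqNumber::new(30))
}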
src/arch/aarch64/intrinsics.rs, new file: 13 lines
@@ -0,0 +1,13 @@
//! Intrinsic helper functions for AArch64 platforms

/// Returns an absolute address to the given symbol
#[macro_export]
macro_rules! absolute_address {
    ($sym:expr) => {{
        let mut _x: usize;
        unsafe {
            core::arch::asm!("ldr {0}, ={1}", out(reg) _x, sym $sym);
        }
        _x
    }};
}
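This macro matters in early boot: an "ldr reg, =symbol" literal-pool load yields the symbol's link-time (virtual) address even while the CPU is still executing from the physical identity mapping, where PC-relative addressing would produce the physical one. A small usage sketch, with some_entry being hypothetical:

extern "C" fn some_entry(_arg: usize) -> ! {
    loop {}
}

fn entry_virt_address() -> usize {
    // Always the linked (upper-half) address, regardless of the current PC
    absolute_address!(some_entry)
}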
204
src/arch/aarch64/mod.rs
Normal file
@ -0,0 +1,204 @@
//! AArch64 architecture and platforms implementation

use core::sync::atomic::Ordering;

use aarch64_cpu::registers::{DAIF, ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1};
use abi::error::Error;
use plat_qemu::PLATFORM;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};

use crate::{
    absolute_address,
    arch::{
        aarch64::{boot::CPU_INIT_FENCE, cpu::Cpu, devtree::FdtMemoryRegionIter, smp::CPU_COUNT},
        Architecture,
    },
    debug,
    device::platform::Platform,
    fs::devfs,
    mem::{
        heap,
        phys::{self, reserved::reserve_region, PageUsage, PhysicalMemoryRegion},
        ConvertAddress,
    },
    task,
    util::OneTimeInit,
};

use self::{
    devtree::DeviceTree,
    table::{init_fixed_tables, KERNEL_TABLES},
};

pub mod intrinsics;

pub mod plat_qemu;

pub mod boot;
pub mod context;
pub mod cpu;
pub mod devtree;
pub mod exception;
pub mod gic;
pub mod smp;
pub mod table;
pub mod timer;

pub(self) const BOOT_STACK_SIZE: usize = 65536;

#[derive(Clone, Copy)]
#[repr(C, align(0x20))]
pub(self) struct KernelStack {
    data: [u8; BOOT_STACK_SIZE],
}

/// AArch64 platform interface
pub struct AArch64 {
    dt: OneTimeInit<DeviceTree<'static>>,
}

/// Global platform handle
pub static ARCHITECTURE: AArch64 = AArch64 {
    dt: OneTimeInit::new(),
};

impl Architecture for AArch64 {
    const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;

    unsafe fn init_mmu(&self, bsp: bool) {
        if bsp {
            init_fixed_tables();
        }

        let tables_phys = absolute_address!(KERNEL_TABLES).physicalize() as u64;

        if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::Supported) {
            todo!();
        }

        TCR_EL1.modify(
            // General
            TCR_EL1::IPS::Bits_48 +
            // TTBR0
            TCR_EL1::TG0::KiB_4 + TCR_EL1::T0SZ.val(25) + TCR_EL1::SH0::Inner +
            // TTBR1
            TCR_EL1::TG1::KiB_4 + TCR_EL1::T1SZ.val(25) + TCR_EL1::SH1::Outer,
        );

        TTBR0_EL1.set_baddr(tables_phys);
        TTBR1_EL1.set_baddr(tables_phys);

        SCTLR_EL1.modify(SCTLR_EL1::M::Enable);
    }

    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error> {
        unsafe { KERNEL_TABLES.map_device_pages(phys, count) }
    }

    fn wait_for_interrupt() {
        aarch64_cpu::asm::wfi();
    }

    unsafe fn set_interrupt_mask(mask: bool) {
        if mask {
            DAIF.modify(DAIF::I::SET);
        } else {
            DAIF.modify(DAIF::I::CLEAR);
        }
    }

    fn interrupt_mask() -> bool {
        DAIF.read(DAIF::I) != 0
    }
}

impl AArch64 {
    /// Initializes the architecture's device tree
    ///
    /// # Safety
    ///
    /// Only makes sense to call once, during early initialization.
    pub unsafe fn init_device_tree(&self, dtb_phys: usize) {
        let dt = DeviceTree::from_addr(dtb_phys.virtualize());
        self.dt.init(dt);
    }

    /// Returns the device tree
    ///
    /// # Panics
    ///
    /// Will panic if the device tree has not yet been initialized
    pub fn device_tree(&self) -> &DeviceTree {
        self.dt.get()
    }

    unsafe fn init_physical_memory(&self, dtb_phys: usize) -> Result<(), Error> {
        let dt = self.device_tree();

        reserve_region(
            "dtb",
            PhysicalMemoryRegion {
                base: dtb_phys,
                size: dt.size(),
            },
        );

        let regions = FdtMemoryRegionIter::new(dt);
        phys::init_from_iter(regions)
    }
}

/// AArch64 kernel main entry point
pub fn kernel_main(dtb_phys: usize) -> ! {
    // NOTE it is critical that the code does not panic until the debug is set up, otherwise no
    // message will be displayed

    // Unmap TTBR0
    TTBR0_EL1.set(0);

    unsafe {
        AArch64::set_interrupt_mask(true);

        ARCHITECTURE.init_device_tree(dtb_phys);
        PLATFORM.init_primary_serial();
    }
    // Set up debugging functions
    debug::init();

    exception::init_exceptions();

    debugln!("Initializing {} platform", PLATFORM.name());
    unsafe {
        ARCHITECTURE
            .init_physical_memory(dtb_phys)
            .expect("Failed to initialize the physical memory manager");

        // Set up the heap
        let heap_base = phys::alloc_pages_contiguous(16, PageUsage::Used)
            .expect("Could not allocate a block for heap");
        heap::init_heap(heap_base.virtualize(), 16 * 0x1000);

        Cpu::init_local();

        devfs::init();
        PLATFORM.init(true).unwrap();

        let dt = ARCHITECTURE.dt.get();
        if let Err(e) = smp::start_ap_cores(dt) {
            errorln!(
                "Could not initialize AP CPUs: {:?}. Will continue with one CPU.",
                e
            );
        }

        Cpu::init_ipi_queues();

        CPU_INIT_FENCE.signal();
        CPU_INIT_FENCE.wait_all(CPU_COUNT.load(Ordering::Acquire));

        task::init().expect("Failed to initialize the scheduler");

        // Initialize and enter the scheduler
        task::enter();
    }
}
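A side note on the TCR_EL1 values above (my arithmetic, not part of the commit): TnSZ = 25 gives each translation half a 39-bit, 512 GiB range, which is what makes the 3-level (L1..L3) 4 KiB-granule layout in table.rs work out. A hypothetical compile-time check:

// T0SZ/T1SZ = 25 means each TTBR covers 2^(64 - 25) bytes = 512 GiB.
const VA_BITS: u32 = 64 - 25;
const _: () = assert!(1usize << VA_BITS == 512 << 30);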
83
src/arch/aarch64/plat_qemu/mod.rs
Normal file
@ -0,0 +1,83 @@
//! QEMU "virt" platform implementation for AArch64
use aarch64_cpu::registers::{CNTP_CTL_EL0, CNTP_TVAL_EL0};
use abi::error::Error;
use tock_registers::interfaces::Writeable;

use crate::{
    device::{
        interrupt::{InterruptController, InterruptSource},
        platform::Platform,
        serial::{pl011::Pl011, SerialDevice},
        timer::TimestampSource,
        Device,
    },
    fs::devfs::{self, CharDeviceType},
};

use super::{
    gic::{Gic, IrqNumber},
    timer::ArmTimer,
};

/// AArch64 "virt" platform implementation
pub struct QemuPlatform {
    gic: Gic,
    pl011: Pl011,
    local_timer: ArmTimer,
}

impl Platform for QemuPlatform {
    type IrqNumber = IrqNumber;

    const KERNEL_PHYS_BASE: usize = 0x40080000;

    unsafe fn init(&'static self, is_bsp: bool) -> Result<(), Error> {
        if is_bsp {
            self.gic.init()?;

            self.pl011.init_irq()?;
            devfs::add_char_device(&self.pl011, CharDeviceType::TtySerial)?;

            self.local_timer.init()?;
            self.local_timer.init_irq()?;
        } else {
            self.gic.init_smp_ap()?;

            // TODO somehow merge this with the rest of the code
            CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::CLEAR);
            CNTP_TVAL_EL0.set(10000000);
            self.gic.enable_irq(IrqNumber::new(30))?;
        }

        Ok(())
    }

    unsafe fn init_primary_serial(&self) {
        self.pl011.init().ok();
    }

    fn name(&self) -> &'static str {
        "qemu"
    }

    fn primary_serial(&self) -> Option<&dyn SerialDevice> {
        Some(&self.pl011)
    }

    fn interrupt_controller(&self) -> &dyn InterruptController<IrqNumber = Self::IrqNumber> {
        &self.gic
    }

    fn timestamp_source(&self) -> &dyn TimestampSource {
        &self.local_timer
    }
}

/// AArch64 "virt" platform
pub static PLATFORM: QemuPlatform = unsafe {
    QemuPlatform {
        pl011: Pl011::new(0x09000000, IrqNumber::new(33)),
        gic: Gic::new(0x08000000, 0x08010000),
        local_timer: ArmTimer::new(IrqNumber::new(30)),
    }
};
131
src/arch/aarch64/smp.rs
Normal file
@ -0,0 +1,131 @@
//! Symmetric multiprocessing support for aarch64
use core::{
    arch::asm,
    sync::atomic::{AtomicUsize, Ordering},
};

use abi::error::Error;
use fdt_rs::prelude::PropReader;

use crate::{
    absolute_address,
    arch::aarch64::boot::__aarch64_ap_lower_entry,
    mem::{
        phys::{self, PageUsage},
        ConvertAddress, KERNEL_VIRT_OFFSET,
    },
};

use super::devtree::{self, DeviceTree};

/// ARM Power State Coordination Interface
pub struct Psci {}

/// Number of online CPUs, initially set to 1 (BSP processor is up)
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);

impl Psci {
    /// Function ID for CPU startup request
    const CPU_ON: u32 = 0xC4000003;

    /// Constructs an interface instance for PSCI
    pub const fn new() -> Self {
        Self {}
    }

    #[inline]
    unsafe fn call(&self, mut x0: u64, x1: u64, x2: u64, x3: u64) -> u64 {
        asm!("hvc #0", inout("x0") x0, in("x1") x1, in("x2") x2, in("x3") x3);
        x0
    }

    /// Enables a single processor through an hvc/svc call.
    ///
    /// # Safety
    ///
    /// Calling this outside of the initialization sequence or more than once may lead to
    /// unexpected behavior.
    pub unsafe fn cpu_on(&self, target_cpu: usize, entry_point_address: usize, context_id: usize) {
        self.call(
            Self::CPU_ON as _,
            target_cpu as _,
            entry_point_address as _,
            context_id as _,
        );
    }
}

/// Starts application processors using the method specified in the device tree.
///
/// TODO: currently does not handle systems where APs are already started before entry.
///
/// # Safety
///
/// The caller must ensure the physical memory manager was initialized, virtual memory tables are
/// set up and the function has not been called before.
pub unsafe fn start_ap_cores(dt: &DeviceTree) -> Result<(), Error> {
    let cpus = dt.node_by_path("/cpus").unwrap();
    let psci = Psci::new();

    for cpu in cpus.children() {
        let Some(compatible) = devtree::find_prop(&cpu, "compatible") else {
            continue;
        };
        let Ok(compatible) = compatible.str() else {
            continue;
        };
        if !compatible.starts_with("arm,cortex-a") {
            continue;
        }

        let reg = devtree::find_prop(&cpu, "reg").unwrap().u32(0).unwrap();
        if reg == 0 {
            continue;
        }

        debugln!(
            "Will start {}, compatible={:?}, reg={}",
            cpu.name().unwrap(),
            compatible,
            reg
        );

        const AP_STACK_PAGES: usize = 4;
        let stack_pages = phys::alloc_pages_contiguous(AP_STACK_PAGES, PageUsage::Used)?;
        debugln!(
            "{} stack: {:#x}..{:#x}",
            cpu.name().unwrap(),
            stack_pages,
            stack_pages + AP_STACK_PAGES * 0x1000
        );
        // Wait for the CPU to come up
        let old_count = CPU_COUNT.load(Ordering::Acquire);

        psci.cpu_on(
            reg as usize,
            absolute_address!(__aarch64_ap_entry).physicalize(),
            stack_pages + AP_STACK_PAGES * 0x1000,
        );

        while CPU_COUNT.load(Ordering::Acquire) == old_count {
            aarch64_cpu::asm::wfe();
        }

        debugln!("{} is up", cpu.name().unwrap());
    }

    Ok(())
}

#[naked]
unsafe extern "C" fn __aarch64_ap_entry() -> ! {
    asm!(
        r#"
        mov sp, x0
        bl {entry} - {kernel_virt_offset}
        "#,
        entry = sym __aarch64_ap_lower_entry,
        kernel_virt_offset = const KERNEL_VIRT_OFFSET,
        options(noreturn)
    );
}
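A note on the handshake in start_ap_cores() (my sketch, not part of the commit): the wfe() loop relies on the AP side bumping CPU_COUNT once it finishes its lower entry. The counterpart lives in boot.rs, which is outside this excerpt; it presumably looks something like:

// Hypothetical AP-side counterpart: publish the incremented CPU count, then
// wake the BSP parked in wfe() inside start_ap_cores().
CPU_COUNT.fetch_add(1, Ordering::Release);
unsafe { core::arch::asm!("sev") };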
451
src/arch/aarch64/table.rs
Normal file
@ -0,0 +1,451 @@
//! AArch64 virtual memory management facilities
use core::{
    marker::PhantomData,
    ops::{Index, IndexMut},
    sync::atomic::{AtomicU8, Ordering},
};

use abi::error::Error;
use bitflags::bitflags;

use crate::mem::{
    phys::{self, PageUsage},
    table::{EntryLevel, NextPageTable, VirtualMemoryManager},
    ConvertAddress, KERNEL_VIRT_OFFSET,
};

/// TODO
#[derive(Clone)]
#[repr(C)]
pub struct AddressSpace {
    l1: *mut PageTable<L1>,
    asid: u8,
}

/// Page table representing a single level of address translation
#[derive(Clone)]
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
    data: [PageEntry<L>; 512],
}

/// Translation level 1: Entry is 1GiB page/table
#[derive(Clone, Copy)]
pub struct L1;
/// Translation level 2: Entry is 2MiB page/table
#[derive(Clone, Copy)]
pub struct L2;
/// Translation level 3: Entry is 4KiB page
#[derive(Clone, Copy)]
pub struct L3;

/// Tag trait to mark that the page table level may point to a next-level table
pub trait NonTerminalEntryLevel: EntryLevel {
    /// Tag type of the level this entry level may point to
    type NextLevel: EntryLevel;
}

impl NonTerminalEntryLevel for L1 {
    type NextLevel = L2;
}
impl NonTerminalEntryLevel for L2 {
    type NextLevel = L3;
}

bitflags! {
    /// TODO split attrs for different translation levels
    ///
    /// Describes how each page is mapped: access, presence, type of the mapping.
    #[derive(Clone, Copy)]
    pub struct PageAttributes: u64 {
        /// When set, the mapping is considered valid and assumed to point to a page/table
        const PRESENT = 1 << 0;

        /// For L1/L2 mappings, indicates that the mapping points to the next-level translation
        /// table
        const TABLE = 1 << 1;
        /// (Must be set) For L3 mappings, indicates that the mapping points to a page
        const PAGE = 1 << 1;
        /// For L1/L2 mappings, indicates that the mapping points to a page of the given level's
        /// size
        const BLOCK = 0 << 1;

        /// (Must be set) For page/block mappings, indicates to the hardware that the page has
        /// been accessed
        const ACCESS = 1 << 10;

        /// For page/block mappings, allows both user and kernel code to read/write to the page
        const AP_BOTH_READWRITE = 1 << 6;
        /// For page/block mappings, only allows read access for EL0/EL1
        const AP_BOTH_READONLY = 3 << 6;
    }
}

impl const EntryLevel for L1 {
    fn index(addr: usize) -> usize {
        (addr >> 30) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x3FFFFFFF
    }
}
impl const EntryLevel for L2 {
    fn index(addr: usize) -> usize {
        (addr >> 21) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0x1FFFFF
    }
}
impl const EntryLevel for L3 {
    fn index(addr: usize) -> usize {
        (addr >> 12) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        addr & 0xFFF
    }
}

/// Represents a single entry in a translation table
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct PageEntry<L>(u64, PhantomData<L>);

/// Fixed-layout kernel-space address mapping tables
pub struct FixedTables {
    l1: PageTable<L1>,
    device_l2: PageTable<L2>,
    device_l3: PageTable<L3>,

    device_l3i: usize,
}

impl PageEntry<L3> {
    /// Creates a 4KiB page mapping
    pub fn page(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64)
                | (PageAttributes::PAGE | PageAttributes::PRESENT | PageAttributes::ACCESS | attrs)
                    .bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the page this entry refers to, or None if it does not
    /// refer to one
    pub fn as_page(self) -> Option<usize> {
        let mask = (PageAttributes::PRESENT | PageAttributes::PAGE).bits();

        if self.0 & mask == mask {
            Some((self.0 & !0xFFF) as usize)
        } else {
            None
        }
    }
}

impl<T: NonTerminalEntryLevel> PageEntry<T> {
    /// Creates a block mapping of the given level's size (1GiB for L1, 2MiB for L2)
    pub fn block(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64)
                | (PageAttributes::BLOCK
                    | PageAttributes::PRESENT
                    | PageAttributes::ACCESS
                    | attrs)
                    .bits(),
            PhantomData,
        )
    }

    /// Creates a mapping pointing to the next-level translation table
    pub fn table(phys: usize, attrs: PageAttributes) -> Self {
        Self(
            (phys as u64) | (PageAttributes::TABLE | PageAttributes::PRESENT | attrs).bits(),
            PhantomData,
        )
    }

    /// Returns the physical address of the table this entry refers to, or None if it does not
    /// refer to one
    pub fn as_table(self) -> Option<usize> {
        if self.0 & (PageAttributes::TABLE | PageAttributes::PRESENT).bits()
            == (PageAttributes::TABLE | PageAttributes::PRESENT).bits()
        {
            Some((self.0 & !0xFFF) as usize)
        } else {
            None
        }
    }
}

impl<L: EntryLevel> PageEntry<L> {
    /// Represents an absent/invalid mapping in the table
    pub const INVALID: Self = Self(0, PhantomData);

    /// Converts a raw mapping value into this wrapper type
    ///
    /// # Safety
    ///
    /// The caller is responsible for making sure that `raw` is a valid mapping value for the
    /// current translation level.
    pub unsafe fn from_raw(raw: u64) -> Self {
        Self(raw, PhantomData)
    }

    /// Returns `true` if the entry refers to some table/block/page
    pub fn is_present(&self) -> bool {
        self.0 & PageAttributes::PRESENT.bits() != 0
    }
}

impl<L: NonTerminalEntryLevel> NextPageTable for PageTable<L> {
    type NextLevel = PageTable<L::NextLevel>;

    fn get_mut(&mut self, index: usize) -> Option<&'static mut Self::NextLevel> {
        let entry = self[index];

        entry
            .as_table()
            .map(|addr| unsafe { &mut *(addr.virtualize() as *mut Self::NextLevel) })
    }

    fn get_mut_or_alloc(&mut self, index: usize) -> Result<&'static mut Self::NextLevel, Error> {
        let entry = self[index];

        if let Some(table) = entry.as_table() {
            Ok(unsafe { &mut *(table.virtualize() as *mut Self::NextLevel) })
        } else {
            let table = PageTable::new_zeroed()?;
            self[index] = PageEntry::<L>::table(table.physical_address(), PageAttributes::empty());
            Ok(table)
        }
    }
}

impl<L: EntryLevel> PageTable<L> {
    /// Constructs a page table with all entries marked as invalid
    pub const fn zeroed() -> Self {
        Self {
            data: [PageEntry::INVALID; 512],
        }
    }

    /// Allocates a new page table, filling it with non-present entries
    pub fn new_zeroed() -> Result<&'static mut Self, Error> {
        let page = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() };
        let table = unsafe { &mut *(page as *mut Self) };
        for i in 0..512 {
            table[i] = PageEntry::INVALID;
        }
        Ok(table)
    }

    /// Returns a physical address pointing to this page table
    pub fn physical_address(&self) -> usize {
        // &self may already be a physical address
        let addr = self.data.as_ptr() as usize;
        if addr < KERNEL_VIRT_OFFSET {
            addr
        } else {
            unsafe { addr.physicalize() }
        }
    }
}

impl<L: EntryLevel> Index<usize> for PageTable<L> {
    type Output = PageEntry<L>;

    fn index(&self, index: usize) -> &Self::Output {
        &self.data[index]
    }
}

impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.data[index]
    }
}

impl FixedTables {
    /// Constructs an empty table group
    pub const fn zeroed() -> Self {
        Self {
            l1: PageTable::zeroed(),
            device_l2: PageTable::zeroed(),
            device_l3: PageTable::zeroed(),

            device_l3i: 0,
        }
    }

    /// Maps a physical memory region as device memory and returns its allocated base address
    pub fn map_device_pages(&mut self, phys: usize, count: usize) -> Result<usize, Error> {
        if count > 512 * 512 {
            panic!("Unsupported device memory mapping size");
        } else if count > 512 {
            // 2MiB mappings
            todo!();
        } else {
            // 4KiB mappings
            if self.device_l3i + count > 512 {
                return Err(Error::OutOfMemory);
            }

            let virt = DEVICE_VIRT_OFFSET + (self.device_l3i << 12);
            for i in 0..count {
                self.device_l3[self.device_l3i + i] =
                    PageEntry::page(phys + i * 0x1000, PageAttributes::empty());
            }
            self.device_l3i += count;

            tlb_flush_vaae1(virt);

            Ok(virt)
        }
    }
}

impl VirtualMemoryManager for AddressSpace {
    fn allocate(
        &self,
        hint: Option<usize>,
        len: usize,
        attrs: PageAttributes,
    ) -> Result<usize, Error> {
        if hint.is_some() {
            todo!();
        }

        const TRY_ALLOC_START: usize = 0x100000000;
        const TRY_ALLOC_END: usize = 0xF00000000;

        'l0: for base in (TRY_ALLOC_START..TRY_ALLOC_END - len * 0x1000).step_by(0x1000) {
            for i in 0..len {
                if self.translate(base + i * 0x1000).is_some() {
                    continue 'l0;
                }
            }

            for i in 0..len {
                let page = phys::alloc_page(PageUsage::Used)?;
                self.map_page(base + i * 0x1000, page, attrs)?;
            }

            return Ok(base);
        }

        Err(Error::OutOfMemory)
    }

    fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error> {
        for page in (addr..addr + len).step_by(0x1000) {
            let Some(_phys) = self.translate(page) else {
                todo!();
            };

            self.write_entry(page, PageEntry::INVALID, true)?;
        }

        Ok(())
    }
}

impl AddressSpace {
    /// Allocates an empty address space with all entries marked as non-present
    pub fn new_empty() -> Result<Self, Error> {
        static LAST_ASID: AtomicU8 = AtomicU8::new(1);

        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);

        let l1 = unsafe { phys::alloc_page(PageUsage::Used)?.virtualize() as *mut PageTable<L1> };

        for i in 0..512 {
            unsafe {
                (*l1)[i] = PageEntry::INVALID;
            }
        }

        Ok(Self { l1, asid })
    }

    unsafe fn as_mut(&self) -> &'static mut PageTable<L1> {
        self.l1.as_mut().unwrap()
    }

    // TODO return page size and attributes
    /// Returns the physical address to which the `virt` address is mapped
    pub fn translate(&self, virt: usize) -> Option<usize> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l2 = unsafe { self.as_mut().get_mut(l1i) }?;
        let l3 = l2.get_mut(l2i)?;

        l3[l3i].as_page()
    }

    // Write a single 4KiB entry
    fn write_entry(&self, virt: usize, entry: PageEntry<L3>, overwrite: bool) -> Result<(), Error> {
        let l1i = L1::index(virt);
        let l2i = L2::index(virt);
        let l3i = L3::index(virt);

        let l2 = unsafe { self.as_mut().get_mut_or_alloc(l1i) }?;
        let l3 = l2.get_mut_or_alloc(l2i)?;

        if l3[l3i].is_present() && !overwrite {
            todo!()
        }
        l3[l3i] = entry;

        Ok(())
    }

    /// Inserts a single 4KiB virt -> phys mapping into the address space
    pub fn map_page(&self, virt: usize, phys: usize, attrs: PageAttributes) -> Result<(), Error> {
        self.write_entry(virt, PageEntry::page(phys, attrs), true)
    }

    /// Returns the physical address of the address space (to be used in a TTBRn_ELx)
    pub fn physical_address(&self) -> usize {
        unsafe { (self.l1 as usize).physicalize() | ((self.asid as usize) << 48) }
    }
}

/// Flushes the virtual address from TLB
pub fn tlb_flush_vaae1(page: usize) {
    assert_eq!(page & 0xFFF, 0);
    unsafe {
        core::arch::asm!("tlbi vaae1, {addr}", addr = in(reg) page);
    }
}

/// Initializes mappings for the kernel and device memory tables.
///
/// # Safety
///
/// Only allowed to be called once during the lower-half part of the initialization process.
pub unsafe fn init_fixed_tables() {
    // Map the first 256GiB
    for i in 0..256 {
        KERNEL_TABLES.l1[i] = PageEntry::<L1>::block(i << 30, PageAttributes::empty());
    }

    KERNEL_TABLES.l1[256] = PageEntry::<L1>::table(
        KERNEL_TABLES.device_l2.physical_address(),
        PageAttributes::empty(),
    );
    KERNEL_TABLES.device_l2[0] = PageEntry::<L2>::table(
        KERNEL_TABLES.device_l3.physical_address(),
        PageAttributes::empty(),
    );
}

/// Offset applied to device virtual memory mappings
pub const DEVICE_VIRT_OFFSET: usize = KERNEL_VIRT_OFFSET + (256 << 30);
/// Global kernel address space translation tables
pub static mut KERNEL_TABLES: FixedTables = FixedTables::zeroed();
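To illustrate the EntryLevel helpers above (my worked example, not part of the commit): a virtual address splits into three 9-bit table indices plus a 12-bit page offset.

// For virt = 0x4008_1234:
//   L1 index = (virt >> 30) & 0x1FF = 1
//   L2 index = (virt >> 21) & 0x1FF = 0
//   L3 index = (virt >> 12) & 0x1FF = 0x81
//   offset   =  virt        & 0xFFF = 0x234
let virt = 0x4008_1234usize;
assert_eq!(L1::index(virt), 1);
assert_eq!(L2::index(virt), 0);
assert_eq!(L3::index(virt), 0x81);
assert_eq!(L3::page_offset(virt), 0x234);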
78
src/arch/aarch64/timer.rs
Normal file
@ -0,0 +1,78 @@
//! AArch64 Generic Timer

use core::time::Duration;

use aarch64_cpu::registers::{CNTFRQ_EL0, CNTPCT_EL0, CNTP_CTL_EL0, CNTP_TVAL_EL0};
use abi::error::Error;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};

use crate::{
    arch::PLATFORM,
    device::{interrupt::InterruptSource, platform::Platform, timer::TimestampSource, Device},
    proc::wait,
};

use super::{cpu::Cpu, gic::IrqNumber};

/// ARM Generic Timer driver
pub struct ArmTimer {
    irq: IrqNumber,
}

/// ARM timer tick interval (in timer counter ticks)
pub const TICK_INTERVAL: u64 = 1000000;

impl Device for ArmTimer {
    fn name(&self) -> &'static str {
        "ARM Generic Timer"
    }

    unsafe fn init(&self) -> Result<(), Error> {
        CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::SET);
        Ok(())
    }
}

impl TimestampSource for ArmTimer {
    fn timestamp(&self) -> Result<Duration, Error> {
        let count = CNTPCT_EL0.get() * 1_000_000;
        let freq = CNTFRQ_EL0.get();

        Ok(Duration::from_nanos((count / freq) * 1_000))
    }
}

impl InterruptSource for ArmTimer {
    fn handle_irq(&self) -> Result<(), Error> {
        CNTP_TVAL_EL0.set(TICK_INTERVAL);
        wait::tick();

        unsafe {
            Cpu::local().queue().yield_cpu();
        }

        Ok(())
    }

    unsafe fn init_irq(&'static self) -> Result<(), Error> {
        let intc = PLATFORM.interrupt_controller();

        intc.register_handler(self.irq, self)?;
        CNTP_CTL_EL0.modify(CNTP_CTL_EL0::IMASK::CLEAR);
        CNTP_TVAL_EL0.set(TICK_INTERVAL);
        intc.enable_irq(self.irq)?;

        Ok(())
    }
}

impl ArmTimer {
    /// Constructs an instance of ARM generic timer.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function has not been called before.
    pub const unsafe fn new(irq: IrqNumber) -> Self {
        Self { irq }
    }
}
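A quick arithmetic check on timestamp() above (mine, not from the commit): scaling the counter by 1_000_000 before dividing by the frequency yields microseconds, and the final * 1_000 converts that to nanoseconds.

// Worked example with QEMU's usual 62.5 MHz generic timer frequency:
// (125_000_000 ticks * 1_000_000 / 62_500_000) * 1_000 ns = 2 seconds
let (count, freq) = (125_000_000u64, 62_500_000u64);
let t = core::time::Duration::from_nanos((count * 1_000_000 / freq) * 1_000);
assert_eq!(t, core::time::Duration::from_secs(2));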
128
src/arch/aarch64/vectors.S
Normal file
@ -0,0 +1,128 @@
// vi:ft=a64asm:

.macro EXC_VECTOR el, ht, bits, kind
.p2align 7
    b __aa\bits\()_el\el\ht\()_\kind
.endm

.macro EXC_HANDLER el, ht, bits, kind
__aa\bits\()_el\el\ht\()_\kind:
.if \bits == 32
    // TODO
    b .
.endif

    EXC_SAVE_STATE
    mov x0, sp
    mov lr, xzr
    bl __aa64_exc_\kind\()_handler
    EXC_RESTORE_STATE
    eret
.endm

// 32 GP register slots (x0..x30 + pad) + 3 special regs (+ pad)
.set PT_REGS_SIZE, (16 * 16 + 16 * 2)

.macro EXC_SAVE_STATE
    sub sp, sp, #PT_REGS_SIZE

    stp x0, x1, [sp, #16 * 0]
    stp x2, x3, [sp, #16 * 1]
    stp x4, x5, [sp, #16 * 2]
    stp x6, x7, [sp, #16 * 3]
    stp x8, x9, [sp, #16 * 4]
    stp x10, x11, [sp, #16 * 5]
    stp x12, x13, [sp, #16 * 6]
    stp x14, x15, [sp, #16 * 7]

    stp x16, x17, [sp, #16 * 8]
    stp x18, x19, [sp, #16 * 9]
    stp x20, x21, [sp, #16 * 10]
    stp x22, x23, [sp, #16 * 11]
    stp x24, x25, [sp, #16 * 12]
    stp x26, x27, [sp, #16 * 13]
    stp x28, x29, [sp, #16 * 14]
    // x31 is not a GP register; pad the last pair with xzr
    stp x30, xzr, [sp, #16 * 15]

    mrs x0, spsr_el1
    mrs x1, elr_el1
    mrs x2, sp_el0

    // TODO
    stp x0, x1, [sp, #16 * 16]
    stp x2, xzr, [sp, #16 * 17]
.endm

.macro EXC_RESTORE_STATE
    ldp x0, x1, [sp, #16 * 16]
    ldp x2, x3, [sp, #16 * 17]

    msr spsr_el1, x0
    msr elr_el1, x1
    msr sp_el0, x2

    ldp x0, x1, [sp, #16 * 0]
    ldp x2, x3, [sp, #16 * 1]
    ldp x4, x5, [sp, #16 * 2]
    ldp x6, x7, [sp, #16 * 3]
    ldp x8, x9, [sp, #16 * 4]
    ldp x10, x11, [sp, #16 * 5]
    ldp x12, x13, [sp, #16 * 6]
    ldp x14, x15, [sp, #16 * 7]

    ldp x16, x17, [sp, #16 * 8]
    ldp x18, x19, [sp, #16 * 9]
    ldp x20, x21, [sp, #16 * 10]
    ldp x22, x23, [sp, #16 * 11]
    ldp x24, x25, [sp, #16 * 12]
    ldp x26, x27, [sp, #16 * 13]
    ldp x28, x29, [sp, #16 * 14]
    // the second slot of this pair is padding; discard it into xzr
    ldp x30, xzr, [sp, #16 * 15]

    add sp, sp, #PT_REGS_SIZE
.endm

.section .text
.p2align 12
__aarch64_el1_vectors:
    EXC_VECTOR 1, t, 64, sync
    EXC_VECTOR 1, t, 64, irq
    EXC_VECTOR 1, t, 64, fiq
    EXC_VECTOR 1, t, 64, serror

    EXC_VECTOR 1, h, 64, sync
    EXC_VECTOR 1, h, 64, irq
    EXC_VECTOR 1, h, 64, fiq
    EXC_VECTOR 1, h, 64, serror

    EXC_VECTOR 0, t, 64, sync
    EXC_VECTOR 0, t, 64, irq
    EXC_VECTOR 0, t, 64, fiq
    EXC_VECTOR 0, t, 64, serror

    EXC_VECTOR 0, t, 32, sync
    EXC_VECTOR 0, t, 32, irq
    EXC_VECTOR 0, t, 32, fiq
    EXC_VECTOR 0, t, 32, serror


.p2align 7
EXC_HANDLER 1, t, 64, sync
EXC_HANDLER 1, t, 64, irq
EXC_HANDLER 1, t, 64, fiq
EXC_HANDLER 1, t, 64, serror

EXC_HANDLER 1, h, 64, sync
EXC_HANDLER 1, h, 64, irq
EXC_HANDLER 1, h, 64, fiq
EXC_HANDLER 1, h, 64, serror

EXC_HANDLER 0, t, 64, sync
EXC_HANDLER 0, t, 64, irq
EXC_HANDLER 0, t, 64, fiq
EXC_HANDLER 0, t, 64, serror

EXC_HANDLER 0, t, 32, sync
EXC_HANDLER 0, t, 32, irq
EXC_HANDLER 0, t, 32, fiq
EXC_HANDLER 0, t, 32, serror
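For reference (my sketch; the real definition would live in exception.rs, which is not part of this excerpt): the frame laid out by EXC_SAVE_STATE corresponds to a Rust struct along these lines.

// Hypothetical Rust mirror of the exception stack frame: 16 pairs of GP
// registers (x0..x30 plus one pad slot), then spsr/elr/sp_el0 plus a pad slot.
#[repr(C)]
struct ExceptionFrame {
    gp_regs: [u64; 32],
    spsr_el1: u64,
    elr_el1: u64,
    sp_el0: u64,
    _pad: u64,
}
// 36 * 8 == 288 == PT_REGS_SIZE (16 * 16 + 16 * 2)
const _: () = assert!(core::mem::size_of::<ExceptionFrame>() == 288);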
48
src/arch/mod.rs
Normal file
@ -0,0 +1,48 @@
//! Provides architecture/platform-specific implementation details
pub mod aarch64;

pub use aarch64::plat_qemu::{QemuPlatform as PlatformImpl, PLATFORM};
pub use aarch64::{AArch64 as ArchitectureImpl, ARCHITECTURE};
use abi::error::Error;

/// Describes messages sent from some CPU to others
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(u64)]
pub enum CpuMessage {
    /// Indicates that the sender CPU entered kernel panic and wants other CPUs to follow
    Panic,
}

/// Interface for architecture-specific facilities
pub trait Architecture {
    /// Address to which the "zero" physical address is mapped in the virtual address space
    const KERNEL_VIRT_OFFSET: usize;

    /// Initializes the memory management unit and sets up virtual memory management.
    /// The `bsp` flag is provided to make sure mapping tables are only initialized once in an
    /// SMP system.
    ///
    /// # Safety
    ///
    /// Unsafe to call if the MMU has already been initialized.
    unsafe fn init_mmu(&self, bsp: bool);

    /// Allocates a virtual mapping for the specified physical memory region
    fn map_device_pages(&self, phys: usize, count: usize) -> Result<usize, Error>;

    // Architecture intrinsics

    /// Suspends CPU until an interrupt is received
    fn wait_for_interrupt();

    /// Sets the local CPU's interrupt mask.
    ///
    /// # Safety
    ///
    /// Enabling interrupts may lead to unexpected behavior unless the context explicitly expects
    /// them.
    unsafe fn set_interrupt_mask(mask: bool);

    /// Returns the local CPU's interrupt mask
    fn interrupt_mask() -> bool;
}
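A common pattern this trait enables (my sketch, not part of the commit) is a scope guard that masks IRQs and restores the previous state on drop:

// Hypothetical guard built on the Architecture trait above.
struct IrqGuard(bool);

impl IrqGuard {
    fn new() -> Self {
        let old = ArchitectureImpl::interrupt_mask();
        unsafe { ArchitectureImpl::set_interrupt_mask(true) };
        Self(old)
    }
}

impl Drop for IrqGuard {
    fn drop(&mut self) {
        // Restore whatever mask state was captured on entry
        unsafe { ArchitectureImpl::set_interrupt_mask(self.0) };
    }
}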
124
src/debug.rs
Normal file
@ -0,0 +1,124 @@
//! Utilities for debug information logging
use core::fmt::{self, Arguments};

use crate::{
    arch::PLATFORM,
    device::{platform::Platform, serial::SerialDevice},
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

/// Defines the severity of the message
#[derive(Clone, Copy)]
pub enum LogLevel {
    /// Debugging and verbose information
    Debug,
    /// General information about transitions in the system state
    Info,
    /// Non-critical abnormalities or notices
    Warning,
    /// Failures of non-essential components
    Error,
    /// Irrecoverable errors which result in kernel panic
    Fatal,
}

struct DebugPrinter {
    sink: &'static dyn SerialDevice,
}

macro_rules! log_print_raw {
    ($level:expr, $($args:tt)+) => {
        $crate::debug::debug_internal(format_args!($($args)+), $level)
    };
}

macro_rules! log_print {
    ($level:expr, $($args:tt)+) => {
        log_print_raw!($level, "cpu{}:{}:{}: {}", $crate::arch::aarch64::cpu::Cpu::local_id(), file!(), line!(), format_args!($($args)+))
    };
}

macro_rules! debug_tpl {
    ($d:tt $name:ident, $nameln:ident, $level:ident) => {
        #[allow(unused_macros)]
        /// Prints the message to the log
        macro_rules! $name {
            ($d($d args:tt)+) => (log_print!($crate::debug::LogLevel::$level, $d($d args)+));
        }

        /// Prints the message to the log, terminated by a newline character
        #[allow(unused_macros)]
        macro_rules! $nameln {
            () => {
                $name!("\n")
            };
            ($d($d args:tt)+) => ($name!("{}\n", format_args!($d($d args)+)));
        }
    };
}

debug_tpl!($ debug, debugln, Debug);
debug_tpl!($ info, infoln, Info);
debug_tpl!($ warn, warnln, Warning);
debug_tpl!($ error, errorln, Error);
debug_tpl!($ fatal, fatalln, Fatal);

#[no_mangle]
static DEBUG_PRINTER: OneTimeInit<IrqSafeSpinlock<DebugPrinter>> = OneTimeInit::new();

impl LogLevel {
    fn log_prefix(self) -> &'static str {
        match self {
            LogLevel::Debug => "",
            LogLevel::Info => "\x1b[36m\x1b[1m",
            LogLevel::Warning => "\x1b[33m\x1b[1m",
            LogLevel::Error => "\x1b[31m\x1b[1m",
            LogLevel::Fatal => "\x1b[38;2;255;0;0m\x1b[1m",
        }
    }

    fn log_suffix(self) -> &'static str {
        match self {
            LogLevel::Debug => "",
            LogLevel::Info => "\x1b[0m",
            LogLevel::Warning => "\x1b[0m",
            LogLevel::Error => "\x1b[0m",
            LogLevel::Fatal => "\x1b[0m",
        }
    }
}

impl fmt::Write for DebugPrinter {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for c in s.bytes() {
            self.sink.send(c).ok();
        }

        Ok(())
    }
}

/// Initializes the debug logging facilities.
///
/// # Panics
///
/// Will panic if called more than once.
pub fn init() {
    DEBUG_PRINTER.init(IrqSafeSpinlock::new(DebugPrinter {
        sink: PLATFORM.primary_serial().unwrap(),
    }));
}

#[doc(hidden)]
pub fn debug_internal(args: Arguments, level: LogLevel) {
    use fmt::Write;

    if DEBUG_PRINTER.is_initialized() {
        let mut printer = DEBUG_PRINTER.get().lock();

        printer.write_str(level.log_prefix()).ok();
        printer.write_fmt(args).ok();
        printer.write_str(level.log_suffix()).ok();
    }
}
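For illustration (not part of the commit): the generated macros are used like the std print!/println! family, with each line prefixed by CPU id, file and line number:

debugln!("Heap: {:#x}..{:#x}", 0x40000000usize, 0x40010000usize);
infoln!("Add char device: {}", "ttyS0");
warnln!("Could not initialize AP CPUs: continuing with one CPU");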
78
src/device/interrupt.rs
Normal file
@ -0,0 +1,78 @@
//! Interrupt-related interfaces
use core::marker::PhantomData;

use abi::error::Error;

use crate::arch::CpuMessage;

use super::Device;

/// Specifies the target(s) of interprocessor interrupt delivery
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum IpiDeliveryTarget {
    /// IPI will be delivered to every CPU except the local one
    AllExceptLocal,
    /// IPI will only be sent to CPUs specified in the mask
    Specified(u64),
}

/// Interface for a device capable of emitting interrupts
pub trait InterruptSource: Device {
    /// Initializes and enables IRQs for the device.
    ///
    /// # Safety
    ///
    /// The caller must ensure the function hasn't been called before.
    unsafe fn init_irq(&'static self) -> Result<(), Error>;

    /// Handles the interrupt raised by the device
    fn handle_irq(&self) -> Result<(), Error>;
}

/// Interface for a device responsible for routing and handling IRQs
pub trait InterruptController: Device {
    /// Interrupt number wrapper type
    type IrqNumber;

    /// Binds an interrupt number to its handler implementation
    fn register_handler(
        &self,
        irq: Self::IrqNumber,
        handler: &'static (dyn InterruptSource + Sync),
    ) -> Result<(), Error>;

    /// Enables the given interrupt number/vector
    fn enable_irq(&self, irq: Self::IrqNumber) -> Result<(), Error>;

    /// Handles all pending interrupts on this controller
    fn handle_pending_irqs<'irq>(&'irq self, ic: &IrqContext<'irq>);

    /// Sends a message to the requested set of CPUs through an interprocessor interrupt.
    ///
    /// # Note
    ///
    /// u64 limits the number of targetable CPUs to (only) 64. Platform-specific implementations
    /// may impose narrower restrictions.
    ///
    /// # Safety
    ///
    /// As the call may alter the flow of execution on CPUs, this function is unsafe.
    unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: CpuMessage) -> Result<(), Error>;
}

/// Token type to indicate that the code is being run from an interrupt handler
pub struct IrqContext<'irq> {
    _0: PhantomData<&'irq ()>,
}

impl<'irq> IrqContext<'irq> {
    /// Constructs an IRQ context token
    ///
    /// # Safety
    ///
    /// Only allowed to be constructed in top-level IRQ handlers
    #[inline(always)]
    pub const unsafe fn new() -> Self {
        Self { _0: PhantomData }
    }
}
21
src/device/mod.rs
Normal file
@ -0,0 +1,21 @@
//! Device management and interfaces
use abi::error::Error;

pub mod interrupt;
pub mod platform;
pub mod serial;
pub mod timer;
pub mod tty;

/// General device interface
pub trait Device {
    /// Initializes the device to a state where it can be used.
    ///
    /// # Safety
    ///
    /// Unsafe to call if the device has already been initialized.
    unsafe fn init(&self) -> Result<(), Error>;

    /// Returns a display name for the device
    fn name(&self) -> &'static str;
}
51
src/device/platform.rs
Normal file
@ -0,0 +1,51 @@
//! Hardware platform interface

use abi::error::Error;

use super::{interrupt::InterruptController, serial::SerialDevice, timer::TimestampSource};

/// Platform interface for interacting with a general hardware set
pub trait Platform {
    /// Interrupt number type for the platform
    type IrqNumber;

    /// Address to which the kernel is expected to be loaded for this platform
    const KERNEL_PHYS_BASE: usize;

    /// Initializes the platform devices to their usable state.
    ///
    /// # Safety
    ///
    /// Unsafe to call if the platform has already been initialized.
    unsafe fn init(&'static self, is_bsp: bool) -> Result<(), Error>;

    /// Initializes the primary serial device to provide the debugging output as early as
    /// possible.
    ///
    /// # Safety
    ///
    /// Unsafe to call if the device has already been initialized.
    unsafe fn init_primary_serial(&self);

    /// Returns a display name for the platform
    fn name(&self) -> &'static str;

    /// Returns a reference to the primary serial device.
    ///
    /// # Note
    ///
    /// May not be initialized at the moment of calling.
    fn primary_serial(&self) -> Option<&dyn SerialDevice>;

    /// Returns a reference to the platform's interrupt controller.
    ///
    /// # Note
    ///
    /// May not be initialized at the moment of calling.
    fn interrupt_controller(&self) -> &dyn InterruptController<IrqNumber = Self::IrqNumber>;

    /// Returns the platform's primary timestamp source.
    ///
    /// # Note
    ///
    /// May not be initialized at the moment of calling.
    fn timestamp_source(&self) -> &dyn TimestampSource;
}
15
src/device/serial/mod.rs
Normal file
@ -0,0 +1,15 @@
//! Serial device interfaces
use abi::error::Error;

use super::Device;

pub mod pl011;

/// Generic serial device interface
pub trait SerialDevice: Device {
    /// Sends (blocking) a single byte into the serial port
    fn send(&self, byte: u8) -> Result<(), Error>;

    /// Receives a single byte from the serial port, blocking if necessary
    fn receive(&self, blocking: bool) -> Result<u8, Error>;
}
187
src/device/serial/pl011.rs
Normal file
@ -0,0 +1,187 @@
//! ARM PL011 driver
use abi::error::Error;
use tock_registers::{
    interfaces::{ReadWriteable, Readable, Writeable},
    register_bitfields, register_structs,
    registers::{ReadOnly, ReadWrite, WriteOnly},
};
use vfs::CharDevice;

use super::SerialDevice;
use crate::{
    arch::{aarch64::gic::IrqNumber, PLATFORM},
    device::{
        interrupt::InterruptSource,
        platform::Platform,
        tty::{CharRing, TtyDevice},
        Device,
    },
    mem::device::DeviceMemoryIo,
    sync::IrqSafeSpinlock,
    util::OneTimeInit,
};

register_bitfields! {
    u32,
    FR [
        TXFF OFFSET(5) NUMBITS(1) [],
        RXFE OFFSET(4) NUMBITS(1) [],
        BUSY OFFSET(3) NUMBITS(1) [],
    ],
    CR [
        RXE OFFSET(9) NUMBITS(1) [],
        TXE OFFSET(8) NUMBITS(1) [],
        UARTEN OFFSET(0) NUMBITS(1) [],
    ],
    ICR [
        ALL OFFSET(0) NUMBITS(11) [],
    ],
    IMSC [
        RXIM OFFSET(4) NUMBITS(1) [],
    ]
}

register_structs! {
    #[allow(non_snake_case)]
    Regs {
        /// Transmit/receive data register
        (0x00 => DR: ReadWrite<u32>),
        (0x04 => _0),
        (0x18 => FR: ReadOnly<u32, FR::Register>),
        (0x1C => _1),
        (0x2C => LCR_H: ReadWrite<u32>),
        (0x30 => CR: ReadWrite<u32, CR::Register>),
        (0x34 => IFLS: ReadWrite<u32>),
        (0x38 => IMSC: ReadWrite<u32, IMSC::Register>),
        (0x3C => _2),
        (0x44 => ICR: WriteOnly<u32, ICR::Register>),
        (0x48 => @END),
    }
}

struct Pl011Inner {
    regs: DeviceMemoryIo<Regs>,
}

/// PL011 device instance
pub struct Pl011 {
    inner: OneTimeInit<IrqSafeSpinlock<Pl011Inner>>,
    base: usize,
    irq: IrqNumber,
    ring: CharRing<16>,
}

impl Pl011Inner {
    fn send_byte(&mut self, b: u8) -> Result<(), Error> {
        while self.regs.FR.matches_all(FR::TXFF::SET) {
            core::hint::spin_loop();
        }
        self.regs.DR.set(b as u32);
        Ok(())
    }

    fn recv_byte(&mut self, blocking: bool) -> Result<u8, Error> {
        if self.regs.FR.matches_all(FR::RXFE::SET) {
            if !blocking {
                todo!();
            }
            while self.regs.FR.matches_all(FR::RXFE::SET) {
                core::hint::spin_loop();
            }
        }

        Ok(self.regs.DR.get() as u8)
    }

    unsafe fn init(&mut self) {
        self.regs.CR.set(0);
        self.regs.ICR.write(ICR::ALL::CLEAR);
        self.regs
            .CR
            .write(CR::UARTEN::SET + CR::TXE::SET + CR::RXE::SET);
    }
}

impl TtyDevice<16> for Pl011 {
    fn ring(&self) -> &CharRing<16> {
        &self.ring
    }
}

impl CharDevice for Pl011 {
    fn write(&self, blocking: bool, data: &[u8]) -> Result<usize, Error> {
        assert!(blocking);
        self.line_write(data)
    }

    fn read(&'static self, blocking: bool, data: &mut [u8]) -> Result<usize, Error> {
        assert!(blocking);
        self.line_read(data)
    }
}

impl SerialDevice for Pl011 {
    fn send(&self, byte: u8) -> Result<(), Error> {
        self.inner.get().lock().send_byte(byte)
    }

    fn receive(&self, blocking: bool) -> Result<u8, Error> {
        self.inner.get().lock().recv_byte(blocking)
    }
}

impl InterruptSource for Pl011 {
    unsafe fn init_irq(&'static self) -> Result<(), Error> {
        let intc = PLATFORM.interrupt_controller();

        intc.register_handler(self.irq, self)?;
        self.inner.get().lock().regs.IMSC.modify(IMSC::RXIM::SET);
        intc.enable_irq(self.irq)?;

        Ok(())
    }

    fn handle_irq(&self) -> Result<(), Error> {
        let inner = self.inner.get().lock();
        inner.regs.ICR.write(ICR::ALL::CLEAR);

        let byte = inner.regs.DR.get();
        drop(inner);

        self.recv_byte(byte as u8);

        Ok(())
    }
}

impl Device for Pl011 {
    unsafe fn init(&self) -> Result<(), Error> {
        let mut inner = Pl011Inner {
            regs: DeviceMemoryIo::map("pl011 UART", self.base)?,
        };
        inner.init();

        self.inner.init(IrqSafeSpinlock::new(inner));
        Ok(())
    }

    fn name(&self) -> &'static str {
        "pl011"
    }
}

impl Pl011 {
    /// Constructs an instance of the device at `base`.
    ///
    /// # Safety
    ///
    /// The caller must ensure the address is valid.
    pub const unsafe fn new(base: usize, irq: IrqNumber) -> Self {
        Self {
            inner: OneTimeInit::new(),
            ring: CharRing::new(),
            base,
            irq,
        }
    }
}
|
13
src/device/timer.rs
Normal file
13
src/device/timer.rs
Normal file
@ -0,0 +1,13 @@
|
||||
//! Time-providing device interfaces
|
||||
use core::time::Duration;
|
||||
|
||||
use abi::error::Error;
|
||||
|
||||
use super::Device;
|
||||
|
||||
/// Interface for devices capable of providing some notion of time
|
||||
pub trait TimestampSource: Device {
|
||||
/// Returns current time signalled by the device. The time may not be a "real" time and instead
|
||||
/// is assumed to be monotonically increasing.
|
||||
fn timestamp(&self) -> Result<Duration, Error>;
|
||||
}
|
155
src/device/tty.rs
Normal file
@ -0,0 +1,155 @@
//! Terminal driver implementation
use abi::error::Error;

use crate::{proc::wait::Wait, sync::IrqSafeSpinlock};

use super::serial::SerialDevice;

struct CharRingInner<const N: usize> {
    rd: usize,
    wr: usize,
    data: [u8; N],
    flags: u8,
}

/// Ring buffer for a character device. Handles reads, writes and channel notifications for a
/// terminal device.
pub struct CharRing<const N: usize> {
    wait_read: Wait,
    wait_write: Wait,
    inner: IrqSafeSpinlock<CharRingInner<N>>,
}

/// Terminal device interface
pub trait TtyDevice<const N: usize>: SerialDevice {
    /// Returns the ring buffer associated with the device
    fn ring(&self) -> &CharRing<N>;

    /// Returns `true` if data is ready to be read from or written to the terminal
    fn is_ready(&self, write: bool) -> Result<bool, Error> {
        let ring = self.ring();
        if write {
            todo!();
        } else {
            Ok(ring.is_readable())
        }
    }

    /// Sends a single byte to the terminal
    fn line_send(&self, byte: u8) -> Result<(), Error> {
        self.send(byte)
    }

    /// Receives a single byte from the terminal
    fn recv_byte(&self, byte: u8) {
        let ring = self.ring();
        ring.putc(byte, false).ok();
    }

    /// Reads and processes data from the terminal
    fn line_read(&'static self, data: &mut [u8]) -> Result<usize, Error> {
        let ring = self.ring();

        if data.is_empty() {
            return Ok(0);
        }

        let byte = ring.getc()?;
        data[0] = byte;
        Ok(1)
    }

    /// Processes and writes the data to the terminal
    fn line_write(&self, data: &[u8]) -> Result<usize, Error> {
        for &byte in data {
            self.line_send(byte)?;
        }
        Ok(data.len())
    }

    /// Writes raw data to the terminal bypassing the processing functions
    fn raw_write(&self, _data: &[u8]) -> Result<usize, Error> {
        todo!();
    }
}

impl<const N: usize> CharRingInner<N> {
    #[inline]
    const fn is_readable(&self) -> bool {
        if self.rd <= self.wr {
            (self.wr - self.rd) > 0
        } else {
            (self.wr + (N - self.rd)) > 0
        }
    }

    #[inline]
    unsafe fn read_unchecked(&mut self) -> u8 {
        let res = self.data[self.rd];
        self.rd = (self.rd + 1) % N;
        res
    }

    #[inline]
    unsafe fn write_unchecked(&mut self, ch: u8) {
        self.data[self.wr] = ch;
        self.wr = (self.wr + 1) % N;
    }
}

impl<const N: usize> CharRing<N> {
    /// Constructs an empty ring buffer
    pub const fn new() -> Self {
        Self {
            inner: IrqSafeSpinlock::new(CharRingInner {
                rd: 0,
                wr: 0,
                data: [0; N],
                flags: 0,
            }),
            wait_read: Wait::new("char_ring_read"),
            wait_write: Wait::new("char_ring_write"),
        }
    }

    /// Returns `true` if the buffer has data to read
    pub fn is_readable(&self) -> bool {
        let inner = self.inner.lock();
        inner.is_readable() || inner.flags != 0
    }

    /// Reads a single character from the buffer, blocking until available
    pub fn getc(&'static self) -> Result<u8, Error> {
        let mut lock = self.inner.lock();
        loop {
            if !lock.is_readable() && lock.flags == 0 {
                drop(lock);
                self.wait_read.wait(None)?;
                lock = self.inner.lock();
            } else {
                break;
            }
        }

        let byte = unsafe { lock.read_unchecked() };
        drop(lock);
        self.wait_write.wakeup_one();
        // TODO WAIT_SELECT
        Ok(byte)
    }

    /// Sends a single character to the buffer
    pub fn putc(&self, ch: u8, blocking: bool) -> Result<(), Error> {
        let mut lock = self.inner.lock();
        if blocking {
            todo!();
        }
        unsafe {
            lock.write_unchecked(ch);
        }
        drop(lock);
        self.wait_read.wakeup_one();
        // TODO WAIT_SELECT
        Ok(())
    }
}
|
57
src/fs/devfs.rs
Normal file
57
src/fs/devfs.rs
Normal file
@ -0,0 +1,57 @@
|
||||
//! Device virtual file system
|
||||
use core::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use abi::error::Error;
|
||||
use alloc::{boxed::Box, format, string::String};
|
||||
use vfs::{CharDevice, CharDeviceWrapper, Vnode, VnodeKind, VnodeRef};
|
||||
|
||||
use crate::util::OneTimeInit;
|
||||
|
||||
/// Describes the kind of a character device
|
||||
#[derive(Debug)]
|
||||
pub enum CharDeviceType {
|
||||
/// Serial terminal
|
||||
TtySerial,
|
||||
}
|
||||
|
||||
static DEVFS_ROOT: OneTimeInit<VnodeRef> = OneTimeInit::new();
|
||||
|
||||
/// Sets up the device filesystem
|
||||
pub fn init() {
|
||||
let node = Vnode::new("", VnodeKind::Directory);
|
||||
DEVFS_ROOT.init(node);
|
||||
}
|
||||
|
||||
/// Returns the root of the devfs.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Will panic if the devfs hasn't yet been initialized.
|
||||
pub fn root() -> &'static VnodeRef {
|
||||
DEVFS_ROOT.get()
|
||||
}
|
||||
|
||||
fn _add_char_device(dev: &'static dyn CharDevice, name: String) -> Result<(), Error> {
|
||||
infoln!("Add char device: {}", name);
|
||||
|
||||
let node = Vnode::new(name, VnodeKind::Char);
|
||||
node.set_data(Box::new(CharDeviceWrapper::new(dev)));
|
||||
|
||||
DEVFS_ROOT.get().add_child(node);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Adds a character device to the devfs
|
||||
pub fn add_char_device(dev: &'static dyn CharDevice, kind: CharDeviceType) -> Result<(), Error> {
|
||||
static TTYS_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
let (count, prefix) = match kind {
|
||||
CharDeviceType::TtySerial => (&TTYS_COUNT, "ttyS"),
|
||||
};
|
||||
|
||||
let value = count.fetch_add(1, Ordering::AcqRel);
|
||||
let name = format!("{}{}", prefix, value);
|
||||
|
||||
_add_char_device(dev, name)
|
||||
}
|
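For illustration (my sketch; UART0 and UART1 are hypothetical statics, and the PL011 is the only CharDevice in this commit): each TtySerial registration bumps TTYS_COUNT, so successive calls produce sequentially numbered nodes.

devfs::add_char_device(&UART0, CharDeviceType::TtySerial)?; // creates "ttyS0"
devfs::add_char_device(&UART1, CharDeviceType::TtySerial)?; // creates "ttyS1"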
3
src/fs/mod.rs
Normal file
@ -0,0 +1,3 @@
//! Filesystem implementations

pub mod devfs;
86
src/main.rs
Normal file
@ -0,0 +1,86 @@
//! osdev-x kernel crate
#![feature(
    naked_functions,
    asm_const,
    panic_info_message,
    optimize_attribute,
    const_trait_impl,
    maybe_uninit_slice,
    linked_list_cursors
)]
#![allow(clippy::new_without_default)]
#![warn(missing_docs)]
#![no_std]
#![no_main]

extern crate yggdrasil_abi as abi;

use abi::io::{OpenFlags, RawFd};
use task::process::Process;
use vfs::IoContext;

use crate::fs::devfs;

extern crate alloc;

#[macro_use]
pub mod debug;
#[macro_use]
pub mod arch;

pub mod device;
pub mod fs;
pub mod mem;
pub mod panic;
pub mod proc;
pub mod sync;
pub mod syscall;
pub mod task;
pub mod util;

/// Entry point for common kernel code.
///
/// # Note
///
/// This function is meant to be used as a kernel-space process after all the platform-specific
/// initialization has finished.
pub fn kernel_main() {
    // static USER_PROGRAM: &[u8] = include_bytes!(concat!(
    //     "../../target/aarch64-unknown-yggdrasil/",
    //     env!("PROFILE"),
    //     "/test_program"
    // ));

    // let devfs_root = devfs::root();
    // let tty_node = devfs_root.lookup("ttyS0").unwrap();

    // let ioctx = IoContext::new(devfs_root.clone());

    // // Spawn a test user task
    // let proc =
    //     proc::exec::create_from_memory(USER_PROGRAM, &["user-program", "argument 1", "argument 2"]);

    // match proc {
    //     Ok(proc) => {
    //         // Setup I/O for the process
    //         // let mut io = proc.io.lock();
    //         // io.set_file(RawFd::STDOUT, todo!()).unwrap();
    //         {
    //             let mut io = proc.io.lock();
    //             io.set_ioctx(ioctx);
    //             let stdout = tty_node.open(OpenFlags::new().write()).unwrap();
    //             let stderr = stdout.clone();

    //             io.set_file(RawFd::STDOUT, stdout).unwrap();
    //             io.set_file(RawFd::STDERR, stderr).unwrap();
    //         }

    //         proc.enqueue_somewhere();
    //     }
    //     Err(err) => {
    //         warnln!("Failed to create user process: {:?}", err);
    //     }
    // };

    Process::current().exit(0);
}
|
76
src/mem/device.rs
Normal file
76
src/mem/device.rs
Normal file
@ -0,0 +1,76 @@
|
||||
//! Facilities for mapping devices to virtual address space
|
||||
use core::{marker::PhantomData, mem::size_of, ops::Deref};
|
||||
|
||||
use abi::error::Error;
|
||||
|
||||
use crate::arch::{Architecture, ARCHITECTURE};
|
||||
|
||||
/// Generic MMIO access mapping
|
||||
#[derive(Clone)]
|
||||
#[allow(unused)]
|
||||
pub struct DeviceMemory {
|
||||
name: &'static str,
|
||||
base: usize,
|
||||
size: usize,
|
||||
}
|
||||
|
||||
/// MMIO wrapper for `T`
|
||||
pub struct DeviceMemoryIo<T> {
|
||||
mmio: DeviceMemory,
|
||||
_pd: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl DeviceMemory {
|
||||
/// Maps the device to some virtual memory address and constructs a wrapper for that range.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller is responsible for making sure the (phys, size) range is valid and actually
|
||||
/// points to some device's MMIO. The caller must also make sure no aliasing for that range is
|
||||
/// possible.
|
||||
pub unsafe fn map(name: &'static str, phys: usize, size: usize) -> Result<Self, Error> {
|
||||
if size > 0x1000 {
|
||||
todo!("Device memory mappings larger than 4K");
|
||||
}
|
||||
|
||||
let base = ARCHITECTURE.map_device_pages(phys, 1)?;
|
||||
|
||||
Ok(Self { name, base, size })
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DeviceMemoryIo<T> {
|
||||
/// Maps the `T` struct at `phys` to some virtual memory address and provides a [Deref]able
|
||||
/// wrapper to it.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller is responsible for making sure the `phys` address points to a MMIO region which
|
||||
/// is at least `size_of::<T>()` and no aliasing for that region is possible.
|
||||
pub unsafe fn map(name: &'static str, phys: usize) -> Result<Self, Error> {
|
||||
DeviceMemory::map(name, phys, size_of::<T>()).map(|t| Self::new(t))
|
||||
}
|
||||
|
||||
/// Constructs a device MMIO wrapper from given [DeviceMemory] mapping.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller must ensure `mmio` actually points to a device of type `T`.
|
||||
pub unsafe fn new(mmio: DeviceMemory) -> Self {
|
||||
assert!(mmio.size >= size_of::<T>());
|
||||
// TODO check align
|
||||
|
||||
Self {
|
||||
mmio,
|
||||
_pd: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for DeviceMemoryIo<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
unsafe { &*(self.mmio.base as *const T) }
|
||||
}
|
||||
}
|
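A minimal usage sketch for the API above, assuming a PL011-style UART at a hypothetical physical address; `Pl011Regs` and the base address are illustrative, only the `DeviceMemoryIo` API is real.

// Sketch only: register layout and base address are assumptions.
#[repr(C)]
struct Pl011Regs {
    dr: u32,  // data register at offset 0x00
    rsr: u32, // receive status register at offset 0x04
}

unsafe fn map_uart() -> Result<DeviceMemoryIo<Pl011Regs>, Error> {
    // Safety: 0x0900_0000 is assumed to be this board's UART MMIO base,
    // mapped nowhere else
    DeviceMemoryIo::map("pl011", 0x0900_0000)
}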
51  src/mem/heap.rs  Normal file
@@ -0,0 +1,51 @@
//! Kernel's global heap allocator
use core::{
    alloc::{GlobalAlloc, Layout},
    ptr::{null_mut, NonNull},
};

use linked_list_allocator::Heap;
use spinning_top::Spinlock;

struct KernelAllocator {
    inner: Spinlock<Heap>,
}

impl KernelAllocator {
    const fn empty() -> Self {
        Self {
            inner: Spinlock::new(Heap::empty()),
        }
    }

    unsafe fn init(&self, base: usize, size: usize) {
        self.inner.lock().init(base as _, size);
    }
}

unsafe impl GlobalAlloc for KernelAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        match self.inner.lock().allocate_first_fit(layout) {
            Ok(v) => v.as_ptr(),
            Err(_) => null_mut(),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let ptr = NonNull::new(ptr).unwrap();
        self.inner.lock().deallocate(ptr, layout)
    }
}

#[global_allocator]
static GLOBAL_HEAP: KernelAllocator = KernelAllocator::empty();

/// Sets up the kernel's global heap with the given memory range.
///
/// # Safety
///
/// The caller must ensure the range is valid and mapped virtual memory.
pub unsafe fn init_heap(heap_base: usize, heap_size: usize) {
    debugln!("Heap: {:#x}..{:#x}", heap_base, heap_base + heap_size);
    GLOBAL_HEAP.init(heap_base, heap_size);
}
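A sketch of the boot-time sequence this enables: once `init_heap()` has been handed a mapped region (the address below is illustrative), `alloc`-based types work anywhere in the kernel.

// Sketch only: the heap base address is an assumption made for illustration.
unsafe fn setup(heap_virt_base: usize) {
    init_heap(heap_virt_base, 16 * 0x1000); // 64 KiB heap
}

fn after_init() {
    // Any alloc type now works, since GLOBAL_HEAP is the #[global_allocator]
    let v = alloc::vec![0u8; 128];
    assert_eq!(v.len(), 128);
}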
90  src/mem/mod.rs  Normal file
@@ -0,0 +1,90 @@
//! Memory management utilities and types
use crate::{
    arch::{Architecture, ArchitectureImpl, PlatformImpl},
    device::platform::Platform,
};

pub mod device;
pub mod heap;
pub mod phys;
pub mod table;

/// Kernel's physical load address
pub const KERNEL_PHYS_BASE: usize = PlatformImpl::KERNEL_PHYS_BASE;
/// Kernel's virtual memory mapping offset (i.e. the kernel's virtual address is
/// [KERNEL_PHYS_BASE] + [KERNEL_VIRT_OFFSET])
pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;

/// Interface for converting between address spaces.
///
/// # Safety
///
/// An incorrect implementation can produce an invalid address.
pub unsafe trait ConvertAddress {
    /// Converts the address into a virtual one
    ///
    /// # Panics
    ///
    /// Panics if the address is already a virtual one
    ///
    /// # Safety
    ///
    /// An incorrect implementation can produce an invalid address.
    unsafe fn virtualize(self) -> Self;
    /// Converts the address into a physical one
    ///
    /// # Panics
    ///
    /// Panics if the address is already a physical one
    ///
    /// # Safety
    ///
    /// An incorrect implementation can produce an invalid address.
    unsafe fn physicalize(self) -> Self;
}

unsafe impl ConvertAddress for usize {
    #[inline(always)]
    unsafe fn virtualize(self) -> Self {
        #[cfg(debug_assertions)]
        if self > KERNEL_VIRT_OFFSET {
            todo!();
        }

        self + KERNEL_VIRT_OFFSET
    }

    #[inline(always)]
    unsafe fn physicalize(self) -> Self {
        #[cfg(debug_assertions)]
        if self < KERNEL_VIRT_OFFSET {
            todo!();
        }

        self - KERNEL_VIRT_OFFSET
    }
}

unsafe impl<T> ConvertAddress for *mut T {
    #[inline(always)]
    unsafe fn virtualize(self) -> Self {
        (self as usize).virtualize() as Self
    }

    #[inline(always)]
    unsafe fn physicalize(self) -> Self {
        (self as usize).physicalize() as Self
    }
}

unsafe impl<T> ConvertAddress for *const T {
    #[inline(always)]
    unsafe fn virtualize(self) -> Self {
        (self as usize).virtualize() as Self
    }

    #[inline(always)]
    unsafe fn physicalize(self) -> Self {
        (self as usize).physicalize() as Self
    }
}
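A self-contained check of the same offset arithmetic, using a hypothetical offset value independent of the kernel's actual `ArchitectureImpl` constant.

// Illustration only: EXAMPLE_OFFSET stands in for KERNEL_VIRT_OFFSET.
const EXAMPLE_OFFSET: usize = 0xFFFF_FFC0_0000_0000;

fn example() {
    let phys = 0x4008_0000usize;
    let virt = phys + EXAMPLE_OFFSET; // what virtualize() computes
    assert_eq!(virt - EXAMPLE_OFFSET, phys); // what physicalize() undoes
}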
95  src/mem/phys/manager.rs  Normal file
@@ -0,0 +1,95 @@
//! Physical memory manager implementation
use core::mem::size_of;

use abi::error::Error;

use super::{Page, PageUsage};

/// Physical memory management interface
pub struct PhysicalMemoryManager {
    pages: &'static mut [Page],
    offset: usize,
}

impl PhysicalMemoryManager {
    /// Constructs a [PhysicalMemoryManager] with the page tracking array placed at the given
    /// `base`..`base+size` range. Physical addresses allocated are offset by the given value.
    ///
    /// # Safety
    ///
    /// Addresses are not checked. The caller is responsible for making sure (base, size) ranges do
    /// not alias/overlap, that they're accessible through virtual memory and that the offset is a
    /// meaningful value.
    pub unsafe fn new(offset: usize, base: usize, size: usize) -> PhysicalMemoryManager {
        // TODO check alignment
        let page_count = size / size_of::<Page>();
        let pages = core::slice::from_raw_parts_mut(base as *mut _, page_count);

        for page in pages.iter_mut() {
            *page = Page {
                usage: PageUsage::Reserved,
                refcount: 0,
            };
        }

        PhysicalMemoryManager { pages, offset }
    }

    /// Allocates a single page, marking it as used with `usage`
    pub fn alloc_page(&mut self, usage: PageUsage) -> Result<usize, Error> {
        assert_ne!(usage, PageUsage::Available);
        assert_ne!(usage, PageUsage::Reserved);

        // First fit: take the first page not already in use
        for index in 0..self.pages.len() {
            if self.pages[index].usage == PageUsage::Available {
                self.pages[index].usage = usage;
                return Ok(index * 4096 + self.offset);
            }
        }

        Err(Error::OutOfMemory)
    }

    /// Allocates a contiguous range of physical pages, marking it as used with `usage`
    pub fn alloc_contiguous_pages(
        &mut self,
        count: usize,
        usage: PageUsage,
    ) -> Result<usize, Error> {
        assert_ne!(usage, PageUsage::Available);
        assert_ne!(usage, PageUsage::Reserved);
        assert_ne!(count, 0);

        // Stop the scan early enough that the inner window check stays in bounds
        'l0: for i in 0..self.pages.len().saturating_sub(count - 1) {
            for j in 0..count {
                if self.pages[i + j].usage != PageUsage::Available {
                    continue 'l0;
                }
            }
            for j in 0..count {
                let page = &mut self.pages[i + j];
                assert!(page.usage == PageUsage::Available);
                page.usage = usage;
                page.refcount = 1;
            }
            return Ok(self.offset + i * 0x1000);
        }

        Err(Error::OutOfMemory)
    }

    /// Marks a previously reserved page as available.
    ///
    /// # Panics
    ///
    /// Will panic if the address does not point to a valid, reserved (and unallocated) page.
    pub fn add_available_page(&mut self, addr: usize) {
        assert!(addr >= self.offset);
        let index = (addr - self.offset) / 4096;

        assert_eq!(self.pages[index].usage, PageUsage::Reserved);
        assert_eq!(self.pages[index].refcount, 0);

        self.pages[index].usage = PageUsage::Available;
    }
}
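A construction sketch with illustrative addresses: the tracking array lives at some mapped virtual address and describes pages that start at physical 0x4000_0000.

// Sketch only: all addresses are assumptions for illustration.
unsafe fn example() -> Result<(), Error> {
    let mut manager = PhysicalMemoryManager::new(
        0x4000_0000,           // offset: physical address of page index 0
        0xFFFF_FFC0_4100_0000, // virtual address of the Page array
        0x10000,               // size of the array, in bytes
    );
    // Pages start out Reserved; hand one to the allocator first
    manager.add_available_page(0x4000_0000);
    let page = manager.alloc_page(PageUsage::Used)?;
    assert_eq!(page, 0x4000_0000);
    Ok(())
}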
199  src/mem/phys/mod.rs  Normal file
@@ -0,0 +1,199 @@
//! Physical memory management facilities
use core::{iter::StepBy, mem::size_of, ops::Range};

use abi::error::Error;
use spinning_top::Spinlock;

use crate::{
    absolute_address,
    mem::{
        phys::reserved::{is_reserved, reserve_region},
        ConvertAddress, KERNEL_PHYS_BASE,
    },
    util::OneTimeInit,
};

use self::manager::PhysicalMemoryManager;

pub mod manager;
pub mod reserved;

/// Represents the way in which the page is used (or not)
#[derive(PartialEq, Clone, Copy, Debug)]
#[repr(u32)]
pub enum PageUsage {
    /// Page is not available for allocation or use
    Reserved = 0,
    /// Regular page available for allocation
    Available,
    /// Page is used by some kernel facility
    Used,
}

/// Page descriptor structure for the page management array
#[repr(C)]
pub struct Page {
    usage: PageUsage,
    refcount: u32,
}

/// Defines a usable memory region
#[derive(Clone, Copy, Debug)]
pub struct PhysicalMemoryRegion {
    /// Start of the region
    pub base: usize,
    /// Length of the region
    pub size: usize,
}

impl PhysicalMemoryRegion {
    /// Returns the end address of the region
    pub const fn end(&self) -> usize {
        self.base + self.size
    }

    /// Returns an address range covered by the region
    pub const fn range(&self) -> Range<usize> {
        self.base..self.end()
    }

    /// Provides an iterator over the pages in the region
    pub const fn pages(&self) -> StepBy<Range<usize>> {
        self.range().step_by(0x1000)
    }
}

/// Global physical memory manager
pub static PHYSICAL_MEMORY: OneTimeInit<Spinlock<PhysicalMemoryManager>> = OneTimeInit::new();

/// Allocates a single physical page from the global manager
pub fn alloc_page(usage: PageUsage) -> Result<usize, Error> {
    PHYSICAL_MEMORY.get().lock().alloc_page(usage)
}

/// Allocates a contiguous range of physical pages from the global manager
pub fn alloc_pages_contiguous(count: usize, usage: PageUsage) -> Result<usize, Error> {
    PHYSICAL_MEMORY
        .get()
        .lock()
        .alloc_contiguous_pages(count, usage)
}

fn physical_memory_range<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
) -> Option<(usize, usize)> {
    let mut start = usize::MAX;
    let mut end = usize::MIN;

    for reg in it {
        if reg.base < start {
            start = reg.base;
        }
        if reg.base + reg.size > end {
            end = reg.base + reg.size;
        }
    }

    if start == usize::MAX || end == usize::MIN {
        None
    } else {
        Some((start, end))
    }
}

fn find_contiguous_region<I: Iterator<Item = PhysicalMemoryRegion>>(
    it: I,
    count: usize,
) -> Option<usize> {
    for region in it {
        let mut collected = 0;
        let mut base_addr = None;

        for addr in region.pages() {
            if is_reserved(addr) {
                collected = 0;
                base_addr = None;
                continue;
            }
            if base_addr.is_none() {
                base_addr = Some(addr);
            }
            collected += 1;
            if collected == count {
                return base_addr;
            }
        }
    }
    todo!()
}

/// Initializes the physical memory manager from the given available memory region iterator.
///
/// 1. Finds a non-reserved range to place the page tracking array.
/// 2. Adds all non-reserved pages to the manager.
///
/// # Safety
///
/// The caller must ensure this function has not been called before and that the regions
/// are valid and actually available.
pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
    it: I,
) -> Result<(), Error> {
    let (phys_start, phys_end) = physical_memory_range(it.clone()).unwrap();
    let total_count = (phys_end - phys_start) / 0x1000;
    let pages_array_size = total_count * size_of::<Page>();

    debugln!("Initializing physical memory manager");
    debugln!("Total tracked pages: {}", total_count);

    // Reserve memory regions from which allocation is forbidden
    reserve_region("kernel", kernel_physical_memory_region());

    let pages_array_base = find_contiguous_region(it.clone(), (pages_array_size + 0xFFF) / 0x1000)
        .ok_or(Error::OutOfMemory)?;

    debugln!(
        "Placing page tracking at {:#x}",
        pages_array_base.virtualize()
    );

    reserve_region(
        "pages",
        PhysicalMemoryRegion {
            base: pages_array_base,
            size: (pages_array_size + 0xFFF) & !0xFFF,
        },
    );

    let mut manager =
        PhysicalMemoryManager::new(phys_start, pages_array_base.virtualize(), pages_array_size);
    let mut page_count = 0;

    for region in it {
        for page in region.pages() {
            if is_reserved(page) {
                continue;
            }

            manager.add_available_page(page);
            page_count += 1;
        }
    }

    infoln!("{} available pages", page_count);

    PHYSICAL_MEMORY.init(Spinlock::new(manager));
    Ok(())
}

fn kernel_physical_memory_region() -> PhysicalMemoryRegion {
    extern "C" {
        static __kernel_size: u8;
    }
    let size = absolute_address!(__kernel_size);

    PhysicalMemoryRegion {
        base: KERNEL_PHYS_BASE,
        size,
    }
}
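A sketch of what the platform-specific caller of `init_from_iter()` looks like; the single region here is illustrative, where a real board would produce these regions from its device tree.

// Sketch only: region values are assumptions for illustration.
unsafe fn init_phys() -> Result<(), Error> {
    let regions = [PhysicalMemoryRegion {
        base: 0x4000_0000,
        size: 128 * 1024 * 1024, // 128 MiB of RAM
    }];
    // The iterator must be Clone: it is walked once to size the tracking
    // array and again to populate it
    init_from_iter(regions.into_iter())
}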
33  src/mem/phys/reserved.rs  Normal file
@@ -0,0 +1,33 @@
//! Utilities for handling reserved memory regions

use crate::util::StaticVector;

use super::PhysicalMemoryRegion;

static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 4> = StaticVector::new();

/// Marks a region of physical memory as reserved.
///
/// # Safety
///
/// Can only be called from initialization code **before** the physical memory manager is
/// initialized.
pub unsafe fn reserve_region(reason: &str, region: PhysicalMemoryRegion) {
    debugln!(
        "Reserve {:?} memory: {:#x}..{:#x}",
        reason,
        region.base,
        region.end()
    );

    RESERVED_MEMORY.push(region);
}

/// Returns `true` if `addr` refers to any reserved memory region
pub fn is_reserved(addr: usize) -> bool {
    for region in unsafe { RESERVED_MEMORY.iter() } {
        if region.range().contains(&addr) {
            return true;
        }
    }
    false
}
40  src/mem/table.rs  Normal file
@@ -0,0 +1,40 @@
//! Virtual memory table interface
use abi::error::Error;

pub use crate::arch::aarch64::table::{AddressSpace, PageAttributes, PageEntry, PageTable};

/// Interface for virtual memory address space management
pub trait VirtualMemoryManager {
    /// Allocates a region of virtual memory inside the address space and maps it to physical
    /// memory pages with given attributes
    fn allocate(
        &self,
        hint: Option<usize>,
        len: usize,
        attrs: PageAttributes,
    ) -> Result<usize, Error>;

    /// Releases the virtual memory region from the address space and the pages it refers to
    fn deallocate(&self, addr: usize, len: usize) -> Result<(), Error>;
}

/// Interface for non-terminal tables to retrieve the next level of address translation tables
pub trait NextPageTable {
    /// Type for the next-level page table
    type NextLevel;

    /// Tries looking up a next-level table at given index, allocating and mapping one if it is not
    /// present there
    fn get_mut_or_alloc(&mut self, index: usize) -> Result<&'static mut Self::NextLevel, Error>;
    /// Returns a mutable reference to a next-level table at `index`, if present
    fn get_mut(&mut self, index: usize) -> Option<&'static mut Self::NextLevel>;
}

/// Interface for a single level of address translation
#[const_trait]
pub trait EntryLevel: Copy {
    /// Returns the index into a page table for a given address
    fn index(addr: usize) -> usize;
    /// Returns the offset of an address from the page start at current level
    fn page_offset(addr: usize) -> usize;
}
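A sketch of what an `EntryLevel` implementation looks like for a 4 KiB leaf level with 512 entries (the aarch64 L3 case); the real implementations live in the arch-specific table code, and `ExampleL3` is purely illustrative.

// Sketch only: the shift/mask values match a standard aarch64 4 KiB
// granule; ExampleL3 is hypothetical.
#[derive(Clone, Copy)]
struct ExampleL3;

impl const EntryLevel for ExampleL3 {
    fn index(addr: usize) -> usize {
        // Bits [20:12] select one of 512 level-3 entries
        (addr >> 12) & 0x1FF
    }

    fn page_offset(addr: usize) -> usize {
        // Bits [11:0] are the offset within the 4 KiB page
        addr & 0xFFF
    }
}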
77  src/panic.rs  Normal file
@@ -0,0 +1,77 @@
//! Kernel panic handler code
use core::sync::atomic::{AtomicBool, Ordering};

use crate::{
    arch::{Architecture, ArchitectureImpl, CpuMessage, PLATFORM},
    debug::{debug_internal, LogLevel},
    device::{interrupt::IpiDeliveryTarget, platform::Platform},
    sync::SpinFence,
};

// Just a fence to ensure secondary panics don't trash the screen
static PANIC_FINISHED_FENCE: SpinFence = SpinFence::new();

/// Panic handler for CPUs other than the one that initiated it
pub fn panic_secondary() -> ! {
    unsafe {
        ArchitectureImpl::set_interrupt_mask(true);
    }

    PANIC_FINISHED_FENCE.wait_one();

    log_print_raw!(LogLevel::Fatal, "X");

    loop {
        ArchitectureImpl::wait_for_interrupt();
    }
}

#[panic_handler]
fn panic_handler(pi: &core::panic::PanicInfo) -> ! {
    unsafe {
        ArchitectureImpl::set_interrupt_mask(true);
    }
    static PANIC_HAPPENED: AtomicBool = AtomicBool::new(false);

    if PANIC_HAPPENED
        .compare_exchange(false, true, Ordering::Release, Ordering::Acquire)
        .is_ok()
    {
        // Let other CPUs know we're screwed
        unsafe {
            PLATFORM
                .interrupt_controller()
                .send_ipi(IpiDeliveryTarget::AllExceptLocal, CpuMessage::Panic)
                .ok();
        }

        log_print_raw!(LogLevel::Fatal, "--- BEGIN PANIC ---\n");
        log_print_raw!(LogLevel::Fatal, "Kernel panic ");

        if let Some(location) = pi.location() {
            log_print_raw!(
                LogLevel::Fatal,
                "at {}:{}:",
                location.file(),
                location.line()
            );
        } else {
            log_print_raw!(LogLevel::Fatal, ":");
        }

        log_print_raw!(LogLevel::Fatal, "\n");

        if let Some(msg) = pi.message() {
            debug_internal(*msg, LogLevel::Fatal);
            log_print_raw!(LogLevel::Fatal, "\n");
        }
        log_print_raw!(LogLevel::Fatal, "--- END PANIC ---\n");

        log_print_raw!(LogLevel::Fatal, "X");
        PANIC_FINISHED_FENCE.signal();
    }

    loop {
        ArchitectureImpl::wait_for_interrupt();
    }
}
101  src/proc/exec.rs  Normal file
@@ -0,0 +1,101 @@
//! Binary execution functions
use core::mem::size_of;

use abi::error::Error;
use alloc::rc::Rc;

use crate::{
    arch::aarch64::context::TaskContext,
    mem::{
        phys::{self, PageUsage},
        table::{AddressSpace, PageAttributes},
        ConvertAddress,
    },
    proc,
    task::process::Process,
};

fn setup_args(space: &mut AddressSpace, virt: usize, args: &[&str]) -> Result<(), Error> {
    // arg data len
    let args_size: usize = args.iter().map(|x| x.len()).sum();
    // 1 + arg ptr:len count
    let args_ptr_size = (1 + args.len() * 2) * size_of::<usize>();

    let total_size = args_size + args_ptr_size;

    if total_size > 0x1000 {
        todo!();
    }

    debugln!("arg data size = {}", args_size);

    let phys_page = phys::alloc_page(PageUsage::Used)?;
    // TODO check if this doesn't overwrite anything
    space.map_page(virt, phys_page, PageAttributes::AP_BOTH_READWRITE)?;

    let write = unsafe { phys_page.virtualize() };

    let mut offset = args_ptr_size;

    unsafe {
        (write as *mut usize).write_volatile(args.len());
    }

    for i in 0..args.len() {
        // Place the argument pointer
        let ptr_place = write + (i * 2 + 1) * size_of::<usize>();
        let len_place = ptr_place + size_of::<usize>();
        unsafe {
            (ptr_place as *mut usize).write_volatile(virt + offset);
            (len_place as *mut usize).write_volatile(args[i].len());
        }
        offset += args[i].len();
    }

    // Place the argument data
    unsafe {
        let arg_data_slice =
            core::slice::from_raw_parts_mut((write + args_ptr_size) as *mut u8, args_size);
        let mut offset = 0;
        for &s in args {
            arg_data_slice[offset..offset + s.len()].copy_from_slice(s.as_bytes());
            offset += s.len();
        }
    }

    Ok(())
}

/// Sets up a userspace structure from a slice defining an ELF binary
pub fn create_from_memory(data: &[u8], args: &[&str]) -> Result<Rc<Process>, Error> {
    const USER_STACK_PAGES: usize = 8;

    let mut space = AddressSpace::new_empty()?;
    let elf_entry = proc::load_elf_from_memory(&mut space, data);

    let virt_stack_base = 0x10000000;
    // 0x1000 of guard page
    let virt_args_base = virt_stack_base + (USER_STACK_PAGES + 1) * 0x1000;

    for i in 0..USER_STACK_PAGES {
        let phys = phys::alloc_page(PageUsage::Used)?;
        space.map_page(
            virt_stack_base + i * 0x1000,
            phys,
            PageAttributes::AP_BOTH_READWRITE,
        )?;
    }

    setup_args(&mut space, virt_args_base, args)?;

    debugln!("Entry: {:#x}", elf_entry);

    let context = TaskContext::user(
        elf_entry,
        virt_args_base,
        space.physical_address(),
        virt_stack_base + USER_STACK_PAGES * 0x1000,
    )?;

    Ok(Process::new_with_context(Some(space), context))
}
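A worked example of the page layout `setup_args()` produces, for `args = ["a", "bc"]` with `virt` as the base of the mapping. The pointer table occupies `(1 + 2 * 2) * 8 = 0x28` bytes, so the packed string data starts at `virt + 0x28`.

// Resulting page contents (each slot is one usize):
//
//   virt + 0x00: 2            -- argument count
//   virt + 0x08: virt + 0x28  -- pointer to "a"
//   virt + 0x10: 1            -- length of "a"
//   virt + 0x18: virt + 0x29  -- pointer to "bc"
//   virt + 0x20: 2            -- length of "bc"
//   virt + 0x28: b"abc"       -- packed argument bytes, no separators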
71  src/proc/io.rs  Normal file
@@ -0,0 +1,71 @@
//! Process I/O management
use abi::{error::Error, io::RawFd};
use alloc::collections::BTreeMap;
use vfs::{FileRef, IoContext};

/// I/O context of a process, contains information like root, current directory and file
/// descriptor table
pub struct ProcessIo {
    ioctx: Option<IoContext>,
    files: BTreeMap<RawFd, FileRef>,
}

impl ProcessIo {
    /// Constructs an uninitialized I/O context
    pub fn new() -> Self {
        Self {
            ioctx: None,
            files: BTreeMap::new(),
        }
    }

    /// Returns the file the given descriptor refers to
    pub fn file(&self, fd: RawFd) -> Result<FileRef, Error> {
        self.files
            .get(&fd)
            .cloned()
            .ok_or_else(|| Error::InvalidFile)
    }

    /// Sets the inner I/O context
    pub fn set_ioctx(&mut self, ioctx: IoContext) {
        self.ioctx.replace(ioctx);
    }

    /// Inserts a file into the descriptor table. Returns an error if a file is already present
    /// for the given descriptor.
    pub fn set_file(&mut self, fd: RawFd, file: FileRef) -> Result<(), Error> {
        if self.files.contains_key(&fd) {
            todo!();
        }

        self.files.insert(fd, file);
        Ok(())
    }

    /// Allocates a slot for a file and returns it
    pub fn place_file(&mut self, file: FileRef) -> Result<RawFd, Error> {
        for idx in 0..64 {
            let fd = RawFd(idx);
            if !self.files.contains_key(&fd) {
                self.files.insert(fd, file);
                return Ok(fd);
            }
        }
        todo!();
    }

    /// Closes the file and removes it from the table
    pub fn close_file(&mut self, fd: RawFd) -> Result<(), Error> {
        let file = self.files.remove(&fd);
        if file.is_none() {
            todo!();
        }
        Ok(())
    }

    /// Returns the inner I/O context reference
    pub fn ioctx(&mut self) -> &mut IoContext {
        self.ioctx.as_mut().unwrap()
    }
}
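A sketch of how process setup code might wire I/O together with this table; `tty_file` stands for any `FileRef` obtained from a vfs `open()` call.

// Sketch only: setup_io and tty_file are illustrative.
fn setup_io(io: &mut ProcessIo, ioctx: IoContext, tty_file: FileRef) -> Result<(), Error> {
    io.set_ioctx(ioctx);
    io.set_file(RawFd(0), tty_file.clone())?; // explicit slot for stdin
    let fd = io.place_file(tty_file)?; // next free slot becomes stdout
    assert!(fd == RawFd(1));
    Ok(())
}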
91  src/proc/mod.rs  Normal file
@@ -0,0 +1,91 @@
//! Internal management for processes

pub mod exec;
pub mod io;
pub mod wait;

use aarch64_cpu::registers::TTBR0_EL1;
use elf::{
    abi::{PF_W, PF_X, PT_LOAD},
    endian::AnyEndian,
    ElfBytes,
};
use tock_registers::interfaces::Writeable;

use crate::{
    arch::aarch64::table::tlb_flush_vaae1,
    mem::{
        phys::{self, PageUsage},
        table::{AddressSpace, PageAttributes},
    },
};

fn load_segment(space: &mut AddressSpace, addr: usize, data: &[u8], memsz: usize, elf_attrs: u32) {
    let attrs = match (elf_attrs & PF_W, elf_attrs & PF_X) {
        (0, 0) => PageAttributes::AP_BOTH_READONLY,
        (_, 0) => PageAttributes::AP_BOTH_READWRITE,
        (0, _) => PageAttributes::AP_BOTH_READONLY,
        (_, _) => PageAttributes::AP_BOTH_READWRITE,
    };

    let aligned_start = addr & !0xFFF;
    let aligned_end = (addr + memsz + 0xFFF) & !0xFFF;

    // Map and write pages
    for page in (aligned_start..aligned_end).step_by(0x1000) {
        if let Some(_phys) = space.translate(page) {
            todo!();
        } else {
            let phys = phys::alloc_page(PageUsage::Used).unwrap();
            space
                .map_page(page, phys, PageAttributes::AP_BOTH_READWRITE)
                .unwrap();

            debugln!("MAP (alloc) {:#x} -> {:#x}", page, phys);
            tlb_flush_vaae1(page);
        }
    }

    unsafe {
        // Write the data
        let dst = core::slice::from_raw_parts_mut(addr as *mut u8, memsz);
        dst[..data.len()].copy_from_slice(data);

        // Zero the rest
        dst[data.len()..memsz].fill(0);
    }

    // Remap the region with its final (possibly read-only) attributes
    for page in (aligned_start..aligned_end).step_by(0x1000) {
        let phys = space.translate(page).unwrap();
        space.map_page(page, phys, attrs).unwrap();
    }
}

/// Loads an ELF image into the address space from a slice
pub fn load_elf_from_memory(space: &mut AddressSpace, src: &[u8]) -> usize {
    // Map the address space temporarily
    TTBR0_EL1.set(space.physical_address() as u64);

    let elf = ElfBytes::<AnyEndian>::minimal_parse(src).unwrap();

    for phdr in elf.segments().unwrap() {
        if phdr.p_type != PT_LOAD {
            continue;
        }

        debugln!("LOAD {:#x}", phdr.p_vaddr);
        let data = &src[phdr.p_offset as usize..(phdr.p_offset + phdr.p_filesz) as usize];
        load_segment(
            space,
            phdr.p_vaddr as usize,
            data,
            phdr.p_memsz as usize,
            phdr.p_flags,
        );
    }

    TTBR0_EL1.set_baddr(0);

    elf.ehdr.e_entry as usize
}
174  src/proc/wait.rs  Normal file
@@ -0,0 +1,174 @@
//! Wait channel implementation
use core::time::Duration;

use abi::error::Error;
use alloc::{collections::LinkedList, rc::Rc};

use crate::{
    arch::PLATFORM, device::platform::Platform, sync::IrqSafeSpinlock, task::process::Process,
};

/// Defines whether the wait channel is available for a specific task
#[derive(Clone, Copy, Debug)]
pub enum WaitStatus {
    /// Wait on the channel was interrupted
    Interrupted,
    /// Channel did not yet signal availability
    Pending,
    /// Channel has data available
    Done,
}

/// Wait notification channel
pub struct Wait {
    queue: IrqSafeSpinlock<LinkedList<Rc<Process>>>,
    // Used for tracing waits
    #[allow(dead_code)]
    name: &'static str,
}

struct Timeout {
    process: Rc<Process>,
    deadline: Duration,
}

impl Wait {
    /// Constructs a new wait notification channel
    pub const fn new(name: &'static str) -> Self {
        Self {
            name,
            queue: IrqSafeSpinlock::new(LinkedList::new()),
        }
    }

    /// Wakes up tasks waiting for availability on this channel, but no more than `limit`
    pub fn wakeup_some(&self, mut limit: usize) -> usize {
        let mut queue = self.queue.lock();
        let mut count = 0;
        while limit != 0 && !queue.is_empty() {
            let proc = queue.pop_front().unwrap();

            {
                let mut tick_lock = TICK_LIST.lock();
                let mut cursor = tick_lock.cursor_front_mut();

                while let Some(item) = cursor.current() {
                    if proc.id() == item.process.id() {
                        cursor.remove_current();
                        break;
                    } else {
                        cursor.move_next();
                    }
                }

                drop(tick_lock);

                unsafe {
                    proc.set_wait_status(WaitStatus::Done);
                }
                proc.enqueue_somewhere();
            }

            limit -= 1;
            count += 1;
        }

        count
    }

    /// Wakes up all tasks waiting on this channel
    pub fn wakeup_all(&self) {
        self.wakeup_some(usize::MAX);
    }

    /// Wakes up a single task waiting on this channel
    pub fn wakeup_one(&self) {
        self.wakeup_some(1);
    }

    /// Suspends the task until either the deadline is reached or this channel signals availability
    pub fn wait(&'static self, deadline: Option<Duration>) -> Result<(), Error> {
        let process = Process::current();
        let mut queue_lock = self.queue.lock();
        queue_lock.push_back(process.clone());
        unsafe {
            process.setup_wait(self);
        }

        if let Some(deadline) = deadline {
            TICK_LIST.lock().push_back(Timeout {
                process: process.clone(),
                deadline,
            });
        }

        loop {
            match process.wait_status() {
                WaitStatus::Pending => (),
                WaitStatus::Done => return Ok(()),
                WaitStatus::Interrupted => todo!(),
            }

            drop(queue_lock);
            process.suspend();

            queue_lock = self.queue.lock();

            if let Some(deadline) = deadline {
                let now = PLATFORM.timestamp_source().timestamp()?;

                if now > deadline {
                    let mut cursor = queue_lock.cursor_front_mut();

                    while let Some(item) = cursor.current() {
                        if item.id() == process.id() {
                            cursor.remove_current();
                            return Err(Error::TimedOut);
                        } else {
                            cursor.move_next();
                        }
                    }

                    panic!();
                }
            }
        }
    }
}

static TICK_LIST: IrqSafeSpinlock<LinkedList<Timeout>> = IrqSafeSpinlock::new(LinkedList::new());

/// Suspends the current task until the given deadline
pub fn sleep(timeout: Duration, remaining: &mut Duration) -> Result<(), Error> {
    static SLEEP_NOTIFY: Wait = Wait::new("sleep");
    let now = PLATFORM.timestamp_source().timestamp()?;
    let deadline = now + timeout;

    match SLEEP_NOTIFY.wait(Some(deadline)) {
        // Just what we expected
        Err(Error::TimedOut) => {
            *remaining = Duration::ZERO;
            Ok(())
        }

        Ok(_) => panic!("This should not happen"),
        Err(e) => Err(e),
    }
}

/// Updates all pending timeouts and wakes up the tasks that have reached theirs
pub fn tick() {
    let now = PLATFORM.timestamp_source().timestamp().unwrap();
    let mut list = TICK_LIST.lock();
    let mut cursor = list.cursor_front_mut();

    while let Some(item) = cursor.current() {
        if now > item.deadline {
            let t = cursor.remove_current().unwrap();

            t.process.enqueue_somewhere();
        } else {
            cursor.move_next();
        }
    }
}
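A sketch of the producer/consumer pattern this channel enables; the static and the interrupt it stands for are illustrative.

// Sketch only: RX_WAIT and both functions are hypothetical call sites.
static RX_WAIT: Wait = Wait::new("uart-rx");

fn consumer_blocking_read() -> Result<(), Error> {
    // Sleep with no deadline until an interrupt handler signals data
    RX_WAIT.wait(None)
}

fn irq_handler_on_data() {
    // Wake exactly one blocked reader
    RX_WAIT.wakeup_one();
}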
177  src/sync.rs  Normal file
@@ -0,0 +1,177 @@
//! Synchronization primitives
use core::{
    cell::UnsafeCell,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};

use aarch64_cpu::registers::DAIF;
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};

/// Simple spinloop-based fence guaranteeing that the execution resumes only after its condition is
/// met.
pub struct SpinFence {
    value: AtomicUsize,
}

/// Token type used to prevent IRQs from firing during some critical section. Normal IRQ operation
/// (if enabled before) is resumed when [IrqGuard]'s lifetime is over.
pub struct IrqGuard(u64);

struct SpinlockInner<T> {
    value: UnsafeCell<T>,
    state: AtomicBool,
}

struct SpinlockInnerGuard<'a, T> {
    lock: &'a SpinlockInner<T>,
}

/// Spinlock implementation which masks interrupts while held to avoid deadlocks when an interrupt
/// handler tries to acquire a lock taken before the IRQ fired.
pub struct IrqSafeSpinlock<T> {
    inner: SpinlockInner<T>,
}

/// Token type allowing safe access to the underlying data of the [IrqSafeSpinlock]. Resumes normal
/// IRQ operation (if enabled before acquiring) when the lifetime is over.
pub struct IrqSafeSpinlockGuard<'a, T> {
    // Must come first to ensure the lock is dropped first and only then IRQs are re-enabled
    inner: SpinlockInnerGuard<'a, T>,
    _irq: IrqGuard,
}

// Spinlock impls
impl<T> SpinlockInner<T> {
    const fn new(value: T) -> Self {
        Self {
            value: UnsafeCell::new(value),
            state: AtomicBool::new(false),
        }
    }

    fn lock(&self) -> SpinlockInnerGuard<T> {
        // Loop until the lock can be acquired
        while self
            .state
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }

        SpinlockInnerGuard { lock: self }
    }
}

impl<'a, T> Deref for SpinlockInnerGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.lock.value.get() }
    }
}

impl<'a, T> DerefMut for SpinlockInnerGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.lock.value.get() }
    }
}

impl<'a, T> Drop for SpinlockInnerGuard<'a, T> {
    fn drop(&mut self) {
        self.lock
            .state
            .compare_exchange(true, false, Ordering::Release, Ordering::Relaxed)
            .unwrap();
    }
}

unsafe impl<T> Sync for SpinlockInner<T> {}
unsafe impl<T> Send for SpinlockInner<T> {}

// IrqSafeSpinlock impls
impl<T> IrqSafeSpinlock<T> {
    /// Wraps the value in a spinlock primitive
    pub const fn new(value: T) -> Self {
        Self {
            inner: SpinlockInner::new(value),
        }
    }

    /// Attempts to acquire a lock. IRQs will be disabled until the lock is released.
    pub fn lock(&self) -> IrqSafeSpinlockGuard<T> {
        // Disable IRQs to avoid IRQ handler trying to acquire the same lock
        let irq_guard = IrqGuard::acquire();

        // Acquire the inner lock
        let inner = self.inner.lock();

        IrqSafeSpinlockGuard {
            inner,
            _irq: irq_guard,
        }
    }
}

impl<'a, T> Deref for IrqSafeSpinlockGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<'a, T> DerefMut for IrqSafeSpinlockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.deref_mut()
    }
}

// IrqGuard impls
impl IrqGuard {
    /// Saves the current IRQ state and masks them
    pub fn acquire() -> Self {
        let this = Self(DAIF.get());
        DAIF.modify(DAIF::I::SET);
        this
    }
}

impl Drop for IrqGuard {
    fn drop(&mut self) {
        DAIF.set(self.0);
    }
}

// SpinFence impls
impl SpinFence {
    /// Constructs a new [SpinFence]
    pub const fn new() -> Self {
        Self {
            value: AtomicUsize::new(0),
        }
    }

    /// Resets a fence back to its original state
    pub fn reset(&self) {
        self.value.store(0, Ordering::Release);
    }

    /// "Signals" a fence, incrementing its internal counter by one
    pub fn signal(&self) {
        self.value.fetch_add(1, Ordering::SeqCst);
    }

    /// Waits until the fence is signalled at least the amount of times specified
    pub fn wait_all(&self, count: usize) {
        while self.value.load(Ordering::Acquire) < count {
            core::hint::spin_loop();
        }
    }

    /// Waits until the fence is signalled at least once
    pub fn wait_one(&self) {
        self.wait_all(1);
    }
}
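A sketch of the intended usage pattern: because the guard masks IRQs for its whole lifetime, the same lock can be taken from both thread and interrupt context on one CPU without deadlocking. `COUNTER` is illustrative.

// Sketch only: COUNTER and bump() are hypothetical call sites.
static COUNTER: IrqSafeSpinlock<u64> = IrqSafeSpinlock::new(0);

fn bump() -> u64 {
    // IRQs stay masked while `value` lives, so an interrupt handler
    // calling bump() on this CPU cannot preempt us mid-critical-section
    let mut value = COUNTER.lock();
    *value += 1;
    *value
}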
145  src/syscall.rs  Normal file
@@ -0,0 +1,145 @@
//! System function call handlers
use core::time::Duration;

use abi::{
    error::{Error, IntoSyscallResult},
    io::{OpenFlags, RawFd},
    SyscallFunction,
};
use vfs::{Read, Write};

use crate::{
    mem::table::{PageAttributes, VirtualMemoryManager},
    proc::wait,
    task::process::Process,
};

fn arg_buffer_ref<'a>(base: usize, len: usize) -> Result<&'a [u8], Error> {
    if base + len > crate::mem::KERNEL_VIRT_OFFSET {
        panic!("Invalid argument");
    }
    Ok(unsafe { core::slice::from_raw_parts(base as *const u8, len) })
}

fn arg_buffer_mut<'a>(base: usize, len: usize) -> Result<&'a mut [u8], Error> {
    if base + len > crate::mem::KERNEL_VIRT_OFFSET {
        panic!("Invalid argument");
    }
    Ok(unsafe { core::slice::from_raw_parts_mut(base as *mut u8, len) })
}

fn arg_user_str<'a>(base: usize, len: usize) -> Result<&'a str, Error> {
    let slice = arg_buffer_ref(base, len)?;
    Ok(core::str::from_utf8(slice).unwrap())
}

fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error> {
    match func {
        SyscallFunction::DebugTrace => {
            let pid = Process::get_current()
                .as_deref()
                .map(Process::id)
                .unwrap_or(0);
            let arg = arg_user_str(args[0] as usize, args[1] as usize)?;
            debugln!("[{}] TRACE: {:?}", pid, arg);

            Ok(0)
        }
        SyscallFunction::Nanosleep => {
            let seconds = args[0];
            let nanos = args[1] as u32;
            let duration = Duration::new(seconds, nanos);
            let mut remaining = Duration::ZERO;

            wait::sleep(duration, &mut remaining).unwrap();

            Ok(0)
        }
        SyscallFunction::Exit => {
            Process::current().exit(args[0] as _);
            panic!();
        }
        SyscallFunction::MapMemory => {
            let len = args[1] as usize;

            let proc = Process::current();
            let space = proc.address_space();

            if len & 0xFFF != 0 {
                todo!();
            }

            let addr = space.allocate(None, len / 0x1000, PageAttributes::AP_BOTH_READWRITE);
            debugln!("mmap({:#x}) = {:x?}", len, addr);

            addr
        }
        SyscallFunction::UnmapMemory => {
            let addr = args[0] as usize;
            let len = args[1] as usize;

            let proc = Process::current();
            let space = proc.address_space();

            if len & 0xFFF != 0 {
                todo!();
            }

            debugln!("munmap({:#x}, {:#x})", addr, len);
            space.deallocate(addr, len)?;

            Ok(0)
        }
        SyscallFunction::Write => {
            let fd = RawFd(args[0] as u32);
            let data = arg_buffer_ref(args[1] as _, args[2] as _)?;

            let proc = Process::current();
            let io = proc.io.lock();
            let file = io.file(fd)?;
            let mut file_borrow = file.borrow_mut();

            file_borrow.write(data)
        }
        SyscallFunction::Read => {
            let fd = RawFd(args[0] as u32);
            let data = arg_buffer_mut(args[1] as _, args[2] as _)?;

            let proc = Process::current();
            let io = proc.io.lock();
            let file = io.file(fd)?;
            let mut file_borrow = file.borrow_mut();

            file_borrow.read(data)
        }
        SyscallFunction::Open => {
            let path = arg_user_str(args[0] as usize, args[1] as usize)?;
            let opts = OpenFlags(args[2] as u32);

            let proc = Process::current();
            let mut io = proc.io.lock();

            let file = io.ioctx().open(None, path, opts)?;
            let fd = io.place_file(file)?;

            Ok(fd.0 as usize)
        }
        SyscallFunction::Close => {
            let fd = RawFd(args[0] as u32);

            let proc = Process::current();
            let mut io = proc.io.lock();
            io.close_file(fd)?;
            Ok(0)
        }
    }
}

/// Entrypoint for system calls that takes raw argument values
pub fn raw_syscall_handler(func: u64, args: &[u64]) -> u64 {
    let Ok(func) = SyscallFunction::try_from(func as usize) else {
        todo!("Undefined syscall: {}", func);
    };

    syscall_handler(func, args).into_syscall_result() as u64
}
110  src/task/mod.rs  Normal file
@@ -0,0 +1,110 @@
//! Multitasking and process/thread management interfaces
use core::sync::atomic::Ordering;

use aarch64_cpu::registers::MPIDR_EL1;
use abi::error::Error;
use alloc::{rc::Rc, vec::Vec};
use tock_registers::interfaces::Readable;

use crate::{
    arch::aarch64::{context::TaskContext, cpu::Cpu, smp::CPU_COUNT},
    kernel_main,
    sync::{IrqSafeSpinlock, SpinFence},
    task::sched::CpuQueue,
};

use self::process::Process;

pub mod process;
pub mod sched;

/// Process identifier alias for clarity
pub type ProcessId = usize;

/// Wrapper structure to hold all the system's processes
pub struct ProcessList {
    data: Vec<(ProcessId, Rc<Process>)>,
    last_process_id: ProcessId,
}

impl ProcessList {
    /// Constructs an empty process list
    pub const fn new() -> Self {
        Self {
            last_process_id: 0,
            data: Vec::new(),
        }
    }

    /// Inserts a new process into the list.
    ///
    /// # Safety
    ///
    /// Only meant to be called from inside the Process impl, as this function does not perform any
    /// accounting information updates.
    pub unsafe fn push(&mut self, process: Rc<Process>) -> ProcessId {
        self.last_process_id += 1;
        debugln!("Insert process with ID {}", self.last_process_id);
        self.data.push((self.last_process_id, process));
        self.last_process_id
    }

    /// Looks up a process by its ID
    pub fn get(&self, id: ProcessId) -> Option<&Rc<Process>> {
        self.data
            .iter()
            .find_map(|(i, p)| if *i == id { Some(p) } else { None })
    }
}

/// Global shared process list
pub static PROCESSES: IrqSafeSpinlock<ProcessList> = IrqSafeSpinlock::new(ProcessList::new());

/// Creates a new kernel-space process to execute a closure and queues it to some CPU
pub fn spawn_kernel_closure<F: Fn() + Send + 'static>(f: F) -> Result<(), Error> {
    let proc = Process::new_with_context(None, TaskContext::kernel_closure(f)?);
    proc.enqueue_somewhere();

    Ok(())
}

/// Sets up CPU queues and gives them some processes to run
pub fn init() -> Result<(), Error> {
    let cpu_count = CPU_COUNT.load(Ordering::Acquire);

    // Create a queue for each CPU
    sched::init_queues(Vec::from_iter((0..cpu_count).map(|_| CpuQueue::new())));

    // Spawn kernel main task
    spawn_kernel_closure(kernel_main)?;

    Ok(())
}

/// Sets up the local CPU queue and switches to some task in it for execution.
///
/// # Note
///
/// Any locks held at this point will not be dropped properly, which may lead to a deadlock.
///
/// # Safety
///
/// Only safe to call once at the end of non-threaded system initialization.
pub unsafe fn enter() -> ! {
    static AP_CAN_ENTER: SpinFence = SpinFence::new();

    let cpu_id = MPIDR_EL1.get() & 0xF;

    if cpu_id != 0 {
        // Wait until BSP allows us to enter
        AP_CAN_ENTER.wait_one();
    } else {
        AP_CAN_ENTER.signal();
    }

    let queue = CpuQueue::for_cpu(cpu_id as usize);
    let cpu = Cpu::local();
    cpu.init_queue(queue);

    queue.enter()
}
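A sketch of spawning kernel work through this interface, mirroring how `init()` spawns `kernel_main` above; the ticker closure is illustrative.

// Sketch only: spawn_ticker is a hypothetical call site.
fn spawn_ticker() -> Result<(), Error> {
    spawn_kernel_closure(|| loop {
        let mut remaining = core::time::Duration::ZERO;
        // Kernel tasks can block on wait channels like any other task
        crate::proc::wait::sleep(core::time::Duration::from_secs(1), &mut remaining).ok();
        debugln!("tick");
    })
}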
246  src/task/process.rs  Normal file
@@ -0,0 +1,246 @@
//! Process data structures
use core::sync::atomic::{AtomicU32, Ordering};

use alloc::rc::Rc;
use atomic_enum::atomic_enum;

use crate::{
    arch::aarch64::{context::TaskContext, cpu::Cpu},
    mem::table::AddressSpace,
    proc::{
        io::ProcessIo,
        wait::{Wait, WaitStatus},
    },
    sync::{IrqGuard, IrqSafeSpinlock},
    util::OneTimeInit,
};

use super::{sched::CpuQueue, ProcessId, PROCESSES};

/// Represents the states a process can be at some point in time
#[atomic_enum]
#[derive(PartialEq)]
pub enum ProcessState {
    /// Process is ready for execution and is present in some CPU's queue
    Ready,
    /// Process is currently being executed by some CPU
    Running,
    /// Process is present in a global list, but is not queued for execution until it is resumed
    Suspended,
    /// Process is terminated and waits to be reaped
    Terminated,
}

struct ProcessInner {
    pending_wait: Option<&'static Wait>,
    wait_status: WaitStatus,
}

/// Process data and state structure
pub struct Process {
    context: TaskContext,

    // Process state info
    id: OneTimeInit<ProcessId>,
    state: AtomicProcessState,
    cpu_id: AtomicU32,
    inner: IrqSafeSpinlock<ProcessInner>,
    space: Option<AddressSpace>,
    /// I/O state of the task
    pub io: IrqSafeSpinlock<ProcessIo>,
}

impl Process {
    /// Creates a process from a raw architecture-specific [TaskContext].
    ///
    /// # Note
    ///
    /// Has the side effect of allocating a new PID for itself.
    pub fn new_with_context(space: Option<AddressSpace>, context: TaskContext) -> Rc<Self> {
        let this = Rc::new(Self {
            context,
            id: OneTimeInit::new(),
            state: AtomicProcessState::new(ProcessState::Suspended),
            cpu_id: AtomicU32::new(0),
            inner: IrqSafeSpinlock::new(ProcessInner {
                pending_wait: None,
                wait_status: WaitStatus::Done,
            }),
            space,
            io: IrqSafeSpinlock::new(ProcessIo::new()),
        });

        let id = unsafe { PROCESSES.lock().push(this.clone()) };
        this.id.init(id);

        this
    }

    /// Returns a reference to the inner architecture-specific [TaskContext].
    pub fn context(&self) -> &TaskContext {
        &self.context
    }

    /// Returns this process' ID
    pub fn id(&self) -> ProcessId {
        *self.id.get()
    }

    /// Returns the state of the process.
    ///
    /// # Note
    ///
    /// Maybe I should remove this and make ALL state changes atomic.
    pub fn state(&self) -> ProcessState {
        self.state.load(Ordering::Acquire)
    }

    /// Atomically updates the state of the process and returns the previous one.
    pub fn set_state(&self, state: ProcessState) -> ProcessState {
        self.state.swap(state, Ordering::SeqCst)
    }

    /// Marks the task as running on the specified CPU.
    ///
    /// # Safety
    ///
    /// Only meant to be called from scheduler routines.
    pub unsafe fn set_running(&self, cpu: u32) {
        self.cpu_id.store(cpu, Ordering::Release);
        self.state.store(ProcessState::Running, Ordering::Release);
    }

    /// Returns the address space of the task
    pub fn address_space(&self) -> &AddressSpace {
        self.space.as_ref().unwrap()
    }

    /// Selects a suitable CPU queue and submits the process for execution.
    ///
    /// # Panics
    ///
    /// Currently, the code will panic if the process is queued/executing on any queue.
    pub fn enqueue_somewhere(self: Rc<Self>) -> usize {
        // Doesn't have to be precise, so even if something changes, we can still be rebalanced
        // to another CPU
        let (index, queue) = CpuQueue::least_loaded().unwrap();

        self.enqueue_to(queue);

        index
    }

    /// Submits the process to a specific queue.
    ///
    /// # Panics
    ///
    /// Currently, the code will panic if the process is queued/executing on any queue.
    pub fn enqueue_to(self: Rc<Self>, queue: &CpuQueue) {
        let current_state = self.state.swap(ProcessState::Ready, Ordering::SeqCst);

        if current_state != ProcessState::Suspended {
            todo!("Handle attempt to enqueue an already queued/running/terminated process");
        }

        unsafe {
            queue.enqueue(self);
        }
    }

    /// Marks the process as suspended, blocking it from being run until it's resumed.
    ///
    /// # Note
    ///
    /// The process only halts its execution immediately when this function is called targeting
    /// the *current process* running on the *local* CPU.
    ///
    /// # TODO
    ///
    /// The code currently does not allow suspension of active processes on either local or other
    /// CPUs.
    pub fn suspend(&self) {
        let _irq = IrqGuard::acquire();
        let current_state = self.state.swap(ProcessState::Suspended, Ordering::SeqCst);

        match current_state {
            // NOTE: I'm not sure if the process could've been queued between the store and this
            // but most likely not (if I'm not that bad with atomics)
            // Do nothing, its queue will just drop the process
            ProcessState::Ready => (),
            // Do nothing, not in a queue already
            ProcessState::Suspended => (),
            ProcessState::Terminated => panic!("Process is terminated"),
            ProcessState::Running => {
                let cpu_id = self.cpu_id.load(Ordering::Acquire);
                let local_cpu_id = Cpu::local_id();
                let queue = Cpu::local().queue();

                if cpu_id == local_cpu_id {
                    // Suspending a process running on local CPU
                    unsafe { queue.yield_cpu() }
                } else {
                    todo!();
                }
            }
        }
    }

    /// Sets up a pending wait for the process.
    ///
    /// # Safety
    ///
    /// This function is only meant to be called in no-IRQ context and when the caller can
    /// guarantee the task won't get scheduled to a CPU in such a state.
    pub unsafe fn setup_wait(&self, wait: &'static Wait) {
        let mut inner = self.inner.lock();
        inner.pending_wait.replace(wait);
        inner.wait_status = WaitStatus::Pending;
    }

    /// Returns the current wait status of the task
    pub fn wait_status(&self) -> WaitStatus {
        self.inner.lock().wait_status
    }

    /// Updates the wait status for the task.
    ///
    /// # Safety
    ///
    /// This function is only meant to be called on waiting tasks, otherwise atomicity is not
    /// guaranteed.
    pub unsafe fn set_wait_status(&self, status: WaitStatus) {
        self.inner.lock().wait_status = status;
    }

    /// Returns the [Process] currently executing on the local CPU, None if idling.
    pub fn get_current() -> Option<Rc<Self>> {
        let queue = Cpu::local().queue();
        queue.current_process()
    }

    /// Wraps [Process::get_current()] for cases when the caller is absolutely sure there is a
    /// running process (e.g. the call itself comes from a process).
    pub fn current() -> Rc<Self> {
        Self::get_current().unwrap()
    }

    /// Terminates the process
    pub fn exit(&self, status: usize) {
        let current_state = self.state.swap(ProcessState::Terminated, Ordering::SeqCst);

        debugln!("Process {} exited with code {}", self.id(), status);

        match current_state {
            ProcessState::Suspended => (),
            ProcessState::Ready => todo!(),
            ProcessState::Running => unsafe { Cpu::local().queue().yield_cpu() },
            ProcessState::Terminated => todo!(),
        }
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        infoln!("Drop process!");
    }
}
273
src/task/sched.rs
Normal file
273
src/task/sched.rs
Normal file
@ -0,0 +1,273 @@
//! Per-CPU queue implementation

use aarch64_cpu::registers::CNTPCT_EL0;
use alloc::{collections::VecDeque, rc::Rc, vec::Vec};
use tock_registers::interfaces::Readable;

use crate::{
    arch::aarch64::{context::TaskContext, cpu::Cpu},
    sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard},
    util::OneTimeInit,
};

use super::{
    process::{Process, ProcessState},
    ProcessId,
};

/// Per-CPU statistics
#[derive(Default)]
pub struct CpuQueueStats {
    /// Ticks spent idling
    pub idle_time: u64,
    /// Ticks spent running CPU tasks
    pub cpu_time: u64,

    /// Timestamp of the last measurement
    measure_time: u64,
}

/// Per-CPU queue's inner data, normally resides under a lock
pub struct CpuQueueInner {
    /// Current process, None if idling
    pub current: Option<Rc<Process>>,
    /// FIFO queue for processes waiting for execution
    pub queue: VecDeque<Rc<Process>>,

    /// CPU time usage statistics
    pub stats: CpuQueueStats,
}

/// Per-CPU queue
pub struct CpuQueue {
    inner: IrqSafeSpinlock<CpuQueueInner>,
    idle: TaskContext,
}

static QUEUES: OneTimeInit<Vec<CpuQueue>> = OneTimeInit::new();

#[naked]
extern "C" fn __idle(_x: usize) -> ! {
    unsafe {
        core::arch::asm!("1: nop; b 1b", options(noreturn));
    }
}

impl CpuQueueStats {
    /// Resets the stats to zero values
    pub fn reset(&mut self) {
        self.cpu_time = 0;
        self.idle_time = 0;
    }
}
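
// Illustrative sketch (not part of the original commit): how a periodic load-accounting
// task might consume these counters. CpuQueue::all() and CpuQueue::lock() are defined
// further down in this file; the debugln! macro is assumed to be visible crate-wide.
#[allow(dead_code)]
fn example_report_load() {
    for queue in CpuQueue::all() {
        let mut inner = queue.lock();
        debugln!(
            "cpu={} idle={}",
            inner.stats.cpu_time,
            inner.stats.idle_time
        );
        // Start a fresh measurement window
        inner.stats.reset();
    }
}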

impl CpuQueueInner {
    /// Picks the next task for execution, skipping (and thus dropping) those that were
    /// suspended or terminated. May return None if the queue is empty or no valid task was
    /// found, in which case the scheduler should go idle.
    pub fn next_ready_task(&mut self) -> Option<Rc<Process>> {
        while let Some(task) = self.queue.pop_front() {
            match task.state() {
                ProcessState::Ready => {
                    return Some(task);
                }
                // Drop suspended and terminated tasks from the queue
                ProcessState::Suspended | ProcessState::Terminated => (),
                e => panic!("Unexpected process state in CpuQueue: {:?}", e),
            }
        }

        None
    }

    /// Returns an iterator over all the processes in the queue plus the currently running
    /// process, if there is one.
    pub fn iter(&self) -> impl Iterator<Item = &Rc<Process>> {
        Iterator::chain(self.queue.iter(), self.current.iter())
    }
}
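
// Illustrative sketch (not part of the original commit): dumping every process known to
// a queue, e.g. for a hypothetical kernel debugger. Assumes it is called in a context
// where taking the queue lock is allowed.
#[allow(dead_code)]
fn example_dump_queue(queue: &CpuQueue) {
    let inner = queue.lock();
    // Covers both queued processes and the one currently running, if any
    for proc in inner.iter() {
        debugln!("process {}", proc.id());
    }
}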

impl CpuQueue {
    /// Constructs an empty queue with its own idle task
    pub fn new() -> Self {
        let idle = TaskContext::kernel(__idle, 0).expect("Could not construct an idle task");

        Self {
            inner: IrqSafeSpinlock::new(CpuQueueInner {
                current: None,
                queue: VecDeque::new(),
                stats: CpuQueueStats::default(),
            }),
            idle,
        }
    }

    /// Starts queue execution on the current CPU.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the [crate::task::enter()] function.
    pub unsafe fn enter(&self) -> ! {
        // Start from the idle thread to avoid having an Rc stuck here without getting dropped
        let t = CNTPCT_EL0.get();
        self.lock().stats.measure_time = t;

        let mut inner = self.inner.lock();
        if let Some(proc) = inner.next_ready_task() {
            inner.queue.push_back(proc.clone());
            inner.current = Some(proc.clone());
            proc.set_running(Cpu::local_id());

            drop(inner);
            proc.context().enter();
        } else {
            drop(inner);

            self.idle.enter();
        }
    }

    /// Yields CPU execution to the next task in the queue (or to the idle task if there
    /// isn't any).
    ///
    /// # Safety
    ///
    /// The function is only meant to be called from kernel threads (e.g. if they want to yield
    /// CPU execution to wait for something) or interrupt handlers.
    pub unsafe fn yield_cpu(&self) {
        let mut inner = self.inner.lock();

        // Account the elapsed ticks to whichever context ran since the last measurement
        let t = CNTPCT_EL0.get();
        let delta = t - inner.stats.measure_time;
        inner.stats.measure_time = t;

        let current = inner.current.clone();

        if let Some(current) = current.as_ref() {
            if current.state() == ProcessState::Running {
                current.set_state(ProcessState::Ready);
            }
            inner.queue.push_back(current.clone());

            inner.stats.cpu_time += delta;
        } else {
            inner.stats.idle_time += delta;
        }

        let next = inner.next_ready_task();

        inner.current = next.clone();

        // Can drop the lock, we hold current and next Rc's
        drop(inner);

        let (from, _from_rc) = if let Some(current) = current.as_ref() {
            (current.context(), Rc::strong_count(current))
        } else {
            (&self.idle, 0)
        };

        let (to, _to_rc) = if let Some(next) = next.as_ref() {
            next.set_running(Cpu::local_id());
            (next.context(), Rc::strong_count(next))
        } else {
            (&self.idle, 0)
        };

        // if let Some(from) = current.as_ref() {
        //     log_print_raw!(crate::debug::LogLevel::Info, "{}", from.id());
        // } else {
        //     log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
        // }

        // log_print_raw!(crate::debug::LogLevel::Info, " -> ");

        // if let Some(to) = next.as_ref() {
        //     log_print_raw!(crate::debug::LogLevel::Info, "{}", to.id());
        // } else {
        //     log_print_raw!(crate::debug::LogLevel::Info, "{{idle}}");
        // }

        // log_print_raw!(crate::debug::LogLevel::Info, "\n");

        to.switch(from)
    }
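
    // Illustrative sketch (not part of the original commit): a kernel thread polling a
    // condition and yielding between checks, per the safety notes above. `condition_ready`
    // is a stand-in for real wait logic and is an assumption for illustration only.
    #[allow(dead_code)]
    unsafe fn example_busy_wait(&self, condition_ready: impl Fn() -> bool) {
        while !condition_ready() {
            // Give the CPU to the next ready task (or to the idle task)
            self.yield_cpu();
        }
    }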

    /// Pushes the process to the back of the execution queue.
    ///
    /// # Safety
    ///
    /// Only meant to be called from the Process impl. The function does not set any process
    /// accounting information, which may lead to invalid states.
    pub unsafe fn enqueue(&self, p: Rc<Process>) {
        self.inner.lock().queue.push_back(p);
    }

    /// Removes the process with the given PID from the execution queue.
    pub fn dequeue(&self, _pid: ProcessId) {
        todo!();
    }

    /// Returns the queue length at this moment.
    ///
    /// # Note
    ///
    /// This value may immediately change.
    pub fn len(&self) -> usize {
        self.inner.lock().queue.len()
    }

    /// Returns `true` if the queue is empty at the moment.
    ///
    /// # Note
    ///
    /// This may immediately change.
    pub fn is_empty(&self) -> bool {
        self.inner.lock().queue.is_empty()
    }

    /// Locks the queue and returns a guard over the inner data structure.
    pub fn lock(&self) -> IrqSafeSpinlockGuard<CpuQueueInner> {
        self.inner.lock()
    }

    /// Returns the process currently being executed.
    ///
    /// # Note
    ///
    /// This function should be safe in all kernel thread/interrupt contexts:
    ///
    /// * (in kthread) the code calling this will still remain on the same thread.
    /// * (in irq) the code cannot be interrupted and other CPUs shouldn't change this queue,
    ///   so it will remain valid until the end of the interrupt or until
    ///   [CpuQueue::yield_cpu] is called.
    pub fn current_process(&self) -> Option<Rc<Process>> {
        self.inner.lock().current.clone()
    }

    /// Returns the queue for the given CPU index
    pub fn for_cpu(id: usize) -> &'static CpuQueue {
        &QUEUES.get()[id]
    }

    /// Returns an iterator over all queues of the system
    #[inline]
    pub fn all() -> impl Iterator<Item = &'static CpuQueue> {
        QUEUES.get().iter()
    }

    /// Picks the queue with the fewest tasks in it
    pub fn least_loaded() -> Option<(usize, &'static CpuQueue)> {
        let queues = QUEUES.get();

        queues.iter().enumerate().min_by_key(|(_, q)| q.len())
    }
}
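
// Illustrative sketch (not part of the original commit): naive placement of a freshly
// created process onto the least busy CPU. The caller is assumed to have set the process
// state to Ready beforehand, since enqueue() itself performs no accounting.
#[allow(dead_code)]
unsafe fn example_spawn(proc: Rc<Process>) {
    let (_id, queue) = CpuQueue::least_loaded().expect("Queues not initialized");
    queue.enqueue(proc);
}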

/// Initializes the global queue list
pub fn init_queues(queues: Vec<CpuQueue>) {
    QUEUES.init(queues);
}
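
// Illustrative sketch (not part of the original commit): a plausible boot path. One
// queue is created per CPU (a single-CPU setup is assumed here for simplicity), the
// global list is published once, and the boot CPU then enters its queue. Note the real
// call to enter() is expected to come from crate::task::enter(), per its safety notes.
#[allow(dead_code)]
unsafe fn example_boot() -> ! {
    let queues = alloc::vec![CpuQueue::new()];
    init_queues(queues);
    // enter() never returns
    CpuQueue::for_cpu(0).enter()
}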
117
src/util.rs
Normal file
@ -0,0 +1,117 @@
//! Synchronization and utility types
use core::{
    cell::UnsafeCell,
    mem::MaybeUninit,
    ops::Deref,
    panic,
    sync::atomic::{AtomicBool, Ordering},
};

/// Statically-allocated "dynamic" vector
pub struct StaticVector<T, const N: usize> {
    data: [MaybeUninit<T>; N],
    len: usize,
}

/// Wrapper struct to ensure a value can only be initialized once and used only after that
#[repr(C)]
pub struct OneTimeInit<T> {
    value: UnsafeCell<MaybeUninit<T>>,
    state: AtomicBool,
}

unsafe impl<T> Sync for OneTimeInit<T> {}
unsafe impl<T> Send for OneTimeInit<T> {}

impl<T> OneTimeInit<T> {
    /// Wraps the value in a [OneTimeInit]
    pub const fn new() -> Self {
        Self {
            value: UnsafeCell::new(MaybeUninit::uninit()),
            state: AtomicBool::new(false),
        }
    }

    /// Returns `true` if the value has already been initialized
    pub fn is_initialized(&self) -> bool {
        self.state.load(Ordering::Acquire)
    }

    /// Sets the underlying value of the [OneTimeInit]. If already initialized, panics.
    #[track_caller]
    pub fn init(&self, value: T) {
        // Claim the slot first so a second init() panics instead of racing the write
        if self
            .state
            .compare_exchange(false, true, Ordering::Release, Ordering::Relaxed)
            .is_err()
        {
            panic!(
                "{:?}: Double initialization of OneTimeInit<T>",
                panic::Location::caller()
            );
        }

        unsafe {
            (*self.value.get()).write(value);
        }
    }

    /// Returns an immutable reference to the underlying value and panics if it hasn't yet
    /// been initialized
    #[track_caller]
    pub fn get(&self) -> &T {
        if !self.state.load(Ordering::Acquire) {
            panic!(
                "{:?}: Attempt to dereference an uninitialized value",
                panic::Location::caller()
            );
        }

        unsafe { (*self.value.get()).assume_init_ref() }
    }
}
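
// Illustrative sketch (not part of the original commit): typical use of OneTimeInit.
// Since init() sets the flag before writing the value, it must complete before any other
// context may call get(); in this kernel that holds because globals like QUEUES are
// initialized during early boot. The static and function names here are assumptions.
#[allow(dead_code)]
static EXAMPLE_VALUE: OneTimeInit<u32> = OneTimeInit::new();

#[allow(dead_code)]
fn example_one_time_init() -> u32 {
    if !EXAMPLE_VALUE.is_initialized() {
        // A second call to init() would panic with the caller's location
        EXAMPLE_VALUE.init(42);
    }
    *EXAMPLE_VALUE.get()
}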

impl<T, const N: usize> StaticVector<T, N> {
    /// Constructs an empty instance of [StaticVector]
    pub const fn new() -> Self
    where
        T: Copy,
    {
        Self {
            data: [MaybeUninit::uninit(); N],
            len: 0,
        }
    }

    /// Appends an item to the vector.
    ///
    /// # Panics
    ///
    /// Will panic if the vector is full.
    pub fn push(&mut self, value: T) {
        if self.len == N {
            panic!("Static vector overflow: reached limit of {}", N);
        }

        self.data[self.len].write(value);
        self.len += 1;
    }

    /// Returns the number of items present in the vector
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the vector is empty
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}

impl<T, const N: usize> Deref for StaticVector<T, N> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        // The first `len` elements are guaranteed to have been written by push()
        unsafe { MaybeUninit::slice_assume_init_ref(&self.data[..self.len]) }
    }
}
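
// Illustrative sketch (not part of the original commit): collecting a bounded number of
// boot-time items without a heap allocator. The capacity of 8 and the addresses are
// arbitrary values for demonstration.
#[allow(dead_code)]
fn example_static_vector() -> usize {
    let mut regions: StaticVector<usize, 8> = StaticVector::new();
    regions.push(0x4000_0000);
    regions.push(0x8000_0000);
    // Deref exposes only the initialized prefix as a slice
    regions.iter().sum()
}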