rv64: platform init, task switching

Mark Poliakov 2025-01-19 15:16:26 +02:00
parent f46f3ddc31
commit 20fa34c945
42 changed files with 2244 additions and 568 deletions

Cargo.lock (generated, 2 lines changed)
View File

@ -1027,6 +1027,7 @@ dependencies = [
"device-api",
"kernel-arch-interface",
"libk-mm-interface",
"log",
"memtables",
"static_assertions",
"tock-registers 0.9.0",
@ -1307,6 +1308,7 @@ dependencies = [
name = "memtables"
version = "0.1.0"
dependencies = [
"bitflags 2.6.0",
"bytemuck",
]

View File

@ -91,17 +91,17 @@ unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }
[workspace.lints.clippy]
derivable_impls = { level = "allow" }
[profile.dev]
opt-level = 1
split-debuginfo = "packed"
lto = "thin"
panic = "abort"
# [profile.dev]
# opt-level = 1
# split-debuginfo = "packed"
# lto = "thin"
# panic = "abort"
[profile.test]
split-debuginfo = "none"
[profile.dev.package."*"]
opt-level = 3
# [profile.dev.package."*"]
# opt-level = 3
# [profile.dev]
# opt-level = "s"

boot/riscv/fw_jump.bin: new binary file (contents not shown)

View File

@ -1,6 +1,6 @@
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x80000000;
KERNEL_PHYS_BASE = 0x80200000;
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
@ -23,11 +23,6 @@ SECTIONS {
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
}
. = ALIGN(4K);
@ -39,7 +34,13 @@ SECTIONS {
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}

View File

@ -54,6 +54,7 @@ device-tree.workspace = true
kernel-arch-aarch64.workspace = true
[target.'cfg(target_arch = "riscv64")'.dependencies]
device-tree.workspace = true
kernel-arch-riscv64.workspace = true
[target.'cfg(target_arch = "x86_64")'.dependencies]

View File

@ -30,6 +30,18 @@ pub struct IpiQueue<A: Architecture> {
data: IrqSafeSpinlock<A, Option<IpiMessage>>,
}
pub trait CpuData {
fn is_bootstrap(&self, id: u32) -> bool {
// On most architectures
id == 0
}
fn queue_index(&self, id: u32) -> usize {
// On most architectures
id as usize
}
}
pub trait CpuFeatureSet {
fn iter(&self) -> impl Iterator<Item = &'static str>;
}
@ -50,6 +62,14 @@ impl<A: Architecture, S: Scheduler + 'static> CpuImpl<A, S> {
unsafe { A::init_ipi_queues(queues) }
}
pub fn is_bootstrap(&self) -> bool {
self.inner.is_bootstrap(self.id)
}
pub fn queue_index(&self) -> usize {
self.inner.queue_index(self.id)
}
pub fn set_current_thread_id(&mut self, id: Option<S::ThreadId>) {
self.current_thread_id = id;
}

View File

@ -3,7 +3,7 @@
#![allow(clippy::new_without_default)]
use alloc::vec::Vec;
use cpu::{CpuFeatureSet, CpuImpl, IpiQueue};
use cpu::{CpuData, CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use task::Scheduler;
@ -19,14 +19,15 @@ pub mod sync;
pub mod task;
pub mod util;
#[cfg(any(target_pointer_width = "32", rust_analyzer))]
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xC0000000;
#[cfg(any(target_pointer_width = "64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFFF000000000;
pub trait Architecture: Sized + 'static {
type PerCpuData;
type PerCpuData: CpuData;
type CpuFeatures: CpuFeatureSet;
type BreakpointType;

View File

@ -13,6 +13,7 @@ device-api = { workspace = true, features = ["derive"] }
tock-registers.workspace = true
bitflags.workspace = true
static_assertions.workspace = true
log.workspace = true
[lints]
workspace = true

View File

@ -0,0 +1,110 @@
// vi:ft=asm:
.section .text
.macro SAVE_TASK_STATE
addi sp, sp, -{context_size}
sd ra, 0 * 8(sp)
sd gp, 1 * 8(sp)
sd s11, 2 * 8(sp)
sd s10, 3 * 8(sp)
sd s9, 4 * 8(sp)
sd s8, 5 * 8(sp)
sd s7, 6 * 8(sp)
sd s6, 7 * 8(sp)
sd s5, 8 * 8(sp)
sd s4, 9 * 8(sp)
sd s3, 10 * 8(sp)
sd s2, 11 * 8(sp)
sd s1, 12 * 8(sp)
sd s0, 13 * 8(sp)
.endm
.macro LOAD_TASK_STATE
ld ra, 0 * 8(sp)
ld gp, 1 * 8(sp)
ld s11, 2 * 8(sp)
ld s10, 3 * 8(sp)
ld s9, 4 * 8(sp)
ld s8, 5 * 8(sp)
ld s7, 6 * 8(sp)
ld s6, 7 * 8(sp)
ld s5, 8 * 8(sp)
ld s4, 9 * 8(sp)
ld s3, 10 * 8(sp)
ld s2, 11 * 8(sp)
ld s1, 12 * 8(sp)
ld s0, 13 * 8(sp)
addi sp, sp, {context_size}
.endm
.option push
.option norvc
.global __rv64_task_enter_kernel
.global __rv64_task_enter_user
.global __rv64_switch_task
.global __rv64_switch_task_and_drop
.global __rv64_enter_task
// Context switching
.type __rv64_enter_task, @function
__rv64_enter_task:
// a0 - task ctx
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_enter_task, . - __rv64_enter_task
.type __rv64_switch_task, @function
__rv64_switch_task:
// a0 - destination task ctx
// a1 - source task ctx
SAVE_TASK_STATE
sd sp, (a1)
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_switch_task, . - __rv64_switch_task
.type __rv64_switch_task_and_drop, @function
__rv64_switch_task_and_drop:
// a0 - destination task ctx
// a1 - thread struct to drop
ld sp, (a0)
mv a0, a1
call __arch_drop_thread
LOAD_TASK_STATE
ret
.size __rv64_switch_task_and_drop, . - __rv64_switch_task_and_drop
// Entry functions
.type __rv64_task_enter_kernel, @function
__rv64_task_enter_kernel:
ld a0, (sp) // argument
ld ra, 8(sp) // entry
addi sp, sp, 16
// Set SPIE to enable interrupts
// Set SPP = 1 to indicate a return to S-mode
csrr t0, sstatus
ori t0, t0, (1 << 5)
ori t0, t0, (1 << 8)
csrw sstatus, t0
csrw sepc, ra
sret
.size __rv64_task_enter_kernel, . - __rv64_task_enter_kernel
.type __rv64_task_enter_user, @function
__rv64_task_enter_user:
// TODO
j .
.size __rv64_task_enter_user, . - __rv64_task_enter_user
.option pop

View File

@ -1,16 +1,42 @@
use core::marker::PhantomData;
use core::{arch::global_asm, cell::UnsafeCell, marker::PhantomData};
use kernel_arch_interface::{
mem::{KernelTableManager, PhysicalMemoryAllocator},
task::{TaskContext, UserContextInfo},
task::{StackBuilder, TaskContext, UserContextInfo},
};
use libk_mm_interface::address::PhysicalAddress;
use yggdrasil_abi::error::Error;
pub struct TaskContextImpl<K, PA> {
pub const CONTEXT_SIZE: usize = 14 * size_of::<usize>();
#[repr(C, align(0x10))]
struct TaskContextInner {
// 0x00
sp: usize,
}
pub struct TaskContextImpl<
K: KernelTableManager,
PA: PhysicalMemoryAllocator<Address = PhysicalAddress>,
> {
inner: UnsafeCell<TaskContextInner>,
// fp_context: UnsafeCell<FpContext>,
stack_base_phys: PhysicalAddress,
stack_size: usize,
_pd: PhantomData<(K, PA)>,
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContextImpl<K, PA>
{
unsafe fn load_state(&self) {
// TODO load new SATP value
}
unsafe fn store_state(&self) {}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContext<K, PA> for TaskContextImpl<K, PA>
{
@ -23,9 +49,31 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
let _ = entry;
let _ = arg;
todo!()
const KERNEL_TASK_PAGES: usize = 8;
let stack_base_phys = PA::allocate_contiguous_pages(KERNEL_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
// Entry and argument
stack.push(entry as _);
stack.push(arg);
setup_common_context(&mut stack, __rv64_task_enter_kernel as _);
let sp = stack.build();
// TODO stack is leaked
log::info!("stack = {:#x}", stack_base);
Ok(Self {
inner: UnsafeCell::new(TaskContextInner { sp }),
// fp_context: UnsafeCell::new(FpContext::new()),
stack_base_phys,
stack_size: KERNEL_TASK_PAGES * 0x1000,
_pd: PhantomData,
})
}
fn set_thread_pointer(&self, tp: usize) {
@ -39,16 +87,74 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
unsafe fn enter(&self) -> ! {
todo!()
unsafe {
self.load_state();
__rv64_enter_task(self.inner.get())
}
}
unsafe fn switch(&self, from: &Self) {
let _ = from;
todo!()
if core::ptr::addr_eq(self, from) {
return;
}
unsafe {
from.store_state();
self.load_state();
__rv64_switch_task(self.inner.get(), from.inner.get())
}
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
let _ = thread;
todo!()
unsafe {
self.load_state();
__rv64_switch_task_and_drop(self.inner.get(), thread)
}
}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop
for TaskContextImpl<K, PA>
{
fn drop(&mut self) {
assert_eq!(self.stack_size % 0x1000, 0);
for offset in (0..self.stack_size).step_by(0x1000) {
unsafe {
PA::free_page(self.stack_base_phys.add(offset));
}
}
}
}
fn setup_common_context(builder: &mut StackBuilder, entry: usize) {
builder.push(0); // x8/s0/fp
builder.push(0); // x9/s1
builder.push(0); // x18/s2
builder.push(0); // x19/s3
builder.push(0); // x20/s4
builder.push(0); // x21/s5
builder.push(0); // x22/s6
builder.push(0); // x23/s7
builder.push(0); // x24/s8
builder.push(0); // x25/s9
builder.push(0); // x26/s10
builder.push(0); // x27/s11
builder.push(0); // x4/gp
builder.push(entry); // x1/ra return address
}
unsafe extern "C" {
fn __rv64_enter_task(to: *mut TaskContextInner) -> !;
fn __rv64_switch_task(to: *mut TaskContextInner, from: *mut TaskContextInner);
fn __rv64_switch_task_and_drop(to: *mut TaskContextInner, thread: *const ()) -> !;
fn __rv64_task_enter_kernel();
fn __rv64_task_enter_user();
// fn __rv64_fp_store_context(to: *mut c_void);
// fn __rv64_fp_restore_context(from: *const c_void);
}
global_asm!(
include_str!("context.S"),
context_size = const CONTEXT_SIZE,
);
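For reference, and not part of the commit itself: the 14-word frame that setup_common_context builds is consumed in reverse order by LOAD_TASK_STATE in context.S, so the entry trampoline pushed last lands at offset 0 and is loaded into ra, while s0, pushed first, ends up at offset 13 * 8. Only the callee-saved registers plus gp and ra need saving, because __rv64_switch_task is reached through an ordinary function call and caller-saved registers are already handled by the RISC-V calling convention. An illustrative layout:

```rust
// Illustrative offsets into the saved frame; the kernel itself only stores
// `sp` in TaskContextInner and lets the assembly do the indexing.
mod kernel_frame_layout {
    pub const RA: usize = 0 * 8;   // entry trampoline (__rv64_task_enter_kernel), pushed last
    pub const GP: usize = 1 * 8;
    pub const S11_TO_S1: usize = 2 * 8; // s11..s1 occupy offsets 2*8 ..= 12*8
    pub const S0: usize = 13 * 8;  // pushed first, so it sits highest in the frame
    pub const SIZE: usize = 14 * 8; // == CONTEXT_SIZE
    // Above the frame sit the task argument (SIZE + 0) and the entry point
    // (SIZE + 8), which __rv64_task_enter_kernel reads after LOAD_TASK_STATE
    // has popped the frame.
}
```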

View File

@ -0,0 +1,6 @@
#[inline]
pub fn rdtime() -> u64 {
let mut output: u64;
unsafe { core::arch::asm!("rdtime {0}", out(reg) output) };
output
}
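A hedged usage sketch, not part of the commit: rdtime reads the time CSR, which ticks at the platform's timebase frequency, so the value can be turned into a deadline for sbi_set_timer (added in sbi.rs below). The 10 MHz constant is a placeholder; the real frequency comes from the timebase-frequency device-tree property.

```rust
use kernel_arch_riscv64::{intrinsics::rdtime, sbi};

// Placeholder value; the real timebase must be read from the device tree.
const TIMEBASE_FREQUENCY: u64 = 10_000_000;

// Arm the next S-mode timer interrupt `ms` milliseconds from now.
fn arm_timer_in(ms: u64) {
    let deadline = rdtime() + ms * (TIMEBASE_FREQUENCY / 1000);
    sbi::sbi_set_timer(deadline);
}
```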

View File

@ -1,55 +1,95 @@
#![feature(decl_macro)]
#![feature(decl_macro, naked_functions)]
#![no_std]
extern crate alloc;
use alloc::vec::Vec;
use alloc::{boxed::Box, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
task::Scheduler,
Architecture,
};
use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
use registers::{SSCRATCH, SSTATUS};
pub mod mem;
pub use mem::{KernelTableManagerImpl, ProcessAddressSpaceImpl};
pub use mem::{process::ProcessAddressSpaceImpl, KernelTableManagerImpl};
pub mod context;
pub use context::TaskContextImpl;
use registers::MSTATUS;
use tock_registers::interfaces::{ReadWriteable, Readable};
pub mod intrinsics;
pub mod registers;
pub mod sbi;
pub struct ArchitectureImpl;
#[repr(C)]
pub struct PerCpuData {
// Used in assembly
pub tmp_t0: usize, // 0x00
pub smode_sp: usize, // 0x08
// Used elsewhere
pub bootstrap: bool,
pub queue_index: usize,
}
impl CpuData for PerCpuData {
fn is_bootstrap(&self, id: u32) -> bool {
let _ = id;
self.bootstrap
}
fn queue_index(&self, id: u32) -> usize {
let _ = id;
self.queue_index
}
}
#[naked]
extern "C" fn idle_task(_: usize) -> ! {
unsafe {
core::arch::naked_asm!("1: nop; j 1b");
}
}
impl Architecture for ArchitectureImpl {
type PerCpuData = ();
type PerCpuData = PerCpuData;
type CpuFeatures = ();
type BreakpointType = u32;
const BREAKPOINT_VALUE: Self::BreakpointType = 0;
fn halt() -> ! {
loop {}
loop {
unsafe { Self::set_interrupt_mask(true) };
Self::wait_for_interrupt();
}
}
unsafe fn set_local_cpu(cpu: *mut ()) {
let _ = cpu;
loop {}
SSCRATCH.set(cpu.addr() as u64);
unsafe { core::arch::asm!("mv tp, {0}", in(reg) cpu) };
}
#[inline]
fn local_cpu() -> *mut () {
loop {}
let value: u64;
unsafe { core::arch::asm!("mv {0}, tp", out(reg) value) };
value as _
}
unsafe fn init_local_cpu<S: Scheduler + 'static>(id: Option<u32>, data: Self::PerCpuData) {
let _ = id;
let _ = data;
loop {}
let id = id.expect("riscv64 requires an explicit HART ID in its per-processor struct");
let cpu = Box::leak(Box::new(CpuImpl::<Self, S>::new(id, data)));
unsafe { cpu.set_local() };
}
unsafe fn init_ipi_queues(queues: Vec<IpiQueue<Self>>) {
// TODO
let _ = queues;
loop {}
// loop {}
}
fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
@ -61,24 +101,27 @@ impl Architecture for ArchitectureImpl {
unsafe fn set_interrupt_mask(mask: bool) -> bool {
let old = Self::interrupt_mask();
if mask {
MSTATUS.modify(MSTATUS::MIE::CLEAR);
SSTATUS.modify(SSTATUS::SIE::CLEAR);
} else {
MSTATUS.modify(MSTATUS::MIE::SET);
SSTATUS.modify(SSTATUS::SIE::SET);
}
old
}
#[inline]
fn interrupt_mask() -> bool {
MSTATUS.matches_all(MSTATUS::MIE::SET)
SSTATUS.matches_all(SSTATUS::SIE::CLEAR)
}
fn wait_for_interrupt() {
loop {}
unsafe {
core::arch::asm!("wfi");
}
}
fn cpu_count() -> usize {
loop {}
// TODO
1
}
fn cpu_index<S: Scheduler + 'static>() -> u32 {
@ -104,6 +147,6 @@ impl Architecture for ArchitectureImpl {
}
fn idle_task() -> extern "C" fn(usize) -> ! {
loop {}
idle_task
}
}

View File

@ -1,20 +1,22 @@
use core::marker::PhantomData;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock,
};
use libk_mm_interface::{
address::PhysicalAddress,
process::ProcessAddressSpaceManager,
table::{page_index, MapAttributes, TableAllocator},
table::{page_index, EntryLevel, EntryLevelExt},
};
use static_assertions::const_assert_eq;
use table::{L1, L2};
use memtables::riscv64::PageAttributes;
use static_assertions::{const_assert, const_assert_eq};
use table::{PageEntry, PageTable, L1, L2, L3};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;
pub use memtables::riscv64::FixedTables;
use crate::registers::SATP;
pub mod process;
pub mod table;
split_spinlock! {
@ -28,34 +30,53 @@ split_spinlock! {
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFFF0_00000000;
pub const KERNEL_VIRT_OFFSET: usize = kernel_arch_interface::KERNEL_VIRT_OFFSET;
pub const KERNEL_PHYS_BASE: usize = 0x80000000;
pub const SIGN_EXTEND_MASK: usize = 0xFFFFFFC0_00000000;
pub const SIGN_EXTEND_MASK: usize = 0xFFFFFF80_00000000;
pub const KERNEL_START_L1I: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
pub const KERNEL_L2I: usize = page_index::<L2>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const_assert_eq!(KERNEL_START_L1I, 450);
const_assert_eq!(KERNEL_L2I, 0);
// Runtime mappings
// 1GiB of device memory space
const DEVICE_MAPPING_L1I: usize = KERNEL_START_L1I + 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 32GiB of RAM space
const RAM_MAPPING_START_L1I: usize = KERNEL_START_L1I + 2;
const RAM_MAPPING_L1_COUNT: usize = 32;
const_assert!(RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT <= 512);
const_assert!(DEVICE_MAPPING_L1I < 512);
const DEVICE_MAPPING_OFFSET: usize = (DEVICE_MAPPING_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
const RAM_MAPPING_OFFSET: usize = (RAM_MAPPING_START_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
// Runtime tables
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
[const { PageTable::zeroed() }; DEVICE_MAPPING_L3_COUNT];
/// Any VAs above this one are sign-extended
pub const USER_BOUNDARY: usize = 0x40_00000000;
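A quick sanity check of the Sv39 arithmetic behind these constants, illustrative only: the root (L1) index is bits 38:30 of the virtual address, the L2 index is bits 29:21, and any address with bit 38 set must be sign-extended through bits 63:39, which is exactly SIGN_EXTEND_MASK.

```rust
// Sv39 index check for the kernel image mapping (illustrative).
const VA: usize = 0xFFFF_FFF0_0000_0000 + 0x8000_0000; // KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE
const L1_INDEX: usize = (VA >> 30) & 0x1FF; // bits 38:30 -> 450
const L2_INDEX: usize = (VA >> 21) & 0x1FF; // bits 29:21 -> 0
const SIGN_EXT: usize = !((1usize << 39) - 1); // bits 63:39 -> 0xFFFF_FF80_0000_0000

const _: () = assert!(L1_INDEX == 450 && L2_INDEX == 0 && SIGN_EXT == 0xFFFF_FF80_0000_0000);
```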
#[derive(Debug)]
pub struct KernelTableManagerImpl;
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
_pd: PhantomData<TA>,
}
impl KernelTableManager for KernelTableManagerImpl {
fn virtualize(phys: u64) -> usize {
let _ = phys;
loop {}
fn virtualize(address: u64) -> usize {
let address = address as usize;
if address >= RAM_MAPPING_OFFSET {
panic!("Invalid physical address: {address:#x}");
}
address + RAM_MAPPING_OFFSET
}
fn physicalize(virt: usize) -> u64 {
let _ = virt;
loop {}
fn physicalize(address: usize) -> u64 {
if address < RAM_MAPPING_OFFSET {
panic!("Invalid \"physicalized\" virtual address {address:#x}");
}
(address - RAM_MAPPING_OFFSET) as u64
}
unsafe fn map_device_pages(
@ -63,58 +84,194 @@ impl KernelTableManager for KernelTableManagerImpl {
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<Self>, Error> {
let _ = base;
let _ = count;
let _ = attrs;
loop {}
unsafe { map_device_memory(PhysicalAddress::from_u64(base), count, attrs) }
}
unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
let _ = mapping;
loop {}
}
unsafe fn unmap_physical_address(virt: usize) {
let _ = virt;
loop {}
unsafe { unmap_device_memory(mapping) }
}
}
impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
const LOWER_LIMIT_PFN: usize = 0;
const UPPER_LIMIT_PFN: usize = 0;
// Device mappings
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
// TODO don't map pages if already mapped
fn new() -> Result<Self, Error> {
todo!()
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
continue 'l0;
}
}
}
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
DEVICE_MAPPING_L3S[l2i][l3i] =
PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
}
tlb_flush_va(DEVICE_MAPPING_OFFSET + l2i * L2::SIZE + l3i * L3::SIZE);
}
return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
}
unsafe fn map_page(
&mut self,
address: usize,
physical: PhysicalAddress,
flags: MapAttributes,
) -> Result<(), Error> {
let _ = address;
let _ = physical;
let _ = flags;
todo!()
Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
unsafe {
if DEVICE_MAPPING_L2[i + j].is_present() {
continue 'l0;
}
}
}
unsafe {
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
// tlb_flush_vaae1(DEVICE_MAPPING_OFFSET + (i + j) * L2::SIZE);
}
}
return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
let _ = address;
todo!()
}
Err(Error::OutOfMemory)
}
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
let _ = address;
todo!()
}
pub(crate) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = base.page_offset::<L3>();
let page_count = (l3_offset + size).page_count::<L3>();
fn as_address_with_asid(&self) -> u64 {
todo!()
}
if page_count > 256 {
// Large mapping, use L2 mapping instead
let l2_aligned = base.page_align_down::<L2>();
let l2_offset = base.page_offset::<L2>();
let page_count = (l2_offset + size).page_count::<L2>();
unsafe fn clear(&mut self) {
todo!()
unsafe {
let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
address,
base_address,
page_count,
L2::SIZE,
))
}
} else {
// Just map the pages directly
unsafe {
let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
address,
base_address,
page_count,
L3::SIZE,
))
}
}
}
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
let page = map.base_address + i * L3::SIZE;
let l2i = page.page_index::<L2>();
let l3i = page.page_index::<L3>();
unsafe {
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
}
// tlb_flush_vaae1(page);
}
}
L2::SIZE => todo!(),
_ => unimplemented!(),
}
}
pub fn auto_address<T>(x: *const T) -> usize {
let x = x.addr();
if x >= KERNEL_VIRT_OFFSET {
x - KERNEL_VIRT_OFFSET
} else {
x
}
}
pub unsafe fn enable_mmu() {
let l1_phys = auto_address(&raw const KERNEL_TABLES) as u64;
SATP.write(SATP::PPN.val(l1_phys >> 12) + SATP::MODE::Sv39);
}
// Also unmaps the lower half
pub unsafe fn setup_fixed_tables() {
let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
let mut tables = KERNEL_TABLES.lock();
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
// Unmap the lower half
tables.l1.data[kernel_l1i_lower] = 0;
// Set up static runtime mappings
for i in 0..DEVICE_MAPPING_L3_COUNT {
unsafe {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
(&raw const DEVICE_MAPPING_L3S[i]).addr() - KERNEL_VIRT_OFFSET,
);
DEVICE_MAPPING_L2[i] =
PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
}
}
assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
tables.l1.data[DEVICE_MAPPING_L1I] =
((device_mapping_l2_phys as u64) >> 2) | PageAttributes::V.bits();
// tlb_flush_vaae1(DEVICE_MAPPING_OFFSET);
for l1i in 0..RAM_MAPPING_L1_COUNT {
let physical = (l1i as u64) << L1::SHIFT;
tables.l1.data[l1i + RAM_MAPPING_START_L1I] =
(physical >> 2) | (PageAttributes::R | PageAttributes::W | PageAttributes::V).bits();
}
// tlb_flush_all()
}
pub fn tlb_flush_va(va: usize) {
unsafe {
core::arch::asm!("sfence.vma zero, {0}", in(reg) va);
}
}
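The commented-out tlb_flush_all() in setup_fixed_tables suggests a whole-TLB flush helper that is not included in this commit. On RISC-V, sfence.vma with both operands as x0 invalidates cached translations for all address spaces, so a minimal sketch could be:

```rust
// Hypothetical helper, not part of the commit: flush every cached translation.
pub fn tlb_flush_all() {
    unsafe {
        core::arch::asm!("sfence.vma zero, zero");
    }
}
```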

View File

@ -0,0 +1,51 @@
use core::marker::PhantomData;
use libk_mm_interface::{
address::PhysicalAddress,
process::ProcessAddressSpaceManager,
table::{MapAttributes, TableAllocator},
};
use yggdrasil_abi::error::Error;
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
_pd: PhantomData<TA>,
}
impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
const LOWER_LIMIT_PFN: usize = 0;
const UPPER_LIMIT_PFN: usize = 0;
fn new() -> Result<Self, Error> {
todo!()
}
unsafe fn map_page(
&mut self,
address: usize,
physical: PhysicalAddress,
flags: MapAttributes,
) -> Result<(), Error> {
let _ = address;
let _ = physical;
let _ = flags;
todo!()
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
let _ = address;
todo!()
}
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
let _ = address;
todo!()
}
fn as_address_with_asid(&self) -> u64 {
todo!()
}
unsafe fn clear(&mut self) {
todo!()
}
}

View File

@ -3,7 +3,6 @@ use core::{
ops::{Index, IndexMut},
};
use bitflags::bitflags;
use libk_mm_interface::{
address::PhysicalAddress,
pointer::{PhysicalRef, PhysicalRefMut},
@ -13,37 +12,7 @@ use yggdrasil_abi::error::Error;
use super::KernelTableManagerImpl;
bitflags! {
pub struct PageAttributes: u64 {
const N = 1 << 63;
/// Dirty bit
const D = 1 << 7;
/// Access bit
const A = 1 << 6;
/// Global mapping bit, implies all lower levels are also global
const G = 1 << 5;
/// U-mode access permission
const U = 1 << 4;
/// Execute permission
const X = 1 << 3;
/// Write permission
const W = 1 << 2;
/// Read-permission
const R = 1 << 1;
/// Valid bit
const V = 1 << 0;
}
// X W R Meaning
// 0 0 0 Pointer to next level of page table
// 0 0 1 Read-only page
// 0 1 0 ---
// 0 1 1 Read-write page
// 1 0 0 Execute only
// 1 0 1 Read-execute page
// 1 1 0 ---
// 1 1 1 Read-write-execute page
}
pub use memtables::riscv64::PageAttributes;
/// L3 - entry is 4KiB
#[derive(Debug, Clone, Copy)]

View File

@ -27,184 +27,312 @@ macro impl_csr_write($struct:ident, $repr:ty, $reg:ident, $register:ty) {
}
}
pub mod misa {
use tock_registers::{interfaces::Readable, register_bitfields};
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub MISA [
A OFFSET(0) NUMBITS(1) [],
C OFFSET(2) NUMBITS(1) [],
D OFFSET(3) NUMBITS(1) [],
E OFFSET(4) NUMBITS(1) [],
F OFFSET(5) NUMBITS(1) [],
H OFFSET(6) NUMBITS(1) [],
I OFFSET(7) NUMBITS(1) [],
M OFFSET(12) NUMBITS(1) [],
Q OFFSET(16) NUMBITS(1) [],
S OFFSET(17) NUMBITS(1) [],
U OFFSET(18) NUMBITS(1) [],
X OFFSET(23) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, misa, MISA::Register);
impl_csr_write!(Reg, u64, misa, MISA::Register);
impl Reg {
pub fn is_valid(&self) -> bool {
self.get() != 0
}
}
pub const MISA: Reg = Reg;
}
pub mod mstatus {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub MSTATUS [
/// Interrupt enable for S-mode
SIE OFFSET(1) NUMBITS(1) [],
/// Interrupt enable for M-mode
MIE OFFSET(3) NUMBITS(1) [],
/// Stored SIE state on S-mode trap delegation
SPIE OFFSET(5) NUMBITS(1) [],
/// U-mode big endian
UBE OFFSET(6) NUMBITS(1) [],
/// TODO: something written here on trap to M-mode
MPIE OFFSET(7) NUMBITS(1) [],
/// TODO: something for nested traps
SPP OFFSET(8) NUMBITS(1) [],
/// Vector register dirty status
VS OFFSET(9) NUMBITS(2) [],
/// Original mode before being trapped into M-mode
MPP OFFSET(11) NUMBITS(2) [
U = 0,
S = 1,
M = 3
],
/// Float register dirty status
FS OFFSET(13) NUMBITS(2) [],
/// U-mode extension dirty status
XS OFFSET(15) NUMBITS(2) [],
/// Effective privilege mode at which loads and stores execute.
///
/// When MPRV = 0, loads and stores behave as normal
/// MPRV = 1, loads/stores are translated and protected
MPRV OFFSET(17) NUMBITS(1) [],
/// Permit supervisor user memory access
///
/// When SUM = 0, S-mode access to pages accessible by U-mode will fault
SUM OFFSET(18) NUMBITS(1) [],
MXR OFFSET(19) NUMBITS(1) [],
/// Trap virtual memory
///
/// When TVM = 1, attempts to read/write satp CSR, execute sfence.vma or sinval.vma
/// in S-mode will raise an illegal instruction exception
TVM OFFSET(20) NUMBITS(1) [],
/// Timeout wait
///
/// When TW = 1, wfi executed in lower privilege level which does not complete
/// within some implementation-specific timeout, raises an illegal
/// instruction exception
TW OFFSET(21) NUMBITS(1) [],
TSR OFFSET(22) NUMBITS(1) [],
/// U-mode XLEN value
UXL OFFSET(32) NUMBITS(2) [],
/// S-mode XLEN value
SXL OFFSET(34) NUMBITS(2) [],
/// S-mode big endian
SBE OFFSET(36) NUMBITS(1) [],
/// M-mode big endian
MBE OFFSET(37) NUMBITS(1) [],
SD OFFSET(63) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, mstatus, MSTATUS::Register);
impl_csr_write!(Reg, u64, mstatus, MSTATUS::Register);
pub const MSTATUS: Reg = Reg;
}
pub mod mepc {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, mepc, ());
impl_csr_write!(Reg, u64, mepc, ());
pub const MEPC: Reg = Reg;
}
pub mod mtvec {
use tock_registers::{interfaces::ReadWriteable, register_bitfields};
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
register_bitfields!(
u64,
pub MTVEC [
MODE OFFSET(0) NUMBITS(2) [
Direct = 0,
Vectored = 1
],
BASE OFFSET(2) NUMBITS(62) [],
]
);
impl_csr_read!(Reg, u64, mtvec, MTVEC::Register);
impl_csr_write!(Reg, u64, mtvec, MTVEC::Register);
impl Reg {
pub fn set_base(&self, base: usize) {
debug_assert_eq!(base & 0xF, 0);
let mask = match base & 63 != 0 {
false => 0,
true => 0x3 << 62,
};
self.modify(MTVEC::BASE.val(((base as u64) >> 2) | mask));
}
}
pub const MTVEC: Reg = Reg;
}
pub mod medeleg {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, medeleg, ());
impl_csr_write!(Reg, u64, medeleg, ());
pub const MEDELEG: Reg = Reg;
}
pub mod mideleg {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, mideleg, ());
impl_csr_write!(Reg, u64, mideleg, ());
pub const MIDELEG: Reg = Reg;
}
// pub mod misa {
// use tock_registers::{interfaces::Readable, register_bitfields};
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MISA [
// A OFFSET(0) NUMBITS(1) [],
// C OFFSET(2) NUMBITS(1) [],
// D OFFSET(3) NUMBITS(1) [],
// E OFFSET(4) NUMBITS(1) [],
// F OFFSET(5) NUMBITS(1) [],
// H OFFSET(6) NUMBITS(1) [],
// I OFFSET(7) NUMBITS(1) [],
// M OFFSET(12) NUMBITS(1) [],
// Q OFFSET(16) NUMBITS(1) [],
// S OFFSET(17) NUMBITS(1) [],
// U OFFSET(18) NUMBITS(1) [],
// X OFFSET(23) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, misa, MISA::Register);
// impl_csr_write!(Reg, u64, misa, MISA::Register);
//
// impl Reg {
// pub fn is_valid(&self) -> bool {
// self.get() != 0
// }
// }
//
// pub const MISA: Reg = Reg;
// }
//
// pub mod mstatus {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MSTATUS [
// /// Interrupt enable for S-mode
// SIE OFFSET(1) NUMBITS(1) [],
// /// Interrupt enable for M-mode
// MIE OFFSET(3) NUMBITS(1) [],
// /// Stored SIE state on S-mode trap delegation
// SPIE OFFSET(5) NUMBITS(1) [],
// /// U-mode big endian
// UBE OFFSET(6) NUMBITS(1) [],
// /// TODO: something written here on trap to M-mode
// MPIE OFFSET(7) NUMBITS(1) [],
// /// TODO: something for nested traps
// SPP OFFSET(8) NUMBITS(1) [],
// /// Vector register dirty status
// VS OFFSET(9) NUMBITS(2) [],
// /// Original mode before being trapped into M-mode
// MPP OFFSET(11) NUMBITS(2) [
// U = 0,
// S = 1,
// M = 3
// ],
// /// Float register dirty status
// FS OFFSET(13) NUMBITS(2) [],
// /// U-mode extension dirty status
// XS OFFSET(15) NUMBITS(2) [],
// /// Effective privilege mode at which loads and stores execute.
// ///
// /// When MPRV = 0, loads and stores behave as normal
// /// MPRV = 1, loads/stores are translated and protected
// MPRV OFFSET(17) NUMBITS(1) [],
// /// Permit supervisor user memory access
// ///
// /// When SUM = 0, S-mode access to pages accessible by U-mode will fault
// SUM OFFSET(18) NUMBITS(1) [],
// MXR OFFSET(19) NUMBITS(1) [],
// /// Trap virtual memory
// ///
// /// When TVM = 1, attempts to read/write satp CSR, execute sfence.vma or sinval.vma
// /// in S-mode will raise an illegal instruction exception
// TVM OFFSET(20) NUMBITS(1) [],
// /// Timeout wait
// ///
// /// When TW = 1, wfi executed in lower privilege level which does not complete
// /// within some implementation-specific timeout, raises an illegal
// /// instruction exception
// TW OFFSET(21) NUMBITS(1) [],
// TSR OFFSET(22) NUMBITS(1) [],
// /// U-mode XLEN value
// UXL OFFSET(32) NUMBITS(2) [],
// /// S-mode XLEN value
// SXL OFFSET(34) NUMBITS(2) [],
// /// S-mode big endian
// SBE OFFSET(36) NUMBITS(1) [],
// /// M-mode big endian
// MBE OFFSET(37) NUMBITS(1) [],
// SD OFFSET(63) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mstatus, MSTATUS::Register);
// impl_csr_write!(Reg, u64, mstatus, MSTATUS::Register);
//
// pub const MSTATUS: Reg = Reg;
// }
//
// pub mod mepc {
// use super::{impl_csr_read, impl_csr_write};
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mepc, ());
// impl_csr_write!(Reg, u64, mepc, ());
//
// pub const MEPC: Reg = Reg;
// }
//
// pub mod mtvec {
// use tock_registers::{interfaces::ReadWriteable, register_bitfields};
//
// use super::{impl_csr_read, impl_csr_write};
//
// pub struct Reg;
//
// register_bitfields!(
// u64,
// pub MTVEC [
// MODE OFFSET(0) NUMBITS(2) [
// Direct = 0,
// Vectored = 1
// ],
// BASE OFFSET(2) NUMBITS(62) [],
// ]
// );
//
// impl_csr_read!(Reg, u64, mtvec, MTVEC::Register);
// impl_csr_write!(Reg, u64, mtvec, MTVEC::Register);
//
// impl Reg {
// pub fn set_base(&self, base: usize) {
// debug_assert_eq!(base & 0xF, 0);
// let mask = match base & 63 != 0 {
// false => 0,
// true => 0x3 << 62,
// };
// self.modify(MTVEC::BASE.val(((base as u64) >> 2) | mask));
// }
// }
//
// pub const MTVEC: Reg = Reg;
// }
//
// pub mod medeleg {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// pub struct Reg;
//
// register_bitfields!(
// u64,
// pub MEDELEG [
// ECALL_SMODE OFFSET(9) NUMBITS(1) [],
// ]
// );
//
// impl_csr_read!(Reg, u64, medeleg, MEDELEG::Register);
// impl_csr_write!(Reg, u64, medeleg, MEDELEG::Register);
//
// pub const MEDELEG: Reg = Reg;
// }
//
// pub mod mideleg {
// use super::{impl_csr_read, impl_csr_write, MIE};
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mideleg, MIE::Register);
// impl_csr_write!(Reg, u64, mideleg, MIE::Register);
//
// pub const MIDELEG: Reg = Reg;
// }
//
// pub mod mcause {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MCAUSE [
// CODE OFFSET(0) NUMBITS(63) [],
// INTERRUPT OFFSET(63) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mcause, MCAUSE::Register);
// impl_csr_write!(Reg, u64, mcause, MCAUSE::Register);
//
// pub const MCAUSE: Reg = Reg;
// }
//
// pub mod mie {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MIE [
// /// ???
// SSIE OFFSET(1) NUMBITS(1) [],
// /// ???
// MSIE OFFSET(3) NUMBITS(1) [],
// /// S-mode timer enable
// STIE OFFSET(5) NUMBITS(1) [],
// /// M-mode timer enable
// MTIE OFFSET(7) NUMBITS(1) [],
// /// S-mode external interrupt enable
// SEIE OFFSET(9) NUMBITS(1) [],
// /// M-mode external interrupt enable
// MEIE OFFSET(11) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mie, MIE::Register);
// impl_csr_write!(Reg, u64, mie, MIE::Register);
//
// pub const MIE: Reg = Reg;
// }
//
// pub mod mip {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MIP [
// /// ???
// SSIP OFFSET(1) NUMBITS(1) [],
// /// ???
// MSIP OFFSET(3) NUMBITS(1) [],
// /// S-mode timer pending
// STIP OFFSET(5) NUMBITS(1) [],
// /// M-mode timer pending
// MTIP OFFSET(7) NUMBITS(1) [],
// /// S-mode external interrupt pending
// SEIP OFFSET(9) NUMBITS(1) [],
// /// M-mode external interrupt pending
// MEIP OFFSET(11) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mip, MIP::Register);
// impl_csr_write!(Reg, u64, mip, MIP::Register);
//
// pub const MIP: Reg = Reg;
// }
//
// pub mod mcounteren {
// use tock_registers::register_bitfields;
//
// use super::{impl_csr_read, impl_csr_write};
//
// register_bitfields!(
// u64,
// pub MCOUNTEREN [
// /// Enable reading cycle counter from S-mode
// CY OFFSET(1) NUMBITS(1) [],
// /// Enable reading time counter from S-mode
// TM OFFSET(2) NUMBITS(1) [],
// /// Enable reading instret counter from S-mode
// IR OFFSET(3) NUMBITS(1) [],
// ]
// );
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mcounteren, MCOUNTEREN::Register);
// impl_csr_write!(Reg, u64, mcounteren, MCOUNTEREN::Register);
//
// pub const MCOUNTEREN: Reg = Reg;
// }
//
// pub mod mscratch {
// use super::{impl_csr_read, impl_csr_write};
//
// pub struct Reg;
//
// impl_csr_read!(Reg, u64, mscratch, ());
// impl_csr_write!(Reg, u64, mscratch, ());
//
// pub const MSCRATCH: Reg = Reg;
// }
pub mod satp {
use tock_registers::register_bitfields;
@ -290,6 +418,17 @@ pub mod scause {
pub const SCAUSE: Reg = Reg;
}
pub mod stval {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, stval, ());
impl_csr_write!(Reg, u64, stval, ());
pub const STVAL: Reg = Reg;
}
pub mod sepc {
use super::{impl_csr_read, impl_csr_write};
@ -301,13 +440,99 @@ pub mod sepc {
pub const SEPC: Reg = Reg;
}
pub use medeleg::MEDELEG;
pub use mepc::MEPC;
pub use mideleg::MIDELEG;
pub use misa::MISA;
pub use mstatus::MSTATUS;
pub use mtvec::MTVEC;
pub mod sstatus {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SSTATUS [
SIE OFFSET(1) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sstatus, SSTATUS::Register);
impl_csr_write!(Reg, u64, sstatus, SSTATUS::Register);
pub const SSTATUS: Reg = Reg;
}
pub mod sscratch {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, sscratch, ());
impl_csr_write!(Reg, u64, sscratch, ());
pub const SSCRATCH: Reg = Reg;
}
pub mod sip {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIP [
SSIP OFFSET(1) NUMBITS(1) [],
STIP OFFSET(5) NUMBITS(1) [],
SEIP OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sip, SIP::Register);
impl_csr_write!(Reg, u64, sip, SIP::Register);
pub const SIP: Reg = Reg;
}
pub mod sie {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIE [
SSIE OFFSET(1) NUMBITS(1) [],
STIE OFFSET(5) NUMBITS(1) [],
SEIE OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sie, SIE::Register);
impl_csr_write!(Reg, u64, sie, SIE::Register);
pub const SIE: Reg = Reg;
}
// pub use mcause::MCAUSE;
// pub use mcounteren::MCOUNTEREN;
// pub use medeleg::MEDELEG;
// pub use mepc::MEPC;
// pub use mideleg::MIDELEG;
// pub use mie::MIE;
// pub use mip::MIP;
// pub use misa::MISA;
// pub use mscratch::MSCRATCH;
// pub use mstatus::MSTATUS;
// pub use mtvec::MTVEC;
pub use satp::SATP;
pub use scause::SCAUSE;
pub use sepc::SEPC;
pub use sie::SIE;
pub use sip::SIP;
pub use sscratch::SSCRATCH;
pub use sstatus::SSTATUS;
pub use stval::STVAL;
pub use stvec::STVEC;
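For illustration, mirroring how SSTATUS and STVEC are driven elsewhere in this commit, the new S-mode CSR wrappers compose with the tock-registers interfaces. This snippet is not part of the commit and only shows the intended usage pattern:

```rust
use tock_registers::interfaces::ReadWriteable;
use kernel_arch_riscv64::registers::{SIE, SSTATUS};

// Unmask S-mode timer and external interrupts, then enable delivery,
// the same way lib.rs toggles SSTATUS::SIE in set_interrupt_mask().
fn enable_smode_interrupts() {
    SIE.modify(SIE::STIE::SET + SIE::SEIE::SET);
    SSTATUS.modify(SSTATUS::SIE::SET);
}
```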

View File

@ -0,0 +1,35 @@
unsafe fn sbi_do_call(
extension: u64,
function: u64,
mut a0: u64,
mut a1: u64,
a2: u64,
a3: u64,
a4: u64,
a5: u64,
) {
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
in("a2") a2,
in("a3") a3,
in("a4") a4,
in("a5") a5,
in("a6") function,
in("a7") extension,
);
}
// TODO return `struct sbiret`
let _ = a0;
let _ = a1;
}
pub fn sbi_debug_console_write_byte(byte: u8) {
unsafe { sbi_do_call(0x4442434E, 0x02, byte as u64, 0, 0, 0, 0, 0) };
}
pub fn sbi_set_timer(next_event: u64) {
unsafe { sbi_do_call(0x54494D45, 0x00, next_event, 0, 0, 0, 0, 0) };
}
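For context, the extension IDs used above are the ASCII encodings "DBCN" (0x4442434E, debug console) and "TIME" (0x54494D45, timer). The TODO about returning struct sbiret could be addressed with a wrapper along these lines; this is a sketch only, following the SBI calling convention in which a0 carries the error code and a1 the value on return.

```rust
// Sketch of an sbiret-style return, per the SBI calling convention
// (a0 = error code, a1 = value). Not part of this commit.
pub struct SbiRet {
    pub error: i64,
    pub value: u64,
}

unsafe fn sbi_call_ret(extension: u64, function: u64, mut a0: u64, mut a1: u64) -> SbiRet {
    unsafe {
        core::arch::asm!(
            "ecall",
            inlateout("a0") a0,
            inlateout("a1") a1,
            in("a6") function,
            in("a7") extension,
        );
    }
    SbiRet { error: a0 as i64, value: a1 }
}
```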

View File

@ -466,7 +466,7 @@ impl PciBusManager {
Ok(())
}
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub fn add_segment_from_device_tree(
cfg_base: PhysicalAddress,
bus_range: core::ops::Range<u8>,

View File

@ -44,11 +44,21 @@ pub struct Node {
pub(crate) interrupt_controller: OneTimeInit<Arc<dyn DeviceTreeInterruptController>>,
}
struct NodeDevice {
driver: &'static dyn Driver,
device: Arc<dyn Device>,
enum NodeDevice {
// Node probed, no device found
Missing,
// Node probed and driver found
Present {
driver: &'static dyn Driver,
device: Arc<dyn Device>,
},
}
// struct NodeDevice {
// driver: &'static dyn Driver,
// device: Arc<dyn Device>,
// }
struct EnumerationContext {
address_cells: usize,
size_cells: usize,
@ -56,6 +66,22 @@ struct EnumerationContext {
interrupt_parent: Option<Phandle>,
}
impl NodeDevice {
fn as_device(&self) -> Option<Arc<dyn Device>> {
match self {
Self::Missing => None,
Self::Present { device, .. } => Some(device.clone()),
}
}
fn driver(&self) -> Option<&'static dyn Driver> {
match self {
Self::Missing => None,
Self::Present { driver, .. } => Some(*driver),
}
}
}
impl Node {
fn probe_upwards(self: Arc<Self>) -> (Option<Arc<dyn Device>>, Option<Weak<dyn Bus>>) {
let mut parent_bus = None;
@ -81,15 +107,19 @@ impl Node {
let drivers = DRIVERS.read();
let driver = drivers.iter().find(|d| d.matches(compatible))?;
let device = driver.imp.probe(&self, &cx)?;
let device = driver.imp.probe(&self, &cx);
Some(NodeDevice {
driver: driver.imp,
device,
})
let slot = match device {
Some(device) => NodeDevice::Present {
driver: driver.imp,
device,
},
None => NodeDevice::Missing,
};
Some(slot)
});
let device = inner.map(|d| d.device.clone());
let device = inner.and_then(|d| d.as_device());
let bus = if let Some(device) = device.as_ref() {
device.clone().as_bus().as_ref().map(Arc::downgrade)
@ -120,7 +150,7 @@ impl Node {
/// Returns the device driver associated with this node, if any was probed.
pub fn driver(&self) -> Option<&'static dyn Driver> {
Some(self.device.try_get()?.driver)
self.device.try_get()?.driver()
}
/// Performs a lazy initialization of the node:
@ -156,7 +186,7 @@ impl Node {
match self.clone().lazy_init() {
Some(Ok(())) => {
let device = self.device.get();
let status = unsafe { device.device.clone().init_irq() };
let status = unsafe { device.as_device()?.init_irq() };
Some(status)
}
Some(Err(_)) | None => None,
@ -244,7 +274,7 @@ impl Node {
/// Attempts to get a clock controller represented by this node, if any
pub fn as_clock_controller(&self) -> Option<Arc<dyn ClockController>> {
let device = self.device.try_get()?;
device.device.clone().as_clock_controller()
device.as_device()?.as_clock_controller()
}
/// Returns the `#address-cells` value of the node's parent bus

View File

@ -10,7 +10,9 @@ use libk_mm::address::PhysicalAddress;
use yggdrasil_abi::error::Error;
use crate::{
node::DeviceTreeNodeExt, property::DeviceTreePropertyRead, util::DeviceTreeMemoryRegionIter,
node::DeviceTreeNodeExt,
property::DeviceTreePropertyRead,
util::{DeviceTreeMemoryRegionIter, DeviceTreeReservedRegionIter},
};
const FDT_INDEX_BUFFER_SIZE: usize = 65536;
@ -174,6 +176,11 @@ impl<'a> DeviceTree<'a> {
DeviceTreeMemoryRegionIter::new(self)
}
/// Returns an iterator over the reserved memory regions specified by this device tree
pub fn reserved_regions(&self) -> DeviceTreeReservedRegionIter {
DeviceTreeReservedRegionIter::new(self)
}
/// Returns the length of the header provided as a slice of bytes.
///
/// # Safety

View File

@ -19,6 +19,12 @@ pub struct DeviceTreeMemoryRegionIter<'a> {
inner: DevTreeIndexNodeSiblingIter<'a, 'a, 'a>,
}
/// Iterator for reserved physical memory regions
#[derive(Clone)]
pub struct DeviceTreeReservedRegionIter<'a> {
inner: Option<DevTreeIndexNodeSiblingIter<'a, 'a, 'a>>,
}
impl<'a> DeviceTreeMemoryRegionIter<'a> {
pub(crate) fn new(dt: &'a DeviceTree) -> Self {
let inner = dt.root().children();
@ -54,6 +60,38 @@ impl Iterator for DeviceTreeMemoryRegionIter<'_> {
}
}
impl<'a> DeviceTreeReservedRegionIter<'a> {
pub(crate) fn new(dt: &'a DeviceTree) -> Self {
let inner = dt.root().child("reserved-memory").map(|r| r.children());
Self { inner }
}
}
impl Iterator for DeviceTreeReservedRegionIter<'_> {
type Item = PhysicalMemoryRegion;
fn next(&mut self) -> Option<Self::Item> {
let inner = self.inner.as_mut()?;
loop {
let Some(node) = inner.next() else {
break None;
};
if let Some(reg) = node.property("reg") {
let address_cells = node.parent_address_cells();
let size_cells = node.parent_size_cells();
if let Some((base, size)) = reg.read_cells(0, (address_cells, size_cells)) {
let base = PhysicalAddress::from_u64(base);
let size = size as usize;
break Some(PhysicalMemoryRegion { base, size });
}
}
}
}
}
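A hedged usage sketch of the new iterator: a caller building the physical memory map can walk these regions and withhold them from the page allocator. Only DeviceTree::reserved_regions() and the PhysicalMemoryRegion fields come from this diff; the import paths and the reserve_region() helper are hypothetical stand-ins.

```rust
use device_tree::DeviceTree; // assumed public path of this crate
use libk_mm::address::PhysicalAddress;

// Hypothetical stand-in for the real memory-manager entry point.
fn reserve_region(base: PhysicalAddress, size: usize) {
    let _ = (base, size);
}

// Walk the new iterator and keep firmware carve-outs out of the allocator.
fn apply_reserved_regions(dt: &DeviceTree<'_>) {
    for region in dt.reserved_regions() {
        reserve_region(region.base, region.size);
    }
}
```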
/// Registers sysfs objects related to the device tree
pub fn create_sysfs_nodes(dt: &'static DeviceTree) {
struct Raw;

View File

@ -4,9 +4,8 @@ version = "0.1.0"
edition = "2021"
authors = ["Mark Poliakov <mark@alnyan.me>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags.workspace = true
bytemuck.workspace = true
[features]

View File

@ -1,8 +1,9 @@
use crate::{aarch64, x86_64};
use crate::{aarch64, riscv64, x86_64};
pub enum AnyTables {
X86_64(x86_64::FixedTables),
AArch64(aarch64::FixedTables),
Riscv64(riscv64::FixedTables),
}
impl AnyTables {
@ -10,6 +11,7 @@ impl AnyTables {
match self {
Self::X86_64(tables) => bytemuck::bytes_of(tables),
Self::AArch64(tables) => bytemuck::bytes_of(tables),
Self::Riscv64(tables) => bytemuck::bytes_of(tables),
}
}
}
@ -25,3 +27,9 @@ impl From<aarch64::FixedTables> for AnyTables {
Self::AArch64(value)
}
}
impl From<riscv64::FixedTables> for AnyTables {
fn from(value: riscv64::FixedTables) -> Self {
Self::Riscv64(value)
}
}

View File

@ -1,19 +1,82 @@
use core::fmt;
use bitflags::bitflags;
use bytemuck::{Pod, Zeroable};
use crate::RawTable;
pub const KERNEL_L3_COUNT: usize = 8;
bitflags! {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PageAttributes: u64 {
const N = 1 << 63;
/// Dirty bit
const D = 1 << 7;
/// Access bit
const A = 1 << 6;
/// Global mapping bit, implies all lower levels are also global
const G = 1 << 5;
/// U-mode access permission
const U = 1 << 4;
/// Execute permission
const X = 1 << 3;
/// Write permission
const W = 1 << 2;
/// Read-permission
const R = 1 << 1;
/// Valid bit
const V = 1 << 0;
}
// X W R Meaning
// 0 0 0 Pointer to next level of page table
// 0 0 1 Read-only page
// 0 1 0 ---
// 0 1 1 Read-write page
// 1 0 0 Execute only
// 1 0 1 Read-execute page
// 1 1 0 ---
// 1 1 1 Read-write-execute page
}
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct FixedTables {
_dummy: RawTable,
pub l1: RawTable,
pub kernel_l2: RawTable,
pub kernel_l3s: [RawTable; KERNEL_L3_COUNT],
}
impl FixedTables {
pub const fn zeroed() -> Self {
Self {
_dummy: RawTable::zeroed(),
l1: RawTable::zeroed(),
kernel_l2: RawTable::zeroed(),
kernel_l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
}
}
}
impl fmt::Display for PageAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use fmt::Write;
macro_rules! bit {
($self:ident, $field:expr, $letter:literal) => {
if $self.contains($field) {
f.write_char($letter)
} else {
f.write_char('-')
}
};
}
bit!(self, Self::R, 'r')?;
bit!(self, Self::W, 'w')?;
bit!(self, Self::X, 'x')?;
bit!(self, Self::U, 'u')?;
Ok(())
}
}

View File

@ -20,7 +20,7 @@ use libk_util::{
spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard},
IrqSafeSpinlock,
},
StaticVector,
OneTimeInit, StaticVector,
};
use yggdrasil_abi::error::Error;
@ -95,6 +95,7 @@ pub trait DebugSink: Sync {
#[derive(Clone)]
pub enum DebugSinkWrapper {
Arc(LogLevel, Arc<dyn DebugSink>),
Static(LogLevel, &'static dyn DebugSink),
}
unsafe impl Send for DebugSinkWrapper {}
@ -120,6 +121,7 @@ impl DebugSinkWrapper {
pub fn sink(&self) -> &dyn DebugSink {
match self {
Self::Arc(_, arc) => arc.as_ref(),
Self::Static(_, sink) => *sink,
}
}
@ -127,12 +129,14 @@ impl DebugSinkWrapper {
pub fn level(&self) -> LogLevel {
match self {
Self::Arc(level, _) => *level,
Self::Static(level, _) => *level,
}
}
pub fn set_level(&mut self, target: LogLevel) {
match self {
Self::Arc(level, _) => *level = target,
Self::Static(level, _) => *level = target,
}
}
}
@ -372,12 +376,11 @@ fn make_sysfs_sink_object(index: usize) -> Arc<KObject<usize>> {
object
}
/// Adds a debugging output sink
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
fn add_sink_inner(sink: DebugSinkWrapper) {
let index = {
let mut sinks = DEBUG_SINKS.write();
let index = sinks.len();
sinks.push(DebugSinkWrapper::Arc(level, sink.clone()));
sinks.push(sink);
index
};
@ -388,6 +391,11 @@ pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
}
}
/// Adds a debugging output sink
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Arc(level, sink.clone()));
}
pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
if SERIAL_SINK_SET_UP.swap(true, Ordering::Acquire) {
return;
@ -396,6 +404,20 @@ pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
add_sink(sink, level);
}
pub fn add_early_sink(sink: &'static dyn DebugSink, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Static(level, sink));
}
pub fn disable_early_sinks() {
let mut sinks = DEBUG_SINKS.write();
// TODO proper sink storage/manipulation
for sink in sinks.iter_mut() {
if let DebugSinkWrapper::Static(level, _) = sink {
*level = LogLevel::Fatal;
}
}
}
/// Print a trace message coming from a process
pub fn program_trace(process: &Process, thread: &Thread, message: &str) {
log::debug!(
@ -407,15 +429,23 @@ pub fn program_trace(process: &Process, thread: &Thread, message: &str) {
);
}
pub fn init_logger() {
static LOGGER_SET_UP: OneTimeInit<()> = OneTimeInit::new();
LOGGER_SET_UP.or_init_with(|| {
log::set_logger(&LOGGER)
.map(|_| log::set_max_level(log::LevelFilter::Trace))
.ok();
});
}
/// Resets the debugging terminal by clearing it
pub fn init() {
if RING_LOGGER_SINK.init_buffer().is_ok() {
RING_AVAILABLE.store(true, Ordering::Release);
}
log::set_logger(&LOGGER)
.map(|_| log::set_max_level(log::LevelFilter::Trace))
.ok();
init_logger();
}
impl fmt::Display for LogLevel {

View File

@ -46,7 +46,7 @@ impl CpuQueue {
pub fn new(index: usize) -> Self {
let idle = TaskContextImpl::kernel(
ArchitectureImpl::idle_task(),
CpuImpl::<Self>::local().id() as usize,
CpuImpl::<Self>::local().queue_index(),
)
.expect("Could not construct an idle task");

View File

@ -1,3 +1,5 @@
// vi:ft=asm:
.macro LOAD_PCREL label, register, symbol
\label: auipc \register, %pcrel_hi(\symbol)
addi \register, \register, %pcrel_lo(\label)
@ -5,55 +7,83 @@
.section .text.entry
.option norvc
.type __rv64_entry, @function
.global __rv64_entry
.type __rv64_entry, @function
__rv64_entry:
// Jump to parking place if hart ID is not zero
csrr t0, mhartid
bnez t0, .spin_loop
// a0 - bootstrap HART ID
// a1 - device tree blob
// mhartid == a0
// satp == 0
// Reset translation control
csrw satp, zero
// Zero the .bss
LOAD_PCREL .L00, t0, __bss_start_phys
LOAD_PCREL .L01, t1, __bss_end_phys
// Zero the .bss
LOAD_PCREL .L00, t0, __bss_start_phys
LOAD_PCREL .L01, t1, __bss_end_phys
1: bgeu t0, t1, 2f
sd zero, (t0)
addi t0, t0, 4
j 1b
1: bgeu t0, t1, 2f
sd zero, (t0)
addi t0, t0, 4
j 1b
2:
// Setup boot stack
LOAD_PCREL .L02, sp, {boot_stack_bottom} + {boot_stack_size} - {kernel_virt_offset}
// Jump to entry
LOAD_PCREL .L03, t0, {entry_mmode_lower} - {kernel_virt_offset}
jr t0
3: wfi
j 3b
.spin_loop:
wfi
j .spin_loop
// Setup boot stack and entry point
LOAD_PCREL .L02, sp, {boot_stack_bottom} + {boot_stack_size} - {kernel_virt_offset}
LOAD_PCREL .L03, t0, {entry_smode_lower} - {kernel_virt_offset}
jr t0
.size __rv64_entry, . - __rv64_entry
// .section .text.entry
// .option norvc
// .type __rv64_entry, @function
// .global __rv64_entry
// __rv64_entry:
// // Jump to parking place if hard id is not zero
// csrr t0, mhartid
// bnez t0, .spin_loop
//
// // Reset translation control
// csrw satp, zero
//
// // Zero the .bss
// LOAD_PCREL .L00, t0, __bss_start_phys
// LOAD_PCREL .L01, t1, __bss_end_phys
//
// 1: bgeu t0, t1, 2f
// sd zero, (t0)
// addi t0, t0, 4
// j 1b
// 2:
//
// // Setup boot stack
// LOAD_PCREL .L02, sp, {boot_stack_bottom} + {boot_stack_size} - {kernel_virt_offset}
//
// // Jump to entry
// LOAD_PCREL .L03, t0, entry_mmode_lower - {kernel_virt_offset}
//
// mv a0, a1
// jr t0
//
// 3: wfi
// j 3b
//
// .spin_loop:
// wfi
// j .spin_loop
//
// .size __rv64_entry, . - __rv64_entry
.section .text
.global __rv64_smode_entry
.type __rv64_smode_entry, @function
.p2align 4
__rv64_smode_entry:
// Set up the stack again
LOAD_PCREL .L04, sp, {boot_stack_bottom} + {boot_stack_size}
// Enter kernel proper
LOAD_PCREL .L05, t0, {entry_smode_lower}
jr t0
1: wfi
j 1b
.size __rv64_smode_entry, . - __rv64_smode_entry
// .section .text
// .global __rv64_smode_entry
// .type __rv64_smode_entry, @function
// .p2align 4
// __rv64_smode_entry:
// // Set up the stack again
// LOAD_PCREL .L04, sp, {boot_stack_bottom} + {boot_stack_size}
// // Enter kernel proper
// LOAD_PCREL .L05, t0, {entry_smode_lower}
//
// jr t0
//
// 1: wfi
// j 1b
// .size __rv64_smode_entry, . - __rv64_smode_entry

View File

@ -1,16 +1,20 @@
use core::arch::global_asm;
use kernel_arch::Architecture;
use kernel_arch_riscv64::{
mem::{
table::{PageAttributes, PageEntry, PageTable, L1},
KERNEL_VIRT_OFFSET,
},
registers::{MEDELEG, MEPC, MIDELEG, MSTATUS, MTVEC, SATP},
mem::{self, KERNEL_VIRT_OFFSET},
ArchitectureImpl,
};
use libk_mm::{address::PhysicalAddress, table::EntryLevel};
use tock_registers::interfaces::{ReadWriteable, Writeable};
use libk::{
debug,
fs::{devfs, sysfs},
task::runtime,
};
use libk_mm::address::PhysicalAddress;
use super::exception;
use crate::kernel_main;
use super::PLATFORM;
const BOOT_STACK_SIZE: usize = 65536;
@ -25,113 +29,138 @@ impl<const N: usize> BootStack<N> {
#[link_section = ".bss"]
static mut BOOT_STACK: BootStack<BOOT_STACK_SIZE> = BootStack::zeroed();
static mut TABLE: PageTable<L1> = PageTable::zeroed();
unsafe fn long_jump(pc: usize, sp: usize) -> ! {
// static mut DTB_PHYSICAL: PhysicalAddress = PhysicalAddress::ZERO;
unsafe fn long_jump(pc: usize, sp: usize, a0: usize, a1: usize) -> ! {
core::arch::asm!(r#"
mv sp, {sp}
jr {pc}
"#, pc = in(reg) pc, sp = in(reg) sp, options(noreturn));
"#,
in("a0") a0,
in("a1") a1,
pc = in(reg) pc,
sp = in(reg) sp,
options(noreturn)
);
}
unsafe extern "C" fn __rv64_bsp_smode_entry_lower() -> ! {
// TODO move this to kernel-arch-riscv64, like in other archs
for i in 0..4 {
TABLE[i] = PageEntry::block(
PhysicalAddress::from_usize(i << L1::SHIFT),
PageAttributes::W | PageAttributes::X,
);
}
// TODO magic numbers
// Map kernel
TABLE[450] = PageEntry::block(
PhysicalAddress::from_usize(0x8000_0000),
PageAttributes::W | PageAttributes::X,
);
unsafe extern "C" fn __rv64_bsp_smode_entry_lower(a0: usize, a1: usize) -> ! {
ArchitectureImpl::set_interrupt_mask(true);
let address = (&raw const TABLE).addr();
let address = if address >= KERNEL_VIRT_OFFSET {
address - KERNEL_VIRT_OFFSET
} else {
address
};
SATP.modify(SATP::PPN.val((address as u64) >> 12) + SATP::MODE::Sv39);
mem::enable_mmu();
let stack = (&raw const BOOT_STACK).addr() + KERNEL_VIRT_OFFSET;
let pc = __rv64_bsp_entry_upper as usize + KERNEL_VIRT_OFFSET;
let sp = stack + BOOT_STACK_SIZE;
long_jump(pc, sp)
long_jump(pc, sp, a0, a1)
}
unsafe extern "C" fn __rv64_bsp_entry_upper() -> ! {
// TODO set up per-CPU pointer, pass DTB from M-mode, figure out the rest of the boot process
exception::init_smode_exceptions();
loop {}
}
unsafe extern "C" fn __rv64_bsp_entry_upper(bsp_hart_id: u64, dtb_physical: PhysicalAddress) -> ! {
debug::init_logger();
super::debug::register_sbi_debug();
// Drop to S-mode
unsafe extern "C" fn __rv64_bsp_mmode_entry_lower() -> ! {
extern "C" {
fn __rv64_smode_entry() -> !;
static __rv64_mmode_trap_vectors: u8;
log::info!("Starting riscv64 upper half");
if dtb_physical.is_zero() {
log::error!("No device tree provided");
// No DTB provided
ArchitectureImpl::halt();
}
// Setup trap vector for M-mode
let mtvec = (&raw const __rv64_mmode_trap_vectors).addr() - KERNEL_VIRT_OFFSET;
MTVEC.set_base(mtvec);
MTVEC.modify(MTVEC::MODE::Direct);
// Setup trap delegation to S-mode
MIDELEG.set(u64::MAX);
MEDELEG.set(u64::MAX);
MSTATUS.modify(
// Mask S-mode interrupts
MSTATUS::SIE::CLEAR
+ MSTATUS::MPIE::CLEAR
// UXLEN=SXLEN=64
+ MSTATUS::UXL.val(2)
+ MSTATUS::SXL.val(2)
// Little endian
+ MSTATUS::UBE::CLEAR
+ MSTATUS::SBE::CLEAR
// Don't trap S-mode VM insns, sret + U-mode wfi
+ MSTATUS::TVM::CLEAR
+ MSTATUS::TW::CLEAR
+ MSTATUS::TSR::CLEAR
// Disable effective privilege modification
+ MSTATUS::MPRV::CLEAR
// Enable S-mode access to U-mode pages
+ MSTATUS::SUM::SET
// Make mret return to S-mode
+ MSTATUS::MPP::S,
);
let entry = __rv64_smode_entry as usize - KERNEL_VIRT_OFFSET;
MEPC.set(entry as u64);
// Modify pmpcfg/pmpaddr to allow lower-level execution
unsafe {
let mut pmpcfg0: u64;
core::arch::asm!("csrr {0}, pmpcfg0", out(reg) pmpcfg0);
let pmpaddr0: u64 = 0xFFFFffffFFFFffff;
pmpcfg0 &= !0xFF;
// A = 1, X, W, R
pmpcfg0 |= 0xF;
core::arch::asm!("csrw pmpaddr0, {0}; csrw pmpcfg0, {1}", in(reg) pmpaddr0, in(reg) pmpcfg0);
if let Err(error) = PLATFORM.init_memory_management(dtb_physical) {
log::error!("Failed to initialize memory management: {error:?}");
ArchitectureImpl::halt();
}
core::arch::asm!("mret", options(noreturn));
sysfs::init();
devfs::init();
runtime::init_task_queue();
if let Err(error) = PLATFORM.init_platform(bsp_hart_id as _, true) {
log::error!("Failed to initialize the platform: {error:?}");
ArchitectureImpl::halt();
}
kernel_main()
}
// // Drop to S-mode
// unsafe extern "C" fn __rv64_bsp_mmode_entry_lower(dtb: PhysicalAddress) -> ! {
// extern "C" {
// fn __rv64_smode_entry() -> !;
// }
//
// // Set mscratch to M-mode trap stack
// let trap_stack_bottom = (&raw const BSP_MMODE_TRAP_STACK).addr();
// let trap_sp = trap_stack_bottom + BOOT_STACK_SIZE;
// MSCRATCH.set(trap_sp as u64);
//
// // Setup trap vector for M-mode
// exception::init_mmode_exceptions();
//
// // Setup trap delegation to S-mode:
// // * S-mode timer -> S-mode
// // * All exceptions, except ecall from S-mode
// MIDELEG.modify(MIE::STIE::SET);
// MEDELEG.set(u64::MAX);
// MEDELEG.modify(MEDELEG::ECALL_SMODE::CLEAR);
//
// MCOUNTEREN.modify(MCOUNTEREN::CY::SET + MCOUNTEREN::TM::SET + MCOUNTEREN::IR::SET);
//
// MSTATUS.modify(
// // Mask S-mode interrupts
// MSTATUS::SIE::SET
// + MSTATUS::MPIE::CLEAR
// // UXLEN=SXLEN=64
// + MSTATUS::UXL.val(2)
// + MSTATUS::SXL.val(2)
// // Little endian
// + MSTATUS::UBE::CLEAR
// + MSTATUS::SBE::CLEAR
// // Don't trap S-mode VM insns, sret + U-mode wfi
// + MSTATUS::TVM::CLEAR
// + MSTATUS::TW::CLEAR
// + MSTATUS::TSR::CLEAR
// // Disable effective privilege modification
// + MSTATUS::MPRV::CLEAR
// // Enable S-mode access to U-mode pages
// + MSTATUS::SUM::SET
// // Make mret return to S-mode
// + MSTATUS::MPP::S,
// );
//
// MIE.modify(MIE::STIE::SET + MIE::SEIE::SET + MIE::MTIE::SET);
//
// let entry = __rv64_smode_entry as usize - KERNEL_VIRT_OFFSET;
// MEPC.set(entry as u64);
//
// // Modify pmpcfg/pmpaddr to allow lower-level execution
// unsafe {
// let mut pmpcfg0: u64;
// core::arch::asm!("csrr {0}, pmpcfg0", out(reg) pmpcfg0);
//
// let pmpaddr0: u64 = 0xFFFFffffFFFFffff;
//
// pmpcfg0 &= !0xFF;
// // A = 1, X, W, R
// pmpcfg0 |= 0xF;
//
// core::arch::asm!("csrw pmpaddr0, {0}; csrw pmpcfg0, {1}", in(reg) pmpaddr0, in(reg) pmpcfg0);
// }
//
// // Store the DTB address
// unsafe {
// DTB_PHYSICAL = dtb;
// }
//
// core::arch::asm!("mret", options(noreturn));
// }
global_asm!(
include_str!("entry.S"),
entry_smode_lower = sym __rv64_bsp_smode_entry_lower,
boot_stack_bottom = sym BOOT_STACK,
kernel_virt_offset = const KERNEL_VIRT_OFFSET,

View File

@ -0,0 +1,22 @@
use abi::error::Error;
use kernel_arch_riscv64::sbi;
use libk::debug::{self, DebugSink, LogLevel};
pub struct SbiDebugConsole;
impl DebugSink for SbiDebugConsole {
fn putc(&self, c: u8) -> Result<(), Error> {
sbi::sbi_debug_console_write_byte(c);
Ok(())
}
fn supports_control_sequences(&self) -> bool {
true
}
}
static SBI_DEBUG: SbiDebugConsole = SbiDebugConsole;
pub fn register_sbi_debug() {
debug::add_early_sink(&SBI_DEBUG, LogLevel::Debug);
}
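// `sbi_debug_console_write_byte` lives in kernel_arch_riscv64::sbi and is not part of
// this diff. A rough sketch of such a wrapper, assuming the SBI Debug Console extension
// (EID "DBCN" = 0x4442434E, write_byte FID 2) and the standard SBI calling convention;
// the real implementation may differ:
unsafe fn sbi_dbcn_write_byte(byte: u8) -> Result<(), i64> {
    // a0 carries the byte in and the SBI error code out (0 = success)
    let mut a0 = byte as u64;
    core::arch::asm!(
        "ecall",
        in("a7") 0x4442_434E_u64, // extension id "DBCN"
        in("a6") 2_u64,           // function id 2: debug_console_write_byte
        inout("a0") a0,
        lateout("a1") _,          // SBI return value, unused for write_byte
        options(nostack),
    );
    match a0 as i64 {
        0 => Ok(()),
        error => Err(error),
    }
}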

View File

@ -1,8 +1,25 @@
use core::arch::global_asm;
use kernel_arch::{task::Scheduler, Architecture};
use libk::arch::Cpu;
use tock_registers::interfaces::{ReadWriteable, Readable};
use kernel_arch_riscv64::{
intrinsics,
registers::{SCAUSE, SEPC, STVAL, STVEC},
sbi, ArchitectureImpl,
};
#[derive(Debug)]
#[repr(C)]
pub struct TrapFrame {
pub ra: u64,
pub gp: u64,
pub t0_2: [u64; 3],
pub a0_7: [u64; 8],
pub t3_6: [u64; 4],
pub s0: u64,
}
pub fn init_smode_exceptions() {
extern "C" {
@ -14,15 +31,47 @@ pub fn init_smode_exceptions() {
STVEC.modify(STVEC::MODE::Vectored);
}
unsafe extern "C" fn smode_trap_handler() {
unsafe fn smode_exception_handler(frame: *mut TrapFrame) {
let _ = frame;
let cause = SCAUSE.read(SCAUSE::CODE);
let pc = SEPC.get();
// Just put these into t0/t1 until I complete the rest of the kernel, lol
core::arch::asm!("j .", in("t0") cause, in("t1") pc);
loop {}
let tval = STVAL.get();
let epc = SEPC.get();
log::error!("S-mode exception cause={cause}, tval={tval:#x}, epc={epc:#x}");
ArchitectureImpl::halt();
}
unsafe extern "C" fn smode_interrupt_handler(frame: *mut TrapFrame) {
let _ = frame;
let cause = SCAUSE.read(SCAUSE::CODE);
match cause {
// S-mode timer interrupt
5 => {
sbi::sbi_set_timer(intrinsics::rdtime() + 1_000_000);
// TODO runtime tick, time accounting
Cpu::local().scheduler().yield_cpu();
}
_ => {
log::warn!("Unknown/unhandled S-mode interrupt {cause}");
ArchitectureImpl::halt();
}
}
}
unsafe extern "C" fn smode_general_trap_handler(frame: *mut TrapFrame) {
let interrupt = SCAUSE.matches_all(SCAUSE::INTERRUPT::SET);
if interrupt {
smode_interrupt_handler(frame);
} else {
smode_exception_handler(frame);
}
}
global_asm!(
include_str!("vectors.S"),
smode_general_handler = sym smode_general_trap_handler,
smode_interrupt_handler = sym smode_interrupt_handler,
);
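// The timer path above re-arms sbi_set_timer with a hard-coded delta of 1_000_000
// counter ticks. A minimal sketch of deriving the delta from the device tree instead:
// the /cpus "timebase-frequency" property gives ticks per second (10 MHz is QEMU virt's
// default), so a 10 ms tick is freq / 100. The `child`/`prop_u32` accessors are
// assumptions, not necessarily this kernel's DeviceTree API:
fn timer_ticks_for_10ms(dt: &DeviceTree<'_>) -> u64 {
    let freq = dt
        .root()
        .child("cpus")
        .and_then(|cpus| cpus.prop_u32("timebase-frequency"))
        .unwrap_or(10_000_000);
    u64::from(freq) / 100
}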

View File

@ -1,19 +1,51 @@
#![allow(missing_docs)]
use core::sync::atomic::{self, AtomicUsize, Ordering};
use abi::error::Error;
use alloc::sync::Arc;
use device_api::{
interrupt::{IpiDeliveryTarget, IpiMessage},
ResetDevice,
};
use device_tree::{driver::unflatten_device_tree, DeviceTree, DeviceTreeNodeExt};
use kernel_arch_riscv64::{
intrinsics,
mem::{self, KERNEL_VIRT_OFFSET},
registers::SIE,
sbi, PerCpuData,
};
use libk::{arch::Cpu, config};
use libk_mm::{
address::PhysicalAddress,
phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
pointer::PhysicalRef,
table::{EntryLevel, EntryLevelExt},
};
use libk_util::OneTimeInit;
use tock_registers::interfaces::ReadWriteable;
use ygg_driver_pci::PciBusManager;
use crate::{
device::MACHINE_NAME,
fs::{Initrd, INITRD_DATA},
util::call_init_array,
};
use super::Platform;
pub mod boot;
pub mod debug;
pub mod exception;
pub struct Riscv64 {
dt: OneTimeInit<DeviceTree<'static>>,
initrd: OneTimeInit<PhysicalRef<'static, [u8]>>,
}
pub static PLATFORM: Riscv64 = Riscv64 {
dt: OneTimeInit::new(),
initrd: OneTimeInit::new(),
};
#[derive(Debug, Clone, Copy)]
pub struct L3;
@ -33,11 +65,13 @@ impl Platform for Riscv64 {
unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: IpiMessage) -> Result<bool, Error> {
let _ = target;
let _ = msg;
log::warn!("TODO: send_ipi({msg:?})");
Ok(false)
// loop {}
}
unsafe fn start_application_processors(&self) {
// TODO
}
fn register_reset_device(&self, reset: Arc<dyn ResetDevice>) -> Result<(), Error> {
@ -46,4 +80,174 @@ impl Platform for Riscv64 {
}
}
impl Riscv64 {
unsafe fn init_memory_management(&'static self, dtb: PhysicalAddress) -> Result<(), Error> {
// Unmap the lower half
mem::setup_fixed_tables();
// Extract the size of the device tree
let dtb_size = {
let dtb_header = PhysicalRef::<u8>::map_slice(dtb, DeviceTree::MIN_HEADER_SIZE);
DeviceTree::read_totalsize(dtb_header.as_ref()).map_err(|_| Error::InvalidArgument)?
};
log::debug!("DTB: {:#x?}", dtb..dtb.add(dtb_size));
reserve_region(
"dtb",
PhysicalMemoryRegion {
base: dtb,
size: (dtb_size + 0xFFF) & !0xFFF,
},
);
let dtb_slice = PhysicalRef::<u8>::map_slice(dtb, dtb_size);
let dt = DeviceTree::from_raw(dtb_slice.as_ptr() as usize)?;
// Reserve memory regions specified in the DTB
log::info!("Reserved memory:");
for region in dt.reserved_regions() {
log::info!("* {:#x}..{:#x}", region.base, region.base.add(region.size));
reserve_region("mmode-resv", region);
}
// Setup initrd from the dt
let initrd = dt.chosen_initrd();
if let Some((start, end)) = initrd {
let aligned_start = start.page_align_down::<L3>();
let aligned_end = end.page_align_up::<L3>();
let size = aligned_end - aligned_start;
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: aligned_start,
size,
},
);
}
// Initialize the physical memory
phys::init_from_iter(dt.memory_regions(), |_, _, _| Ok(()))?;
self.dt.init(dt);
// Setup initrd
if let Some((initrd_start, initrd_end)) = initrd {
let aligned_start = initrd_start.page_align_down::<L3>();
let aligned_end = initrd_end.page_align_up::<L3>();
let len = initrd_end - initrd_start;
let data = unsafe { PhysicalRef::map_slice(initrd_start, len) };
let initrd = self.initrd.init(data);
INITRD_DATA.init(Initrd {
phys_page_start: aligned_start,
phys_page_len: aligned_end - aligned_start,
data: initrd.as_ref(),
});
}
Ok(())
}
// TODO boot hart ID may not be zero?
unsafe fn init_platform(&'static self, hart_id: u32, is_bsp: bool) -> Result<(), Error> {
static QUEUE_INDEX: AtomicUsize = AtomicUsize::new(0);
let queue_index = QUEUE_INDEX.fetch_add(1, Ordering::AcqRel);
let per_cpu = PerCpuData {
tmp_t0: 0,
smode_sp: 0,
bootstrap: is_bsp,
queue_index,
};
Cpu::init_local(Some(hart_id), per_cpu);
assert_eq!(Cpu::local().id(), hart_id);
exception::init_smode_exceptions();
if is_bsp {
call_init_array();
atomic::compiler_fence(Ordering::SeqCst);
libk::debug::init();
let dt = self.dt.get();
let bootargs = dt.chosen_bootargs().unwrap_or("");
config::parse_boot_arguments(bootargs);
// Create device tree sysfs nodes
device_tree::util::create_sysfs_nodes(dt);
let (_, machine_name) = Self::machine_name(dt);
unflatten_device_tree(dt);
Self::setup_chosen_stdout(dt).ok();
libk::debug::disable_early_sinks();
if let Some(machine) = machine_name {
log::info!("Running on {machine:?}");
MACHINE_NAME.init(machine.into());
}
log::info!("Boot arguments: {bootargs:?}");
log::info!("Initializing riscv64 platform");
device_tree::driver::lazy_init(
|_| (),
|node, error| {
log::error!("{}: {error:?}", node.name().unwrap_or("<unknown>"));
},
);
device_tree::driver::init_irqs(
|_| (),
|node, error| {
log::error!(
"{}: irq init error: {error:?}",
node.name().unwrap_or("<unknown>")
);
},
);
PciBusManager::setup_bus_devices()?;
}
// Setup the timer
SIE.modify(SIE::STIE::SET);
sbi::sbi_set_timer(intrinsics::rdtime() + 1_000_000);
// Test call into M-mode
// core::arch::asm!("ecall", in("a0") MModeFunction::WriteTimerComparator as u64, in("a1") 0x4321);
Ok(())
}
fn machine_name(dt: &'static DeviceTree) -> (Option<&'static str>, Option<&'static str>) {
(
dt.root().prop_string("compatible"),
dt.root().prop_string("model"),
)
}
#[inline(never)]
unsafe fn setup_chosen_stdout(dt: &'static DeviceTree) -> Result<(), Error> {
// Get /chosen.stdout-path to get early debug printing
// TODO honor defined configuration value
let stdout = dt.chosen_stdout();
let stdout_path = stdout.map(|(p, _)| p);
let node = stdout_path.and_then(device_tree::driver::find_node);
if let Some(node) = node {
node.force_init()?;
}
// No stdout
Ok(())
}
}

View File

@ -1,11 +1,113 @@
// vi:ft=asm:
.section .text
.set SMODE_TRAP_STATE_SIZE, (8 * 18)
.macro SAVE_TRAP_CONTEXT
addi sp, sp, -SMODE_TRAP_STATE_SIZE
sd ra, 8 * 0(sp)
sd gp, 8 * 1(sp)
sd t0, 8 * 2(sp)
sd t1, 8 * 3(sp)
sd t2, 8 * 4(sp)
sd a0, 8 * 5(sp)
sd a1, 8 * 6(sp)
sd a2, 8 * 7(sp)
sd a3, 8 * 8(sp)
sd a4, 8 * 9(sp)
sd a5, 8 * 10(sp)
sd a6, 8 * 11(sp)
sd a7, 8 * 12(sp)
sd t3, 8 * 13(sp)
sd t4, 8 * 14(sp)
sd t5, 8 * 15(sp)
sd t6, 8 * 16(sp)
sd s0, 8 * 17(sp)
.endm
.macro LOAD_TRAP_CONTEXT
ld ra, 8 * 0(sp)
ld gp, 8 * 1(sp)
ld t0, 8 * 2(sp)
ld t1, 8 * 3(sp)
ld t2, 8 * 4(sp)
ld a0, 8 * 5(sp)
ld a1, 8 * 6(sp)
ld a2, 8 * 7(sp)
ld a3, 8 * 8(sp)
ld a4, 8 * 9(sp)
ld a5, 8 * 10(sp)
ld a6, 8 * 11(sp)
ld a7, 8 * 12(sp)
ld t3, 8 * 13(sp)
ld t4, 8 * 14(sp)
ld t5, 8 * 15(sp)
ld t6, 8 * 16(sp)
ld s0, 8 * 17(sp)
addi sp, sp, SMODE_TRAP_STATE_SIZE
.endm
// * Switch stack to kernel if needed
// * Store pre-trap register state on the stack
// * Make a0 point to the frame
// * Make s0 = original_tp
.macro SMODE_TRAP_ENTER
// Stack may be either U-mode or S-mode stack depending on sstatus.SPP
// Original tp -> sscratch
// Per-CPU struct -> tp
csrrw tp, sscratch, tp
// Store t0 in per-CPU scratch space
sd t0, 0(tp)
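// (presumably the scratch slot, PerCpuData::tmp_t0, is the first field of the per-CPU struct)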
// Determine where the interrupt came from (SPP is bit 8)
csrr t0, sstatus
andi t0, t0, (1 << 8)
bnez t0, 1f
// Trap came from U-mode
// TODO
j .
1:
// Trap came from S-mode
2:
// Either stack was adjusted or the trap came from S-mode
// Load t0 back
ld t0, 0(tp)
SAVE_TRAP_CONTEXT
mv a0, sp
csrr s0, sscratch
.endm
// * Set sscratch to pre-trap tp
// * Restore the pre-trap register state
// * Return
.macro SMODE_TRAP_LEAVE_TO_SMODE
csrw sscratch, s0
// Restore the state
LOAD_TRAP_CONTEXT
// Swap the tp<->scratch back
csrrw tp, sscratch, tp
sret
.endm
.macro SMODE_TRAP n, handler
.type __rv64_smode_trap_\n, @function
__rv64_smode_trap_\n:
SMODE_TRAP_ENTER
// TODO when coming through a non-zero vector, the trap is always asynchronous, so
// the interrupt handler can be called directly instead of a more generic
// trap handler to avoid an extra indirection
call \handler
// TODO U-mode trap return
SMODE_TRAP_LEAVE_TO_SMODE
.size __rv64_smode_trap_\n, . - __rv64_smode_trap_\n
.endm
@ -13,7 +115,6 @@ __rv64_smode_trap_\n:
.option norvc
.global __rv64_smode_trap_vectors
.type __rv64_smode_trap_vectors, @function
.p2align 4
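// In STVEC vectored mode, synchronous traps enter at the table base and interrupts with
// cause N enter at base + 4 * N, so each slot must remain a single uncompressed 4-byte
// jump (hence `.option norvc` above).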
@ -36,27 +137,21 @@ __rv64_smode_trap_vectors:
j __rv64_smode_trap_15
.size __rv64_smode_trap_vectors, . - __rv64_smode_trap_vectors
SMODE_TRAP 0, {smode_general_handler}
SMODE_TRAP 1, {smode_interrupt_handler}
SMODE_TRAP 2, {smode_interrupt_handler}
SMODE_TRAP 3, {smode_interrupt_handler}
SMODE_TRAP 4, {smode_interrupt_handler}
SMODE_TRAP 5, {smode_interrupt_handler}
SMODE_TRAP 6, {smode_interrupt_handler}
SMODE_TRAP 7, {smode_interrupt_handler}
SMODE_TRAP 8, {smode_interrupt_handler}
SMODE_TRAP 9, {smode_interrupt_handler}
SMODE_TRAP 10, {smode_interrupt_handler}
SMODE_TRAP 11, {smode_interrupt_handler}
SMODE_TRAP 12, {smode_interrupt_handler}
SMODE_TRAP 13, {smode_interrupt_handler}
SMODE_TRAP 14, {smode_interrupt_handler}
SMODE_TRAP 15, {smode_interrupt_handler}
.option pop

View File

@ -1,6 +1,6 @@
//! Bus devices
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub mod pci_host_ecam_generic;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub mod simple_bus;

View File

@ -33,6 +33,10 @@ impl Device for SimpleBus {
impl Bus for SimpleBus {
fn map_range(&self, bus_range: Range<u64>) -> Option<Range<u64>> {
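// Per the devicetree spec, an empty `ranges` property means child addresses map 1:1 onto the parent bus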
if self.ranges.is_empty() {
return Some(bus_range);
}
let start = bus_range.start;
let end = bus_range.end;
@ -66,6 +70,7 @@ device_tree_driver! {
let cell_sizes = (child_address_cells, parent_address_cells, child_size_cells);
let mut items = Vec::new();
for (child_address, parent_address, length) in ranges.iter_cells(cell_sizes) {
let child_range = child_address..child_address + length;
items.push((child_range, parent_address));

View File

@ -5,3 +5,6 @@ pub mod bcm2835_aux_uart;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
pub mod pl011;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub mod ns16550a;

View File

@ -0,0 +1,173 @@
//! 16550-style UART device driver
use abi::{error::Error, io::TerminalOptions};
use alloc::sync::Arc;
use device_api::device::Device;
use device_tree::driver::{device_tree_driver, Node, ProbeContext};
use libk::{
debug::DebugSink,
device::manager::DEVICE_REGISTRY,
vfs::{Terminal, TerminalInput, TerminalOutput},
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
register_bitfields!(
u8,
IER [
/// Received data ready
RDR OFFSET(0) NUMBITS(1) [],
/// Transmitter holding register empty
THRE OFFSET(1) NUMBITS(1) [],
/// Receiver line status
RLS OFFSET(2) NUMBITS(1) [],
/// Modem status
MS OFFSET(3) NUMBITS(1) [],
],
LSR [
/// Data ready indicator
DR OFFSET(0) NUMBITS(1) [],
/// Transmitter FIFO empty
TFE OFFSET(5) NUMBITS(1) [],
],
LCR [
BITS OFFSET(0) NUMBITS(2) [
Bits8 = 3
],
STOPBITS OFFSET(2) NUMBITS(1) [],
PARITY OFFSET(3) NUMBITS(1) [],
PARITY_EVEN OFFSET(4) NUMBITS(1) [],
PARITY_STICK OFFSET(5) NUMBITS(1) [],
BREAK OFFSET(6) NUMBITS(1) [],
DLAB OFFSET(7) NUMBITS(1) [],
]
);
register_structs! {
#[allow(non_snake_case)]
Regs {
// Read: receive buffer, write: transmit buffer
(0x00 => DR: ReadWrite<u8>),
(0x01 => IER: ReadWrite<u8>),
// Read: interrupt identification, write: FIFO control
(0x02 => FCR: ReadWrite<u8>),
(0x03 => LCR: ReadWrite<u8, LCR::Register>),
(0x04 => MCR: WriteOnly<u8>),
(0x05 => LSR: ReadOnly<u8, LSR::Register>),
(0x06 => MSR: ReadOnly<u8>),
(0x07 => _0),
(0x08 => @END),
}
}
struct Io {
regs: DeviceMemoryIo<'static, Regs>,
}
struct Inner {
io: IrqSafeSpinlock<Io>,
}
/// ns16550a-style UART driver
pub struct Ns16550a {
inner: OneTimeInit<Arc<Terminal<Inner>>>,
base: PhysicalAddress,
// irq: FullIrq,
}
impl Io {
fn init(&mut self) {
self.regs.LCR.write(
LCR::BITS::Bits8 + LCR::BREAK::CLEAR + LCR::STOPBITS::CLEAR + LCR::PARITY::CLEAR,
);
self.regs.IER.set(0);
}
fn send(&mut self, byte: u8) {
while self.regs.LSR.matches_all(LSR::TFE::CLEAR) {
core::hint::spin_loop();
}
self.regs.DR.set(byte);
}
}
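// Only the transmit path is implemented in this commit. A possible polled receive helper
// (not part of this diff) could mirror `send` by checking LSR.DR before reading DR:
impl Io {
    fn try_recv(&mut self) -> Option<u8> {
        // LSR bit 0 (DR) is set while a received byte is waiting in the buffer
        if self.regs.LSR.matches_all(LSR::DR::SET) {
            Some(self.regs.DR.get())
        } else {
            None
        }
    }
}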
impl Device for Ns16550a {
unsafe fn init(self: Arc<Self>) -> Result<(), Error> {
let mut io = Io {
regs: DeviceMemoryIo::map(self.base, Default::default())?,
};
io.init();
let input = TerminalInput::with_capacity(64)?;
let output = Inner {
io: IrqSafeSpinlock::new(io),
};
let terminal = self.inner.init(Arc::new(Terminal::from_parts(
TerminalOptions::const_default(),
input,
output,
)));
DEVICE_REGISTRY
.serial_terminal
.register(terminal.clone(), Some(self.clone()))
.ok();
Ok(())
}
unsafe fn init_irq(self: Arc<Self>) -> Result<(), Error> {
log::warn!("TODO: init ns16550a irq");
Ok(())
}
fn display_name(&self) -> &str {
"ns16550a UART"
}
}
impl TerminalOutput for Inner {
fn write(&self, byte: u8) -> Result<(), Error> {
self.io.lock().send(byte);
Ok(())
}
fn write_multiple(&self, bytes: &[u8]) -> Result<usize, Error> {
let mut lock = self.io.lock();
for &byte in bytes {
lock.send(byte);
}
Ok(bytes.len())
}
}
impl DebugSink for Ns16550a {
fn putc(&self, c: u8) -> Result<(), Error> {
self.inner.get().putc_to_output(c)
}
fn supports_control_sequences(&self) -> bool {
true
}
}
device_tree_driver!(
compatible: ["ns16550a"],
driver: {
fn probe(&self, node: &Arc<Node>, context: &ProbeContext) -> Option<Arc<dyn Device>> {
let base = node.map_base(context, 0)?;
// let irq = node.interrupt(0)?;
Some(Arc::new(Ns16550a {
base,
// irq,
inner: OneTimeInit::new(),
}))
}
}
);

View File

@ -48,7 +48,7 @@ fn dump_panic_info(cpu: &LocalCpu, pi: &core::panic::PanicInfo) {
debug::panic_log!(sink, "Kernel panic");
if let Some(location) = pi.location() {
debug::panic_log!(sink, " ar {}:{}:\n", location.file(), location.line());
debug::panic_log!(sink, " at {}:{}:\n", location.file(), location.line());
} else {
debug::panic_log!(sink, ":\n");
}

View File

@ -65,16 +65,15 @@ pub unsafe fn enter() -> ! {
static AP_CAN_ENTER: SpinFence = SpinFence::new();
let mut cpu = Cpu::local();
if !cpu.is_bootstrap() {
// Wait until BSP allows us to enter
AP_CAN_ENTER.wait_one();
} else {
AP_CAN_ENTER.signal();
}
let queue = CpuQueue::for_cpu(cpu.queue_index());
cpu.set_scheduler(queue);
queue.enter()

View File

@ -14,11 +14,13 @@ use elf::{
ElfStream,
};
use memtables::any::AnyTables;
use riscv64::Riscv64Builder;
use thiserror::Error;
use crate::{aarch64::AArch64Builder, x86_64::X8664Builder};
mod aarch64;
mod riscv64;
mod x86_64;
#[derive(Error, Debug)]
@ -213,35 +215,18 @@ fn build_tables<F: Read + Seek>(
println!("Kernel image range: {:#x?}", kernel_start..kernel_end);
println!("KERNEL_VIRT_OFFSET = {:#x}", kernel_virt_offset);
let gen_data = GenData {
kernel_virt_offset,
kernel_start,
kernel_end,
table_offset,
table_physical_address,
};
let (tables, table_offset, symbol_table) = match elf.ehdr.e_machine {
EM_X86_64 => X8664Builder::new(elf, gen_data)?.build().map(into_any),
EM_AARCH64 => AArch64Builder::new(elf, gen_data)?.build().map(into_any),
EM_RISCV => Riscv64Builder::new(elf, gen_data)?.build().map(into_any),
_ => todo!(),
}?;

View File

@ -0,0 +1,172 @@
use std::{
collections::HashMap,
io::{Read, Seek},
mem::offset_of,
};
use elf::{
abi::{PF_W, PF_X, PT_LOAD},
endian::AnyEndian,
ElfStream,
};
use memtables::riscv64::{FixedTables, PageAttributes, KERNEL_L3_COUNT};
use crate::{extract_symbols, GenData, GenError};
pub struct Riscv64Builder<F: Read + Seek> {
elf: ElfStream<AnyEndian, F>,
data: GenData,
tables: FixedTables,
l1i_lower: usize,
l1i: usize,
l2i_start: usize,
l2i_end: usize,
}
const L1_SHIFT: usize = 30;
const L1_PAGE_SIZE: usize = 1 << L1_SHIFT;
const L2_SHIFT: usize = 21;
const L2_PAGE_SIZE: usize = 1 << L2_SHIFT;
const L3_SHIFT: usize = 12;
const L3_PAGE_SIZE: usize = 1 << L3_SHIFT;
fn segment_attributes(f: u32) -> PageAttributes {
let mut attrs = PageAttributes::R;
if f & PF_W != 0 {
attrs |= PageAttributes::W;
}
if f & PF_X != 0 {
attrs |= PageAttributes::X;
}
attrs
}
fn shift_pfn(physical: u64) -> u64 {
physical >> 2
}
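// Sanity check for the ">> 2" above: in Sv39 a PTE stores the physical page number
// starting at bit 10, so for a page-aligned address (addr >> 12) << 10 == addr >> 2.
// The test module is illustrative and not part of this commit.
#[cfg(test)]
mod pfn_encoding {
    use super::shift_pfn;

    #[test]
    fn matches_sv39_ppn_layout() {
        let phys: u64 = 0x8020_0000; // typical kernel load address on the virt machine
        assert_eq!(shift_pfn(phys), (phys >> 12) << 10);
        assert_eq!(shift_pfn(phys), 0x2008_0000);
    }
}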
impl<F: Read + Seek> Riscv64Builder<F> {
pub fn new(elf: ElfStream<AnyEndian, F>, data: GenData) -> Result<Self, GenError> {
assert_eq!(data.kernel_virt_offset & (L1_PAGE_SIZE as u64 - 1), 0);
let l1i = (data.kernel_start >> L1_SHIFT) as usize & 0x1FF;
let l1i_lower =
((data.kernel_start - data.kernel_virt_offset) >> L1_SHIFT) as usize & 0x1FF;
let end_l1i = ((data.kernel_end + L1_PAGE_SIZE as u64) >> L1_SHIFT) as usize & 0x1FF;
if end_l1i < l1i || end_l1i - l1i > 1 {
// TODO return error
panic!("Kernel image crosses a 1GiB boundary");
}
let l2i_start = (data.kernel_start >> L2_SHIFT) as usize & 0x1FF;
let l2i_end = ((data.kernel_end + L2_PAGE_SIZE as u64 - 1) >> L2_SHIFT) as usize & 0x1FF;
assert!(l2i_end >= l2i_start);
if l2i_end - l2i_start > KERNEL_L3_COUNT {
panic!();
}
Ok(Self {
elf,
data,
tables: FixedTables::zeroed(),
l1i_lower,
l1i,
l2i_start,
l2i_end,
})
}
pub fn build(mut self) -> Result<(FixedTables, u64, HashMap<String, usize>), GenError> {
assert_eq!(offset_of!(FixedTables, l1), 0);
let l2_physical_address =
self.data.table_physical_address + offset_of!(FixedTables, kernel_l2) as u64;
// L1 -> L2
self.tables.l1.data[self.l1i_lower] =
shift_pfn(l2_physical_address) | PageAttributes::V.bits();
self.tables.l1.data[self.l1i] = shift_pfn(l2_physical_address) | PageAttributes::V.bits();
// L2 -> L3s
for l2i in self.l2i_start..self.l2i_end {
let l3_table_index = l2i - self.l2i_start;
let l3_physical_address = self.data.table_physical_address
+ offset_of!(FixedTables, kernel_l3s) as u64
+ (l3_table_index * 0x1000) as u64;
self.tables.kernel_l2.data[l2i] =
shift_pfn(l3_physical_address) | PageAttributes::V.bits();
}
let symbol_table = extract_symbols(&mut self.elf)?;
for (i, segment) in self.elf.segments().into_iter().enumerate() {
if segment.p_type != PT_LOAD
|| segment.p_vaddr != segment.p_paddr + self.data.kernel_virt_offset
{
continue;
}
let aligned_virt_start = segment.p_vaddr & !(L3_PAGE_SIZE as u64 - 1);
let aligned_virt_end = (segment.p_vaddr + segment.p_memsz + L3_PAGE_SIZE as u64 - 1)
& !(L3_PAGE_SIZE as u64 - 1);
let aligned_phys_start = segment.p_paddr & !(L3_PAGE_SIZE as u64 - 1);
let count = (aligned_virt_end - aligned_virt_start) / 0x1000;
let attrs = segment_attributes(segment.p_flags);
println!(
"{}: {:#x?} -> {:#x} {}",
i,
aligned_virt_start..aligned_virt_end,
aligned_phys_start,
attrs
);
Self::map_segment(
self.l2i_start,
&mut self.tables,
aligned_virt_start,
aligned_phys_start,
count as usize,
attrs,
)?;
}
Ok((self.tables, self.data.table_offset, symbol_table))
}
fn map_segment(
start_l2i: usize,
tables: &mut FixedTables,
vaddr_start: u64,
paddr_start: u64,
count: usize,
flags: PageAttributes,
) -> Result<(), GenError> {
for index in 0..count {
let vaddr = vaddr_start + (index * L3_PAGE_SIZE) as u64;
let paddr = paddr_start + (index * L3_PAGE_SIZE) as u64;
let entry = shift_pfn(paddr) | (PageAttributes::V | flags).bits();
let l2i = ((vaddr >> L2_SHIFT) as usize & 0x1FF) - start_l2i;
let l3i = (vaddr >> L3_SHIFT) as usize & 0x1FF;
let l3 = &mut tables.kernel_l3s[l2i];
if l3.data[l3i] != 0 {
if l3.data[l3i] != entry {
todo!();
} else {
continue;
}
}
l3.data[l3i] = entry;
}
Ok(())
}
}

View File

@ -16,8 +16,8 @@ pub enum Machine {
pub struct QemuRiscv64;
#[derive(Debug)]
pub enum Image {
OpenSBI { kernel: PathBuf, bios: PathBuf },
}
impl IntoArgs for Machine {
@ -33,16 +33,21 @@ impl IntoArgs for Cpu {
fn add_args(&self, command: &mut Command) {
command.arg("-cpu");
match self {
Self::Rv64 => command.arg("rv64,zicsr=true,zifencei=true"),
Self::Rv64 => command.arg("rv64,a=true,zicsr=true,zifencei=true"),
};
}
}
impl IntoArgs for Image {
fn add_args(&self, command: &mut Command) {
command.arg("-kernel");
command.arg(&self.kernel);
command.args(["-bios", "none"]);
match self {
Self::OpenSBI { kernel, bios } => {
command.arg("-kernel");
command.arg(kernel);
command.arg("-bios");
command.arg(bios);
}
}
}
}
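// For reference, these arguments should produce an invocation roughly like:
// qemu-system-riscv64 -machine virt -cpu rv64,a=true,zicsr=true,zifencei=true \
//     -kernel <kernel> -bios boot/riscv/fw_jump.bin -serial mon:stdio
// (binary name, flag order and extra device arguments depend on the rest of the runner
// and are illustrative here).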

View File

@ -256,6 +256,7 @@ fn run_i686(
fn run_riscv64(
config: &QemuConfig,
env: &BuildEnv,
qemu_bin: Option<PathBuf>,
devices: Vec<QemuDevice>,
kernel: PathBuf,
@ -267,10 +268,11 @@ fn run_riscv64(
if let Some(qemu_bin) = qemu_bin {
qemu.override_qemu(qemu_bin);
}
let bios = env.workspace_root.join("boot/riscv/fw_jump.bin");
qemu.with_serial(QemuSerialTarget::MonStdio)
.with_machine(riscv64::Machine::Virt)
.with_cpu(riscv64::Cpu::Rv64)
.with_boot_image(riscv64::Image::OpenSBI { kernel, bios });
Ok(qemu.into_command())
}
@ -348,7 +350,7 @@ pub fn run(
let mut command = match built {
AllBuilt::Riscv64(KernelProcessed(KernelBuilt(kernel))) => {
run_riscv64(&config, &env, qemu, devices, kernel)?
}
AllBuilt::AArch64(KernelProcessed(KernelBuilt(kernel)), InitrdGenerated(initrd)) => {
make_kernel_bin(kernel, &kernel_bin)?;