rv64: Implement RISC-V support, implement VisionFive2 support #34

Merged
alnyan merged 17 commits from feature/rv64 into master 2025-01-21 19:54:55 +02:00
95 changed files with 4862 additions and 148 deletions

Cargo.lock generated

@ -967,6 +967,7 @@ dependencies = [
"kernel-arch-hosted",
"kernel-arch-i686",
"kernel-arch-interface",
"kernel-arch-riscv64",
"kernel-arch-x86_64",
]
@ -1018,6 +1019,22 @@ dependencies = [
"yggdrasil-abi",
]
[[package]]
name = "kernel-arch-riscv64"
version = "0.1.0"
dependencies = [
"bitflags 2.6.0",
"cfg-if",
"device-api",
"kernel-arch-interface",
"libk-mm-interface",
"log",
"memtables",
"static_assertions",
"tock-registers 0.9.0",
"yggdrasil-abi",
]
[[package]]
name = "kernel-arch-x86"
version = "0.1.0"
@ -1292,6 +1309,7 @@ dependencies = [
name = "memtables"
version = "0.1.0"
dependencies = [
"bitflags 2.6.0",
"bytemuck",
]
@ -2658,6 +2676,7 @@ dependencies = [
"kernel-arch-aarch64",
"kernel-arch-i686",
"kernel-arch-interface",
"kernel-arch-riscv64",
"kernel-arch-x86",
"kernel-arch-x86_64",
"libk",


@ -16,8 +16,9 @@ members = [
"lib/abi",
"lib/libyalloc",
"lib/runtime",
"lib/qemu"
, "lib/abi-serde"]
"lib/qemu",
"lib/abi-serde"
]
[workspace.dependencies]
chrono = { version = "0.4.38", default-features = false, features = ["alloc"] }
@ -53,6 +54,7 @@ abi-generator.path = "tool/abi-generator"
# Kernel parts
kernel-arch-interface.path = "kernel/arch/interface"
kernel-arch-aarch64.path = "kernel/arch/aarch64"
kernel-arch-riscv64.path = "kernel/arch/riscv64"
kernel-arch-x86_64.path = "kernel/arch/x86_64"
kernel-arch-i686.path = "kernel/arch/i686"
kernel-arch-x86.path = "kernel/arch/x86"
@ -91,7 +93,7 @@ derivable_impls = { level = "allow" }
[profile.dev]
opt-level = 1
split-debuginfo = "packed"
# split-debuginfo = "packed"
lto = "thin"
panic = "abort"
@ -100,6 +102,3 @@ split-debuginfo = "none"
[profile.dev.package."*"]
opt-level = 3
# [profile.dev]
# opt-level = "s"

boot/riscv/fw_jump.bin Normal file

Binary file not shown.

doc/visionfive2.txt Normal file

@ -0,0 +1,30 @@
Booting Yggdrasil OS on the StarFive VisionFive 2 RISC-V board:
* TODO: proper format for initrd image
* TODO: 0x70000000 can be replaced with a built-in U-Boot variable?
Prerequisites:
* OpenSBI + U-Boot (the regular Debian installation from StarFive can be used)
* yggdrasil-kernel.bin
* initrd.img
Steps:
1. Copy yggdrasil-kernel.bin and initrd.img into some directory and start a TFTP server there
2. Connect to the VF2's serial port and Ethernet, then enter U-Boot
3. Run the following commands:
# Get an IP address
dhcp
# [Optional] set some kernel cmdline params
setenv bootargs "debug.serial-level=info"
# Load initrd
tftpboot 0x70000000 <your-ip-address>:initrd.img
# Load kernel
tftpboot ${loadaddr} <your-ip-address>:yggdrasil-kernel.bin
# Load dtb
load mmc 1:3 ${fdt_addr_r} dtbs/...-starfive/starfive/${fdtfile}
fdt resize
# Enter the kernel
booti ${loadaddr} 0x70000000:<initrd-size> ${fdt_addr_r}
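Note (a sketch, not verified on hardware): the <initrd-size> argument can be taken from
the standard U-Boot ${filesize} variable, which tftpboot sets (in hex, as booti expects):
tftpboot 0x70000000 <your-ip-address>:initrd.img
setenv initrd_size ${filesize}
# ... load the kernel and dtb as above ...
booti ${loadaddr} 0x70000000:${initrd_size} ${fdt_addr_r}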


@ -0,0 +1,58 @@
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x40200000;
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};


@ -0,0 +1,58 @@
ENTRY(__rv64_entry);
KERNEL_PHYS_BASE = 0x80200000;
KERNEL_VIRT_OFFSET = 0xFFFFFFF000000000;
SECTIONS {
. = KERNEL_PHYS_BASE;
PROVIDE(__kernel_start = . + KERNEL_VIRT_OFFSET);
.text.entry : {
*(.text.entry)
}
. = ALIGN(16);
. = . + KERNEL_VIRT_OFFSET;
.text : AT(. - KERNEL_VIRT_OFFSET) {
KEEP(*(.text.vectors));
*(.text*)
}
. = ALIGN(4K);
.rodata : AT(. - KERNEL_VIRT_OFFSET) {
*(.rodata*)
*(.eh_frame*)
}
. = ALIGN(4K);
.data.tables : AT (. - KERNEL_VIRT_OFFSET) {
KEEP(*(.data.tables))
}
. = ALIGN(4K);
.data : AT(. - KERNEL_VIRT_OFFSET) {
*(.data*)
. = ALIGN(8);
/* PROVIDE(__global_pointer = . + 0x800 - KERNEL_VIRT_OFFSET); */
. = ALIGN(16);
PROVIDE(__init_array_start = .);
KEEP(*(.init_array*))
PROVIDE(__init_array_end = .);
*(.got*)
}
. = ALIGN(4K);
PROVIDE(__bss_start_phys = . - KERNEL_VIRT_OFFSET);
.bss : AT(. - KERNEL_VIRT_OFFSET) {
*(COMMON)
*(.bss*)
}
. = ALIGN(4K);
PROVIDE(__bss_end_phys = . - KERNEL_VIRT_OFFSET);
PROVIDE(__bss_size = __bss_end_phys - __bss_start_phys);
PROVIDE(__kernel_end = .);
};


@ -0,0 +1,26 @@
{
"arch": "riscv64",
"os": "none",
"abi": "softfloat",
"cpu": "generic-rv64",
"llvm-target": "riscv64",
"data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
"max-atomic-width": 64,
"target-pointer-width": "64",
"features": "+m,+a,+c",
"disable-redzone": true,
"executables": true,
"panic-strategy": "abort",
"dynamic-linking": true,
"relocation-model": "pic",
"code-model": "large",
"eh-frame-header": false,
"crt-objects-fallback": "false",
"emit-debug-gdb-scripts": false,
"llvm-abiname": "lp64",
"linker": "rust-lld",
"linker-flavor": "ld.lld"
}


@ -53,6 +53,10 @@ aarch64-cpu.workspace = true
device-tree.workspace = true
kernel-arch-aarch64.workspace = true
[target.'cfg(target_arch = "riscv64")'.dependencies]
device-tree.workspace = true
kernel-arch-riscv64.workspace = true
[target.'cfg(target_arch = "x86_64")'.dependencies]
yboot-proto.workspace = true
kernel-arch-x86_64.workspace = true
@ -81,6 +85,7 @@ kernel-arch-x86_64.workspace = true
kernel-arch-i686.workspace = true
kernel-arch-x86.workspace = true
kernel-arch-aarch64.workspace = true
kernel-arch-riscv64.workspace = true
[features]
default = ["fb_console"]
@ -90,5 +95,8 @@ fb_console = []
aarch64_board_virt = ["kernel-arch-aarch64/aarch64_board_virt"]
aarch64_board_raspi4b = ["kernel-arch-aarch64/aarch64_board_raspi4b"]
riscv64_board_virt = ["kernel-arch-riscv64/riscv64_board_virt"]
riscv64_board_jh7110 = ["kernel-arch-riscv64/riscv64_board_jh7110"]
[lints]
workspace = true


@ -3,21 +3,22 @@ name = "kernel-arch"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[target.'cfg(all(target_os = "none", target_arch = "x86_64"))'.dependencies]
kernel-arch-x86_64 = { path = "x86_64" }
kernel-arch-x86_64.path = "x86_64"
[target.'cfg(all(target_os = "none", target_arch = "aarch64"))'.dependencies]
kernel-arch-aarch64 = { path = "aarch64" }
kernel-arch-aarch64.path = "aarch64"
[target.'cfg(all(target_os = "none", target_arch = "x86"))'.dependencies]
kernel-arch-i686 = { path = "i686" }
kernel-arch-i686.path = "i686"
[target.'cfg(all(target_os = "none", target_arch = "riscv64"))'.dependencies]
kernel-arch-riscv64.path = "riscv64"
[target.'cfg(not(target_os = "none"))'.dependencies]
kernel-arch-hosted = { path = "hosted" }
kernel-arch-hosted.path = "hosted"
[dependencies]
kernel-arch-interface = { path = "interface" }
kernel-arch-interface.path = "interface"
cfg-if.workspace = true


@ -208,10 +208,12 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(mdscr_el1);
stack.push(context.stack_pointer);
let ttbr0 = context.address_space | (context.asid << 48) | 1;
setup_common_context(
&mut stack,
__aarch64_task_enter_user as _,
context.address_space,
ttbr0,
context.thread_pointer as _,
);


@ -10,7 +10,7 @@ use aarch64_cpu::registers::{DAIF, MPIDR_EL1, TPIDR_EL1};
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
guard::IrqGuard,
task::Scheduler,
util::OneTimeInit,
@ -32,6 +32,8 @@ pub struct PerCpuData {
pub gic: OneTimeInit<Arc<dyn GicInterface>>,
}
impl CpuData for PerCpuData {}
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);


@ -83,8 +83,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> u64 {
unsafe { u64::from(self.l1.as_physical_address()) | ((self.asid as u64) << 48) | 1 }
fn as_address_with_asid(&self) -> (u64, u64) {
let physical = unsafe { u64::from(self.l1.as_physical_address()) };
(physical, self.asid as u64)
}
unsafe fn clear(&mut self) {


@ -8,7 +8,7 @@ use core::ptr::null_mut;
use alloc::vec::Vec;
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
task::Scheduler,
Architecture,
};
@ -29,6 +29,8 @@ pub struct PerCpuData {
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
static mut CPU: *mut () = null_mut();
#[naked]


@ -68,8 +68,8 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
self.pop_l3_entry(address)
}
fn as_address_with_asid(&self) -> u64 {
unsafe { self.l0.as_physical_address().into_u64() }
fn as_address_with_asid(&self) -> (u64, u64) {
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
}
}


@ -30,6 +30,18 @@ pub struct IpiQueue<A: Architecture> {
data: IrqSafeSpinlock<A, Option<IpiMessage>>,
}
pub trait CpuData {
fn is_bootstrap(&self, id: u32) -> bool {
// On most architectures, the bootstrap CPU has ID 0
id == 0
}
fn queue_index(&self, id: u32) -> usize {
// On most architectures, CPU IDs map directly to IPI queue indices
id as usize
}
}
pub trait CpuFeatureSet {
fn iter(&self) -> impl Iterator<Item = &'static str>;
}
@ -50,6 +62,14 @@ impl<A: Architecture, S: Scheduler + 'static> CpuImpl<A, S> {
unsafe { A::init_ipi_queues(queues) }
}
pub fn is_bootstrap(&self) -> bool {
self.inner.is_bootstrap(self.id)
}
pub fn queue_index(&self) -> usize {
self.inner.queue_index(self.id)
}
pub fn set_current_thread_id(&mut self, id: Option<S::ThreadId>) {
self.current_thread_id = id;
}


@ -3,7 +3,7 @@
#![allow(clippy::new_without_default)]
use alloc::vec::Vec;
use cpu::{CpuFeatureSet, CpuImpl, IpiQueue};
use cpu::{CpuData, CpuFeatureSet, CpuImpl, IpiQueue};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use task::Scheduler;
@ -19,14 +19,15 @@ pub mod sync;
pub mod task;
pub mod util;
#[cfg(any(target_pointer_width = "32", rust_analyzer))]
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xC0000000;
#[cfg(any(target_pointer_width = "64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFF8000000000;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub const KERNEL_VIRT_OFFSET: usize = 0xFFFFFFF000000000;
pub trait Architecture: Sized + 'static {
type PerCpuData;
type PerCpuData: CpuData;
type CpuFeatures: CpuFeatureSet;
type BreakpointType;


@ -83,6 +83,7 @@ pub struct UserContextInfo {
pub stack_pointer: usize,
pub thread_pointer: usize,
pub address_space: u64,
pub asid: u64,
pub single_step: bool,
}


@ -0,0 +1,26 @@
[package]
name = "kernel-arch-riscv64"
version = "0.1.0"
edition = "2024"
[dependencies]
yggdrasil-abi.workspace = true
kernel-arch-interface.workspace = true
libk-mm-interface.workspace = true
memtables.workspace = true
device-api = { workspace = true, features = ["derive"] }
tock-registers.workspace = true
bitflags.workspace = true
static_assertions.workspace = true
log.workspace = true
cfg-if.workspace = true
[features]
default = []
riscv64_board_virt = []
riscv64_board_jh7110 = []
[lints]
workspace = true


@ -0,0 +1,128 @@
// vi:ft=asm:
.section .text
.macro SAVE_TASK_STATE
addi sp, sp, -{context_size}
sd ra, 0 * 8(sp)
sd gp, 1 * 8(sp)
sd s11, 2 * 8(sp)
sd s10, 3 * 8(sp)
sd s9, 4 * 8(sp)
sd s8, 5 * 8(sp)
sd s7, 6 * 8(sp)
sd s6, 7 * 8(sp)
sd s5, 8 * 8(sp)
sd s4, 9 * 8(sp)
sd s3, 10 * 8(sp)
sd s2, 11 * 8(sp)
sd s1, 12 * 8(sp)
sd s0, 13 * 8(sp)
.endm
.macro LOAD_TASK_STATE
ld ra, 0 * 8(sp)
ld gp, 1 * 8(sp)
ld s11, 2 * 8(sp)
ld s10, 3 * 8(sp)
ld s9, 4 * 8(sp)
ld s8, 5 * 8(sp)
ld s7, 6 * 8(sp)
ld s6, 7 * 8(sp)
ld s5, 8 * 8(sp)
ld s4, 9 * 8(sp)
ld s3, 10 * 8(sp)
ld s2, 11 * 8(sp)
ld s1, 12 * 8(sp)
ld s0, 13 * 8(sp)
addi sp, sp, {context_size}
.endm
.option push
.option norvc
.global __rv64_task_enter_kernel
.global __rv64_task_enter_user
.global __rv64_switch_task
.global __rv64_switch_task_and_drop
.global __rv64_enter_task
// Context switching
.type __rv64_enter_task, @function
__rv64_enter_task:
// a0 - task ctx
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_enter_task, . - __rv64_enter_task
.type __rv64_switch_task, @function
__rv64_switch_task:
// a0 - destination task ctx
// a1 - source task ctx
SAVE_TASK_STATE
sd sp, (a1)
ld sp, (a0)
LOAD_TASK_STATE
ret
.size __rv64_switch_task, . - __rv64_switch_task
.type __rv64_switch_task_and_drop, @function
__rv64_switch_task_and_drop:
// a0 - destination task ctx
// a1 - thread struct to drop
ld sp, (a0)
mv a0, a1
call __arch_drop_thread
LOAD_TASK_STATE
ret
.size __rv64_switch_task_and_drop, . - __rv64_switch_task_and_drop
// Entry functions
.type __rv64_task_enter_kernel, @function
__rv64_task_enter_kernel:
ld a0, (sp) // argument
ld ra, 8(sp) // entry
addi sp, sp, 16
// Set SPIE (sstatus bit 5) so interrupts are re-enabled after sret
// Set SPP = 1 (sstatus bit 8) to indicate a return to S-mode
csrr t0, sstatus
ori t0, t0, (1 << 5)
ori t0, t0, (1 << 8)
csrw sstatus, t0
csrw sepc, ra
csrw sscratch, zero
sret
.size __rv64_task_enter_kernel, . - __rv64_task_enter_kernel
.type __rv64_task_enter_user, @function
__rv64_task_enter_user:
csrw sscratch, tp
ld a0, 0 * 8(sp) // argument
ld ra, 1 * 8(sp) // entry
ld tp, 2 * 8(sp) // thread pointer
ld sp, 3 * 8(sp) // user stack
// Set SPIE (sstatus bit 5) so interrupts are enabled after sret
// Clear SPP (sstatus bit 8) to indicate a return to U-mode
li t1, (1 << 8)
not t1, t1
csrr t0, sstatus
ori t0, t0, (1 << 5)
and t0, t0, t1
csrw sstatus, t0
csrw sepc, ra
sret
.size __rv64_task_enter_user, . - __rv64_task_enter_user
.option pop


@ -0,0 +1,222 @@
use core::{arch::global_asm, cell::UnsafeCell, marker::PhantomData};
use kernel_arch_interface::{
mem::{KernelTableManager, PhysicalMemoryAllocator},
task::{StackBuilder, TaskContext, UserContextInfo},
Architecture,
};
use libk_mm_interface::address::PhysicalAddress;
use tock_registers::{
interfaces::{Readable, Writeable},
registers::InMemoryRegister,
};
use yggdrasil_abi::error::Error;
use crate::{
mem::{self, KERNEL_VIRT_OFFSET},
registers::SATP,
ArchitectureImpl, PerCpuData,
};
pub const CONTEXT_SIZE: usize = 14 * size_of::<usize>();
#[repr(C, align(0x10))]
struct TaskContextInner {
// 0x00
sp: usize,
satp: InMemoryRegister<u64, SATP::Register>,
}
pub struct TaskContextImpl<
K: KernelTableManager,
PA: PhysicalMemoryAllocator<Address = PhysicalAddress>,
> {
inner: UnsafeCell<TaskContextInner>,
// fp_context: UnsafeCell<FpContext>,
stack_base_phys: PhysicalAddress,
stack_top: usize,
stack_size: usize,
_pd: PhantomData<(K, PA)>,
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContextImpl<K, PA>
{
unsafe fn load_state(&self) {
// TODO load new SATP value
let inner = unsafe { &*self.inner.get() };
let cpu = unsafe { &mut *ArchitectureImpl::local_cpu().cast::<PerCpuData>() };
// Copy new SATP
let satp = inner.satp.get();
let asid = inner.satp.read(SATP::ASID);
if satp != SATP.get() {
mem::tlb_flush_asid(asid as usize);
SATP.set(satp);
}
cpu.smode_sp = self.stack_top;
}
unsafe fn store_state(&self) {}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
TaskContext<K, PA> for TaskContextImpl<K, PA>
{
const USER_STACK_EXTRA_ALIGN: usize = 8;
const SIGNAL_STACK_EXTRA_ALIGN: usize = 0;
fn user(context: UserContextInfo) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 16;
let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
log::debug!(
"Set up user task: pc={:#x}, sp={:#x}, tp={:#x}",
context.entry,
context.stack_pointer,
context.thread_pointer
);
stack.push(context.stack_pointer);
stack.push(context.thread_pointer);
stack.push(context.entry);
stack.push(context.argument);
setup_common_context(&mut stack, __rv64_task_enter_user as _);
let sp = stack.build();
let satp = InMemoryRegister::new(0);
satp.write(
SATP::MODE::Sv39
+ SATP::ASID.val(context.asid)
+ SATP::PPN.val(context.address_space >> 12),
);
Ok(Self {
inner: UnsafeCell::new(TaskContextInner { sp, satp }),
// fp_context: UnsafeCell::new(FpContext::new()),
stack_base_phys,
stack_top: stack_base + USER_TASK_PAGES * 0x1000,
stack_size: USER_TASK_PAGES * 0x1000,
_pd: PhantomData,
})
}
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 8;
let stack_base_phys = PA::allocate_contiguous_pages(KERNEL_TASK_PAGES)?;
let stack_base = stack_base_phys.raw_virtualize::<K>();
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
// Entry and argument
stack.push(entry as _);
stack.push(arg);
setup_common_context(&mut stack, __rv64_task_enter_kernel as _);
let sp = stack.build();
// TODO stack is leaked
let satp = InMemoryRegister::new(0);
let kernel_table_phys =
((&raw const mem::KERNEL_TABLES).addr() - KERNEL_VIRT_OFFSET) as u64;
satp.write(SATP::MODE::Sv39 + SATP::ASID.val(0) + SATP::PPN.val(kernel_table_phys >> 12));
Ok(Self {
inner: UnsafeCell::new(TaskContextInner { sp, satp }),
// fp_context: UnsafeCell::new(FpContext::new()),
stack_base_phys,
stack_top: 0,
stack_size: KERNEL_TASK_PAGES * 0x1000,
_pd: PhantomData,
})
}
fn set_thread_pointer(&self, tp: usize) {
let _ = tp;
todo!()
}
fn align_stack_for_entry(sp: usize) -> usize {
sp
}
unsafe fn enter(&self) -> ! {
unsafe {
self.load_state();
__rv64_enter_task(self.inner.get())
}
}
unsafe fn switch(&self, from: &Self) {
if core::ptr::addr_eq(self, from) {
return;
}
unsafe {
from.store_state();
self.load_state();
__rv64_switch_task(self.inner.get(), from.inner.get())
}
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
unsafe {
self.load_state();
__rv64_switch_task_and_drop(self.inner.get(), thread)
}
}
}
impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>> Drop
for TaskContextImpl<K, PA>
{
fn drop(&mut self) {
assert_eq!(self.stack_size % 0x1000, 0);
for offset in (0..self.stack_size).step_by(0x1000) {
unsafe {
PA::free_page(self.stack_base_phys.add(offset));
}
}
}
}
fn setup_common_context(builder: &mut StackBuilder, entry: usize) {
builder.push(0); // x8/s0/fp
builder.push(0); // x9/s1
builder.push(0); // x18/s2
builder.push(0); // x19/s3
builder.push(0); // x20/s4
builder.push(0); // x21/s5
builder.push(0); // x22/s6
builder.push(0); // x23/s7
builder.push(0); // x24/s8
builder.push(0); // x25/s9
builder.push(0); // x26/s10
builder.push(0); // x27/s11
builder.push(0); // x4/gp
builder.push(entry); // x1/ra return address
}
unsafe extern "C" {
fn __rv64_enter_task(to: *mut TaskContextInner) -> !;
fn __rv64_switch_task(to: *mut TaskContextInner, from: *mut TaskContextInner);
fn __rv64_switch_task_and_drop(to: *mut TaskContextInner, thread: *const ()) -> !;
fn __rv64_task_enter_kernel();
fn __rv64_task_enter_user();
// fn __rv64_fp_store_context(to: *mut c_void);
// fn __rv64_fp_restore_context(from: *const c_void);
}
global_asm!(
include_str!("context.S"),
context_size = const CONTEXT_SIZE,
);


@ -0,0 +1,6 @@
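/// Reads the `time` CSR. The returned tick count advances at the `timebase-frequency`
/// reported by the device tree.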
#[inline]
pub fn rdtime() -> u64 {
let mut output: u64;
unsafe { core::arch::asm!("rdtime {0}", out(reg) output) };
output
}


@ -0,0 +1,170 @@
#![feature(decl_macro, naked_functions)]
#![no_std]
extern crate alloc;
use core::sync::atomic::{AtomicUsize, Ordering};
use alloc::{boxed::Box, collections::btree_map::BTreeMap, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuData, CpuImpl, IpiQueue},
sync::IrqSafeSpinlock,
task::Scheduler,
util::OneTimeInit,
Architecture,
};
use tock_registers::interfaces::{ReadWriteable, Readable};
use registers::SSTATUS;
pub mod mem;
pub use mem::{process::ProcessAddressSpaceImpl, KernelTableManagerImpl};
pub mod context;
pub use context::TaskContextImpl;
pub mod intrinsics;
pub mod registers;
pub mod sbi;
pub struct ArchitectureImpl;
#[repr(C)]
pub struct PerCpuData {
// Used in assembly
pub tmp_t0: usize, // 0x00
pub umode_sp: usize, // 0x08
pub smode_sp: usize, // 0x10
// Used elsewhere
pub bootstrap: bool,
pub queue_index: usize,
}
pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);
static IPI_QUEUES: OneTimeInit<Vec<IpiQueue<ArchitectureImpl>>> = OneTimeInit::new();
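// Hart IDs are not required to be contiguous (or to start at 0), so each hart is
// mapped to a dense IPI-queue index.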
static HART_TO_QUEUE: IrqSafeSpinlock<ArchitectureImpl, BTreeMap<u32, usize>> =
IrqSafeSpinlock::new(BTreeMap::new());
impl CpuData for PerCpuData {
fn is_bootstrap(&self, id: u32) -> bool {
let _ = id;
self.bootstrap
}
fn queue_index(&self, id: u32) -> usize {
let _ = id;
self.queue_index
}
}
#[naked]
extern "C" fn idle_task(_: usize) -> ! {
unsafe {
core::arch::naked_asm!("1: nop; j 1b");
}
}
impl ArchitectureImpl {
pub fn for_each_hart<F: FnMut(u32, usize, &IpiQueue<ArchitectureImpl>)>(mut f: F) {
let map = HART_TO_QUEUE.lock();
map.iter().for_each(|(&hart_id, &queue_index)| {
let queue = &IPI_QUEUES.get()[queue_index];
f(hart_id, queue_index, queue);
});
}
}
impl Architecture for ArchitectureImpl {
type PerCpuData = PerCpuData;
type CpuFeatures = ();
type BreakpointType = u32;
const BREAKPOINT_VALUE: Self::BreakpointType = 0;
fn halt() -> ! {
loop {
unsafe { Self::set_interrupt_mask(true) };
Self::wait_for_interrupt();
}
}
unsafe fn set_local_cpu(cpu: *mut ()) {
unsafe { core::arch::asm!("mv tp, {0}", in(reg) cpu) };
}
#[inline]
fn local_cpu() -> *mut () {
let value: u64;
unsafe { core::arch::asm!("mv {0}, tp", out(reg) value) };
value as _
}
unsafe fn init_local_cpu<S: Scheduler + 'static>(id: Option<u32>, data: Self::PerCpuData) {
let id = id.expect("riscv64 requires an explicit HART ID in its per-processor struct");
let queue_index = data.queue_index;
HART_TO_QUEUE.lock().insert(id, queue_index);
let cpu = Box::leak(Box::new(CpuImpl::<Self, S>::new(id, data)));
unsafe { cpu.set_local() };
}
unsafe fn init_ipi_queues(queues: Vec<IpiQueue<Self>>) {
IPI_QUEUES.init(queues);
}
fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
let queue_index = *HART_TO_QUEUE.lock().get(&cpu_id)?;
IPI_QUEUES.try_get().and_then(|q| q.get(queue_index))
}
#[inline]
unsafe fn set_interrupt_mask(mask: bool) -> bool {
let old = Self::interrupt_mask();
if mask {
SSTATUS.modify(SSTATUS::SIE::CLEAR);
} else {
SSTATUS.modify(SSTATUS::SIE::SET);
}
old
}
#[inline]
fn interrupt_mask() -> bool {
SSTATUS.matches_all(SSTATUS::SIE::CLEAR)
}
fn wait_for_interrupt() {
unsafe {
core::arch::asm!("wfi");
}
}
fn cpu_count() -> usize {
CPU_COUNT.load(Ordering::Acquire)
}
fn cpu_index<S: Scheduler + 'static>() -> u32 {
CpuImpl::<Self, S>::local().id()
}
fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
let _ = cpu;
todo!()
}
fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
let _ = cpu;
todo!()
}
fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
todo!()
}
fn message_interrupt_controller() -> Option<&'static dyn MessageInterruptController> {
todo!()
}
fn idle_task() -> extern "C" fn(usize) -> ! {
idle_task
}
}


@ -0,0 +1,357 @@
use cfg_if::cfg_if;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
split_spinlock,
};
use libk_mm_interface::{
address::PhysicalAddress,
table::{page_index, EntryLevel, EntryLevelExt},
};
use memtables::riscv64::PageAttributes;
use static_assertions::{const_assert, const_assert_eq};
use table::{PageEntry, PageTable, L1, L2, L3};
use tock_registers::interfaces::Writeable;
use yggdrasil_abi::error::Error;
pub use memtables::riscv64::FixedTables;
use crate::registers::SATP;
pub mod process;
pub mod table;
split_spinlock! {
use crate::ArchitectureImpl;
use crate::mem::FixedTables;
use libk_mm_interface::KernelImageObject;
#[link_section = ".data.tables"]
#[used]
static KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
}
cfg_if! {
if #[cfg(feature = "riscv64_board_virt")] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
} else if #[cfg(feature = "riscv64_board_jh7110")] {
pub const KERNEL_PHYS_BASE: usize = 0x40200000;
} else if #[cfg(rust_analyzer)] {
pub const KERNEL_PHYS_BASE: usize = 0x80200000;
}
}
pub const KERNEL_VIRT_OFFSET: usize = kernel_arch_interface::KERNEL_VIRT_OFFSET;
pub const SIGN_EXTEND_MASK: usize = 0xFFFFFF80_00000000;
pub const KERNEL_START_L1I: usize = page_index::<L1>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
pub const KERNEL_L2I: usize = page_index::<L2>(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const_assert_eq!(KERNEL_L2I, 1);
// Runtime mappings
// 1GiB of device memory space
const DEVICE_MAPPING_L1I: usize = KERNEL_START_L1I + 1;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 32GiB of RAM space
const RAM_MAPPING_START_L1I: usize = KERNEL_START_L1I + 2;
const RAM_MAPPING_L1_COUNT: usize = 32;
const_assert!(RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT <= 512);
const_assert!(DEVICE_MAPPING_L1I < 512);
const DEVICE_MAPPING_OFFSET: usize = (DEVICE_MAPPING_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
const RAM_MAPPING_OFFSET: usize = (RAM_MAPPING_START_L1I << L1::SHIFT) | SIGN_EXTEND_MASK;
// Runtime tables
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
static mut DEVICE_MAPPING_L3S: [PageTable<L3>; DEVICE_MAPPING_L3_COUNT] =
[const { PageTable::zeroed() }; DEVICE_MAPPING_L3_COUNT];
/// Any VAs above this one are sign-extended
pub const USER_BOUNDARY: usize = 0x40_00000000;
#[derive(Debug)]
pub struct KernelTableManagerImpl;
impl KernelTableManager for KernelTableManagerImpl {
fn virtualize(address: u64) -> usize {
let address = address as usize;
if address >= RAM_MAPPING_OFFSET {
panic!("Invalid physical address: {address:#x}");
}
address + RAM_MAPPING_OFFSET
}
fn physicalize(address: usize) -> u64 {
if address < RAM_MAPPING_OFFSET {
panic!("Invalid \"physicalized\" virtual address {address:#x}");
}
(address - RAM_MAPPING_OFFSET) as u64
}
unsafe fn map_device_pages(
base: u64,
count: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<Self>, Error> {
unsafe { map_device_memory(PhysicalAddress::from_u64(base), count, attrs) }
}
unsafe fn unmap_device_pages(mapping: &RawDeviceMemoryMapping<Self>) {
unsafe { unmap_device_memory(mapping) }
}
}
// Device mappings
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
// TODO don't map pages if already mapped
'l0: for i in 0..DEVICE_MAPPING_L3_COUNT * 512 {
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
if DEVICE_MAPPING_L3S[l2i][l3i].is_present() {
continue 'l0;
}
}
}
for j in 0..count {
let l2i = (i + j) / 512;
let l3i = (i + j) % 512;
unsafe {
DEVICE_MAPPING_L3S[l2i][l3i] =
PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
}
}
let start = DEVICE_MAPPING_OFFSET + i * L3::SIZE;
tlb_flush_range_va(start, count * L3::SIZE);
return Ok(start);
}
Err(Error::OutOfMemory)
}
#[allow(unused)]
unsafe fn map_device_memory_l2(
base: PhysicalAddress,
count: usize,
_attrs: DeviceMemoryAttributes,
) -> Result<usize, Error> {
'l0: for i in DEVICE_MAPPING_L3_COUNT..512 {
for j in 0..count {
unsafe {
if DEVICE_MAPPING_L2[i + j].is_present() {
continue 'l0;
}
}
}
unsafe {
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
}
}
let start = DEVICE_MAPPING_OFFSET + i * L2::SIZE;
tlb_flush_range_va(start, count * L2::SIZE);
return Ok(start);
}
Err(Error::OutOfMemory)
}
pub(crate) unsafe fn map_device_memory(
base: PhysicalAddress,
size: usize,
attrs: DeviceMemoryAttributes,
) -> Result<RawDeviceMemoryMapping<KernelTableManagerImpl>, Error> {
let l3_aligned = base.page_align_down::<L3>();
let l3_offset = base.page_offset::<L3>();
let page_count = (l3_offset + size).page_count::<L3>();
if page_count > 256 {
// Large mapping, use L2 mapping instead
let l2_aligned = base.page_align_down::<L2>();
let l2_offset = base.page_offset::<L2>();
let page_count = (l2_offset + size).page_count::<L2>();
unsafe {
let base_address = map_device_memory_l2(l2_aligned, page_count, attrs)?;
let address = base_address + l2_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
address,
base_address,
page_count,
L2::SIZE,
))
}
} else {
// Just map the pages directly
unsafe {
let base_address = map_device_memory_l3(l3_aligned, page_count, attrs)?;
let address = base_address + l3_offset;
Ok(RawDeviceMemoryMapping::from_raw_parts(
address,
base_address,
page_count,
L3::SIZE,
))
}
}
}
pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl>) {
match map.page_size {
L3::SIZE => {
for i in 0..map.page_count {
let page = map.base_address + i * L3::SIZE;
let l2i = page.page_index::<L2>();
let l3i = page.page_index::<L3>();
unsafe {
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
}
}
tlb_flush_range_va(map.base_address, map.page_count * L3::SIZE);
}
L2::SIZE => todo!(),
_ => unimplemented!(),
}
}
pub fn auto_address<T>(x: *const T) -> usize {
let x = x.addr();
if x >= KERNEL_VIRT_OFFSET {
x - KERNEL_VIRT_OFFSET
} else {
x
}
}
/// Enables the memory translation.
///
/// # Safety
///
/// Only meant to be called once per HART, during its early init.
pub unsafe fn enable_mmu() {
let l1_phys = auto_address(&raw const KERNEL_TABLES) as u64;
tlb_flush_full();
SATP.write(SATP::PPN.val(l1_phys >> 12) + SATP::MODE::Sv39);
}
/// Removes the lower half translation mappings.
///
/// # Safety
///
/// Needs to be called once after secondary HARTs are initialized.
pub unsafe fn unmap_lower_half() {
let mut tables = KERNEL_TABLES.lock();
let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
tables.l1.data[kernel_l1i_lower] = 0;
tlb_flush_range_va(0x0, L1::SIZE);
}
/// Sets up run-time kernel translation tables.
///
/// # Safety
///
/// The caller must ensure MMU is already enabled.
pub unsafe fn setup_fixed_tables() {
let mut tables = KERNEL_TABLES.lock();
let device_mapping_l2_phys = auto_address(&raw const DEVICE_MAPPING_L2);
// Set up static runtime mappings
for i in 0..DEVICE_MAPPING_L3_COUNT {
unsafe {
let device_mapping_l3_phys = PhysicalAddress::from_usize(
(&raw const DEVICE_MAPPING_L3S[i]).addr() - KERNEL_VIRT_OFFSET,
);
DEVICE_MAPPING_L2[i] =
PageEntry::table(device_mapping_l3_phys, PageAttributes::empty());
}
}
assert_eq!(tables.l1.data[DEVICE_MAPPING_L1I], 0);
tables.l1.data[DEVICE_MAPPING_L1I] =
((device_mapping_l2_phys as u64) >> 2) | PageAttributes::V.bits();
for l1i in 0..RAM_MAPPING_L1_COUNT {
let physical = (l1i as u64) << L1::SHIFT;
tables.l1.data[l1i + RAM_MAPPING_START_L1I] = (physical >> 2)
| (PageAttributes::R
| PageAttributes::W
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V)
.bits();
}
tlb_flush_full();
}
pub fn tlb_flush_global_full() {
tlb_flush_full();
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_global_va(va: usize) {
tlb_flush_va(va);
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_range_va(start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va(page);
}
}
pub fn tlb_flush_range_va_asid(asid: usize, start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va_asid(page, asid);
}
}
#[inline]
pub fn tlb_flush_full() {
unsafe { core::arch::asm!("sfence.vma") };
}
#[inline]
pub fn tlb_flush_va(va: usize) {
unsafe { core::arch::asm!("sfence.vma {0}, zero", in(reg) va) };
}
#[inline]
pub fn tlb_flush_asid(asid: usize) {
unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
}
#[inline]
pub fn tlb_flush_va_asid(va: usize, asid: usize) {
unsafe { core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
}
pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {
let tables = KERNEL_TABLES.lock();
for l1i in page_index::<L1>(USER_BOUNDARY)..512 {
dst[l1i] = unsafe { PageEntry::from_raw(tables.l1.data[l1i]) };
}
}


@ -0,0 +1,196 @@
use core::{
marker::PhantomData,
sync::atomic::{AtomicU16, Ordering},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::ProcessAddressSpaceManager,
table::{
EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
},
};
use memtables::riscv64::PageAttributes;
use yggdrasil_abi::error::Error;
use crate::mem::{clone_kernel_tables, table::PageEntry};
use super::{
table::{DroppableRange, PageTable, L1, L2, L3},
KernelTableManagerImpl, USER_BOUNDARY,
};
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
l1: PhysicalRefMut<'static, PageTable<L1>, KernelTableManagerImpl>,
asid: u16,
_pd: PhantomData<TA>,
}
impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
const LOWER_LIMIT_PFN: usize = 8;
const UPPER_LIMIT_PFN: usize = (16 << 30) / L3::SIZE;
fn new() -> Result<Self, Error> {
static LAST_ASID: AtomicU16 = AtomicU16::new(1);
let mut l1 = unsafe {
PhysicalRefMut::<'static, PageTable<L1>, KernelTableManagerImpl>::map(
TA::allocate_page_table()?,
)
};
for i in 0..512 {
l1[i] = PageEntry::INVALID;
}
// Copy the kernel mappings
clone_kernel_tables(&mut l1);
let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);
Ok(Self {
l1,
asid,
_pd: PhantomData,
})
}
unsafe fn map_page(
&mut self,
address: usize,
physical: PhysicalAddress,
flags: MapAttributes,
) -> Result<(), Error> {
self.write_l3_entry(
address,
PageEntry::page(physical, to_page_attributes(flags)),
false,
)
.unwrap();
Ok(())
}
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
self.pop_l3_entry(address)
}
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
self.read_l3_entry(address).ok_or(Error::DoesNotExist)
}
fn as_address_with_asid(&self) -> (u64, u64) {
let physical = unsafe { self.l1.as_physical_address() }.into_u64();
(physical, self.asid as u64)
}
unsafe fn clear(&mut self) {
unsafe { self.l1.drop_range::<TA>(L1::DROPPABLE_RANGE) };
}
}
impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
// Write a single 4KiB entry
fn write_l3_entry(
&mut self,
virt: usize,
entry: PageEntry<L3>,
overwrite: bool,
) -> Result<(), Error> {
if virt >= USER_BOUNDARY {
log::warn!("Tried to map a userspace page to a non-userspace virtual region");
return Err(Error::InvalidArgument);
}
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let mut l2 = self.l1.get_mut_or_alloc::<TA>(l1i)?;
let mut l3 = l2.get_mut_or_alloc::<TA>(l2i)?;
if l3[l3i].is_present() && !overwrite {
todo!();
}
l3[l3i] = entry;
super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(())
}
fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
// TODO somehow drop tables if they're known to be empty?
let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
l3[l3i] = PageEntry::INVALID;
super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(page)
}
fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
if virt >= USER_BOUNDARY {
log::warn!("Tried read an userspace page to a non-userspace virtual region");
return None;
}
let l1i = virt.page_index::<L1>();
let l2i = virt.page_index::<L2>();
let l3i = virt.page_index::<L3>();
let l2 = self.l1.get(l1i)?;
let l3 = l2.get(l2i)?;
let page = l3[l3i].as_page()?;
Some((
page.add(virt & 0xFFF),
to_map_attributes(l3[l3i].attributes()),
))
}
}
impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
fn drop(&mut self) {
// SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
// is safe, no one refers to the memory
unsafe {
self.clear();
let l1_phys = self.l1.as_physical_address();
TA::free_page_table(l1_phys);
super::tlb_flush_asid(self.asid as usize);
}
}
}
fn to_page_attributes(src: MapAttributes) -> PageAttributes {
let mut result = PageAttributes::R | PageAttributes::X;
if src.contains(MapAttributes::USER_WRITE) {
result |= PageAttributes::W;
}
if src.intersects(MapAttributes::USER_READ | MapAttributes::USER_WRITE) {
result |= PageAttributes::U;
}
result
}
fn to_map_attributes(src: PageAttributes) -> MapAttributes {
let mut result = MapAttributes::NON_GLOBAL;
if src.contains(PageAttributes::U) {
result |= MapAttributes::USER_READ;
if src.contains(PageAttributes::W) {
result |= MapAttributes::USER_WRITE;
}
}
result
}


@ -0,0 +1,252 @@
use core::{
marker::PhantomData,
ops::{Index, IndexMut, Range},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
table::{
page_index, EntryLevel, EntryLevelDrop, NextPageTable, NonTerminalEntryLevel,
TableAllocator,
},
};
use yggdrasil_abi::error::Error;
use super::{KernelTableManagerImpl, USER_BOUNDARY};
pub use memtables::riscv64::PageAttributes;
/// L3 - entry is 4KiB
#[derive(Debug, Clone, Copy)]
pub struct L3;
/// L2 - entry is 2MiB
#[derive(Debug, Clone, Copy)]
pub struct L2;
/// L1 - entry is 1GiB
#[derive(Debug, Clone, Copy)]
pub struct L1;
impl EntryLevel for L3 {
const SHIFT: usize = 12;
}
impl EntryLevel for L2 {
const SHIFT: usize = 21;
}
impl EntryLevel for L1 {
const SHIFT: usize = 30;
}
#[repr(C, align(0x1000))]
pub struct PageTable<L: EntryLevel> {
entries: [PageEntry<L>; 512],
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);
pub(super) trait DroppableRange {
const DROPPABLE_RANGE: Range<usize>;
}
impl DroppableRange for L1 {
const DROPPABLE_RANGE: Range<usize> = 0..page_index::<L1>(USER_BOUNDARY);
}
impl DroppableRange for L2 {
const DROPPABLE_RANGE: Range<usize> = 0..512;
}
impl NonTerminalEntryLevel for L1 {
type NextLevel = L2;
}
impl NonTerminalEntryLevel for L2 {
type NextLevel = L3;
}
impl<L: EntryLevel> PageTable<L> {
pub const fn zeroed() -> Self {
Self {
entries: [PageEntry::INVALID; 512],
}
}
pub fn new_zeroed<'a, TA: TableAllocator>(
) -> Result<PhysicalRefMut<'a, PageTable<L>, KernelTableManagerImpl>, Error> {
let physical = TA::allocate_page_table()?;
let mut table =
unsafe { PhysicalRefMut::<'a, Self, KernelTableManagerImpl>::map(physical) };
for i in 0..512 {
table[i] = PageEntry::INVALID;
}
Ok(table)
}
}
impl<L: EntryLevel> PageEntry<L> {
pub const INVALID: Self = Self(0, PhantomData);
/// Constructs a [PageEntry] from its raw representation.
///
/// # Safety
///
/// The caller must ensure `value` is actually a "valid" PTE.
pub const unsafe fn from_raw(value: u64) -> Self {
Self(value, PhantomData)
}
pub const fn is_present(&self) -> bool {
self.0 & PageAttributes::V.bits() != 0
}
pub fn attributes(self) -> PageAttributes {
PageAttributes::from_bits_retain(self.0)
}
}
impl<L: NonTerminalEntryLevel + DroppableRange> EntryLevelDrop for PageTable<L>
where
PageTable<L::NextLevel>: EntryLevelDrop,
{
const FULL_RANGE: Range<usize> = L::DROPPABLE_RANGE;
unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
for index in range {
let entry = self[index];
if let Some(table) = entry.as_table() {
unsafe {
let mut table_ref: PhysicalRefMut<
PageTable<L::NextLevel>,
KernelTableManagerImpl,
> = PhysicalRefMut::map(table);
table_ref.drop_all::<TA>();
TA::free_page_table(table);
}
} else if entry.is_present() {
// Memory must have been cleared beforehand, so no non-table entries should remain
panic!(
"Expected a table containing only tables, got table[{}] = {:#x?}",
index, entry.0
);
}
self[index] = PageEntry::INVALID;
// dc_cvac((&raw const self[index]).addr());
}
}
}
impl EntryLevelDrop for PageTable<L3> {
const FULL_RANGE: Range<usize> = 0..512;
// Do nothing
unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
type TableRefMut = PhysicalRefMut<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
fn get(&self, index: usize) -> Option<Self::TableRef> {
let table = self[index].as_table()?;
Some(unsafe { PhysicalRef::map(table) })
}
fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
let table = self[index].as_table()?;
Some(unsafe { PhysicalRefMut::map(table) })
}
fn get_mut_or_alloc<TA: TableAllocator>(
&mut self,
index: usize,
) -> Result<Self::TableRefMut, Error> {
if let Some(table) = self[index].as_table() {
Ok(unsafe { PhysicalRefMut::map(table) })
} else {
let table = PageTable::new_zeroed::<TA>()?;
self[index] = PageEntry::<L>::table(
unsafe { table.as_physical_address() },
PageAttributes::empty(),
);
// dc_cvac((&raw const self[index]).addr());
Ok(table)
}
}
}
impl<L: NonTerminalEntryLevel> PageEntry<L> {
pub fn block(address: PhysicalAddress, attrs: PageAttributes) -> Self {
// TODO validate address alignment
Self(
(address.into_u64() >> 2)
| (PageAttributes::R
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V
| attrs)
.bits(),
PhantomData,
)
}
pub fn table(address: PhysicalAddress, mut attrs: PageAttributes) -> Self {
attrs.remove(PageAttributes::R | PageAttributes::W | PageAttributes::X);
Self(
(address.into_u64() >> 2) | (PageAttributes::V | attrs).bits(),
PhantomData,
)
}
pub fn as_table(&self) -> Option<PhysicalAddress> {
(self.0
& (PageAttributes::R | PageAttributes::W | PageAttributes::X | PageAttributes::V)
.bits()
== PageAttributes::V.bits())
.then_some((self.0 << 2) & !0xFFF)
.map(PhysicalAddress::from_u64)
}
}
impl PageEntry<L3> {
pub fn page(address: PhysicalAddress, attrs: PageAttributes) -> Self {
Self(
(address.into_u64() >> 2)
| (PageAttributes::R
| PageAttributes::A
| PageAttributes::D
| PageAttributes::V
| attrs)
.bits(),
PhantomData,
)
}
pub fn as_page(&self) -> Option<PhysicalAddress> {
(self.0 & PageAttributes::V.bits() != 0)
.then_some((self.0 << 2) & !0xFFF)
.map(PhysicalAddress::from_u64)
}
}
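// Note on the PTE encoding used above: in Sv39 the PPN field occupies bits 10..=53 and
// PPN = physical_address >> 12, so `address >> 2` places the PPN directly at bit 10
// ((phys >> 12) << 10 == phys >> 2), and `(pte << 2) & !0xFFF` recovers the page-aligned
// physical address. E.g. phys 0x8020_0000 -> PPN bits 0x2008_0000.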
impl<L: EntryLevel> Index<usize> for PageTable<L> {
type Output = PageEntry<L>;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl<L: EntryLevel> IndexMut<usize> for PageTable<L> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}


@ -0,0 +1,221 @@
macro impl_csr_read($struct:ident, $repr:ty, $reg:ident, $register:ty) {
impl tock_registers::interfaces::Readable for $struct {
type T = $repr;
type R = $register;
#[inline]
fn get(&self) -> $repr {
let mut value: $repr;
unsafe {
core::arch::asm!(concat!("csrr {0}, ", stringify!($reg)), out(reg) value);
}
value
}
}
}
macro impl_csr_write($struct:ident, $repr:ty, $reg:ident, $register:ty) {
impl tock_registers::interfaces::Writeable for $struct {
type T = $repr;
type R = $register;
#[inline]
fn set(&self, value: $repr) {
unsafe {
core::arch::asm!(concat!("csrw ", stringify!($reg), ", {0}"), in(reg) value);
}
}
}
}
pub mod satp {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SATP [
PPN OFFSET(0) NUMBITS(44) [],
ASID OFFSET(44) NUMBITS(16) [],
MODE OFFSET(60) NUMBITS(4) [
Bare = 0,
Sv39 = 8,
Sv48 = 9,
Sv57 = 10,
Sv64 = 11,
],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, satp, SATP::Register);
impl_csr_write!(Reg, u64, satp, SATP::Register);
pub const SATP: Reg = Reg;
}
pub mod stvec {
use tock_registers::{interfaces::ReadWriteable, register_bitfields};
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub STVEC [
MODE OFFSET(0) NUMBITS(2) [
Direct = 0,
Vectored = 1
],
BASE OFFSET(2) NUMBITS(62) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, stvec, STVEC::Register);
impl_csr_write!(Reg, u64, stvec, STVEC::Register);
impl Reg {
pub fn set_base(&self, base: usize) {
debug_assert_eq!(base & 0xF, 0);
let mask = match base & 63 != 0 {
false => 0,
true => 0x3 << 62,
};
self.modify(STVEC::BASE.val(((base as u64) >> 2) | mask));
}
}
pub const STVEC: Reg = Reg;
}
pub mod scause {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SCAUSE [
CODE OFFSET(0) NUMBITS(63) [],
INTERRUPT OFFSET(63) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, scause, SCAUSE::Register);
impl_csr_write!(Reg, u64, scause, SCAUSE::Register);
pub const SCAUSE: Reg = Reg;
}
pub mod stval {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, stval, ());
impl_csr_write!(Reg, u64, stval, ());
pub const STVAL: Reg = Reg;
}
pub mod sepc {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, sepc, ());
impl_csr_write!(Reg, u64, sepc, ());
pub const SEPC: Reg = Reg;
}
pub mod sstatus {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SSTATUS [
SUM OFFSET(18) NUMBITS(1) [],
SPP OFFSET(8) NUMBITS(1) [],
SIE OFFSET(1) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sstatus, SSTATUS::Register);
impl_csr_write!(Reg, u64, sstatus, SSTATUS::Register);
pub const SSTATUS: Reg = Reg;
}
pub mod sscratch {
use super::{impl_csr_read, impl_csr_write};
pub struct Reg;
impl_csr_read!(Reg, u64, sscratch, ());
impl_csr_write!(Reg, u64, sscratch, ());
pub const SSCRATCH: Reg = Reg;
}
pub mod sip {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIP [
SSIP OFFSET(1) NUMBITS(1) [],
STIP OFFSET(5) NUMBITS(1) [],
SEIP OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sip, SIP::Register);
impl_csr_write!(Reg, u64, sip, SIP::Register);
pub const SIP: Reg = Reg;
}
pub mod sie {
use tock_registers::register_bitfields;
use super::{impl_csr_read, impl_csr_write};
register_bitfields!(
u64,
pub SIE [
SSIE OFFSET(1) NUMBITS(1) [],
STIE OFFSET(5) NUMBITS(1) [],
SEIE OFFSET(9) NUMBITS(1) [],
]
);
pub struct Reg;
impl_csr_read!(Reg, u64, sie, SIE::Register);
impl_csr_write!(Reg, u64, sie, SIE::Register);
pub const SIE: Reg = Reg;
}
pub use satp::SATP;
pub use scause::SCAUSE;
pub use sepc::SEPC;
pub use sie::SIE;
pub use sip::SIP;
pub use sscratch::SSCRATCH;
pub use sstatus::SSTATUS;
pub use stval::STVAL;
pub use stvec::STVEC;


@ -0,0 +1,110 @@
use yggdrasil_abi::{error::Error, primitive_enum};
const EXT_HSM: u64 = 0x48534D;
const EXT_TIME: u64 = 0x54494D45;
const EXT_DBCN: u64 = 0x4442434E;
const EXT_SPI: u64 = 0x735049;
primitive_enum! {
pub enum Status: i64 {
Failed = -1,
NotSupported = -2,
InvalidParam = -3,
Denied = -4,
InvalidAddress = -5,
AlreadyAvailable = -6,
AlreadyStarted = -7,
AlreadyStopped = -8,
NoShmem = -9,
InvalidState = -10,
BadRange = -11,
Timeout = -12,
Io = -13,
}
}
primitive_enum! {
pub enum HartState: u64 {
Started = 0,
Stopped = 1,
StartPending = 2,
StopPending = 3,
Suspended = 4,
SuspendPending = 5,
ResumePending = 6,
}
}
pub enum SbiError {
Status(Status),
Other(i64),
}
impl From<i64> for SbiError {
#[inline]
fn from(value: i64) -> Self {
match Status::try_from(value) {
Ok(value) => Self::Status(value),
Err(_) => Self::Other(value),
}
}
}
#[allow(clippy::too_many_arguments)]
#[inline(always)]
unsafe fn sbi_do_call(
extension: u64,
function: u64,
mut a0: u64,
mut a1: u64,
a2: u64,
a3: u64,
a4: u64,
a5: u64,
) -> Result<u64, SbiError> {
unsafe {
core::arch::asm!(
"ecall",
inlateout("a0") a0,
inlateout("a1") a1,
in("a2") a2,
in("a3") a3,
in("a4") a4,
in("a5") a5,
in("a6") function,
in("a7") extension,
);
}
let a0 = a0 as i64;
if a0 == 0 {
Ok(a1)
} else {
Err(a0.into())
}
}
pub fn sbi_hart_start(hart_id: u64, start_addr: u64, opaque: u64) -> Result<(), Error> {
match unsafe { sbi_do_call(EXT_HSM, 0x00, hart_id, start_addr, opaque, 0, 0, 0) } {
Ok(_) => Ok(()),
Err(SbiError::Status(Status::AlreadyAvailable)) => Err(Error::AlreadyExists),
Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
Err(SbiError::Status(Status::InvalidAddress)) => Err(Error::InvalidArgument),
Err(_) => Err(Error::InvalidOperation),
}
}
pub fn sbi_send_ipi(hart_mask: u64, hart_mask_base: u64) -> Result<(), Error> {
match unsafe { sbi_do_call(EXT_SPI, 0x00, hart_mask, hart_mask_base, 0, 0, 0, 0) } {
Ok(_) => Ok(()),
Err(SbiError::Status(Status::InvalidParam)) => Err(Error::DoesNotExist),
Err(_) => Err(Error::InvalidOperation),
}
}
pub fn sbi_debug_console_write_byte(byte: u8) {
unsafe { sbi_do_call(EXT_DBCN, 0x02, byte as u64, 0, 0, 0, 0, 0) }.ok();
}
pub fn sbi_set_timer(next_event: u64) {
unsafe { sbi_do_call(EXT_TIME, 0x00, next_event, 0, 0, 0, 0, 0) }.ok();
}
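// A minimal early-console helper built on the DBCN call above (a sketch, not part of
// the original patch):
pub fn sbi_debug_console_write_str(s: &str) {
    for byte in s.bytes() {
        sbi_debug_console_write_byte(byte);
    }
}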


@ -28,6 +28,8 @@ cfg_if! {
extern crate kernel_arch_x86_64 as imp;
} else if #[cfg(target_arch = "x86")] {
extern crate kernel_arch_i686 as imp;
} else if #[cfg(target_arch = "riscv64")] {
extern crate kernel_arch_riscv64 as imp;
} else {
compile_error!("Unsupported architecture");
}


@ -12,7 +12,7 @@ use core::{
use alloc::{boxed::Box, vec::Vec};
use device_api::interrupt::{LocalInterruptController, MessageInterruptController};
use kernel_arch_interface::{
cpu::{CpuImpl, IpiQueue},
cpu::{CpuData, CpuImpl, IpiQueue},
task::Scheduler,
util::OneTimeInit,
Architecture,
@ -58,6 +58,8 @@ pub struct PerCpuData {
pub enabled_features: CpuFeatures,
}
impl CpuData for PerCpuData {}
impl PerCpuData {
#[inline]
pub fn local_apic(&self) -> &dyn LocalApicInterface {


@ -71,9 +71,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
.ok_or(Error::InvalidMemoryOperation)
}
fn as_address_with_asid(&self) -> u64 {
fn as_address_with_asid(&self) -> (u64, u64) {
// TODO x86-64 PCID/ASID?
unsafe { self.l0.as_physical_address().into_u64() }
(unsafe { self.l0.as_physical_address().into_u64() }, 0)
}
unsafe fn clear(&mut self) {


@ -92,6 +92,7 @@ fn main() {
"x86" => (),
"x86_64" => build_x86_64(),
"aarch64" => (),
"riscv64" => (),
_ => panic!("Unknown target arch: {:?}", arch),
}
}


@ -122,7 +122,10 @@ struct BusAddressAllocator {
offset_32: u32,
}
#[cfg_attr(any(target_arch = "x86_64", target_arch = "x86"), allow(dead_code))]
#[cfg_attr(
any(target_arch = "x86_64", target_arch = "x86", target_arch = "riscv64"),
allow(dead_code)
)]
impl BusAddressAllocator {
pub fn from_ranges(ranges: &[PciAddressRange]) -> Self {
let mut range_32 = None;
@ -463,7 +466,7 @@ impl PciBusManager {
Ok(())
}
#[cfg(target_arch = "aarch64")]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub fn add_segment_from_device_tree(
cfg_base: PhysicalAddress,
bus_range: core::ops::Range<u8>,


@ -44,11 +44,21 @@ pub struct Node {
pub(crate) interrupt_controller: OneTimeInit<Arc<dyn DeviceTreeInterruptController>>,
}
struct NodeDevice {
driver: &'static dyn Driver,
device: Arc<dyn Device>,
enum NodeDevice {
// Node probed, no device found
Missing,
// Node probed and driver found
Present {
driver: &'static dyn Driver,
device: Arc<dyn Device>,
},
}
// struct NodeDevice {
// driver: &'static dyn Driver,
// device: Arc<dyn Device>,
// }
struct EnumerationContext {
address_cells: usize,
size_cells: usize,
@ -56,6 +66,22 @@ struct EnumerationContext {
interrupt_parent: Option<Phandle>,
}
impl NodeDevice {
fn as_device(&self) -> Option<Arc<dyn Device>> {
match self {
Self::Missing => None,
Self::Present { device, .. } => Some(device.clone()),
}
}
fn driver(&self) -> Option<&'static dyn Driver> {
match self {
Self::Missing => None,
Self::Present { driver, .. } => Some(*driver),
}
}
}
impl Node {
fn probe_upwards(self: Arc<Self>) -> (Option<Arc<dyn Device>>, Option<Weak<dyn Bus>>) {
let mut parent_bus = None;
@ -79,17 +105,25 @@ impl Node {
let inner = self.device.or_init_with_opt(|| {
let compatible = self.compatible?;
let drivers = DRIVERS.read();
let driver = drivers.iter().find(|d| d.matches(compatible))?;
let driver = drivers.iter().find(|d| d.matches(compatible));
if driver.is_none() {
// log::warn!("No driver for {compatible:?}");
}
let driver = driver?;
let device = driver.imp.probe(&self, &cx)?;
let device = driver.imp.probe(&self, &cx);
Some(NodeDevice {
driver: driver.imp,
device,
})
let slot = match device {
Some(device) => NodeDevice::Present {
driver: driver.imp,
device,
},
None => NodeDevice::Missing,
};
Some(slot)
});
let device = inner.map(|d| d.device.clone());
let device = inner.and_then(|d| d.as_device());
let bus = if let Some(device) = device.as_ref() {
device.clone().as_bus().as_ref().map(Arc::downgrade)
@ -109,6 +143,11 @@ impl Node {
device
}
/// Returns the parent node of this node
pub fn parent(&self) -> Option<Arc<Node>> {
self.parent.as_ref().and_then(Weak::upgrade)
}
/// When called from an interrupt-controller driver, informs the node of its capability as
/// an interrupt controller, allowing any other nodes which refer to it to map their
/// interrupts.
@ -120,7 +159,7 @@ impl Node {
/// Returns the device driver associated with this node, if any was probed.
pub fn driver(&self) -> Option<&'static dyn Driver> {
Some(self.device.try_get()?.driver)
self.device.try_get()?.driver()
}
/// Performs a lazy initialization of the node:
@ -156,7 +195,7 @@ impl Node {
match self.clone().lazy_init() {
Some(Ok(())) => {
let device = self.device.get();
let status = unsafe { device.device.clone().init_irq() };
let status = unsafe { device.as_device()?.init_irq() };
Some(status)
}
Some(Err(_)) | None => None,
@ -175,7 +214,11 @@ impl Node {
/// * `Err(Error::DoesNotExist)` - couldn't find a device/driver for this node.
/// * `Err(other)` - initialization failed.
pub fn force_init(self: Arc<Self>) -> Result<(), Error> {
let device = self.clone().probe().ok_or(Error::DoesNotExist)?;
let device = self
.clone()
.probe()
.ok_or(Error::DoesNotExist)
.inspect_err(|_| log::error!("Does not exist: probe({:?})", self.name))?;
self.init_token.try_init_with_opt(|| {
unsafe { device.init() }?;
@ -244,7 +287,7 @@ impl Node {
/// Attempts to get a clock controller represented by this node, if any
pub fn as_clock_controller(&self) -> Option<Arc<dyn ClockController>> {
let device = self.device.try_get()?;
device.device.clone().as_clock_controller()
device.as_device()?.as_clock_controller()
}
/// Returns the `#address-cells` value of the node's parent bus


@ -10,7 +10,9 @@ use libk_mm::address::PhysicalAddress;
use yggdrasil_abi::error::Error;
use crate::{
node::DeviceTreeNodeExt, property::DeviceTreePropertyRead, util::DeviceTreeMemoryRegionIter,
node::DeviceTreeNodeExt,
property::DeviceTreePropertyRead,
util::{DeviceTreeMemoryRegionIter, DeviceTreeReservedRegionIter},
};
const FDT_INDEX_BUFFER_SIZE: usize = 65536;
@ -174,6 +176,11 @@ impl<'a> DeviceTree<'a> {
DeviceTreeMemoryRegionIter::new(self)
}
/// Returns an iterator over the reserved memory regions specified by this device tree
pub fn reserved_regions(&self) -> DeviceTreeReservedRegionIter {
DeviceTreeReservedRegionIter::new(self)
}
/// Returns the length of the header provided as a slice of bytes.
///
/// # Safety


@ -19,6 +19,12 @@ pub struct DeviceTreeMemoryRegionIter<'a> {
inner: DevTreeIndexNodeSiblingIter<'a, 'a, 'a>,
}
/// Iterator for reserved physical memory regions
#[derive(Clone)]
pub struct DeviceTreeReservedRegionIter<'a> {
inner: Option<DevTreeIndexNodeSiblingIter<'a, 'a, 'a>>,
}
impl<'a> DeviceTreeMemoryRegionIter<'a> {
pub(crate) fn new(dt: &'a DeviceTree) -> Self {
let inner = dt.root().children();
@ -54,6 +60,38 @@ impl Iterator for DeviceTreeMemoryRegionIter<'_> {
}
}
impl<'a> DeviceTreeReservedRegionIter<'a> {
pub(crate) fn new(dt: &'a DeviceTree) -> Self {
let inner = dt.root().child("reserved-memory").map(|r| r.children());
Self { inner }
}
}
impl Iterator for DeviceTreeReservedRegionIter<'_> {
type Item = PhysicalMemoryRegion;
fn next(&mut self) -> Option<Self::Item> {
let inner = self.inner.as_mut()?;
loop {
let Some(node) = inner.next() else {
break None;
};
if let Some(reg) = node.property("reg") {
let address_cells = node.parent_address_cells();
let size_cells = node.parent_size_cells();
if let Some((base, size)) = reg.read_cells(0, (address_cells, size_cells)) {
let base = PhysicalAddress::from_u64(base);
let size = size as usize;
break Some(PhysicalMemoryRegion { base, size });
}
}
}
}
}
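// A minimal usage sketch for the new iterator, mirroring how the riscv64
// platform init added later in this PR consumes it: every child of
// /reserved-memory with a `reg` property is handed to the reserved-region list
// before the physical allocator is initialized.
//
//     for region in dt.reserved_regions() {
//         reserve_region("mmode-resv", region);
//     }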
/// Registers sysfs objects related to the device tree
pub fn create_sysfs_nodes(dt: &'static DeviceTree) {
struct Raw;

View File

@ -4,9 +4,8 @@ version = "0.1.0"
edition = "2021"
authors = ["Mark Poliakov <mark@alnyan.me>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags.workspace = true
bytemuck.workspace = true
[features]

View File

@ -1,8 +1,9 @@
use crate::{aarch64, x86_64};
use crate::{aarch64, riscv64, x86_64};
pub enum AnyTables {
X86_64(x86_64::FixedTables),
AArch64(aarch64::FixedTables),
Riscv64(riscv64::FixedTables),
}
impl AnyTables {
@ -10,6 +11,7 @@ impl AnyTables {
match self {
Self::X86_64(tables) => bytemuck::bytes_of(tables),
Self::AArch64(tables) => bytemuck::bytes_of(tables),
Self::Riscv64(tables) => bytemuck::bytes_of(tables),
}
}
}
@ -25,3 +27,9 @@ impl From<aarch64::FixedTables> for AnyTables {
Self::AArch64(value)
}
}
impl From<riscv64::FixedTables> for AnyTables {
fn from(value: riscv64::FixedTables) -> Self {
Self::Riscv64(value)
}
}

View File

@ -14,6 +14,12 @@ pub mod x86_64;
#[cfg(all(not(feature = "all"), target_arch = "x86_64"))]
pub use x86_64::FixedTables;
// RISC-V 64-bit
#[cfg(any(feature = "all", target_arch = "riscv64"))]
pub mod riscv64;
#[cfg(all(not(feature = "all"), target_arch = "riscv64"))]
pub use riscv64::FixedTables;
#[cfg(feature = "all")]
pub mod any;

View File

@ -0,0 +1,82 @@
use core::fmt;
use bitflags::bitflags;
use bytemuck::{Pod, Zeroable};
use crate::RawTable;
pub const KERNEL_L3_COUNT: usize = 8;
bitflags! {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PageAttributes: u64 {
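/// NAPOT translation contiguity bit (Svnapot)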
const N = 1 << 63;
/// Dirty bit
const D = 1 << 7;
/// Access bit
const A = 1 << 6;
/// Global mapping bit, implies all lower levels are also global
const G = 1 << 5;
/// U-mode access permission
const U = 1 << 4;
/// Execute permission
const X = 1 << 3;
/// Write permission
const W = 1 << 2;
/// Read permission
const R = 1 << 1;
/// Valid bit
const V = 1 << 0;
}
// X W R Meaning
// 0 0 0 Pointer to next level of page table
// 0 0 1 Read-only page
// 0 1 0 ---
// 0 1 1 Read-write page
// 1 0 0 Execute only
// 1 0 1 Read-execute page
// 1 1 0 ---
// 1 1 1 Read-write-execute page
}
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct FixedTables {
pub l1: RawTable,
pub kernel_l2: RawTable,
pub kernel_l3s: [RawTable; KERNEL_L3_COUNT],
}
impl FixedTables {
pub const fn zeroed() -> Self {
Self {
l1: RawTable::zeroed(),
kernel_l2: RawTable::zeroed(),
kernel_l3s: [RawTable::zeroed(); KERNEL_L3_COUNT],
}
}
}
impl fmt::Display for PageAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use fmt::Write;
macro_rules! bit {
($self:ident, $field:expr, $letter:literal) => {
if $self.contains($field) {
f.write_char($letter)
} else {
f.write_char('-')
}
};
}
bit!(self, Self::R, 'r')?;
bit!(self, Self::W, 'w')?;
bit!(self, Self::X, 'x')?;
bit!(self, Self::U, 'u')?;
Ok(())
}
}
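// Illustration only, not used by the boot tables themselves: how a leaf Sv39
// entry would be assembled from a 4 KiB-aligned physical address and the
// attribute bits above. The physical page number occupies bits 10 and up, the
// low ten bits are the flag bits.
pub fn leaf_entry_example(physical: u64, attrs: PageAttributes) -> u64 {
    ((physical >> 12) << 10) | (attrs | PageAttributes::V).bits()
}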

View File

@ -39,8 +39,8 @@ pub trait ProcessAddressSpaceManager<TA: TableAllocator>: Sized {
/// if one is mapped
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;
/// Returns the implementation specific physical address of this space, with ASID applied
fn as_address_with_asid(&self) -> u64;
/// Returns the physical address of the translation table along with its ASID
fn as_address_with_asid(&self) -> (u64, u64);
/// Clears the address space by dropping any non-global tables.
///

View File

@ -119,6 +119,10 @@ impl<T> PageBox<T, GlobalPhysicalAllocator> {
result.trace_created();
Ok(result)
}
pub unsafe fn from_physical_raw(address: PhysicalAddress) -> PageBox<T> {
PageBox::from_physical_raw_in(address)
}
}
impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T, A> {
@ -220,6 +224,16 @@ impl<T, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T, A> {
result.trace_created();
Ok(result)
}
pub unsafe fn from_physical_raw_in(address: PhysicalAddress) -> PageBox<T, A> {
let page_count = size_of::<T>().div_ceil(L3_PAGE_SIZE);
let value = address.virtualize() as *mut T;
PageBox {
value,
page_count,
_pd: PhantomData,
}
}
}
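// The raw conversion pair added here is what the riscv64 SMP bring-up later in
// this PR relies on: the BSP leaks a SecondaryContext with `into_physical_raw()`
// and passes the physical address to SBI, and the started hart reclaims
// ownership with `PageBox::from_physical_raw()` once it runs in the upper half:
//
//     let a1 = context.into_physical_raw();                             // BSP side, smp.rs
//     let context = PageBox::<SecondaryContext>::from_physical_raw(a1); // secondary, boot.rs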
impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T, A> {
@ -228,6 +242,12 @@ impl<T: ?Sized, A: PhysicalMemoryAllocator<Address = PhysicalAddress>> PageBox<T
self.value as _
}
pub fn into_physical_raw(self) -> PhysicalAddress {
let address = unsafe { self.as_physical_address() };
core::mem::forget(self);
address
}
#[inline]
fn trace_created(&self) {
log::trace!(

View File

@ -335,7 +335,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
);
log::debug!("New AddressSpace {:#x}", table.as_address_with_asid());
let (physical, asid) = table.as_address_with_asid();
log::debug!("New AddressSpace {:#x}, asid {:#x}", physical, asid);
Ok(Self {
inner: IrqSafeSpinlock::new(Inner { table, allocator }),
})
@ -451,8 +452,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
lock.unmap_range(address, size / L3_PAGE_SIZE)
}
/// Returns the physical address of this table, with ASID applied
pub fn as_address_with_asid(&self) -> u64 {
/// Returns the physical address of the translation table along with its ASID
pub fn as_address_with_asid(&self) -> (u64, u64) {
self.inner.lock().table.as_address_with_asid()
}
@ -465,7 +466,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
impl<TA: TableAllocator> Drop for ProcessAddressSpace<TA> {
fn drop(&mut self) {
log::debug!("Drop AddressSpace {:#x}", self.as_address_with_asid());
let (physical, asid) = self.as_address_with_asid();
log::debug!("Drop AddressSpace {:#x}, asid {:#x}", physical, asid);
self.clear().ok();
}
}
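// For orientation: on riscv64 the (physical, asid) pair returned here is what
// the arch layer (kernel-arch-riscv64, not shown in this hunk) packs into the
// satp CSR. With Sv39 that packing is roughly:
//
//     const MODE_SV39: u64 = 8;
//     let satp = (MODE_SV39 << 60) | ((asid & 0xFFFF) << 44) | (physical >> 12);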

View File

@ -20,11 +20,12 @@ use libk_util::{
spin_rwlock::{IrqSafeRwLock, IrqSafeRwLockReadGuard},
IrqSafeSpinlock,
},
StaticVector,
OneTimeInit, StaticVector,
};
use yggdrasil_abi::error::Error;
use crate::{
arch::Cpu,
config,
fs::sysfs::{
self,
@ -95,6 +96,7 @@ pub trait DebugSink: Sync {
#[derive(Clone)]
pub enum DebugSinkWrapper {
Arc(LogLevel, Arc<dyn DebugSink>),
Static(LogLevel, &'static dyn DebugSink),
}
unsafe impl Send for DebugSinkWrapper {}
@ -120,6 +122,7 @@ impl DebugSinkWrapper {
pub fn sink(&self) -> &dyn DebugSink {
match self {
Self::Arc(_, arc) => arc.as_ref(),
Self::Static(_, sink) => *sink,
}
}
@ -127,12 +130,14 @@ impl DebugSinkWrapper {
pub fn level(&self) -> LogLevel {
match self {
Self::Arc(level, _) => *level,
Self::Static(level, _) => *level,
}
}
pub fn set_level(&mut self, target: LogLevel) {
match self {
Self::Arc(level, _) => *level = target,
Self::Static(level, _) => *level = target,
}
}
}
@ -154,6 +159,7 @@ impl log::Log for DebugSinkWrapper {
let level = LogLevel::from(record.level());
let sink = self.sink();
let cpu = Cpu::try_local().map(|c| c.id());
let file = record.file().unwrap_or("<???>");
let line = record.line().unwrap_or(0);
let args = record.args();
@ -177,7 +183,13 @@ impl log::Log for DebugSinkWrapper {
writeln!(writer, "[io] {args}").ok();
}
_ => {
writeln!(writer, "{prefix}{file}:{line}: {args}{suffix}").ok();
write!(writer, "{prefix}").ok();
if let Some(cpu) = cpu {
write!(writer, "{cpu}:").ok();
} else {
write!(writer, "?:").ok();
}
writeln!(writer, "{file}:{line}: {args}{suffix}").ok();
}
}
}
@ -372,12 +384,11 @@ fn make_sysfs_sink_object(index: usize) -> Arc<KObject<usize>> {
object
}
/// Adds a debugging output sink
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
fn add_sink_inner(sink: DebugSinkWrapper) {
let index = {
let mut sinks = DEBUG_SINKS.write();
let index = sinks.len();
sinks.push(DebugSinkWrapper::Arc(level, sink.clone()));
sinks.push(sink);
index
};
@ -388,6 +399,11 @@ pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
}
}
/// Adds a debugging output sink
pub fn add_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Arc(level, sink.clone()));
}
pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
if SERIAL_SINK_SET_UP.swap(true, Ordering::Acquire) {
return;
@ -396,6 +412,20 @@ pub fn add_serial_sink(sink: Arc<dyn DebugSink>, level: LogLevel) {
add_sink(sink, level);
}
pub fn add_early_sink(sink: &'static dyn DebugSink, level: LogLevel) {
add_sink_inner(DebugSinkWrapper::Static(level, sink));
}
pub fn disable_early_sinks() {
let mut sinks = DEBUG_SINKS.write();
// TODO proper sink storage/manipulation
for sink in sinks.iter_mut() {
if let DebugSinkWrapper::Static(level, _) = sink {
*level = LogLevel::Fatal;
}
}
}
/// Print a trace message coming from a process
pub fn program_trace(process: &Process, thread: &Thread, message: &str) {
log::debug!(
@ -407,15 +437,23 @@ pub fn program_trace(process: &Process, thread: &Thread, message: &str) {
);
}
pub fn init_logger() {
static LOGGER_SET_UP: OneTimeInit<()> = OneTimeInit::new();
LOGGER_SET_UP.or_init_with(|| {
log::set_logger(&LOGGER)
.map(|_| log::set_max_level(log::LevelFilter::Trace))
.ok();
});
}
/// Initializes the kernel debug output (ring buffer and logger)
pub fn init() {
if RING_LOGGER_SINK.init_buffer().is_ok() {
RING_AVAILABLE.store(true, Ordering::Release);
}
log::set_logger(&LOGGER)
.map(|_| log::set_max_level(log::LevelFilter::Trace))
.ok();
init_logger();
}
impl fmt::Display for LogLevel {

View File

@ -30,6 +30,8 @@ cfg_if! {
const EXPECTED_ELF_MACHINE: u16 = elf::abi::EM_AARCH64;
} else if #[cfg(target_arch = "x86")] {
const EXPECTED_ELF_MACHINE: u16 = elf::abi::EM_386;
} else if #[cfg(target_arch = "riscv64")] {
const EXPECTED_ELF_MACHINE: u16 = elf::abi::EM_RISCV;
}
}
@ -486,7 +488,7 @@ fn write_rela(rela: &Rela, space: &ProcessAddressSpace, b: usize) -> Result<(),
let rel_field = rela.r_offset as usize + b;
let (value, width) = match rela.r_type {
elf::abi::R_X86_64_RELATIVE | elf::abi::R_AARCH64_RELATIVE => {
elf::abi::R_X86_64_RELATIVE | elf::abi::R_AARCH64_RELATIVE | elf::abi::R_RISCV_RELATIVE => {
// B + A
// Width: qword
(b as i64 + a, 8)
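// Worked example for the new R_RISCV_RELATIVE arm, with made-up numbers: if the
// image is mapped at B = 0x7000_0000 and the Rela addend is A = 0x1234, the
// 8-byte field at r_offset + B receives B + A = 0x7000_1234.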

View File

@ -217,13 +217,21 @@ where
// let tls_address = elf::clone_tls(space, image)?;
log::debug!("argument = {:#x}", argument);
log::debug!(
"argument = {:#x}, user_sp = {:#x}, stack: {:#x}..{:#x}",
argument,
user_sp,
virt_stack_base,
virt_stack_base + USER_STACK_PAGES * 0x1000 - TaskContextImpl::USER_STACK_EXTRA_ALIGN
);
let (address_space, asid) = space.as_address_with_asid();
TaskContext::user(UserContextInfo {
entry: image.entry,
argument,
stack_pointer: ptr.addr(),
thread_pointer: 0,
address_space: space.as_address_with_asid(),
address_space,
asid,
single_step: options.single_step,
})
}

View File

@ -194,10 +194,13 @@ impl Process {
let sp = TaskContextImpl::align_stack_for_entry(options.stack_top) as *mut usize;
let sp = unsafe { Thread::setup_stack_header(&space, sp, options.argument)? };
let (address_space, asid) = space.as_address_with_asid();
let info = UserContextInfo {
entry: options.entry as _,
argument: options.argument,
address_space: space.as_address_with_asid(),
address_space,
asid,
stack_pointer: sp.addr(),
single_step: false,
thread_pointer: 0,

View File

@ -46,7 +46,7 @@ impl CpuQueue {
pub fn new(index: usize) -> Self {
let idle = TaskContextImpl::kernel(
ArchitectureImpl::idle_task(),
CpuImpl::<Self>::local().id() as usize,
CpuImpl::<Self>::local().queue_index(),
)
.expect("Could not construct an idle task");

View File

@ -30,7 +30,7 @@ impl InterruptHandler for ArmTimer {
CNTP_TVAL_EL0.set(TICK_INTERVAL);
if Cpu::local().id() == 0 {
if Cpu::local().is_bootstrap() {
let last = LAST_TICKS.swap(count, Ordering::Relaxed);
let freq = CNTFRQ_EL0.get();
if let Some(delta) = count.checked_sub(last) {

View File

@ -10,7 +10,7 @@ use device_api::{
};
use kernel_arch::{Architecture, ArchitectureImpl};
use kernel_arch_i686::{gdt, mem::table::L3, PerCpuData};
use kernel_arch_x86::cpuid::{self, CpuFeatures, EcxFeatures, EdxFeatures};
use kernel_arch_x86::cpuid::{self, CpuFeatures, EcxFeatures, EdxFeatures, ExtEdxFeatures};
use libk::{
arch::Cpu,
config, debug,
@ -95,10 +95,12 @@ impl I686 {
CpuFeatures {
ecx: EcxFeatures::SSE3 | EcxFeatures::XSAVE | EcxFeatures::OSXSAVE,
edx: EdxFeatures::SSE2 | EdxFeatures::FXSR,
ext_edx: ExtEdxFeatures::empty(),
},
CpuFeatures {
ecx: EcxFeatures::empty(),
edx: EdxFeatures::FPU | EdxFeatures::SSE | EdxFeatures::PSE,
ext_edx: ExtEdxFeatures::empty(),
},
);
let will_features = will_features.expect("Could not enable needed CPU features");

View File

@ -7,10 +7,6 @@ use device_api::{
interrupt::{IpiDeliveryTarget, IpiMessage},
ResetDevice,
};
// use device_api::{
// interrupt::{IpiDeliveryTarget, IpiMessage},
// ResetDevice,
// };
use kernel_arch::{Architecture, ArchitectureImpl};
use libk_mm::table::EntryLevel;
@ -32,7 +28,17 @@ pub mod i686;
#[cfg(any(target_arch = "x86", rust_analyzer))]
pub use i686::{I686 as PlatformImpl, PLATFORM};
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "x86")))]
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub mod riscv64;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub use riscv64::{Riscv64 as PlatformImpl, PLATFORM};
#[cfg(not(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "riscv64",
target_arch = "x86"
)))]
compile_error!("Unsupported architecture");
/// Architecture-specific lowest level of page mapping

View File

@ -0,0 +1,82 @@
// vi:ft=asm:
.macro LOAD_PCREL label, register, symbol
\label: auipc \register, %pcrel_hi(\symbol)
addi \register, \register, %pcrel_lo(\label)
.endm
.pushsection .text.entry
.option push
.option rvc
.global __rv64_entry
.global __boot_header
.global __rv64_secondary_entry
.type __rv64_entry, @function
.type __boot_header, @object
__boot_header:
__rv64_entry:
// Look <s>ma</s>u-boot, I'm Linux!
.ascii "MZ" // Magic 0
j __rv64_real_entry // Jump to real entry (if entered by non-Linux bootloader)
.long 0
.quad 0x200000 // Offset from RAM start
.quad 2000000 // Image size TODO fill this by post-tooling
.quad 0 // Kernel flags
.long 0x2 // Header version
.long 0
.quad 0
.ascii "RISCV\x00\x00\x00" // Magic 1
.ascii "RSC\x05" // Magic 2
.long 0
.size __rv64_entry, . - __rv64_entry
.size __boot_header, . - __boot_header
.option pop
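// For reference, the header above follows the Linux RISC-V image header layout
// that u-boot's booti understands (offsets from __boot_header):
//   0x00  u32  code0        "MZ" magic plus a (compressed) jump to __rv64_real_entry
//   0x04  u32  code1
//   0x08  u64  text_offset  0x200000, offset of the image from the start of RAM
//   0x10  u64  image_size   placeholder, see the TODO above
//   0x18  u64  flags
//   0x20  u32  version      0x2
//   0x24  u32  reserved
//   0x28  u64  reserved
//   0x30  u64  magic        "RISCV\0\0\0"
//   0x38  u32  magic2       "RSC\x05"
//   0x3C  u32  reserved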
.option push
.option norvc
.type __rv64_real_entry, @function
__rv64_real_entry:
// a0 - bootstrap HART ID
// a1 - device tree blob
// mhartid == a0
csrw satp, zero
mv tp, zero
// Zero the .bss
LOAD_PCREL .L00, t0, __bss_start_phys
LOAD_PCREL .L01, t1, __bss_end_phys
1: bgeu t0, t1, 2f
sd zero, (t0)
addi t0, t0, 8 // advance by the 8 bytes written by sd above
j 1b
2:
// Setup boot stack and entry point
LOAD_PCREL .L02, sp, {boot_stack_bottom} + {boot_stack_size} - {kernel_virt_offset}
LOAD_PCREL .L03, t0, {entry_smode_lower} - {kernel_virt_offset}
jr t0
.size __rv64_real_entry, . - __rv64_real_entry
.type __rv64_secondary_entry, @function
__rv64_secondary_entry:
// a1 - context struct
csrw satp, zero
mv tp, zero
// Setup stack and jump to Rust entry code
ld sp, (a1)
mv a0, a1
mv a1, sp
LOAD_PCREL .L04, t0, {entry_smode_secondary_lower} - {kernel_virt_offset}
jr t0
.size __rv64_secondary_entry, . - __rv64_secondary_entry
.option pop
.popsection

View File

@ -0,0 +1,132 @@
use core::{
arch::global_asm,
sync::atomic::{compiler_fence, Ordering},
};
use kernel_arch::Architecture;
use kernel_arch_riscv64::{
mem::{self, KERNEL_VIRT_OFFSET},
ArchitectureImpl, CPU_COUNT,
};
use libk::{
debug,
fs::{devfs, sysfs},
task::runtime,
};
use libk_mm::{
address::{PhysicalAddress, Virtualize},
PageBox,
};
use crate::{kernel_main, kernel_secondary_main};
use super::{smp::SecondaryContext, PLATFORM};
const BOOT_STACK_SIZE: usize = 65536;
#[repr(C, align(0x10))]
struct BootStack<const N: usize>([u8; N]);
impl<const N: usize> BootStack<N> {
pub const fn zeroed() -> Self {
Self([0; N])
}
}
#[link_section = ".bss"]
static mut BOOT_STACK: BootStack<BOOT_STACK_SIZE> = BootStack::zeroed();
unsafe fn long_jump(pc: usize, sp: usize, a0: usize, a1: usize) -> ! {
core::arch::asm!(r#"
mv sp, {sp}
jr {pc}
"#,
in("a0") a0,
in("a1") a1,
pc = in(reg) pc,
sp = in(reg) sp,
options(noreturn)
);
}
unsafe extern "C" fn __rv64_bsp_entry_upper(bsp_hart_id: u64, dtb_physical: PhysicalAddress) -> ! {
debug::init_logger();
super::debug::register_sbi_debug();
log::info!("Starting riscv64 upper half");
if dtb_physical.is_zero() {
log::error!("No device tree provided");
// No DTB provided
ArchitectureImpl::halt();
}
if let Err(error) = PLATFORM.init_memory_management(dtb_physical) {
log::error!("Failed to initialize memory management: {error:?}");
ArchitectureImpl::halt();
}
sysfs::init();
devfs::init();
runtime::init_task_queue();
if let Err(error) = PLATFORM.init_platform(bsp_hart_id as _, 0, true) {
log::error!("Failed to initialize the platform: {error:?}");
ArchitectureImpl::halt();
}
kernel_main()
}
unsafe extern "C" fn __rv64_secondary_entry_upper(context: PhysicalAddress) -> ! {
let hart_id = {
let context = PageBox::<SecondaryContext>::from_physical_raw(context);
context.hart_id
};
let queue_index = CPU_COUNT.fetch_add(1, Ordering::Acquire);
if let Err(error) = PLATFORM.init_platform(hart_id as u32, queue_index, false) {
log::error!("Secondary hart init error: {error:?}");
ArchitectureImpl::halt();
}
kernel_secondary_main()
}
unsafe extern "C" fn __rv64_bsp_smode_entry_lower(a0: usize, a1: usize) -> ! {
ArchitectureImpl::set_interrupt_mask(true);
mem::enable_mmu();
let stack = (&raw const BOOT_STACK).addr() + KERNEL_VIRT_OFFSET;
let pc = __rv64_bsp_entry_upper as usize + KERNEL_VIRT_OFFSET;
let sp = stack + BOOT_STACK_SIZE;
long_jump(pc, sp, a0, a1)
}
unsafe extern "C" fn __rv64_secondary_smode_entry_lower(
context: PhysicalAddress,
sp: PhysicalAddress,
) -> ! {
let sp = sp.virtualize();
ArchitectureImpl::set_interrupt_mask(true);
compiler_fence(Ordering::Release);
mem::enable_mmu();
compiler_fence(Ordering::Acquire);
let pc = __rv64_secondary_entry_upper as usize + KERNEL_VIRT_OFFSET;
long_jump(pc, sp, context.into_usize(), 0)
}
global_asm!(
include_str!("entry.S"),
entry_smode_lower = sym __rv64_bsp_smode_entry_lower,
entry_smode_secondary_lower = sym __rv64_secondary_smode_entry_lower,
boot_stack_bottom = sym BOOT_STACK,
kernel_virt_offset = const KERNEL_VIRT_OFFSET,
boot_stack_size = const BOOT_STACK_SIZE,
);

View File

@ -0,0 +1,22 @@
use abi::error::Error;
use kernel_arch_riscv64::sbi;
use libk::debug::{self, DebugSink, LogLevel};
pub struct SbiDebugConsole;
impl DebugSink for SbiDebugConsole {
fn putc(&self, c: u8) -> Result<(), Error> {
sbi::sbi_debug_console_write_byte(c);
Ok(())
}
fn supports_control_sequences(&self) -> bool {
true
}
}
static SBI_DEBUG: SbiDebugConsole = SbiDebugConsole;
pub fn register_sbi_debug() {
debug::add_early_sink(&SBI_DEBUG, LogLevel::Debug);
}

View File

@ -0,0 +1,268 @@
use core::arch::global_asm;
use abi::{arch::SavedFrame, primitive_enum, process::Signal, SyscallFunction};
use kernel_arch::task::TaskFrame;
use libk::{device::external_interrupt_controller, task::thread::Thread};
use tock_registers::interfaces::ReadWriteable;
use kernel_arch_riscv64::{mem, registers::STVEC};
use crate::syscall;
use super::{smp, timer};
primitive_enum! {
pub enum Cause: usize {
MisalignedInstruction = 0,
InstructionAccessFault = 1,
IllegalInstruction = 2,
Breakpoint = 3,
LoadAddressMisaligned = 4,
LoadAccessFault = 5,
StoreAddressMisaligned = 6,
StoreAccessFault = 7,
EcallUmode = 8,
EcallSmode = 9,
EcallMmode = 11,
InstructionPageFault = 12,
LoadPageFault = 13,
StorePageFault = 15,
}
}
#[derive(Debug)]
#[repr(C)]
pub struct TrapFrame {
// General-purpose
pub ra: usize,
pub gp: usize,
pub tn: [usize; 7],
pub sn: [usize; 12],
pub an: [usize; 8],
// Special
pub sp: usize,
pub sstatus: usize,
pub sepc: usize,
pub stval: usize,
pub scause: usize,
pub tp: usize,
}
pub fn init_smode_exceptions() {
extern "C" {
static __rv64_smode_trap_vectors: u8;
}
let address = (&raw const __rv64_smode_trap_vectors).addr();
STVEC.set_base(address);
STVEC.modify(STVEC::MODE::Direct);
// STVEC.modify(STVEC::MODE::Vectored);
}
unsafe fn umode_exception_handler(frame: &mut TrapFrame) {
let thread = Thread::current();
let cause = Cause::try_from(frame.scause).ok();
let (dump, dump_tval) = match cause {
Some(Cause::LoadPageFault)
| Some(Cause::StorePageFault)
| Some(Cause::LoadAccessFault)
| Some(Cause::StoreAccessFault)
| Some(Cause::InstructionPageFault)
| Some(Cause::InstructionAccessFault) => {
thread.raise_signal(Signal::MemoryAccessViolation);
(true, true)
}
Some(Cause::EcallUmode) => {
let func = frame.an[0];
if func == usize::from(SyscallFunction::ExitSignal) {
unsafe {
syscall::handle_signal_exit(frame);
}
return;
}
let args = &frame.an[1..];
let result = syscall::raw_syscall_handler(func, args) as _;
mem::tlb_flush_full();
frame.an[0] = result;
frame.sepc += 4; // step over the ecall instruction (always 4 bytes, no compressed form)
(false, false)
}
_ => {
thread.raise_signal(Signal::MemoryAccessViolation);
(true, false)
}
};
if dump {
let process = thread.process();
log::warn!(
"U-mode exception cause={:?} ({}), epc={:#x}, sp={:#x}, tval={:#x}",
cause,
frame.scause,
frame.sepc,
frame.sp,
frame.stval
);
log::warn!("In thread {} ({:?})", thread.id, *thread.name.read());
log::warn!("Of process {} ({:?})", process.id, process.name);
if dump_tval {
let translation = process.space().translate(frame.stval).ok();
if let Some(physical) = translation {
log::warn!(" * tval translates to {physical:#x}");
} else {
log::warn!(" * tval does not translate");
}
}
}
}
unsafe fn smode_exception_handler(frame: &mut TrapFrame) {
let cause = Cause::try_from(frame.scause).expect("Invalid exception cause");
log::error!(
"S-mode exception cause={:?} ({}), tval={:#x}, epc={:#x}, sp={:#x}",
cause,
frame.scause,
frame.stval,
frame.sepc,
frame.sp
);
match cause {
Cause::LoadPageFault
| Cause::StorePageFault
| Cause::LoadAccessFault
| Cause::StoreAccessFault
| Cause::InstructionPageFault
| Cause::InstructionAccessFault => {
let translation = if let Some(space) = Thread::get_current()
.and_then(|t| t.try_get_process())
.map(|p| p.space())
{
space.translate(frame.stval).ok()
} else {
None
};
if let Some(physical) = translation {
log::warn!(" * tval translates to {physical:#x}");
} else {
log::warn!(" * tval does not translate");
}
}
_ => (),
}
panic!("Unhandled S-mode exception");
}
unsafe extern "C" fn smode_interrupt_handler(frame: *mut TrapFrame) {
let frame = &mut *frame;
let smode = frame.sstatus & (1 << 8) != 0;
match frame.scause & !(1 << 63) {
// S-mode software interrupt
1 => smp::handle_ipi(),
// S-mode timer interrupt
5 => timer::handle_interrupt(),
// S-mode external interrupt
9 => {
if let Ok(intc) = external_interrupt_controller() {
intc.handle_pending_irqs();
}
}
n => log::warn!("Unhandled interrupt #{n}"),
}
if !smode && let Some(thread) = Thread::get_current() {
thread.handle_pending_signals(frame);
}
}
unsafe extern "C" fn smode_general_trap_handler(frame: *mut TrapFrame) {
let frame = &mut *frame;
let interrupt = frame.scause & (1 << 63) != 0;
let smode = frame.sstatus & (1 << 8) != 0;
match (interrupt, smode) {
(true, _) => smode_interrupt_handler(frame),
(false, true) => smode_exception_handler(frame),
(false, false) => umode_exception_handler(frame),
}
if !smode && let Some(thread) = Thread::get_current() {
thread.handle_pending_signals(frame);
}
mem::tlb_flush_full();
}
impl TaskFrame for TrapFrame {
fn store(&self) -> SavedFrame {
SavedFrame {
ra: self.ra,
gp: self.gp,
tn: self.tn,
sn: self.sn,
an: self.an,
sp: self.sp,
ip: self.sepc,
tp: self.tp,
}
}
fn restore(&mut self, saved: &SavedFrame) {
self.ra = saved.ra;
self.gp = saved.gp;
self.tn = saved.tn;
self.sn = saved.sn;
self.an = saved.an;
self.sp = saved.sp;
self.sepc = saved.ip;
self.tp = saved.tp;
}
fn user_sp(&self) -> usize {
self.sp
}
fn user_ip(&self) -> usize {
self.sepc
}
fn argument(&self) -> u64 {
self.an[0] as u64
}
fn set_user_sp(&mut self, value: usize) {
self.sp = value;
}
fn set_user_ip(&mut self, value: usize) {
self.sepc = value;
}
fn set_argument(&mut self, value: u64) {
self.an[0] = value as usize;
}
fn set_single_step(&mut self, step: bool) {
let _ = step;
todo!()
}
fn set_return_value(&mut self, value: u64) {
self.an[0] = value as usize;
}
}
global_asm!(
include_str!("vectors.S"),
smode_general_handler = sym smode_general_trap_handler,
smode_interrupt_handler = sym smode_interrupt_handler,
);
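// Quick reference for the scause decoding done in smode_general_trap_handler:
// bit 63 selects interrupt vs. exception, the remaining bits carry the code.
//
//     let scause: usize = 0x8000_0000_0000_0009; // example: S-mode external interrupt
//     assert!(scause & (1 << 63) != 0);          // interrupt
//     assert_eq!(scause & !(1 << 63), 9);        // cause code 9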

View File

@ -0,0 +1,292 @@
#![allow(missing_docs)]
use core::sync::atomic::{self, AtomicU32, Ordering};
use abi::error::Error;
use alloc::sync::Arc;
use device_api::{
interrupt::{IpiDeliveryTarget, IpiMessage},
ResetDevice,
};
use device_tree::{driver::unflatten_device_tree, DeviceTree, DeviceTreeNodeExt};
use kernel_arch::Architecture;
use kernel_arch_riscv64::{
mem::{self, KERNEL_VIRT_OFFSET},
registers::{SIE, SSTATUS},
ArchitectureImpl, PerCpuData,
};
use libk::{arch::Cpu, config};
use libk_mm::{
address::PhysicalAddress,
phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
pointer::PhysicalRef,
table::{EntryLevel, EntryLevelExt},
};
use libk_util::OneTimeInit;
use tock_registers::interfaces::ReadWriteable;
use ygg_driver_pci::PciBusManager;
use crate::{
device::MACHINE_NAME,
fs::{Initrd, INITRD_DATA},
util::call_init_array,
};
use super::Platform;
pub mod boot;
pub mod debug;
pub mod exception;
pub mod smp;
pub mod timer;
pub static BOOT_HART_ID: AtomicU32 = AtomicU32::new(u32::MAX);
pub struct Riscv64 {
dt: OneTimeInit<DeviceTree<'static>>,
initrd: OneTimeInit<PhysicalRef<'static, [u8]>>,
}
pub static PLATFORM: Riscv64 = Riscv64 {
dt: OneTimeInit::new(),
initrd: OneTimeInit::new(),
};
#[derive(Debug, Clone, Copy)]
pub struct L3;
impl EntryLevel for L3 {
const SIZE: usize = 4096;
const SHIFT: usize = 12;
}
impl Platform for Riscv64 {
const KERNEL_VIRT_OFFSET: usize = KERNEL_VIRT_OFFSET;
type L3 = L3;
unsafe fn reset(&self) -> ! {
ArchitectureImpl::halt();
}
unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: IpiMessage) -> Result<bool, Error> {
smp::send_ipi(target, msg)?;
Ok(true)
}
unsafe fn start_application_processors(&self) {
// TODO asymmetric systems with different hart types are not yet supported.
// e.g. the JH7110 has two different types of cores
let dt = self.dt.get();
if let Err(error) = smp::start_secondary_harts(dt) {
log::error!("Couldn't start secondary harts: {error:?}");
}
}
fn register_reset_device(&self, reset: Arc<dyn ResetDevice>) -> Result<(), Error> {
let _ = reset;
Ok(())
}
}
impl Riscv64 {
unsafe fn init_memory_management(&'static self, dtb: PhysicalAddress) -> Result<(), Error> {
// Unmap the lower half
mem::setup_fixed_tables();
// Extract the size of the device tree
let dtb_size = {
let dtb_header = PhysicalRef::<u8>::map_slice(dtb, DeviceTree::MIN_HEADER_SIZE);
DeviceTree::read_totalsize(dtb_header.as_ref()).map_err(|_| Error::InvalidArgument)?
};
log::debug!("DTB: {:#x?}", dtb..dtb.add(dtb_size));
reserve_region(
"dtb",
PhysicalMemoryRegion {
base: dtb,
size: (dtb_size + 0xFFF) & !0xFFF,
},
);
let dtb_slice = PhysicalRef::<u8>::map_slice(dtb, dtb_size);
let dt = DeviceTree::from_raw(dtb_slice.as_ptr() as usize)?;
// Reserve memory regions specified in the DTB
log::info!("Reserved memory:");
for region in dt.reserved_regions() {
log::info!("* {:#x}..{:#x}", region.base, region.base.add(region.size));
reserve_region("mmode-resv", region);
}
// Setup initrd from the dt
let initrd = dt.chosen_initrd();
if let Some((start, end)) = initrd {
let aligned_start = start.page_align_down::<L3>();
let aligned_end = end.page_align_up::<L3>();
let size = aligned_end - aligned_start;
log::info!("Reserve initrd @ {:#x}..{:#x}", aligned_start, aligned_end);
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: aligned_start,
size,
},
);
}
// Initialize the physical memory
phys::init_from_iter(dt.memory_regions(), |_, _, _| Ok(()))?;
self.dt.init(dt);
// Setup initrd
if let Some((initrd_start, initrd_end)) = initrd {
let aligned_start = initrd_start.page_align_down::<L3>();
let aligned_end = initrd_end.page_align_up::<L3>();
let len = initrd_end - initrd_start;
let data = unsafe { PhysicalRef::map_slice(initrd_start, len) };
let initrd = self.initrd.init(data);
INITRD_DATA.init(Initrd {
phys_page_start: aligned_start,
phys_page_len: aligned_end - aligned_start,
data: initrd.as_ref(),
});
}
Ok(())
}
// TODO boot hart ID may not be zero?
unsafe fn init_platform(
&'static self,
hart_id: u32,
queue_index: usize,
is_bsp: bool,
) -> Result<(), Error> {
let per_cpu = PerCpuData {
tmp_t0: 0,
umode_sp: 0,
smode_sp: 0,
bootstrap: is_bsp,
queue_index,
};
Cpu::init_local(Some(hart_id), per_cpu);
assert_eq!(Cpu::local().id(), hart_id);
exception::init_smode_exceptions();
if is_bsp {
BOOT_HART_ID.store(hart_id, Ordering::Release);
call_init_array();
atomic::compiler_fence(Ordering::SeqCst);
libk::debug::init();
let dt = self.dt.get();
let bootargs = dt.chosen_bootargs().unwrap_or("");
config::parse_boot_arguments(bootargs);
// Create device tree sysfs nodes
device_tree::util::create_sysfs_nodes(dt);
let (machine_compatible, machine_name) = Self::machine_name(dt);
unflatten_device_tree(dt);
if let Some(machine_compatible) = machine_compatible {
Self::apply_board_workarounds(machine_compatible);
}
if let Err(error) = Self::setup_clock_timebase(dt) {
log::error!("Could not setup clock timebase from device tree: {error:?}");
}
if let Err(error) = Self::setup_chosen_stdout(dt) {
log::error!("chosen-stdout setup error: {error:?}");
} else {
libk::debug::disable_early_sinks();
}
if let Some(machine) = machine_name {
log::info!("Running on {machine:?}");
MACHINE_NAME.init(machine.into());
}
log::info!("Boot arguments: {bootargs:?}");
log::info!("Initializing riscv64 platform");
device_tree::driver::lazy_init(
|_| (),
|node, error| {
log::error!("{}: {error:?}", node.name().unwrap_or("<unknown>"));
},
);
device_tree::driver::init_irqs(
|_| (),
|node, error| {
log::error!(
"{}: irq init error: {error:?}",
node.name().unwrap_or("<unknown>")
);
},
);
PciBusManager::setup_bus_devices()?;
}
// TODO more granular control over how U-mode pages are accessed from S-mode
SSTATUS.modify(SSTATUS::SUM::SET);
// Setup the timer
SIE.modify(SIE::SSIE::SET + SIE::SEIE::SET);
timer::init_hart(is_bsp);
Ok(())
}
fn apply_board_workarounds(compatible: &str) {
#[allow(clippy::single_match)]
match compatible {
_ => (),
}
}
fn machine_name(dt: &'static DeviceTree) -> (Option<&'static str>, Option<&'static str>) {
(
dt.root().prop_string("compatible"),
dt.root().prop_string("model"),
)
}
fn setup_clock_timebase(dt: &'static DeviceTree) -> Result<(), Error> {
let cpus = dt.root().child("cpus").ok_or(Error::DoesNotExist)?;
let timebase_frequency = cpus
.prop_cell("timebase-frequency", 1)
.ok_or(Error::DoesNotExist)?;
timer::FREQUENCY.store(timebase_frequency, Ordering::Release);
log::info!("System timer frequency: {timebase_frequency}");
Ok(())
}
#[inline(never)]
unsafe fn setup_chosen_stdout(dt: &'static DeviceTree) -> Result<(), Error> {
// Get /chosen.stdout-path to get early debug printing
// TODO honor defined configuration value
let stdout = dt.chosen_stdout();
let stdout_path = stdout.map(|(p, _)| p);
let node = stdout_path.and_then(device_tree::driver::find_node);
if let Some(node) = node {
log::info!("Probe chosen stdout: {:?}", node.name());
node.force_init()?;
}
// No stdout
Ok(())
}
}

View File

@ -0,0 +1,144 @@
use core::{mem::MaybeUninit, sync::atomic::Ordering};
use abi::error::Error;
use device_api::interrupt::{IpiDeliveryTarget, IpiMessage};
use device_tree::{DeviceTree, DeviceTreeNodeExt};
use kernel_arch_riscv64::{mem, registers::SIP, sbi, ArchitectureImpl, CPU_COUNT};
use libk::arch::Cpu;
use libk_mm::{
address::{AsPhysicalAddress, PhysicalAddress},
PageBox,
};
use tock_registers::interfaces::ReadWriteable;
use crate::{arch::riscv64::BOOT_HART_ID, panic};
pub const SECONDARY_STACK_SIZE: usize = 32768;
#[repr(C)]
pub struct SecondaryContext {
// 0x00: must stay first, __rv64_secondary_entry loads its sp from (a1)
pub stack_top: PhysicalAddress,
pub stack: PageBox<[MaybeUninit<u8>]>,
pub hart_id: u64,
}
fn start_secondary_hart(hart_id: u64) -> Result<(), Error> {
extern "C" {
fn __rv64_secondary_entry();
}
let start_addr = __rv64_secondary_entry as usize;
let stack = PageBox::<u8>::new_uninit_slice(SECONDARY_STACK_SIZE)?;
let stack_top = unsafe { PageBox::as_physical_address(&stack).add(SECONDARY_STACK_SIZE) };
let context = PageBox::new(SecondaryContext {
stack,
stack_top,
hart_id,
})?;
let a1 = context.into_physical_raw();
log::info!(
"Start secondary hart {hart_id}: pc={:#x}, a1={:#x}",
start_addr,
a1,
);
let old = CPU_COUNT.load(Ordering::Acquire);
sbi::sbi_hart_start(hart_id, start_addr as u64, a1.into_u64())?;
while CPU_COUNT.load(Ordering::Acquire) == old {
core::hint::spin_loop();
}
Ok(())
}
pub fn start_secondary_harts(dt: &DeviceTree) -> Result<(), Error> {
log::info!("Setting up secondary harts");
let boot_hart_id = BOOT_HART_ID.load(Ordering::Acquire);
let cpus = dt.find_absolute("/cpus").ok_or(Error::DoesNotExist)?;
// Find the boot hart
let boot_hart_isa = cpus
.children()
.find(|child| child.prop_cell_usize("reg") == Some(boot_hart_id as usize))
.and_then(|child| child.prop_string("riscv,isa"))
.ok_or(Error::DoesNotExist)
.inspect_err(|_| log::error!("Could not find the boot hart {boot_hart_id} in the dtb"))?;
// Print a list of HARTs
for cpu in cpus.children() {
let Some(reg) = cpu.prop_cell_usize("reg") else {
continue;
};
let isa = cpu.prop_string("riscv,isa").unwrap_or("???");
let letter = if reg == boot_hart_id as usize {
'@'
} else if isa == boot_hart_isa {
'+'
} else {
'-'
};
log::info!("{letter} {reg}: {isa}");
}
for cpu in cpus.children() {
let Some(reg) = cpu.prop_cell_usize("reg") else {
continue;
};
let isa = cpu.prop_string("riscv,isa").unwrap_or("???");
if reg == boot_hart_id as usize || isa != boot_hart_isa {
continue;
}
if let Err(error) = start_secondary_hart(reg as u64) {
log::error!("hart {reg} start error: {error:?}");
}
}
// Can get rid of lower half now
unsafe { mem::unmap_lower_half() };
Ok(())
}
pub fn send_ipi(target: IpiDeliveryTarget, msg: IpiMessage) -> Result<(), Error> {
let local = Cpu::local();
let mut hart_mask = 0;
match target {
IpiDeliveryTarget::Specific(_) => todo!(),
IpiDeliveryTarget::ThisCpu => todo!(),
IpiDeliveryTarget::OtherCpus => {
ArchitectureImpl::for_each_hart(|hart_id, _, queue| {
if hart_id != local.id() {
hart_mask |= 1 << hart_id;
queue.push(msg);
}
});
}
}
log::debug!("send_ipi({hart_mask:#x}, {msg:?})");
sbi::sbi_send_ipi(hart_mask, 0)
}
pub fn handle_ipi() {
let local = Cpu::local();
if let Some(message) = local.get_ipi() {
match message {
IpiMessage::Panic => panic::panic_secondary(),
IpiMessage::Shutdown => todo!(),
}
} else {
log::warn!("Spurious IPI received");
}
SIP.modify(SIP::SSIP::CLEAR);
}
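// Worked example for send_ipi: with the local hart being 0 and harts 1..=3 online,
// the loop above builds hart_mask = (1 << 1) | (1 << 2) | (1 << 3) = 0xE, so
// sbi_send_ipi(0xE, 0) raises a supervisor software interrupt on harts 1, 2 and 3.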

View File

@ -0,0 +1,54 @@
use core::sync::atomic::{AtomicU64, Ordering};
use abi::time::NANOSECONDS_IN_SECOND;
use kernel_arch::task::Scheduler;
use kernel_arch_riscv64::{intrinsics, registers::SIE, sbi};
use libk::{arch::Cpu, task::runtime, time};
use tock_registers::interfaces::ReadWriteable;
pub static LAST_TICK: AtomicU64 = AtomicU64::new(0);
pub static FREQUENCY: AtomicU64 = AtomicU64::new(0);
// 1kHz
const TICK_RATE: u64 = 1000;
// TODO use stimecmp instead of sbi calls if sstc extension is available
pub fn handle_interrupt() {
let frequency = FREQUENCY.load(Ordering::Acquire);
// TODO merge this code with other system timer implementations
let now = intrinsics::rdtime();
sbi::sbi_set_timer(now.wrapping_add(frequency / TICK_RATE));
if Cpu::local().is_bootstrap() {
let last = LAST_TICK.swap(now, Ordering::Release);
if frequency != 0 {
if let Some(delta) = now.checked_sub(last) {
// Only update time from local CPU
let dt = delta * NANOSECONDS_IN_SECOND / frequency;
time::add_nanoseconds(dt);
}
}
LAST_TICK.store(now, Ordering::Release);
runtime::tick();
}
unsafe { Cpu::local().scheduler().yield_cpu() };
}
pub fn init_hart(is_bsp: bool) {
let frequency = FREQUENCY.load(Ordering::Acquire);
if frequency != 0 {
SIE.modify(SIE::STIE::SET);
let now = intrinsics::rdtime();
if is_bsp {
LAST_TICK.store(now, Ordering::Release);
}
sbi::sbi_set_timer(now.wrapping_add(frequency / TICK_RATE));
} else {
SIE.modify(SIE::STIE::CLEAR);
sbi::sbi_set_timer(u64::MAX);
}
}
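// Worked example, assuming a 4 MHz timebase (the real value is read from the
// device tree's `timebase-frequency` property in setup_clock_timebase):
//   tick increment = 4_000_000 / TICK_RATE = 4_000 counts per 1 ms tick
//   one tick converts back to 4_000 * NANOSECONDS_IN_SECOND / 4_000_000 = 1_000_000 ns = 1 ms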

View File

@ -0,0 +1,203 @@
// vi:ft=asm:
.section .text
// ra+gp, 7 tN, 12 sN, 8 aN
.set GP_REGS_SIZE, (2 + 7 + 12 + 8) * 8
// U-mode sp, sstatus, sepc, stval, scause, sscratch
.set CTL_REGS_SIZE, 6 * 8
.set TRAP_CONTEXT_SIZE, (GP_REGS_SIZE) + (CTL_REGS_SIZE)
.set REG_UMODE_SP, (GP_REGS_SIZE + 0 * 8)
.set REG_SSTATUS, (GP_REGS_SIZE + 1 * 8)
.set REG_SEPC, (GP_REGS_SIZE + 2 * 8)
.set REG_STVAL, (GP_REGS_SIZE + 3 * 8)
.set REG_SCAUSE, (GP_REGS_SIZE + 4 * 8)
.set REG_SSCRATCH, (GP_REGS_SIZE + 5 * 8)
.macro SAVE_GP_REGS
// Save all general-purpose registers, except:
// * sp (saved elsewhere)
// * tp (saved elsewhere)
sd ra, 0 * 8(sp)
sd gp, 1 * 8(sp)
sd t0, 2 * 8(sp)
sd t1, 3 * 8(sp)
sd t2, 4 * 8(sp)
sd t3, 5 * 8(sp)
sd t4, 6 * 8(sp)
sd t5, 7 * 8(sp)
sd t6, 8 * 8(sp)
sd s0, 9 * 8(sp)
sd s1, 10 * 8(sp)
sd s2, 11 * 8(sp)
sd s3, 12 * 8(sp)
sd s4, 13 * 8(sp)
sd s5, 14 * 8(sp)
sd s6, 15 * 8(sp)
sd s7, 16 * 8(sp)
sd s8, 17 * 8(sp)
sd s9, 18 * 8(sp)
sd s10, 19 * 8(sp)
sd s11, 20 * 8(sp)
sd a0, 21 * 8(sp)
sd a1, 22 * 8(sp)
sd a2, 23 * 8(sp)
sd a3, 24 * 8(sp)
sd a4, 25 * 8(sp)
sd a5, 26 * 8(sp)
sd a6, 27 * 8(sp)
sd a7, 28 * 8(sp)
.endm
.macro LOAD_GP_REGS
ld ra, 0 * 8(sp)
ld gp, 1 * 8(sp)
ld t0, 2 * 8(sp)
ld t1, 3 * 8(sp)
ld t2, 4 * 8(sp)
ld t3, 5 * 8(sp)
ld t4, 6 * 8(sp)
ld t5, 7 * 8(sp)
ld t6, 8 * 8(sp)
ld s0, 9 * 8(sp)
ld s1, 10 * 8(sp)
ld s2, 11 * 8(sp)
ld s3, 12 * 8(sp)
ld s4, 13 * 8(sp)
ld s5, 14 * 8(sp)
ld s6, 15 * 8(sp)
ld s7, 16 * 8(sp)
ld s8, 17 * 8(sp)
ld s9, 18 * 8(sp)
ld s10, 19 * 8(sp)
ld s11, 20 * 8(sp)
ld a0, 21 * 8(sp)
ld a1, 22 * 8(sp)
ld a2, 23 * 8(sp)
ld a3, 24 * 8(sp)
ld a4, 25 * 8(sp)
ld a5, 26 * 8(sp)
ld a6, 27 * 8(sp)
ld a7, 28 * 8(sp)
.endm
.macro SMODE_TRAP n, handler
.type __rv64_smode_trap_\n, @function
__rv64_smode_trap_\n:
// If coming from userspace, sscratch = kernel-mode tp
// If coming from kernelspace, sscratch = 0
csrrw tp, sscratch, tp
bnez tp, 1f
// Coming from S-mode
// tp = 0, sscratch contains kernel tp
csrr tp, sscratch
sd sp, 16(tp) // Set proper S-mode sp
1:
sd sp, 8(tp) // Store U-mode sp
ld sp, 16(tp) // Load S-mode sp
// Store pre-trap context
addi sp, sp, -(TRAP_CONTEXT_SIZE)
SAVE_GP_REGS
// Save special registers
ld t0, 8(tp)
csrr t1, sstatus
csrr t2, sepc
csrr t3, stval
csrr t4, scause
csrr t5, sscratch
sd t0, REG_UMODE_SP (sp)
sd t1, REG_SSTATUS (sp)
sd t2, REG_SEPC (sp)
sd t3, REG_STVAL (sp)
sd t4, REG_SCAUSE (sp)
sd t5, REG_SSCRATCH (sp)
// Reset sscratch to zero so that a nested S-mode -> S-mode exception
// is handled properly
csrw sscratch, zero
mv a0, sp
call \handler
// Return from exception
ld t0, REG_SSTATUS (sp)
andi t0, t0, (1 << 8)
bnez t0, 2f
// Return to U-mode
// Restore SSCRATCH to a proper value
csrw sscratch, tp
2:
// Return to S-mode
// Restore special registers
ld t0, REG_SSTATUS (sp)
ld t1, REG_SEPC (sp)
csrw sstatus, t0
csrw sepc, t1
// Restore general-purpose registers
LOAD_GP_REGS
ld tp, REG_SSCRATCH (sp)
ld sp, REG_UMODE_SP (sp)
sret
.size __rv64_smode_trap_\n, . - __rv64_smode_trap_\n
.endm
.option push
.option norvc
.global __rv64_smode_trap_vectors
.type __rv64_smode_trap_vectors, @function
.p2align 4
__rv64_smode_trap_vectors:
j __rv64_smode_trap_0
j __rv64_smode_trap_1
j __rv64_smode_trap_2
j __rv64_smode_trap_3
j __rv64_smode_trap_4
j __rv64_smode_trap_5
j __rv64_smode_trap_6
j __rv64_smode_trap_7
j __rv64_smode_trap_8
j __rv64_smode_trap_9
j __rv64_smode_trap_10
j __rv64_smode_trap_11
j __rv64_smode_trap_12
j __rv64_smode_trap_13
j __rv64_smode_trap_14
j __rv64_smode_trap_15
.size __rv64_smode_trap_vectors, . - __rv64_smode_trap_vectors
SMODE_TRAP 0, {smode_general_handler}
SMODE_TRAP 1, {smode_interrupt_handler}
SMODE_TRAP 2, {smode_interrupt_handler}
SMODE_TRAP 3, {smode_interrupt_handler}
SMODE_TRAP 4, {smode_interrupt_handler}
SMODE_TRAP 5, {smode_interrupt_handler}
SMODE_TRAP 6, {smode_interrupt_handler}
SMODE_TRAP 7, {smode_interrupt_handler}
SMODE_TRAP 8, {smode_interrupt_handler}
SMODE_TRAP 9, {smode_interrupt_handler}
SMODE_TRAP 10, {smode_interrupt_handler}
SMODE_TRAP 11, {smode_interrupt_handler}
SMODE_TRAP 12, {smode_interrupt_handler}
SMODE_TRAP 13, {smode_interrupt_handler}
SMODE_TRAP 14, {smode_interrupt_handler}
SMODE_TRAP 15, {smode_interrupt_handler}
.option pop

View File

@ -1,6 +1,6 @@
//! Bus devices
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub mod pci_host_ecam_generic;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
pub mod simple_bus;

View File

@ -33,6 +33,10 @@ impl Device for SimpleBus {
impl Bus for SimpleBus {
fn map_range(&self, bus_range: Range<u64>) -> Option<Range<u64>> {
if self.ranges.is_empty() {
return Some(bus_range);
}
let start = bus_range.start;
let end = bus_range.end;
@ -66,6 +70,7 @@ device_tree_driver! {
let cell_sizes = (child_address_cells, parent_address_cells, child_size_cells);
let mut items = Vec::new();
for (child_address, parent_address, length) in ranges.iter_cells(cell_sizes) {
let child_range = child_address..child_address + length;
items.push((child_range, parent_address));

View File

@ -0,0 +1,4 @@
//! Interrupt controller drivers
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub mod riscv_plic;

View File

@ -0,0 +1,334 @@
//! RISC-V PLIC driver
use core::sync::atomic::Ordering;
use abi::{error::Error, primitive_enum};
use alloc::{sync::Arc, vec::Vec};
use device_api::{
device::Device,
interrupt::{
ExternalInterruptController, FixedInterruptTable, FullIrq, InterruptHandler,
InterruptTable, Irq, IrqOptions,
},
};
use device_tree::{
driver::{
device_tree_driver, lookup_phandle, DeviceTreeInterruptController, Node, ProbeContext,
},
DeviceTreePropertyRead, TProp,
};
use libk::{arch::Cpu, device::register_external_interrupt_controller};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo};
use libk_util::{sync::spin_rwlock::IrqSafeRwLock, OneTimeInit};
use tock_registers::{
interfaces::{Readable, Writeable},
register_structs,
registers::{ReadOnly, ReadWrite},
};
use crate::arch::riscv64::BOOT_HART_ID;
const MAX_IRQS: usize = 1024;
const ENABLE_BASE: usize = 0x2000;
const ENABLE_STRIDE: usize = 0x80;
const CONTROL_BASE: usize = 0x200000;
const CONTROL_STRIDE: usize = 0x1000;
primitive_enum! {
enum ContextMode: u32 {
SoftU = 0,
SoftS = 1,
SoftH = 2,
SoftM = 3,
TimerU = 4,
TimerS = 5,
TimerH = 6,
TimerM = 7,
ExternalU = 8,
ExternalS = 9,
ExternalH = 10,
ExternalM = 11,
}
}
register_structs! {
#[allow(non_snake_case)]
CommonRegs {
(0x0000 => _0),
(0x0004 => PRIORITY: [ReadWrite<u32>; 1023]),
(0x1000 => PENDING: [ReadOnly<u32>; 32]),
(0x1080 => @END),
}
}
register_structs! {
#[allow(non_snake_case)]
ContextEnableRegs {
(0x0000 => ENABLE: [ReadWrite<u32>; 32]),
(0x0080 => @END),
}
}
register_structs! {
#[allow(non_snake_case)]
ContextControlRegs {
(0x0000 => THRESHOLD: ReadWrite<u32>),
(0x0004 => CLAIM: ReadWrite<u32>),
(0x0008 => @END),
}
}
struct Context {
enable: IrqSafeRwLock<DeviceMemoryIo<'static, ContextEnableRegs>>,
control: IrqSafeRwLock<DeviceMemoryIo<'static, ContextControlRegs>>,
// TODO scale the table depending on effective MAX_IRQS value
table: IrqSafeRwLock<FixedInterruptTable<64>>,
}
struct Inner {
#[allow(unused)]
common: IrqSafeRwLock<DeviceMemoryIo<'static, CommonRegs>>,
}
struct HartContext {
hart: u32,
index: usize,
context: OneTimeInit<Context>,
}
/// RISC-V Platform-Level Interrupt Controller (PLIC) device
pub struct Plic {
base: PhysicalAddress,
max_irqs: usize,
// hart id -> context map
context_map: Vec<HartContext>,
inner: OneTimeInit<Inner>,
}
impl Plic {
fn hart_context(&self, hart: u32) -> Option<&HartContext> {
self.context_map.iter().find(|c| c.hart == hart)
}
fn validate_irq(&self, irq: Irq) -> Result<u32, Error> {
let Irq::External(irq) = irq else {
log::error!("plic: irq {irq:?} is not an external interrupt");
return Err(Error::InvalidArgument);
};
if irq == 0 {
log::error!("plic: irq cannot be zero");
return Err(Error::InvalidArgument);
}
if irq as usize >= self.max_irqs {
log::error!("plic: irq ({}) >= max_irqs ({})", irq, self.max_irqs);
return Err(Error::InvalidArgument);
}
Ok(irq)
}
}
impl ContextEnableRegs {
fn enable_irq(&self, irq: u32) {
let reg = &self.ENABLE[irq as usize / 32];
reg.set(reg.get() | (1 << (irq % 32)));
}
}
impl ExternalInterruptController for Plic {
fn enable_irq(&self, irq: Irq) -> Result<(), Error> {
// TODO balance IRQs between harts?
let irq = self.validate_irq(irq)?;
let bsp_hart_id = BOOT_HART_ID.load(Ordering::Acquire);
let context = self
.hart_context(bsp_hart_id)
.ok_or(Error::InvalidArgument)
.inspect_err(|_| log::error!("plic: no context for hart {bsp_hart_id}"))?
.context
.get();
let enable = context.enable.write();
enable.enable_irq(irq);
Ok(())
}
fn register_irq(
&self,
irq: Irq,
_options: IrqOptions,
handler: Arc<dyn InterruptHandler>,
) -> Result<(), Error> {
let bsp_hart_id = BOOT_HART_ID.load(Ordering::Acquire);
let irq = self.validate_irq(irq)?;
let context = self
.hart_context(bsp_hart_id)
.ok_or(Error::InvalidArgument)
.inspect_err(|_| log::error!("plic: no context for hart {bsp_hart_id}"))?
.context
.get();
let mut table = context.table.write();
log::info!(
"Bind irq #{irq} -> hart {bsp_hart_id}, {:?}",
handler.display_name()
);
table.insert(irq as usize, handler)?;
// TODO
Ok(())
}
fn handle_pending_irqs(&self) {
let hart_id = Cpu::local().id();
let Some(context) = self.hart_context(hart_id) else {
log::warn!("plic: irq on hart without a context: {hart_id}");
return;
};
let context = context.context.get();
let control = context.control.write();
let table = context.table.read();
loop {
let irq = control.CLAIM.get();
if irq == 0 {
break;
}
if let Some(handler) = table.handler(irq as usize) {
handler.clone().handle_irq(None);
} else {
log::warn!("plic: no handler for IRQ #{irq}");
}
// Done servicing
control.CLAIM.set(irq);
}
}
}
impl Device for Plic {
unsafe fn init(self: Arc<Self>) -> Result<(), Error> {
log::info!("Initialize RISC-V PLIC");
let common = DeviceMemoryIo::<CommonRegs>::map(self.base, Default::default())?;
for i in 0..self.max_irqs - 1 {
common.PRIORITY[i].set(3);
}
for context in self.context_map.iter() {
let enable_offset = ENABLE_BASE + context.index * ENABLE_STRIDE;
let control_offset = CONTROL_BASE + context.index * CONTROL_STRIDE;
log::info!(
"* HART {}: context {}, enable={:#x}, control={:#x}",
context.hart,
context.index,
enable_offset,
control_offset
);
let enable = DeviceMemoryIo::<ContextEnableRegs>::map(
self.base.add(enable_offset),
Default::default(),
)?;
let control = DeviceMemoryIo::<ContextControlRegs>::map(
self.base.add(control_offset),
Default::default(),
)?;
for i in 0..self.max_irqs.div_ceil(32) {
enable.ENABLE[i].set(0);
}
control.THRESHOLD.set(0);
context.context.init(Context {
enable: IrqSafeRwLock::new(enable),
control: IrqSafeRwLock::new(control),
table: IrqSafeRwLock::new(FixedInterruptTable::new()),
});
}
self.inner.init(Inner {
common: IrqSafeRwLock::new(common),
});
register_external_interrupt_controller(self);
Ok(())
}
fn display_name(&self) -> &str {
"RISC-V PLIC"
}
}
impl DeviceTreeInterruptController for Plic {
fn map_interrupt(&self, property: &TProp, offset: usize) -> Option<FullIrq> {
let num = property.read_cell(offset, 1)?;
Some(FullIrq {
irq: Irq::External(num as _),
options: IrqOptions::default(),
})
}
}
fn map_context_to_hart(target: u32) -> Option<u32> {
let hart_intc = lookup_phandle(target, false)?;
let parent = hart_intc.parent()?;
let reg = parent.prop_usize("reg")?;
Some(reg as u32)
}
device_tree_driver! {
compatible: ["starfive,jh7110-plic", "sifive,plic-1.0.0", "riscv,plic0"],
driver: {
fn probe(&self, node: &Arc<Node>, context: &ProbeContext) -> Option<Arc<dyn Device>> {
let base = node.map_base(context, 0)?;
let ndev = node.prop_usize("riscv,ndev")?;
let iext = node.property("interrupts-extended")?;
let max_irqs = MAX_IRQS.min(ndev);
// Parse the context -> hart mapping, only select S-mode external interrupts
let mut context_map = Vec::new();
let mut mapped_harts = 0u64;
for (context, (target, mode)) in iext.iter_cells((1, 1)).enumerate() {
let (Ok(mode), Some(hart_id)) = (
ContextMode::try_from(mode as u32),
map_context_to_hart(target as u32)
) else {
continue;
};
if mode != ContextMode::ExternalS {
continue;
}
// Don't map the same hart to two contexts
if mapped_harts & (1 << hart_id) == 0 {
mapped_harts |= 1 << hart_id;
context_map.push(HartContext {
hart: hart_id,
index: context,
context: OneTimeInit::new()
});
}
}
if context_map.is_empty() {
log::warn!("{:?}: could not map any contexts to harts", node.name());
return None;
}
let intc = Arc::new(Plic {
base,
max_irqs,
context_map,
inner: OneTimeInit::new(),
});
node.make_interrupt_controller(intc.clone());
Some(intc)
}
}
}
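// Worked example for the per-context register layout used in `init` above,
// assuming the S-mode external interrupts of some hart map to PLIC context 1
// (the real mapping comes from `interrupts-extended`): that context's enable
// bank sits at ENABLE_BASE + 1 * ENABLE_STRIDE = 0x2080 and its
// threshold/claim block at CONTROL_BASE + 1 * CONTROL_STRIDE = 0x201000,
// both relative to the PLIC base.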

View File

@ -6,6 +6,7 @@ use libk_util::OneTimeInit;
pub mod bus;
pub mod clock;
pub mod display;
pub mod interrupt;
pub mod power;
pub mod serial;
// pub mod timer;

View File

@ -5,3 +5,8 @@ pub mod bcm2835_aux_uart;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
pub mod pl011;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub mod ns16550a;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
pub mod snps_dw_apb_uart;

View File

@ -0,0 +1,205 @@
//! 16550-style UART device driver
use abi::{error::Error, io::TerminalOptions};
use alloc::sync::Arc;
use device_api::{
device::Device,
interrupt::{FullIrq, InterruptHandler},
};
use device_tree::driver::{device_tree_driver, Node, ProbeContext};
use libk::{
debug::DebugSink,
device::{external_interrupt_controller, manager::DEVICE_REGISTRY},
vfs::{Terminal, TerminalInput, TerminalOutput},
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
register_bitfields!(
u8,
IER [
/// Received data ready
RDR OFFSET(0) NUMBITS(1) [],
/// Transmitter holding register empty
THRE OFFSET(1) NUMBITS(1) [],
/// Receiver line status
RLS OFFSET(2) NUMBITS(1) [],
/// Modem status
MS OFFSET(3) NUMBITS(1) [],
],
LSR [
/// Data ready indicator
DR OFFSET(0) NUMBITS(1) [],
/// Transmitter FIFO empty
TFE OFFSET(5) NUMBITS(1) [],
],
LCR [
BITS OFFSET(0) NUMBITS(2) [
Bits8 = 3
],
STOPBITS OFFSET(2) NUMBITS(1) [],
PARITY OFFSET(3) NUMBITS(1) [],
PARITY_EVEN OFFSET(4) NUMBITS(1) [],
PARITY_STICK OFFSET(5) NUMBITS(1) [],
BREAK OFFSET(6) NUMBITS(1) [],
DLAB OFFSET(7) NUMBITS(1) [],
]
);
register_structs! {
#[allow(non_snake_case)]
Regs {
// Read: receive buffer, write: transmit buffer
(0x00 => DR: ReadWrite<u8>),
(0x01 => IER: ReadWrite<u8, IER::Register>),
// Read: interrupt identification, write: FIFO control
(0x02 => FCR: ReadWrite<u8>),
(0x03 => LCR: ReadWrite<u8, LCR::Register>),
(0x04 => MCR: WriteOnly<u8>),
(0x05 => LSR: ReadOnly<u8, LSR::Register>),
(0x06 => MSR: ReadOnly<u8>),
(0x07 => _0),
(0x08 => @END),
}
}
struct Io {
regs: DeviceMemoryIo<'static, Regs>,
}
struct Inner {
io: IrqSafeSpinlock<Io>,
}
/// ns16550a-style UART driver
pub struct Ns16550a {
inner: OneTimeInit<Arc<Terminal<Inner>>>,
base: PhysicalAddress,
irq: FullIrq,
}
impl Io {
fn init(&mut self) {
self.regs.LCR.write(
LCR::BITS::Bits8 + LCR::BREAK::CLEAR + LCR::STOPBITS::CLEAR + LCR::PARITY::CLEAR,
);
self.regs.IER.set(0);
}
fn send(&mut self, byte: u8) {
while self.regs.LSR.matches_all(LSR::TFE::CLEAR) {
core::hint::spin_loop();
}
self.regs.DR.set(byte);
}
fn handle_irq(&self) -> Option<u8> {
let status = self.regs.FCR.get() & 0xF;
if status == 0b1100 || status == 0b0100 {
Some(self.regs.DR.get())
} else {
None
}
}
}
impl Device for Ns16550a {
unsafe fn init(self: Arc<Self>) -> Result<(), Error> {
log::info!("Init ns16550a @ {:#x}", self.base);
let mut io = Io {
regs: DeviceMemoryIo::map(self.base, Default::default())?,
};
io.init();
let input = TerminalInput::with_capacity(64)?;
let output = Inner {
io: IrqSafeSpinlock::new(io),
};
let terminal = self.inner.init(Arc::new(Terminal::from_parts(
TerminalOptions::const_default(),
input,
output,
)));
DEVICE_REGISTRY
.serial_terminal
.register(terminal.clone(), Some(self.clone()))
.ok();
Ok(())
}
unsafe fn init_irq(self: Arc<Self>) -> Result<(), Error> {
let intc = external_interrupt_controller()?;
intc.register_irq(self.irq.irq, self.irq.options, self.clone())?;
intc.enable_irq(self.irq.irq)?;
let io = self.inner.get().output().io.lock();
io.regs.IER.modify(IER::RDR::SET);
Ok(())
}
fn display_name(&self) -> &str {
"ns16550a UART"
}
}
impl InterruptHandler for Ns16550a {
fn handle_irq(self: Arc<Self>, _vector: Option<usize>) -> bool {
let inner = self.inner.get();
let output = inner.output();
let byte = output.io.lock().handle_irq();
if let Some(byte) = byte {
inner.write_to_input(byte);
true
} else {
false
}
}
}
impl TerminalOutput for Inner {
fn write(&self, byte: u8) -> Result<(), Error> {
self.io.lock().send(byte);
Ok(())
}
fn write_multiple(&self, bytes: &[u8]) -> Result<usize, Error> {
let mut lock = self.io.lock();
for &byte in bytes {
lock.send(byte);
}
Ok(bytes.len())
}
}
impl DebugSink for Ns16550a {
fn putc(&self, c: u8) -> Result<(), Error> {
self.inner.get().putc_to_output(c)
}
fn supports_control_sequences(&self) -> bool {
true
}
}
device_tree_driver!(
compatible: ["ns16550a"],
driver: {
fn probe(&self, node: &Arc<Node>, context: &ProbeContext) -> Option<Arc<dyn Device>> {
let base = node.map_base(context, 0)?;
log::debug!("ns16550a base = {base:#x}");
let irq = node.interrupt(0)?;
Some(Arc::new(Ns16550a {
base,
irq,
inner: OneTimeInit::new(),
}))
}
}
);

View File

@ -0,0 +1,230 @@
//! Synopsys DesignWare 8250 driver
use abi::{error::Error, io::TerminalOptions};
use alloc::sync::Arc;
use device_api::{
device::Device,
interrupt::{FullIrq, InterruptHandler},
};
use device_tree::driver::{device_tree_driver, Node, ProbeContext};
use libk::{
debug::DebugSink,
device::{external_interrupt_controller, manager::DEVICE_REGISTRY},
vfs::{Terminal, TerminalInput, TerminalOutput},
};
use libk_mm::{address::PhysicalAddress, device::DeviceMemoryIo};
use libk_util::{sync::IrqSafeSpinlock, OneTimeInit};
use tock_registers::{
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields, register_structs,
registers::{ReadOnly, ReadWrite, WriteOnly},
};
register_bitfields! {
u32,
IER [
PTIME OFFSET(7) NUMBITS(1) [],
EDSSI OFFSET(3) NUMBITS(1) [],
ELSI OFFSET(2) NUMBITS(1) [],
// Transmit buffer available
ETBEI OFFSET(1) NUMBITS(1) [],
// Receive data available
ERBFI OFFSET(0) NUMBITS(1) [],
],
LSR [
// Data ready bit
DR OFFSET(0) NUMBITS(1) [],
// Transmitter holding register empty
THRE OFFSET(5) NUMBITS(1) [],
]
}
register_structs! {
#[allow(non_snake_case)]
Regs {
// DLAB=0, Write: transmitter holding register/Read: receiver buffer register
// DLAB=1, Read/Write: divisor latch low
(0x000 => DR: ReadWrite<u32>),
// DLAB=0: Interrupt enable register
// DLAB=1: Divisor latch high
(0x004 => IER: ReadWrite<u32, IER::Register>),
// Read: interrupt identification register/Write: frame control register
(0x008 => IIR: ReadWrite<u32>),
// Line control register
(0x00C => LCR: ReadWrite<u32>),
// Modem control register
(0x010 => MCR: ReadWrite<u32>),
// Line status register
(0x014 => LSR: ReadOnly<u32, LSR::Register>),
// Modem status register
(0x018 => MSR: ReadOnly<u32>),
// Scratchpad
(0x01C => SCR: ReadWrite<u32>),
// Low-power divisor latch low
(0x020 => LPDLL: ReadWrite<u32>),
// Low-power divisor latch high
(0x024 => LPDLH: ReadWrite<u32>),
(0x028 => _0),
// Shadow receive/transmit buffer
(0x030 => SDR: [ReadWrite<u32>; 16]),
(0x070 => FAR: ReadWrite<u32>),
(0x074 => TFR: ReadOnly<u32>),
(0x078 => RFW: WriteOnly<u32>),
(0x07C => USR: ReadOnly<u32>),
(0x080 => TFL: ReadOnly<u32>),
(0x084 => RFL: ReadOnly<u32>),
(0x088 => SRR: WriteOnly<u32>),
(0x08C => SRTS: ReadWrite<u32>),
(0x090 => SBCR: ReadWrite<u32>),
(0x094 => SDMAM: ReadWrite<u32>),
(0x098 => SFE: ReadWrite<u32>),
(0x09C => SRT: ReadWrite<u32>),
(0x0A0 => STET: ReadWrite<u32>),
(0x0A4 => HTX: ReadWrite<u32>),
(0x0A8 => DMASA: WriteOnly<u32>),
(0x0AC => _1),
(0x0F4 => CPR: ReadOnly<u32>),
(0x0F8 => UCV: ReadOnly<u32>),
(0x0FC => CTR: ReadOnly<u32>),
(0x100 => @END),
}
}
struct Io {
regs: DeviceMemoryIo<'static, Regs>,
}
struct Inner {
io: IrqSafeSpinlock<Io>,
}
/// Synopsys DesignWare 8250 UART
pub struct DwUart {
base: PhysicalAddress,
irq: FullIrq,
inner: OneTimeInit<Arc<Terminal<Inner>>>,
}
impl Io {
fn send(&mut self, byte: u8) {
// TODO
if byte == b'\n' {
self.send(b'\r');
}
while !self.regs.LSR.matches_all(LSR::THRE::SET) {
core::hint::spin_loop();
}
self.regs.DR.set(byte as u32);
}
fn init(&mut self) {
self.regs.IER.set(0);
}
fn handle_irq(&mut self) -> Option<u8> {
let status = self.regs.IIR.get();
if status & 0xF == 4 {
Some(self.regs.DR.get() as u8)
} else {
None
}
}
}
impl InterruptHandler for DwUart {
fn handle_irq(self: Arc<Self>, _vector: Option<usize>) -> bool {
let inner = self.inner.get();
let output = inner.output();
let byte = output.io.lock().handle_irq();
if let Some(byte) = byte {
inner.write_to_input(byte);
true
} else {
false
}
}
}
impl Device for DwUart {
unsafe fn init(self: Arc<Self>) -> Result<(), Error> {
let regs = DeviceMemoryIo::map(self.base, Default::default())?;
let mut io = Io { regs };
io.init();
let input = TerminalInput::with_capacity(64)?;
let output = Inner {
io: IrqSafeSpinlock::new(io),
};
let terminal = self.inner.init(Arc::new(Terminal::from_parts(
TerminalOptions::const_default(),
input,
output,
)));
DEVICE_REGISTRY
.serial_terminal
.register(terminal.clone(), Some(self.clone()))
.ok();
Ok(())
}
unsafe fn init_irq(self: Arc<Self>) -> Result<(), Error> {
let intc = external_interrupt_controller()?;
intc.register_irq(self.irq.irq, Default::default(), self.clone())?;
intc.enable_irq(self.irq.irq)?;
let output = self.inner.get().output();
let io = output.io.lock();
io.regs.IER.modify(IER::ERBFI::SET);
Ok(())
}
fn display_name(&self) -> &str {
"Synopsys DesignWare 8250 UART"
}
}
impl DebugSink for DwUart {
fn putc(&self, c: u8) -> Result<(), Error> {
self.inner.get().putc_to_output(c)
}
fn supports_control_sequences(&self) -> bool {
true
}
}
impl TerminalOutput for Inner {
fn write(&self, byte: u8) -> Result<(), Error> {
self.io.lock().send(byte);
Ok(())
}
fn write_multiple(&self, bytes: &[u8]) -> Result<usize, Error> {
let mut lock = self.io.lock();
for &byte in bytes {
lock.send(byte);
}
Ok(bytes.len())
}
}
device_tree_driver! {
compatible: ["snps,dw-apb-uart"],
driver: {
fn probe(&self, node: &Arc<Node>, context: &ProbeContext) -> Option<Arc<dyn Device>> {
let base = node.map_base(context, 0)?;
let irq = node.interrupt(0)?;
Some(Arc::new(DwUart {
base,
irq,
inner: OneTimeInit::new()
}))
}
}
}
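For reference, the init() above only masks interrupts and leaves the baud rate as programmed by the bootloader. On a 16550-compatible UART the divisor latch sits behind DLAB (LCR bit 7), with divisor = uart_clock / (16 * baud); a minimal sketch of such a helper, assuming an externally known uart_clock_hz (neither the helper nor the clock value is part of this driver), could look like this:
fn set_baud(regs: &Regs, uart_clock_hz: u32, baud: u32) {
    // Hypothetical helper, not part of DwUart: program the divisor latch.
    let divisor = uart_clock_hz / (16 * baud);
    regs.LCR.set(0x80 | 0x03);           // DLAB=1, 8 data bits, no parity, 1 stop bit
    regs.DR.set(divisor & 0xFF);         // DLAB=1: divisor latch low
    regs.IER.set((divisor >> 8) & 0xFF); // DLAB=1: divisor latch high
    regs.LCR.set(0x03);                  // DLAB=0: back to data/interrupt registers
}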

View File

@ -58,7 +58,7 @@ pub fn kinit() -> Result<(), Error> {
// TODO move this to userspace so it doesn't block the init process, maybe lazy-load on first
// attempt to load a module?
#[cfg(not(target_arch = "aarch64"))]
#[cfg(all(not(target_arch = "aarch64"), not(target_arch = "riscv64")))]
{
use libk::module::load_kernel_symbol_table;

View File

@ -29,6 +29,7 @@
clippy::match_ref_pats,
clippy::match_single_binding,
clippy::missing_transmute_annotations,
clippy::modulo_one,
async_fn_in_trait
)]
#![deny(missing_docs)]

View File

@ -48,7 +48,7 @@ fn dump_panic_info(cpu: &LocalCpu, pi: &core::panic::PanicInfo) {
debug::panic_log!(sink, "Kernel panic");
if let Some(location) = pi.location() {
debug::panic_log!(sink, " ar {}:{}:\n", location.file(), location.line());
debug::panic_log!(sink, " at {}:{}:\n", location.file(), location.line());
} else {
debug::panic_log!(sink, ":\n");
}

View File

@ -53,6 +53,9 @@ pub(crate) fn map_memory(
space.allocate(None, len, backing, attrs)
})
.inspect_err(|error| {
log::warn!("map_memory({len}) failed: {error:?}");
})
}
pub(crate) fn unmap_memory(address: usize, len: usize) -> Result<(), Error> {

View File

@ -65,16 +65,15 @@ pub unsafe fn enter() -> ! {
static AP_CAN_ENTER: SpinFence = SpinFence::new();
let mut cpu = Cpu::local();
let id = cpu.id();
if id != 0 {
if !cpu.is_bootstrap() {
// Wait until BSP allows us to enter
AP_CAN_ENTER.wait_one();
} else {
AP_CAN_ENTER.signal();
}
let queue = CpuQueue::for_cpu(cpu.id() as usize);
let queue = CpuQueue::for_cpu(cpu.queue_index());
cpu.set_scheduler(queue);
queue.enter()

View File

@ -52,6 +52,10 @@ pub const fn arch_str() -> &'static str {
{
"x86_64"
}
#[cfg(target_arch = "riscv64")]
{
"riscv64"
}
#[cfg(target_arch = "x86")]
{
"i686"

View File

@ -9,16 +9,18 @@ use std::{
use clap::Parser;
use elf::{
abi::{EM_386, EM_AARCH64, EM_X86_64, PT_LOAD},
abi::{EM_386, EM_AARCH64, EM_RISCV, EM_X86_64, PT_LOAD},
endian::AnyEndian,
ElfStream,
};
use memtables::any::AnyTables;
use riscv64::Riscv64Builder;
use thiserror::Error;
use crate::{aarch64::AArch64Builder, x86_64::X8664Builder};
mod aarch64;
mod riscv64;
mod x86_64;
#[derive(Error, Debug)]
@ -115,6 +117,7 @@ fn find_tables<F: Read + Seek>(elf: &mut ElfStream<AnyEndian, F>) -> Result<(u64
let section_size = match elf.ehdr.e_machine {
EM_AARCH64 => size_of::<memtables::aarch64::FixedTables>(),
EM_X86_64 => size_of::<memtables::x86_64::FixedTables>(),
EM_RISCV => size_of::<memtables::riscv64::FixedTables>(),
_ => unimplemented!(),
};
let (shdrs, strtab) = elf.section_headers_with_strtab()?;
@ -212,31 +215,18 @@ fn build_tables<F: Read + Seek>(
println!("Kernel image range: {:#x?}", kernel_start..kernel_end);
println!("KERNEL_VIRT_OFFSET = {:#x}", kernel_virt_offset);
let gen_data = GenData {
kernel_virt_offset,
kernel_start,
kernel_end,
table_offset,
table_physical_address,
};
let (tables, table_offset, symbol_table) = match elf.ehdr.e_machine {
EM_X86_64 => X8664Builder::new(
elf,
GenData {
kernel_virt_offset,
kernel_start,
kernel_end,
table_offset,
table_physical_address,
},
)?
.build()
.map(into_any),
EM_AARCH64 => AArch64Builder::new(
elf,
GenData {
kernel_virt_offset,
kernel_start,
kernel_end,
table_offset,
table_physical_address,
},
)?
.build()
.map(into_any),
EM_X86_64 => X8664Builder::new(elf, gen_data)?.build().map(into_any),
EM_AARCH64 => AArch64Builder::new(elf, gen_data)?.build().map(into_any),
EM_RISCV => Riscv64Builder::new(elf, gen_data)?.build().map(into_any),
_ => todo!(),
}?;

View File

@ -0,0 +1,172 @@
use std::{
collections::HashMap,
io::{Read, Seek},
mem::offset_of,
};
use elf::{
abi::{PF_W, PF_X, PT_LOAD},
endian::AnyEndian,
ElfStream,
};
use memtables::riscv64::{FixedTables, PageAttributes, KERNEL_L3_COUNT};
use crate::{extract_symbols, GenData, GenError};
pub struct Riscv64Builder<F: Read + Seek> {
elf: ElfStream<AnyEndian, F>,
data: GenData,
tables: FixedTables,
l1i_lower: usize,
l1i: usize,
l2i_start: usize,
l2i_end: usize,
}
const L1_SHIFT: usize = 30;
const L1_PAGE_SIZE: usize = 1 << L1_SHIFT;
const L2_SHIFT: usize = 21;
const L2_PAGE_SIZE: usize = 1 << L2_SHIFT;
const L3_SHIFT: usize = 12;
const L3_PAGE_SIZE: usize = 1 << L3_SHIFT;
fn segment_attributes(f: u32) -> PageAttributes {
let mut attrs = PageAttributes::R | PageAttributes::A | PageAttributes::D;
if f & PF_W != 0 {
attrs |= PageAttributes::W;
}
if f & PF_X != 0 {
attrs |= PageAttributes::X;
}
attrs
}
fn shift_pfn(physical: u64) -> u64 {
physical >> 2
}
impl<F: Read + Seek> Riscv64Builder<F> {
pub fn new(elf: ElfStream<AnyEndian, F>, data: GenData) -> Result<Self, GenError> {
assert_eq!(data.kernel_virt_offset & (L1_PAGE_SIZE as u64 - 1), 0);
let l1i = (data.kernel_start >> L1_SHIFT) as usize & 0x1FF;
let l1i_lower =
((data.kernel_start - data.kernel_virt_offset) >> L1_SHIFT) as usize & 0x1FF;
let end_l1i = ((data.kernel_end + L1_PAGE_SIZE as u64) >> L1_SHIFT) as usize & 0x1FF;
if end_l1i < l1i || end_l1i - l1i > 1 {
// TODO return error
panic!("Kernel image crosses a 1GiB boundary");
}
let l2i_start = (data.kernel_start >> L2_SHIFT) as usize & 0x1FF;
let l2i_end = ((data.kernel_end + L2_PAGE_SIZE as u64 - 1) >> L2_SHIFT) as usize & 0x1FF;
assert!(l2i_end >= l2i_start);
if l2i_end - l2i_start > KERNEL_L3_COUNT {
            panic!("kernel image needs more than KERNEL_L3_COUNT L3 tables");
}
Ok(Self {
elf,
data,
tables: FixedTables::zeroed(),
l1i_lower,
l1i,
l2i_start,
l2i_end,
})
}
pub fn build(mut self) -> Result<(FixedTables, u64, HashMap<String, usize>), GenError> {
assert_eq!(offset_of!(FixedTables, l1), 0);
let l2_physical_address =
self.data.table_physical_address + offset_of!(FixedTables, kernel_l2) as u64;
// L1 -> L2
self.tables.l1.data[self.l1i_lower] =
shift_pfn(l2_physical_address) | PageAttributes::V.bits();
self.tables.l1.data[self.l1i] = shift_pfn(l2_physical_address) | PageAttributes::V.bits();
// L2 -> L3s
for l2i in self.l2i_start..self.l2i_end {
let l3_table_index = l2i - self.l2i_start;
let l3_physical_address = self.data.table_physical_address
+ offset_of!(FixedTables, kernel_l3s) as u64
+ (l3_table_index * 0x1000) as u64;
self.tables.kernel_l2.data[l2i] =
shift_pfn(l3_physical_address) | PageAttributes::V.bits();
}
let symbol_table = extract_symbols(&mut self.elf)?;
for (i, segment) in self.elf.segments().into_iter().enumerate() {
if segment.p_type != PT_LOAD
|| segment.p_vaddr != segment.p_paddr + self.data.kernel_virt_offset
{
continue;
}
let aligned_virt_start = segment.p_vaddr & !(L3_PAGE_SIZE as u64 - 1);
let aligned_virt_end = (segment.p_vaddr + segment.p_memsz + L3_PAGE_SIZE as u64 - 1)
& !(L3_PAGE_SIZE as u64 - 1);
let aligned_phys_start = segment.p_paddr & !(L3_PAGE_SIZE as u64 - 1);
let count = (aligned_virt_end - aligned_virt_start) / 0x1000;
let attrs = segment_attributes(segment.p_flags);
println!(
"{}: {:#x?} -> {:#x} {}",
i,
aligned_virt_start..aligned_virt_end,
aligned_phys_start,
attrs
);
Self::map_segment(
self.l2i_start,
&mut self.tables,
aligned_virt_start,
aligned_phys_start,
count as usize,
attrs,
)?;
}
Ok((self.tables, self.data.table_offset, symbol_table))
}
fn map_segment(
start_l2i: usize,
tables: &mut FixedTables,
vaddr_start: u64,
paddr_start: u64,
count: usize,
flags: PageAttributes,
) -> Result<(), GenError> {
for index in 0..count {
let vaddr = vaddr_start + (index * L3_PAGE_SIZE) as u64;
let paddr = paddr_start + (index * L3_PAGE_SIZE) as u64;
let entry = shift_pfn(paddr) | (PageAttributes::V | flags).bits();
let l2i = ((vaddr >> L2_SHIFT) as usize & 0x1FF) - start_l2i;
let l3i = (vaddr >> L3_SHIFT) as usize & 0x1FF;
let l3 = &mut tables.kernel_l3s[l2i];
if l3.data[l3i] != 0 {
if l3.data[l3i] != entry {
todo!();
} else {
continue;
}
}
l3.data[l3i] = entry;
}
Ok(())
}
}
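A note on shift_pfn() above: in Sv39 a page-table entry stores the PPN starting at bit 10, so for a page-aligned physical address (physical >> 12) << 10 is the same as physical >> 2. A minimal sketch of the equivalence, based on the RISC-V privileged specification rather than code from this PR:
fn make_leaf_pte(physical: u64, flags: u64) -> u64 {
    // Illustrative only: PPN occupies PTE bits 10.., flag bits occupy 0..10.
    assert_eq!(physical & 0xFFF, 0, "physical address must be 4 KiB aligned");
    debug_assert_eq!((physical >> 12) << 10, physical >> 2);
    ((physical >> 12) << 10) | flags
}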

View File

@ -17,6 +17,11 @@ pub(crate) mod i686;
#[cfg(target_arch = "x86")]
use i686 as arch_impl;
#[cfg(target_arch = "riscv64")]
pub(crate) mod riscv64;
#[cfg(target_arch = "riscv64")]
use riscv64 as arch_impl;
pub use arch_impl::SavedFrame;
pub trait FrameOps {

View File

@ -0,0 +1,30 @@
#![allow(missing_docs)]
use super::FrameOps;
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct SavedFrame {
// General-purpose
pub ra: usize,
pub gp: usize,
pub tn: [usize; 7],
pub sn: [usize; 12],
pub an: [usize; 8],
// Special
pub sp: usize,
pub ip: usize,
pub tp: usize,
}
impl FrameOps for SavedFrame {
fn set_user_ip(&mut self, value: usize) {
let _ = value;
todo!()
}
fn user_ip(&self) -> usize {
todo!()
}
}

View File

@ -10,6 +10,7 @@ use device::{QemuDevice, QemuSerialTarget};
pub mod aarch64;
pub mod i386;
pub mod riscv64;
pub mod x86_64;
pub mod device;
@ -82,6 +83,12 @@ impl Qemu<i386::QemuI386> {
}
}
impl Qemu<riscv64::QemuRiscv64> {
pub fn new_riscv64() -> Self {
Qemu::new(riscv64::QemuRiscv64)
}
}
impl<A: Architecture> Qemu<A> {
pub fn new(arch: A) -> Self {
Self {

75
lib/qemu/src/riscv64.rs Normal file
View File

@ -0,0 +1,75 @@
use std::{path::PathBuf, process::Command};
use crate::{Architecture, IntoArgs};
#[derive(Debug)]
pub enum Cpu {
Rv64,
}
#[derive(Debug)]
pub enum Machine {
Virt,
}
#[derive(Debug)]
pub struct QemuRiscv64;
#[derive(Debug)]
pub enum Image {
OpenSBI {
bios: PathBuf,
kernel: PathBuf,
initrd: PathBuf,
},
}
impl IntoArgs for Machine {
fn add_args(&self, command: &mut Command) {
command.arg("-M");
match self {
Self::Virt => command.arg("virt"),
};
}
}
impl IntoArgs for Cpu {
fn add_args(&self, command: &mut Command) {
command.arg("-cpu");
match self {
Self::Rv64 => command.arg("rv64,a=true,zicsr=true,zifencei=true"),
};
}
}
impl IntoArgs for Image {
fn add_args(&self, command: &mut Command) {
match self {
Self::OpenSBI {
bios,
kernel,
initrd,
} => {
command.arg("-kernel");
command.arg(kernel);
command.arg("-initrd");
command.arg(initrd);
command.arg("-bios");
command.arg(bios);
}
}
}
}
impl Architecture for QemuRiscv64 {
type CpuType = Cpu;
type ImageType = Image;
type MachineType = Machine;
const DEFAULT_COMMAND: &'static str = "qemu-system-riscv64";
}
impl IntoArgs for QemuRiscv64 {
fn add_args(&self, command: &mut Command) {
let _ = command;
}
}
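A minimal usage sketch of this builder, mirroring what run_riscv64() in xtask/src/qemu.rs does later in this diff; the function and paths are placeholders, not part of the crate:
fn riscv64_virt_command(bios: PathBuf, kernel: PathBuf, initrd: PathBuf) -> Command {
    let mut qemu = crate::Qemu::new_riscv64();
    qemu.with_machine(Machine::Virt)
        .with_cpu(Cpu::Rv64)
        .with_memory_megabytes(1024)
        .with_boot_image(Image::OpenSBI { bios, kernel, initrd });
    qemu.into_command()
}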

View File

@ -26,9 +26,14 @@ mod i686;
#[cfg(any(target_arch = "x86", rust_analyzer))]
use i686 as imp;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
mod riscv64;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
use riscv64 as imp;
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
mod variant1;
#[cfg(any(target_arch = "aarch64", rust_analyzer))]
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64", rust_analyzer))]
use variant1 as layout;
#[cfg(any(target_arch = "x86", target_arch = "x86_64", rust_analyzer))]
@ -58,6 +63,13 @@ pub struct Dtv {
specific: Vec<*mut c_void>,
}
#[allow(missing_docs)]
pub struct TlsInfo {
base: usize,
tp: usize,
module0_offset: Option<usize>,
}
struct TcbHeader {
#[allow(unused)]
self_pointer: usize,
@ -165,8 +177,10 @@ impl Dtv {
/// Will panic if key == 0.
/// Will panic if key is longer than the DTV itself.
pub fn set_specific(&mut self, key: usize, value: *mut c_void, grow: bool) {
self.try_set_specific(key, value, grow)
.expect("Dtv::set_specific(): invalid key")
if self.try_set_specific(key, value, grow).is_err() {
crate::debug_trace!("Dtv::set_specific(): invalid key {key}, grow={grow}");
panic!("Dtv::set_specific(): invalid key {key})")
}
}
/// Sets a DTV entry for a thread-specific key.
@ -176,7 +190,7 @@ impl Dtv {
value: *mut c_void,
grow: bool,
) -> Result<(), Error> {
if key > self.entries.len() && grow {
if key > self.specific.len() && grow {
self.specific.resize(key, null_mut());
}
if !Self::set_key(&mut self.specific, key, value) {
@ -198,7 +212,13 @@ impl Dtv {
/// Will panic if key == 0.
/// Will panic if key is larger than the DTV itself.
pub fn get(&self, key: usize) -> *mut c_void {
Self::get_key(&self.entries, key).expect("Out-of-bounds DTV key")
match Self::get_key(&self.entries, key) {
Some(value) => value,
None => {
crate::debug_trace!("Dtv::get(): out-of-bounds DTV key: {key}");
panic!("Dtv::get(): out-of-bounds DTV key: {key}");
}
}
}
/// Sets a DTV entry, growing the DTV allocation if necessary
@ -211,7 +231,8 @@ impl Dtv {
self.entries.resize(key, null_mut());
}
if !Self::set_key(&mut self.entries, key, value) {
panic!("Dtv::set(): invalid key");
crate::debug_trace!("Dtv::set(): invalid key {key}");
panic!("Dtv::set(): invalid key {key}");
}
}
}
@ -226,9 +247,9 @@ pub fn init_tls_from_auxv<'a, I: Iterator<Item = &'a AuxValue>>(
};
if force || !tls_image.already_initialized {
let (base, tp) = clone_tls(&tls_image)?;
unsafe { set_thread_pointer(tp) }?;
setup_dtv(&tls_image, base)?;
let tls = clone_tls(&tls_image)?;
unsafe { set_thread_pointer(tls.tp) }?;
setup_dtv(&tls_image, &tls)?;
}
Ok(Some(tls_image))
@ -243,9 +264,9 @@ pub fn init_tls(image: Option<&TlsImage>, force: bool) -> Result<(), Error> {
};
if force || !image.already_initialized {
let (base, tp) = clone_tls(image)?;
unsafe { set_thread_pointer(tp) }?;
setup_dtv(image, base)?;
let tls = clone_tls(image)?;
unsafe { set_thread_pointer(tls.tp) }?;
setup_dtv(image, &tls)?;
}
Ok(())
@ -260,16 +281,31 @@ fn get_tcb_mut() -> &'static mut TcbHeader {
unsafe { &mut *get_tcb_raw() }
}
fn setup_dtv(image: &TlsImage, tls_base: usize) -> Result<(), Error> {
fn setup_dtv(image: &TlsImage, tls_info: &TlsInfo) -> Result<(), Error> {
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
const DTV_OFFSET: usize = 0x800;
#[cfg(any(not(target_arch = "riscv64"), rust_analyzer))]
const DTV_OFFSET: usize = 0;
let dtv = get_dtv();
// Executable itself
    // NOTE: if module 1 is specified again by the dynamic loader, this entry will be
    // overridden with whatever the dynamic loader says
if let Some(module0_offset) = tls_info.module0_offset {
dtv.set(
1,
core::ptr::without_provenance_mut(tls_info.base + module0_offset + DTV_OFFSET),
);
}
if image.module_offsets.is_empty() {
return Ok(());
}
let dtv = get_dtv();
for &(module_id, module_offset) in image.module_offsets.iter() {
assert!(module_offset < image.full_size);
dtv.set(
module_id,
core::ptr::with_exposed_provenance_mut(tls_base + module_offset),
core::ptr::with_exposed_provenance_mut(tls_info.base + module_offset + DTV_OFFSET),
);
}
Ok(())
@ -287,14 +323,12 @@ pub fn get_dtv() -> &'static mut Dtv {
}
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
any(target_arch = "x86", target_arch = "x86_64", target_arch = "riscv64"),
any(feature = "__tls_get_addr", rust_analyzer)
))]
#[no_mangle]
unsafe extern "C" fn __tls_get_addr(index: *mut usize) -> *mut c_void {
let module_id = index.read();
let offset = index.add(1).read();
assert!(module_id > 0);
get_dtv().get(module_id).add(offset)
}
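For context, the pointer handed to __tls_get_addr above is expected to reference a (module id, offset) pair, conventionally the ELF tls_index structure. A rough illustration; the struct below is an assumption for clarity, not a type defined in this tree:
#[repr(C)]
struct TlsIndex {
    module_id: usize, // read via index.read()
    offset: usize,    // read via index.add(1).read()
}
// __tls_get_addr() then returns dtv[module_id] + offset; on riscv64 the 0x800
// DTV_OFFSET bias is already folded into each DTV entry by setup_dtv() above.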

View File

@ -0,0 +1,19 @@
#![allow(missing_docs)]
use abi::error::Error;
pub fn get_thread_pointer() -> usize {
let output: usize;
unsafe { core::arch::asm!("mv {0}, tp", out(reg) output) };
output
}
/// Writes `value` into `tp` register.
///
/// # Safety
///
/// Usual pointer rules apply.
pub unsafe fn set_thread_pointer(value: usize) -> Result<(), Error> {
core::arch::asm!("mv tp, {0}", in(reg) value);
Ok(())
}
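These accessors mirror how compiled TLS accesses work on riscv64: the compiler emits tp-relative addressing, so with the Variant I layout used below a thread-local at static offset N ends up at tp + N. Illustrative sketch only, not code from this tree:
let tp = get_thread_pointer();
// A thread-local at offset N within the static TLS block lives at tp + N
// (Variant I places the TLS blocks at non-negative offsets from tp).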

View File

@ -1,9 +1,10 @@
use abi::{
error::Error,
mem::{MappingFlags, MappingSource},
process::ExitCode,
};
use super::TlsImage;
use super::{TlsImage, TlsInfo};
// Variant I TLS layout:
//
@ -16,14 +17,16 @@ use super::TlsImage;
/// Creates a new TLS image in the process memory, copying data from the TLS master copy (if any).
/// Returns the resulting TLS info: base address, thread pointer and module 0 offset.
pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
pub fn clone_tls(image: &TlsImage) -> Result<TlsInfo, Error> {
const TCB_SIZE: usize = size_of::<usize>() * 2;
if !image.align.is_power_of_two() {
panic!("TLS layout not aligned to a power of two: {}", image.align)
crate::debug_trace!("TLS layout not aligned to a power of two: {}", image.align);
unsafe { crate::sys::exit_process(ExitCode::Exited(1)) };
}
if image.align > 0x1000 {
panic!("TODO: TLS alignment larger than a page size is not supported");
crate::debug_trace!("TODO: TLS alignment larger than a page size is not supported");
unsafe { crate::sys::exit_process(ExitCode::Exited(1)) };
}
// TCB size, padded to align. Also the start of the first module
@ -72,7 +75,11 @@ pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
crate::debug_trace!("TLS: base={:#x}, tp={:#x}", base, tp);
Ok((base, tp))
Ok(TlsInfo {
base,
tp,
module0_offset: Some(tcb_aligned_size),
})
}
pub(super) fn get_tcb_raw(tp: usize) -> *mut u8 {
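A worked example of the Variant I layout produced by clone_tls() above, assuming image.align == 16 and a single TLS module:
// base                       base + tcb_aligned_size (== 16 here: 2 * usize padded to 16)
// |  TCB (2 * usize)       | module 0 TLS data ...
//
// module0_offset == tcb_aligned_size, so setup_dtv() points DTV entry 1 at
// base + 16 (plus the 0x800 DTV_OFFSET on riscv64).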

View File

@ -3,7 +3,7 @@ use abi::{
mem::{MappingFlags, MappingSource},
};
use super::TlsImage;
use super::{TlsImage, TlsInfo};
// Variant II TLS layout:
//
@ -14,7 +14,7 @@ use super::TlsImage;
/// Creates a new TLS image in the process memory, copying data from the TLS master copy (if any).
/// Returns the resulting TLS info: base address and thread pointer.
pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
pub fn clone_tls(image: &TlsImage) -> Result<TlsInfo, Error> {
// Basically, the layout is:
// * align(image.full_size) below the TP
// * tcb_size starting with the TP
@ -73,7 +73,12 @@ pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
crate::debug_trace!("TLS: base={:#x}, tp={:#x}", base, tp);
Ok((base, tp))
Ok(TlsInfo {
base,
tp,
module0_offset: None,
})
}
// In Variant II, the TP points directly at the TCB start

View File

@ -9,8 +9,11 @@ mod x86_64;
#[cfg(any(target_arch = "x86", rust_analyzer))]
#[macro_use]
mod i686;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
#[macro_use]
mod riscv64;
#[allow(missing_docs)]
#[allow(missing_docs, unreachable_code)]
mod generated {
// Import all the necessary types from generated ABI
use abi::{

View File

@ -0,0 +1,51 @@
/// 64-bit RISC-V implementation of the syscall macro
#[macro_export]
macro_rules! syscall {
($num:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!("ecall", inlateout("a0") a0);
a0
}};
($num:expr, $a1:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1);
a0
}};
($num:expr, $a1:expr, $a2:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1, in("a2") $a2);
a0
}};
($num:expr, $a1:expr, $a2:expr, $a3:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1, in("a2") $a2, in("a3") $a3);
a0
}};
($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!(
"ecall",
inlateout("a0") a0,
in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4
);
a0
}};
($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!(
"ecall",
inlateout("a0") a0,
in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4, in("a5") $a5
);
a0
}};
($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr, $a6:expr $(,)?) => {{
let mut a0 = usize::from($num);
core::arch::asm!(
"ecall",
inlateout("a0") a0,
in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4, in("a5") $a5, in("a6") $a6
);
a0
}};
}
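A hypothetical call site for the macro above; the constant and arguments are placeholders rather than ABI items from this tree, and since the expansion contains core::arch::asm!, the invocation must sit in an unsafe context:
// Placeholder syscall number; anything accepted by usize::from() works here.
const SYS_EXAMPLE: usize = 1;
fn example(fd: usize, buf: &[u8]) -> usize {
    unsafe { syscall!(SYS_EXAMPLE, fd, buf.as_ptr() as usize, buf.len()) }
}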

View File

@ -0,0 +1,4 @@
init:1:wait:/sbin/rc default
logd:1:once:/sbin/logd
user:1:once:/sbin/login /dev/ttyS0

View File

@ -71,17 +71,13 @@ fn run(binary: &str, args: &[String]) -> Result<!, Error> {
});
for module in layout.segments.iter() {
if module.object_id == 0 {
continue;
}
auxv.push(AuxValue {
tag: auxv::TLS_MODULE_ID,
val: module.object_id as _,
val: module.object_id as u64 + 1,
});
auxv.push(AuxValue {
tag: auxv::TLS_MODULE_OFFSET,
val: module.offset as _,
val: module.offset as u64,
});
}
}
@ -124,6 +120,12 @@ unsafe fn enter(entry: extern "C" fn(usize), argument: usize) -> ! {
options(att_syntax, noreturn)
);
}
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
{
let _ = entry;
let _ = argument;
todo!()
}
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
{
entry(argument);

View File

@ -8,6 +8,8 @@ mod aarch64;
mod x86_64;
#[cfg(any(target_arch = "x86", rust_analyzer))]
mod i686;
#[cfg(any(target_arch = "riscv64", rust_analyzer))]
mod riscv64;
pub enum RelaValue {
DQWord(i64, i64),

View File

@ -0,0 +1,33 @@
use elf::relocation::{Rel, Rela};
use crate::{error::Error, object::ResolvedSymbol, state::State};
use super::{RelValue, RelaValue, Relocation};
impl Relocation for Rel {
type Value = RelValue;
fn resolve(
&self,
_state: &State,
_name: &str,
_symbol: &ResolvedSymbol,
_load_base: usize,
) -> Result<Option<Self::Value>, Error> {
todo!()
}
}
impl Relocation for Rela {
type Value = RelaValue;
fn resolve(
&self,
_state: &State,
_name: &str,
_symbol: &ResolvedSymbol,
_load_base: usize,
) -> Result<Option<Self::Value>, Error> {
todo!()
}
}

View File

@ -13,7 +13,7 @@ cfg_if! {
} else if #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] {
mod variant2;
pub use variant2::TlsLayoutImpl;
} else if #[cfg(target_arch = "aarch64")] {
} else if #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] {
mod variant1;
pub use variant1::TlsLayoutImpl;
}

View File

@ -152,9 +152,20 @@ impl<'e> CargoBuilder<'e> {
"-Zunstable-options",
]);
match env.board {
Board::virt | Board::default => command.arg("--features=aarch64_board_virt"),
Board::raspi4b => command.arg("--features=aarch64_board_raspi4b"),
match (env.arch, env.board) {
(Arch::aarch64, Board::virt | Board::default) => {
command.arg("--features=aarch64_board_virt");
}
(Arch::aarch64, Board::raspi4b) => {
command.arg("--features=aarch64_board_raspi4b");
}
(Arch::riscv64, Board::virt | Board::default) => {
command.arg("--features=riscv64_board_virt");
}
(Arch::riscv64, Board::jh7110) => {
command.arg("--features=riscv64_board_jh7110");
}
(_, _) => (),
};
if env.profile == Profile::Release {

View File

@ -39,9 +39,11 @@ impl CheckAction {
pub struct ToolsBuilt(pub PathBuf);
pub struct KernelBuilt(pub PathBuf);
pub struct KernelProcessed(pub KernelBuilt);
pub struct KernelBin(pub PathBuf);
pub struct InitrdGenerated(pub PathBuf);
pub struct ImageBuilt(pub PathBuf);
pub enum AllBuilt {
Riscv64(KernelBin, InitrdGenerated),
X86_64(ImageBuilt),
AArch64(KernelProcessed, InitrdGenerated),
I686(ImageBuilt),
@ -61,6 +63,26 @@ pub fn build_kernel(env: &BuildEnv, _: AllOk) -> Result<KernelBuilt, Error> {
Ok(KernelBuilt(env.kernel_output_dir.join("yggdrasil-kernel")))
}
pub fn make_kernel_bin(
env: &BuildEnv,
kernel: KernelProcessed,
_: AllOk,
) -> Result<KernelBin, Error> {
log::info!("Building yggdrasil-kernel.bin");
let kernel_bin = env.kernel_output_dir.join("yggdrasil-kernel.bin");
let status = Command::new("llvm-objcopy")
.args(["-O", "binary"])
.arg(kernel.0 .0)
.arg(&kernel_bin)
.status()?;
if !status.success() {
return Err(Error::ExternalCommandFailed);
}
Ok(KernelBin(kernel_bin))
}
pub fn generate_kernel_tables(
symbol_path: impl AsRef<Path>,
kernel: KernelBuilt,
@ -104,6 +126,7 @@ pub fn build_all(env: &BuildEnv) -> Result<AllBuilt, Error> {
// Build target-specific image
let image = match env.arch {
Arch::riscv64 => AllBuilt::Riscv64(make_kernel_bin(env, kernel, check)?, initrd),
Arch::aarch64 => AllBuilt::AArch64(kernel, initrd),
Arch::x86_64 => AllBuilt::X86_64(x86_64::build_image(env, kernel, initrd)?),
Arch::i686 => AllBuilt::I686(i686::build_image(env, kernel, initrd)?),

View File

@ -56,11 +56,16 @@ fn check_commands_aarch64() -> Result<CommandsOk, Error> {
])
}
fn check_commands_riscv64() -> Result<CommandsOk, Error> {
check_command_list([("ld64.lld", "Install LLVM")])
}
pub fn check_build_env(arch: Arch) -> Result<AllOk, Error> {
let user_toolchain = check_user_toolchain()?;
let commands = match arch {
Arch::x86_64 => check_commands_x86_64()?,
Arch::aarch64 => check_commands_aarch64()?,
Arch::riscv64 => check_commands_riscv64()?,
Arch::i686 => check_commands_i686()?,
};
Ok(AllOk(commands, user_toolchain))

View File

@ -22,6 +22,12 @@ pub struct AArch64TargetConfig {
pub components: BuildComponents,
}
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Riscv64TargetConfig {
pub components: BuildComponents,
}
#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct X86_64TargetConfig {
@ -38,6 +44,7 @@ pub struct I686TargetConfig {
#[serde(default)]
pub struct TargetConfig {
pub aarch64: AArch64TargetConfig,
pub riscv64: Riscv64TargetConfig,
pub x86_64: X86_64TargetConfig,
pub i686: I686TargetConfig,
}
@ -62,6 +69,7 @@ pub enum Profile {
pub enum Arch {
#[default]
aarch64,
riscv64,
x86_64,
i686,
}
@ -73,9 +81,14 @@ pub enum Board {
#[default]
default,
// Generic aarch64/riscv board
virt,
// AArch64 boards
raspi4b,
virt,
// RISC-V boards
jh7110,
}
#[derive(Debug)]
@ -127,6 +140,8 @@ impl BuildEnv {
) -> Self {
let kernel_triple = match (arch, board) {
(Arch::aarch64, Board::virt | Board::default) => "aarch64-unknown-qemu",
(Arch::riscv64, Board::virt | Board::default) => "riscv64-unknown-qemu",
(Arch::riscv64, Board::jh7110) => "riscv64-unknown-jh7110",
(Arch::aarch64, Board::raspi4b) => "aarch64-unknown-raspi4b",
(Arch::x86_64, Board::default) => "x86_64-unknown-none",
(Arch::i686, Board::default) => "i686-unknown-none",
@ -137,6 +152,7 @@ impl BuildEnv {
};
let kernel_linker_script = match arch {
Arch::aarch64 => format!("arm/{kernel_triple}.ld"),
Arch::riscv64 => format!("riscv/{kernel_triple}.ld"),
Arch::i686 | Arch::x86_64 => format!("x86/{kernel_triple}.ld"),
};
let kernel_output_dir =
@ -190,6 +206,7 @@ impl BuildEnv {
impl XTaskConfig {
pub fn components(&self, env: &BuildEnv) -> &BuildComponents {
match env.arch {
Arch::riscv64 => &self.target.riscv64.components,
Arch::aarch64 => &self.target.aarch64.components,
Arch::x86_64 => &self.target.x86_64.components,
Arch::i686 => &self.target.i686.components,
@ -212,11 +229,12 @@ impl Profile {
impl Arch {
pub fn all() -> impl Iterator<Item = Self> {
[Self::aarch64, Self::x86_64, Self::i686].into_iter()
[Self::aarch64, Self::x86_64, Self::i686, Self::riscv64].into_iter()
}
pub fn user_triple(&self) -> &str {
match self {
Self::riscv64 => "riscv64-unknown-yggdrasil",
Self::aarch64 => "aarch64-unknown-yggdrasil",
Self::x86_64 => "x86_64-unknown-yggdrasil",
Self::i686 => "i686-unknown-yggdrasil",
@ -225,6 +243,7 @@ impl Arch {
pub fn name(&self) -> &str {
match self {
Self::riscv64 => "riscv64",
Self::aarch64 => "aarch64",
Self::x86_64 => "x86_64",
Self::i686 => "i686",

View File

@ -4,6 +4,8 @@ use std::{
process::{Command, ExitStatusError},
};
use crate::env::Board;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("{0}")]
@ -47,6 +49,9 @@ pub enum Error {
CompilerRtConfigFailed(CommandFailed),
#[error("compiler-rt build failed: {0}")]
CompilerRtBuildFailed(CommandFailed),
#[error("No qemu support for board {0:?}")]
UnsupportedEmulation(Board),
}
#[derive(Debug, thiserror::Error)]

View File

@ -7,11 +7,11 @@ use std::{
use qemu::{
aarch64,
device::{QemuDevice, QemuDrive, QemuNic, QemuSerialTarget},
i386, x86_64, Qemu,
i386, riscv64, x86_64, Qemu,
};
use crate::{
build::{self, AllBuilt, ImageBuilt, InitrdGenerated, KernelBuilt, KernelProcessed},
build::{self, AllBuilt, ImageBuilt, InitrdGenerated, KernelBin, KernelBuilt, KernelProcessed},
env::{Board, BuildEnv},
error::Error,
util::run_external_command,
@ -177,6 +177,9 @@ fn run_aarch64(
.with_serial(QemuSerialTarget::MonStdio)
.with_cpu(aarch64::Cpu::Max)
.with_memory_megabytes(config.machine.aarch64.memory),
_ => {
return Err(Error::UnsupportedEmulation(env.board));
}
};
if env.board != Board::raspi4b {
@ -254,6 +257,36 @@ fn run_i686(
Ok(qemu.into_command())
}
fn run_riscv64(
config: &QemuConfig,
env: &BuildEnv,
qemu_bin: Option<PathBuf>,
devices: Vec<QemuDevice>,
kernel: PathBuf,
initrd: PathBuf,
) -> Result<Command, Error> {
let _ = config;
let _ = devices;
let mut qemu = Qemu::new_riscv64();
if let Some(qemu_bin) = qemu_bin {
qemu.override_qemu(qemu_bin);
}
let bios = env.workspace_root.join("boot/riscv/fw_jump.bin");
qemu.with_serial(QemuSerialTarget::MonStdio)
.with_machine(riscv64::Machine::Virt)
.with_cpu(riscv64::Cpu::Rv64)
.with_memory_megabytes(1024)
.disable_display()
.with_boot_image(riscv64::Image::OpenSBI {
kernel,
initrd,
bios,
});
Ok(qemu.into_command())
}
fn load_qemu_config<P: AsRef<Path>>(path: P) -> Result<QemuConfig, Error> {
let path = path.as_ref();
@ -326,6 +359,9 @@ pub fn run(
add_devices_from_config(&mut devices, disk.as_ref(), &config)?;
let mut command = match built {
AllBuilt::Riscv64(KernelBin(kernel), InitrdGenerated(initrd)) => {
run_riscv64(&config, &env, qemu, devices, kernel, initrd)?
}
AllBuilt::AArch64(KernelProcessed(KernelBuilt(kernel)), InitrdGenerated(initrd)) => {
make_kernel_bin(kernel, &kernel_bin)?;
run_aarch64(&config, &env, qemu, devices, kernel_bin, initrd)?