rv64: boot into usermode

Mark Poliakov 2025-01-20 00:54:26 +02:00
parent 20fa34c945
commit 8ba37c9762
36 changed files with 907 additions and 267 deletions

View File

@@ -83,6 +83,7 @@ pub struct UserContextInfo {
     pub stack_pointer: usize,
     pub thread_pointer: usize,
     pub address_space: u64,
+    pub asid: u64,
     pub single_step: bool,
 }

View File

@@ -97,14 +97,32 @@ __rv64_task_enter_kernel:
     csrw sstatus, t0
     csrw sepc, ra
+    csrw sscratch, zero
     sret
 .size __rv64_task_enter_kernel, . - __rv64_task_enter_kernel
 
 .type __rv64_task_enter_user, @function
 __rv64_task_enter_user:
-    // TODO
-    j .
+    csrw sscratch, tp
+
+    ld a0, 0 * 8(sp) // argument
+    ld ra, 1 * 8(sp) // entry
+    ld tp, 2 * 8(sp) // thread pointer
+    ld sp, 3 * 8(sp) // user stack
+
+    // Set SPIE to enable interrupts
+    // Set SPP = 0 to indicate a return to U-mode
+    li t1, (1 << 8)
+    not t1, t1
+    csrr t0, sstatus
+    ori t0, t0, (1 << 5)
+    and t0, t0, t1
+    csrw sstatus, t0
+
+    csrw sepc, ra
+    sret
 .size __rv64_task_enter_user, . - __rv64_task_enter_user
 .option pop
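
A note for readers tracing the new U-mode entry path: `sret` returns to the privilege level held in `sstatus.SPP` and copies `SPIE` into `SIE`, so the sequence above sets bit 5 (SPIE) and clears bit 8 (SPP) to land in U-mode with interrupts enabled; the four `ld`s pop exactly what `TaskContextImpl::user` pushes (argument pushed last, so it sits at offset 0). A minimal sketch of the same sstatus computation in Rust (the function name is illustrative, not part of the commit):

    const SSTATUS_SPIE: u64 = 1 << 5; // S-mode previous interrupt enable
    const SSTATUS_SPP: u64 = 1 << 8; // S-mode previous privilege (0 = U-mode)

    // Mirrors the li/not/ori/and sequence in the assembly above.
    fn sstatus_for_user_return(current: u64) -> u64 {
        (current | SSTATUS_SPIE) & !SSTATUS_SPP
    }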

View File

@@ -3,16 +3,29 @@ use core::{arch::global_asm, cell::UnsafeCell, marker::PhantomData};
 use kernel_arch_interface::{
     mem::{KernelTableManager, PhysicalMemoryAllocator},
     task::{StackBuilder, TaskContext, UserContextInfo},
+    Architecture,
 };
 use libk_mm_interface::address::PhysicalAddress;
+use tock_registers::{
+    interfaces::{Readable, Writeable},
+    registers::InMemoryRegister,
+};
 use yggdrasil_abi::error::Error;
 
+use crate::{
+    mem::{self, KERNEL_VIRT_OFFSET},
+    registers::SATP,
+    ArchitectureImpl, PerCpuData,
+};
+
 pub const CONTEXT_SIZE: usize = 14 * size_of::<usize>();
 
 #[repr(C, align(0x10))]
 struct TaskContextInner {
     // 0x00
     sp: usize,
+
+    satp: InMemoryRegister<u64, SATP::Register>,
 }
 
 pub struct TaskContextImpl<
@@ -22,6 +35,7 @@ pub struct TaskContextImpl<
     inner: UnsafeCell<TaskContextInner>,
     // fp_context: UnsafeCell<FpContext>,
     stack_base_phys: PhysicalAddress,
+    stack_top: usize,
     stack_size: usize,
 
     _pd: PhantomData<(K, PA)>,
@@ -32,6 +46,15 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
 {
     unsafe fn load_state(&self) {
         // TODO load new SATP value
+        let inner = unsafe { &*self.inner.get() };
+        let cpu = unsafe { &mut *ArchitectureImpl::local_cpu().cast::<PerCpuData>() };
+
+        // Copy new SATP
+        let satp = inner.satp.get();
+        if satp != SATP.get() {
+            SATP.set(satp);
+        }
+
+        cpu.smode_sp = self.stack_top;
     }
 
     unsafe fn store_state(&self) {}
@@ -40,12 +63,46 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
 impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddress>>
     TaskContext<K, PA> for TaskContextImpl<K, PA>
 {
-    const USER_STACK_EXTRA_ALIGN: usize = 0;
+    const USER_STACK_EXTRA_ALIGN: usize = 8;
     const SIGNAL_STACK_EXTRA_ALIGN: usize = 0;
 
     fn user(context: UserContextInfo) -> Result<Self, Error> {
-        let _ = context;
-        todo!()
+        const USER_TASK_PAGES: usize = 16;
+
+        let stack_base_phys = PA::allocate_contiguous_pages(USER_TASK_PAGES)?;
+        let stack_base = stack_base_phys.raw_virtualize::<K>();
+        let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
+
+        log::info!(
+            "Set up user task: pc={:#x}, sp={:#x}, tp={:#x}",
+            context.entry,
+            context.stack_pointer,
+            context.thread_pointer
+        );
+
+        stack.push(context.stack_pointer);
+        stack.push(context.thread_pointer);
+        stack.push(context.entry);
+        stack.push(context.argument);
+
+        setup_common_context(&mut stack, __rv64_task_enter_user as _);
+
+        let sp = stack.build();
+        let satp = InMemoryRegister::new(0);
+        satp.write(
+            SATP::MODE::Sv39
+                + SATP::ASID.val(context.asid)
+                + SATP::PPN.val(context.address_space >> 12),
+        );
+
+        Ok(Self {
+            inner: UnsafeCell::new(TaskContextInner { sp, satp }),
+            // fp_context: UnsafeCell::new(FpContext::new()),
+            stack_base_phys,
+            stack_top: stack_base + USER_TASK_PAGES * 0x1000,
+            stack_size: USER_TASK_PAGES * 0x1000,
+            _pd: PhantomData,
+        })
     }
 
     fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
@@ -65,11 +122,16 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
         // TODO stack is leaked
         log::info!("stack = {:#x}", stack_base);
 
+        let satp = InMemoryRegister::new(0);
+        let kernel_table_phys =
+            ((&raw const mem::KERNEL_TABLES).addr() - KERNEL_VIRT_OFFSET) as u64;
+        satp.write(SATP::MODE::Sv39 + SATP::ASID.val(0) + SATP::PPN.val(kernel_table_phys >> 12));
+
         Ok(Self {
-            inner: UnsafeCell::new(TaskContextInner { sp }),
+            inner: UnsafeCell::new(TaskContextInner { sp, satp }),
             // fp_context: UnsafeCell::new(FpContext::new()),
             stack_base_phys,
+            stack_top: 0,
             stack_size: KERNEL_TASK_PAGES * 0x1000,
             _pd: PhantomData,
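
As a cross-check on the two `SATP` writes above: on RV64, satp packs MODE into bits 63:60 (8 = Sv39), ASID into bits 59:44, and the root table's physical page number into bits 43:0, which is why both call sites shift the table's physical address right by 12. A hedged sketch with plain shifts (illustrative, equivalent to what the tock-registers write produces):

    // Illustrative only: compose an Sv39 satp value by hand.
    fn satp_sv39(asid: u64, root_table_phys: u64) -> u64 {
        const MODE_SV39: u64 = 8;
        (MODE_SV39 << 60) | ((asid & 0xFFFF) << 44) | (root_table_phys >> 12)
    }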

View File

@@ -10,9 +10,9 @@ use kernel_arch_interface::{
     task::Scheduler,
     Architecture,
 };
-use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
+use tock_registers::interfaces::{ReadWriteable, Readable};
 
-use registers::{SSCRATCH, SSTATUS};
+use registers::SSTATUS;
 
 pub mod mem;
 pub use mem::{process::ProcessAddressSpaceImpl, KernelTableManagerImpl};
@@ -28,7 +28,8 @@ pub struct ArchitectureImpl;
 pub struct PerCpuData {
     // Used in assembly
     pub tmp_t0: usize,   // 0x00
-    pub smode_sp: usize, // 0x08
+    pub umode_sp: usize, // 0x08
+    pub smode_sp: usize, // 0x10
 
     // Used elsewhere
     pub bootstrap: bool,
@@ -69,7 +70,6 @@ impl Architecture for ArchitectureImpl {
     }
 
     unsafe fn set_local_cpu(cpu: *mut ()) {
-        SSCRATCH.set(cpu.addr() as u64);
         unsafe { core::arch::asm!("mv tp, {0}", in(reg) cpu) };
     }
 
@@ -94,7 +94,7 @@ impl Architecture for ArchitectureImpl {
     fn ipi_queue(cpu_id: u32) -> Option<&'static IpiQueue<Self>> {
         let _ = cpu_id;
-        loop {}
+        todo!()
     }
 
     #[inline]
@@ -125,25 +125,25 @@ impl Architecture for ArchitectureImpl {
     }
 
     fn cpu_index<S: Scheduler + 'static>() -> u32 {
-        loop {}
+        CpuImpl::<Self, S>::local().id()
     }
 
     fn cpu_enabled_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
         let _ = cpu;
-        loop {}
+        todo!()
     }
 
     fn cpu_available_features<S: Scheduler>(cpu: &CpuImpl<Self, S>) -> Option<&Self::CpuFeatures> {
         let _ = cpu;
-        loop {}
+        todo!()
     }
 
     fn local_interrupt_controller() -> Option<&'static dyn LocalInterruptController> {
-        loop {}
+        todo!()
    }
 
     fn message_interrupt_controller() -> Option<&'static dyn MessageInterruptController> {
-        loop {}
+        todo!()
     }
 
     fn idle_task() -> extern "C" fn(usize) -> ! {

View File

@@ -229,13 +229,23 @@ pub fn auto_address<T>(x: *const T) -> usize {
     }
 }
 
+/// Enables the memory translation.
+///
+/// # Safety
+///
+/// Only meant to be called once per HART during its early init.
 pub unsafe fn enable_mmu() {
     let l1_phys = auto_address(&raw const KERNEL_TABLES) as u64;
     SATP.write(SATP::PPN.val(l1_phys >> 12) + SATP::MODE::Sv39);
 }
 
-// Also unmaps the lower half
+/// Sets up run-time kernel translation tables and removes the lower-half mapping.
+///
+/// # Safety
+///
+/// The caller must ensure the MMU is already enabled and that lower-half addresses will no
+/// longer be referred to.
 pub unsafe fn setup_fixed_tables() {
     let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
     let mut tables = KERNEL_TABLES.lock();
@@ -275,3 +285,16 @@ pub fn tlb_flush_va(va: usize) {
         core::arch::asm!("sfence.vma {0}, zero", in(reg) va);
     }
 }
+
+pub fn tlb_flush_va_asid(va: usize, asid: usize) {
+    unsafe {
+        // sfence.vma: rs1 = virtual address, rs2 = ASID
+        core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid);
+    }
+}
+
+pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {
+    let tables = KERNEL_TABLES.lock();
+    for l1i in page_index::<L1>(USER_BOUNDARY)..512 {
+        dst[l1i] = unsafe { PageEntry::from_raw(tables.l1.data[l1i]) };
+    }
+}
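
For readers cross-checking the flush helpers above against the RISC-V privileged spec: `sfence.vma` takes the virtual address in rs1 and the ASID in rs2, and an `x0` in either position widens the fence to all addresses or all address spaces respectively. The remaining two combinations, sketched under the same conventions (function names are illustrative):

    /// Illustrative: flush every TLB entry for every ASID.
    pub fn tlb_flush_all() {
        unsafe { core::arch::asm!("sfence.vma zero, zero") };
    }

    /// Illustrative: flush every entry belonging to a single ASID.
    pub fn tlb_flush_asid(asid: usize) {
        unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
    }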

View File

@@ -1,22 +1,56 @@
-use core::marker::PhantomData;
+use core::{
+    marker::PhantomData,
+    sync::atomic::{AtomicU16, Ordering},
+};
 
 use libk_mm_interface::{
-    address::PhysicalAddress,
+    address::{AsPhysicalAddress, PhysicalAddress},
+    pointer::PhysicalRefMut,
     process::ProcessAddressSpaceManager,
-    table::{MapAttributes, TableAllocator},
+    table::{EntryLevel, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator},
 };
+use memtables::riscv64::PageAttributes;
 use yggdrasil_abi::error::Error;
 
+use crate::mem::{clone_kernel_tables, table::PageEntry};
+
+use super::{
+    table::{PageTable, L1, L2, L3},
+    tlb_flush_va_asid, KernelTableManagerImpl, USER_BOUNDARY,
+};
+
 pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
+    l1: PhysicalRefMut<'static, PageTable<L1>, KernelTableManagerImpl>,
+    asid: u16,
     _pd: PhantomData<TA>,
 }
 
 impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceImpl<TA> {
-    const LOWER_LIMIT_PFN: usize = 0;
-    const UPPER_LIMIT_PFN: usize = 0;
+    const LOWER_LIMIT_PFN: usize = 8;
+    const UPPER_LIMIT_PFN: usize = (16 << 30) / L3::SIZE;
 
     fn new() -> Result<Self, Error> {
-        todo!()
+        static LAST_ASID: AtomicU16 = AtomicU16::new(1);
+
+        let mut l1 = unsafe {
+            PhysicalRefMut::<'static, PageTable<L1>, KernelTableManagerImpl>::map(
+                TA::allocate_page_table()?,
+            )
+        };
+
+        for i in 0..512 {
+            l1[i] = PageEntry::INVALID;
+        }
+
+        // Copy the kernel mappings
+        clone_kernel_tables(&mut l1);
+
+        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);
+
+        Ok(Self {
+            l1,
+            asid,
+            _pd: PhantomData,
+        })
     }
 
     unsafe fn map_page(
@@ -25,27 +59,138 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
         physical: PhysicalAddress,
         flags: MapAttributes,
     ) -> Result<(), Error> {
-        let _ = address;
-        let _ = physical;
-        let _ = flags;
-        todo!()
+        self.write_l3_entry(
+            address,
+            PageEntry::page(physical, to_page_attributes(flags)),
+            false,
+        )
+        .unwrap();
+        Ok(())
     }
 
     unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error> {
-        let _ = address;
-        todo!()
+        self.pop_l3_entry(address)
     }
 
     fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error> {
-        let _ = address;
-        todo!()
+        self.read_l3_entry(address).ok_or(Error::DoesNotExist)
     }
 
-    fn as_address_with_asid(&self) -> u64 {
-        todo!()
+    fn as_address_with_asid(&self) -> (u64, u64) {
+        let physical = unsafe { self.l1.as_physical_address() }.into_u64();
+        (physical, self.asid as u64)
     }
 
-    unsafe fn clear(&mut self) {
-        todo!()
+    unsafe fn clear(&mut self) {}
+}
+
+impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
+    // Write a single 4KiB entry
+    fn write_l3_entry(
+        &mut self,
+        virt: usize,
+        entry: PageEntry<L3>,
+        overwrite: bool,
+    ) -> Result<(), Error> {
+        if virt >= USER_BOUNDARY {
+            log::warn!("Tried to map a userspace page to a non-userspace virtual region");
+            return Err(Error::InvalidArgument);
+        }
+
+        let l1i = virt.page_index::<L1>();
+        let l2i = virt.page_index::<L2>();
+        let l3i = virt.page_index::<L3>();
+
+        let mut l2 = self.l1.get_mut_or_alloc::<TA>(l1i)?;
+        let mut l3 = l2.get_mut_or_alloc::<TA>(l2i)?;
+
+        if l3[l3i].is_present() && !overwrite {
+            todo!();
+        }
+
+        l3[l3i] = entry;
+        tlb_flush_va_asid(virt, self.asid as usize);
+        // dc_cvac((&raw const l3[l3i]).addr());
+        // tlb_flush_vaae1(virt);
+
+        Ok(())
+    }
+
+    fn pop_l3_entry(&mut self, virt: usize) -> Result<PhysicalAddress, Error> {
+        let l1i = virt.page_index::<L1>();
+        let l2i = virt.page_index::<L2>();
+        let l3i = virt.page_index::<L3>();
+
+        // TODO somehow drop tables if they're known to be empty?
+        let mut l2 = self.l1.get_mut(l1i).ok_or(Error::DoesNotExist)?;
+        let mut l3 = l2.get_mut(l2i).ok_or(Error::DoesNotExist)?;
+
+        let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
+
+        l3[l3i] = PageEntry::INVALID;
+        tlb_flush_va_asid(virt, self.asid as usize);
+        // ic_iallu();
+        // dc_cvac((&raw const l3[l3i]).addr());
+        // tlb_flush_vaae1(virt);
+
+        Ok(page)
+    }
+
+    fn read_l3_entry(&self, virt: usize) -> Option<(PhysicalAddress, MapAttributes)> {
+        if virt >= USER_BOUNDARY {
+            log::warn!("Tried to read a userspace page from a non-userspace virtual region");
+            return None;
+        }
+
+        let l1i = virt.page_index::<L1>();
+        let l2i = virt.page_index::<L2>();
+        let l3i = virt.page_index::<L3>();
+
+        let l2 = self.l1.get(l1i)?;
+        let l3 = l2.get(l2i)?;
+
+        let page = l3[l3i].as_page()?;
+
+        Some((
+            page.add(virt & 0xFFF),
+            to_map_attributes(l3[l3i].attributes()),
+        ))
+    }
+}
+
+impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
+    fn drop(&mut self) {
+        // // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
+        // // is safe, no one refers to the memory
+        // unsafe {
+        //     self.clear();
+        //     let l1_phys = self.l1.as_physical_address();
+        //     TA::free_page_table(l1_phys);
+        // }
+    }
+}
+
+fn to_page_attributes(src: MapAttributes) -> PageAttributes {
+    let mut result = PageAttributes::R | PageAttributes::X;
+    if src.contains(MapAttributes::USER_WRITE) {
+        result |= PageAttributes::W;
+    }
+    if src.intersects(MapAttributes::USER_READ | MapAttributes::USER_WRITE) {
+        result |= PageAttributes::U;
+    }
+    result
+}
+
+fn to_map_attributes(src: PageAttributes) -> MapAttributes {
+    let mut result = MapAttributes::NON_GLOBAL;
+
+    if src.contains(PageAttributes::U) {
+        result |= MapAttributes::USER_READ;
+        if src.contains(PageAttributes::W) {
+            result |= MapAttributes::USER_WRITE;
+        }
+    }
+
+    result
+}
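
One caveat worth flagging in `new()` above: `AtomicU16::fetch_add` wraps silently, so after 65535 allocations two live address spaces could share an ASID and the per-ASID flushes would no longer isolate them. A stricter variant that at least detects exhaustion (a sketch, not what the commit does; a full solution would recycle ASIDs or flush the whole TLB on wrap):

    use core::sync::atomic::{AtomicU16, Ordering};

    static LAST_ASID: AtomicU16 = AtomicU16::new(1);

    // Illustrative only: returns None once the 16-bit counter wraps back to 0,
    // which is reserved for the kernel tables here.
    fn allocate_asid() -> Option<u16> {
        let asid = LAST_ASID.fetch_add(1, Ordering::AcqRel);
        (asid != 0).then_some(asid)
    }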

View File

@@ -4,7 +4,7 @@ use core::{
 };
 
 use libk_mm_interface::{
-    address::PhysicalAddress,
+    address::{AsPhysicalAddress, PhysicalAddress},
     pointer::{PhysicalRef, PhysicalRefMut},
     table::{EntryLevel, NextPageTable, NonTerminalEntryLevel, TableAllocator},
 };
@@ -42,7 +42,7 @@ pub struct PageTable<L: EntryLevel> {
 }
 
 #[derive(Clone, Copy, Debug, PartialEq)]
-pub struct PageEntry<L: EntryLevel>(u64, PhantomData<L>);
+pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);
 
 impl NonTerminalEntryLevel for L1 {
     type NextLevel = L2;
@@ -57,11 +57,33 @@ impl<L: EntryLevel> PageTable<L> {
             entries: [PageEntry::INVALID; 512],
         }
     }
+
+    pub fn new_zeroed<'a, TA: TableAllocator>(
+    ) -> Result<PhysicalRefMut<'a, PageTable<L>, KernelTableManagerImpl>, Error> {
+        let physical = TA::allocate_page_table()?;
+        let mut table =
+            unsafe { PhysicalRefMut::<'a, Self, KernelTableManagerImpl>::map(physical) };
+
+        for i in 0..512 {
+            table[i] = PageEntry::INVALID;
+        }
+
+        Ok(table)
+    }
 }
 
 impl<L: EntryLevel> PageEntry<L> {
     pub const INVALID: Self = Self(0, PhantomData);
 
+    /// Constructs a [PageEntry] from its raw representation.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `value` is actually a "valid" PTE.
+    pub const unsafe fn from_raw(value: u64) -> Self {
+        Self(value, PhantomData)
+    }
+
     pub const fn is_present(&self) -> bool {
         self.0 & PageAttributes::V.bits() != 0
     }
@@ -76,19 +98,31 @@ impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
     type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
     type TableRefMut = PhysicalRefMut<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;
 
-    fn get(&self, _index: usize) -> Option<Self::TableRef> {
-        loop {}
+    fn get(&self, index: usize) -> Option<Self::TableRef> {
+        let table = self[index].as_table()?;
+        Some(unsafe { PhysicalRef::map(table) })
     }
 
-    fn get_mut(&mut self, _index: usize) -> Option<Self::TableRefMut> {
-        loop {}
+    fn get_mut(&mut self, index: usize) -> Option<Self::TableRefMut> {
+        let table = self[index].as_table()?;
+        Some(unsafe { PhysicalRefMut::map(table) })
     }
 
     fn get_mut_or_alloc<TA: TableAllocator>(
         &mut self,
-        _index: usize,
+        index: usize,
     ) -> Result<Self::TableRefMut, Error> {
-        loop {}
+        if let Some(table) = self[index].as_table() {
+            Ok(unsafe { PhysicalRefMut::map(table) })
+        } else {
+            let table = PageTable::new_zeroed::<TA>()?;
+            self[index] = PageEntry::<L>::table(
+                unsafe { table.as_physical_address() },
+                PageAttributes::empty(),
+            );
+            // dc_cvac((&raw const self[index]).addr());
+            Ok(table)
+        }
     }
 }
@@ -108,6 +142,15 @@ impl<L: NonTerminalEntryLevel> PageEntry<L> {
             PhantomData,
         )
     }
+
+    pub fn as_table(&self) -> Option<PhysicalAddress> {
+        (self.0
+            & (PageAttributes::R | PageAttributes::W | PageAttributes::X | PageAttributes::V)
+                .bits()
+            == PageAttributes::V.bits())
+        .then_some((self.0 << 2) & !0xFFF)
+        .map(PhysicalAddress::from_u64)
+    }
 }
 
 impl PageEntry<L3> {
@@ -119,7 +162,9 @@ impl PageEntry<L3> {
     }
 
     pub fn as_page(&self) -> Option<PhysicalAddress> {
-        loop {}
+        (self.0 & PageAttributes::V.bits() != 0)
+            .then_some((self.0 << 2) & !0xFFF)
+            .map(PhysicalAddress::from_u64)
     }
 }
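
The `(self.0 << 2) & !0xFFF` in `as_table`/`as_page` decodes the Sv39 PTE layout: the PPN occupies bits 53:10, so shifting left by 2 turns it into `ppn << 12` while the mask drops the flag bits; `as_table` additionally requires R = W = X = 0, which the spec defines as a pointer to the next level rather than a leaf. The spelled-out form, valid for PTEs whose reserved high bits are clear (illustrative):

    // Illustrative only: same result as `(pte << 2) & !0xFFF` above.
    fn pte_physical_address(pte: u64) -> u64 {
        let ppn = (pte >> 10) & ((1 << 44) - 1); // PPN field, bits 53:10
        ppn << 12
    }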

View File

@@ -448,6 +448,8 @@ pub mod sstatus {
     register_bitfields!(
         u64,
         pub SSTATUS [
+            SUM OFFSET(18) NUMBITS(1) [],
+            SPP OFFSET(8) NUMBITS(1) [],
             SIE OFFSET(1) NUMBITS(1) [],
         ]
     );

View File

@@ -1,3 +1,5 @@
+#[allow(clippy::too_many_arguments)]
+#[inline(always)]
 unsafe fn sbi_do_call(
     extension: u64,
     function: u64,

View File

@@ -39,8 +39,8 @@ pub trait ProcessAddressSpaceManager<TA: TableAllocator>: Sized {
     /// if one is mapped
     fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;
 
-    /// Returns the implementation specific physical address of this space, with ASID applied
-    fn as_address_with_asid(&self) -> u64;
+    /// Returns the physical address of the translation table along with its ASID
+    fn as_address_with_asid(&self) -> (u64, u64);
 
     /// Clears the address space by dropping all non-global tables.
     ///

View File

@@ -335,7 +335,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
             ProcessAddressSpaceImpl::<TA>::LOWER_LIMIT_PFN,
             ProcessAddressSpaceImpl::<TA>::UPPER_LIMIT_PFN,
         );
-        log::debug!("New AddressSpace {:#x}", table.as_address_with_asid());
+        let (physical, asid) = table.as_address_with_asid();
+        log::debug!("New AddressSpace {:#x}, asid {:#x}", physical, asid);
         Ok(Self {
             inner: IrqSafeSpinlock::new(Inner { table, allocator }),
         })
@@ -451,8 +452,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
         lock.unmap_range(address, size / L3_PAGE_SIZE)
     }
 
-    /// Returns the physical address of this table, with ASID applied
-    pub fn as_address_with_asid(&self) -> u64 {
+    /// Returns the physical address of the translation table along with its ASID
+    pub fn as_address_with_asid(&self) -> (u64, u64) {
         self.inner.lock().table.as_address_with_asid()
     }
 
@@ -465,7 +466,8 @@ impl<TA: TableAllocator> ProcessAddressSpace<TA> {
 impl<TA: TableAllocator> Drop for ProcessAddressSpace<TA> {
     fn drop(&mut self) {
-        log::debug!("Drop AddressSpace {:#x}", self.as_address_with_asid());
+        let (physical, asid) = self.as_address_with_asid();
+        log::debug!("Drop AddressSpace {:#x}, asid {:#x}", physical, asid);
         self.clear().ok();
     }
 }

View File

@@ -488,7 +488,7 @@ fn write_rela(rela: &Rela, space: &ProcessAddressSpace, b: usize) -> Result<(),
     let rel_field = rela.r_offset as usize + b;
 
     let (value, width) = match rela.r_type {
-        elf::abi::R_X86_64_RELATIVE | elf::abi::R_AARCH64_RELATIVE => {
+        elf::abi::R_X86_64_RELATIVE | elf::abi::R_AARCH64_RELATIVE | elf::abi::R_RISCV_RELATIVE => {
             // B + A
             // Width: qword
             (b as i64 + a, 8)

View File

@@ -217,13 +217,21 @@ where
     // let tls_address = elf::clone_tls(space, image)?;
 
-    log::debug!("argument = {:#x}", argument);
+    log::debug!(
+        "argument = {:#x}, user_sp = {:#x}, stack: {:#x}..{:#x}",
+        argument,
+        user_sp,
+        virt_stack_base,
+        virt_stack_base + USER_STACK_PAGES * 0x1000 - TaskContextImpl::USER_STACK_EXTRA_ALIGN
+    );
 
+    let (address_space, asid) = space.as_address_with_asid();
     TaskContext::user(UserContextInfo {
         entry: image.entry,
         argument,
         stack_pointer: ptr.addr(),
         thread_pointer: 0,
-        address_space: space.as_address_with_asid(),
+        address_space,
+        asid,
         single_step: options.single_step,
     })
 }

View File

@@ -194,10 +194,13 @@ impl Process {
         let sp = TaskContextImpl::align_stack_for_entry(options.stack_top) as *mut usize;
         let sp = unsafe { Thread::setup_stack_header(&space, sp, options.argument)? };
 
+        let (address_space, asid) = space.as_address_with_asid();
         let info = UserContextInfo {
             entry: options.entry as _,
             argument: options.argument,
-            address_space: space.as_address_with_asid(),
+            address_space,
+            asid,
             stack_pointer: sp.addr(),
             single_step: false,
             thread_pointer: 0,

View File

@@ -14,7 +14,7 @@ __rv64_entry:
     // a0 - bootstrap HART ID
     // a1 - device tree blob
     // mhartid == a0
-    // satp == 0
+    csrw satp, zero
 
     // Zero the .bss
     LOAD_PCREL .L00, t0, __bss_start_phys
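
Writing satp explicitly instead of assuming it is zero reads as a defensive change: it makes the entry stub independent of whatever translation state the previous boot stage (OpenSBI here) happens to leave behind, before the stub starts writing `.bss` through physical addresses.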

View File

@@ -1,24 +1,56 @@
 use core::arch::global_asm;
 
-use kernel_arch::{task::Scheduler, Architecture};
-use libk::arch::Cpu;
-use tock_registers::interfaces::{ReadWriteable, Readable};
+use abi::{arch::SavedFrame, primitive_enum, process::Signal, SyscallFunction};
+use kernel_arch::{
+    task::{Scheduler, TaskFrame},
+    Architecture,
+};
+use libk::{arch::Cpu, task::thread::Thread};
+use tock_registers::interfaces::ReadWriteable;
 
 use kernel_arch_riscv64::{
     intrinsics,
-    registers::{SCAUSE, SEPC, STVAL, STVEC},
+    registers::{SSTATUS, STVEC},
     sbi, ArchitectureImpl,
 };
 
+use crate::syscall;
+
+primitive_enum! {
+    pub enum Cause: u64 {
+        MisalignedInstruction = 0,
+        InstructionAccessFault = 1,
+        IllegalInstruction = 2,
+        Breakpoint = 3,
+        LoadAddressMisaligned = 4,
+        LoadAccessFault = 5,
+        StoreAddressMisaligned = 6,
+        StoreAccessFault = 7,
+        EcallUmode = 8,
+        EcallSmode = 9,
+        EcallMmode = 11,
+        InstructionPageFault = 12,
+        LoadPageFault = 13,
+        StorePageFault = 15,
+    }
+}
+
 #[derive(Debug)]
 #[repr(C)]
 pub struct TrapFrame {
+    // General-purpose
     pub ra: u64,
     pub gp: u64,
-    pub t0_2: [u64; 3],
-    pub a0_7: [u64; 8],
-    pub t3_6: [u64; 4],
-    pub s0: u64,
+    pub tn: [u64; 7],
+    pub sn: [u64; 12],
+    pub an: [usize; 8],
+    // Special
+    pub sp: u64,
+    pub sstatus: u64,
+    pub sepc: u64,
+    pub stval: u64,
+    pub scause: u64,
+    pub tp: u64,
 }
 
 pub fn init_smode_exceptions() {
@@ -31,42 +63,180 @@ pub fn init_smode_exceptions() {
     STVEC.modify(STVEC::MODE::Vectored);
 }
 
-unsafe fn smode_exception_handler(frame: *mut TrapFrame) {
-    let _ = frame;
-    let cause = SCAUSE.read(SCAUSE::CODE);
-    let tval = STVAL.get();
-    let epc = SEPC.get();
-    log::error!("S-mode exception cause={cause}, tval={tval:#x}, epc={epc:#x}");
+unsafe fn umode_exception_handler(frame: &mut TrapFrame) {
+    let thread = Thread::current();
+
+    let cause = Cause::try_from(frame.scause).ok();
+    let dump = match cause {
+        Some(Cause::LoadPageFault)
+        | Some(Cause::StorePageFault)
+        | Some(Cause::LoadAccessFault)
+        | Some(Cause::StoreAccessFault)
+        | Some(Cause::InstructionPageFault)
+        | Some(Cause::InstructionAccessFault) => {
+            let translation = if let Some(space) = thread.try_get_process().map(|p| p.space()) {
+                space.translate(frame.stval as usize).ok()
+            } else {
+                None
+            };
+
+            thread.raise_signal(Signal::MemoryAccessViolation);
+
+            if let Some(physical) = translation {
+                log::warn!(" * tval translates to {physical:#x}");
+            } else {
+                log::warn!(" * tval does not translate");
+            }
+
+            true
+        }
+        Some(Cause::EcallUmode) => {
+            // TODO more granular control over how U-mode pages are accessed from S-mode
+            SSTATUS.modify(SSTATUS::SUM::SET);
+
+            let func = frame.an[0];
+            if func == usize::from(SyscallFunction::ExitSignal) {
+                todo!()
+            }
+
+            let args = &frame.an[1..];
+            let result = syscall::raw_syscall_handler(func, args) as _;
+            frame.an[0] = result;
+            frame.sepc += 4;
+
+            false
+        }
+        _ => {
+            thread.raise_signal(Signal::MemoryAccessViolation);
+            true
+        }
+    };
+
+    if dump {
+        log::warn!(
+            "U-mode exception cause={:?} ({}), epc={:#x}, sp={:#x}, tval={:#x}",
+            cause,
+            frame.scause,
+            frame.sepc,
+            frame.sp,
+            frame.stval
+        );
+    }
+}
+
+unsafe fn smode_exception_handler(frame: &mut TrapFrame) {
+    let cause = Cause::try_from(frame.scause).expect("Invalid exception cause");
+
+    log::error!(
+        "S-mode exception cause={:?} ({}), tval={:#x}, epc={:#x}, sp={:#x}",
+        cause,
+        frame.scause,
+        frame.stval,
+        frame.sepc,
+        frame.sp
+    );
+
+    match cause {
+        Cause::LoadPageFault
+        | Cause::StorePageFault
+        | Cause::LoadAccessFault
+        | Cause::StoreAccessFault
+        | Cause::InstructionPageFault
+        | Cause::InstructionAccessFault => {
+            let translation = if let Some(space) = Thread::get_current()
+                .and_then(|t| t.try_get_process())
+                .map(|p| p.space())
+            {
+                space.translate(frame.stval as usize).ok()
+            } else {
+                None
+            };
+
+            if let Some(physical) = translation {
+                log::warn!(" * tval translates to {physical:#x}");
+            } else {
+                log::warn!(" * tval does not translate");
+            }
+        }
+        _ => (),
+    }
+
     ArchitectureImpl::halt();
 }
 
 unsafe extern "C" fn smode_interrupt_handler(frame: *mut TrapFrame) {
-    let _ = frame;
-    let cause = SCAUSE.read(SCAUSE::CODE);
-    match cause {
+    let frame = &*frame;
+    match frame.scause & !(1 << 63) {
         // S-mode timer interrupt
         5 => {
-            sbi::sbi_set_timer(intrinsics::rdtime() + 1_000_000);
+            sbi::sbi_set_timer(intrinsics::rdtime() + 100_000);
            // TODO runtime tick, time accounting
             Cpu::local().scheduler().yield_cpu();
         }
-        _ => {
-            log::warn!("Unknown/unhandled S-mode interrupt {cause}");
-            ArchitectureImpl::halt();
-        }
+        n => todo!("Unhandled interrupt #{n}"),
     }
 }
 
 unsafe extern "C" fn smode_general_trap_handler(frame: *mut TrapFrame) {
-    let interrupt = SCAUSE.matches_all(SCAUSE::INTERRUPT::SET);
-    if interrupt {
-        smode_interrupt_handler(frame);
-    } else {
-        smode_exception_handler(frame);
-    }
+    let frame = &mut *frame;
+    let interrupt = frame.scause & (1 << 63) != 0;
+    let smode = frame.sstatus & (1 << 8) != 0;
+
+    match (interrupt, smode) {
+        (true, _) => smode_interrupt_handler(frame),
+        (false, true) => smode_exception_handler(frame),
+        (false, false) => umode_exception_handler(frame),
+    }
+
+    if !smode && let Some(thread) = Thread::get_current() {
+        thread.handle_pending_signals(frame);
+    }
+}
+
+impl TaskFrame for TrapFrame {
+    fn store(&self) -> SavedFrame {
+        todo!()
+    }
+
+    fn restore(&mut self, saved: &SavedFrame) {
+        let _ = saved;
+        todo!()
+    }
+
+    fn user_sp(&self) -> usize {
+        todo!()
+    }
+
+    fn user_ip(&self) -> usize {
+        todo!()
+    }
+
+    fn argument(&self) -> u64 {
+        todo!()
+    }
+
+    fn set_user_sp(&mut self, value: usize) {
+        let _ = value;
+        todo!()
    }
+
+    fn set_user_ip(&mut self, value: usize) {
+        let _ = value;
+        todo!()
+    }
+
+    fn set_argument(&mut self, value: u64) {
+        let _ = value;
+        todo!()
+    }
+
+    fn set_single_step(&mut self, step: bool) {
+        let _ = step;
+        todo!()
+    }
+
+    fn set_return_value(&mut self, value: u64) {
+        let _ = value;
+        todo!()
+    }
+}
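
The `#[repr(C)]` field order in `TrapFrame` above is load-bearing: it must match the store order in the `SAVE_GP_REGS` macro from the trap assembly below (ra, gp, t0-t6, s0-s11, a0-a7 in slots 0-28, then the six control-register slots). A compile-time cross-check along these lines could catch drift (a sketch, not in the commit):

    // Illustrative only: TRAP_CONTEXT_SIZE in the assembly is (29 + 6) * 8.
    const _: () = assert!(core::mem::size_of::<TrapFrame>() == 35 * 8);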

View File

@@ -8,11 +8,12 @@ use device_api::{
     ResetDevice,
 };
 use device_tree::{driver::unflatten_device_tree, DeviceTree, DeviceTreeNodeExt};
+use kernel_arch::Architecture;
 use kernel_arch_riscv64::{
     intrinsics,
     mem::{self, KERNEL_VIRT_OFFSET},
     registers::SIE,
-    sbi, PerCpuData,
+    sbi, ArchitectureImpl, PerCpuData,
 };
 use libk::{arch::Cpu, config};
 use libk_mm::{
@@ -59,7 +60,7 @@ impl Platform for Riscv64 {
     type L3 = L3;
 
     unsafe fn reset(&self) -> ! {
-        loop {}
+        ArchitectureImpl::halt();
     }
 
     unsafe fn send_ipi(&self, target: IpiDeliveryTarget, msg: IpiMessage) -> Result<bool, Error> {
@@ -76,7 +77,7 @@ impl Platform for Riscv64 {
     fn register_reset_device(&self, reset: Arc<dyn ResetDevice>) -> Result<(), Error> {
         let _ = reset;
-        loop {}
+        Ok(())
     }
 }
@@ -119,6 +120,7 @@ impl Riscv64 {
         let aligned_end = end.page_align_up::<L3>();
         let size = aligned_end - aligned_start;
 
+        log::info!("Reserve initrd @ {:#x}..{:#x}", aligned_start, aligned_end);
         reserve_region(
             "initrd",
             PhysicalMemoryRegion {
@@ -159,6 +161,7 @@ impl Riscv64 {
         let per_cpu = PerCpuData {
             tmp_t0: 0,
+            umode_sp: 0,
             smode_sp: 0,
 
             bootstrap: is_bsp,
@@ -220,7 +223,7 @@ impl Riscv64 {
         // Setup the timer
         SIE.modify(SIE::STIE::SET);
-        sbi::sbi_set_timer(intrinsics::rdtime() + 1_000_000);
+        sbi::sbi_set_timer(intrinsics::rdtime() + 100_000);
 
         // Test call into M-mode
         // core::arch::asm!("ecall", in("a0") MModeFunction::WriteTimerComparator as u64, in("a1") 0x4321);

View File

@@ -2,112 +2,158 @@
 .section .text
 
-.set SMODE_TRAP_STATE_SIZE, (8 * 18)
+// ra+gp, 7 tN, 12 sN, 8 aN
+.set GP_REGS_SIZE, (2 + 7 + 12 + 8) * 8
+// U-mode sp, sstatus, sepc, stval, scause, sscratch
+.set CTL_REGS_SIZE, 6 * 8
+.set TRAP_CONTEXT_SIZE, (GP_REGS_SIZE) + (CTL_REGS_SIZE)
 
-.macro SAVE_TRAP_CONTEXT
-    addi sp, sp, -SMODE_TRAP_STATE_SIZE
-    sd ra, 8 * 0(sp)
-    sd gp, 8 * 1(sp)
-    sd t0, 8 * 2(sp)
-    sd t1, 8 * 3(sp)
-    sd t2, 8 * 4(sp)
-    sd a0, 8 * 5(sp)
-    sd a1, 8 * 6(sp)
-    sd a2, 8 * 7(sp)
-    sd a3, 8 * 8(sp)
-    sd a4, 8 * 9(sp)
-    sd a5, 8 * 10(sp)
-    sd a6, 8 * 11(sp)
-    sd a7, 8 * 12(sp)
-    sd t3, 8 * 13(sp)
-    sd t4, 8 * 14(sp)
-    sd t5, 8 * 15(sp)
-    sd t6, 8 * 16(sp)
-    sd s0, 8 * 17(sp)
+.set REG_UMODE_SP, (GP_REGS_SIZE + 0 * 8)
+.set REG_SSTATUS, (GP_REGS_SIZE + 1 * 8)
+.set REG_SEPC, (GP_REGS_SIZE + 2 * 8)
+.set REG_STVAL, (GP_REGS_SIZE + 3 * 8)
+.set REG_SCAUSE, (GP_REGS_SIZE + 4 * 8)
+.set REG_SSCRATCH, (GP_REGS_SIZE + 5 * 8)
+
+.macro SAVE_GP_REGS
+    // Save all general-purpose registers, except:
+    // * sp (saved elsewhere)
+    // * tp (saved elsewhere)
+    sd ra, 0 * 8(sp)
+    sd gp, 1 * 8(sp)
+
+    sd t0, 2 * 8(sp)
+    sd t1, 3 * 8(sp)
+    sd t2, 4 * 8(sp)
+    sd t3, 5 * 8(sp)
+    sd t4, 6 * 8(sp)
+    sd t5, 7 * 8(sp)
+    sd t6, 8 * 8(sp)
+
+    sd s0, 9 * 8(sp)
+    sd s1, 10 * 8(sp)
+    sd s2, 11 * 8(sp)
+    sd s3, 12 * 8(sp)
+    sd s4, 13 * 8(sp)
+    sd s5, 14 * 8(sp)
+    sd s6, 15 * 8(sp)
+    sd s7, 16 * 8(sp)
+    sd s8, 17 * 8(sp)
+    sd s9, 18 * 8(sp)
+    sd s10, 19 * 8(sp)
+    sd s11, 20 * 8(sp)
+
+    sd a0, 21 * 8(sp)
+    sd a1, 22 * 8(sp)
+    sd a2, 23 * 8(sp)
+    sd a3, 24 * 8(sp)
+    sd a4, 25 * 8(sp)
+    sd a5, 26 * 8(sp)
+    sd a6, 27 * 8(sp)
+    sd a7, 28 * 8(sp)
 .endm
 
-.macro LOAD_TRAP_CONTEXT
-    ld ra, 8 * 0(sp)
-    ld gp, 8 * 1(sp)
-    ld t0, 8 * 2(sp)
-    ld t1, 8 * 3(sp)
-    ld t2, 8 * 4(sp)
-    ld a0, 8 * 5(sp)
-    ld a1, 8 * 6(sp)
-    ld a2, 8 * 7(sp)
-    ld a3, 8 * 8(sp)
-    ld a4, 8 * 9(sp)
-    ld a5, 8 * 10(sp)
-    ld a6, 8 * 11(sp)
-    ld a7, 8 * 12(sp)
-    ld t3, 8 * 13(sp)
-    ld t4, 8 * 14(sp)
-    ld t5, 8 * 15(sp)
-    ld t6, 8 * 16(sp)
-    ld s0, 8 * 17(sp)
-    addi sp, sp, SMODE_TRAP_STATE_SIZE
-.endm
-
-// * Switch stack to kernel if needed
-// * Store pre-trap register state on the stack
-// * Make a0 point to the frame
-// * Make s0 = original_tp
-.macro SMODE_TRAP_ENTER
-    // Stack may be either U-mode or S-mode stack depending on sstatus.SPP
-    // Original tp -> sscratch
-    // Per-CPU struct -> tp
-    csrrw tp, sscratch, tp
-    // Store t0 in per-CPU scratch space
-    sd t0, 0(tp)
-    // Determine where the interrupt came from (SPP is bit 8)
-    csrr t0, sstatus
-    andi t0, t0, (1 << 8)
-    bnez t0, 1f
-
-    // Trap came from U-mode
-    // TODO
-    j .
-1:
-    // Trap came from S-mode
-2:
-    // Either stack was adjusted or the trap came from S-mode
-    // Load t0 back
-    ld t0, 0(tp)
-    SAVE_TRAP_CONTEXT
-    mv a0, sp
-    csrr s0, sscratch
-.endm
-
-// * Set sscratch to pre-trap tp
-// * Restore the pre-trap register state
-// * Return
-.macro SMODE_TRAP_LEAVE_TO_SMODE
-    csrw sscratch, s0
-    // Restore the state
-    LOAD_TRAP_CONTEXT
-    // Swap the tp<->scratch back
-    csrrw tp, sscratch, tp
-    sret
+.macro LOAD_GP_REGS
+    ld ra, 0 * 8(sp)
+    ld gp, 1 * 8(sp)
+
+    ld t0, 2 * 8(sp)
+    ld t1, 3 * 8(sp)
+    ld t2, 4 * 8(sp)
+    ld t3, 5 * 8(sp)
+    ld t4, 6 * 8(sp)
+    ld t5, 7 * 8(sp)
+    ld t6, 8 * 8(sp)
+
+    ld s0, 9 * 8(sp)
+    ld s1, 10 * 8(sp)
+    ld s2, 11 * 8(sp)
+    ld s3, 12 * 8(sp)
+    ld s4, 13 * 8(sp)
+    ld s5, 14 * 8(sp)
+    ld s6, 15 * 8(sp)
+    ld s7, 16 * 8(sp)
+    ld s8, 17 * 8(sp)
+    ld s9, 18 * 8(sp)
+    ld s10, 19 * 8(sp)
+    ld s11, 20 * 8(sp)
+
+    ld a0, 21 * 8(sp)
+    ld a1, 22 * 8(sp)
+    ld a2, 23 * 8(sp)
+    ld a3, 24 * 8(sp)
+    ld a4, 25 * 8(sp)
+    ld a5, 26 * 8(sp)
+    ld a6, 27 * 8(sp)
+    ld a7, 28 * 8(sp)
 .endm
 
 .macro SMODE_TRAP n, handler
 .type __rv64_smode_trap_\n, @function
 __rv64_smode_trap_\n:
-    SMODE_TRAP_ENTER
-
-    // TODO when coming through a non-zero vector, trap is always asynchronous, so
-    // the interrupt handler can be called directly instead of a more generic
-    // trap handler to avoid an extra indirection
-    call \handler
-
-    // TODO U-mode trap return
-    SMODE_TRAP_LEAVE_TO_SMODE
+    // If coming from userspace, sscratch = kernel-mode tp
+    // If coming from kernelspace, sscratch = 0
+    csrrw tp, sscratch, tp
+    bnez tp, 1f
+
+    // Coming from S-mode
+    // tp = 0, sscratch contains kernel tp
+    csrr tp, sscratch
+    sd sp, 16(tp) // Set proper S-mode sp
+1:
+    sd sp, 8(tp)  // Store U-mode sp
+    ld sp, 16(tp) // Load S-mode sp
+
+    // Store pre-trap context
+    addi sp, sp, -(TRAP_CONTEXT_SIZE)
+    SAVE_GP_REGS
+
+    // Save special registers
+    ld t0, 8(tp)
+    csrr t1, sstatus
+    csrr t2, sepc
+    csrr t3, stval
+    csrr t4, scause
+    csrr t5, sscratch
+
+    sd t0, REG_UMODE_SP (sp)
+    sd t1, REG_SSTATUS (sp)
+    sd t2, REG_SEPC (sp)
+    sd t3, REG_STVAL (sp)
+    sd t4, REG_SCAUSE (sp)
+    sd t5, REG_SSCRATCH (sp)
+
+    // Reset sscratch to zero to make sure an S-mode -> S-mode nested exception
+    // happens properly
+    csrw sscratch, zero
+
+    mv a0, sp
+    call \handler
+
+    // Return from exception
+    ld t0, REG_SSTATUS (sp)
+    andi t0, t0, (1 << 8)
+    bnez t0, 2f
+
+    // Return to U-mode
+    // Restore sscratch to a proper value
+    csrw sscratch, tp
+2:
+    // Restore special registers
+    ld t0, REG_SSTATUS (sp)
+    ld t1, REG_SEPC (sp)
+    csrw sstatus, t0
+    csrw sepc, t1
+
+    // Restore general-purpose registers
+    LOAD_GP_REGS
+    ld tp, REG_SSCRATCH (sp)
+    ld sp, REG_UMODE_SP (sp)
+
+    sret
 .size __rv64_smode_trap_\n, . - __rv64_smode_trap_\n
 .endm
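
The invariant this entry path maintains, for readers tracing the `csrrw`: while a hart runs in U-mode, sscratch holds the per-CPU data pointer and tp belongs to userspace; while it runs in S-mode, sscratch is zero and tp itself is the per-CPU pointer. After the initial swap, tp is therefore non-zero exactly for traps from U-mode, which is what the `bnez` dispatches on; the user tp parked in sscratch is captured into the frame via `csrr t5, sscratch`, and the offsets 0/8/16 used against tp correspond to the `tmp_t0`/`umode_sp`/`smode_sp` fields of `PerCpuData` (0x00/0x08/0x10) declared earlier in this commit.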

View File

@@ -58,7 +58,7 @@ pub fn kinit() -> Result<(), Error> {
     // TODO move this to userspace so it doesn't block the init process, maybe lazy-load on first
     // attempt to load a module?
-    #[cfg(not(target_arch = "aarch64"))]
+    #[cfg(all(not(target_arch = "aarch64"), not(target_arch = "riscv64")))]
     {
         use libk::module::load_kernel_symbol_table;

View File

@@ -29,6 +29,7 @@
     clippy::match_ref_pats,
     clippy::match_single_binding,
     clippy::missing_transmute_annotations,
+    clippy::modulo_one,
     async_fn_in_trait
 )]
 #![deny(missing_docs)]

View File

@@ -53,6 +53,9 @@ pub(crate) fn map_memory(
             space.allocate(None, len, backing, attrs)
         })
+        .inspect_err(|error| {
+            log::warn!("map_memory({len}) failed: {error:?}");
+        })
 }
 
 pub(crate) fn unmap_memory(address: usize, len: usize) -> Result<(), Error> {

View File

@@ -151,7 +151,7 @@ impl<F: Read + Seek> Riscv64Builder<F> {
         let entry = shift_pfn(paddr) | (PageAttributes::V | flags).bits();
 
-        let l2i = (vaddr >> L2_SHIFT) as usize & 0x1FF - start_l2i;
+        let l2i = ((vaddr >> L2_SHIFT) as usize & 0x1FF) - start_l2i;
         let l3i = (vaddr >> L3_SHIFT) as usize & 0x1FF;
 
         let l3 = &mut tables.kernel_l3s[l2i];
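
Spelled out, since this change is easy to misread as cosmetic: in Rust, `-` binds tighter than `&`, so the old expression parsed as `(vaddr >> L2_SHIFT) as usize & (0x1FF - start_l2i)` and computed a wrong L2 index whenever `start_l2i` was non-zero; the added parentheses force the intended `(... & 0x1FF) - start_l2i`.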

View File

@@ -17,7 +17,11 @@ pub struct QemuRiscv64;
 #[derive(Debug)]
 pub enum Image {
-    OpenSBI { kernel: PathBuf, bios: PathBuf },
+    OpenSBI {
+        bios: PathBuf,
+        kernel: PathBuf,
+        initrd: PathBuf,
+    },
 }
 
 impl IntoArgs for Machine {
@@ -41,9 +45,15 @@ impl IntoArgs for Cpu {
 impl IntoArgs for Image {
     fn add_args(&self, command: &mut Command) {
         match self {
-            Self::OpenSBI { kernel, bios } => {
+            Self::OpenSBI {
+                bios,
+                kernel,
+                initrd,
+            } => {
                 command.arg("-kernel");
                 command.arg(kernel);
+                command.arg("-initrd");
+                command.arg(initrd);
                 command.arg("-bios");
                 command.arg(bios);
             }

View File

@@ -63,6 +63,13 @@ pub struct Dtv {
     specific: Vec<*mut c_void>,
 }
 
+#[allow(missing_docs)]
+pub struct TlsInfo {
+    base: usize,
+    tp: usize,
+    module0_offset: Option<usize>,
+}
+
 struct TcbHeader {
     #[allow(unused)]
     self_pointer: usize,
@@ -170,8 +177,10 @@ impl Dtv {
     /// Will panic if key == 0.
     /// Will panic if key is longer than the DTV itself.
     pub fn set_specific(&mut self, key: usize, value: *mut c_void, grow: bool) {
-        self.try_set_specific(key, value, grow)
-            .expect("Dtv::set_specific(): invalid key")
+        if self.try_set_specific(key, value, grow).is_err() {
+            crate::debug_trace!("Dtv::set_specific(): invalid key {key}");
+            panic!("Dtv::set_specific(): invalid key {key}")
+        }
     }
 
     /// Sets a DTV entry for a thread-specific key.
@@ -203,7 +212,13 @@ impl Dtv {
     /// Will panic if key == 0.
     /// Will panic if key is larger than the DTV itself.
     pub fn get(&self, key: usize) -> *mut c_void {
-        Self::get_key(&self.entries, key).expect("Out-of-bounds DTV key")
+        match Self::get_key(&self.entries, key) {
+            Some(value) => value,
+            None => {
+                crate::debug_trace!("Dtv::get(): out-of-bounds DTV key: {key}");
+                panic!("Dtv::get(): out-of-bounds DTV key: {key}");
+            }
+        }
     }
 
     /// Sets a DTV entry, growing the DTV allocation if necessary
@@ -216,7 +231,8 @@ impl Dtv {
             self.entries.resize(key, null_mut());
         }
         if !Self::set_key(&mut self.entries, key, value) {
-            panic!("Dtv::set(): invalid key");
+            crate::debug_trace!("Dtv::set(): invalid key {key}");
+            panic!("Dtv::set(): invalid key {key}");
        }
     }
 }
@@ -231,9 +247,9 @@ pub fn init_tls_from_auxv<'a, I: Iterator<Item = &'a AuxValue>>(
     };
 
     if force || !tls_image.already_initialized {
-        let (base, tp) = clone_tls(&tls_image)?;
-        unsafe { set_thread_pointer(tp) }?;
-        setup_dtv(&tls_image, base)?;
+        let tls = clone_tls(&tls_image)?;
+        unsafe { set_thread_pointer(tls.tp) }?;
+        setup_dtv(&tls_image, &tls)?;
     }
 
     Ok(Some(tls_image))
@@ -248,9 +264,9 @@ pub fn init_tls(image: Option<&TlsImage>, force: bool) -> Result<(), Error> {
     };
 
     if force || !image.already_initialized {
-        let (base, tp) = clone_tls(image)?;
-        unsafe { set_thread_pointer(tp) }?;
-        setup_dtv(image, base)?;
+        let tls = clone_tls(image)?;
+        unsafe { set_thread_pointer(tls.tp) }?;
+        setup_dtv(image, &tls)?;
     }
 
     Ok(())
@@ -265,16 +281,31 @@ fn get_tcb_mut() -> &'static mut TcbHeader {
     unsafe { &mut *get_tcb_raw() }
 }
 
-fn setup_dtv(image: &TlsImage, tls_base: usize) -> Result<(), Error> {
+fn setup_dtv(image: &TlsImage, tls_info: &TlsInfo) -> Result<(), Error> {
+    #[cfg(any(target_arch = "riscv64", rust_analyzer))]
+    const DTV_OFFSET: usize = 0x800;
+    #[cfg(any(not(target_arch = "riscv64"), rust_analyzer))]
+    const DTV_OFFSET: usize = 0;
+
+    let dtv = get_dtv();
+
+    // Executable itself
+    // NOTE if module 1 is specified again by the dynamic loader, it will be overridden with
+    // what the dynamic loader says
+    if let Some(module0_offset) = tls_info.module0_offset {
+        dtv.set(
+            1,
+            core::ptr::without_provenance_mut(tls_info.base + module0_offset + DTV_OFFSET),
+        );
+    }
+
     if image.module_offsets.is_empty() {
         return Ok(());
     }
-    let dtv = get_dtv();
     for &(module_id, module_offset) in image.module_offsets.iter() {
         assert!(module_offset < image.full_size);
         dtv.set(
             module_id,
-            core::ptr::with_exposed_provenance_mut(tls_base + module_offset),
+            core::ptr::with_exposed_provenance_mut(tls_info.base + module_offset + DTV_OFFSET),
         );
     }
     Ok(())
@@ -292,14 +323,12 @@ pub fn get_dtv() -> &'static mut Dtv {
 }
 
 #[cfg(all(
-    any(target_arch = "x86", target_arch = "x86_64"),
+    any(target_arch = "x86", target_arch = "x86_64", target_arch = "riscv64"),
     any(feature = "__tls_get_addr", rust_analyzer)
 ))]
 #[no_mangle]
 unsafe extern "C" fn __tls_get_addr(index: *mut usize) -> *mut c_void {
     let module_id = index.read();
     let offset = index.add(1).read();
-    assert!(module_id > 0);
     get_dtv().get(module_id).add(offset)
 }
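
The `DTV_OFFSET = 0x800` above matches the RISC-V ELF psABI's TLS DTV bias: riscv64 code sequences and relocations offset DTV-relative values by -0x800 (to make full use of signed 12-bit immediates), so the pointers stored into the DTV must be pre-biased by +0x800 for `__tls_get_addr(module, offset)` to resolve to the right address. On x86 and x86_64 the bias is zero, hence the cfg split.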

View File

@@ -3,10 +3,17 @@
 use abi::error::Error;
 
 pub fn get_thread_pointer() -> usize {
-    todo!()
+    let output: usize;
+    unsafe { core::arch::asm!("mv {0}, tp", out(reg) output) };
+    output
 }
 
+/// Writes `value` into `tp` register.
+///
+/// # Safety
+///
+/// Usual pointer rules apply.
 pub unsafe fn set_thread_pointer(value: usize) -> Result<(), Error> {
-    let _ = value;
-    todo!()
+    core::arch::asm!("mv tp, {0}", in(reg) value);
+    Ok(())
 }

View File

@@ -1,9 +1,10 @@
 use abi::{
     error::Error,
     mem::{MappingFlags, MappingSource},
+    process::ExitCode,
 };
 
-use super::TlsImage;
+use super::{TlsImage, TlsInfo};
 
 // Variant I TLS layout:
 //
@@ -16,14 +17,16 @@ use super::TlsImage;
 /// Creates a new TLS image in the process memory, copying data from the TLS master copy (if any).
 /// Returns the resulting thread pointer.
-pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
+pub fn clone_tls(image: &TlsImage) -> Result<TlsInfo, Error> {
     const TCB_SIZE: usize = size_of::<usize>() * 2;
 
     if !image.align.is_power_of_two() {
-        panic!("TLS layout not aligned to a power of two: {}", image.align)
+        crate::debug_trace!("TLS layout not aligned to a power of two: {}", image.align);
+        unsafe { crate::sys::exit_process(ExitCode::Exited(1)) };
     }
 
     if image.align > 0x1000 {
-        panic!("TODO: TLS alignment larger than a page size is not supported");
+        crate::debug_trace!("TODO: TLS alignment larger than a page size is not supported");
+        unsafe { crate::sys::exit_process(ExitCode::Exited(1)) };
     }
 
     // TCB size, padded to align. Also the start of the first module
@@ -72,7 +75,11 @@ pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
     crate::debug_trace!("TLS: base={:#x}, tp={:#x}", base, tp);
 
-    Ok((base, tp))
+    Ok(TlsInfo {
+        base,
+        tp,
+        module0_offset: Some(tcb_aligned_size),
+    })
 }
 
 pub(super) fn get_tcb_raw(tp: usize) -> *mut u8 {

View File

@@ -3,7 +3,7 @@ use abi::{
     mem::{MappingFlags, MappingSource},
 };
 
-use super::TlsImage;
+use super::{TlsImage, TlsInfo};
 
 // Variant II TLS layout:
 //
@@ -14,7 +14,7 @@ use super::TlsImage;
 /// Creates a new TLS image in the process memory, copying data from the TLS master copy (if any).
 /// Returns the resulting thread pointer.
-pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
+pub fn clone_tls(image: &TlsImage) -> Result<TlsInfo, Error> {
     // Basically, the layout is:
     // * align(image.full_size) below the TP
     // * tcb_size starting with the TP
@@ -73,7 +73,12 @@ pub fn clone_tls(image: &TlsImage) -> Result<(usize, usize), Error> {
     crate::debug_trace!("TLS: base={:#x}, tp={:#x}", base, tp);
 
-    Ok((base, tp))
+    Ok(TlsInfo {
+        base,
+        tp,
+        module0_offset: None,
+    })
 }
 
 // In Variant II, the TP points directly at the TCB start

View File

@@ -2,52 +2,50 @@
 #[macro_export]
 macro_rules! syscall {
     ($num:expr $(,)?) => {{
-        let _ = $num;
-        todo!()
+        let mut a0 = usize::from($num);
+        core::arch::asm!("ecall", inlateout("a0") a0);
+        a0
     }};
-    ($num:expr, $a0:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        todo!()
+    ($num:expr, $a1:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1);
+        a0
     }};
-    ($num:expr, $a0:expr, $a1:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        let _ = $a1;
-        todo!()
+    ($num:expr, $a1:expr, $a2:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1, in("a2") $a2);
+        a0
     }};
-    ($num:expr, $a0:expr, $a1:expr, $a2:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        let _ = $a1;
-        let _ = $a2;
-        todo!()
+    ($num:expr, $a1:expr, $a2:expr, $a3:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!("ecall", inlateout("a0") a0, in("a1") $a1, in("a2") $a2, in("a3") $a3);
+        a0
     }};
-    ($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        let _ = $a1;
-        let _ = $a2;
-        let _ = $a3;
-        todo!()
+    ($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!(
+            "ecall",
+            inlateout("a0") a0,
+            in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4
+        );
+        a0
     }};
-    ($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        let _ = $a1;
-        let _ = $a2;
-        let _ = $a3;
-        let _ = $a4;
-        todo!()
+    ($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!(
+            "ecall",
+            inlateout("a0") a0,
+            in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4, in("a5") $a5
+        );
+        a0
     }};
-    ($num:expr, $a0:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr $(,)?) => {{
-        let _ = $num;
-        let _ = $a0;
-        let _ = $a1;
-        let _ = $a2;
-        let _ = $a3;
-        let _ = $a4;
-        let _ = $a5;
-        todo!()
+    ($num:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr, $a6:expr $(,)?) => {{
+        let mut a0 = usize::from($num);
+        core::arch::asm!(
+            "ecall",
+            inlateout("a0") a0,
+            in("a1") $a1, in("a2") $a2, in("a3") $a3, in("a4") $a4, in("a5") $a5, in("a6") $a6
+        );
+        a0
     }};
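
A quick usage sketch for the rewritten macro (the syscall name is hypothetical, not part of this commit): the function code goes to a0 via `usize::from`, arguments fill a1 onward, and the kernel's return value comes back in a0, matching the `frame.an[0] = result` write in the U-mode ecall handler elsewhere in this commit.

    // Illustrative only; assumes a SyscallFunction-style enum convertible to usize.
    unsafe fn debug_trace(msg: &str) -> usize {
        syscall!(
            SyscallFunction::DebugTrace, // hypothetical function code
            msg.as_ptr() as usize,
            msg.len()
        )
    }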

View File

@@ -0,0 +1,4 @@
+init:1:wait:/sbin/rc default
+logd:1:once:/sbin/logd
+user:1:once:/sbin/login /dev/ttyS0

View File

@@ -71,17 +71,13 @@ fn run(binary: &str, args: &[String]) -> Result<!, Error> {
         });
 
         for module in layout.segments.iter() {
-            if module.object_id == 0 {
-                continue;
-            }
-
             auxv.push(AuxValue {
                 tag: auxv::TLS_MODULE_ID,
-                val: module.object_id as _,
+                val: module.object_id as u64 + 1,
             });
             auxv.push(AuxValue {
                 tag: auxv::TLS_MODULE_OFFSET,
-                val: module.offset as _,
+                val: module.offset as u64,
             });
         }
     }
@@ -124,6 +120,12 @@ unsafe fn enter(entry: extern "C" fn(usize), argument: usize) -> ! {
             options(att_syntax, noreturn)
         );
     }
+    #[cfg(any(target_arch = "riscv64", rust_analyzer))]
+    {
+        let _ = entry;
+        let _ = argument;
+        todo!()
+    }
     #[cfg(any(target_arch = "aarch64", target_arch = "x86_64", rust_analyzer))]
     {
         entry(argument);

View File

@@ -8,6 +8,8 @@ mod aarch64;
 mod x86_64;
 #[cfg(any(target_arch = "x86", rust_analyzer))]
 mod i686;
+#[cfg(any(target_arch = "riscv64", rust_analyzer))]
+mod riscv64;
 
 pub enum RelaValue {
     DQWord(i64, i64),
View File

@@ -0,0 +1,33 @@
+use elf::relocation::{Rel, Rela};
+
+use crate::{error::Error, object::ResolvedSymbol, state::State};
+
+use super::{RelValue, RelaValue, Relocation};
+
+impl Relocation for Rel {
+    type Value = RelValue;
+
+    fn resolve(
+        &self,
+        _state: &State,
+        _name: &str,
+        _symbol: &ResolvedSymbol,
+        _load_base: usize,
+    ) -> Result<Option<Self::Value>, Error> {
+        todo!()
+    }
+}
+
+impl Relocation for Rela {
+    type Value = RelaValue;
+
+    fn resolve(
+        &self,
+        _state: &State,
+        _name: &str,
+        _symbol: &ResolvedSymbol,
+        _load_base: usize,
+    ) -> Result<Option<Self::Value>, Error> {
+        todo!()
+    }
+}

View File

@@ -13,7 +13,7 @@ cfg_if! {
     } else if #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] {
         mod variant2;
         pub use variant2::TlsLayoutImpl;
-    } else if #[cfg(target_arch = "aarch64")] {
+    } else if #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] {
         mod variant1;
         pub use variant1::TlsLayoutImpl;
     }

View File

@@ -42,7 +42,7 @@ pub struct KernelProcessed(pub KernelBuilt);
 pub struct InitrdGenerated(pub PathBuf);
 pub struct ImageBuilt(pub PathBuf);
 pub enum AllBuilt {
-    Riscv64(KernelProcessed),
+    Riscv64(KernelProcessed, InitrdGenerated),
     X86_64(ImageBuilt),
     AArch64(KernelProcessed, InitrdGenerated),
     I686(ImageBuilt),
@@ -99,16 +99,13 @@ pub fn build_all(env: &BuildEnv) -> Result<AllBuilt, Error> {
     // for module in modules {
     //     install_extra.push((module.clone(), module.file_name().unwrap().into()));
     // }
-    if env.arch == Arch::riscv64 {
-        return Ok(AllBuilt::Riscv64(kernel));
-    }
-
     // Userspace stuff
     let initrd = userspace::build_initrd(env, install_extra, check)?;
 
     // Build target-specific image
     let image = match env.arch {
-        Arch::riscv64 => AllBuilt::Riscv64(kernel),
+        Arch::riscv64 => AllBuilt::Riscv64(kernel, initrd),
         Arch::aarch64 => AllBuilt::AArch64(kernel, initrd),
         Arch::x86_64 => AllBuilt::X86_64(x86_64::build_image(env, kernel, initrd)?),
         Arch::i686 => AllBuilt::I686(i686::build_image(env, kernel, initrd)?),

View File

@@ -22,6 +22,12 @@ pub struct AArch64TargetConfig {
     pub components: BuildComponents,
 }
 
+#[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
+#[serde(default)]
+pub struct Riscv64TargetConfig {
+    pub components: BuildComponents,
+}
+
 #[derive(Debug, Default, serde::Deserialize, serde::Serialize)]
 #[serde(default)]
 pub struct X86_64TargetConfig {
@@ -38,6 +44,7 @@ pub struct I686TargetConfig {
 #[serde(default)]
 pub struct TargetConfig {
     pub aarch64: AArch64TargetConfig,
+    pub riscv64: Riscv64TargetConfig,
     pub x86_64: X86_64TargetConfig,
     pub i686: I686TargetConfig,
 }
@@ -193,7 +200,7 @@ impl BuildEnv {
 impl XTaskConfig {
     pub fn components(&self, env: &BuildEnv) -> &BuildComponents {
         match env.arch {
-            Arch::riscv64 => todo!(),
+            Arch::riscv64 => &self.target.riscv64.components,
             Arch::aarch64 => &self.target.aarch64.components,
             Arch::x86_64 => &self.target.x86_64.components,
             Arch::i686 => &self.target.i686.components,

View File

@@ -260,6 +260,7 @@ fn run_riscv64(
     qemu_bin: Option<PathBuf>,
     devices: Vec<QemuDevice>,
     kernel: PathBuf,
+    initrd: PathBuf,
 ) -> Result<Command, Error> {
     let _ = config;
     let _ = devices;
@@ -272,7 +273,11 @@ fn run_riscv64(
     qemu.with_serial(QemuSerialTarget::MonStdio)
         .with_machine(riscv64::Machine::Virt)
         .with_cpu(riscv64::Cpu::Rv64)
-        .with_boot_image(riscv64::Image::OpenSBI { kernel, bios });
+        .with_boot_image(riscv64::Image::OpenSBI {
+            kernel,
+            initrd,
+            bios,
+        });
 
     Ok(qemu.into_command())
 }
@@ -349,8 +354,8 @@ pub fn run(
     add_devices_from_config(&mut devices, disk.as_ref(), &config)?;
 
     let mut command = match built {
-        AllBuilt::Riscv64(KernelProcessed(KernelBuilt(kernel))) => {
-            run_riscv64(&config, &env, qemu, devices, kernel)?
+        AllBuilt::Riscv64(KernelProcessed(KernelBuilt(kernel)), InitrdGenerated(initrd)) => {
+            run_riscv64(&config, &env, qemu, devices, kernel, initrd)?
         }
         AllBuilt::AArch64(KernelProcessed(KernelBuilt(kernel)), InitrdGenerated(initrd)) => {
             make_kernel_bin(kernel, &kernel_bin)?;