Compare commits

...

3 Commits

6 changed files with 184 additions and 78 deletions

View File

@ -51,7 +51,9 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
// Copy new SATP
let satp = inner.satp.get();
let asid = inner.satp.read(SATP::ASID);
if satp != SATP.get() {
mem::tlb_flush_asid(asid as usize);
SATP.set(satp);
}
cpu.smode_sp = self.stack_top;
@ -149,7 +151,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
unsafe fn enter(&self) -> ! {
unsafe {
self.load_state();
-mem::tlb_flush_full();
__rv64_enter_task(self.inner.get())
}
}
@ -162,7 +163,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
unsafe {
from.store_state();
self.load_state();
-mem::tlb_flush_full();
__rv64_switch_task(self.inner.get(), from.inner.get())
}
}
@ -170,7 +170,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
unsafe fn switch_and_drop(&self, thread: *const ()) {
unsafe {
self.load_state();
-mem::tlb_flush_full();
__rv64_switch_task_and_drop(self.inner.get(), thread)
}
}

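The hunk above makes the address-space switch conditional: the incoming task's SATP is compared against the current SATP CSR, and only on a mismatch does the kernel flush the TLB entries tagged with the new ASID and install the new value, so switching between threads of the same process no longer pays for a flush. Below is a minimal, self-contained sketch of that decision, assuming the RV64 satp layout (ASID in bits 59:44); Satp and switch_satp are illustrative names, not kernel API.

#[derive(Clone, Copy, PartialEq, Eq)]
struct Satp(u64);

impl Satp {
    /// ASID field of satp on RV64 (bits 59:44).
    fn asid(self) -> u64 {
        (self.0 >> 44) & 0xffff
    }
}

/// Returns the ASID whose cached translations should be flushed before writing
/// the new satp, or None when the address space is unchanged and both the
/// flush and the CSR write can be skipped.
fn switch_satp(current: Satp, next: Satp) -> Option<u64> {
    if next != current {
        Some(next.asid())
    } else {
        None
    }
}
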
View File

@ -130,9 +130,10 @@ unsafe fn map_device_memory_l3(
PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
}
}
-tlb_flush_full();
-return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
+let start = DEVICE_MAPPING_OFFSET + i * L3::SIZE;
+tlb_flush_range_va(start, count * L3::SIZE);
+return Ok(start);
}
Err(Error::OutOfMemory)
@ -157,12 +158,12 @@ unsafe fn map_device_memory_l2(
for j in 0..count {
DEVICE_MAPPING_L2[i + j] =
PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
-// tlb_flush_va(DEVICE_MAPPING_OFFSET + (i + j) * L2::SIZE);
}
}
-tlb_flush_full();
-return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
+let start = DEVICE_MAPPING_OFFSET + i * L2::SIZE;
+tlb_flush_range_va(start, count * L2::SIZE);
+return Ok(start);
}
Err(Error::OutOfMemory)
@ -221,9 +222,8 @@ pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTabl
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
}
-tlb_flush_va(page);
}
+tlb_flush_range_va(map.base_address, map.page_count * L3::SIZE);
}
L2::SIZE => todo!(),
_ => unimplemented!(),
@ -259,7 +259,7 @@ pub unsafe fn unmap_lower_half() {
let mut tables = KERNEL_TABLES.lock();
let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
tables.l1.data[kernel_l1i_lower] = 0;
-tlb_flush_full();
+tlb_flush_range_va(0x0, L1::SIZE);
}
/// Sets up run-time kernel translation tables.
@ -301,22 +301,52 @@ pub unsafe fn setup_fixed_tables() {
tlb_flush_full();
}
pub fn tlb_flush_global_full() {
tlb_flush_full();
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_global_va(va: usize) {
tlb_flush_va(va);
// TODO send TLB shootdown IPI to other harts
}
pub fn tlb_flush_range_va(start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va(page);
}
}
pub fn tlb_flush_range_va_asid(asid: usize, start: usize, size: usize) {
let end = (start + size).page_align_up::<L3>();
let start = start.page_align_down::<L3>();
for page in (start..end).step_by(L3::SIZE) {
tlb_flush_va_asid(page, asid);
}
}
#[inline]
pub fn tlb_flush_full() {
-unsafe {
-core::arch::asm!("sfence.vma");
-}
+unsafe { core::arch::asm!("sfence.vma") };
}
#[inline]
pub fn tlb_flush_va(va: usize) {
-unsafe {
-core::arch::asm!("sfence.vma zero, {0}", in(reg) va);
-}
+unsafe { core::arch::asm!("sfence.vma {0}, zero", in(reg) va) };
}
#[inline]
pub fn tlb_flush_asid(asid: usize) {
unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
}
#[inline]
pub fn tlb_flush_va_asid(va: usize, asid: usize) {
-unsafe {
-core::arch::asm!("sfence.vma {0}, {1}", in(reg) asid, in(reg) va);
-}
+unsafe { core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
}
pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {

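The helpers above follow the sfence.vma operand order from the RISC-V privileged specification: rs1 carries a virtual address, rs2 carries an ASID, and x0 in either position widens the fence to all addresses or all address spaces. That is why the rewritten tlb_flush_va and tlb_flush_va_asid now pass the address first and the ASID second, while tlb_flush_range_va rounds the requested range out to L3 page boundaries and fences one page at a time. A standalone model of that rounding loop, assuming 4 KiB L3 pages; flush_page is a stand-in for the per-page sfence.vma issued by tlb_flush_va.

const L3_SIZE: usize = 4096; // assumed leaf page size

fn flush_range(start: usize, size: usize, mut flush_page: impl FnMut(usize)) {
    // Align the end up and the start down to page boundaries, then visit each page.
    let end = (start + size + L3_SIZE - 1) & !(L3_SIZE - 1);
    let start = start & !(L3_SIZE - 1);
    for page in (start..end).step_by(L3_SIZE) {
        flush_page(page);
    }
}

For example, a range of 0x2100 bytes starting at 0x8000_0010 is widened to three pages: 0x8000_0000, 0x8000_1000 and 0x8000_2000.
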
View File

@ -7,7 +7,9 @@ use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::PhysicalRefMut,
process::ProcessAddressSpaceManager,
-table::{EntryLevel, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator},
+table::{
+EntryLevel, EntryLevelDrop, EntryLevelExt, MapAttributes, NextPageTable, TableAllocator,
+},
};
use memtables::riscv64::PageAttributes;
use yggdrasil_abi::error::Error;
@ -15,8 +17,8 @@ use yggdrasil_abi::error::Error;
use crate::mem::{clone_kernel_tables, table::PageEntry};
use super::{
-table::{PageTable, L1, L2, L3},
-tlb_flush_va_asid, KernelTableManagerImpl, USER_BOUNDARY,
+table::{DroppableRange, PageTable, L1, L2, L3},
+KernelTableManagerImpl, USER_BOUNDARY,
};
pub struct ProcessAddressSpaceImpl<TA: TableAllocator> {
@ -81,7 +83,9 @@ impl<TA: TableAllocator> ProcessAddressSpaceManager<TA> for ProcessAddressSpaceI
(physical, self.asid as u64)
}
-unsafe fn clear(&mut self) {}
+unsafe fn clear(&mut self) {
+unsafe { self.l1.drop_range::<TA>(L1::DROPPABLE_RANGE) };
+}
}
impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
@ -109,9 +113,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
}
l3[l3i] = entry;
-tlb_flush_va_asid(virt, self.asid as usize);
-// dc_cvac((&raw const l3[l3i]).addr());
-// tlb_flush_vaae1(virt);
+super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(())
}
@ -128,10 +130,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
let page = l3[l3i].as_page().ok_or(Error::DoesNotExist)?;
l3[l3i] = PageEntry::INVALID;
-tlb_flush_va_asid(virt, self.asid as usize);
-// ic_iallu();
-// dc_cvac((&raw const l3[l3i]).addr());
-// tlb_flush_vaae1(virt);
+super::tlb_flush_va_asid(virt, self.asid as usize);
Ok(page)
}
@ -160,13 +159,14 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
fn drop(&mut self) {
-// // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
-// // is safe, no one refers to the memory
-// unsafe {
-// self.clear();
-// let l1_phys = self.l1.as_physical_address();
-// TA::free_page_table(l1_phys);
-// }
+// SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
+// is safe, no one refers to the memory
+unsafe {
+self.clear();
+let l1_phys = self.l1.as_physical_address();
+TA::free_page_table(l1_phys);
+super::tlb_flush_asid(self.asid as usize);
+}
}
}

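With clear() now delegating to drop_range over L1::DROPPABLE_RANGE, the Drop impl above tears an address space down in a fixed order: release the user-half tables, free the root L1 table, then flush every TLB entry tagged with the ASID so a later address space reusing that ASID cannot hit stale translations. A schematic, self-contained model of that ordering; free_page_table and flush_asid are hypothetical stand-ins for TA::free_page_table and super::tlb_flush_asid.

struct AddressSpace {
    asid: u16,
    user_tables: Vec<usize>, // physical addresses of user-range tables (model only)
    root_table: usize,       // physical address of the L1 table (model only)
}

impl Drop for AddressSpace {
    fn drop(&mut self) {
        // 1. clear(): drop everything below USER_BOUNDARY, freeing intermediate tables.
        for table in self.user_tables.drain(..) {
            free_page_table(table);
        }
        // 2. Free the root (L1) table itself.
        free_page_table(self.root_table);
        // 3. Drop cached translations tagged with this ASID so the ID can be reused safely.
        flush_asid(self.asid);
    }
}

fn free_page_table(_phys: usize) { /* model stub */ }
fn flush_asid(_asid: u16) { /* model stub for sfence.vma zero, asid */ }
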
View File

@ -1,16 +1,19 @@
use core::{
marker::PhantomData,
-ops::{Index, IndexMut},
+ops::{Index, IndexMut, Range},
};
use libk_mm_interface::{
address::{AsPhysicalAddress, PhysicalAddress},
pointer::{PhysicalRef, PhysicalRefMut},
-table::{EntryLevel, NextPageTable, NonTerminalEntryLevel, TableAllocator},
+table::{
+page_index, EntryLevel, EntryLevelDrop, NextPageTable, NonTerminalEntryLevel,
+TableAllocator,
+},
};
use yggdrasil_abi::error::Error;
-use super::KernelTableManagerImpl;
+use super::{KernelTableManagerImpl, USER_BOUNDARY};
pub use memtables::riscv64::PageAttributes;
@ -44,6 +47,18 @@ pub struct PageTable<L: EntryLevel> {
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct PageEntry<L: EntryLevel>(pub u64, PhantomData<L>);
pub(super) trait DroppableRange {
const DROPPABLE_RANGE: Range<usize>;
}
impl DroppableRange for L1 {
const DROPPABLE_RANGE: Range<usize> = 0..page_index::<L1>(USER_BOUNDARY);
}
impl DroppableRange for L2 {
const DROPPABLE_RANGE: Range<usize> = 0..512;
}
impl NonTerminalEntryLevel for L1 {
type NextLevel = L2;
}
@ -93,6 +108,48 @@ impl<L: EntryLevel> PageEntry<L> {
}
}
impl<L: NonTerminalEntryLevel + DroppableRange> EntryLevelDrop for PageTable<L>
where
PageTable<L::NextLevel>: EntryLevelDrop,
{
const FULL_RANGE: Range<usize> = L::DROPPABLE_RANGE;
unsafe fn drop_range<TA: TableAllocator>(&mut self, range: Range<usize>) {
for index in range {
let entry = self[index];
if let Some(table) = entry.as_table() {
unsafe {
let mut table_ref: PhysicalRefMut<
PageTable<L::NextLevel>,
KernelTableManagerImpl,
> = PhysicalRefMut::map(table);
table_ref.drop_all::<TA>();
TA::free_page_table(table);
}
} else if entry.is_present() {
// Memory must've been cleared beforehand, so no non-table entries must be present
panic!(
"Expected a table containing only tables, got table[{}] = {:#x?}",
index, entry.0
);
}
self[index] = PageEntry::INVALID;
// dc_cvac((&raw const self[index]).addr());
}
}
}
impl EntryLevelDrop for PageTable<L3> {
const FULL_RANGE: Range<usize> = 0..512;
// Do nothing
unsafe fn drop_range<TA: TableAllocator>(&mut self, _range: Range<usize>) {}
}
impl<L: NonTerminalEntryLevel + 'static> NextPageTable for PageTable<L> {
type NextLevel = PageTable<L::NextLevel>;
type TableRef = PhysicalRef<'static, PageTable<L::NextLevel>, KernelTableManagerImpl>;

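EntryLevelDrop above frees a translation tree recursively: a non-leaf level walks its droppable index range, recurses into each child table and releases it, while the L3 implementation is a no-op because leaf pages must already have been unmapped by the address-space layer (hence the panic on a present non-table entry). A self-contained model of the same pattern over an in-memory tree; the names are illustrative rather than the kernel's.

enum Entry {
    Invalid,
    Table(Box<Table>),
    Page, // leaf mapping; must already be gone when its table is dropped
}

struct Table {
    entries: Vec<Entry>, // 512 entries per level in the real Sv39 tables
}

fn drop_range(table: &mut Table, range: std::ops::Range<usize>) {
    for index in range {
        match std::mem::replace(&mut table.entries[index], Entry::Invalid) {
            Entry::Table(mut child) => {
                // Recurse over the child's full range, then let the Box free it.
                let len = child.entries.len();
                drop_range(&mut child, 0..len);
            }
            Entry::Page => panic!("table still contains live page mappings"),
            Entry::Invalid => {}
        }
    }
}
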
View File

@ -1,11 +1,6 @@
use core::arch::global_asm;
-use abi::{
-arch::SavedFrame,
-primitive_enum,
-process::{ExitCode, Signal},
-SyscallFunction,
-};
+use abi::{arch::SavedFrame, primitive_enum, process::Signal, SyscallFunction};
use kernel_arch::task::TaskFrame;
use libk::{device::external_interrupt_controller, task::thread::Thread};
use tock_registers::interfaces::ReadWriteable;
@ -17,7 +12,7 @@ use crate::syscall;
use super::{smp, timer};
primitive_enum! {
-pub enum Cause: u64 {
+pub enum Cause: usize {
MisalignedInstruction = 0,
InstructionAccessFault = 1,
IllegalInstruction = 2,
@ -39,18 +34,18 @@ primitive_enum! {
#[repr(C)]
pub struct TrapFrame {
// General-purpose
-pub ra: u64,
-pub gp: u64,
-pub tn: [u64; 7],
-pub sn: [u64; 12],
+pub ra: usize,
+pub gp: usize,
+pub tn: [usize; 7],
+pub sn: [usize; 12],
pub an: [usize; 8],
// Special
-pub sp: u64,
-pub sstatus: u64,
-pub sepc: u64,
-pub stval: u64,
-pub scause: u64,
-pub tp: u64,
+pub sp: usize,
+pub sstatus: usize,
+pub sepc: usize,
+pub stval: usize,
+pub scause: usize,
+pub tp: usize,
}
pub fn init_smode_exceptions() {
@ -82,7 +77,10 @@ unsafe fn umode_exception_handler(frame: &mut TrapFrame) {
Some(Cause::EcallUmode) => {
let func = frame.an[0];
if func == usize::from(SyscallFunction::ExitSignal) {
-todo!()
+unsafe {
+syscall::handle_signal_exit(frame);
+}
+return;
}
let args = &frame.an[1..];
@ -112,7 +110,7 @@ unsafe fn umode_exception_handler(frame: &mut TrapFrame) {
log::warn!("Of process {} ({:?})", process.id, process.name);
if dump_tval {
-let translation = process.space().translate(frame.stval as usize).ok();
+let translation = process.space().translate(frame.stval).ok();
if let Some(physical) = translation {
log::warn!(" * tval translates to {physical:#x}");
@ -120,8 +118,6 @@ unsafe fn umode_exception_handler(frame: &mut TrapFrame) {
log::warn!(" * tval does not translate");
}
}
-thread.exit_process(ExitCode::BySignal(Ok(Signal::MemoryAccessViolation)));
}
}
@ -146,7 +142,7 @@ unsafe fn smode_exception_handler(frame: &mut TrapFrame) {
.and_then(|t| t.try_get_process())
.map(|p| p.space())
{
-space.translate(frame.stval as usize).ok()
+space.translate(frame.stval).ok()
} else {
None
};
@ -206,39 +202,53 @@ unsafe extern "C" fn smode_general_trap_handler(frame: *mut TrapFrame) {
impl TaskFrame for TrapFrame {
fn store(&self) -> SavedFrame {
-todo!()
+SavedFrame {
+ra: self.ra,
+gp: self.gp,
+tn: self.tn,
+sn: self.sn,
+an: self.an,
+sp: self.sp,
+ip: self.sepc,
+tp: self.tp,
+}
}
fn restore(&mut self, saved: &SavedFrame) {
-let _ = saved;
-todo!()
+self.ra = saved.ra;
+self.gp = saved.gp;
+self.tn = saved.tn;
+self.sn = saved.sn;
+self.an = saved.an;
+self.sp = saved.sp;
+self.sepc = saved.ip;
+self.tp = saved.tp;
}
fn user_sp(&self) -> usize {
-todo!()
+self.sp
}
fn user_ip(&self) -> usize {
-todo!()
+self.sepc
}
fn argument(&self) -> u64 {
-todo!()
+self.an[0] as u64
}
fn set_user_sp(&mut self, value: usize) {
-let _ = value;
-todo!()
+self.sp = value;
}
fn set_user_ip(&mut self, value: usize) {
-let _ = value;
-todo!()
+self.sepc = value;
}
fn set_argument(&mut self, value: u64) {
-let _ = value;
-todo!()
+self.an[0] = value as usize;
}
fn set_single_step(&mut self, step: bool) {
@ -247,8 +257,7 @@ impl TaskFrame for TrapFrame {
}
fn set_return_value(&mut self, value: u64) {
-let _ = value;
-todo!()
+self.an[0] = value as usize;
}
}

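The TaskFrame implementation above is a direct field mapping between TrapFrame and the new SavedFrame: sepc is saved as ip and restored back into sepc, and an[0] doubles as the first syscall argument and the return value, matching the usual RISC-V convention where a0 carries both. A reduced, runnable round-trip check of that mapping, using stand-in structs rather than the kernel types.

#[derive(Clone, Copy, Default, PartialEq, Debug)]
struct Saved { ip: usize, sp: usize, a0: usize }

#[derive(Default)]
struct Frame { sepc: usize, sp: usize, an: [usize; 8] }

impl Frame {
    fn store(&self) -> Saved {
        Saved { ip: self.sepc, sp: self.sp, a0: self.an[0] }
    }
    fn restore(&mut self, s: &Saved) {
        self.sepc = s.ip;
        self.sp = s.sp;
        self.an[0] = s.a0;
    }
}

fn main() {
    let mut frame = Frame::default();
    frame.sepc = 0x1000;
    frame.sp = 0x7fff_0000;
    frame.an[0] = 42;
    let saved = frame.store();
    let mut restored = Frame::default();
    restored.restore(&saved);
    assert_eq!(restored.store(), saved); // ip/sp/a0 survive the round trip
}
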
View File

@ -5,7 +5,18 @@ use super::FrameOps;
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, Default)]
#[repr(C)]
-pub struct SavedFrame {}
+pub struct SavedFrame {
+// General-purpose
+pub ra: usize,
+pub gp: usize,
+pub tn: [usize; 7],
+pub sn: [usize; 12],
+pub an: [usize; 8],
+// Special
+pub sp: usize,
+pub ip: usize,
+pub tp: usize,
+}
impl FrameOps for SavedFrame {
fn set_user_ip(&mut self, value: usize) {