rv64: relax TLB flushes
commit ca82e25cf6 (parent 6aa3e7f6be)
@@ -51,7 +51,9 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
 
         // Copy new SATP
         let satp = inner.satp.get();
+        let asid = inner.satp.read(SATP::ASID);
         if satp != SATP.get() {
+            mem::tlb_flush_asid(asid as usize);
             SATP.set(satp);
         }
         cpu.smode_sp = self.stack_top;
@@ -149,7 +151,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
     unsafe fn enter(&self) -> ! {
         unsafe {
             self.load_state();
-            mem::tlb_flush_full();
             __rv64_enter_task(self.inner.get())
         }
     }
@@ -162,7 +163,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
         unsafe {
             from.store_state();
             self.load_state();
-            mem::tlb_flush_full();
             __rv64_switch_task(self.inner.get(), from.inner.get())
         }
     }
@@ -170,7 +170,6 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
     unsafe fn switch_and_drop(&self, thread: *const ()) {
         unsafe {
             self.load_state();
-            mem::tlb_flush_full();
             __rv64_switch_task_and_drop(self.inner.get(), thread)
         }
     }
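
The four hunks above relax the context-switch path: the unconditional sfence.vma on every task enter/switch is gone, and the only remaining flush targets the incoming address space's ASID, and only when SATP actually changes. A minimal standalone sketch of that policy (the function name is made up; raw csrr/csrw and a hard-coded ASID field position stand in for the kernel's SATP/inner.satp wrappers):

// Sketch of the relaxed flush policy above, not the kernel's actual code.
// Assumes rv64, where satp is laid out as MODE[63:60] ASID[59:44] PPN[43:0].
#[cfg(target_arch = "riscv64")]
unsafe fn activate_address_space(new_satp: usize) {
    let current: usize;
    unsafe { core::arch::asm!("csrr {0}, satp", out(reg) current) };

    if current != new_satp {
        let asid = (new_satp >> 44) & 0xffff;
        // Drop only the translations tagged with the incoming ASID; entries
        // cached for other address spaces survive the switch.
        unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
        unsafe { core::arch::asm!("csrw satp, {0}", in(reg) new_satp) };
    }
    // Same address space as before: no fence needed at all.
}
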
@@ -130,9 +130,10 @@ unsafe fn map_device_memory_l3(
                     PageEntry::page(base.add(j * L3::SIZE), PageAttributes::W);
             }
         }
-        tlb_flush_full();
 
-        return Ok(DEVICE_MAPPING_OFFSET + i * L3::SIZE);
+        let start = DEVICE_MAPPING_OFFSET + i * L3::SIZE;
+        tlb_flush_range_va(start, count * L3::SIZE);
+        return Ok(start);
     }
 
     Err(Error::OutOfMemory)
@@ -157,12 +158,12 @@ unsafe fn map_device_memory_l2(
             for j in 0..count {
                 DEVICE_MAPPING_L2[i + j] =
                     PageEntry::<L2>::block(base.add(j * L2::SIZE), PageAttributes::W);
-                // tlb_flush_va(DEVICE_MAPPING_OFFSET + (i + j) * L2::SIZE);
             }
         }
-        tlb_flush_full();
 
-        return Ok(DEVICE_MAPPING_OFFSET + i * L2::SIZE);
+        let start = DEVICE_MAPPING_OFFSET + i * L2::SIZE;
+        tlb_flush_range_va(start, count * L2::SIZE);
+        return Ok(start);
     }
 
     Err(Error::OutOfMemory)
@@ -221,9 +222,8 @@ pub(crate) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTabl
                     assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
                     DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
                 }
-
-                tlb_flush_va(page);
             }
+            tlb_flush_range_va(map.base_address, map.page_count * L3::SIZE);
         }
         L2::SIZE => todo!(),
         _ => unimplemented!(),
@@ -259,7 +259,7 @@ pub unsafe fn unmap_lower_half() {
     let mut tables = KERNEL_TABLES.lock();
     let kernel_l1i_lower = page_index::<L1>(KERNEL_PHYS_BASE);
     tables.l1.data[kernel_l1i_lower] = 0;
-    tlb_flush_full();
+    tlb_flush_range_va(0x0, L1::SIZE);
 }
 
 /// Sets up run-time kernel translation tables.
@@ -301,22 +301,52 @@ pub unsafe fn setup_fixed_tables() {
     tlb_flush_full();
 }
 
+pub fn tlb_flush_global_full() {
+    tlb_flush_full();
+    // TODO send TLB shootdown IPI to other harts
+}
+
+pub fn tlb_flush_global_va(va: usize) {
+    tlb_flush_va(va);
+    // TODO send TLB shootdown IPI to other harts
+}
+
+pub fn tlb_flush_range_va(start: usize, size: usize) {
+    let end = (start + size).page_align_up::<L3>();
+    let start = start.page_align_down::<L3>();
+
+    for page in (start..end).step_by(L3::SIZE) {
+        tlb_flush_va(page);
+    }
+}
+
+pub fn tlb_flush_range_va_asid(asid: usize, start: usize, size: usize) {
+    let end = (start + size).page_align_up::<L3>();
+    let start = start.page_align_down::<L3>();
+
+    for page in (start..end).step_by(L3::SIZE) {
+        tlb_flush_va_asid(page, asid);
+    }
+}
+
+#[inline]
 pub fn tlb_flush_full() {
-    unsafe {
-        core::arch::asm!("sfence.vma");
-    }
+    unsafe { core::arch::asm!("sfence.vma") };
 }
 
+#[inline]
 pub fn tlb_flush_va(va: usize) {
-    unsafe {
-        core::arch::asm!("sfence.vma zero, {0}", in(reg) va);
-    }
+    unsafe { core::arch::asm!("sfence.vma {0}, zero", in(reg) va) };
 }
 
-pub fn tlb_flush_va_asid(va: usize, asid: usize) {
-    unsafe {
-        core::arch::asm!("sfence.vma {0}, {1}", in(reg) asid, in(reg) va);
+#[inline]
+pub fn tlb_flush_asid(asid: usize) {
+    unsafe { core::arch::asm!("sfence.vma zero, {0}", in(reg) asid) };
 }
+
+#[inline]
+pub fn tlb_flush_va_asid(va: usize, asid: usize) {
+    unsafe { core::arch::asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
 }
 
 pub fn clone_kernel_tables(dst: &mut PageTable<L1>) {
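
A note on the operand order the rewritten primitives rely on: sfence.vma rs1, rs2 takes the virtual address in rs1 and the ASID in rs2, and zero in either slot widens the fence to all addresses or all address spaces. The old tlb_flush_va passed the address in the ASID slot and the old tlb_flush_va_asid had the two operands swapped; the hunk above fixes both. A compact sketch of the four combinations (rv64 target assumed; module and function names are illustrative only):

// sfence.vma operand roles: rs1 = virtual address, rs2 = ASID;
// zero in a slot means "all addresses" / "all address spaces".
#[cfg(target_arch = "riscv64")]
mod sfence_sketch {
    use core::arch::asm;

    pub fn all() {
        // Every address, every ASID: what the old per-switch flush did.
        unsafe { asm!("sfence.vma") };
    }

    pub fn by_va(va: usize) {
        // One virtual address, every ASID.
        unsafe { asm!("sfence.vma {0}, zero", in(reg) va) };
    }

    pub fn by_asid(asid: usize) {
        // Every address belonging to one ASID.
        unsafe { asm!("sfence.vma zero, {0}", in(reg) asid) };
    }

    pub fn by_va_asid(va: usize, asid: usize) {
        // One virtual address in one ASID only.
        unsafe { asm!("sfence.vma {0}, {1}", in(reg) va, in(reg) asid) };
    }
}
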
@@ -110,8 +110,6 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
 
         l3[l3i] = entry;
         tlb_flush_va_asid(virt, self.asid as usize);
-        // dc_cvac((&raw const l3[l3i]).addr());
-        // tlb_flush_vaae1(virt);
 
         Ok(())
     }
@@ -129,9 +127,6 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
 
         l3[l3i] = PageEntry::INVALID;
        tlb_flush_va_asid(virt, self.asid as usize);
-        // ic_iallu();
-        // dc_cvac((&raw const l3[l3i]).addr());
-        // tlb_flush_vaae1(virt);
 
         Ok(page)
     }
@@ -160,6 +155,7 @@ impl<TA: TableAllocator> ProcessAddressSpaceImpl<TA> {
 
 impl<TA: TableAllocator> Drop for ProcessAddressSpaceImpl<TA> {
     fn drop(&mut self) {
+        // TODO
         // // SAFETY: with safe usage of the ProcessAddressSpaceImpl, clearing and dropping
         // // is safe, no one refers to the memory
         // unsafe {