refactor: clean up kernel warnings

Mark Poliakov 2023-11-16 11:11:10 +02:00
parent 75b1807e8e
commit ff04db65dc
25 changed files with 198 additions and 191 deletions

View File

@ -21,7 +21,7 @@ use crate::{
unsafe fn pre_init_mmu() {
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran4::Supported) {
// TODO early panic
loop {}
todo!();
}
MAIR_EL1.write(
@ -115,7 +115,7 @@ unsafe extern "C" fn __aarch64_bsp_upper_entry(dtb: PhysicalAddress) -> ! {
kernel_main()
}
unsafe extern "C" fn __aarch64_el1_ap_lower_entry(sp: PhysicalAddress) -> ! {
unsafe extern "C" fn __aarch64_el1_ap_lower_entry() -> ! {
const AP_STACK_PAGES: usize = 8;
AArch64::set_interrupt_mask(true);
@ -147,7 +147,9 @@ extern "C" fn __aarch64_ap_upper_entry() -> ! {
exception::init_exceptions();
unsafe {
ARCHITECTURE.init_platform(false);
ARCHITECTURE
.init_platform(false)
.expect("Could not initialize the AP");
}
kernel_secondary_main()
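Several hunks in this commit replace bare `loop {}` hangs with `todo!()`. The practical difference, sketched below with a hypothetical feature check, is that `todo!()` expands to `panic!("not yet implemented")`, so the failure reaches the panic handler and can be reported instead of silently spinning:

```rust
fn require(feature_supported: bool) {
    if !feature_supported {
        // Old placeholder: `loop {}` — the core spins forever, no diagnostics.
        // New placeholder: `todo!()` panics with "not yet implemented",
        // which at least reaches the panic handler and can be printed.
        todo!();
    }
}
```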

View File

@ -3,10 +3,7 @@ use core::{arch::global_asm, cell::UnsafeCell};
use abi::error::Error;
use crate::{
mem::{address::IntoRaw, phys, PhysicalAddress},
task::context::TaskContextImpl,
};
use crate::{mem::phys, task::context::TaskContextImpl};
struct StackBuilder {
base: usize,
@ -86,8 +83,7 @@ impl TaskContextImpl for TaskContext {
fn kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Result<Self, Error> {
const KERNEL_TASK_PAGES: usize = 8;
let stack_base =
unsafe { phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw() };
let stack_base = phys::alloc_pages_contiguous(KERNEL_TASK_PAGES)?.virtualize_raw();
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
@ -110,7 +106,7 @@ impl TaskContextImpl for TaskContext {
fn user(entry: usize, arg: usize, ttbr0: u64, user_stack_sp: usize) -> Result<Self, Error> {
const USER_TASK_PAGES: usize = 16;
let stack_base = unsafe { phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw() };
let stack_base = phys::alloc_pages_contiguous(USER_TASK_PAGES)?.virtualize_raw();
let mut stack = StackBuilder::new(stack_base, USER_TASK_PAGES * 0x1000);
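The stack-allocation hunks above drop their `unsafe` blocks because `phys::alloc_pages_contiguous` is now a safe, `Result`-returning call; only code that later touches the raw memory stays unsafe. A minimal sketch with hypothetical stand-ins for the kernel's allocator:

```rust
// Hypothetical stand-ins for phys::alloc_pages_contiguous and
// PhysicalAddress::virtualize_raw from the diff; the base address is made up.
fn alloc_pages_contiguous(_pages: usize) -> Result<usize, &'static str> {
    Ok(0xFFFF_0000_4000_0000) // pretend a virtualized allocation succeeded
}

fn kernel_stack_top() -> Result<usize, &'static str> {
    const KERNEL_TASK_PAGES: usize = 8;
    // The call is safe now, so `?` composes without an unsafe block;
    // unsafety is confined to the code that eventually touches the memory.
    let base = alloc_pages_contiguous(KERNEL_TASK_PAGES)?;
    Ok(base + KERNEL_TASK_PAGES * 0x1000) // the stack grows down from the top
}
```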

View File

@ -20,7 +20,7 @@ use crate::{
device_tree_driver,
mem::{
address::FromRaw,
device::{DeviceMemoryIo, DeviceMemoryMapping, RawDeviceMemoryMapping},
device::{DeviceMemoryIo, RawDeviceMemoryMapping},
PhysicalAddress,
},
sync::IrqSafeSpinlock,

View File

@ -37,8 +37,7 @@ cfg_if! {
// Precomputed mappings
const KERNEL_L1_INDEX: usize = L1::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_START_L2_INDEX: usize = L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE);
const KERNEL_END_L2_INDEX: usize =
L2::index(KERNEL_VIRT_OFFSET + KERNEL_PHYS_BASE) + KERNEL_L3_COUNT;
const KERNEL_END_L2_INDEX: usize = KERNEL_START_L2_INDEX + KERNEL_L3_COUNT;
// Must not be zero, should be at 4MiB
const_assert_eq!(KERNEL_START_L2_INDEX, 0);
@ -52,12 +51,10 @@ const EARLY_MAPPING_L2I: usize = KERNEL_END_L2_INDEX + 1;
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
// 1GiB max
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
// 16GiB max
pub(super) const RAM_MAPPING_L1_COUNT: usize = 16;
const RAM_MAPPING_START_L1I: usize = KERNEL_L1_INDEX + 3;
const RAM_MAPPING_END_L1I: usize = RAM_MAPPING_START_L1I + RAM_MAPPING_L1_COUNT;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 16GiB max
const RAM_MAPPING_START_L1I: usize = KERNEL_L1_INDEX + 3;
pub(super) const RAM_MAPPING_L1_COUNT: usize = 16;
// 2MiB for early mappings
const EARLY_MAPPING_OFFSET: usize =
@ -85,19 +82,19 @@ pub struct EarlyMapping<'a, T: ?Sized> {
}
impl<'a, T: Sized> EarlyMapping<'a, T> {
pub unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
let layout = Layout::new::<T>();
let aligned = physical.page_align_down::<L3>();
let offset = physical.page_offset::<L3>();
let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
// pub(super) unsafe fn map(physical: PhysicalAddress) -> Result<EarlyMapping<'a, T>, Error> {
// let layout = Layout::new::<T>();
// let aligned = physical.page_align_down::<L3>();
// let offset = physical.page_offset::<L3>();
// let page_count = (offset + layout.size() + L3::SIZE - 1) / L3::SIZE;
let virt = map_early_pages(aligned, page_count)?;
let value = &mut *((virt + offset) as *mut T);
// let virt = map_early_pages(aligned, page_count)?;
// let value = &mut *((virt + offset) as *mut T);
Ok(EarlyMapping { value, page_count })
}
// Ok(EarlyMapping { value, page_count })
// }
pub unsafe fn map_slice(
pub(super) unsafe fn map_slice(
physical: PhysicalAddress,
len: usize,
) -> Result<EarlyMapping<'a, [T]>, Error> {
@ -186,7 +183,7 @@ unsafe fn map_early_pages(physical: PhysicalAddress, count: usize) -> Result<usi
}
unsafe fn unmap_early_page(address: usize) {
if address < EARLY_MAPPING_OFFSET || address >= EARLY_MAPPING_OFFSET + L2::SIZE {
if !(EARLY_MAPPING_OFFSET..EARLY_MAPPING_OFFSET + L2::SIZE).contains(&address) {
panic!("Tried to unmap invalid early mapping: {:#x}", address);
}
@ -200,7 +197,7 @@ unsafe fn unmap_early_page(address: usize) {
pub(super) unsafe fn map_ram_l1(index: usize) {
if index >= RAM_MAPPING_L1_COUNT {
loop {}
todo!()
}
assert_eq!(KERNEL_TABLES.l1.data[index + RAM_MAPPING_START_L1I], 0);
@ -210,7 +207,7 @@ pub(super) unsafe fn map_ram_l1(index: usize) {
pub(super) unsafe fn map_heap_l2(index: usize, page: PhysicalAddress) {
if index >= 512 {
loop {}
todo!()
}
assert!(!HEAP_MAPPING_L2[index].is_present());
// TODO UXN, PXN
@ -321,8 +318,8 @@ pub(super) unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping) {
let l3i = L3::index(page);
assert!(DEVICE_MAPPING_L3S[l2i][l3i].is_present());
DEVICE_MAPPING_L3S[l2i][l3i] = PageEntry::INVALID;
// TODO flush the TLB entry
loop {}
todo!();
// intrinsics::flush_tlb_entry(page);
}
}
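The bounds check in `unmap_early_page` is rewritten with the standard `Range::contains` idiom, expressing the half-open interval test in a single expression. A self-contained illustration with example constants:

```rust
fn in_early_mapping(address: usize) -> bool {
    // Example values; the real constants are computed from the kernel layout.
    const EARLY_MAPPING_OFFSET: usize = 0xFFFF_FFC0_0040_0000;
    const L2_SIZE: usize = 2 * 1024 * 1024; // one L2 entry spans 2 MiB

    // Same test as `address >= start && address < end`, in one expression.
    (EARLY_MAPPING_OFFSET..EARLY_MAPPING_OFFSET + L2_SIZE).contains(&address)
}
```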

View File

@ -3,7 +3,7 @@ use core::sync::atomic::{AtomicU8, Ordering};
use abi::error::Error;
use crate::mem::{
address::{AsPhysicalAddress, IntoRaw},
address::AsPhysicalAddress,
phys,
pointer::PhysicalRefMut,
process::ProcessAddressSpaceManager,

View File

@ -125,7 +125,7 @@ impl Architecture for AArch64 {
) -> Result<(), Error> {
let end_l1i = L1::index(memory_end.page_align_up::<L1>().into_raw());
if end_l1i > mem::RAM_MAPPING_L1_COUNT {
loop {}
todo!()
}
// Map 1GiB chunks

View File

@ -5,13 +5,8 @@ use abi::error::Error;
use device_api::CpuBringupDevice;
use fdt_rs::prelude::PropReader;
use crate::arch::Architecture;
use crate::mem::address::IntoRaw;
use crate::arch::ARCHITECTURE;
use crate::mem::KERNEL_VIRT_OFFSET;
use crate::{
arch::{ArchitectureImpl, ARCHITECTURE},
mem::phys,
};
use crate::device::devtree::{self, DevTreeIndexNodePropGet, DeviceTree};

View File

@ -69,12 +69,22 @@ pub trait Architecture {
/// Only safe to call once during system init.
unsafe fn start_application_processors(&self) {}
/// Allocates a virtual mapping for the specified physical memory region
/// Allocates a virtual mapping for the specified physical memory region.
///
/// # Safety
///
/// The caller must ensure the validity of the provided region.
unsafe fn map_device_memory(
&self,
base: PhysicalAddress,
size: usize,
) -> Result<RawDeviceMemoryMapping, Error>;
/// Removes the provided mapping from the kernel's translation tables.
///
/// # Safety
///
/// The caller must ensure the mapping is no longer in use and will not be used again.
unsafe fn unmap_device_memory(&self, map: &RawDeviceMemoryMapping);
fn map_physical_memory<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
@ -177,6 +187,9 @@ pub trait Architecture {
/// The caller must ensure it is actually safe to reset, i.e. no critical processes will be
/// aborted and no data will be lost.
unsafe fn reset(&self) -> ! {
loop {}
Self::set_interrupt_mask(true);
loop {
Self::wait_for_interrupt();
}
}
}
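The new default `reset` body parks the CPU instead of spinning: with interrupts masked, `wait_for_interrupt` never hands control back usefully, so the core sleeps in a low-power wait rather than a busy loop. A minimal sketch of the same default-method pattern:

```rust
// Sketch of the default-method pattern used above: implementors inherit
// this body unless they override it with a real reset sequence.
trait Cpu {
    fn set_interrupt_mask(masked: bool);
    fn wait_for_interrupt();

    // With interrupts masked, wait_for_interrupt never wakes the core
    // for useful work, so this effectively parks it.
    unsafe fn reset(&self) -> ! {
        Self::set_interrupt_mask(true);
        loop {
            Self::wait_for_interrupt();
        }
    }
}
```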

View File

@ -20,6 +20,15 @@ struct Inner {
height: u32,
}
struct DrawGlyph {
sx: u32,
sy: u32,
c: u8,
fg: u32,
bg: u32,
bytes_per_line: usize,
}
/// Framebuffer console device wrapper
pub struct FramebufferConsole {
inner: IrqSafeSpinlock<Inner>,
@ -66,12 +75,14 @@ impl DisplayConsole for FramebufferConsole {
inner.draw_glyph(
font,
(col_idx as u32) * cw,
row_idx * ch,
glyph,
fg,
bg,
bytes_per_line,
DrawGlyph {
sx: (col_idx as u32) * cw,
sy: row_idx * ch,
c: glyph,
fg,
bg,
bytes_per_line,
},
);
}
}
@ -96,7 +107,7 @@ impl FramebufferConsole {
framebuffer: &'static LinearFramebuffer,
font: Option<PcScreenFont<'static>>,
) -> Result<Self, Error> {
let font = font.unwrap_or(PcScreenFont::default());
let font = font.unwrap_or_default();
let char_width = font.width();
let char_height = font.height();
let dim = framebuffer.dimensions();
@ -120,19 +131,10 @@ impl FramebufferConsole {
impl Inner {
#[optimize(speed)]
fn draw_glyph(
&mut self,
font: PcScreenFont<'static>,
sx: u32,
sy: u32,
c: u8,
fg: u32,
bg: u32,
bytes_per_line: usize,
) {
fn draw_glyph(&mut self, font: PcScreenFont<'static>, g: DrawGlyph) {
let mut fb = unsafe { self.framebuffer.lock() };
let mut c = c as u32;
let mut c = g.c as u32;
if c >= font.len() {
c = b'?' as u32;
}
@ -146,13 +148,13 @@ impl Inner {
let mut x = 0;
while x < font.width() {
let v = if glyph[0] & mask != 0 { fg } else { bg };
fb[sy + y][(sx + x) as usize] = v;
let v = if glyph[0] & mask != 0 { g.fg } else { g.bg };
fb[g.sy + y][(g.sx + x) as usize] = v;
mask >>= 1;
x += 1;
}
glyph = &glyph[bytes_per_line..];
glyph = &glyph[g.bytes_per_line..];
y += 1;
}
}
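The `DrawGlyph` change is the classic parameter-struct refactor: `draw_glyph` previously took six positional arguments, several of them `u32`s, so call sites were easy to get wrong. Named fields make each argument explicit, as in this self-contained version:

```rust
struct DrawParams {
    x: u32,
    y: u32,
    fg: u32,
    bg: u32,
}

fn draw(p: DrawParams) {
    // Field names document the call site; swapping fg/bg by accident
    // now requires writing the wrong name, not just the wrong order.
    println!("at ({}, {}), fg={:#x} bg={:#x}", p.x, p.y, p.fg, p.bg);
}

fn main() {
    draw(DrawParams { x: 8, y: 16, fg: 0xFFFFFF, bg: 0x000000 });
}
```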

View File

@ -5,7 +5,7 @@ use bytemuck::{Pod, Zeroable};
use kernel_util::AlignedTo;
// static CONSOLE_FONT: &[u8] = include_bytes!("font.psfu");
static CONSOLE_FONT: &'static AlignedTo<u32, [u8]> = &AlignedTo {
static CONSOLE_FONT: &AlignedTo<u32, [u8]> = &AlignedTo {
align: [],
bytes: *include_bytes!("font.psfu"),
};
@ -53,6 +53,7 @@ impl<'a> PcScreenFont<'a> {
self.header.height
}
#[allow(clippy::len_without_is_empty)]
#[inline]
pub const fn len(&self) -> u32 {
self.header.num_glyph

View File

@ -21,6 +21,7 @@
#![allow(
clippy::new_without_default,
clippy::fn_to_numeric_cast,
clippy::match_ref_pats,
async_fn_in_trait
)]
// #![warn(missing_docs)]

View File

@ -7,7 +7,7 @@ use core::{
use crate::arch::{Architecture, ArchitectureImpl};
use super::{pointer::PhysicalPointer, table::EntryLevel, KERNEL_VIRT_OFFSET};
use super::{table::EntryLevel, KERNEL_VIRT_OFFSET};
#[repr(transparent)]
pub struct KernelImageObject<T> {
@ -29,12 +29,22 @@ pub trait IntoRaw<T> {
}
pub trait AsPhysicalAddress {
/// Returns the value's physical address.
///
/// # Safety
///
/// The caller must ensure the value has been constructed and obtained through proper means.
unsafe fn as_physical_address(&self) -> PhysicalAddress;
}
// KernelImageObject wrapper for objects inside the kernel
impl<T> KernelImageObject<T> {
/// Wraps a value in the [KernelImageObject], allowing its physical address calculation.
///
/// # Safety
///
/// The caller must ensure `T` is a `static` (or `static mut`) binding inside the kernel.
pub const unsafe fn new(inner: T) -> Self {
Self { inner }
}
@ -98,6 +108,13 @@ impl PhysicalAddress {
self.0 as usize % align_of::<T>() == 0
}
/// Converts a previously virtualized physical address back into its physical form.
///
/// # Safety
///
/// The caller must ensure the function only receives addresses obtained through
/// [PhysicalAddress::virtualize_raw] or
/// [super::pointer::PhysicalRef]/[super::pointer::PhysicalRefMut] facilities.
pub unsafe fn from_virtualized(address: usize) -> Self {
ArchitectureImpl::physicalize(address).unwrap()
}
@ -105,24 +122,6 @@ impl PhysicalAddress {
pub fn virtualize_raw(self) -> usize {
ArchitectureImpl::virtualize(self).unwrap()
}
pub unsafe fn virtualize<T>(self) -> PhysicalPointer<T> {
if !self.is_aligned_for::<T>() {
todo!();
}
let base = self.virtualize_raw();
PhysicalPointer::from_raw(base as *mut T)
}
pub unsafe fn virtualize_slice<T: Sized>(self, len: usize) -> PhysicalPointer<[T]> {
if !self.is_aligned_for::<T>() {
todo!();
}
let base = self.virtualize_raw();
PhysicalPointer::from_raw_parts(base as *mut T, len)
}
}
impl Add for PhysicalAddress {

View File

@ -31,6 +31,11 @@ pub struct DeviceMemoryIo<'a, T: ?Sized> {
}
impl RawDeviceMemoryMapping {
/// Maps a region of physical memory as device memory of given size.
///
/// # Safety
///
/// The caller must ensure proper access synchronization, as well as the address' origin.
#[inline]
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
ARCHITECTURE.map_device_memory(base, size)
@ -52,6 +57,13 @@ impl Drop for RawDeviceMemoryMapping {
}
impl DeviceMemoryMapping {
/// Maps a region of physical memory as device memory of given size.
///
/// See [RawDeviceMemoryMapping::map].
///
/// # Safety
///
/// The caller must ensure proper access synchronization, as well as the address' origin.
pub unsafe fn map(base: PhysicalAddress, size: usize) -> Result<Self, Error> {
let inner = RawDeviceMemoryMapping::map(base, size)?;
let address = inner.address;
@ -67,6 +79,12 @@ impl DeviceMemoryMapping {
}
impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
/// Interprets a raw device memory mapping as pointing to a value of `T`.
///
/// # Safety
///
/// The caller must ensure the mapping actually contains the value of `T`, as well as proper
/// access synchronization.
pub unsafe fn from_raw(
inner: Arc<RawDeviceMemoryMapping>,
) -> Result<DeviceMemoryIo<'a, T>, Error> {
@ -78,6 +96,12 @@ impl<'a, T: Sized> DeviceMemoryIo<'a, T> {
Ok(DeviceMemoryIo { inner, value })
}
/// Maps a physical address as device memory of type `T`.
///
/// # Safety
///
/// The caller must ensure the address actually points to a value of type `T`, as well as
/// proper access synchronization.
pub unsafe fn map(base: PhysicalAddress) -> Result<DeviceMemoryIo<'a, T>, Error> {
let inner = RawDeviceMemoryMapping::map(base, size_of::<T>())?;
let value = &*(inner.address as *const T);
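A hedged usage sketch of `DeviceMemoryIo::map` as documented above; the register layout, the address, and the `FromRaw` constructor (imported elsewhere in this diff) are assumptions for illustration:

```rust
// Hypothetical MMIO register block; the physical address is made up.
#[repr(C)]
struct UartRegs {
    data: u32,
    status: u32,
}

unsafe fn map_uart() -> Result<DeviceMemoryIo<'static, UartRegs>, Error> {
    // Upholds the documented contract: the address really is a UartRegs
    // block, and access is synchronized elsewhere.
    DeviceMemoryIo::map(PhysicalAddress::from_raw(0x0900_0000usize))
}
```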

View File

@ -24,6 +24,12 @@ use self::{device::DeviceMemoryMapping, process::ProcessAddressSpace};
pub const KERNEL_VIRT_OFFSET: usize = ArchitectureImpl::KERNEL_VIRT_OFFSET;
/// Reads a value from an arbitrary physical address.
///
/// # Safety
///
/// The caller must ensure the correct origin of the address, its alignment and that the access is
/// properly synchronized.
pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let address = io.address();
@ -35,6 +41,12 @@ pub unsafe fn read_memory<T>(address: PhysicalAddress) -> T {
}
}
/// Writes a value to an arbitrary physical address.
///
/// # Safety
///
/// The caller must ensure the correct origin of the address, its alignment and that the access is
/// properly synchronized.
pub unsafe fn write_memory<T>(address: PhysicalAddress, value: T) {
let io = DeviceMemoryMapping::map(address, size_of::<T>()).unwrap();
let address = io.address();
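A short usage sketch of the two helpers documented above; the bit-flip and the assumption that `PhysicalAddress` is `Copy` (as its arithmetic impls suggest) are illustrative only:

```rust
// Hedged sketch: read, modify, and write back one u32 at a physical address.
unsafe fn toggle_flag(addr: PhysicalAddress) {
    let flags: u32 = read_memory(addr);
    write_memory(addr, flags ^ 1);
}
```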

View File

@ -1,6 +1,4 @@
//! Physical memory manager implementation
use core::mem::size_of;
use abi::error::Error;
use crate::mem::{
@ -11,7 +9,7 @@ use crate::mem::{
pub type BitmapWord = u64;
pub(super) const BITMAP_WORD_SIZE: usize = size_of::<BitmapWord>() * 8;
pub(super) const BITMAP_WORD_SIZE: usize = BitmapWord::BITS as usize;
pub(super) const BITMAP_PAGE_COUNT: usize = 256;
const HUGE_PAGE_WORD_COUNT: usize = 512 / BITMAP_WORD_SIZE;
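The `BITMAP_WORD_SIZE` change swaps a manual bytes-times-eight computation for the integer type's own `BITS` constant, which states the unit (bits) directly:

```rust
type BitmapWord = u64;

// Old: bytes * 8; correct, but the reader must do the unit conversion.
const OLD: usize = core::mem::size_of::<BitmapWord>() * 8;
// New: the standard associated constant already counts bits.
const NEW: usize = BitmapWord::BITS as usize;

fn main() {
    assert_eq!(OLD, NEW); // both are 64
}
```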

View File

@ -5,10 +5,7 @@ use kernel_util::util::OneTimeInit;
use crate::{
arch::{Architecture, ARCHITECTURE},
mem::{
address::IntoRaw,
phys::{self, reserved::is_reserved},
},
mem::{address::IntoRaw, phys::reserved::is_reserved},
sync::IrqSafeSpinlock,
};
@ -166,7 +163,7 @@ pub unsafe fn init_from_iter<I: Iterator<Item = PhysicalMemoryRegion> + Clone>(
);
if IntoRaw::<u64>::into_raw(phys_start) & 0x1FFFFFF != 0 {
loop {}
todo!();
}
let mut manager =

View File

@ -1,17 +1,10 @@
use core::{
fmt,
mem::align_of,
ops::{Deref, DerefMut},
};
use super::{address::AsPhysicalAddress, PhysicalAddress};
#[derive(Clone, Copy, PartialEq, PartialOrd, Debug, Hash)]
#[repr(transparent)]
pub struct PhysicalPointer<T: ?Sized> {
pointer: *mut T,
}
#[repr(transparent)]
pub struct PhysicalRef<'a, T: ?Sized> {
value: &'a T,
@ -22,63 +15,28 @@ pub struct PhysicalRefMut<'a, T: ?Sized> {
value: &'a mut T,
}
// PhysicalPointer<T> wrapper for direct access to any memory location
impl<T: ?Sized> PhysicalPointer<T> {
pub fn into_address(self) -> usize {
self.pointer.addr()
}
}
impl<T: Sized> PhysicalPointer<T> {
#[inline(always)]
pub fn is_aligned(&self) -> bool {
self.pointer.addr() % align_of::<T>() == 0
}
#[inline(always)]
pub const unsafe fn from_raw(pointer: *mut T) -> PhysicalPointer<T> {
PhysicalPointer { pointer }
}
#[inline(always)]
pub unsafe fn from_raw_parts(base: *mut T, len: usize) -> PhysicalPointer<[T]> {
PhysicalPointer {
pointer: core::ptr::slice_from_raw_parts_mut(base, len),
}
}
pub unsafe fn write_unaligned(self, value: T) {
self.pointer.write_unaligned(value)
}
pub unsafe fn write_volatile(self, value: T) {
self.pointer.write_volatile(value)
}
pub unsafe fn read_unaligned(self) -> T {
self.pointer.read_unaligned()
}
pub unsafe fn read_volatile(self) -> T {
self.pointer.read_volatile()
}
}
impl<T: ?Sized> AsPhysicalAddress for PhysicalPointer<T> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
todo!()
}
}
// PhysicalRefMut<T> wrapper for safe mutable access to physical addresses
impl<'a, T: Sized> PhysicalRefMut<'a, T> {
/// Maps a physical address into the kernel space as &mut T, allowing mutable access to it.
///
/// # Safety
///
/// The caller must ensure the correct origin of the physical address, as well as that it actually
/// contains T. The caller must also take care of access synchronization and make sure no
/// aliasing occurs.
pub unsafe fn map(physical: PhysicalAddress) -> PhysicalRefMut<'a, T> {
let value = virtualize_raw(physical);
PhysicalRefMut { value }
}
/// Maps a physical address into the kernel space as &mut [T], allowing mutable access to it.
///
/// # Safety
///
/// The caller must ensure the correct origin of the physical address, as well as that it actually
/// contains [T; len]. The caller must also take care of access synchronization and make
/// sure no aliasing occurs.
pub unsafe fn map_slice(physical: PhysicalAddress, len: usize) -> PhysicalRefMut<'a, [T]> {
let value = virtualize_slice_raw(physical, len);
PhysicalRefMut { value }
@ -121,11 +79,24 @@ impl<T: ?Sized> fmt::Pointer for PhysicalRefMut<'_, T> {
// PhysicalRef<T>: same as PhysicalRefMut<T>, except immutable
impl<'a, T: Sized> PhysicalRef<'a, T> {
/// Maps a physical address into the kernel space as &T, allowing immutable access to it.
///
/// # Safety
///
/// The caller must ensure the correct origin of the physical address, as well as that it actually
/// contains T.
pub unsafe fn map(physical: PhysicalAddress) -> PhysicalRef<'a, T> {
let value = virtualize_raw(physical);
PhysicalRef { value }
}
/// Maps a physical address into the kernel space as &[T] of given len, allowing immutable
/// access to it.
///
/// # Safety
///
/// The caller must ensure the correct origin of the physical address, as well as that it actually
/// contains [T; len].
pub unsafe fn map_slice(physical: PhysicalAddress, len: usize) -> PhysicalRef<'a, [T]> {
let value = virtualize_slice_raw(physical, len);
PhysicalRef { value }
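With `PhysicalPointer` removed, `PhysicalRef`/`PhysicalRefMut` remain as the checked entry points. A hedged sketch against the signatures above; `BootInfo` and the deref-based field access are assumptions:

```rust
// Sketch: read a firmware-provided structure through PhysicalRef.
#[repr(C)]
struct BootInfo {
    memory_map_len: usize,
}

unsafe fn boot_info_len(phys: PhysicalAddress) -> usize {
    // Caller guarantees `phys` really points at a live BootInfo.
    let info: PhysicalRef<'_, BootInfo> = PhysicalRef::map(phys);
    info.memory_map_len // assumes PhysicalRef derefs to &T
}
```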

View File

@ -4,7 +4,7 @@ use vmalloc::VirtualMemoryAllocator;
use crate::{mem::phys, sync::IrqSafeSpinlock};
use super::{address::AsPhysicalAddress, table::MapAttributes, PhysicalAddress};
use super::{table::MapAttributes, PhysicalAddress};
cfg_if! {
if #[cfg(target_arch = "aarch64")] {
@ -22,6 +22,11 @@ pub trait ProcessAddressSpaceManager: Sized {
fn new() -> Result<Self, Error>;
/// Places a single PAGE_SIZE mapping into the address space.
///
/// # Safety
///
/// The caller must ensure the correct origin of the physical address being mapped.
unsafe fn map_page(
&mut self,
address: usize,
@ -29,6 +34,12 @@ pub trait ProcessAddressSpaceManager: Sized {
flags: MapAttributes,
) -> Result<(), Error>;
/// Removes a single PAGE_SIZE mapping from the address space.
///
/// # Safety
///
/// The caller must ensure the process to which this address space belongs does not and
/// will not access this page.
unsafe fn unmap_page(&mut self, address: usize) -> Result<PhysicalAddress, Error>;
fn translate(&self, address: usize) -> Result<(PhysicalAddress, MapAttributes), Error>;
@ -201,6 +212,14 @@ impl ProcessAddressSpace {
self.inner.lock().table.translate(address).map(|e| e.0)
}
/// Removes a single PAGE_SIZE mapping from the address space.
///
/// See [ProcessAddressSpaceManager::unmap].
///
/// # Safety
///
/// The caller must ensure the process to which this address space belongs does not and
/// will not access this page.
pub unsafe fn unmap(&self, address: usize, size: usize) -> Result<(), Error> {
assert_eq!(address & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);
assert_eq!(size & (ProcessAddressSpaceImpl::PAGE_SIZE - 1), 0);

View File

@ -7,8 +7,8 @@ use vfs::FileRef;
use crate::{
mem::{
address::AsPhysicalAddress, phys, pointer::PhysicalRefMut, process::ProcessAddressSpace,
table::MapAttributes, ForeignPointer,
phys, pointer::PhysicalRefMut, process::ProcessAddressSpace, table::MapAttributes,
ForeignPointer,
},
proc,
task::{context::TaskContextImpl, process::Process, TaskContext},
@ -39,14 +39,6 @@ impl<'a> EnvWriter<'a> {
}
}
fn write_2d_str_array(
space: &ProcessAddressSpace,
virt: usize,
array: &[&str],
) -> Result<(), Error> {
todo!()
}
// TODO I hate this function, it's ugly
fn setup_program_env(
space: &ProcessAddressSpace,
@ -66,12 +58,6 @@ fn setup_program_env(
let envs_data_size: usize = envs.iter().map(|x| x.len()).sum();
let ptrs_size = str_array_size(args.len()) + str_array_size(envs.len());
let total_size = args_data_size + envs_data_size + ptrs_size + HEADER_SIZE;
// 1 + arg ptr:len count
// let args_ptr_size = (1 + args.len() * 2) * size_of::<usize>();
// let envs_ptr_size = (1 + envs.len() * 2) * size_of::<usize>();
// Total size: offset arrays + data + args/env split position
// let total_size = args_size + args_ptr_size + envs_size + envs_ptr_size + size_of::<usize>();
if total_size > 0x1000 {
todo!();
@ -87,7 +73,7 @@ fn setup_program_env(
MapAttributes::USER_READ | MapAttributes::USER_WRITE | MapAttributes::NON_GLOBAL,
)?;
let mut slice = unsafe { PhysicalRefMut::map_slice(phys_page, 4096) };
let mut writer = EnvWriter::new(&mut *slice);
let mut writer = EnvWriter::new(&mut slice);
let args_array_offset = HEADER_SIZE;
let envs_array_offset = args_array_offset + str_array_size(args.len());
@ -95,35 +81,35 @@ fn setup_program_env(
let envs_data_offset = args_data_offset + args_data_size;
// Header
writer.write_usize(virt + args_array_offset);
writer.write_usize(virt + envs_array_offset);
writer.write_usize(virt + args_array_offset)?;
writer.write_usize(virt + envs_array_offset)?;
// Args array
writer.write_usize(args.len());
writer.write_usize(args.len())?;
let mut offset = args_data_offset;
for arg in args.iter() {
writer.write_usize(arg.len());
writer.write_usize(virt + offset);
writer.write_usize(arg.len())?;
writer.write_usize(virt + offset)?;
offset += arg.len();
}
// Envs array
writer.write_usize(envs.len());
writer.write_usize(envs.len())?;
let mut offset = envs_data_offset;
for env in envs.iter() {
writer.write_usize(env.len());
writer.write_usize(virt + offset);
writer.write_usize(env.len())?;
writer.write_usize(virt + offset)?;
offset += env.len();
}
// String data
for arg in args.iter() {
writer.write_bytes(arg.as_bytes());
writer.write_bytes(arg.as_bytes())?;
}
for env in envs.iter() {
writer.write_bytes(env.as_bytes());
writer.write_bytes(env.as_bytes())?;
}
Ok(())
@ -171,12 +157,7 @@ fn setup_binary<S: Into<String>>(
}
}
let context = TaskContext::user(
entry,
virt_args_base,
unsafe { space.as_address_with_asid() },
user_sp,
)?;
let context = TaskContext::user(entry, virt_args_base, space.as_address_with_asid(), user_sp)?;
Ok(Process::new_with_context(name, Some(space), context))
}
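The `EnvWriter` hunks stop discarding write results: `write_usize` and `write_bytes` evidently return `Result`, and `?` threads the first error out of `setup_program_env` instead of silently truncating the environment block. A self-contained illustration of the pattern:

```rust
struct Writer {
    buf: Vec<u8>,
    cap: usize,
}

impl Writer {
    fn write_bytes(&mut self, bytes: &[u8]) -> Result<(), &'static str> {
        if self.buf.len() + bytes.len() > self.cap {
            return Err("buffer overflow");
        }
        self.buf.extend_from_slice(bytes);
        Ok(())
    }

    fn write_usize(&mut self, value: usize) -> Result<(), &'static str> {
        self.write_bytes(&value.to_ne_bytes())
    }
}

fn fill(w: &mut Writer, args: &[&str]) -> Result<(), &'static str> {
    w.write_usize(args.len())?; // the error propagates instead of being ignored
    for arg in args {
        w.write_bytes(arg.as_bytes())?;
    }
    Ok(())
}
```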

View File

@ -82,14 +82,12 @@ fn syscall_handler(func: SyscallFunction, args: &[u64]) -> Result<usize, Error>
todo!();
}
let res = space.allocate(
space.allocate(
None,
len,
|_| phys::alloc_page(),
MapAttributes::USER_WRITE | MapAttributes::USER_READ | MapAttributes::NON_GLOBAL,
);
res
)
}
SyscallFunction::UnmapMemory => {
let addr = args[0] as usize;

View File

@ -4,7 +4,7 @@ use abi::{arch::SavedFrame, error::Error, process::ExitCode};
use alloc::boxed::Box;
use cfg_if::cfg_if;
use crate::{mem::PhysicalAddress, task::process::Process};
use crate::task::process::Process;
cfg_if! {
if #[cfg(target_arch = "aarch64")] {

View File

@ -29,6 +29,7 @@ where
type Output = T;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[allow(clippy::needless_borrow)]
(unsafe { &mut self.get_unchecked_mut().f })(cx)
}
}

View File

@ -18,7 +18,7 @@ impl QueueWaker {
pub fn register(&self, waker: &Waker) {
let mut queue = self.queue.lock();
if queue.iter().find(|other| other.will_wake(waker)).is_some() {
if queue.iter().any(|other| other.will_wake(waker)) {
return;
}
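`Iterator::any` short-circuits exactly like the old `find(..).is_some()` chain but names the question being asked; clippy's `search_is_some` lint suggests precisely this rewrite:

```rust
fn main() {
    let queue = [1, 3, 5, 8];

    // Old shape: .find(..).is_some()
    let has_even_old = queue.iter().find(|n| *n % 2 == 0).is_some();
    // New shape: .any(..) — same result, clearer intent.
    let has_even_new = queue.iter().any(|n| *n % 2 == 0);

    assert_eq!(has_even_old, has_even_new);
}
```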

View File

@ -81,6 +81,7 @@ impl CpuQueueInner {
/// None if the queue is empty or no valid task was found, in which case the scheduler should
/// go idle.
pub fn next_ready_task(&mut self) -> Option<Arc<Process>> {
#[allow(clippy::never_loop)]
while !self.queue.is_empty() {
let task = self.queue.pop_front().unwrap();

View File

@ -10,9 +10,8 @@ pub trait ResultIterator<T, E> {
impl<T, E, I: Iterator<Item = Result<T, E>>> ResultIterator<T, E> for I {
fn collect_error(self) -> Option<E> {
for item in self {
match item {
Err(e) => return Some(e),
_ => (),
if let Err(e) = item {
return Some(e);
}
}
None
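The `collect_error` cleanup replaces a one-armed `match` with `if let`, as clippy's `single_match` lint suggests. The helper, runnable as it reads after this commit:

```rust
trait ResultIterator<T, E> {
    // Returns the first error yielded by the iterator, if any.
    fn collect_error(self) -> Option<E>;
}

impl<T, E, I: Iterator<Item = Result<T, E>>> ResultIterator<T, E> for I {
    fn collect_error(self) -> Option<E> {
        for item in self {
            if let Err(e) = item {
                return Some(e);
            }
        }
        None
    }
}

fn main() {
    let results = vec![Ok(1), Err("boom"), Ok(2)];
    assert_eq!(results.into_iter().collect_error(), Some("boom"));
}
```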