refactor: fix warnings, fix i686 FP context misalignment

Mark Poliakov 2024-11-01 20:02:28 +02:00
parent a7f727b58e
commit ebedd96362
22 changed files with 169 additions and 106 deletions

Cargo.lock generated
View File

@@ -900,6 +900,7 @@ dependencies = [
"bitflags 2.6.0",
"bytemuck",
"kernel-arch-interface",
"log",
"static_assertions",
"tock-registers",
]

View File

@@ -32,6 +32,7 @@ struct TaskContextInner {
#[repr(align(0x10))]
pub struct FpContext {
// q0..q31 x 128bit + fpcr/fpsr
#[allow(unused)]
inner: [u8; 16 * 32 + 16],
}

View File

@@ -1,5 +1,5 @@
#![no_std]
#![feature(strict_provenance, asm_const, naked_functions, trait_upcasting)]
#![feature(naked_functions, trait_upcasting)]
extern crate alloc;

View File

@@ -363,6 +363,7 @@ pub fn tlb_flush_vaae1(mut page: usize) {
///
/// Unsafe, must only be called by BSP during its early init while still in "lower-half"
pub unsafe fn load_fixed_tables() {
#[allow(static_mut_refs)]
let ttbr0 = KERNEL_TABLES.l1.data.as_ptr() as u64;
TTBR0_EL1.set(ttbr0);
TTBR1_EL1.set(ttbr0);
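
The `#[allow(static_mut_refs)]` added here (and in the gdt, x86-64 context and device-tree hunks further down) silences the newer lint against taking references to a `static mut`. A minimal sketch of the raw-pointer alternative, using a hypothetical stand-in static rather than the real KERNEL_TABLES:

static mut EXAMPLE_TABLES: [u64; 4] = [0; 4]; // hypothetical stand-in

fn example_tables_base() -> u64 {
    // `&raw const` (stable since Rust 1.82) never forms an intermediate
    // shared reference, so `static_mut_refs` does not fire and no #[allow]
    // is needed; using a mutable static is still an unsafe operation.
    let ptr: *const [u64; 4] = unsafe { &raw const EXAMPLE_TABLES };
    ptr as *const u64 as u64
}

Keeping the #[allow] instead is a reasonable choice while the reference never escapes early boot code.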

View File

@@ -24,8 +24,10 @@
.global __i686_switch_and_drop
__i686_task_enter_kernel:
pop %eax // Argument
pop %ecx // Entry
// %esp + 4: argument
// %esp + 0: entry
xor %ecx, %ecx
xchg (%esp), %ecx
// Enable IRQ in EFLAGS
pushfl
@@ -76,7 +78,6 @@ __i686_switch_task:
SAVE_TASK_STATE
// TODO TSS
// Store stack to "from" context
mov %esp, (%ecx)
@@ -85,8 +86,6 @@ __i686_switch_task:
LOAD_TASK_STATE
// TODO TSS
ret
__i686_enter_task:
@@ -97,8 +96,6 @@ __i686_enter_task:
mov 4(%esp), %eax
mov (%eax), %esp
// TODO TSS
LOAD_TASK_STATE
ret
@@ -117,20 +114,3 @@ __i686_switch_and_drop:
// TODO actually drop the thread
ret
# // TSS.RSP0
# mov 8(%rdi), %rax
# // Kernel stack
# mov 0(%rdi), %rdi
# mov %rdi, %rsp
# // Load TSS.RSP0
# mov %gs:(8), %rdi
# mov %rax, 4(%rdi)
# mov %rsi, %rdi
# call __arch_drop_thread
# LOAD_TASK_STATE
# ret
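
Judging by the new layout comments, the stub swaps the entry pointer out of the top-of-stack slot with `xchg`, leaving a zero behind, so that when it eventually jumps to the entry point the zeroed slot reads as a (null) return address and the word at %esp + 4 lines up as the entry function's first cdecl argument. A minimal model of that two-word frame (plain Rust, not kernel code); it matches the push order in the context setup in the next file, where StackBuilder pushes toward lower addresses so the last value pushed ends up at %esp + 0:

fn kernel_entry_frame(entry: usize, arg: usize) -> [usize; 2] {
    [
        entry, // %esp + 0: moved into %ecx by `xchg (%esp), %ecx`; the zeroed
               //           slot then doubles as a fake return address
        arg,   // %esp + 4: read by the entry function as its first argument
    ]
}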

View File

@@ -11,7 +11,7 @@ use yggdrasil_abi::{arch::SavedFrame, error::Error};
use crate::{
gdt::{self, TSS},
mem::KERNEL_TABLES,
mem::kernel_tables,
};
#[allow(unused)]
@@ -82,7 +82,7 @@ pub struct TaskContextImpl<
PA: PhysicalMemoryAllocator<Address = PhysicalAddress>,
> {
inner: UnsafeCell<Inner>,
fpu_context: UnsafeCell<FpuContext>,
fpu_context: Option<UnsafeCell<FpuContext>>,
stack_base_phys: PhysicalAddress,
stack_size: usize,
@@ -120,10 +120,11 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
let sp = stack.build();
let esp0 = stack_base + USER_TASK_PAGES * 0x1000;
let fpu_context = FpuContext::new(true);
Ok(Self {
inner: UnsafeCell::new(Inner { sp }),
fpu_context: UnsafeCell::new(FpuContext::new(true)),
fpu_context: Some(UnsafeCell::new(fpu_context)),
stack_base_phys,
stack_size: USER_TASK_PAGES * 0x1000,
@@ -147,26 +148,30 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
let mut stack = StackBuilder::new(stack_base, KERNEL_TASK_PAGES * 0x1000);
// Entry and argument
stack.push(entry as _);
stack.push(arg);
stack.push(entry as _);
// XXX
setup_common_context(&mut stack, __i686_task_enter_kernel as _);
let sp = stack.build();
let cr3 = unsafe {
kernel_tables()
.as_physical_address()
.try_into_u32()
.unwrap()
};
// TODO stack is leaked
Ok(Self {
inner: UnsafeCell::new(Inner { sp }),
fpu_context: UnsafeCell::new(FpuContext::new(false)),
fpu_context: None,
stack_base_phys,
stack_size: KERNEL_TASK_PAGES * 0x1000,
tss_esp0: 0,
cr3: unsafe { KERNEL_TABLES.as_physical_address() }
.try_into_u32()
.unwrap(),
cr3,
gs_base: 0,
_pd: PhantomData,
@@ -179,9 +184,13 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
if dst != src {
// Save the old context
FpuContext::store(from.fpu_context.get());
if let Some(src_fpu) = from.fpu_context.as_ref() {
FpuContext::store(src_fpu.get());
}
// Load next context
FpuContext::restore(self.fpu_context.get());
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
@@ -192,7 +201,9 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
unsafe fn enter(&self) -> ! {
FpuContext::restore(self.fpu_context.get());
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);
@@ -201,7 +212,9 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
}
unsafe fn switch_and_drop(&self, thread: *const ()) {
FpuContext::restore(self.fpu_context.get());
if let Some(dst_fpu) = self.fpu_context.as_ref() {
FpuContext::restore(dst_fpu.get());
}
gdt::set_gs_base(self.gs_base);
TSS.esp0 = self.tss_esp0;
CR3.set(self.cr3 as _);

View File

@@ -91,6 +91,7 @@ pub fn create_gdt() -> (&'static [Entry], &'static Tss) {
let tss = unsafe { &mut *addr_of_mut!(TSS) };
tss.ss0 = 0x10;
let tss_addr = (tss as *mut Tss).addr();
#[allow(static_mut_refs)]
let gdt = unsafe { GDT.get_mut() };
gdt[5] = Entry::tss(tss_addr as u32, (size_of::<Tss>() - 1) as u32);
@@ -100,6 +101,7 @@
pub fn set_gs_base(gs_base: usize) {
let _guard = IrqGuard::<ArchitectureImpl>::acquire();
unsafe {
#[allow(static_mut_refs)]
GDT.get_mut()[6].set_base(gs_base);
core::arch::asm!("mov $0x33, %ax; mov %ax, %gs", out("ax") _, options(att_syntax, nostack));
}

View File

@@ -3,11 +3,14 @@ use libk_mm_interface::{address::PhysicalAddress, table::EntryLevel, KernelImage
use yggdrasil_abi::error::Error;
use crate::{
mem::{flush_tlb_entry, table::PageAttributes, KERNEL_TABLES},
mem::{flush_tlb_entry, table::PageAttributes},
ArchitectureImpl,
};
use super::table::{PageEntry, PageTable, L0, L3};
use super::{
kernel_tables,
table::{PageEntry, PageTable, L0, L3},
};
pub const KERNEL_SPLIT_L0: usize = KERNEL_VIRT_OFFSET >> 22;
pub const DYNAMIC_MAP_COUNT: usize = 64;
@@ -127,11 +130,12 @@ impl KernelDynamic {
}
pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
for (i, entry) in unsafe { KERNEL_TABLES.l0.kernel.iter().enumerate() } {
let tables = kernel_tables();
for (i, entry) in tables.l0.kernel.iter().enumerate() {
dst[i + KERNEL_SPLIT_L0] = *entry;
}
for (i, entry) in unsafe { KERNEL_TABLES.l0.dynamic.iter().enumerate() } {
for (i, entry) in tables.l0.dynamic.iter().enumerate() {
dst[i + KERNEL_SPLIT_L0 + FIXED_MAP_COUNT] = *entry;
}
}

View File

@@ -1,6 +1,12 @@
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
};
use fixed::FixedTables;
use kernel_arch_interface::{
mem::{DeviceMemoryAttributes, KernelTableManager, RawDeviceMemoryMapping},
sync::{IrqSafeSpinlock, IrqSafeSpinlockGuard},
KERNEL_VIRT_OFFSET,
};
use libk_mm_interface::{
@@ -17,12 +23,60 @@ pub mod table;
pub use process::ProcessAddressSpaceImpl;
use crate::ArchitectureImpl;
#[derive(Debug)]
pub struct KernelTableManagerImpl;
#[link_section = ".data.tables"]
pub static mut KERNEL_TABLES: KernelImageObject<FixedTables> =
unsafe { KernelImageObject::new(FixedTables::zeroed()) };
pub static KERNEL_TABLES: KernelTablesInner = KernelTablesInner::new();
static KERNEL_TABLES_LOCK: IrqSafeSpinlock<ArchitectureImpl, ()> = IrqSafeSpinlock::new(());
#[repr(C, align(0x1000))]
pub struct KernelTablesInner {
inner: UnsafeCell<KernelImageObject<FixedTables>>,
}
impl KernelTablesInner {
const fn new() -> Self {
Self {
inner: unsafe { UnsafeCell::new(KernelImageObject::new(FixedTables::zeroed())) },
}
}
}
unsafe impl Sync for KernelTablesInner {}
pub struct KernelTablesGuard<'a> {
inner: &'a KernelTablesInner,
_guard: IrqSafeSpinlockGuard<'a, ArchitectureImpl, ()>,
}
pub fn kernel_tables<'a>() -> KernelTablesGuard<'a> {
let _guard = KERNEL_TABLES_LOCK.lock();
let inner = &KERNEL_TABLES;
KernelTablesGuard { inner, _guard }
}
impl AsPhysicalAddress for KernelTablesGuard<'_> {
unsafe fn as_physical_address(&self) -> PhysicalAddress {
(*self.inner.inner.get()).as_physical_address()
}
}
impl Deref for KernelTablesGuard<'_> {
type Target = KernelImageObject<FixedTables>;
fn deref(&self) -> &Self::Target {
unsafe { &*self.inner.inner.get() }
}
}
impl DerefMut for KernelTablesGuard<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.inner.inner.get() }
}
}
impl KernelTableManager for KernelTableManagerImpl {
unsafe fn map_device_pages(
@@ -43,7 +97,7 @@ impl KernelTableManager for KernelTableManagerImpl {
assert_eq!(base & 0xFFF, 0);
log::info!("map_device_pages({:#x}, {})", base, count);
let page_count = page_count::<L3>(count);
let virt = KERNEL_TABLES.map_dynamic_memory(base, page_count)?;
let virt = kernel_tables().map_dynamic_memory(base, page_count)?;
Ok(RawDeviceMemoryMapping::from_raw_parts(
virt, virt, page_count, 0,
@@ -56,11 +110,12 @@
}
fn virtualize(phys: u64) -> usize {
unsafe { KERNEL_TABLES.virtualize(PhysicalAddress::from_u64(phys)) }
kernel_tables().virtualize(PhysicalAddress::from_u64(phys))
}
fn physicalize(virt: usize) -> u64 {
unsafe { KERNEL_TABLES.physicalize(virt) }
kernel_tables()
.physicalize(virt)
.expect("Invalid virtual address")
.into_u64()
}
@@ -82,26 +137,26 @@ impl KernelTableManager for KernelTableManagerImpl {
///
/// Only meant to be called once during early OS init.
pub unsafe fn init_fixed_tables() {
let mut tables = kernel_tables();
// Unmap lower stuff
for (i, entry) in KERNEL_TABLES.l0.lower.iter_mut().enumerate() {
for (i, entry) in tables.l0.lower.iter_mut().enumerate() {
*entry = PageEntry::INVALID;
flush_tlb_entry(i << 22);
}
// Map the rest of fixed translation
for (i, entry) in KERNEL_TABLES.l0.kernel.iter_mut().enumerate() {
for (i, entry) in tables.l0.kernel.iter_mut().enumerate() {
let virt = KERNEL_VIRT_OFFSET + (i << L0::SHIFT);
let phys = (i << L0::SHIFT) as u32;
*entry = PageEntry::block(PhysicalAddress::from_u32(phys), PageAttributes::WRITABLE);
flush_tlb_entry(virt);
}
for (l0_entry, entry) in Iterator::zip(
KERNEL_TABLES.l0.dynamic.iter_mut(),
KERNEL_TABLES.dynamic.lock().l3s.iter(),
) {
let phys = entry.as_physical_address();
*l0_entry = PageEntry::table(phys, PageAttributes::WRITABLE);
let dynamic_len = tables.l0.dynamic.len();
for i in 0..dynamic_len {
let phys = tables.dynamic.lock().l3s[i].as_physical_address();
tables.l0.dynamic[i] = PageEntry::table(phys, PageAttributes::WRITABLE);
}
}
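
A sketch of what a call site looks like with the new accessor, mirroring the virtualize/physicalize changes above: the guard acquires the IRQ-safe lock and derefs to KernelImageObject<FixedTables>, replacing direct access to the former `static mut`.

fn physicalize_example(virt: usize) -> u64 {
    let tables = kernel_tables(); // KERNEL_TABLES_LOCK acquired here
    tables
        .physicalize(virt)
        .expect("Invalid virtual address")
        .into_u64()
} // lock released when `tables` is dropped

Assuming the spinlock is not reentrant, a nested kernel_tables() call on the same CPU would deadlock, so guards are best kept this short-lived.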

View File

@@ -28,6 +28,14 @@ impl<T> OneTimeInit<T> {
}
}
/// Wraps the value in an [OneTimeInit], providing an already initialized value.
pub const fn new_init(value: T) -> Self {
Self {
value: UnsafeCell::new(MaybeUninit::new(value)),
state: AtomicUsize::new(Self::STATE_INITIALIZED),
}
}
/// Returns `true` if the value has already been initialized
#[inline]
pub fn is_initialized(&self) -> bool {
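
A short usage sketch of the new constructor; the static name and the get() accessor are assumed here for illustration:

static DEFAULT_TIMEOUT_MS: OneTimeInit<u64> = OneTimeInit::new_init(500);

fn timeout_ms() -> u64 {
    // The state starts out as STATE_INITIALIZED, so no init() call is needed
    // and the read cannot observe an uninitialized value.
    *DEFAULT_TIMEOUT_MS.get()
}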

View File

@@ -10,6 +10,7 @@ bytemuck = { version = "1.16.1", features = ["derive"] }
bitflags = "2.6.0"
tock-registers = "0.8.1"
static_assertions = "1.1.0"
log = "0.4.22"
[lints.rust]
unexpected_cfgs = { level = "allow", check-cfg = ['cfg(rust_analyzer)'] }

View File

@@ -1,4 +1,4 @@
#![feature(iter_chain)]
#![feature(iter_chain, new_zeroed_alloc, box_as_ptr)]
#![allow(clippy::new_without_default)]
#![no_std]

View File

@@ -426,6 +426,7 @@ pub use msr::ia32_sfmask::MSR_IA32_SFMASK;
pub use msr::ia32_star::MSR_IA32_STAR;
pub use xcr0::XCR0;
use alloc::boxed::Box;
use bytemuck::{Pod, Zeroable};
use static_assertions::const_assert_eq;
@@ -475,15 +476,15 @@ struct FpuContextInner {
const_assert_eq!(size_of::<FpuContextInner>(), 512);
#[repr(C, align(16))]
#[repr(C)]
pub struct FpuContext {
inner: FpuContextInner,
inner: Box<FpuContextInner>,
}
impl FpuContext {
pub fn new(mask_exceptions: bool) -> Self {
const ALL_EXCEPTIONS_MASK: u32 = (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8);
let mut inner = FpuContextInner::zeroed();
let mut inner: Box<FpuContextInner> = unsafe { Box::new_zeroed().assume_init() };
if mask_exceptions {
inner.mxcsr |= ALL_EXCEPTIONS_MASK;
}
@@ -493,22 +494,22 @@ impl FpuContext {
pub fn store(this: *mut Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxsave(this as _)
core::arch::x86::_fxsave(Box::as_mut_ptr(&mut (*this).inner) as _)
}
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
unsafe {
core::arch::x86_64::_fxsave64(this as _)
core::arch::x86_64::_fxsave64(Box::as_mut_ptr(&mut (*this).inner) as _)
}
}
pub fn restore(this: *const Self) {
#[cfg(any(target_arch = "x86", rust_analyzer))]
unsafe {
core::arch::x86::_fxrstor(this as _)
core::arch::x86::_fxrstor(Box::as_ptr(&(*this).inner) as _)
}
#[cfg(any(target_arch = "x86_64", rust_analyzer))]
unsafe {
core::arch::x86_64::_fxrstor64(this as _)
core::arch::x86_64::_fxrstor64(Box::as_ptr(&(*this).inner) as _)
}
}
}
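
fxsave/fxrstor operate on a 512-byte save area that must be 16-byte aligned. Keeping that area inline in the task context apparently could not guarantee the alignment on i686 (the "fp context misalign" from the commit title), so the inner area is now heap-allocated and the intrinsics target the boxed pointer instead. A minimal sketch of the property being relied on, with a hypothetical stand-in type rather than the real FpuContextInner (Box comes from `alloc` in these crates):

#[repr(C, align(16))]
struct FxArea([u8; 512]); // hypothetical stand-in with the fxsave alignment

fn boxed_area_is_aligned() {
    // The allocator must honour the type's alignment, so the heap copy is a
    // valid fxsave/fxrstor target even when the struct that owns the Box is
    // itself not 16-byte aligned.
    let area = Box::new(FxArea([0; 512]));
    assert_eq!(&*area as *const FxArea as usize % 16, 0);
}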

View File

@@ -420,11 +420,13 @@ impl<K: KernelTableManager, PA: PhysicalMemoryAllocator<Address = PhysicalAddres
stack.push(entry as _);
stack.push(arg);
// XXX
setup_common_context(
&mut stack,
__x86_64_task_enter_kernel as _,
unsafe { KERNEL_TABLES.as_physical_address().into_u64() },
#[allow(static_mut_refs)]
unsafe {
KERNEL_TABLES.as_physical_address().into_u64()
},
0,
);

View File

@@ -99,8 +99,10 @@ impl<'a> DeviceTree<'a> {
///
/// The caller must ensure the validity of the address.
pub unsafe fn from_addr(virt: usize) -> Self {
#[allow(static_mut_refs)]
FDT_INDEX_BUFFER.0.fill(0);
let tree = DevTree::from_raw_pointer(virt as _).unwrap();
#[allow(static_mut_refs)]
let index = DevTreeIndex::new(tree, &mut FDT_INDEX_BUFFER.0).unwrap();
Self { tree, index }
}

View File

@@ -1,24 +1,25 @@
//! Utilities for handling reserved memory regions
use libk_mm_interface::address::PhysicalAddress;
use libk_util::StaticVector;
use libk_util::{sync::IrqSafeSpinlock, StaticVector};
use crate::phys::PhysicalMemoryRegion;
static mut RESERVED_MEMORY: StaticVector<PhysicalMemoryRegion, 8> = StaticVector::new();
static RESERVED_MEMORY: IrqSafeSpinlock<StaticVector<PhysicalMemoryRegion, 8>> =
IrqSafeSpinlock::new(StaticVector::new());
/// Marks a region of physical memory as reserved.
///
/// # Safety
/// # Note
///
/// Can only be called from initialization code **before** physical memory manager is initialized.
pub unsafe fn reserve_region(_reason: &str, region: PhysicalMemoryRegion) {
RESERVED_MEMORY.push(region);
/// This call only has any effect **before** physical memory manager is initialized.
pub fn reserve_region(_reason: &str, region: PhysicalMemoryRegion) {
RESERVED_MEMORY.lock().push(region);
}
/// Returns `true` if `addr` refers to any reserved memory region
pub fn is_reserved(addr: PhysicalAddress) -> bool {
for region in unsafe { RESERVED_MEMORY.iter() } {
for region in RESERVED_MEMORY.lock().iter() {
if region.range().contains(&addr) {
return true;
}
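
With the vector behind an IRQ-safe spinlock, reservation calls no longer need an unsafe block, as the x86-64 hunks below show, and per the updated doc comment they only have an effect before the physical memory manager is initialized. A minimal sketch with made-up example values:

fn reserve_example() {
    let region = PhysicalMemoryRegion {
        base: PhysicalAddress::from_u64(0x8000_0000), // illustrative address only
        size: 0x1000,
    };
    reserve_region("example", region);
    // The region is now tracked, so queries for addresses inside it succeed.
    assert!(is_reserved(PhysicalAddress::from_u64(0x8000_0000)));
}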

View File

@@ -10,7 +10,7 @@ use elf::{
ElfStream, ParseError,
};
use libk_mm::{
pointer::{PhysicalRef, PhysicalRefMut},
pointer::PhysicalRefMut,
process::{ProcessAddressSpace, VirtualRangeBacking},
table::MapAttributes,
PageBox,
@@ -211,7 +211,7 @@ pub fn load_elf_from_file<F: Read + Seek>(
}
// Load TLS master copy
let tls = handle_tls(&elf, &file, space)?;
let tls = handle_tls(&elf, &file)?;
// Fixup relocations
handle_relocations(&mut elf, space, image_load_base)?;
@@ -402,7 +402,6 @@ fn handle_relocations<F: Read + Seek>(
fn handle_tls<F: Read + Seek>(
elf: &ElfStream<AnyEndian, FileReader<F>>,
file: &FileReader<F>,
space: &ProcessAddressSpace,
) -> Result<Option<ProcessTlsInfo>, Error> {
// TODO handle different TLS models
// TODO handle TLS segment attributes

View File

@@ -1,8 +1,6 @@
use core::{alloc::Layout, mem::size_of};
use libk_mm::{
address::Virtualize, pointer::PhysicalRef, process::ProcessAddressSpace, table::EntryLevelExt,
};
use libk_mm::{address::Virtualize, process::ProcessAddressSpace};
use yggdrasil_abi::error::Error;
// XXX

View File

@@ -1,7 +1,6 @@
use alloc::sync::Arc;
use core::{
fmt,
range::Range,
sync::atomic::{AtomicU32, AtomicU64, Ordering},
};

View File

@@ -286,6 +286,7 @@
"bitflags",
"bytemuck",
"kernel-arch-interface",
"log",
"static_assertions",
"tock-registers",
]

View File

@@ -108,30 +108,26 @@ impl X86_64 {
match data {
BootData::YBoot(data) => {
// Reserve the memory map
unsafe {
reserve_region(
"mmap",
PhysicalMemoryRegion {
base: PhysicalAddress::from_u64(data.memory_map.address),
size: data.memory_map.len as usize * size_of::<AvailableMemoryRegion>(),
},
);
}
reserve_region(
"mmap",
PhysicalMemoryRegion {
base: PhysicalAddress::from_u64(data.memory_map.address),
size: data.memory_map.len as usize * size_of::<AvailableMemoryRegion>(),
},
);
// Reserve initrd, if not NULL
if data.initrd_address != 0 && data.initrd_size != 0 {
let aligned_start = data.initrd_address & !0xFFF;
let aligned_end = (data.initrd_address + data.initrd_size + 0xFFF) & !0xFFF;
unsafe {
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: PhysicalAddress::from_u64(aligned_start),
size: (aligned_end - aligned_start) as usize,
},
);
}
reserve_region(
"initrd",
PhysicalMemoryRegion {
base: PhysicalAddress::from_u64(aligned_start),
size: (aligned_end - aligned_start) as usize,
},
);
}
}
}
@@ -179,15 +175,13 @@ impl X86_64 {
let l2_tables_start = phys::find_contiguous_region(it, end_l1i)
.expect("Could not allocate the memory for RAM mapping L2 tables");
unsafe {
reserve_region(
"ram-l2-tables",
PhysicalMemoryRegion {
base: l2_tables_start,
size: end_l1i * L3::SIZE,
},
);
}
reserve_region(
"ram-l2-tables",
PhysicalMemoryRegion {
base: l2_tables_start,
size: end_l1i * L3::SIZE,
},
);
// Fill in the tables
for l1i in 0..end_l1i {

View File

@@ -20,7 +20,7 @@ pub(crate) fn connect_socket(
run_with_io(&process, |mut io| {
let (local, fd) = match connect {
&mut SocketConnect::Udp(fd, remote) => {
&mut SocketConnect::Udp(_fd, _remote) => {
todo!("UDP socket connect");
}
&mut SocketConnect::Tcp(remote, timeout) => {