Mark Poliakov 2021-09-03 21:48:46 +03:00
parent 74eedfe540
commit efdf8b2c0b
13 changed files with 27 additions and 394 deletions

@@ -61,8 +61,7 @@ impl SerialDevice for AuxUart
impl InterruptHandler for AuxUart {
fn do_irq(&self, _irq: u32) {
-let byte = unsafe { mmio_read(Self::REG_AUX_MU_IO) } as u8;
-debugln!("{}", byte as char);
+let _byte = unsafe { mmio_read::<u32>(Self::REG_AUX_MU_IO) } as u8;
}
}

@@ -17,14 +17,21 @@ cfg_if! {
}
}
-pub unsafe fn mmio_write(addr: PhysicalAddress, value: u32) {
+pub trait MmioSize {}
+impl MmioSize for u32 {}
+impl MmioSize for i32 {}
+impl MmioSize for u8 {}
+
+#[inline]
+pub unsafe fn mmio_write<T: MmioSize>(addr: PhysicalAddress, value: T) {
core::ptr::write_volatile(
VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
value,
);
}
-pub unsafe fn mmio_read(addr: PhysicalAddress) -> u32 {
+#[inline]
+pub unsafe fn mmio_read<T: MmioSize>(addr: PhysicalAddress) -> T {
core::ptr::read_volatile(
VirtualAddress::<KernelSpace>::from(addr).as_mut_ptr(),
)
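For context, the new accessors make the access width an explicit type parameter instead of hardcoding u32. A standalone sketch of the same pattern over raw pointers (trait and function names mirror the diff; the PhysicalAddress-to-VirtualAddress translation is elided):

pub trait MmioSize {}
impl MmioSize for u32 {}
impl MmioSize for u8 {}

#[inline]
pub unsafe fn mmio_write<T: MmioSize>(addr: *mut T, value: T) {
    // Volatile keeps the compiler from eliding or reordering device accesses.
    core::ptr::write_volatile(addr, value);
}

#[inline]
pub unsafe fn mmio_read<T: MmioSize>(addr: *const T) -> T {
    core::ptr::read_volatile(addr)
}

fn main() {
    // Demonstrated on ordinary memory; real call sites pass device registers.
    let mut reg: u32 = 0;
    unsafe {
        mmio_write(&mut reg as *mut u32, 0xA5);
        let v = mmio_read::<u32>(&reg as *const u32);
        assert_eq!(v, 0xA5);
    }
}

Because the return type is now generic, inference has no default width to fall back on, which is why the call sites below gain a ::<u32> turbofish or a : u32 annotation.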

@@ -24,9 +24,8 @@ impl Device for ArmTimer
impl InterruptHandler for ArmTimer {
fn do_irq(&self, _irq: u32) {
-debugln!("T");
unsafe {
-intrin::write_cntp_tval_el0(10000000);
+intrin::write_cntp_tval_el0(100000);
}
}
}
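The tick interval implied by CNTP_TVAL_EL0 is tval / CNTFRQ_EL0 seconds, so the actual period depends on the counter frequency the machine reports. Assuming the 62.5 MHz QEMU typically exposes (a 19.2 MHz Raspberry Pi clock scales accordingly), the change shortens the timer period from 160 ms to 1.6 ms; a back-of-envelope check:

// Interval in microseconds for a given CNTP_TVAL_EL0 value,
// with the counter frequency taken from CNTFRQ_EL0.
fn interval_us(tval: u64, cntfrq: u64) -> u64 {
    tval * 1_000_000 / cntfrq
}

fn main() {
    let cntfrq = 62_500_000; // assumption: QEMU's usual CNTFRQ_EL0
    assert_eq!(interval_us(10_000_000, cntfrq), 160_000); // old: 160 ms
    assert_eq!(interval_us(100_000, cntfrq), 1_600); // new: 1.6 ms
}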

@@ -74,7 +74,7 @@ cfg_if! {
pub fn get_intc() -> &'static impl InterruptController {
&INTC
}
-} else {
+} else if #[cfg(feature = "mach_virt")] {
pub mod gic;
use gic::Gic;
use crate::arch::machine;
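Replacing the bare else with an explicit else if means a build with no recognized machine feature now fails to compile rather than silently defaulting to the GIC. A minimal sketch of the pattern (the first feature name is a guess, and the stub types stand in for the real controllers):

use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(feature = "mach_bcm283x")] {
        pub struct Bcm283xIntc;
        pub fn get_intc() -> Bcm283xIntc { Bcm283xIntc }
    } else if #[cfg(feature = "mach_virt")] {
        pub struct Gic;
        pub fn get_intc() -> Gic { Gic }
    }
    // No trailing else: with neither feature enabled, get_intc() is never
    // defined and the first caller becomes a compile-time error.
}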

@@ -37,7 +37,7 @@ impl InterruptController for Gic {
fn is_irq_pending(&self, irq: u32) -> bool {
unsafe {
-mmio_read(self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize)
+mmio_read::<u32>(self.gicd_base + Self::GICD_ICPENDR + ((irq >> 3) & !0x3) as usize)
& (1 << (irq & 0x1F))
!= 0
}
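The indexing arithmetic packs 32 pending bits into each 32-bit ICPENDR register: irq >> 3 turns the IRQ number into a byte offset, and & !0x3 aligns that offset down to the word holding the bit. A worked check of the math:

fn icpendr_offset_and_bit(irq: u32) -> (usize, u32) {
    let offset = ((irq >> 3) & !0x3) as usize; // byte offset of the 32-bit word
    let bit = 1u32 << (irq & 0x1F); // bit position within that word
    (offset, bit)
}

fn main() {
    // IRQ 33 lands in the second ICPENDR word (byte offset 4), bit 1.
    assert_eq!(icpendr_offset_and_bit(33), (4, 1 << 1));
    // IRQ 31 stays in the first word, at its top bit.
    assert_eq!(icpendr_offset_and_bit(31), (0, 1 << 31));
}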

@@ -10,10 +10,9 @@ pub struct Pl011 {
impl InterruptHandler for Pl011 {
fn do_irq(&self, _irq: u32) {
-let tmp = unsafe { mmio_read(self.base + Self::UARTRIS) };
+let tmp: u32 = unsafe { mmio_read(self.base + Self::UARTRIS) };
if tmp & Self::UARTRIS_RXRIS != 0 {
-let ch = unsafe { mmio_read(self.base + Self::UARTDR) } as u8;
-debugln!("{}", ch as char);
+let _ch = unsafe { mmio_read::<u32>(self.base + Self::UARTDR) } as u8;
unsafe {
mmio_write(self.base + Self::UARTICR, Self::UARTICR_RXIC);
}
@@ -45,7 +44,7 @@ impl Device for Pl011 {
impl SerialDevice for Pl011 {
fn send(&self, ch: u8) {
unsafe {
-while mmio_read(self.base + Self::UARTFR) & Self::UARTFR_BUSY != 0 {}
+while mmio_read::<u32>(self.base + Self::UARTFR) & Self::UARTFR_BUSY != 0 {}
mmio_write(self.base + Self::UARTDR, ch as u32);
}
}

@@ -26,7 +26,7 @@ impl Device for Pl031 {
}
unsafe fn enable(&self) {
-let tmp = mmio_read(self.base + Self::RTCDR);
+let tmp: u32 = mmio_read(self.base + Self::RTCDR);
mmio_write(self.base + Self::RTCMR, tmp + 1);
mmio_write(self.base + Self::RTCIMSC, 1);
@@ -38,7 +38,7 @@ impl Device for Pl031 {
impl InterruptHandler for Pl031 {
fn do_irq(&self, _irq: u32) {
-let time_int = unsafe { mmio_read(self.base + Self::RTCDR) };
+let time_int: u32 = unsafe { mmio_read(self.base + Self::RTCDR) };
unsafe {
mmio_write(self.base + Self::RTCICR, 1);
mmio_write(self.base + Self::RTCMR, time_int + 1);
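The PL031 counts whole seconds in RTCDR and raises its interrupt when the counter matches RTCMR, so re-arming the match register to now + 1 on every IRQ yields a 1 Hz tick. A sketch of that sequence over a raw base pointer (register offsets per the PL031 reference manual):

const RTCDR: usize = 0x00; // data register: current time in seconds
const RTCMR: usize = 0x04; // match register
const RTCICR: usize = 0x1C; // interrupt clear register

unsafe fn pl031_irq(base: *mut u8) {
    let now = core::ptr::read_volatile(base.add(RTCDR) as *const u32);
    // Acknowledge this interrupt, then schedule the next one a second out.
    core::ptr::write_volatile(base.add(RTCICR) as *mut u32, 1);
    core::ptr::write_volatile(base.add(RTCMR) as *mut u32, now.wrapping_add(1));
}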

@@ -67,7 +67,7 @@ extern "C" fn kernel_bsp_main(fdt_base: PhysicalAddress) -> ! {
}
debug!("BSP init finished\n");
-//smp::wakeup_ap_cpus();
+// smp::wakeup_ap_cpus();
entry_common();
}

@@ -10,7 +10,7 @@ use crate::{
use address::VirtualAddress;
use core::alloc::{GlobalAlloc, Layout};
use core::convert::TryFrom;
-use core::mem::{size_of, MaybeUninit};
+use core::mem::size_of;
use core::ptr::null_mut;
use spin::Mutex;
@@ -19,7 +19,7 @@ const HEAP_SIZE: usize = 16 << 20; // 16MiB
struct Heap {
start: VirtualAddress<KernelSpace>,
-mutex: MaybeUninit<Mutex<()>>,
+mutex: Option<Mutex<()>>,
}
#[derive(Debug)]
@@ -37,7 +37,7 @@ unsafe impl GlobalAlloc for Heap {
let count = ((layout.size() + 15) & !15) as u32;
// NOTE: this must not be optimized away
-let _lock = self.mutex.assume_init_ref().lock();
+let _lock = self.mutex.as_ref().unwrap().lock();
// Check if the memory is corrupted
let mut block_it = self.first();
@@ -71,7 +71,7 @@ unsafe impl GlobalAlloc for Heap {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// NOTE: this must not be optimized away
-let _lock = self.mutex.assume_init_ref().lock();
+let _lock = self.mutex.as_ref().unwrap().lock();
let address = VirtualAddress::from_ptr(ptr);
// Check heap boundaries
@@ -124,7 +124,7 @@ impl Heap {
block.next = null_mut();
Heap {
start: addr,
-mutex: MaybeUninit::new(Mutex::new(())),
+mutex: Some(Mutex::new(())),
}
}
@@ -135,7 +135,7 @@ impl Heap {
#[allow(dead_code)]
fn dump(&self) {
// NOTE: this must not be optimized away
-let _lock = unsafe { self.mutex.assume_init_ref().lock() };
+let _lock = self.mutex.as_ref().unwrap().lock();
let mut block_it = self.first();
while let Some(ref mut block) = block_it {
@@ -187,7 +187,7 @@ impl HeapBlock {
#[global_allocator]
static mut KERNEL_HEAP: Heap = Heap {
start: VirtualAddress::null(),
-mutex: MaybeUninit::uninit(),
+mutex: None,
};
#[alloc_error_handler]
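Swapping MaybeUninit<Mutex<()>> for Option<Mutex<()>> keeps the static const-initializable while turning any use-before-init into a clean panic instead of undefined behavior. The shape of the pattern, reduced to a stub (the real Heap carries more fields):

use spin::Mutex; // same lock type the kernel heap uses

struct Heap {
    mutex: Option<Mutex<()>>,
}

// None is a valid const initializer; no unsafe bookkeeping needed.
static mut KERNEL_HEAP: Heap = Heap { mutex: None };

unsafe fn heap_lock() {
    // Panics with a clear message if init never ran, where
    // MaybeUninit::assume_init_ref() would have been silent UB.
    let _lock = KERNEL_HEAP.mutex.as_ref().unwrap().lock();
}

unsafe fn heap_init() {
    KERNEL_HEAP.mutex = Some(Mutex::new(()));
}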

@@ -1,34 +0,0 @@
.include "kernel/src/arch/macros.S"
.section .text
.global context_switch_to
.global context_switch
context_enter_kernel:
mov x0, #5
msr spsr_el1, x0
ldp x0, x1, [sp]
msr elr_el1, x1
eret
context_switch:
msr daifset, #0xF
// Store old callee-saved regs on stack
__callee_save_ctx
// Store old stack pointer
mov x19, sp
str x19, [x1]
context_switch_to:
msr daifset, #0xF
// Load new stack
ldr x0, [x0]
mov sp, x0
// Load new callee-saved regs from stack
__callee_restore_ctx
// Simulate/perform a return
ret

@@ -1,72 +0,0 @@
use crate::{
mem::phys::{self, PageUsage},
KernelSpace,
};
use address::VirtualAddress;
use core::mem::size_of;
global_asm!(include_str!("context.S"));
#[repr(C)]
pub(super) struct Context {
pub kernel_sp: VirtualAddress<KernelSpace>, // 0x00
cpu_id: u32, // 0x08
}
struct StackBuilder {
bp: VirtualAddress<KernelSpace>,
sp: VirtualAddress<KernelSpace>,
}
impl Context {
pub fn new_kernel(entry: usize, arg: usize) -> Context {
let kstack_phys = phys::alloc_contiguous_pages(PageUsage::Kernel, 4).unwrap();
let mut stack = unsafe { StackBuilder::new(kstack_phys.into(), 4096 * 4) };
debug!("Stack bounds: {:?}..{:?}\n", stack.sp, stack.bp);
stack.push(entry); // ELR before ERET
stack.push(arg);
stack.push(context_enter_kernel as usize); // x30 LR
stack.push(0usize); // padding
stack.push(0usize); // x29
stack.push(0usize); // x28
stack.push(0usize); // x27
stack.push(0usize); // x26
stack.push(0usize); // x25
stack.push(0usize); // x24
stack.push(0usize); // x23
stack.push(0usize); // x22
stack.push(0usize); // x21
stack.push(0usize); // x20
stack.push(0usize); // x19
Context {
kernel_sp: stack.sp,
cpu_id: u32::MAX,
}
}
}
impl StackBuilder {
pub unsafe fn new(bp: VirtualAddress<KernelSpace>, size: usize) -> Self {
Self { bp, sp: bp + size }
}
pub fn push<A: Into<usize>>(&mut self, value: A) {
if self.sp == self.bp {
panic!("Stack overflow");
}
self.sp -= size_of::<usize>();
unsafe {
core::ptr::write(self.sp.as_mut_ptr(), value.into());
}
}
}
extern "C" {
pub(super) fn context_switch_to(dst: *mut Context);
pub(super) fn context_switch(dst: *mut Context, src: *mut Context);
fn context_enter_kernel();
}

@@ -1,265 +0,0 @@
use crate::arch::{
cpu::{self, get_cpu},
intrin,
};
use core::mem::MaybeUninit;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use spin::Mutex;
pub mod context;
use context::{context_switch, context_switch_to, Context};
pub struct Process {
context: Context,
cpu: Option<u32>,
sched_prev: *mut Process,
sched_next: *mut Process,
}
pub struct Queue {
head: *mut Process,
current: *mut Process,
size: usize,
}
pub struct Scheduler {
queue: Mutex<Queue>,
ready: AtomicBool,
idle: MaybeUninit<Process>,
}
impl Process {
fn new_kernel(entry: usize, arg: usize) -> Self {
Self {
context: Context::new_kernel(entry, arg),
cpu: None,
sched_prev: null_mut(),
sched_next: null_mut(),
}
}
pub fn exit(&mut self) -> ! {
unsafe {
get_cpu().scheduler.unqueue(self);
}
panic!("This code should not run");
}
pub fn this() -> &'static mut Process {
// TODO Process can be rescheduled to some other CPU after/during this call
unsafe { &mut *get_cpu().scheduler.queue.lock().current }
}
}
impl Scheduler {
pub fn new() -> Self {
Self {
queue: Mutex::new(Queue {
head: null_mut(),
current: null_mut(),
size: 0,
}),
ready: AtomicBool::new(false),
idle: MaybeUninit::uninit(),
}
}
unsafe fn queue(&mut self, proc: *mut Process) {
let mut lock = self.queue.lock();
if !lock.head.is_null() {
let queue_tail = (*lock.head).sched_prev;
(*queue_tail).sched_next = proc;
(*proc).sched_prev = queue_tail;
(*lock.head).sched_prev = proc;
(*proc).sched_next = lock.head;
} else {
(*proc).sched_prev = proc;
(*proc).sched_next = proc;
lock.head = proc;
}
lock.size += 1;
}
unsafe fn unqueue(&mut self, proc: *mut Process) {
intrin::disable_irq();
let mut lock = self.queue.lock();
assert!((*proc).cpu.unwrap() == get_cpu().cpu_id);
// Can only unqueue current task
let sched_prev = (*proc).sched_prev;
let sched_next = (*proc).sched_next;
(*proc).sched_next = null_mut();
(*proc).sched_prev = null_mut();
if sched_next == proc {
lock.head = null_mut();
drop(lock);
let ptr = self.idle.as_mut_ptr();
self.switch_to(ptr);
panic!("This code should not run (yet)");
}
if proc == lock.head {
lock.head = sched_next;
}
(*sched_next).sched_prev = sched_prev;
(*sched_prev).sched_next = sched_next;
drop(lock);
self.switch_to(sched_next);
}
unsafe fn switch_to(&mut self, proc: *mut Process) {
intrin::disable_irq();
let mut lock = self.queue.lock();
let from = lock.current;
lock.current = proc;
(*proc).cpu = Some(get_cpu().cpu_id);
if from.is_null() {
drop(lock);
context_switch_to(&mut (*proc).context);
} else {
drop(lock);
context_switch(&mut (*proc).context, &mut (*from).context);
}
}
unsafe fn enter(&mut self) -> ! {
let lock = self.queue.lock();
self.ready.store(true, Ordering::Release);
let proc = if let Some(first) = lock.head.as_mut() {
first
} else {
self.idle.as_mut_ptr()
};
drop(lock);
self.switch_to(proc);
panic!("This code should not run");
}
unsafe fn init_idle(&mut self) {
self.idle.write(Process::new_kernel(idle_fn as usize, 0));
}
unsafe fn sched(&mut self) {
let mut lock = self.queue.lock();
let from = lock.current;
assert!(!from.is_null());
let from = &mut *from;
let to = if !from.sched_next.is_null() {
from.sched_next
} else if !lock.head.is_null() {
lock.head
} else {
self.idle.as_mut_ptr()
};
assert!(!to.is_null());
drop(lock);
self.switch_to(to);
}
}
fn idle_fn(_arg: usize) {
loop {
unsafe {
intrin::enable_irq();
intrin::wfi();
}
}
}
pub fn sched_yield() {
let cpu = get_cpu();
if cpu.scheduler.ready.load(Ordering::Acquire) {
unsafe {
cpu.scheduler.sched();
}
}
}
pub fn sched_queue(proc: *mut Process) {
let mut min_idx = 0;
let mut min_val = usize::MAX;
for index in 0..cpu::CPU_COUNT.load(Ordering::Relaxed) {
let lock = unsafe { cpu::CPUS[index].assume_init_ref().scheduler.queue.lock() };
if lock.size < min_val {
min_idx = index;
min_val = lock.size;
}
}
debugln!("Queue to cpu{}", min_idx);
unsafe {
cpu::CPUS[min_idx].assume_init_mut().scheduler.queue(proc);
};
}
#[inline(never)]
fn make_delay(d: usize) {
for _ in 0..d {
crate::arch::intrin::nop();
}
}
extern "C" fn f0(arg: usize) {
for _ in 0..600 + arg * 600 {
make_delay(10000);
unsafe {
COUNTERS[arg]
.assume_init_mut()
.fetch_add(1, Ordering::Relaxed);
}
}
debugln!("Exit task #{}", arg);
Process::this().exit();
}
pub static mut COUNTERS: [MaybeUninit<AtomicUsize>; cpu::MAX_CPU * 2] = MaybeUninit::uninit_array();
static TASK_COUNT: AtomicUsize = AtomicUsize::new(0);
static mut C: [MaybeUninit<Process>; cpu::MAX_CPU * 2] = MaybeUninit::uninit_array();
pub fn spawn_task() {
let c = TASK_COUNT.load(Ordering::Acquire);
if c >= unsafe { COUNTERS.len() } {
return;
}
TASK_COUNT.fetch_add(1, Ordering::Release);
unsafe {
debugln!("Start task #{}", c);
COUNTERS[c].write(AtomicUsize::new(0));
C[c].write(Process::new_kernel(f0 as usize, c));
sched_queue(C[c].assume_init_mut());
}
}
pub fn enter() -> ! {
unsafe {
let cpu = get_cpu();
debug!("Setting up a task for cpu{}\n", cpu.cpu_id);
let id = cpu.cpu_id as usize;
// Initialize the idle task
cpu.scheduler.init_idle();
debug!("Entering scheduler on cpu{}\n", cpu.cpu_id);
cpu.scheduler.enter();
}
}

@@ -13,7 +13,7 @@ fi
ARCH=aarch64-unknown-none-${MACH}
KERNEL=target/${ARCH}/debug/kernel
-QEMU_OPTS="-chardev stdio,nowait,id=char0,mux=on \
+QEMU_OPTS="-chardev stdio,wait=off,id=char0,mux=on \
-mon chardev=char0"
if [ "$QEMU_DINT" = 1 ]; then