feat: proof-of-concept ELF loading

This commit is contained in:
Mark Poliakov 2021-10-18 19:15:42 +03:00
parent 1ef337a306
commit 1970e24808
15 changed files with 506 additions and 34 deletions

4
Cargo.lock generated
View File

@ -61,6 +61,10 @@ dependencies = [
"unsafe_unwrap",
]
[[package]]
name = "init"
version = "0.1.0"
[[package]]
name = "kernel"
version = "0.1.0"

View File

@ -10,5 +10,6 @@ edition = "2018"
[workspace]
members = [
"kernel",
"init",
"error"
]

View File

@ -5,6 +5,8 @@ endif
GDB?=gdb-multiarch
LLVM_BASE=$(shell llvm-config --bindir)
CLANG=clang-14
LDLLD=ld.lld-12
OBJCOPY=$(LLVM_BASE)/llvm-objcopy
MKIMAGE?=mkimage
@ -26,6 +28,7 @@ $(error TODO)
else
ifeq ($(MACH),qemu)
QEMU_OPTS+=-kernel $(O)/kernel.bin \
-initrd $(O)/initrd.img \
-M virt,virtualization=on \
-cpu cortex-a72 \
-m 512 \
@ -55,6 +58,8 @@ all: kernel
kernel:
cd kernel && cargo build $(CARGO_BUILD_OPTS)
cd init && cargo build --target=../etc/$(ARCH)-osdev5.json -Z build-std=core
cp target/$(ARCH)-osdev5/debug/init $(O)/initrd.img
ifeq ($(ARCH),aarch64)
$(LLVM_BASE)/llvm-strip -o $(O)/kernel.strip $(O)/kernel
$(LLVM_BASE)/llvm-size $(O)/kernel.strip

17
etc/aarch64-osdev5.json Normal file
View File

@ -0,0 +1,17 @@
{
"arch": "aarch64",
"data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
"disable-redzone": true,
"executables": true,
"features": "+strict-align,+neon,+fp-armv8",
"linker": "rust-lld",
"linker-flavor": "ld.lld",
"llvm-target": "aarch64-unknown-none",
"max-atomic-width": 128,
"panic-strategy": "abort",
"relocation-model": "static",
"target-pointer-width": "64",
"pre-link-args": {
"ld.lld": [ "-Tetc/aarch64-osdev5.ld" ]
}
}

31
etc/aarch64-osdev5.ld Normal file
View File

@ -0,0 +1,31 @@
ENTRY(_start);
PHDRS {
text PT_LOAD ;
rodata PT_LOAD ;
data PT_LOAD ;
}
SECTIONS {
. = 0x400000;
.text : {
*(.text._start)
*(.text*)
} :text
. = ALIGN(0x1000);
.rodata : {
*(.rodata*)
} :rodata
. = ALIGN(0x1000);
.data : {
*(.data*)
} :data
.bss : {
*(COMMON)
*(.bss*)
} :data
}

8
init/Cargo.toml Normal file
View File

@ -0,0 +1,8 @@
[package]
name = "init"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

38
init/src/main.rs Normal file
View File

@ -0,0 +1,38 @@
#![feature(asm)]
#![no_std]
#![no_main]
use core::panic::PanicInfo;
static RODATA: [u8; 4] = [1, 2, 3, 4];
static mut WDATA: [u8; 4] = [1, 2, 3, 4];
static mut WBSS: [u8; 16] = [0; 16];
// Placed in a dedicated section so the linker script can force this to be
// the first code in the image (see *(.text._start) in etc/aarch64-osdev5.ld)
#[link_section = ".text._start"]
#[no_mangle]
extern "C" fn _start(_arg: usize) -> ! {
    let mut c0;
    unsafe {
        // Write to .bss through a const->mut raw-pointer cast; exercises the
        // kernel's zero-fill (.bss) mapping of this process image
        let d: &mut [u8; 4] = &mut *(&WBSS as *const _ as *mut _);
        d[0] = 2;
    }
    // NOTE(review): this first assignment is dead — overwritten on the next
    // line before being read; presumably kept only so WDATA is referenced
    // and the .data segment gets loaded — confirm intent
    c0 = unsafe { &mut WDATA as *mut _ as usize };
    c0 = unsafe { &mut WBSS as *mut _ as usize };
    let mut c1 = 1u64;
    loop {
        unsafe {
            // Syscall proof-of-concept: x0/x1 carry values to the kernel's
            // SVC handler; x0 comes back modified (inout)
            asm!("svc #0", inout("x0") c0, in("x1") c1);
        }
        // Crude busy-wait delay between syscalls
        for _ in 0..1000000 {
            unsafe { asm!("nop"); }
        }
    }
}
/// Required panic handler for this `no_std` binary: there is no unwinding
/// or output facility in userspace yet, so just spin forever.
#[panic_handler]
fn panic_handler(_pi: &PanicInfo) -> ! {
    loop {}
}

View File

@ -2,6 +2,7 @@
use crate::arch::{aarch64::reg::CPACR_EL1, machine};
use crate::dev::{fdt::DeviceTree, irq::IntSource, Device};
use crate::debug::Level;
use crate::mem::{
self, heap,
phys::{self, PageUsage},
@ -51,14 +52,18 @@ extern "C" fn __aa64_bsp_main(fdt_base: usize) -> ! {
machine::init_board().unwrap();
let initrd;
if fdt_base != 0 {
let fdt = DeviceTree::from_phys(fdt_base + 0xFFFFFF8000000000);
if let Ok(_fdt) = fdt {
if let Ok(fdt) = fdt {
// fdt.dump(Level::Debug);
initrd = fdt.initrd();
} else {
initrd = None;
errorln!("Failed to init FDT");
}
} else {
initrd = None;
warnln!("No FDT present");
}
@ -68,7 +73,7 @@ extern "C" fn __aa64_bsp_main(fdt_base: usize) -> ! {
machine::local_timer().enable().unwrap();
machine::local_timer().init_irqs().unwrap();
proc::enter();
proc::enter(initrd);
}
}

View File

@ -10,7 +10,14 @@ __aa64_ctx_enter_kernel:
msr sp_el0, x0
msr ttbr0_el1, x1
tst x0, x0
mov x0, #5
bne 1f
b 2f
1:
mov x0, #4
2:
msr spsr_el1, x0
ldp x0, x1, [sp, #16]
msr elr_el1, x1

View File

@ -64,7 +64,7 @@ fn dump_data_abort(level: Level, esr: u64, far: u64) {
} else {
print!(level, " at UNKNOWN");
}
print!(level, "");
println!(level, "");
}
#[no_mangle]
@ -80,6 +80,7 @@ extern "C" fn __aa64_exc_sync_handler(exc: &mut ExceptionFrame) {
}
EC_SVC_AA64 => {
infoln!("{:#x} {:#x}", exc.x[0], exc.x[1]);
exc.x[0] += 1;
return;
}
_ => {}

View File

@ -1,9 +1,10 @@
use crate::debug::Level;
use crate::util;
use error::Errno;
use fdt_rs::prelude::*;
use crate::debug::Level;
use fdt_rs::{
base::DevTree,
index::{DevTreeIndex, DevTreeIndexNode},
index::{DevTreeIndex, DevTreeIndexNode, DevTreeIndexProp},
};
#[repr(align(16))]
@ -14,6 +15,7 @@ struct Wrap {
static mut INDEX_BUFFER: Wrap = Wrap { data: [0; 65536] };
type INode<'a> = DevTreeIndexNode<'a, 'a, 'a>;
type IProp<'a> = DevTreeIndexProp<'a, 'a, 'a>;
#[allow(dead_code)]
pub struct DeviceTree {
@ -71,11 +73,52 @@ fn dump_node(level: Level, node: &INode, depth: usize) {
println!(level, "}}");
}
/// Walks `path` (slash-separated, no leading slash) down from `at`,
/// returning the matching node. An empty `path` yields `at` itself.
fn find_node<'a>(at: INode<'a>, path: &str) -> Option<INode<'a>> {
    let mut node = at;
    let mut rest = path;
    loop {
        let (component, tail) = util::path_component_left(rest);
        if component.is_empty() {
            // Fully consumed: path_component_left never yields an empty
            // head with a non-empty tail here
            assert_eq!(tail, "");
            return Some(node);
        }
        node = node.children().find(|c| c.name().unwrap() == component)?;
        if tail.is_empty() {
            return Some(node);
        }
        rest = tail;
    }
}
/// Returns the first property of `at` whose name equals `name`, if any.
fn find_prop<'a>(at: INode<'a>, name: &str) -> Option<IProp<'a>> {
    for prop in at.props() {
        if prop.name().unwrap() == name {
            return Some(prop);
        }
    }
    None
}
/// Reads a `cells`-cell (32-bit each) value from `prop` starting at cell
/// index `off`, composing it into a single u64.
///
/// Per the Devicetree specification, a two-cell value stores the MOST
/// significant 32 bits in the first cell and the least significant in the
/// second.
fn read_cells(prop: &IProp, off: usize, cells: u32) -> Option<u64> {
    Some(match cells {
        1 => prop.u32(off).ok()? as u64,
        // BUGFIX: first cell is the high half, second cell is the low half
        // (previously composed in the opposite order)
        2 => ((prop.u32(off).ok()? as u64) << 32) | (prop.u32(off + 1).ok()? as u64),
        _ => todo!(),
    })
}
impl DeviceTree {
pub fn dump(&self, level: Level) {
dump_node(level, &self.index.root(), 0);
}
pub fn node_by_path(&self, path: &str) -> Option<INode> {
find_node(self.index.root(), path.trim_start_matches('/'))
}
/// Returns the initrd physical address range `(start, end)` reported by
/// the bootloader in the `/chosen` node, or `None` if absent.
pub fn initrd(&self) -> Option<(usize, usize)> {
    let chosen = self.node_by_path("/chosen")?;
    // NOTE(review): only the first 32-bit cell of each property is read; a
    // two-cell (64-bit) linux,initrd-start/end value would be truncated —
    // confirm against the boot protocol in use (read_cells exists for this)
    let initrd_start = find_prop(chosen.clone(), "linux,initrd-start")?
        .u32(0)
        .ok()?;
    let initrd_end = find_prop(chosen, "linux,initrd-end")?.u32(0).ok()?;
    Some((initrd_start as usize, initrd_end as usize))
}
pub fn from_phys(base: usize) -> Result<DeviceTree, Errno> {
// TODO virtualize address
let tree = unsafe { DevTree::from_raw_pointer(base as *const _) }

View File

@ -42,6 +42,10 @@ bitflags! {
const UXN = 1 << 54;
/// PXN bit -- if set, page may not be used for instruction fetching from EL1
const PXN = 1 << 53;
// AP field
/// If set, the page referred to by this entry is read-only for both EL0/EL1
const AP_BOTH_READONLY = 3 << 6;
}
}

226
kernel/src/proc/elf.rs Normal file
View File

@ -0,0 +1,226 @@
//!
use crate::mem::{
self,
phys::{self, PageUsage},
virt::{MapAttributes, Space},
};
use error::Errno;
/// Type-level description of an ELF word-size class; the associated types
/// mirror the `Elf64_*`/`Elf32_*` typedefs from the ELF specification.
trait Elf {
    type Addr;
    type Half;
    type SHalf;
    type Off;
    type Sword;
    type Word;
    type Xword;
    type Sxword;
}
/// Marker type selecting the 64-bit ELF class
struct Elf64;
impl Elf for Elf64 {
    type Addr = u64;
    type Half = u16;
    type SHalf = i16;
    type Off = u64;
    type Sword = i32;
    type Word = u32;
    type Xword = u64;
    type Sxword = i64;
}
/// ELF file header (`Elf64_Ehdr` layout when `E = Elf64`)
#[repr(C)]
struct Ehdr<E: Elf> {
    // e_ident: magic, class, data encoding, version, padding
    ident: [u8; 16],
    typ: E::Half,
    machine: E::Half,
    version: E::Word,
    // Virtual address of the entry point
    entry: E::Addr,
    // File offset of the program header table
    phoff: E::Off,
    shoff: E::Off,
    flags: E::Word,
    ehsize: E::Half,
    // Size of one program header entry / number of entries
    phentsize: E::Half,
    phnum: E::Half,
    shentsize: E::Half,
    shnum: E::Half,
    shstrndx: E::Half,
}
/// ELF program header (`Elf64_Phdr` layout when `E = Elf64`)
#[repr(C)]
struct Phdr<E: Elf> {
    // Segment type (1 = PT_LOAD)
    typ: E::Word,
    // PF_X/PF_W/PF_R permission bits
    flags: E::Word,
    // File offset of the segment data
    offset: E::Off,
    vaddr: E::Addr,
    paddr: E::Addr,
    // Bytes present in the file vs. total bytes in memory (memsz > filesz
    // means a zero-filled tail, e.g. .bss)
    filesz: E::Xword,
    memsz: E::Xword,
    align: E::Xword,
}
/// Copies `size` bytes from the kernel pointer `src` into the target address
/// space `space` at virtual address `dst_virt`, allocating and mapping a
/// fresh physical page for every destination page.
///
/// `flags` is the ELF program-header flags word (PF_X/PF_W/PF_R), translated
/// into `MapAttributes` for the new mappings.
///
/// # Safety
///
/// `src .. src + size` must be a valid readable range, and the caller must
/// ensure it is sound to map `dst_virt .. dst_virt + size` in `space`.
unsafe fn load_bytes(
    space: &mut Space,
    dst_virt: usize,
    src: *const u8,
    size: usize,
    flags: usize,
) -> Result<(), Errno> {
    let mut off = 0usize;
    let mut rem = size;
    // TODO unaligned loads
    assert!(dst_virt & 0xFFF == 0);
    while rem != 0 {
        let page_idx = off / mem::PAGE_SIZE;
        let page_off = off % mem::PAGE_SIZE;
        // Bytes to copy into this page (last page may be partial)
        let count = core::cmp::min(rem, mem::PAGE_SIZE - page_off);
        // NOTE(review): pages backing a user mapping are allocated as
        // PageUsage::Kernel — confirm this is intended at this stage
        let page = phys::alloc_page(PageUsage::Kernel)?;
        let mut dst_flags = MapAttributes::NOT_GLOBAL | MapAttributes::SH_OUTER;
        if flags & (1 << 0) /* PF_X */ == 0 {
            dst_flags |= MapAttributes::UXN | MapAttributes::PXN;
        }
        // PF_W (bit 1) / PF_R (bit 2) select the access mode
        match (flags & (3 << 1)) >> 1 {
            // No access: not sure if such mapping should exist at all
            0 => todo!(),
            // Write-only: not sure if such mapping should exist at all
            1 => todo!(),
            // Read-only
            2 => dst_flags |= MapAttributes::AP_BOTH_READONLY,
            // Read+Write
            3 => {}
            _ => unreachable!(),
        };
        debugln!(
            "Mapping {:#x} {:?}",
            dst_virt + page_idx * mem::PAGE_SIZE,
            dst_flags
        );
        space.map(dst_virt + page_idx * mem::PAGE_SIZE, page, dst_flags)?;
        // Copy through the kernel's physical-memory window
        let dst =
            core::slice::from_raw_parts_mut(mem::virtualize(page + page_off) as *mut u8, count);
        let src = core::slice::from_raw_parts(src.add(off), count);
        dst.copy_from_slice(src);
        rem -= count;
        off += count;
    }
    Ok(())
}
/// Zero-fills `size` bytes at virtual address `dst_virt` in `space`,
/// allocating and mapping fresh physical pages as needed (the zero tail of a
/// PT_LOAD segment, e.g. `.bss`).
///
/// Unlike [`load_bytes`], `dst_virt` may be unaligned, so page indices are
/// taken relative to the page base of `dst_virt`.
///
/// `flags` is the ELF program-header flags word (PF_X/PF_W/PF_R).
///
/// # Safety
///
/// The caller must ensure it is sound to map and write the destination
/// range in `space`.
unsafe fn zero_bytes(
    space: &mut Space,
    dst_virt: usize,
    size: usize,
    flags: usize,
) -> Result<(), Errno> {
    let base_virt = dst_virt & !0xFFF;
    let mut off = 0usize;
    let mut rem = size;
    while rem != 0 {
        let page_idx = (dst_virt + off - base_virt) / mem::PAGE_SIZE;
        let page_off = (dst_virt + off) % mem::PAGE_SIZE;
        // Bytes to zero within this page (first/last page may be partial)
        let count = core::cmp::min(rem, mem::PAGE_SIZE - page_off);
        let page = phys::alloc_page(PageUsage::Kernel)?;
        let mut dst_flags = MapAttributes::NOT_GLOBAL | MapAttributes::SH_OUTER;
        if flags & (1 << 0) /* PF_X */ == 0 {
            dst_flags |= MapAttributes::UXN | MapAttributes::PXN;
        }
        // PF_W (bit 1) / PF_R (bit 2) select the access mode
        match (flags & (3 << 1)) >> 1 {
            // No access: not sure if such mapping should exist at all
            0 => todo!(),
            // Write-only: not sure if such mapping should exist at all
            1 => todo!(),
            // Read-only
            2 => dst_flags |= MapAttributes::AP_BOTH_READONLY,
            // Read+Write
            3 => {}
            _ => unreachable!(),
        };
        debugln!(
            "Mapping {:#x} {:?}",
            base_virt + page_idx * mem::PAGE_SIZE,
            dst_flags
        );
        // BUGFIX: map at the page-aligned address (base_virt + ...); the
        // previous dst_virt + page_idx * PAGE_SIZE is unaligned whenever
        // dst_virt itself is.
        if let Err(e) = space.map(base_virt + page_idx * mem::PAGE_SIZE, page, dst_flags) {
            if e != Errno::AlreadyExists {
                return Err(e);
            }
            // TODO: when the page was already mapped by load_bytes(), the
            // zeroes below land in the newly allocated (leaked) page, not in
            // the live mapping — the existing translation should be looked
            // up and its tail zeroed instead.
        }
        let dst =
            core::slice::from_raw_parts_mut(mem::virtualize(page + page_off) as *mut u8, count);
        dst.fill(0);
        rem -= count;
        off += count;
    }
    Ok(())
}
/// Loads an ELF64 little-endian image located at `elf_base` into `space`,
/// mapping all PT_LOAD segments and zero-filling their `.bss` tails.
///
/// Returns the image entry-point virtual address on success.
///
/// # Errors
///
/// Returns [`Errno::InvalidArgument`] if the image is not a 64-bit
/// little-endian ELF file; propagates mapping/allocation errors otherwise.
pub fn load_elf(space: &mut Space, elf_base: *const u8) -> Result<usize, Errno> {
    // e_ident[EI_CLASS] / e_ident[EI_DATA] expected values
    const ELFCLASS64: u8 = 2;
    const ELFDATA2LSB: u8 = 1;
    let ehdr: &Ehdr<Elf64> = unsafe { &*(elf_base as *const _) };
    // Validate the magic AND reject images the Elf64 structs below cannot
    // describe (32-bit or big-endian files would be misparsed)
    if &ehdr.ident[0..4] != b"\x7FELF"
        || ehdr.ident[4] != ELFCLASS64
        || ehdr.ident[5] != ELFDATA2LSB
    {
        return Err(Errno::InvalidArgument);
    }
    for i in 0..(ehdr.phnum as usize) {
        let phdr: &Phdr<Elf64> = unsafe {
            &*(elf_base.add(ehdr.phoff as usize + ehdr.phentsize as usize * i) as *const _)
        };
        if phdr.typ == 1
        /* PT_LOAD */
        {
            debugln!(
                "Load region {:#x}..{:#x}..{:#x}",
                phdr.vaddr,
                phdr.vaddr + phdr.filesz,
                phdr.vaddr + phdr.memsz
            );
            // Bytes present in the file image
            if phdr.filesz > 0 {
                unsafe {
                    load_bytes(
                        space,
                        phdr.vaddr as usize,
                        elf_base.add(phdr.offset as usize),
                        phdr.filesz as usize,
                        phdr.flags as usize,
                    )?;
                }
            }
            // Zero-filled tail (.bss-like region)
            if phdr.memsz > phdr.filesz {
                let len = (phdr.memsz - phdr.filesz) as usize;
                unsafe {
                    zero_bytes(
                        space,
                        phdr.vaddr as usize + phdr.filesz as usize,
                        len,
                        phdr.flags as usize,
                    )?;
                }
            }
        }
    }
    Ok(ehdr.entry as usize)
}

View File

@ -11,9 +11,13 @@ use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU32, Ordering};
use alloc::boxed::Box;
use error::Errno;
pub use crate::arch::platform::context::{self, Context};
pub mod elf;
/// Wrapper type for a process struct reference
pub type ProcessRef = Rc<UnsafeCell<Process>>;
@ -39,24 +43,28 @@ pub struct Scheduler {
inner: InitOnce<IrqSafeNullLock<SchedulerInner>>,
}
static LAST_PID: AtomicU32 = AtomicU32::new(0);
impl SchedulerInner {
fn new_kernel(&mut self, entry: usize, arg: usize) -> u32 {
static LAST_PID: AtomicU32 = AtomicU32::new(0);
const USTACK_PAGE_COUNT: usize = 8;
const USTACK_VIRT_TOP: usize = 0x100000000;
const USTACK_VIRT_BASE: usize = USTACK_VIRT_TOP - USTACK_PAGE_COUNT * mem::PAGE_SIZE;
const USTACK_VIRT_TOP: usize = 0x100000000;
fn new_kernel<F: FnOnce(&mut Space) -> Result<usize, Errno>>(
&mut self,
loader: F,
ustack_pages: usize,
arg: usize,
) -> u32 {
let id = LAST_PID.fetch_add(1, Ordering::Relaxed);
if id == 256 {
panic!("Ran out of ASIDs (TODO FIXME)");
}
let space = Space::alloc_empty().unwrap();
for i in 0..USTACK_PAGE_COUNT {
let ustack_virt_bottom = Self::USTACK_VIRT_TOP - ustack_pages * mem::PAGE_SIZE;
for i in 0..ustack_pages {
let page = phys::alloc_page(PageUsage::Kernel).unwrap();
space
.map(
USTACK_VIRT_BASE + i * mem::PAGE_SIZE,
ustack_virt_bottom + i * mem::PAGE_SIZE,
page,
MapAttributes::SH_OUTER
| MapAttributes::NOT_GLOBAL
@ -66,12 +74,18 @@ impl SchedulerInner {
.unwrap();
}
let entry = loader(space).unwrap();
let proc = Process {
ctx: Context::kernel(
entry,
arg,
((space as *mut _ as usize) - mem::KERNEL_OFFSET) | ((id as usize) << 48),
USTACK_VIRT_TOP,
if ustack_pages != 0 {
Self::USTACK_VIRT_TOP
} else {
0
},
),
space,
id,
@ -86,6 +100,10 @@ impl SchedulerInner {
id
}
fn new_idle(&mut self) -> u32 {
self.new_kernel(|_| Ok(idle_fn as usize), 0, 0)
}
fn new() -> Self {
let mut this = Self {
processes: BTreeMap::new(),
@ -94,7 +112,7 @@ impl SchedulerInner {
current: None,
};
this.idle = this.new_kernel(idle_fn as usize, 0);
this.idle = this.new_idle();
this
}
@ -104,8 +122,16 @@ impl Scheduler {
/// Constructs a new kernel-space process with `entry` and `arg`.
/// Returns resulting process ID
// TODO see the first TODO here
pub fn new_kernel(&self, entry: usize, arg: usize) -> u32 {
self.inner.get().lock().new_kernel(entry, arg)
pub fn new_kernel<F: FnOnce(&mut Space) -> Result<usize, Errno>>(
&self,
loader: F,
ustack_pages: usize,
arg: usize,
) -> u32 {
self.inner
.get()
.lock()
.new_kernel(loader, ustack_pages, arg)
}
/// Initializes inner data structure:
@ -170,6 +196,61 @@ impl Scheduler {
}
}
}
///
pub fn current_process(&self) -> ProcessRef {
let inner = self.inner.get().lock();
let current = inner.current.unwrap();
inner.processes.get(&current).unwrap().clone()
}
}
impl Process {
///
pub fn execve<F: FnOnce(&mut Space) -> Result<usize, Errno>>(
&mut self,
loader: F,
arg: usize,
) -> Result<(), Errno> {
unsafe {
// Run with interrupts disabled
asm!("msr daifset, #2");
}
let ustack_pages = 4;
let new_space = Space::alloc_empty()?;
let new_space_phys = ((new_space as *mut _ as usize) - mem::KERNEL_OFFSET); // | ((id as usize) << 48),
let ustack_virt_bottom = SchedulerInner::USTACK_VIRT_TOP - ustack_pages * mem::PAGE_SIZE;
for i in 0..ustack_pages {
let page = phys::alloc_page(PageUsage::Kernel).unwrap();
new_space
.map(
ustack_virt_bottom + i * mem::PAGE_SIZE,
page,
MapAttributes::SH_OUTER
| MapAttributes::NOT_GLOBAL
| MapAttributes::UXN
| MapAttributes::PXN,
)
.unwrap();
}
let entry = loader(new_space)?;
self.ctx = Context::kernel(
entry,
0,
new_space_phys | ((self.id as usize) << 48),
SchedulerInner::USTACK_VIRT_TOP,
);
self.space = new_space;
unsafe {
self.ctx.enter();
}
panic!("This should not run");
}
}
extern "C" fn idle_fn(_a: usize) -> ! {
@ -177,22 +258,13 @@ extern "C" fn idle_fn(_a: usize) -> ! {
}
#[inline(never)]
extern "C" fn f1(u: usize) {
let mut x = u;
while x != 0 {
cortex_a::asm::nop();
x -= 1;
}
}
extern "C" fn init_fn(initrd_ptr: usize) -> ! {
debugln!("Running kernel init process");
#[inline(never)]
extern "C" fn f0(a: usize) -> ! {
loop {
unsafe {
asm!("svc #0", in("x0") a, in("x1") &a);
}
f1(10000000);
}
let (start, _end) = unsafe { *(initrd_ptr as *const (usize, usize)) };
let proc = unsafe { &mut *SCHED.current_process().get() };
proc.execve(|space| elf::load_elf(space, start as *const u8), 0).unwrap();
loop {}
}
/// Performs a task switch.
@ -213,10 +285,11 @@ static SCHED: Scheduler = Scheduler {
/// # Safety
///
/// Unsafe: May only be called once.
pub unsafe fn enter() -> ! {
pub unsafe fn enter(initrd: Option<(usize, usize)>) -> ! {
SCHED.init();
for i in 0..4 {
SCHED.enqueue(SCHED.new_kernel(f0 as usize, i));
if let Some((start, end)) = initrd {
let initrd = Box::into_raw(Box::new((mem::virtualize(start), mem::virtualize(end))));
SCHED.enqueue(SCHED.new_kernel(|_| Ok(init_fn as usize), 0, initrd as usize));
}
SCHED.enter();
}

View File

@ -51,3 +51,12 @@ impl<T> InitOnce<T> {
}
unsafe impl<T> Sync for InitOnce<T> {}
/// Splits `path` into its first component and the remainder, stripping any
/// run of leading slashes from the remainder.
///
/// A path with no separator yields `(path, "")`.
pub fn path_component_left(path: &str) -> (&str, &str) {
    match path.split_once('/') {
        None => (path, ""),
        Some((head, rest)) => (head, rest.trim_start_matches('/')),
    }
}