Move files a bit, add sys_exit

Mark 2020-01-30 15:43:41 +02:00
parent df4eeefc8f
commit e718112e72
9 changed files with 315 additions and 302 deletions

View File

@@ -35,6 +35,7 @@ OBJS+=$(O)/sys/debug.o \
$(O)/sys/config.o \
$(O)/sys/ctype.o \
$(O)/sys/sched.o \
$(O)/sys/thread.o \
$(O)/sys/time.o
ifeq ($(DEBUG_COUNTERS),1)

View File

@@ -0,0 +1,18 @@
#pragma once
#define THREAD_RSP0 0x00
#define THREAD_RSP0_TOP 0x08
#define THREAD_CR3 0x10
#if !defined(__ASM__)
#include "sys/types.h"
struct thread_data {
uintptr_t rsp0; // 0x00
uintptr_t rsp0_top; // 0x08
uintptr_t cr3; // 0x10
uintptr_t rsp0_base, rsp0_size;
uintptr_t rsp3_base, rsp3_size;
};
#endif
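
Since these offsets are consumed from assembly, a compile-time check keeps them in sync with the struct. A minimal sketch (not part of the commit), assuming the freestanding stddef.h is usable in this tree:

#include <stddef.h>
#include "sys/amd64/asm/asm_thread.h"

/* Sketch: fail the build if the assembly offsets drift from the C layout */
_Static_assert(offsetof(struct thread_data, rsp0) == THREAD_RSP0, "rsp0 drift");
_Static_assert(offsetof(struct thread_data, rsp0_top) == THREAD_RSP0_TOP, "rsp0_top drift");
_Static_assert(offsetof(struct thread_data, cr3) == THREAD_CR3, "cr3 drift");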

View File

@@ -1,4 +1,17 @@
#pragma once
struct thread;
// Enter a newly-created task
extern void context_enter(struct thread *thr);
// Enter a task from exec
extern void context_exec_enter(void *arg, struct thread *thr, uintptr_t stack3, uintptr_t entry);
// Stores current task context, loads new one's
extern void context_switch_to(struct thread *thr, struct thread *from);
// No current task, only load the first task to begin execution
extern void context_switch_first(struct thread *thr);
void sched_queue(struct thread *thr);
void sched_unqueue(struct thread *thr);
void sched_init(void);
void sched_enter(void);

View File

@@ -1,23 +1,15 @@
#pragma once
#include "sys/amd64/asm/asm_thread.h"
struct thread {
// Platform data
struct thread_data {
uintptr_t rsp0; // 0x00
uintptr_t syscall_rsp; // 0x08
struct thread_data data;
uintptr_t rsp0_top; // 0x10
uintptr_t cr3; // 0x18
uintptr_t syscall_rip; // 0x20
uintptr_t rsp0_base;
size_t rsp0_size;
uintptr_t rsp3_base;
size_t rsp3_size;
} data;
int pid;
pid_t pid;
struct thread *prev, *next;
};
pid_t thread_alloc_pid(int is_user);
int thread_init(struct thread *thr, uintptr_t entry, void *arg, int user);
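
Taken together with sched.h, the intended call sequence is roughly the following (a sketch mirroring sched_init below; the worker function and its name are hypothetical):

static void *worker(void *arg) {
    (void) arg;
    while (1) {
        asm volatile ("hlt");
    }
    return 0;
}

static struct thread t_worker;

static void spawn_worker(void) {
    // Kernel thread: user = 0, so it runs on mm_kernel and gets a negative PID
    thread_init(&t_worker, (uintptr_t) worker, (void *) 0, 0);
    t_worker.pid = thread_alloc_pid(0);
    sched_queue(&t_worker);
}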

View File

@@ -1,4 +1,6 @@
#include "sys/amd64/asm/asm_cpu.h"
#include "sys/amd64/asm/asm_thread.h"
.section .text
.global context_enter
context_enter:
@@ -20,7 +22,7 @@ context_exec_enter:
// %rsi - thread
// %rdx - stack
// %rcx - entry
movq (%rsi), %rsp
movq THREAD_RSP0(%rsi), %rsp
pushq $0x1B
pushq %rdx
@@ -68,7 +70,7 @@ context_switch_first:
// TODO: switch cr3 here
// Load new %rsp
movq (%rdi), %rsp
movq THREAD_RSP0(%rdi), %rsp
popq %rbx
popq %rbp
@@ -79,14 +81,14 @@ context_switch_first:
// Load TSS.RSP0 for user -> kernel transition
// %rax = top of task's stack
movq 0x10(%rdi), %rax
movq THREAD_RSP0_TOP(%rdi), %rax
// %rcx = &tss
movq get_cpu(0x10), %rcx
// &tss->rsp0 = %rax
movq %rax, 4(%rcx)
// Load new %cr3 if changed
movq 0x18(%rdi), %rax
movq THREAD_CR3(%rdi), %rax
movq %cr3, %rcx
test %rcx, %rax
jz 1f
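
In C terms, the TSS store above does roughly the following; the struct here is an assumption sketched from the amd64 TSS layout, where RSP0 lives at byte offset 4, matching the 4(%rcx) store:

#include "sys/types.h"

struct amd64_tss {
    uint32_t reserved0;    // offset 0x00
    uint64_t rsp0;         // offset 0x04: kernel stack used on user -> kernel entry
    // ... remainder of the 64-bit TSS ...
} __attribute__((packed));

static void tss_load_rsp0(struct amd64_tss *tss, const struct thread *thr) {
    tss->rsp0 = thr->data.rsp0_top;
}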

View File

@@ -6,11 +6,12 @@
#define MSR_IA32_SFMASK 0xC0000084
extern void syscall_entry(void);
extern int sys_exec(void *(*)(void *), void *arg);
extern __attribute__((noreturn)) void sys_exit(int status);
void *syscall_table[256] = {
NULL,
[124] = sys_exec,
[60] = sys_exit,
};
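
For reference, the userspace side of these two entries can be sketched as thin wrappers around the syscall instruction, mirroring the inline assembly in the u0 test thread in sched.c (wrapper names are hypothetical):

static inline long sys_exec_call(void *(*func)(void *), void *arg) {
    long r;
    // %rax = 124 selects sys_exec in the table; arguments go in %rdi/%rsi
    asm volatile ("syscall"
                  : "=a"(r)
                  : "a"(124), "D"(func), "S"(arg)
                  : "rcx", "r11", "memory");
    return r;
}

static inline void sys_exit_call(int status) {
    // %rax = 60 selects sys_exit, which does not return
    asm volatile ("syscall" :: "a"(60), "D"(status) : "rcx", "r11", "memory");
    __builtin_unreachable();
}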
void syscall_undefined(uint64_t rax) {

View File

@@ -1,3 +1,4 @@
#include "sys/amd64/asm/asm_thread.h"
.section .bss
tmp0:
.quad 0
@@ -12,7 +13,7 @@ syscall_entry:
// Switch to kernel stack
movq %rsp, tmp0(%rip)
movq thread_current(%rip), %rsp
movq 0(%rsp), %rsp
movq THREAD_RSP0(%rsp), %rsp
cmpq $123, %rax
jz _syscall_fork
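
The two loads above work because data is the first member of struct thread, so thread_current->data.rsp0 sits at offset THREAD_RSP0 (0x00) from the thread pointer. In C the lookup is simply:

// Sketch: what the two movq instructions compute before the %rsp switch
uintptr_t kernel_rsp = thread_current->data.rsp0;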

View File

@@ -1,23 +1,12 @@
#include "sys/amd64/mm/phys.h"
#include "sys/amd64/mm/pool.h"
#include "sys/amd64/hw/irq.h"
#include "sys/amd64/hw/idt.h"
#include "sys/amd64/cpu.h"
#include "sys/vmalloc.h"
#include "sys/assert.h"
#include "sys/thread.h"
#include "sys/sched.h"
#include "sys/debug.h"
#include "sys/mm.h"
// Enter a newly-created task
extern void context_enter(struct thread *thr);
// Enter a task from exec
extern void context_exec_enter(void *arg, struct thread *thr, uintptr_t stack3, uintptr_t entry);
// Stores current task context, loads new one's
extern void context_switch_to(struct thread *thr, struct thread *from);
// No current task, only load the first task to begin execution
extern void context_switch_first(struct thread *thr);
void yield(void);
//// Thread queueing
@@ -71,115 +60,6 @@ void sched_unqueue(struct thread *thr) {
////
static void init_thread(struct thread *thr, void *(*entry)(void *), void *arg, int user) {
uintptr_t stack_pages = amd64_phys_alloc_page();
_assert(stack_pages != MM_NADDR);
thr->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
thr->data.rsp0_size = MM_PAGE_SIZE;
thr->data.rsp0_top = thr->data.rsp0_base + thr->data.rsp0_size;
uint64_t *stack = (uint64_t *) (thr->data.rsp0_base + thr->data.rsp0_size);
if (user) {
mm_space_t space = amd64_mm_pool_alloc();
mm_space_clone(space, mm_kernel, MM_CLONE_FLG_KERNEL);
thr->data.cr3 = MM_PHYS(space);
uintptr_t ustack_base = vmalloc(space, 0x1000000, 0xF0000000, 4, MM_PAGE_WRITE | MM_PAGE_USER);
thr->data.rsp3_base = ustack_base;
thr->data.rsp3_size = MM_PAGE_SIZE * 4;
// Allow this thread to access upper pages for testing
space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] |= MM_PAGE_USER;
uint64_t *pdpt = (uint64_t *) MM_VIRTUALIZE(space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] & ~0xFFF);
for (uint64_t i = 0; i < 4; ++i) {
pdpt[((AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 30) + i) & 0x1FF] |= MM_PAGE_USER;
}
} else {
thr->data.cr3 = MM_PHYS(mm_kernel);
}
// Initial thread context
// Entry context
if (user) {
// ss
*--stack = 0x1B;
// rsp
*--stack = thr->data.rsp3_base + thr->data.rsp3_size;
// rflags
*--stack = 0x200;
// cs
*--stack = 0x23;
// rip
*--stack = (uintptr_t) entry;
} else {
// ss
*--stack = 0x10;
// rsp: once this context is popped off the stack, the stack top becomes the
// new stack pointer for kernel threads
*--stack = thr->data.rsp0_base + thr->data.rsp0_size;
// rflags
*--stack = 0x200;
// cs
*--stack = 0x08;
// rip
*--stack = (uintptr_t) entry;
}
// Caller-saved
// r11
*--stack = 0;
// r10
*--stack = 0;
// r9
*--stack = 0;
// r8
*--stack = 0;
// rcx
*--stack = 0;
// rdx
*--stack = 0;
// rsi
*--stack = 0;
// rdi
*--stack = (uintptr_t) arg;
// rax
*--stack = 0;
// Small stub so that context switch enters the thread properly
*--stack = (uintptr_t) context_enter;
// Callee-saved
// r15
*--stack = 0;
// r14
*--stack = 0;
// r13
*--stack = 0;
// r12
*--stack = 0;
// rbp
*--stack = 0;
// rbx
*--stack = 0;
// Thread lifecycle:
// * context_switch_to():
// - pops callee-saved registers (initializing them to 0)
// - enters context_enter()
// * context_enter():
// - pops caller-saved registers (initializing them to 0 and setting up rdi)
// - enters proper execution context via iret
// ... Thread is running here until it yields
// * yield leads to context_switch_to():
// - call to yield() automatically (per ABI) stores caller-saved registers
// - context_switch_to() pushes callee-saved registers onto current stack
// - selects a new thread
// - the cycle repeats from step one
thr->data.rsp0 = (uintptr_t) stack;
}
static void *idle(void *arg) {
while (1) {
asm volatile ("hlt");
@@ -206,167 +86,27 @@ static void *t0(void *arg) {
return 0;
}
struct sys_fork_frame {
uint64_t rdi, rsi, rdx, rcx;
uint64_t r8, r9, r10, r11;
uint64_t rbx;
uint64_t rbp;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rsp;
uint64_t rflags;
uint64_t rip;
};
int sys_exec(void *(*func)(void *), void *arg) {
_assert(func);
struct thread *thr = thread_current;
thr->data.rsp0 = thr->data.rsp0_top;
uintptr_t rsp3 = thr->data.rsp3_base + thr->data.rsp3_size;
context_exec_enter(arg, thr, rsp3, (uintptr_t) func);
panic("No\n");
}
int sys_fork(struct sys_fork_frame *frame) {
static int nfork = 0;
static struct thread forkt[3] = {0};
struct thread *dst = &forkt[nfork++];
struct thread *src = thread_current;
uintptr_t stack_pages = amd64_phys_alloc_page();
_assert(stack_pages != MM_NADDR);
dst->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
dst->data.rsp0_size = MM_PAGE_SIZE;
dst->data.rsp0_top = dst->data.rsp0_base + dst->data.rsp0_size;
mm_space_t space = amd64_mm_pool_alloc();
mm_space_fork(space, (mm_space_t) MM_VIRTUALIZE(src->data.cr3), MM_CLONE_FLG_KERNEL | MM_CLONE_FLG_USER);
dst->data.rsp3_base = src->data.rsp3_base;
dst->data.rsp3_size = src->data.rsp3_size;
space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] |= MM_PAGE_USER;
uint64_t *pdpt = (uint64_t *) MM_VIRTUALIZE(space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] & ~0xFFF);
for (uint64_t i = 0; i < 4; ++i) {
pdpt[((AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 30) + i) & 0x1FF] |= MM_PAGE_USER;
}
dst->data.cr3 = MM_PHYS(space);
uint64_t *stack = (uint64_t *) (dst->data.rsp0_base + dst->data.rsp0_size);
// Initial thread context
// Entry context
// ss
*--stack = 0x1B;
// rsp
*--stack = frame->rsp;
// rflags
_assert(frame->rflags & 0x200);
*--stack = frame->rflags;
// cs
*--stack = 0x23;
// rip
*--stack = frame->rip;
// Caller-saved
// r11
*--stack = frame->r11;
// r10
*--stack = frame->r10;
// r9
*--stack = frame->r9;
// r8
*--stack = frame->r8;
// rcx
*--stack = frame->rcx;
// rdx
*--stack = frame->rdx;
// rsi
*--stack = frame->rsi;
// rdi
*--stack = frame->rdi;
// rax
*--stack = 0;
// Small stub so that context switch enters the thread properly
*--stack = (uintptr_t) context_enter;
// Callee-saved
// r15
*--stack = frame->r15;
// r14
*--stack = frame->r14;
// r13
*--stack = frame->r13;
// r12
*--stack = frame->r12;
// rbp
*--stack = frame->rbp;
// rbx
*--stack = frame->rbx;
// Thread lifecycle:
// * context_switch_to():
// - pops callee-saved registers (initializing them to 0)
// - enters context_enter()
// * context_enter():
// - pops caller-saved registers (initializing them to 0 and setting up rdi)
// - enters proper execution context via iret
// ... Thread is running here until it yields
// * yield leads to context_switch_to():
// - call to yield() automatically (per ABI) stores caller-saved registers
// - context_switch_to() pushes callee-saved registers onto current stack
// - selects a new thread
// - the cycle repeats from step one
dst->data.rsp0 = (uintptr_t) stack;
dst->pid = nfork + 20;
sched_queue(dst);
return dst->pid;
}
static void *u1(void *arg) {
uint16_t *ptr = (uint16_t *) MM_VIRTUALIZE(arg);
*ptr = 0;
while (1) {
*ptr ^= '1' | 0xC00;
for (size_t i = 0; i < 1000000; ++i);
}
return 0;
}
static void *u0(void *arg) {
int r;
asm volatile ("syscall":"=a"(r):"a"(123));
if (r == 0) {
(void) u1;
asm volatile ("leaq u1(%rip), %rdi; movq $0xB8040, %rsi; movq $124, %rax; syscall");
arg = (void *) ((uintptr_t) arg + 2);
}
asm volatile ("syscall":"=a"(r):"a"(123));
if (r == 0) {
(void) u1;
asm volatile ("leaq u1(%rip), %rdi; movq $0xB8050, %rsi; movq $124, %rax; syscall");
}
uint16_t *ptr = (uint16_t *) MM_VIRTUALIZE(0xB8030);
uint16_t *ptr = (uint16_t *) MM_VIRTUALIZE(0xB8030 + arg);
size_t cntr = 0;
*ptr = 0;
while (1) {
*ptr ^= '0' | 0xD00;
for (size_t i = 0; i < 1000000; ++i);
if (cntr == 1000 && r == 0) {
*ptr = 'X' | 0x300;
asm volatile ("xorq %rdi, %rdi; movq $60, %rax; syscall");
}
++cntr;
}
return 0;
@@ -392,19 +132,16 @@ static struct thread t_n[3] = {0};
static struct thread t_u[3] = {0};
void sched_init(void) {
init_thread(&thread_idle, idle, 0, 0);
thread_init(&thread_idle, (uintptr_t) idle, 0, 0);
thread_idle.pid = -1;
init_thread(&t_n[0], t0, (void *) 0, 0);
init_thread(&t_n[1], t0, (void *) 1, 0);
init_thread(&t_n[2], t0, (void *) 2, 0);
for (size_t i = 0; i < 3; ++i) {
thread_init(&t_n[i], (uintptr_t) t0, (void *) i, 0);
t_n[i].pid = thread_alloc_pid(0);
}
t_n[0].pid = 1;
t_n[1].pid = 2;
t_n[2].pid = 3;
init_thread(&t_u[0], u0, (void *) 0, 1);
t_u[0].pid = 10;
thread_init(&t_u[0], (uintptr_t) u0, (void *) 0, 1);
t_u[0].pid = thread_alloc_pid(1);
sched_queue(&t_n[0]);
sched_queue(&t_n[1]);

sys/thread.c (new file, 248 lines)
View File

@@ -0,0 +1,248 @@
#include "sys/amd64/mm/phys.h"
#include "sys/amd64/mm/pool.h"
#include "sys/vmalloc.h"
#include "sys/assert.h"
#include "sys/thread.h"
#include "sys/sched.h"
#include "sys/debug.h"
#include "sys/mm.h"
extern struct thread *thread_current;
struct sys_fork_frame {
uint64_t rdi, rsi, rdx, rcx;
uint64_t r8, r9, r10, r11;
uint64_t rbx;
uint64_t rbp;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rsp;
uint64_t rflags;
uint64_t rip;
};
////
static pid_t last_kernel_pid = 0;
static pid_t last_user_pid = 0;
pid_t thread_alloc_pid(int is_user) {
if (is_user) {
return ++last_user_pid;
} else {
return -(++last_kernel_pid);
}
}
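
Kernel threads thus count down from -1 while user threads count up from 1, keeping the two PID ranges disjoint. Illustrative values:

// thread_alloc_pid(0) -> -1, -2, -3, ...  (kernel threads)
// thread_alloc_pid(1) ->  1,  2,  3, ...  (user threads)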
////
int thread_init(struct thread *thr, uintptr_t entry, void *arg, int user) {
uintptr_t stack_pages = amd64_phys_alloc_page();
_assert(stack_pages != MM_NADDR);
thr->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
thr->data.rsp0_size = MM_PAGE_SIZE;
thr->data.rsp0_top = thr->data.rsp0_base + thr->data.rsp0_size;
uint64_t *stack = (uint64_t *) (thr->data.rsp0_base + thr->data.rsp0_size);
if (user) {
mm_space_t space = amd64_mm_pool_alloc();
mm_space_clone(space, mm_kernel, MM_CLONE_FLG_KERNEL);
thr->data.cr3 = MM_PHYS(space);
uintptr_t ustack_base = vmalloc(space, 0x1000000, 0xF0000000, 4, MM_PAGE_WRITE | MM_PAGE_USER);
thr->data.rsp3_base = ustack_base;
thr->data.rsp3_size = MM_PAGE_SIZE * 4;
// Allow this thread to access upper pages for testing
space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] |= MM_PAGE_USER;
uint64_t *pdpt = (uint64_t *) MM_VIRTUALIZE(space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] & ~0xFFF);
for (uint64_t i = 0; i < 4; ++i) {
pdpt[((AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 30) + i) & 0x1FF] |= MM_PAGE_USER;
}
} else {
thr->data.cr3 = MM_PHYS(mm_kernel);
}
// Initial thread context
// Entry context
if (user) {
// ss
*--stack = 0x1B;
// rsp
*--stack = thr->data.rsp3_base + thr->data.rsp3_size;
// rflags
*--stack = 0x200;
// cs
*--stack = 0x23;
// rip
*--stack = (uintptr_t) entry;
} else {
// ss
*--stack = 0x10;
// rsp: once this context is popped off the stack, the stack top becomes the
// new stack pointer for kernel threads
*--stack = thr->data.rsp0_base + thr->data.rsp0_size;
// rflags
*--stack = 0x200;
// cs
*--stack = 0x08;
// rip
*--stack = (uintptr_t) entry;
}
// Caller-saved
// r11
*--stack = 0;
// r10
*--stack = 0;
// r9
*--stack = 0;
// r8
*--stack = 0;
// rcx
*--stack = 0;
// rdx
*--stack = 0;
// rsi
*--stack = 0;
// rdi
*--stack = (uintptr_t) arg;
// rax
*--stack = 0;
// Small stub so that context switch enters the thread properly
*--stack = (uintptr_t) context_enter;
// Callee-saved
// r15
*--stack = 0;
// r14
*--stack = 0;
// r13
*--stack = 0;
// r12
*--stack = 0;
// rbp
*--stack = 0;
// rbx
*--stack = 0;
// Thread lifecycle:
// * context_switch_to():
// - pops callee-saved registers (initializing them to 0)
// - enters context_enter()
// * context_enter():
// - pops caller-saved registers (initializing them to 0 and setting up rdi)
// - enters proper execution context via iret
// ... Thread is running here until it yields
// * yield leads to context_switch_to():
// - call to yield() automatically (per ABI) stores caller-saved registers
// - context_switch_to() pushes callee-saved registers onto current stack
// - selects a new thread
// - the cycle repeats from step one
thr->data.rsp0 = (uintptr_t) stack;
return 0;
}
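
For orientation, the initial kernel stack built above ends up laid out as follows (a sketch derived from the pushes, highest address first):

// rsp0_top:  ss, rsp, rflags, cs, rip                      iret frame, consumed last
//            r11, r10, r9, r8, rcx, rdx, rsi, rdi, rax     popped by context_enter
//            &context_enter                                "return address" for context_switch_*
// rsp0:      r15, r14, r13, r12, rbp, rbx                  popped first; rsp0 points at the rbx slot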
int sys_fork(struct sys_fork_frame *frame) {
static int nfork = 0;
static struct thread forkt[3] = {0};
struct thread *dst = &forkt[nfork++];
struct thread *src = thread_current;
uintptr_t stack_pages = amd64_phys_alloc_page();
_assert(stack_pages != MM_NADDR);
dst->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
dst->data.rsp0_size = MM_PAGE_SIZE;
dst->data.rsp0_top = dst->data.rsp0_base + dst->data.rsp0_size;
mm_space_t space = amd64_mm_pool_alloc();
mm_space_fork(space, (mm_space_t) MM_VIRTUALIZE(src->data.cr3), MM_CLONE_FLG_KERNEL | MM_CLONE_FLG_USER);
dst->data.rsp3_base = src->data.rsp3_base;
dst->data.rsp3_size = src->data.rsp3_size;
space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] |= MM_PAGE_USER;
uint64_t *pdpt = (uint64_t *) MM_VIRTUALIZE(space[AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 39] & ~0xFFF);
for (uint64_t i = 0; i < 4; ++i) {
pdpt[((AMD64_MM_STRIPSX(KERNEL_VIRT_BASE) >> 30) + i) & 0x1FF] |= MM_PAGE_USER;
}
dst->data.cr3 = MM_PHYS(space);
uint64_t *stack = (uint64_t *) (dst->data.rsp0_base + dst->data.rsp0_size);
// Initial thread context
// Entry context
// ss
*--stack = 0x1B;
// rsp
*--stack = frame->rsp;
// rflags
_assert(frame->rflags & 0x200);
*--stack = frame->rflags;
// cs
*--stack = 0x23;
// rip
*--stack = frame->rip;
// Caller-saved
// r11
*--stack = frame->r11;
// r10
*--stack = frame->r10;
// r9
*--stack = frame->r9;
// r8
*--stack = frame->r8;
// rcx
*--stack = frame->rcx;
// rdx
*--stack = frame->rdx;
// rsi
*--stack = frame->rsi;
// rdi
*--stack = frame->rdi;
// rax
*--stack = 0;
// Small stub so that context switch enters the thread properly
*--stack = (uintptr_t) context_enter;
// Callee-saved
// r15
*--stack = frame->r15;
// r14
*--stack = frame->r14;
// r13
*--stack = frame->r13;
// r12
*--stack = frame->r12;
// rbp
*--stack = frame->rbp;
// rbx
*--stack = frame->rbx;
dst->data.rsp0 = (uintptr_t) stack;
// Allocate a new PID for userspace thread
dst->pid = thread_alloc_pid(1);
sched_queue(dst);
return dst->pid;
}
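
The matching userspace call, as exercised by the u0 test thread (syscall number 123 is intercepted in the syscall entry stub before the table dispatch), can be sketched as:

static inline long sys_fork_call(void) {
    long r;
    // nr 123 takes the _syscall_fork fast path in syscall_entry
    asm volatile ("syscall" : "=a"(r) : "a"(123) : "rcx", "r11", "memory");
    return r;  // 0 in the child, the child's PID in the parent
}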
__attribute__((noreturn)) void sys_exit(int status) {
struct thread *thr = thread_current;
kdebug("Thread %d exited with status %d\n", thr->pid, status);
sched_unqueue(thr);
panic("This code shouldn't run\n");
}