2020-03-30 18:08:23 +03:00
|
|
|
#include "arch/amd64/hw/timer.h"
|
2020-07-06 23:39:31 +03:00
|
|
|
#include "user/errno.h"
|
2020-03-30 18:08:23 +03:00
|
|
|
#include "sys/thread.h"
|
|
|
|
#include "sys/assert.h"
|
|
|
|
#include "sys/sched.h"
|
|
|
|
#include "sys/debug.h"
|
2020-07-06 23:39:31 +03:00
|
|
|
#include "user/wait.h"
|
2020-03-30 18:08:23 +03:00
|
|
|
#include "sys/wait.h"
|
|
|
|
|
|
|
|
void thread_wait_io_init(struct io_notify *n) {
|
|
|
|
n->owner = NULL;
|
|
|
|
n->value = 0;
|
|
|
|
n->lock = 0;
|
|
|
|
list_head_init(&n->link);
|
|
|
|
// For wait_io_any
|
|
|
|
list_head_init(&n->own_link);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Block thread `t` until `n` is signaled by thread_notify_io(), or a pending
// signal interrupts the wait.
// Returns 0 once an event was consumed, or the non-zero result of
// thread_check_signal() if the wait was interrupted.
int thread_wait_io(struct thread *t, struct io_notify *n) {
    uintptr_t irq;

    while (1) {
        spin_lock_irqsave(&n->lock, &irq);
        // Check value
        if (n->value) {
            // Consume value
            // NOTE: resets the counter to zero rather than decrementing it,
            // so multiple pending notifications collapse into one wakeup
            n->value = 0;
            spin_release_irqrestore(&n->lock, &irq);
            return 0;
        }

        // Wait for the value to change
        // TODO: multiple threads waiting on same io_notify
        _assert(!n->owner);
        n->owner = t;
        spin_release_irqrestore(&n->lock, &irq);

        // NOTE(review): the lock is dropped before unqueueing, so a notify
        // may arrive in this window; thread_notify_io()'s sched_next check
        // presumably covers the double-wakeup case — verify the lost-wakeup
        // case is handled by the scheduler
        sched_unqueue(t, THREAD_WAITING);

        // Check if we were interrupted during io wait
        // NOTE(review): owner is cleared without holding n->lock here —
        // assumes no concurrent notifier can still reference it; confirm
        n->owner = NULL;
        int r = thread_check_signal(t, 0);
        if (r != 0) {
            return r;
        }
        // Spurious wakeup or value already consumed: loop and re-check
    }
}
|
|
|
|
|
|
|
|
// Signal the notifier `n`: increment its pending-event counter and, if a
// thread registered itself as owner, detach it and queue it for scheduling.
// Safe to call from event sources (e.g. IRQ context) — all shared state is
// touched under n->lock with interrupts saved.
void thread_notify_io(struct io_notify *n) {
    uintptr_t irq;
    struct thread *t = NULL;
    spin_lock_irqsave(&n->lock, &irq);
    ++n->value;
    // Claim the waiter (if any) while holding the lock, then wake it
    // after the lock is dropped
    t = n->owner;
    n->owner = NULL;
    spin_release_irqrestore(&n->lock, &irq);

    if (t) {
        // Prevent double task wakeup when
        // two event sources simultaneously try
        // to do it
        // NOTE(review): sched_next is read without a lock — presumably a
        // non-NULL value means the thread is already queued; confirm
        if (t->sched_next) {
            return;
        }
        sched_queue(t);
    }
}
|
|
|
|
|
|
|
|
void thread_wait_io_add(struct thread *thr, struct io_notify *n) {
|
|
|
|
uintptr_t irq;
|
2020-06-01 12:58:51 +03:00
|
|
|
_assert(n);
|
2020-03-30 18:08:23 +03:00
|
|
|
spin_lock_irqsave(&n->lock, &irq);
|
|
|
|
_assert(!n->owner);
|
|
|
|
n->owner = thr;
|
|
|
|
list_add(&n->own_link, &thr->wait_head);
|
|
|
|
spin_release_irqrestore(&n->lock, &irq);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Block `thr` until any notifier previously attached to its wait list (via
// thread_wait_io_add()) is signaled. On success, stores the ready notifier
// in *r_n and returns 0; returns the non-zero thread_check_signal() result
// if the wait was interrupted.
int thread_wait_io_any(struct thread *thr, struct io_notify **r_n) {
    uintptr_t irq;
    struct io_notify *n, *it;

    while (1) {
        // Check if any of values are non-zero
        n = NULL;
        list_for_each_entry(it, &thr->wait_head, own_link) {
            spin_lock_irqsave(&it->lock, &irq);
            if (it->value) {
                // Break with it->lock still held; released below
                n = it;
                break;
            }
            spin_release_irqrestore(&it->lock, &irq);
        }

        if (n) {
            // Found ready descriptor
            // NOTE: decrements (rather than zeroes) the counter, unlike
            // thread_wait_io() — each call consumes a single event
            --n->value;
            // `it` == `n` here: this releases the lock taken at the break
            spin_release_irqrestore(&it->lock, &irq);
            *r_n = n;

            return 0;
        } else {
            // Wait
            // TODO: reset owners
            sched_unqueue(thr, THREAD_WAITING);

            int r = thread_check_signal(thr, 0);
            if (r != 0) {
                // NOTE(review): notifiers stay attached on this path;
                // callers are expected to run thread_wait_io_clear()
                return r;
            }
        }
    }
}
|
|
|
|
|
|
|
|
void thread_wait_io_clear(struct thread *t) {
|
|
|
|
while (!list_empty(&t->wait_head)) {
|
|
|
|
struct list_head *h = t->wait_head.next;
|
|
|
|
struct io_notify *n = list_entry(h, struct io_notify, own_link);
|
2020-03-31 11:30:26 +03:00
|
|
|
// TODO: maybe check here for sleep descriptors and cancel sleeps if needed
|
2020-03-30 18:08:23 +03:00
|
|
|
n->owner = NULL;
|
|
|
|
list_del_init(h);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Put `thr` to sleep until `deadline` (absolute time, units defined by the
// timer subsystem). Returns 0 on normal wakeup, or the non-zero result of
// thread_check_signal() (via thread_wait_io()) if interrupted.
// NOTE(review): `int_time` is unused here — presumably reserved for
// reporting the interruption time; confirm against callers.
int thread_sleep(struct thread *thr, uint64_t deadline, uint64_t *int_time) {
    // Cancel previous sleep: unlink any pending timer registration and
    // drop any stale notification before re-arming
    list_del_init(&thr->sleep_notify.link);
    thr->sleep_notify.value = 0;

    thr->sleep_deadline = deadline;
    timer_add_sleep(thr);
    return thread_wait_io(thr, &thr->sleep_notify);
}
|
|
|
|
|
2020-07-06 23:39:31 +03:00
|
|
|
static int wait_check_pid(struct process *chld, int flags) {
|
|
|
|
if (chld->proc_state == PROC_FINISHED) {
|
|
|
|
return 0;
|
|
|
|
} else if ((flags & WSTOPPED) && chld->proc_state == PROC_SUSPENDED) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int wait_check_pgrp(struct process *proc_self, pid_t pgrp, int flags, struct process **chld) {
|
|
|
|
for (struct process *_chld = proc_self->first_child; _chld; _chld = _chld->next_child) {
|
2020-07-12 17:10:51 +03:00
|
|
|
if (pgrp == -1 || _chld->pgid == -pgrp) {
|
2020-07-06 23:39:31 +03:00
|
|
|
if (wait_check_pid(_chld, flags) == 0) {
|
|
|
|
*chld = _chld;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// waitpid() syscall: wait for a status change in a child of the calling
// process. Supported `pid` encodings: pid > 0 waits for that specific
// child; pid == -1 waits for any child; pid < -1 waits for any child in
// process group -pid. Other encodings (pid == 0) panic as unimplemented.
// Returns the reaped child's pid, -ECHILD if no matching child exists, or
// a negative error (e.g. -EINTR) if the wait was interrupted.
int sys_waitpid(pid_t pid, int *status, int flags) {
    pid_t result_pid = -ECHILD;
    struct thread *thr = thread_self;
    _assert(thr);
    struct process *proc_self = thr->proc;
    _assert(proc_self);

    struct process *chld = NULL;
    struct io_notify *notify;
    int res;

    // Validate up front that at least one matching child exists
    if (pid > 0) {
        chld = process_child(proc_self, pid);

        if (!chld) {
            return -ECHILD;
        }
    } else if (pid < -1) {
        // Count children in the requested process group
        int any_proc = 0;
        for (struct process *_chld = proc_self->first_child; _chld; _chld = _chld->next_child) {
            if (_chld->pgid == -pid) {
                ++any_proc;
            }
        }
        if (!any_proc) {
            return -ECHILD;
        }
    } else if (pid != -1) {
        panic("Not implemented: waitpid(%d, ...)\n", pid);
    }

    while (1) {
        if (pid > 0) {
            // Single-child case: poll state, then sleep on its notifier
            if (wait_check_pid(chld, flags) == 0) {
                break;
            }

            res = thread_wait_io(thr, &chld->pid_notify);
        } else if (pid <= -1) {
            // Check if anybody in pgrp has changed status
            if (wait_check_pgrp(proc_self, pid, flags, &chld) == 0) {
                _assert(chld);
                break;
            }

            // Build wait list
            // (re-attached each iteration; thread_wait_io_clear below
            // detaches them again before re-checking)
            for (struct process *_chld = proc_self->first_child; _chld; _chld = _chld->next_child) {
                if (pid == -1 || _chld->pgid == -pid) {
                    thread_wait_io_add(thr, &_chld->pid_notify);
                }
            }

            // Wait for any of pgrp
            // NOTE: `notify` (which child fired) is not used — the state
            // re-scan at the top of the loop finds the reportable child
            res = thread_wait_io_any(thr, &notify);

            thread_wait_io_clear(thr);
        } else {
            // Unreachable: pid == 0 panics above, pid > 0 and pid <= -1
            // are handled by the branches above
            panic("Shouldn't run\n");
        }

        if (res < 0) {
            // Likely interrupted
            return res;
        }
    }

    result_pid = chld->pid;
    if (chld->proc_state == PROC_FINISHED) {
        if (status) {
            *status = chld->exit_status;
        }

        // TODO: automatically cleanup threads which don't have
        // a parent like PID 1
        // Reap: detach from parent, remove from global process list, free
        process_unchild(chld);
        list_del(&chld->g_link);
        process_free(chld);
    } else if (chld->proc_state == PROC_SUSPENDED) {
        if (status) {
            // WIFSTOPPED
            *status = 127;
        }
    }

    return result_pid;
}
|