sched: add thread management syscalls

anna 2021-08-12 19:05:38 +02:00
parent a370ef69f6
commit 104578d072
Signed by: fef
GPG key ID: EC22E476DC2D3D84
22 changed files with 504 additions and 82 deletions

View file

@@ -16,7 +16,7 @@
extern uint16_t __syscall_return_point;
#endif
void arch_enter(struct exc_context *context)
void enter_syscall(struct exc_context *context)
{
enum syscall number = sc_num(context);
long (*handler)(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
@@ -47,6 +47,8 @@ void arch_enter(struct exc_context *context)
return;
}
current->tcb.exc_context = context;
/* TODO: not every syscall uses the max amount of parameters (duh) */
sc_ret = handler(sc_arg1(context), sc_arg2(context), sc_arg3(context),
sc_arg4(context), sc_arg5(context), sc_arg6(context));
@@ -54,6 +56,12 @@ void arch_enter(struct exc_context *context)
sc_set_rval(context, sc_ret);
}
void enter_sched(struct exc_context *context)
{
current->tcb.exc_context = context;
schedule();
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

View file

@@ -106,8 +106,6 @@ static void print_regs(struct exc_context *context)
print_reg("xPSR", context->sp->psr);
}
#include <arch/debug.h>
__naked __noreturn void handle_fault(struct exc_context *context, enum irqno irqno)
{
uart_emergency_setup();
@@ -120,7 +118,6 @@ __naked __noreturn void handle_fault(struct exc_context *context, enum irqno irq
uart_write_sync("\nSystem halted, goodbye\n\n");
__breakpoint;
while (1);
}

View file

@@ -4,14 +4,15 @@
.text
/* void schedule(void); */
.extern schedule
/* void enter_sched(struct exc_context *context); */
.extern enter_sched
/* void handle_pend_sv(void); */
func_begin handle_pend_sv
prepare_entry
bl schedule
mov r0, sp
bl enter_sched
prepare_leave
bx lr

View file

@@ -4,15 +4,15 @@
.text
/* void arch_enter(struct exc_context *context); */
.extern arch_enter
/* void enter_syscall(struct exc_context *context); */
.extern enter_syscall
/* void handle_svc(void); */
func_begin handle_svc
prepare_entry
mov r0, sp
bl arch_enter /* arch_enter(sp); */
bl enter_syscall /* enter_syscall(sp); */
prepare_leave
bx lr

View file

@@ -69,11 +69,17 @@ struct context {
/**
* @brief Task Control Block.
* This is a low level structure used by `do_switch()` to do the actual context
* switching,
* switching, and embedded into `struct task`. We do this nesting because it
* makes it easier to access the TCB's fields from assembly, and it also makes
* us less dependent on a specific architecture.
*/
struct tcb {
struct context context;
struct hw_context *hw_context;
/*
* Needed for exec() because the child task leaves kernel space over a
* different route than the parent one.
*/
struct exc_context *exc_context;
};
__always_inline sysarg_t sc_num(const struct exc_context *ctx)
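Worth noting about the TCB layout described above: `struct tcb` is the first member of `struct task` (see the new include/ardix/task.h further down), so a task pointer doubles as a pointer to its TCB and assembly code can reach the context at offset 0. A minimal compile-time check illustrating that invariant (a sketch, not part of this commit):

#include <stddef.h>
#include <ardix/task.h>

/* illustrative only: code handed a struct task * may treat it as a
 * struct tcb * precisely because the offset is 0 */
_Static_assert(offsetof(struct task, tcb) == 0,
               "the tcb must stay the first member of struct task");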

View file

@@ -14,6 +14,7 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
volatile unsigned long int tick = 0;
@@ -51,13 +52,23 @@ int arch_sched_init(unsigned int freq)
return 0;
}
void arch_task_init(struct task *task, void (*entry)(void))
void task_init(struct task *task, int (*entry)(void))
{
task->bottom = task->stack + CONFIG_STACK_SIZE;
/* TODO: Use separate stacks for kernel and program */
struct hw_context *hw_context = task->bottom - sizeof(*hw_context);
struct exc_context *exc_context = (void *)hw_context - sizeof(*exc_context);
memset(hw_context, 0, task->bottom - (void *)hw_context);
/*
* The return value of entry(), which is the exit code, will be stored
* in r0 as per the AAPCS. Conveniently, this happens to be the same
* register that is also used for passing the first argument to a
* function, so by setting the initial link register to exit() we
* effectively inject a call to that function after the task's main
* routine returns.
*/
hw_context->lr = exit;
hw_context->pc = entry;
hw_context->psr = 0x01000000; /* Thumb = 1, unprivileged */
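The lr/pc setup above effectively behaves as if the kernel had wrapped every entry point in a small trampoline. A conceptual C equivalent (a sketch only; the real mechanism is the register initialization shown above):

/* entry() leaves its exit code in r0 per the AAPCS, and r0 is also the
 * first-argument register, so "returning into" exit() passes it along */
static void task_trampoline(int (*entry)(void))
{
	exit(entry());
}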
@@ -67,29 +78,15 @@ void arch_task_init(struct task *task, void (*entry)(void))
memset(&task->tcb, 0, sizeof(task->tcb));
task->tcb.context.sp = exc_context;
task->tcb.context.pc = _leave;
task->tcb.exc_context = exc_context;
}
__naked __noreturn void _idle(void)
__naked int _idle(void)
{
/* TODO: put the CPU to sleep */
while (1);
}
int arch_idle_task_init(struct task *task)
{
void *stack = malloc(CONFIG_STACK_SIZE);
if (stack == NULL)
return -ENOMEM;
task->bottom = stack + CONFIG_STACK_SIZE; /* full-descending stack */
arch_task_init(task, _idle);
task->sleep = 0;
task->last_tick = 0;
task->state = TASK_READY;
task->pid = -1;
return 0;
}
unsigned long int ms_to_ticks(unsigned long int ms)
{
return ( ms * (unsigned long int)tick_freq ) / 1000lu /* 1 s = 1000 ms */;
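A quick worked example of the conversion above, assuming a hypothetical tick frequency of 250 Hz:

/* with tick_freq == 250 (assumed value for illustration): */
ms_to_ticks(2000);	/* (2000 * 250) / 1000 == 500 ticks */
ms_to_ticks(3);		/* (3 * 250) / 1000 == 0 ticks, rounded to zero */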

View file

@@ -16,16 +16,16 @@ struct task; /* see include/ardix/sched.h */
int arch_sched_init(unsigned int freq);
/**
* Initialize a new process.
* This requires the process' `stack_base` field to be initialized as the
* initial register values are written to the stack.
* @brief Initialize a new task.
*
* @param process: The process.
* @param entry: The process entry point.
*
* @param task Task to initialize
* @param entry Task entry point
*/
void arch_task_init(struct task *task, void (*entry)(void));
void task_init(struct task *task, int (*entry)(void));
int arch_idle_task_init(struct task *task);
/** @brief Idle task entry point. */
__naked int _idle(void);
/**
* @brief Convert milliseconds to system ticks, rounding to zero.

View file

@@ -7,6 +7,9 @@
#define ARCH_SYS_sleep 2
#define ARCH_SYS_malloc 3
#define ARCH_SYS_free 4
#define ARCH_SYS_exec 5
#define ARCH_SYS_exit 6
#define ARCH_SYS_waitpid 7
/*
* This file is part of Ardix.

View file

@@ -25,6 +25,8 @@ enum kevent_kind {
KEVENT_DEVICE,
/** @brief File has changed */
KEVENT_FILE,
/** @brief Task has exited */
KEVENT_TASK,
KEVENT_KIND_COUNT,
};

View file

@@ -6,6 +6,8 @@
#include <ardix/kent.h>
#include <ardix/list.h>
#include <ardix/mutex.h>
#include <ardix/task.h>
#include <ardix/types.h>
#include <config.h>
@@ -14,40 +16,6 @@
#warning "CONFIG_SCHED_MAXTASK is > 64, this could have a significant performance impact"
#endif
enum task_state {
/** Task is dead / doesn't exist */
TASK_DEAD,
/** Task is ready for execution or currently running. */
TASK_READY,
/** Task is waiting for its next time share. */
TASK_QUEUE,
/** Task is sleeping, `task::sleep` specifies for how many ticks. */
TASK_SLEEP,
/** Task is waiting for I/O to flush buffers. */
TASK_IOWAIT,
/** Task is waiting for a mutex to be unlocked. */
TASK_LOCKWAIT,
};
/** @brief Core structure holding information about a task. */
struct task {
struct tcb tcb;
struct kent kent;
/**
* @brief Points to the bottom of the stack.
* In a full-descending stack, this is one word after the highest stack address.
*/
void *bottom;
/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
unsigned long int sleep;
/** @brief Last execution in ticks */
unsigned long int last_tick;
enum task_state state;
pid_t pid;
};
/** @brief Current task (access from syscall context only) */
extern struct task *volatile current;
@@ -64,7 +32,7 @@ int sched_init(void);
* @brief Main scheduler routine.
* This will iterate over the process table and choose a new task to be run,
* which `current` is then updated to. If the old task was in state
* `TASK_READY`, it is set to `TASK_QUEUE`.
* `TASK_RUNNING`, it is set to `TASK_QUEUE`.
*/
void schedule(void);
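Condensed into a skeleton, the documented behavior looks roughly like this (a sketch; the real schedule() in kernel/sched.c also processes kevents and sleep timers, and pick_next_runnable() is a hypothetical stand-in for its scan over the task table):

void schedule_sketch(void)
{
	struct task *old = current;
	if (old->state == TASK_RUNNING)
		old->state = TASK_QUEUE;

	struct task *new = pick_next_runnable();
	if (new == NULL)
		new = &idle_task;	/* nothing runnable: run the idle task */

	new->state = TASK_RUNNING;
	new->last_tick = tick;
	current = new;
	/* the actual register switch then happens in do_switch() */
}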
@@ -77,9 +45,10 @@ void schedule(void);
* setup work.
*
* @param task Task to make a copy of
* @param err Where to store the error code (set to 0 on success)
* @returns The new (child) task copy, or `NULL` on failure
*/
struct task *task_clone(struct task *task);
struct task *task_clone(struct task *task, int *err);
/**
* @brief Sleep for an approximate amount of milliseconds.

View file

@@ -15,6 +15,9 @@ enum syscall {
SYS_sleep = ARCH_SYS_sleep,
SYS_malloc = ARCH_SYS_malloc,
SYS_free = ARCH_SYS_free,
SYS_exec = ARCH_SYS_exec,
SYS_exit = ARCH_SYS_exit,
SYS_waitpid = ARCH_SYS_waitpid,
NSYSCALLS
};
@@ -31,6 +34,11 @@ long sys_stub(void);
long sys_read(int fd, void *buf, size_t len);
long sys_write(int fd, const void *buf, size_t len);
long sys_sleep(unsigned long millis);
long sys_malloc(size_t size);
void sys_free(void *ptr);
long sys_exec(int (*entry)(void));
void sys_exit(int code);
long sys_waitpid(pid_t pid, int *stat_loc, int options);
/*
* This file is part of Ardix.

include/ardix/task.h (new file, 95 lines)
View file

@@ -0,0 +1,95 @@
/* See the end of this file for copyright, license, and warranty information. */
#pragma once
#include <arch/hardware.h>
#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/util.h>
enum task_state {
/** Task is dead / doesn't exist */
TASK_DEAD,
/** Task is currently running. */
TASK_RUNNING,
/** Task is waiting for its next time share. */
TASK_QUEUE,
/** Task is sleeping, `task::sleep` specifies for how many ticks. */
TASK_SLEEP,
/** Task is waiting for I/O to flush buffers. */
TASK_IOWAIT,
/** Task is waiting for a mutex to be unlocked. */
TASK_LOCKWAIT,
/** Task is waiting for a child to exit. */
TASK_WAITPID,
};
/** @brief Core structure holding information about a task. */
struct task {
struct tcb tcb;
struct kent kent;
/**
* @brief Points to the bottom of the stack.
* In a full-descending stack, this is one word after the highest stack address.
*/
void *bottom;
/** @brief Lowest address in the stack, as returned by malloc. */
void *stack;
/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
unsigned long int sleep;
/** @brief Last execution in ticks */
unsigned long int last_tick;
/*
* if a child process exited before its parent called waitpid(),
* this is where the children are stored temporarily
*/
struct list_head pending_sigchld;
struct mutex pending_sigchld_lock;
enum task_state state;
pid_t pid;
};
__always_inline void task_get(struct task *task)
{
kent_get(&task->kent);
}
__always_inline void task_put(struct task *task)
{
kent_put(&task->kent);
}
__always_inline struct task *task_parent(struct task *task)
{
if (task->pid == 0)
return NULL;
else
return container_of(task->kent.parent, struct task, kent);
}
struct task_kevent {
struct kevent kevent;
struct task *task;
int status;
};
void task_kevent_create_and_dispatch(struct task *task, int status);
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

include/sys/wait.h (new file, 21 lines)
View file

@@ -0,0 +1,21 @@
/* See the end of this file for copyright, license, and warranty information. */
#pragma once
#include <stdint.h>
#include <toolchain.h>
__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

View file

@@ -3,10 +3,21 @@
#pragma once
#include <stdint.h>
#include <toolchain.h>
__shared ssize_t read(int fildes, void *buf, size_t nbyte);
__shared ssize_t write(int fildes, const void *buf, size_t nbyte);
__shared ssize_t sleep(unsigned long int millis);
/**
* @brief Create a new thread.
*
* Embedded systems typically don't have an MMU and thus no virtual memory,
* meaning it is impossible to implement a proper fork. So, the `fork()` and
* `execve()` system calls have to be combined into one.
*/
__shared pid_t exec(int (*entry)(void));
__shared __noreturn void exit(int status);
__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);
/*
* This file is part of Ardix.
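Taken together, exec(), exit(), and waitpid() give a minimal spawn-and-reap workflow. A usage sketch (blink_main is a hypothetical thread body, error handling trimmed):

#include <sys/wait.h>
#include <unistd.h>

static int blink_main(void)
{
	write(1, "hello from the child thread\n", 28);
	return 42;	/* becomes the exit status via the injected exit() call */
}

int spawn_and_reap(void)
{
	int status;
	pid_t pid = exec(blink_main);
	if (pid < 0)
		return pid;	/* -EAGAIN if the task table is full, -ENOMEM otherwise */

	waitpid(pid, &status, 0);
	return status;	/* 42, as returned by blink_main() */
}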

View file

@@ -21,6 +21,7 @@ target_sources(ardix_kernel PRIVATE
sched.c
serial.c
syscall.c
task.c
userspace.c
)

View file

@@ -44,9 +44,7 @@ void mutex_unlock(struct mutex *mutex)
spin_unlock(&mutex->wait_queue_lock);
if (waiter != NULL) {
struct task *task = waiter->task;
current->state = TASK_QUEUE;
do_switch(current, task);
waiter->task->state = TASK_QUEUE;
} else {
_mutex_unlock(&mutex->lock);
}

View file

@@ -38,6 +38,7 @@
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/task.h>
#include <ardix/types.h>
#include <errno.h>
@@ -48,6 +49,7 @@ extern uint32_t _sstack;
extern uint32_t _estack;
static struct task *tasks[CONFIG_SCHED_MAXTASK];
static MUTEX(tasks_lock);
struct task *volatile current;
static struct task kernel_task;
@@ -56,7 +58,12 @@ static struct task idle_task;
static void task_destroy(struct kent *kent)
{
struct task *task = container_of(kent, struct task, kent);
mutex_lock(&tasks_lock);
tasks[task->pid] = NULL;
mutex_unlock(&tasks_lock);
kfree(task->stack);
kfree(task);
}
@@ -72,8 +79,12 @@ int sched_init(void)
memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
kernel_task.bottom = &_estack;
kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
kernel_task.pid = 0;
kernel_task.state = TASK_READY;
kernel_task.state = TASK_RUNNING;
list_init(&kernel_task.pending_sigchld);
mutex_init(&kernel_task.pending_sigchld_lock);
tasks[0] = &kernel_task;
current = &kernel_task;
@@ -85,11 +96,17 @@ int sched_init(void)
if (err != 0)
goto out;
err = arch_sched_init(CONFIG_SCHED_FREQ);
if (err != 0)
idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
if (idle_task.stack == NULL)
goto out;
idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
idle_task.pid = -1;
idle_task.state = TASK_QUEUE;
list_init(&idle_task.pending_sigchld);
mutex_init(&idle_task.pending_sigchld_lock);
task_init(&idle_task, _idle);
err = arch_idle_task_init(&idle_task);
err = arch_sched_init(CONFIG_SCHED_FREQ);
if (err != 0)
goto out;
@@ -113,11 +130,12 @@ static inline bool can_run(const struct task *task)
case TASK_SLEEP:
return tick - task->last_tick >= task->sleep;
case TASK_QUEUE:
case TASK_READY:
case TASK_RUNNING:
return true;
case TASK_DEAD:
case TASK_IOWAIT:
case TASK_LOCKWAIT:
case TASK_WAITPID:
return false;
}
@@ -135,7 +153,7 @@ void schedule(void)
kevents_process();
if (old->state == TASK_READY)
if (old->state == TASK_RUNNING)
old->state = TASK_QUEUE;
for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {
@@ -156,7 +174,7 @@ void schedule(void)
if (new == NULL)
new = &idle_task;
new->state = TASK_READY;
new->state = TASK_RUNNING;
new->last_tick = tick;
current = new;
@@ -177,9 +195,67 @@ long sys_sleep(unsigned long int millis)
current->sleep = ms_to_ticks(millis);
yield(TASK_SLEEP);
/* TODO: return actual milliseconds */
/*
* TODO: actually, use fucking hardware timers which were specifically
* invented for this exact kind of feature because (1) the tick
* resolution is often less than 1 ms and (2) ticks aren't really
* supposed to be guaranteed to happen at regular intervals and
* (3) the scheduler doesn't even check whether there is a task
* whose sleep period just expired
*/
return 0;
}
long sys_exec(int (*entry)(void))
{
pid_t pid;
struct task *child = NULL;
mutex_lock(&tasks_lock);
for (pid = 1; pid < CONFIG_SCHED_MAXTASK; pid++) {
if (tasks[pid] == NULL)
break;
}
if (pid == CONFIG_SCHED_MAXTASK) {
pid = -EAGAIN;
goto out;
}
child = kmalloc(sizeof(*child));
if (child == NULL) {
pid = -ENOMEM;
goto out;
}
child->pid = pid;
child->stack = kmalloc(CONFIG_STACK_SIZE);
if (child->stack == NULL) {
pid = -ENOMEM;
goto err_stack_malloc;
}
child->kent.parent = &current->kent;
child->kent.destroy = task_destroy;
kent_init(&child->kent);
child->bottom = child->stack + CONFIG_STACK_SIZE;
task_init(child, entry);
list_init(&child->pending_sigchld);
mutex_init(&child->pending_sigchld_lock);
child->state = TASK_QUEUE;
tasks[pid] = child;
goto out;
err_stack_malloc:
kfree(child);
out:
mutex_unlock(&tasks_lock);
return pid;
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

View file

@@ -15,6 +15,9 @@ long (*const sys_table[NSYSCALLS])(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sys_table_entry(SYS_sleep, sys_sleep),
sys_table_entry(SYS_malloc, sys_malloc),
sys_table_entry(SYS_free, sys_free),
sys_table_entry(SYS_exec, sys_exec),
sys_table_entry(SYS_exit, sys_exit),
sys_table_entry(SYS_waitpid, sys_waitpid),
};
long sys_stub(void)

kernel/task.c (new file, 189 lines)
View file

@@ -0,0 +1,189 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <arch-generic/do_switch.h>
#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/mutex.h>
#include <ardix/sched.h>
#include <ardix/syscall.h>
#include <ardix/task.h>
#include <ardix/userspace.h>
#include <ardix/util.h>
#include <errno.h>
#include <toolchain.h>
#include <arch/debug.h>
static void task_kevent_destroy(struct kent *kent)
{
struct kevent *kevent = container_of(kent, struct kevent, kent);
struct task_kevent *task_kevent = container_of(kevent, struct task_kevent, kevent);
kfree(task_kevent);
}
void task_kevent_create_and_dispatch(struct task *task, int status)
{
struct task_kevent *event = kmalloc(sizeof(*event));
if (event == NULL)
return; /* TODO: we're fucked here */
event->kevent.kent.parent = &task->kent;
event->kevent.kent.destroy = task_kevent_destroy;
event->kevent.kind = KEVENT_TASK;
kent_init(&event->kevent.kent);
event->task = task;
event->status = status;
kevent_dispatch(&event->kevent);
}
struct dead_child {
struct list_head link; /* -> task::pending_sigchld */
struct task *child;
int status;
};
__noreturn void sys_exit(int status)
{
struct task *task = current;
struct task *parent = task_parent(task);
task_kevent_create_and_dispatch(task, status);
if (parent->state != TASK_WAITPID) {
/*
* atomic_kmalloc wouldn't actually be needed here, but we use
* it anyway because it has a separate heap which is more likely
* to have an emergency reserve of memory. A failing allocation
* would *really* be inconvenient here.
*/
struct dead_child *entry = atomic_kmalloc(sizeof(*entry));
if (entry == NULL) {
schedule(); /* TODO: we're severely fucked here */
}
entry->child = task;
entry->status = status; /* read back later by waitpid_poll() */
mutex_lock(&parent->pending_sigchld_lock);
list_insert(&parent->pending_sigchld, &entry->link);
mutex_unlock(&parent->pending_sigchld_lock);
}
task->state = TASK_DEAD;
schedule();
/* we should never get here, this is only needed to make gcc happy */
while (1);
}
struct task_kevent_extra {
struct task *parent;
/* this is a return value from the listener */
struct {
struct task *child;
int status;
} ret;
};
static int task_kevent_listener(struct kevent *event, void *_extra)
{
struct task_kevent_extra *extra = _extra;
struct task_kevent *task_kevent = container_of(event, struct task_kevent, kevent);
struct task *child = task_kevent->task;
if (extra->parent != task_parent(child))
return KEVENT_CB_NONE;
extra->parent->state = TASK_QUEUE;
extra->ret.child = child;
extra->ret.status = task_kevent->status;
return KEVENT_CB_STOP | KEVENT_CB_LISTENER_DEL;
}
/* manually poll for dead children if there is no memory for a kevent listener */
static int waitpid_poll(struct task *parent)
{
/* mutex is already locked here */
while (list_is_empty(&parent->pending_sigchld)) {
mutex_unlock(&parent->pending_sigchld_lock);
/*
* TODO: This has to be gotten rid of when the scheduler
* isn't a simple round robin one anymore!
*/
yield(TASK_QUEUE);
mutex_lock(&parent->pending_sigchld_lock);
}
mutex_unlock(&parent->pending_sigchld_lock);
struct dead_child *dead_child = list_first_entry(&parent->pending_sigchld,
struct dead_child,
link);
int status = dead_child->status;
task_put(dead_child->child);
list_delete(&dead_child->link);
kfree(dead_child);
return status;
}
/* use kevent system to wait for dying children */
static int waitpid_yield(struct task *parent)
{
/* mutex is already locked here */
struct task_kevent_extra extra = {
.parent = parent,
};
if (kevent_listener_add(KEVENT_TASK, task_kevent_listener, &extra) == NULL)
return waitpid_poll(parent);
mutex_unlock(&parent->pending_sigchld_lock);
yield(TASK_WAITPID);
/* extra.ret is set by task_kevent_listener */
task_put(extra.ret.child);
return extra.ret.status;
}
long sys_waitpid(pid_t pid, int __user *stat_loc, int options)
{
struct task *parent = current;
/*
* both waitpid_yield and waitpid_poll expect the mutex
* to be locked and will unlock it before returning
*/
mutex_lock(&parent->pending_sigchld_lock);
int status;
if (list_is_empty(&parent->pending_sigchld))
status = waitpid_yield(parent);
else
status = waitpid_poll(parent);
copy_to_user(stat_loc, &status, sizeof(*stat_loc));
return 0;
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

View file

@@ -14,6 +14,7 @@ target_sources(ardix_lib PRIVATE
stdlib.c
string.c
unistd.c
wait.c
)
# This file is part of Ardix.

View file

@@ -19,6 +19,19 @@ ssize_t sleep(unsigned long int millis)
return syscall(SYS_sleep, (sysarg_t)millis);
}
pid_t exec(int (*entry)(void))
{
return (pid_t)syscall(SYS_exec, (sysarg_t)entry);
}
void exit(int status)
{
syscall(SYS_exit, status);
/* make gcc happy */
while (1);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

lib/wait.c (new file, 23 lines)
View file

@@ -0,0 +1,23 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/syscall.h>
#include <sys/wait.h>
pid_t waitpid(pid_t pid, int *stat_loc, int options)
{
return (pid_t)syscall(SYS_waitpid, (sysarg_t)pid, (sysarg_t)stat_loc, (sysarg_t)options);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/