Compare commits
15 commits
| SHA1 |
|---|
| e86ef2acbd |
| 5d2539fc4a |
| 3e35afcfa9 |
| f293c6661e |
| 4177931774 |
| e80a6cb630 |
| ad76275721 |
| adccbef80d |
| c767d551d3 |
| 104578d072 |
| a370ef69f6 |
| 2ea850cead |
| fb9ec2a8bc |
| 040b5af5d6 |
| 8293d9372b |
56 changed files with 1104 additions and 328 deletions
.gitignore (vendored, 1 line changed)

@@ -1,4 +1,5 @@
build/
cmake-build-*/
.vscode/.*
@@ -71,7 +71,7 @@ configure_file(
add_subdirectory(arch)
-#add_subdirectory(init)
+add_subdirectory(init)
add_subdirectory(kernel)
add_subdirectory(lib)

@@ -86,7 +86,7 @@ add_custom_command(
COMMAND ${CMAKE_LINKER}
ARGS ${ARDIX_LINKER_FLAGS} -o ardix.elf
$<TARGET_FILE:ardix_arch>
-#$<TARGET_FILE:ardix_init>
+$<TARGET_FILE:ardix_init>
$<TARGET_FILE:ardix_kernel>
$<TARGET_FILE:ardix_kernel_fs>
$<TARGET_FILE:ardix_lib>
@@ -44,7 +44,7 @@ To build the EEPROM image, execute the following command.
Pass any other configuration options you want to set to the first command or use `cmake-gui(1)`.

```shell
-# Replace <target> with one of the target architectures from the list above
+# Replace <arch> with one of the target architectures from the list above
# This will default to at91sam3x8e (Arduino Due)
cmake -DARCH=<arch> -B build -S .
cmake --build build
@@ -19,9 +19,7 @@ configure_file(
target_sources(ardix_arch PRIVATE
arch_init.c
-atom_get_put.S
-atom.c
-atomic.c
+atom.S
do_switch.S
entry.c
handle_fault.c
@@ -11,7 +11,7 @@ extern uint32_t _eheap;
void __preinit_malloc(void)
{
-malloc_init(&_sheap, (size_t)&_eheap - (size_t)&_sheap);
+kmalloc_init(&_sheap, (size_t)&_eheap - (size_t)&_sheap);
}
__preinit_call(__preinit_malloc);
arch/at91sam3x8e/atom.S (new file, 114 lines)

@@ -0,0 +1,114 @@
/* See the end of this file for copyright, license, and warranty information. */

.include "asm.S"

.text

/* int _atom_add(volatile int *atom, int val) */
func_begin _atom_add
push {r4}
1: ldrex r2, [r0] /* int old = __ldrex(atom) */
add r3, r2, r1 /* int new = old + val */
strex r4, r3, [r0] /* int err = __strex(atom, new) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1 */
dmb /* data memory barrier */
mov r0, r2 /* return old */
pop {r4}
bx lr
func_end _atom_add

/* these are the same as _atom_add except for the instruction
 * in the LDREX/STREX pair, so i'm not gonna annotate them */

func_begin _atom_sub
push {r4}
1: ldrex r2, [r0]
sub r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_sub

func_begin _atom_and
push {r4}
1: ldrex r2, [r0]
and r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_and

func_begin _atom_or
push {r4}
1: ldrex r2, [r0]
orr r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_or

func_begin _atom_xor
push {r4}
1: ldrex r2, [r0]
eor r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_xor

/* int _atom_xchg(volatile int *atom, int val) */
func_begin _atom_xchg
ldrex r2, [r0] /* int old = __ldrex(atom) */
strex r3, r1, [r0] /* int err = __strex(atom, val) */
teq r3, #0 /* if (err) */
bne _atom_xchg /* goto _atom_xchg */
dmb /* data memory barrier */
mov r0, r2 /* return old */
bx lr
func_end _atom_xchg
/* int _atom_cmpxchg(volatile int *atom, int cmp, int val) */
func_begin _atom_cmpxchg
push {r4}
1: mov r4, #0 /* int err = 0 */
ldrex r3, [r0] /* int old = __ldrex(atom) */
teq r3, r1 /* if (old == cmp) */
it eq
strexeq r4, r2, [r0] /* err = __strex(atom, val) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1b */
dmb /* data memory barrier */
mov r0, r3 /* return old */
pop {r4}
bx lr
func_end _atom_cmpxchg

/*
 * This file is part of Ardix.
 * Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
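For orientation, the exclusive-load/store loops above implement fetch-and-modify primitives that retry until the store-exclusive succeeds and always return the previous value. A minimal C model of that contract, illustrative only and not part of this commit:

```c
/* What _atom_add() computes, expressed with a GCC builtin instead of
 * LDREX/STREX; the assembly above is the real implementation. */
static int atom_add_model(volatile int *atom, int val)
{
        int old;
        do {
                old = *atom;  /* LDREX: read and mark the address exclusive */
                /* retry if someone else modified *atom in between (STREX failed) */
        } while (!__sync_bool_compare_and_swap(atom, old, old + val));
        return old;           /* every _atom_* routine returns the old value */
}
```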
@@ -1,46 +0,0 @@
/* See the end of this file for copyright, license, and warranty information. */

.include "asm.S"

.text

/* int _atom_get(int *count); */
func_begin _atom_get

ldrex r1, [r0] /* int tmp = atom->count */
add r2, r1, #1 /* int newval = tmp + 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_get /* -> goto _atom_get to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr

func_end _atom_get

/* int _atom_put(int *count); */
func_begin _atom_put

ldrex r1, [r0] /* int tmp = atom->count */
sub r2, r1, #1 /* int newval = tmp - 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_put /* -> goto _atom_put to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr

func_end _atom_put

/*
 * This file is part of Ardix.
 * Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
@@ -16,7 +16,7 @@
extern uint16_t __syscall_return_point;
#endif

-void arch_enter(struct exc_context *context)
+void enter_syscall(struct exc_context *context)
{
enum syscall number = sc_num(context);
long (*handler)(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,

@@ -30,8 +30,8 @@ void arch_enter(struct exc_context *context)
 * the instructions are always 2-byte aligned. Additionally, the PC
 * points to the instruction *after* the SVC, not SVC itself.
 */
-if (((uintptr_t)regs->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
-sc_set_rval(regs, -EACCES);
+if (((uintptr_t)context->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
+sc_set_rval(context, -EACCES);
return;
}
# endif

@@ -47,6 +47,8 @@ void arch_enter(struct exc_context *context)
return;
}

+current->tcb.exc_context = context;
+
/* TODO: not every syscall uses the max amount of parameters (duh) */
sc_ret = handler(sc_arg1(context), sc_arg2(context), sc_arg3(context),
sc_arg4(context), sc_arg5(context), sc_arg6(context));

@@ -54,6 +56,12 @@ void arch_enter(struct exc_context *context)
sc_set_rval(context, sc_ret);
}

+void enter_sched(struct exc_context *context)
+{
+current->tcb.exc_context = context;
+schedule();
+}

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -17,7 +17,7 @@ static void uart_write_sync(const char *s)
}

/** Setup UART to manual byte-by-byte control */
-static inline void uart_emergency_setup(void)
+static void uart_emergency_setup(void)
{
UART->UART_IDR = 0xffffffff;

@@ -34,7 +34,7 @@ static inline void uart_emergency_setup(void)

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
-static inline void print_err_msg(enum irqno irqno)
+static void print_err_msg(enum irqno irqno)
{
uart_write_sync("\n\n########## SERIOUS BRUH MOMENT! ##########\n");

@@ -100,14 +100,12 @@ static void print_regs(struct exc_context *context)
print_reg("R10", context->r10);
print_reg("R11", context->r11);
print_reg("R12", context->sp->r12);
-print_reg("SP", *(word_t *)&context->sp);
-print_reg("LR", *(word_t *)&context->sp->lr);
-print_reg("PC", *(word_t *)&context->sp->pc);
+print_reg("SP", (word_t)context->sp);
+print_reg("LR", (word_t)context->sp->lr);
+print_reg("PC", (word_t)context->sp->pc);
print_reg("xPSR", context->sp->psr);
}

#include <arch/debug.h>

__naked __noreturn void handle_fault(struct exc_context *context, enum irqno irqno)
{
uart_emergency_setup();

@@ -120,7 +118,6 @@ __naked __noreturn void handle_fault(struct exc_context *context, enum irqno irq

uart_write_sync("\nSystem halted, goodbye\n\n");

__breakpoint;
while (1);
}
@@ -4,14 +4,15 @@

.text

-/* void schedule(void); */
-.extern schedule
+/* void enter_sched(struct exc_context *context); */
+.extern enter_sched

/* void handle_pend_sv(void); */
func_begin handle_pend_sv

prepare_entry
-bl schedule
+mov r0, sp
+bl enter_sched
prepare_leave

bx lr
@@ -4,15 +4,15 @@

.text

-/* void arch_enter(struct exc_context *context); */
-.extern arch_enter
+/* void enter_syscall(struct exc_context *context); */
+.extern enter_syscall

/* void handle_svc(void); */
func_begin handle_svc

prepare_entry
mov r0, sp
-bl arch_enter /* arch_enter(sp); */
+bl enter_syscall /* enter_syscall(sp); */
prepare_leave
bx lr
arch/at91sam3x8e/include/arch/atomic.h (new file, 28 lines)

@@ -0,0 +1,28 @@
#pragma once

#include <ardix/types.h>

#include <toolchain.h>

static __always_inline word_t _atomic_enter(void)
{
word_t primask;
__asm__ volatile(
" mrs %0, primask \n"
" cpsid i \n"
: "=r"(primask));
return primask;
}

static __always_inline void _atomic_restore(word_t context)
{
if (!(context & 1))
__asm__ volatile("cpsie i");
}

static inline int _is_atomic(void)
{
int primask;
__asm__ volatile("mrs %0, primask" : "=r"(primask));
return primask & 1;
}
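The pair above implements the usual PRIMASK save/disable/restore pattern: interrupts are only re-enabled on exit if they were enabled on entry, so nested critical sections compose. A hedged sketch of a hypothetical caller, not part of this commit:

```c
#include <ardix/types.h>
#include <arch/atomic.h>

/* Illustrative critical section built on the primitives above. */
void example_increment(volatile int *shared)
{
        word_t context = _atomic_enter(); /* save PRIMASK, then cpsid i */
        *shared += 1;                     /* no interrupt can preempt this */
        _atomic_restore(context);         /* cpsie i only if IRQs were on before */
}
```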
|
@ -3,13 +3,32 @@
|
|||
#pragma once
|
||||
|
||||
#include <config.h>
|
||||
#include <toolchain.h>
|
||||
|
||||
#if 1
|
||||
|
||||
#ifdef DEBUG
|
||||
# define __breakpoint __asm__ volatile("bkpt")
|
||||
#else
|
||||
# define __breakpoint
|
||||
# define NDEBUG
|
||||
#endif
|
||||
|
||||
#else
|
||||
#define __breakpoint
|
||||
#endif
|
||||
|
||||
__always_inline int __is_kernel(void) {
|
||||
int psr_val;
|
||||
|
||||
__asm__ volatile(
|
||||
" mrs %0, psr \n"
|
||||
: "=&r" (psr_val)
|
||||
);
|
||||
|
||||
return psr_val & 0x01ff; /* bits 8-0 hold ISR_NUMBER */
|
||||
}
|
||||
|
||||
/*
|
||||
* This file is part of Ardix.
|
||||
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
|
||||
|
|
|
|||
|
|
@@ -69,11 +69,17 @@ struct context {
/**
 * @brief Task Control Block.
 * This is a low level structure used by `do_switch()` to do the actual context
- * switching,
+ * switching, and embedded into `struct task`. We do this nesting because it
+ * makes it easier to access the TCB's fields from assembly, and it also makes
+ * us less dependent on a specific architecture.
 */
struct tcb {
struct context context;
struct hw_context *hw_context;
+/*
+ * Needed for exec() because the child task leaves kernel space over a
+ * different route than the parent one.
+ */
+struct exc_context *exc_context;
};

__always_inline sysarg_t sc_num(const struct exc_context *ctx)
@@ -14,6 +14,7 @@

#include <errno.h>
+#include <string.h>
#include <unistd.h>

volatile unsigned long int tick = 0;

@@ -51,13 +52,23 @@ int arch_sched_init(unsigned int freq)
return 0;
}

-void arch_task_init(struct task *task, void (*entry)(void))
+void task_init(struct task *task, int (*entry)(void))
{
task->bottom = task->stack + CONFIG_STACK_SIZE;
/* TODO: Use separate stacks for kernel and program */
struct hw_context *hw_context = task->bottom - sizeof(*hw_context);
struct exc_context *exc_context = (void *)hw_context - sizeof(*exc_context);

memset(hw_context, 0, task->bottom - (void *)hw_context);
/*
 * The return value of entry(), which is the exit code, will be stored
 * in r0 as per the AAPCS. Conveniently, this happens to be the same
 * register that is also used for passing the first argument to a
 * function, so by setting the initial link register to exit() we
 * effectively inject a call to that function after the task's main
 * routine returns.
 */
hw_context->lr = exit;
hw_context->pc = entry;
hw_context->psr = 0x01000000; /* Thumb = 1, unprivileged */

@@ -67,29 +78,15 @@ void arch_task_init(struct task *task, void (*entry)(void))
memset(&task->tcb, 0, sizeof(task->tcb));
task->tcb.context.sp = exc_context;
task->tcb.context.pc = _leave;
task->tcb.exc_context = exc_context;
}

-__naked __noreturn void _idle(void)
+__naked int _idle(void)
{
/* TODO: put the CPU to sleep */
while (1);
}

-int arch_idle_task_init(struct task *task)
-{
-void *stack = malloc(CONFIG_STACK_SIZE);
-if (stack == NULL)
-return -ENOMEM;
-
-task->bottom = stack + CONFIG_STACK_SIZE; /* full-descending stack */
-arch_task_init(task, _idle);
-task->sleep = 0;
-task->last_tick = 0;
-task->state = TASK_READY;
-task->pid = -1;
-return 0;
-}
-
unsigned long int ms_to_ticks(unsigned long int ms)
{
return ( ms * (unsigned long int)tick_freq ) / 1000lu /* 1 s = 1000 ms */;
@@ -67,7 +67,8 @@ void irq_can1(void) __weak __alias(_stub_handler);

extern uint32_t _estack;

-__section(.vectors) const void *exception_table[] = {
+__section(.vectors)
+void *const exception_table[] = {
&_estack, /* initial SP value (stack grows down) */
handle_reset, /* reset vector */
NULL, /* reserved */
@@ -16,16 +16,16 @@ struct task; /* see include/ardix/sched.h */
int arch_sched_init(unsigned int freq);

/**
- * Initialize a new process.
- * This requires the process' `stack_base` field to be initialized as the
- * initial register values are written to the stack.
+ * @brief Initialize a new task.
 *
- * @param process: The process.
- * @param entry: The process entry point.
 *
+ * @param task Task to initialize
+ * @param entry Task entry point
 */
-void arch_task_init(struct task *task, void (*entry)(void));
+void task_init(struct task *task, int (*entry)(void));

-int arch_idle_task_init(struct task *task);
+/** @brief Idle task entry point. */
+__naked int _idle(void);

/**
 * @brief Convert milliseconds to system ticks, rounding to zero.
@@ -5,6 +5,11 @@
#define ARCH_SYS_read 0
#define ARCH_SYS_write 1
#define ARCH_SYS_sleep 2
+#define ARCH_SYS_malloc 3
+#define ARCH_SYS_free 4
+#define ARCH_SYS_exec 5
+#define ARCH_SYS_exit 6
+#define ARCH_SYS_waitpid 7

/*
 * This file is part of Ardix.
@@ -5,28 +5,119 @@
#include <ardix/types.h>
#include <toolchain.h>

-#define ATOM(name) atom_t name = { .count = 0, }
+#define ATOM_DEFINE(val) { ._val = val, }
+#define ATOM(name, val) atom_t name = ATOM_DEFINE(val)

-void atom_init(atom_t *atom);
-
-extern int _atom_get(int *count);
-extern int _atom_put(int *count);
-
-__always_inline int atom_get(atom_t *atom)
+static __always_inline void atom_init(atom_t *atom, int val)
{
-return _atom_get(&atom->count);
+atom->_val = val;
}

-__always_inline int atom_put(atom_t *atom)
+static __always_inline int atom_read(const atom_t *atom)
{
-return _atom_put(&atom->count);
+return atom->_val;
}

-int atom_count(atom_t *atom);
+/*
+ * These are implemented in arch/<arch>/atom.S
+ */
+
+extern int _atom_add(volatile int *atom, int val);
+extern int _atom_sub(volatile int *atom, int val);
+extern int _atom_and(volatile int *atom, int val);
+extern int _atom_or(volatile int *atom, int val);
+extern int _atom_xor(volatile int *atom, int val);
+extern int _atom_xchg(volatile int *atom, int val);
+extern int _atom_cmpxchg(volatile int *atom, int cmp, int val);

/**
 * @brief Atomically add `val` to `atom`.
 * @return The old value of `atom`, before the addition
 */
static __always_inline int atom_add(atom_t *atom, int val)
{
return _atom_add(&atom->_val, val);
}

/**
 * @brief Atomically subtract `val` from `atom`.
 * @return The old value of `atom` before the subtraction
 */
static __always_inline int atom_sub(atom_t *atom, int val)
{
return _atom_sub(&atom->_val, val);
}

/**
 * @brief Atomically do a bitwise AND of `val` and `atom`.
 * @return The old value of `atom`, before the AND
 */
static __always_inline int atom_and(atom_t *atom, int val)
{
return _atom_and(&atom->_val, val);
}

/**
 * @brief Atomically do a bitwise OR of `val` and `atom`.
 * @return The old value of `atom`, before the OR
 */
static __always_inline int atom_or(atom_t *atom, int val)
{
return _atom_or(&atom->_val, val);
}

/**
 * @brief Atomically do a bitwise XOR of `val` and `atom`.
 * @return The old value of `atom`, before the XOR
 */
static __always_inline int atom_xor(atom_t *atom, int val)
{
return _atom_xor(&atom->_val, val);
}

/**
 * @brief Atomically increment `atom` by 1.
 * @return The old value of `atom`, before the increment
 */
static __always_inline int atom_get(atom_t *atom)
{
return _atom_add(&atom->_val, 1);
}

/**
 * @brief Atomically decrement `atom` by 1.
 * @return The old value of `atom`, before the decrement
 */
static __always_inline int atom_put(atom_t *atom)
{
return _atom_sub(&atom->_val, 1);
}

/**
 * @brief Atomically exchange the value of `atom` with `val`.
 * @return The old value of `atom`
 */
static __always_inline int atom_xchg(atom_t *atom, int val)
{
return _atom_xchg(&atom->_val, val);
}

/**
 * @brief Atomically compare the value of `atom` with `cmp` and,
 * if found to be equal, exchange it with `val`.
 * @param atom atom to perform the operation on
 * @param cmp value to compare the atom with
 * @param val new value to be written to atom if it is equal to `cmp`
 * @return The old value of `atom`
 */
static __always_inline int atom_cmpxchg(atom_t *atom, int cmp, int val)
{
return _atom_cmpxchg(&atom->_val, cmp, val);
}

/*
 * This file is part of Ardix.
- * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
+ * Copyright (c) 2020, 2021, 2022 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
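For context, the reworked interface supports both plain reference counting and lock-free compare-and-swap loops. An illustrative use (hypothetical code, not part of the diff):

```c
#include <ardix/atom.h>

/* Hypothetical users of the new atom API. */
static ATOM(example_refcount, 1); /* expands to atom_t example_refcount = { ._val = 1, } */
static ATOM(example_flag, 0);

void example(void)
{
        atom_get(&example_refcount); /* increment, returns the old value */

        if (atom_put(&example_refcount) == 1) {
                /* the old value was 1, so this put dropped the last reference */
        }

        if (atom_cmpxchg(&example_flag, 0, 1) == 0) {
                /* the flag was still 0 and we atomically set it to 1 */
        }
}
```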
@@ -2,6 +2,10 @@

#pragma once

+#include <arch/atomic.h>
+
#include <toolchain.h>

/**
 * Enter atomic context.
 *

@@ -9,13 +13,22 @@
 * reference counter that is checked in the scheduler interrupt routine before
 * performing the context switch.
 */
-void atomic_enter(void);
+static __always_inline word_t atomic_enter(void)
+{
+return _atomic_enter();
+}

/** Leave atomic context. */
-void atomic_leave(void);
+static __always_inline void atomic_restore(word_t context)
+{
+_atomic_restore(context);
+}

/** Return a nonzero value if the current process is in atomic context. */
-int is_atomic(void);
+static __always_inline int is_atomic(void)
+{
+return _is_atomic();
+}

/*
 * This file is part of Ardix.
@@ -77,13 +77,13 @@ int devices_init(void);
int device_init(struct device *dev);

/** @brief Increment a device's reference counter. */
-__always_inline void device_get(struct device *dev)
+static __always_inline void device_get(struct device *dev)
{
kent_get(&dev->kent);
}

/** @brief Decrement a device's referece counter. */
-__always_inline void device_put(struct device *dev)
+static __always_inline void device_put(struct device *dev)
{
kent_put(&dev->kent);
}
@@ -25,10 +25,16 @@ struct dmabuf {
struct dmabuf *dmabuf_create(struct device *dev, size_t len);

/** Increment a DMA buffer's reference counter. */
-void dmabuf_get(struct dmabuf *buf);
+static __always_inline void dmabuf_get(struct dmabuf *buf)
+{
+kent_get(&buf->kent);
+}

/** Decrement a DMA buffer's reference counter. */
-void dmabuf_put(struct dmabuf *buf);
+static __always_inline void dmabuf_put(struct dmabuf *buf)
+{
+kent_put(&buf->kent);
+}

/*
 * This file is part of Ardix.
@@ -48,7 +48,10 @@ int kent_init(struct kent *kent);
 *
 * @param kent: The kent.
 */
-void kent_get(struct kent *kent);
+static __always_inline void kent_get(struct kent *kent)
+{
+atom_get(&kent->refcount);
+}

/**
 * Decrement the reference counter.
@@ -25,6 +25,8 @@ enum kevent_kind {
KEVENT_DEVICE,
/** @brief File has changed */
KEVENT_FILE,
+/** @brief Task has exited */
+KEVENT_TASK,

KEVENT_KIND_COUNT,
};

@@ -102,12 +104,12 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
 */
void kevent_listener_del(struct kevent_listener *listener);

-__always_inline void kevent_get(struct kevent *event)
+static __always_inline void kevent_get(struct kevent *event)
{
kent_get(&event->kent);
}

-__always_inline void kevent_put(struct kevent *event)
+static __always_inline void kevent_put(struct kevent *event)
{
kent_put(&event->kent);
}
@@ -6,7 +6,7 @@
#include <toolchain.h>

/**
- * @defgroup malloc Memory Management
+ * @defgroup kmalloc Kernel Memory Management
 *
 * @{
 */

@@ -15,18 +15,18 @@
 * @brief Allocate `size` bytes of memory *w/out initializing it*.
 *
 * This method may block if an allocation is already taking place.
- * Use `atomic_malloc()` if you are in kernel space and in atomic context.
+ * Use `atomic_kmalloc()` if you are in kernel space and in atomic context.
 *
 * @param size The amount of bytes to allocate.
 * @return A pointer to the beginning of the memory area, or `NULL` if
 * `size` was 0 or there is not enough free memory left.
 */
-__shared __malloc(free, 1) void *malloc(size_t size);
+__malloc(kfree, 1) void *kmalloc(size_t size);

/**
 * @brief Allocate `size` bytes of memory *w/out initializing it*.
 *
- * Unlike `malloc()`, this method is guaranteed not to sleep. It does this by
+ * Unlike `kmalloc()`, this method is guaranteed not to sleep. It does this by
 * using a completely separate, smaller heap. Only use this if you already are
 * in atomic context, like when in an irq.
 *

@@ -34,18 +34,7 @@ __shared __malloc(free, 1) void *malloc(size_t size);
 * @return A pointer to the beginning of the memory area, or `NULL` if
 * `size` was 0 or there is not enough free memory left.
 */
-__malloc(free, 1) void *atomic_malloc(size_t size);
-
-/**
- * @brief Allocate an array and initialize the memory to zeroes.
- * The allocated size will be at least `nmemb * size`.
- * If the multiplication would overflow, the allocation fails.
- *
- * @param nmemb The amount of members.
- * @param size The size of an individual member.
- * @return A pointer to the zeroed-out memory, or `NULL` if OOM.
- */
-__shared __malloc(free, 1) void *calloc(size_t nmemb, size_t size);
+__malloc(kfree, 1) void *atomic_kmalloc(size_t size);

/**
 * @brief Free a previously allocated memory region.

@@ -53,12 +42,12 @@ __shared __malloc(free, 1) void *calloc(size_t nmemb, size_t size);
 *
 * @param ptr The pointer, as returned by `malloc`/`calloc`.
 */
-__shared void free(void *ptr);
-
-/** @} */
+void kfree(void *ptr);

/** Initialize the memory allocator, this is only called by the bootloader on early bootstrap. */
-void malloc_init(void *heap, size_t size);
+void kmalloc_init(void *heap, size_t size);

+/** @} */

/*
 * This file is part of Ardix.
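As a usage note (a sketch under an assumed header path, not part of this commit): ordinary kernel code calls the renamed `kmalloc()`/`kfree()` pair and may sleep, while IRQ handlers and other atomic contexts must use `atomic_kmalloc()`, which draws from the separate non-sleeping heap.

```c
#include <ardix/malloc.h> /* assumed location of the kmalloc() declarations */
#include <errno.h>
#include <stddef.h>

/* Hypothetical callers of the renamed allocator entry points. */
int example_task_context(size_t len)
{
        void *buf = kmalloc(len); /* may block on the heap mutex */
        if (buf == NULL)
                return -ENOMEM;
        /* ... use buf ... */
        kfree(buf);
        return 0;
}

void *example_irq_context(size_t len)
{
        return atomic_kmalloc(len); /* never sleeps; smaller dedicated heap */
}
```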
@@ -39,14 +39,12 @@ __always_inline void spin_init(spin_t *spin)

__always_inline void spin_lock(spin_t *spin)
{
-atomic_enter();
_spin_lock(&spin->lock);
}

__always_inline void spin_unlock(spin_t *spin)
{
_spin_unlock(&spin->lock);
-atomic_leave();
}

__always_inline int spin_trylock(spin_t *spin)
@@ -6,6 +6,8 @@

#include <ardix/kent.h>
#include <ardix/list.h>
+#include <ardix/mutex.h>
+#include <ardix/task.h>
#include <ardix/types.h>

#include <config.h>

@@ -14,40 +16,6 @@
#warning "CONFIG_SCHED_MAXTASK is > 64, this could have a significant performance impact"
#endif

-enum task_state {
-/** Task is dead / doesn't exist */
-TASK_DEAD,
-/** Task is ready for execution or currently running. */
-TASK_READY,
-/** Task is waiting for its next time share. */
-TASK_QUEUE,
-/** Task is sleeping, `task::sleep` specifies for how many ticks. */
-TASK_SLEEP,
-/** Task is waiting for I/O to flush buffers. */
-TASK_IOWAIT,
-/** Task is waiting for a mutex to be unlocked. */
-TASK_LOCKWAIT,
-};
-
-/** @brief Core structure holding information about a task. */
-struct task {
-struct tcb tcb;
-
-struct kent kent;
-/**
- * @brief Points to the bottom of the stack.
- * In a full-descending stack, this is one word after the highest stack address.
- */
-void *bottom;
-/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
-unsigned long int sleep;
-/** @brief Last execution in ticks */
-unsigned long int last_tick;
-
-enum task_state state;
-pid_t pid;
-};
-
/** @brief Current task (access from syscall context only) */
extern struct task *volatile current;

@@ -64,7 +32,7 @@ int sched_init(void);
 * @brief Main scheduler routine.
 * This will iterate over the process table and choose a new task to be run,
 * which `current` is then updated to. If the old task was in state
- * `TASK_READY`, it is set to `TASK_QUEUE`.
+ * `TASK_RUNNING`, it is set to `TASK_QUEUE`.
 */
void schedule(void);

@@ -77,9 +45,10 @@ void schedule(void);
 * setup work.
 *
 * @param task Task to make a copy of
+ * @param err Where to store the error code (will be written 0 on success)
 * @returns The new (child) task copy, or `NULL` on failure
 */
-struct task *task_clone(struct task *task);
+struct task *task_clone(struct task *task, int *trr);

/**
 * @brief Sleep for an approximate amount of milliseconds.
@@ -13,6 +13,11 @@ enum syscall {
SYS_read = ARCH_SYS_read,
SYS_write = ARCH_SYS_write,
SYS_sleep = ARCH_SYS_sleep,
+SYS_malloc = ARCH_SYS_malloc,
+SYS_free = ARCH_SYS_free,
+SYS_exec = ARCH_SYS_exec,
+SYS_exit = ARCH_SYS_exit,
+SYS_waitpid = ARCH_SYS_waitpid,
NSYSCALLS
};

@@ -29,6 +34,11 @@ long sys_stub(void);
long sys_read(int fd, void *buf, size_t len);
long sys_write(int fd, const void *buf, size_t len);
long sys_sleep(unsigned long millis);
+long sys_malloc(size_t size);
+void sys_free(void *ptr);
+long sys_exec(int (*entry)(void));
+void sys_exit(int code);
+long sys_waitpid(pid_t pid, int *stat_loc, int options);

/*
 * This file is part of Ardix.
include/ardix/task.h (new file, 95 lines)

@@ -0,0 +1,95 @@
/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <arch/hardware.h>

#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/util.h>

enum task_state {
/** Task is dead / doesn't exist */
TASK_DEAD,
/** Task is currently running. */
TASK_RUNNING,
/** Task is waiting for its next time share. */
TASK_QUEUE,
/** Task is sleeping, `task::sleep` specifies for how many ticks. */
TASK_SLEEP,
/** Task is waiting for I/O to flush buffers. */
TASK_IOWAIT,
/** Task is waiting for a mutex to be unlocked. */
TASK_LOCKWAIT,
/** Task is waiting for a child to exit. */
TASK_WAITPID,
};

/** @brief Core structure holding information about a task. */
struct task {
struct tcb tcb;

struct kent kent;
/**
 * @brief Points to the bottom of the stack.
 * In a full-descending stack, this is one word after the highest stack address.
 */
void *bottom;
/** @brief Lowest address in the stack, as returned by malloc. */
void *stack;
/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
unsigned long int sleep;
/** @brief Last execution in ticks */
unsigned long int last_tick;

/*
 * if a child process exited before its parent called waitpid(),
 * this is where the children are stored temporarily
 */
struct list_head pending_sigchld;
struct mutex pending_sigchld_lock;

enum task_state state;
pid_t pid;
};

static __always_inline void task_get(struct task *task)
{
kent_get(&task->kent);
}

static __always_inline void task_put(struct task *task)
{
kent_put(&task->kent);
}

static inline struct task *task_parent(struct task *task)
{
if (task->pid == 0)
return NULL;
else
return container_of(task->kent.parent, struct task, kent);
}

struct task_kevent {
struct kevent kevent;
struct task *task;
int status;
};

void task_kevent_create_and_dispatch(struct task *task, int status);

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
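A small sketch of how the new task helpers compose (hypothetical code, not part of the commit): reference counting goes through the embedded kent, and `task_parent()` resolves the parent task via `container_of()` on the kent hierarchy, returning `NULL` for pid 0.

```c
#include <ardix/task.h>
#include <stddef.h>

/* Illustrative walk up the task tree using the helpers above. */
static int example_tree_depth(struct task *task)
{
        int depth = 0;

        task_get(task); /* hold a reference while inspecting the task */
        for (struct task *t = task_parent(task); t != NULL; t = task_parent(t))
                depth++; /* stops at the root task, whose pid is 0 */
        task_put(task);

        return depth;
}
```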
@@ -16,9 +16,9 @@
/** Process identifier. */
typedef _PID_TYPE_ pid_t;

-/** Simple atomic reference counter */
+/** Simple atomic integer */
typedef struct {
-int count;
+volatile int _val;
} atom_t;

#include <arch/hardware.h>
@@ -1,17 +1,14 @@
/* See the end of this file for copyright, license, and warranty information. */

-#include <ardix/atom.h>
-#include <stddef.h>
+#pragma once

-void atom_init(atom_t *atom)
-{
-atom->count = 0;
-}
+#include <arch/debug.h>

-int atom_count(atom_t *atom)
-{
-return atom->count;
-}
+#ifdef NDEBUG
+# define assert(expr)
+#else
+# define assert(expr) if (!(expr)) { __breakpoint; }
+#endif

/*
 * This file is part of Ardix.
include/stdlib.h (new file, 58 lines)

@@ -0,0 +1,58 @@
/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <ardix/types.h>
#include <toolchain.h>

/**
 * @defgroup malloc Memory Management
 *
 * @{
 */

/**
 * @brief Allocate `size` bytes of memory *w/out initializing it*.
 *
 * This method may block if an allocation is already taking place.
 * Use `atomic_kmalloc()` if you are in kernel space and in atomic context.
 *
 * @param size The amount of bytes to allocate.
 * @return A pointer to the beginning of the memory area, or `NULL` if
 * `size` was 0 or there is not enough free memory left.
 */
__shared __malloc(free, 1) void *malloc(size_t size);

/**
 * @brief Allocate an array and initialize the memory to zeroes.
 * The allocated size will be at least `nmemb * size`.
 * If the multiplication would overflow, the allocation fails.
 *
 * @param nmemb The amount of members.
 * @param size The size of an individual member.
 * @return A pointer to the zeroed-out memory, or `NULL` if OOM.
 */
__malloc(free, 1) void *calloc(size_t nmemb, size_t size);

/**
 * @brief Free a previously allocated memory region.
 * Passing `NULL` has no effect.
 *
 * @param ptr The pointer, as returned by `malloc`/`calloc`.
 */
__shared void free(void *ptr);

/** @} */

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
include/sys/wait.h (new file, 21 lines)

@@ -0,0 +1,21 @@
/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <stdint.h>
#include <toolchain.h>

__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
@@ -3,10 +3,21 @@
#pragma once

#include <stdint.h>
#include <toolchain.h>

__shared ssize_t read(int fildes, void *buf, size_t nbyte);
__shared ssize_t write(int fildes, const void *buf, size_t nbyte);
__shared ssize_t sleep(unsigned long int millis);
+/**
+ * @brief Create a new thread.
+ *
+ * Embedded systems typically don't have a MMU and thus no virtual memory,
+ * meaning it is impossible to implement a proper fork. So, the `fork()` and
+ * `execve()` system calls have to be combined into one.
+ */
+__shared pid_t exec(int (*entry)(void));
+__shared __noreturn void exit(int status);
+__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);

/*
 * This file is part of Ardix.
init/main.c (new file, 52 lines)

@@ -0,0 +1,52 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <ardix/io.h>
#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/sched.h>

#include <config.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int child_test(void)
{
printf("[child ] i'm so sleempy,, calling sleep(),,\n");
sleep(1000);
printf("[child ] sleep() returned, i'm gonna kill myself now uwu\n");
return 69;
}

/**
 * @brief init daemon entry point.
 */
int init_main(void)
{
printf("[parent] calling exec()\n");
pid_t pid = exec(child_test);
printf("[parent] exec() returned, child pid = %d\n", pid);

int status;
printf("[parent] calling waitpid()\n");
waitpid(-1, &status, 0);
printf("[parent] waitpid() returned, child exit code = %d\n", status);
printf("[parent] my child has died, goodbye cruel world qwq\n");

return 0;
}

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
@@ -15,11 +15,13 @@ target_sources(ardix_kernel PRIVATE
kent.c
kevent.c
main.c
mm.c
mutex.c
ringbuf.c
sched.c
serial.c
syscall.c
+task.c
+userspace.c
)
@@ -14,7 +14,7 @@ struct kent *devices_kent = NULL;
static void devices_destroy(struct kent *kent)
{
/* should never be executed because the root devices kent is immortal */
-free(kent);
+kfree(kent);
}

/** Initialize the devices subsystem. */

@@ -23,7 +23,7 @@ int devices_init(void)
if (devices_kent != NULL)
return -EEXIST;

-devices_kent = malloc(sizeof(*devices_kent));
+devices_kent = kmalloc(sizeof(*devices_kent));
if (devices_kent == NULL)
return -ENOMEM;

@@ -36,7 +36,7 @@ int devices_init(void)
static void device_destroy(struct kent *kent)
{
struct device *dev = kent_to_device(kent);
-free(dev);
+kfree(dev);
}

int device_init(struct device *dev)

@@ -54,12 +54,12 @@ static void device_kevent_destroy(struct kent *kent)
{
struct kevent *event = container_of(kent, struct kevent, kent);
struct device_kevent *device_kevent = container_of(event, struct device_kevent, kevent);
-free(device_kevent);
+kfree(device_kevent);
}

struct device_kevent *device_kevent_create(struct device *device, enum device_kevent_flags flags)
{
-struct device_kevent *event = atomic_malloc(sizeof(*event));
+struct device_kevent *event = atomic_kmalloc(sizeof(*event));
if (event == NULL)
return NULL;

@@ -70,7 +70,7 @@ struct device_kevent *device_kevent_create(struct device *device, enum device_ke
event->kevent.kent.destroy = device_kevent_destroy;
int err = kent_init(&event->kevent.kent);
if (err) {
-free(event);
+kfree(event);
event = NULL;
}
kernel/dma.c (16 lines changed)

@@ -12,7 +12,7 @@
static void dmabuf_destroy(struct kent *kent)
{
struct dmabuf *buf = kent_to_dmabuf(kent);
-free(buf);
+kfree(buf);
}

struct dmabuf *dmabuf_create(struct device *dev, size_t len)

@@ -22,7 +22,7 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
 * allocation needs to be atomic because the buffer might be
 * free()d from within an irq handler which cannot sleep
 */
-struct dmabuf *buf = atomic_malloc(sizeof(*buf) + len);
+struct dmabuf *buf = atomic_kmalloc(sizeof(*buf) + len);
if (buf == NULL)
return NULL;

@@ -31,7 +31,7 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)

err = kent_init(&buf->kent);
if (err) {
-free(buf);
+kfree(buf);
return NULL;
}

@@ -40,16 +40,6 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
return buf;
}

-void dmabuf_get(struct dmabuf *buf)
-{
-kent_get(&buf->kent);
-}
-
-void dmabuf_put(struct dmabuf *buf)
-{
-kent_put(&buf->kent);
-}
-
/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -20,7 +20,7 @@ static void file_destroy(struct kent *kent)
fdtab[file->fd] = NULL;
mutex_unlock(&fdtab_lock);

-free(file);
+kfree(file);
}

struct file *file_create(struct device *device, enum file_type type, int *err)

@@ -41,7 +41,7 @@ struct file *file_create(struct device *device, enum file_type type, int *err)
return NULL;
}

-f = malloc(sizeof(*f));
+f = kmalloc(sizeof(*f));
if (f == NULL) {
*err = -ENOMEM;
mutex_unlock(&fdtab_lock);

@@ -102,7 +102,7 @@ static int io_device_kevent_listener(struct kevent *event, void *_extra)
return KEVENT_CB_NONE;

extra->task->state = TASK_QUEUE;
-free(extra);
+kfree(extra);
file_put(extra->file);
kent_put(&extra->task->kent);
return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;

@@ -114,7 +114,7 @@ static int iowait_device(struct file *file, enum device_kevent_flags flags)
kent_get(&current->kent);

/* this must be atomic because event listeners can't sleep but need to call free() */
-struct io_device_kevent_extra *extra = atomic_malloc(sizeof(*extra));
+struct io_device_kevent_extra *extra = atomic_kmalloc(sizeof(*extra));
if (extra == NULL)
return -ENOMEM;

@@ -201,12 +201,12 @@ static void file_kevent_destroy(struct kent *kent)
{
struct kevent *kevent = container_of(kent, struct kevent, kent);
struct file_kevent *file_kevent = container_of(kevent, struct file_kevent, kevent);
-free(file_kevent);
+kfree(file_kevent);
}

struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags)
{
-struct file_kevent *event = atomic_malloc(sizeof(*event));
+struct file_kevent *event = atomic_kmalloc(sizeof(*event));
if (event == NULL)
return NULL;

@@ -217,7 +217,7 @@ struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags fl
event->kevent.kent.destroy = file_kevent_destroy;
int err = kent_init(&event->kevent.kent);
if (err != 0) {
-free(event);
+kfree(event);
event = NULL;
}
@@ -18,7 +18,7 @@ long sys_read(int fd, __user void *buf, size_t len)
if (f == NULL)
return -EBADF;

-copy = malloc(len);
+copy = kmalloc(len);
if (copy == NULL)
return -ENOMEM;

@@ -26,7 +26,7 @@ long sys_read(int fd, __user void *buf, size_t len)
if (ret >= 0)
ret = copy_to_user(buf, copy, ret);

-free(copy);
+kfree(copy);
file_put(f);
return ret;
}
@@ -18,7 +18,7 @@ long sys_write(int fd, __user const void *buf, size_t len)
if (f == NULL)
return -EBADF;

-copy = malloc(len);
+copy = kmalloc(len);
if (copy == NULL) {
file_put(f);
return -ENOMEM;

@@ -27,7 +27,7 @@ long sys_write(int fd, __user const void *buf, size_t len)
len = copy_from_user(copy, buf, len);
ret = file_write(f, copy, len);

-free(copy);
+kfree(copy);
file_put(f);
return ret;
}
@@ -1,9 +1,7 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <ardix/atom.h>
#include <ardix/malloc.h>
#include <ardix/kent.h>
#include <ardix/list.h>

#include <errno.h>
#include <stddef.h>

@@ -19,8 +17,7 @@ int kent_root_init(void)

kent_root->parent = NULL;
kent_root->destroy = NULL;
-atom_init(&kent_root->refcount);
-kent_get(kent_root);
+atom_init(&kent_root->refcount, 1);

return 0;
}

@@ -31,27 +28,20 @@ int kent_init(struct kent *kent)
return -EFAULT;
kent_get(kent->parent);

-atom_init(&kent->refcount);
-kent_get(kent);
+atom_init(&kent->refcount, 1);

return 0;
}

-void kent_get(struct kent *kent)
-{
-atom_get(&kent->refcount);
-}
-
void kent_put(struct kent *kent)
{
-struct kent *parent = kent->parent;
-
-if (atom_put(&kent->refcount) == 0) {
+do {
+struct kent *parent = kent->parent;
+if (atom_put(&kent->refcount) != 1)
+break;
kent->destroy(kent);

-if (parent != NULL)
-kent_put(parent);
-}
+kent = parent;
+} while (kent != NULL);
}

/*
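The reworked `kent_put()` above replaces recursion with a loop: dropping the last reference destroys the kent and then continues with its parent, so a long chain of final puts no longer grows the call stack. A hedged usage sketch with a hypothetical child object, not part of the commit:

```c
#include <ardix/kent.h>
#include <ardix/malloc.h>
#include <errno.h>
#include <stddef.h>

static void example_destroy(struct kent *kent)
{
        kfree(kent);
}

/* Create a child kent under `parent` and immediately drop it again. */
int example_kent_lifetime(struct kent *parent)
{
        struct kent *child = kmalloc(sizeof(*child));
        if (child == NULL)
                return -ENOMEM;

        child->parent = parent;           /* kent_init() takes a reference on the parent */
        child->destroy = example_destroy;
        int err = kent_init(child);       /* refcount now starts out at 1 */
        if (err) {
                kfree(child);
                return err;
        }

        kent_put(child);                  /* last put: destroys child, then puts parent */
        return 0;
}
```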
@@ -48,7 +48,7 @@ void kevents_init(void)
}

/* called from scheduler context only */
-static inline void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
+static void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
{
struct kevent *event, *tmp_event;

@@ -66,7 +66,7 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_

if (cb_ret & KEVENT_CB_LISTENER_DEL) {
list_delete(&listener->link);
-free(listener);
+kfree(listener);
}

if (cb_ret & KEVENT_CB_STOP)

@@ -102,7 +102,6 @@ void kevents_process(void)
process_single_queue(&kev_queues[i], &kev_listeners[i]);
}

-/* called from irq context only */
void kevent_dispatch(struct kevent *event)
{
struct kevent_queue *queue = &kev_queues[event->kind];

@@ -138,7 +137,7 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
int (*cb)(struct kevent *, void *),
void *extra)
{
-struct kevent_listener *listener = malloc(sizeof(*listener));
+struct kevent_listener *listener = kmalloc(sizeof(*listener));

if (listener != NULL) {
listener->cb = cb;

@@ -158,7 +157,7 @@ void kevent_listener_del(struct kevent_listener *listener)
list_delete(&listener->link);
mutex_unlock(&kev_listeners_lock);

-free(listener);
+kfree(listener);
}

/*
@@ -12,6 +12,8 @@
#include <stdio.h>
#include <unistd.h>

+extern int init_main(void); /* init/main.c */
+
/**
 * Core init routine.
 *

@@ -44,9 +46,10 @@ int main(void)
printf("This is non-violent software, and there is NO WARRANTY.\n");
printf("See <https://git.fef.moe/fef/ardix> for details.\n\n");

-/* TODO: The next big step is to write initd and fork to it here. */
-while (1)
-sleep(1000);
+pid_t pid = exec(init_main);
+waitpid(pid, &err, 0);
+printf("initd exited with status %d, system halted\n", err);
+while (1);
}

/*
@@ -27,7 +27,7 @@
 * header containing its size w/out overhead; free blocks additionally have a
 * `struct list_head` after that in order to keep track of where the free blocks
 * are. This list is ordered by size ascendingly, so we can directly take the
- * first sufficiently sized block when iterating over the list in `malloc()`.
+ * first sufficiently sized block when iterating over the list in `kmalloc()`.
 *
 * Additionally, the effective block size is copied to the very end of the block
 * (directly after the last usable address) in order to be able to find a

@@ -106,7 +106,7 @@ struct memblk {
/** @brief If the block is allocated, this will be overwritten */
struct list_head list;

-/** @brief Used as the return value for `malloc()` */
+/** @brief Used as the return value for `kmalloc()` */
uint8_t data[0];
/** @brief Used to get the copy of the size field at the end of the block */
size_t endsz[0];

@@ -164,7 +164,18 @@ static struct memblk *blk_try_merge(struct list_head *heap, struct memblk *blk);
/** @brief Cut a slice from a free block and return the slice. */
static struct memblk *blk_slice(struct list_head *heap, struct memblk *bottom, size_t bottom_size);

-void malloc_init(void *heap, size_t size)
+long sys_malloc(size_t size)
+{
+void *ptr = kmalloc(size);
+return (long)ptr;
+}
+
+void sys_free(void *ptr)
+{
+kfree(ptr);
+}
+
+void kmalloc_init(void *heap, size_t size)
{
memset(heap, 0, size);

@@ -191,7 +202,7 @@ void malloc_init(void *heap, size_t size)
atomic_heap_free = blk_get_size(atomic_block);
}

-void *malloc(size_t size)
+void *kmalloc(size_t size)
{
if (size == 0)
return NULL; /* as per POSIX */

@@ -220,6 +231,9 @@ void *malloc(size_t size)
cursor = blk_slice(&generic_heap, cursor, size);
generic_heap_free -= blk_get_size(cursor);
ptr = cursor->data;
+# ifdef DEBUG
+memset(cursor->data, 0xaa, blk_get_size(cursor));
+# endif
}

mutex_unlock(&generic_heap_lock);
@@ -227,7 +241,7 @@ void *malloc(size_t size)
return ptr;
}

-void *atomic_malloc(size_t size)
+void *atomic_kmalloc(size_t size)
{
if (size == 0)
return NULL;

@@ -249,28 +263,15 @@ void *atomic_malloc(size_t size)
cursor = blk_slice(&atomic_heap, cursor, size);
atomic_heap_free -= blk_get_size(cursor);
ptr = cursor->data;
+# ifdef DEBUG
+memset(cursor->data, 0xaa, blk_get_size(cursor));
+# endif
}

return ptr;
}

-void *calloc(size_t nmemb, size_t size)
-{
-size_t total = nmemb * size;
-
-/* check for overflow as mandated by POSIX */
-if (size != 0 && total / size != nmemb)
-return NULL;
-
-void *ptr = malloc(total);
-
-if (ptr != NULL)
-memset(ptr, 0, total);
-
-return ptr;
-}
-
-void free(void *ptr)
+void kfree(void *ptr)
{
if (ptr == NULL)
return; /* as per POSIX.1-2008 */

@@ -284,17 +285,27 @@ void free(void *ptr)
mutex_lock(&generic_heap_lock);
generic_heap_free += blk_get_size(blk);
blk_clear_alloc(blk);
-blk_try_merge(&generic_heap, blk);
+blk = blk_try_merge(&generic_heap, blk);
+
+# ifdef DEBUG
+memset(&blk->data[MIN_SIZE], 0xaa, blk_get_size(blk) - MIN_SIZE);
+# endif

mutex_unlock(&generic_heap_lock);
} else if (ptr >= atomic_heap_start && ptr <= atomic_heap_end) {
if (!blk_is_alloc(blk))
__breakpoint;

-atomic_enter();
+word_t context = atomic_enter();
atomic_heap_free += blk_get_size(blk);
blk_clear_alloc(blk);
-blk_try_merge(&atomic_heap, blk);
-atomic_leave();
+blk = blk_try_merge(&atomic_heap, blk);
+
+# ifdef DEBUG
+memset(&blk->data[MIN_SIZE], 0xaa, blk_get_size(blk) - MIN_SIZE);
+# endif
+
+atomic_restore(context);
} else {
__breakpoint;
}
@ -337,8 +348,8 @@ static struct memblk *blk_try_merge(struct list_head *heap, struct memblk *blk)
|
|||
}
|
||||
|
||||
static struct memblk *blk_merge(struct list_head *heap,
|
||||
struct memblk *bottom,
|
||||
struct memblk *top)
|
||||
struct memblk *bottom,
|
||||
struct memblk *top)
|
||||
{
|
||||
size_t bottom_size = blk_get_size(bottom);
|
||||
size_t top_size = blk_get_size(top);
|
||||
|
|
@ -357,7 +368,8 @@ static struct memblk *blk_merge(struct list_head *heap,
|
|||
return bottom;
|
||||
}
|
||||
|
||||
static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk, size_t slice_size)
|
||||
static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk,
|
||||
size_t slice_size)
|
||||
{
|
||||
list_delete(&blk->list);
|
||||
|
||||
|
|
@ -401,7 +413,7 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk, size
|
|||
return blk;
|
||||
}
|
||||
|
||||
static inline size_t round_alloc_size_up(size_t size)
|
||||
static size_t round_alloc_size_up(size_t size)
|
||||
{
|
||||
size_t rounded = (size / MIN_SIZE) * MIN_SIZE;
|
||||
if (rounded < size)
|
||||
|
|
@ -425,7 +437,7 @@ static void blk_set_size(struct memblk *blk, size_t size)
|
|||
blk->endsz[words] |= size;
|
||||
}
|
||||
|
||||
static inline void blk_set_alloc(struct memblk *blk)
|
||||
static void blk_set_alloc(struct memblk *blk)
|
||||
{
|
||||
size_t words = blk->size / sizeof(blk->size);
|
||||
|
||||
|
|
@ -433,7 +445,7 @@ static inline void blk_set_alloc(struct memblk *blk)
|
|||
blk->endsz[words] |= ALLOC_FLAG;
|
||||
}
|
||||
|
||||
static inline void blk_clear_alloc(struct memblk *blk)
|
||||
static void blk_clear_alloc(struct memblk *blk)
|
||||
{
|
||||
size_t words = blk->size / sizeof(blk->size);
|
||||
|
||||
|
|
@ -461,32 +473,38 @@ static inline int blk_is_border_start(struct memblk *blk)
|
|||
return blk->size & BORDER_FLAG;
|
||||
}
|
||||
|
||||
static inline void blk_set_border_end(struct memblk *blk)
|
||||
static void blk_set_border_end(struct memblk *blk)
|
||||
{
|
||||
size_t words = blk->size / sizeof(blk->size);
|
||||
blk->endsz[words] |= BORDER_FLAG;
|
||||
}
|
||||
|
||||
static inline void blk_clear_border_end(struct memblk *blk)
|
||||
static void blk_clear_border_end(struct memblk *blk)
|
||||
{
|
||||
size_t words = blk->size / sizeof(blk->size);
|
||||
blk->endsz[words] &= ~BORDER_FLAG;
|
||||
}
|
||||
|
||||
static inline int blk_is_border_end(struct memblk *blk)
|
||||
static int blk_is_border_end(struct memblk *blk)
|
||||
{
|
||||
size_t words = blk->size / sizeof(blk->size);
|
||||
return blk->endsz[words] & BORDER_FLAG;
|
||||
}
|
||||
|
||||
static inline struct memblk *blk_prev(struct memblk *blk)
|
||||
static struct memblk *blk_prev(struct memblk *blk)
|
||||
{
|
||||
if (blk_is_border_start(blk))
|
||||
return NULL;
|
||||
|
||||
/* gcc does not like accessing index -1 of zero-length arrays */
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Warray-bounds"
|
||||
#pragma GCC diagnostic ignored "-Wzero-length-bounds"
|
||||
return (void *)blk - (blk->prevsz[-1] & SIZE_MSK) - OVERHEAD;
|
||||
#pragma GCC diagnostic pop
|
||||
}
|
||||
|
||||
static inline struct memblk *blk_next(struct memblk *blk)
|
||||
static struct memblk *blk_next(struct memblk *blk)
|
||||
{
|
||||
if (blk_is_border_end(blk))
|
||||
return NULL;
|
||||
|
|
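The rename separates the kernel heap interface (`kmalloc`/`atomic_kmalloc`/`kfree`) from the userspace allocator, which now goes through syscalls. A minimal sketch of how kernel code would use the renamed interface; the helper below is illustrative only and not part of the diff:

```c
#include <ardix/malloc.h>
#include <string.h>

/* hypothetical helper: grab a zeroed scratch buffer from the kernel heap */
static void *get_scratch(size_t len)
{
	void *buf = kmalloc(len);
	if (buf != NULL)
		memset(buf, 0, len);	/* kmalloc() does not zero memory for us */
	return buf;
}

/* callers release the buffer with kfree(buf); code that must not sleep
 * would allocate from the separate heap via atomic_kmalloc() instead */
```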
@@ -20,16 +20,15 @@ void mutex_init(struct mutex *mutex)
void mutex_lock(struct mutex *mutex)
{
if (mutex_trylock(mutex) != 0) {
struct mutex_wait *entry = malloc(sizeof(*entry));
if (entry == NULL) {
_spin_lock(&mutex->lock); /* fall back to spinning */
return;
}
struct mutex_wait entry = {
.task = current,
};

word_t context = atomic_enter();
spin_lock(&mutex->wait_queue_lock);
entry->task = current;
list_insert(&mutex->wait_queue, &entry->link);
list_insert(&mutex->wait_queue, &entry.link);
spin_unlock(&mutex->wait_queue_lock);
atomic_restore(context);

yield(TASK_LOCKWAIT);
}

@@ -39,18 +38,17 @@ void mutex_unlock(struct mutex *mutex)
{
struct mutex_wait *waiter = NULL;

word_t context = atomic_enter();
spin_lock(&mutex->wait_queue_lock);
if (!list_is_empty(&mutex->wait_queue)) {
waiter = list_first_entry(&mutex->wait_queue, struct mutex_wait, link);
list_delete(&waiter->link);
}
spin_unlock(&mutex->wait_queue_lock);
atomic_restore(context);

if (waiter != NULL) {
struct task *task = waiter->task;
free(waiter);
current->state = TASK_QUEUE;
do_switch(current, task);
waiter->task->state = TASK_QUEUE;
} else {
_mutex_unlock(&mutex->lock);
}
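With the wait-queue entry now living on the blocked task's own stack, the slow path of `mutex_lock()` no longer depends on a heap allocation. A rough usage sketch, assuming the `MUTEX()` static initializer seen in sched.c; the lock and counter names are made up:

```c
#include <ardix/mutex.h>

static MUTEX(example_lock);		/* hypothetical lock, initializer as in sched.c */
static unsigned int example_counter;	/* hypothetical shared state */

static void example_inc(void)
{
	mutex_lock(&example_lock);	/* may put the calling task to sleep if contended */
	example_counter++;
	mutex_unlock(&example_lock);	/* requeues the first waiter, if there is one */
}
```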
@@ -10,7 +10,7 @@
struct ringbuf *ringbuf_create(size_t size)
{
struct ringbuf *buf = malloc(sizeof(*buf) + size);
struct ringbuf *buf = kmalloc(sizeof(*buf) + size);
if (buf == NULL)
return NULL;

@@ -24,7 +24,7 @@ struct ringbuf *ringbuf_create(size_t size)
inline void ringbuf_destroy(struct ringbuf *buf)
{
free(buf);
kfree(buf);
}

size_t ringbuf_read(void *dest, struct ringbuf *buf, size_t len)
106 kernel/sched.c
@@ -38,6 +38,7 @@
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/task.h>
#include <ardix/types.h>

#include <errno.h>

@@ -48,6 +49,7 @@ extern uint32_t _sstack;
extern uint32_t _estack;

static struct task *tasks[CONFIG_SCHED_MAXTASK];
static MUTEX(tasks_lock);
struct task *volatile current;

static struct task kernel_task;

@@ -56,8 +58,13 @@ static struct task idle_task;
static void task_destroy(struct kent *kent)
{
struct task *task = container_of(kent, struct task, kent);

mutex_lock(&tasks_lock);
tasks[task->pid] = NULL;
free(task);
mutex_unlock(&tasks_lock);

kfree(task->stack);
kfree(task);
}

int sched_init(void)

@@ -72,8 +79,16 @@ int sched_init(void)
memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
kernel_task.bottom = &_estack;
/* gcc thinks &_estack is an array of size 1 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
#pragma GCC diagnostic pop
kernel_task.pid = 0;
kernel_task.state = TASK_READY;
kernel_task.state = TASK_RUNNING;

list_init(&kernel_task.pending_sigchld);
mutex_init(&kernel_task.pending_sigchld_lock);

tasks[0] = &kernel_task;
current = &kernel_task;

@@ -85,11 +100,17 @@ int sched_init(void)
if (err != 0)
goto out;

err = arch_sched_init(CONFIG_SCHED_FREQ);
if (err != 0)
idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
if (idle_task.stack == NULL)
goto out;
idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
idle_task.pid = -1;
idle_task.state = TASK_QUEUE;
list_init(&idle_task.pending_sigchld);
mutex_init(&idle_task.pending_sigchld_lock);
task_init(&idle_task, _idle);

err = arch_idle_task_init(&idle_task);
err = arch_sched_init(CONFIG_SCHED_FREQ);
if (err != 0)
goto out;

@@ -104,20 +125,25 @@ out:
/**
* @brief Determine whether the specified task is a candidate for execution.
*
* This function is only called once from `schedule()` and performance critical,
* hence the `__always_inline` attribute.
*
* @param task The task
* @returns whether `task` could be run next
*/
static inline bool can_run(const struct task *task)
__always_inline
static bool can_run(const struct task *task)
{
switch (task->state) {
case TASK_SLEEP:
return tick - task->last_tick >= task->sleep;
case TASK_QUEUE:
case TASK_READY:
case TASK_RUNNING:
return true;
case TASK_DEAD:
case TASK_IOWAIT:
case TASK_LOCKWAIT:
case TASK_WAITPID:
return false;
}

@@ -126,7 +152,7 @@ static inline bool can_run(const struct task *task)
void schedule(void)
{
atomic_enter();
word_t context = atomic_enter();

struct task *old = current;
pid_t nextpid = old->pid;

@@ -135,7 +161,7 @@ void schedule(void)
kevents_process();

if (old->state == TASK_READY)
if (old->state == TASK_RUNNING)
old->state = TASK_QUEUE;

for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {

@@ -156,11 +182,11 @@ void schedule(void)
if (new == NULL)
new = &idle_task;

new->state = TASK_READY;
new->state = TASK_RUNNING;
new->last_tick = tick;
current = new;

atomic_leave();
atomic_restore(context);

if (old != new)
do_switch(old, new);

@@ -177,9 +203,67 @@ long sys_sleep(unsigned long int millis)
current->sleep = ms_to_ticks(millis);
yield(TASK_SLEEP);
/* TODO: return actual milliseconds */
/*
* TODO: actually, use fucking hardware timers which were specifically
* invented for this exact kind of feature because (1) the tick
* resolution is often less than 1 ms and (2) ticks aren't really
* supposed to be guaranteed to happen at regular intervals and
* (3) the scheduler doesn't even check whether there is a task
* whose sleep period just expired
*/
return 0;
}

long sys_exec(int (*entry)(void))
{
pid_t pid;
struct task *child = NULL;

mutex_lock(&tasks_lock);

for (pid = 1; pid < CONFIG_SCHED_MAXTASK; pid++) {
if (tasks[pid] == NULL)
break;
}
if (pid == CONFIG_SCHED_MAXTASK) {
pid = -EAGAIN;
goto out;
}

child = kmalloc(sizeof(*child));
if (child == NULL) {
pid = -ENOMEM;
goto out;
}

child->pid = pid;
child->stack = kmalloc(CONFIG_STACK_SIZE);
if (child->stack == NULL) {
pid = -ENOMEM;
goto err_stack_malloc;
}

child->kent.parent = &current->kent;
child->kent.destroy = task_destroy;
kent_init(&child->kent);

child->bottom = child->stack + CONFIG_STACK_SIZE;
task_init(child, entry);

list_init(&child->pending_sigchld);
mutex_init(&child->pending_sigchld_lock);

child->state = TASK_QUEUE;
tasks[pid] = child;
goto out;

err_stack_malloc:
kfree(child);
out:
mutex_unlock(&tasks_lock);
return pid;
}

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
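The TODO in `sys_sleep()` comes down to sleep periods being rounded to scheduler ticks: with `CONFIG_SCHED_FREQ` dropped to 200 Hz, one tick is 5 ms. A hypothetical sketch of the millisecond-to-tick conversion (this is not the tree's `ms_to_ticks()`, just a plausible shape of it):

```c
/* hypothetical sketch: convert milliseconds to scheduler ticks,
 * rounding up so a task never sleeps shorter than it asked for */
static inline unsigned long ms_to_ticks_sketch(unsigned long ms)
{
	return (ms * CONFIG_SCHED_FREQ + 999) / 1000;
}
```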
@@ -13,6 +13,11 @@ long (*const sys_table[NSYSCALLS])(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sys_table_entry(SYS_read, sys_read),
sys_table_entry(SYS_write, sys_write),
sys_table_entry(SYS_sleep, sys_sleep),
sys_table_entry(SYS_malloc, sys_malloc),
sys_table_entry(SYS_free, sys_free),
sys_table_entry(SYS_exec, sys_exec),
sys_table_entry(SYS_exit, sys_exit),
sys_table_entry(SYS_waitpid, sys_waitpid),
};

long sys_stub(void)
189 kernel/task.c (new file)
@@ -0,0 +1,189 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <arch-generic/do_switch.h>

#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/mutex.h>
#include <ardix/sched.h>
#include <ardix/syscall.h>
#include <ardix/task.h>
#include <ardix/userspace.h>
#include <ardix/util.h>

#include <errno.h>
#include <toolchain.h>

#include <arch/debug.h>

static void task_kevent_destroy(struct kent *kent)
{
struct kevent *kevent = container_of(kent, struct kevent, kent);
struct task_kevent *task_kevent = container_of(kevent, struct task_kevent, kevent);
kfree(task_kevent);
}

void task_kevent_create_and_dispatch(struct task *task, int status)
{
struct task_kevent *event = kmalloc(sizeof(*event));
if (event == NULL)
return; /* TODO: we're fucked here */

event->kevent.kent.parent = &task->kent;
event->kevent.kent.destroy = task_kevent_destroy;
event->kevent.kind = KEVENT_TASK;
kent_init(&event->kevent.kent);
event->task = task;
event->status = status;
kevent_dispatch(&event->kevent);
}

struct dead_child {
struct list_head link; /* -> task::pending_sigchld */
struct task *child;
int status;
};

__noreturn void sys_exit(int status)
{
struct task *task = current;

struct task *parent = task_parent(task);
task_kevent_create_and_dispatch(task, status);

if (parent->state != TASK_WAITPID) {
/*
* atomic_kmalloc wouldn't actually be needed here, but we use
* it anyway because it has a separate heap which is more likely
* to have an emergency reserve of memory. A failing allocation
* would *really* be inconvenient here.
*/
struct dead_child *entry = atomic_kmalloc(sizeof(*entry));
if (entry == NULL) {
schedule(); /* TODO: we're severely fucked here */
}

entry->child = task;

mutex_lock(&parent->pending_sigchld_lock);
list_insert(&parent->pending_sigchld, &entry->link);
mutex_unlock(&parent->pending_sigchld_lock);
}

task->state = TASK_DEAD;

schedule();

/* we should never get here, this is only needed to make gcc happy */
while (1);
}

struct task_kevent_extra {
struct task *parent;

/* this is a return value from the listener */
struct {
struct task *child;
int status;
} ret;
};

static int task_kevent_listener(struct kevent *event, void *_extra)
{
struct task_kevent_extra *extra = _extra;
struct task_kevent *task_kevent = container_of(event, struct task_kevent, kevent);
struct task *child = task_kevent->task;

if (extra->parent != task_parent(child))
return KEVENT_CB_NONE;

extra->parent->state = TASK_QUEUE;

extra->ret.child = child;
extra->ret.status = task_kevent->status;

return KEVENT_CB_STOP | KEVENT_CB_LISTENER_DEL;
}

/* manually poll for dead children if there is no memory for a kevent listener */
static int waitpid_poll(struct task *parent)
{
/* mutex is already locked here */

while (list_is_empty(&parent->pending_sigchld)) {
mutex_unlock(&parent->pending_sigchld_lock);
/*
* TODO: This has to be gotten rid of when the scheduler
* isn't a simple round robin one anymore!
*/
yield(TASK_QUEUE);
mutex_lock(&parent->pending_sigchld_lock);
}

mutex_unlock(&parent->pending_sigchld_lock);

struct dead_child *dead_child = list_first_entry(&parent->pending_sigchld,
struct dead_child,
link);
int status = dead_child->status;

task_put(dead_child->child);
list_delete(&dead_child->link);
kfree(dead_child);

return status;
}

/* use kevent system to wait for dying children */
static int waitpid_yield(struct task *parent)
{
/* mutex is already locked here */

struct task_kevent_extra extra = {
.parent = parent,
};

if (kevent_listener_add(KEVENT_TASK, task_kevent_listener, &extra) == NULL)
return waitpid_poll(parent);

mutex_unlock(&parent->pending_sigchld_lock);
yield(TASK_WAITPID);

/* extra.ret is set by task_kevent_listener */
task_put(extra.ret.child);
return extra.ret.status;
}

long sys_waitpid(pid_t pid, int __user *stat_loc, int options)
{
struct task *parent = current;

/*
* both waitpid_yield and waitpid_poll expect the mutex
* to be locked and will unlock it before returning
*/
mutex_lock(&parent->pending_sigchld_lock);

int status;
if (list_is_empty(&parent->pending_sigchld))
status = waitpid_yield(parent);
else
status = waitpid_poll(parent);

copy_to_user(stat_loc, &status, sizeof(*stat_loc));
return 0;
}

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
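The waitpid path shows the general kevent listener pattern: register a callback with per-listener context, filter inside the callback, and tell the dispatcher what to do through the return flags. A stripped-down sketch of the same pattern; the listener itself is hypothetical, only the API calls and flags are taken from this file:

```c
#include <ardix/kevent.h>

/* hypothetical listener: fires on the next task kevent and removes itself */
static int any_task_event(struct kevent *event, void *extra)
{
	(void)event;
	(void)extra;
	return KEVENT_CB_STOP | KEVENT_CB_LISTENER_DEL;
}

static void watch_next_task_event(void)
{
	/* registration mirrors waitpid_yield(); NULL means no memory was available */
	if (kevent_listener_add(KEVENT_TASK, any_task_event, NULL) == NULL) {
		/* caller would fall back to polling, as waitpid_poll() does */
	}
}
```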
@@ -10,10 +10,11 @@ target_sources(ardix_lib PRIVATE
ctype.c
errno.c
list.c
malloc.c
printf.c
stdlib.c
string.c
unistd.c
wait.c
)

# This file is part of Ardix.
@@ -1,13 +1,12 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <ardix/malloc.h>

#include <errno.h>
/* Using GCC's stdarg.h is recommended even with -nodefaultlibs and -fno-builtin */
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

@@ -136,7 +135,7 @@ static int fmt_handle_uint(struct printf_buf *buf, unsigned int u)
return ret;
}

static inline int fmt_handle_int(struct printf_buf *buf, int i)
static int fmt_handle_int(struct printf_buf *buf, int i)
{
int ret = 0;
char minus = '-';

@@ -161,7 +160,7 @@ static inline int fmt_handle_int(struct printf_buf *buf, int i)
* @param args: A pointer to the varargs list. Will be manipulated.
* @returns The amount of bytes written, or a negative POSIX error code.
*/
static inline int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
static int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
{
int ret = 0;
union {
52 lib/stdlib.c (new file)
@@ -0,0 +1,52 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <ardix/syscall.h>

#include <stddef.h>
#include <stdlib.h>

/*
* kmalloc() and free() are system calls in Ardix because the heap is shared
* among all tasks and locked using a mutex. If the lock is already claimed,
* the `mutex_lock()` routine will suspend the current task until the lock
* becomes available to the current process. However, this can only happen
* when we already are in kernel space.
*/

void *malloc(size_t size)
{
if (size == 0) {
return NULL;
} else {
long int intptr = syscall(SYS_malloc, (sysarg_t)size);
return (void *)intptr;
}
}

void *calloc(size_t nmemb, size_t size)
{
size_t total = nmemb * size;
if (nmemb != 0 && total / nmemb != size)
return NULL; /* overflow check as mandated by POSIX.1 */
long int intptr = syscall(SYS_malloc, (sysarg_t)total);
return (void *)intptr;
}

void free(void *ptr)
{
if (ptr != NULL)
syscall(SYS_free, (sysarg_t)ptr);
}

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
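With allocation routed through `SYS_malloc` and `SYS_free`, userspace keeps the familiar libc-style interface. A minimal usage sketch (the element count and values are arbitrary, and the sketch does not rely on the returned memory being zeroed):

```c
#include <stdlib.h>

int demo(void)
{
	int *values = calloc(16, sizeof(*values));	/* routed through SYS_malloc */
	if (values == NULL)
		return -1;	/* zero size, overflow, or the kernel heap is exhausted */

	values[0] = 42;
	free(values);	/* SYS_free; passing NULL is a no-op */
	return 0;
}
```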
13 lib/unistd.c
@@ -19,6 +19,19 @@ ssize_t sleep(unsigned long int millis)
return syscall(SYS_sleep, (sysarg_t)millis);
}

pid_t exec(int (*entry)(void))
{
return (pid_t)syscall(SYS_exec, (sysarg_t)entry);
}

void exit(int status)
{
syscall(SYS_exit, status);

/* make gcc happy */
while (1);
}

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -1,23 +1,12 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <ardix/atomic.h>
#include <ardix/atom.h>
#include <ardix/syscall.h>

static ATOM(atomic_context);
#include <sys/wait.h>

void atomic_enter(void)
pid_t waitpid(pid_t pid, int *stat_loc, int options)
{
atom_get(&atomic_context);
}

void atomic_leave(void)
{
atom_put(&atomic_context);
}

int is_atomic(void)
{
return atom_count(&atomic_context);
return (pid_t)syscall(SYS_waitpid, (sysarg_t)pid, (sysarg_t)stat_loc, (sysarg_t)options);
}

/*
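Together, `exec()`, `exit()`, and the new `waitpid()` wrapper give userspace a minimal task lifecycle. A sketch of how the three compose; the child entry point is made up, and the declarations of `exec()`/`exit()` are assumed to live in `unistd.h` next to `sleep()`:

```c
#include <sys/wait.h>
#include <unistd.h>

static int child_main(void)
{
	/* ... child work ... */
	exit(0);	/* SYS_exit */
	return 0;	/* not reached, keeps gcc quiet */
}

static int spawn_and_wait(void)
{
	pid_t pid = exec(child_main);	/* SYS_exec returns the new pid */
	if (pid < 0)
		return (int)pid;	/* -EAGAIN (task table full) or -ENOMEM */

	int status;
	waitpid(pid, &status, 0);	/* SYS_waitpid blocks until the child exits */
	return status;
}
```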
@@ -16,7 +16,7 @@ set(CONFIG_IOMEM_SIZE 8192 CACHE STRING "I/O memory size in bytes")

set(CONFIG_SCHED_MAXTASK 8 CACHE STRING "Maximum number of running tasks")

set(CONFIG_SCHED_FREQ 1000 CACHE STRING "Task switch frequency in Hz")
set(CONFIG_SCHED_FREQ 200 CACHE STRING "Task switch frequency in Hz")

set(CONFIG_SERIAL_BAUD 115200 CACHE STRING "Default serial baud rate")
set_property(CACHE CONFIG_SERIAL_BAUD PROPERTY STRINGS

@@ -27,7 +27,7 @@ set(CONFIG_SERIAL_BUFSZ 256 CACHE STRING "Default serial buffer size in bytes")

set(CONFIG_PRINTF_BUFSZ 64 CACHE STRING "Default buffer size for printf() and friends")

option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" OFF)
option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" ${DEBUG})

# This file is part of Ardix.
# Copyright (c) 2021 Felix Kopp <owo@fef.moe>.