Compare commits


8 Commits
paging...main

Author SHA1 Message Date
anna e86ef2acbd
fix atomics (finally) 2 years ago
anna 5d2539fc4a
kent: avoid recursion 2 years ago
anna 3e35afcfa9
atom: redesign API 2 years ago
anna f293c6661e
enable debug features by default if DEBUG is on 2 years ago
anna 4177931774
arch/at91sam3x8e: fix vector_table const decl 2 years ago
anna e80a6cb630
update placeholder name in comment 2 years ago
anna ad76275721
fix remaining compiler warnings
This commit removes all inline annotations that
the compiler does not actually inline, and inserts
pragma directives to selectively disable warnings
where necessary.
2 years ago
anna adccbef80d
arch/at91sam3x8e: fix compile error
Enabling CONFIG_CHECK_SYSCALL_SOURCE resulted in a
compile error because the corresponding code block
used an outdated variable name.  This commit fixes
the issue.
2 years ago
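The "fix remaining compiler warnings" commit trades forced inlining for narrowly scoped pragma suppressions. A minimal, self-contained sketch of that pattern follows (the enum and function are made up for illustration; the real instances in the hunks below wrap `print_err_msg()` for `-Wswitch` and `blk_prev()` for `-Warray-bounds`/`-Wzero-length-bounds`):

```c
/* Sketch only: silence one specific warning for one function, then restore
 * the previous diagnostic state instead of disabling the warning globally. */
enum irq { IRQ_UART, IRQ_SPI, IRQ_TIMER };

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"   /* only some enumerators handled on purpose */
static const char *irq_name(enum irq irq)
{
        switch (irq) {
        case IRQ_UART:
                return "UART";
        case IRQ_SPI:
                return "SPI";
        }
        return "unknown";
}
#pragma GCC diagnostic pop
```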

.gitignore (vendored): 1 line changed

@ -1,4 +1,5 @@
build/
cmake-build-*/
.vscode/.*

@ -44,7 +44,7 @@ To build the EEPROM image, execute the following command.
Pass any other configuration options you want to set to the first command or use `cmake-gui(1)`.
```shell
# Replace <target> with one of the target architectures from the list above
# Replace <arch> with one of the target architectures from the list above
# This will default to at91sam3x8e (Arduino Due)
cmake -DARCH=<arch> -B build -S .
cmake --build build
```

@ -19,9 +19,7 @@ configure_file(
target_sources(ardix_arch PRIVATE
arch_init.c
atom_get_put.S
atom.c
atomic.c
atom.S
do_switch.S
entry.c
handle_fault.c

@ -0,0 +1,114 @@
/* See the end of this file for copyright, license, and warranty information. */
.include "asm.S"
.text
/* int _atom_add(volatile int *atom, int val) */
func_begin _atom_add
push {r4}
1: ldrex r2, [r0] /* int old = __ldrex(atom) */
add r3, r2, r1 /* int new = old + val */
strex r4, r3, [r0] /* int err = __strex(atom, new) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1 */
dmb /* data memory barrier */
mov r0, r2 /* return old */
pop {r4}
bx lr
func_end _atom_add
/* these are the same as _atom_add except for the instruction
* in the LDREX/STREX pair, so i'm not gonna annotate them */
func_begin _atom_sub
push {r4}
1: ldrex r2, [r0]
sub r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_sub
func_begin _atom_and
push {r4}
1: ldrex r2, [r0]
and r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_and
func_begin _atom_or
push {r4}
1: ldrex r2, [r0]
orr r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_or
func_begin _atom_xor
push {r4}
1: ldrex r2, [r0]
eor r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_xor
/* int _atom_xchg(volatile int *atom, int val) */
func_begin _atom_xchg
ldrex r2, [r0] /* int old = __ldrex(atom) */
strex r3, r1, [r0] /* int err = __strex(atom, val) */
teq r3, #0 /* if (err) */
bne _atom_xchg /* goto _atom_xchg */
dmb /* data memory barrier */
mov r0, r2 /* return old */
bx lr
func_end _atom_xchg
/* int _atom_cmpxchg(volatile int *atom, int cmp, int val) */
func_begin _atom_cmpxchg
push {r4}
1: mov r4, #0 /* int err = 0 */
ldrex r3, [r0] /* int old = __ldrex(atom) */
teq r3, r1 /* if (old == cmp) */
it eq
strexeq r4, r1, [r0] /* err = __strex(atom, val) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1b */
dmb /* data memory barrier */
mov r0, r3 /* return old */
pop {r4}
bx lr
func_end _atom_cmpxchg
/*
* This file is part of Ardix.
* Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
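For readers not fluent in Thumb assembly: each routine above is a standard LDREX/STREX retry loop. A rough C equivalent of `_atom_add`, sketched here with GCC's `__atomic` builtins (not part of the patch, purely an illustration of the semantics):

```c
/* Illustration only: what _atom_add implements by hand.  The weak
 * compare-exchange corresponds to an LDREX/STREX pair that may fail
 * spuriously, hence the retry loop; SEQ_CST ordering implies the barrier
 * the assembly gets from DMB. */
static int atom_add_equiv(volatile int *atom, int val)
{
        int old, new;

        do {
                old = *atom;                    /* ldrex r2, [r0]   */
                new = old + val;                /* add   r3, r2, r1 */
        } while (!__atomic_compare_exchange_n(atom, &old, new, 1,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        return old;                             /* value before the addition */
}
```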

@ -1,27 +0,0 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/atom.h>
#include <stddef.h>
void atom_init(atom_t *atom)
{
atom->count = 0;
}
int atom_count(atom_t *atom)
{
return atom->count;
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

@ -1,46 +0,0 @@
/* See the end of this file for copyright, license, and warranty information. */
.include "asm.S"
.text
/* int _atom_get(int *count); */
func_begin _atom_get
ldrex r1, [r0] /* int tmp = atom->count */
add r2, r1, #1 /* int newval = tmp + 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_get /* -> goto _atom_get to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr
func_end _atom_get
/* int _atom_put(int *count); */
func_begin _atom_put
ldrex r1, [r0] /* int tmp = atom->count */
sub r2, r1, #1 /* int newval = tmp - 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_put /* -> goto _atom_put to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr
func_end _atom_put
/*
* This file is part of Ardix.
* Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

@ -1,34 +0,0 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/atomic.h>
#include <ardix/atom.h>
static ATOM(atomic_context);
void atomic_enter(void)
{
atom_get(&atomic_context);
}
void atomic_leave(void)
{
atom_put(&atomic_context);
}
int is_atomic(void)
{
return atom_count(&atomic_context);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

@ -30,8 +30,8 @@ void enter_syscall(struct exc_context *context)
* the instructions are always 2-byte aligned. Additionally, the PC
* points to the instruction *after* the SVC, not SVC itself.
*/
if (((uintptr_t)regs->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
sc_set_rval(regs, -EACCES);
if (((uintptr_t)context->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
sc_set_rval(context, -EACCES);
return;
}
# endif

@ -17,7 +17,7 @@ static void uart_write_sync(const char *s)
}
/** Setup UART to manual byte-by-byte control */
static inline void uart_emergency_setup(void)
static void uart_emergency_setup(void)
{
UART->UART_IDR = 0xffffffff;
@ -34,7 +34,7 @@ static inline void uart_emergency_setup(void)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
static inline void print_err_msg(enum irqno irqno)
static void print_err_msg(enum irqno irqno)
{
uart_write_sync("\n\n########## SERIOUS BRUH MOMENT! ##########\n");
@ -100,9 +100,9 @@ static void print_regs(struct exc_context *context)
print_reg("R10", context->r10);
print_reg("R11", context->r11);
print_reg("R12", context->sp->r12);
print_reg("SP", *(word_t *)&context->sp);
print_reg("LR", *(word_t *)&context->sp->lr);
print_reg("PC", *(word_t *)&context->sp->pc);
print_reg("SP", (word_t)context->sp);
print_reg("LR", (word_t)context->sp->lr);
print_reg("PC", (word_t)context->sp->pc);
print_reg("xPSR", context->sp->psr);
}

@ -0,0 +1,28 @@
#pragma once
#include <ardix/types.h>
#include <toolchain.h>
static __always_inline word_t _atomic_enter(void)
{
word_t primask;
__asm__ volatile(
" mrs %0, primask \n"
" cpsid i \n"
: "=r"(primask));
return primask;
}
static __always_inline void _atomic_restore(word_t context)
{
if (!(context & 1))
__asm__ volatile("cpsie i");
}
static inline int _is_atomic(void)
{
int primask;
__asm__ volatile("mrs %0, primask" : "=r"(primask));
return primask & 1;
}
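These primitives replace the old counter-based atomic_enter()/atomic_leave(): the caller keeps the returned PRIMASK value and passes it back, so critical sections nest and only the outermost restore re-enables interrupts. The kernel-facing atomic_enter()/atomic_restore() wrappers added in include/ardix/atomic.h below simply forward to these; kfree(), mutex_lock(), and schedule() are switched to this pattern later in the diff. A sketch of the nesting behaviour (function names are illustrative):

```c
#include <arch/atomic.h>

/* Sketch only.  The inner section sees PRIMASK already set, so its restore
 * is a no-op and interrupts stay masked until the outer section finishes. */
static void touch_shared_state(void)
{
        word_t inner = _atomic_enter();   /* PRIMASK already 1: no change       */
        /* ... modify data shared with interrupt handlers ... */
        _atomic_restore(inner);           /* bit 0 was set: interrupts stay off */
}

static void outer_critical_section(void)
{
        word_t outer = _atomic_enter();   /* PRIMASK was 0: interrupts now off  */
        touch_shared_state();             /* nests safely                       */
        _atomic_restore(outer);           /* bit 0 was clear: cpsie i           */
}
```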

@ -67,7 +67,8 @@ void irq_can1(void) __weak __alias(_stub_handler);
extern uint32_t _estack;
__section(.vectors) const void *exception_table[] = {
__section(.vectors)
void *const exception_table[] = {
&_estack, /* initial SP value (stack grows down) */
handle_reset, /* reset vector */
NULL, /* reserved */
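The vector-table fix is purely about where `const` binds: `const void *tbl[]` declares an array of mutable pointers to const data, while `void *const tbl[]` declares an array of const pointers, making the table itself read-only, which is presumably what a table placed in the `.vectors` section was meant to be. A tiny illustration (identifiers made up):

```c
void handler_a(void);
void handler_b(void);

/* array of pointers-to-const: the elements can still be reassigned */
const void *table_old[] = { handler_a, handler_b };

/* array of const pointers: the elements themselves are immutable */
void *const table_new[] = { handler_a, handler_b };

void demo(void)
{
        table_old[0] = handler_b;       /* compiles: only the pointee is const      */
        /* table_new[0] = handler_b; */ /* error: assignment of read-only location  */
}
```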

@ -5,28 +5,119 @@
#include <ardix/types.h>
#include <toolchain.h>
#define ATOM(name) atom_t name = { .count = 0, }
#define ATOM_DEFINE(val) { ._val = val, }
#define ATOM(name, val) atom_t name = ATOM_DEFINE(val)
void atom_init(atom_t *atom);
static __always_inline void atom_init(atom_t *atom, int val)
{
atom->_val = val;
}
static __always_inline int atom_read(const atom_t *atom)
{
return atom->_val;
}
extern int _atom_get(int *count);
extern int _atom_put(int *count);
/*
* These are implemented in arch/<arch>/atom.S
*/
extern int _atom_add(volatile int *atom, int val);
extern int _atom_sub(volatile int *atom, int val);
extern int _atom_and(volatile int *atom, int val);
extern int _atom_or(volatile int *atom, int val);
extern int _atom_xor(volatile int *atom, int val);
extern int _atom_xchg(volatile int *atom, int val);
extern int _atom_cmpxchg(volatile int *atom, int cmp, int val);
__always_inline int atom_get(atom_t *atom)
/**
* @brief Atomically add `val` to `atom`.
* @return The old value of `atom`, before the addition
*/
static __always_inline int atom_add(atom_t *atom, int val)
{
return _atom_get(&atom->count);
return _atom_add(&atom->_val, val);
}
__always_inline int atom_put(atom_t *atom)
/**
* @brief Atomically subtract `val` from `atom`.
* @return The old value of `atom` before the subtraction
*/
static __always_inline int atom_sub(atom_t *atom, int val)
{
return _atom_put(&atom->count);
return _atom_sub(&atom->_val, val);
}
int atom_count(atom_t *atom);
/**
* @brief Atomically do a bitwise AND of `val` and `atom`.
* @return The old value of `atom`, before the AND
*/
static __always_inline int atom_and(atom_t *atom, int val)
{
return _atom_and(&atom->_val, val);
}
/**
* @brief Atomically do a bitwise OR of `val` and `atom`.
* @return The old value of `atom`, before the OR
*/
static __always_inline int atom_or(atom_t *atom, int val)
{
return _atom_or(&atom->_val, val);
}
/**
* @brief Atomically do a bitwise XOR of `val` and `atom`.
* @return The old value of `atom`, before the XOR
*/
static __always_inline int atom_xor(atom_t *atom, int val)
{
return _atom_xor(&atom->_val, val);
}
/**
* @brief Atomically increment `atom` by 1.
* @return The old value of `atom`, before the increment
*/
static __always_inline int atom_get(atom_t *atom)
{
return _atom_add(&atom->_val, 1);
}
/**
* @brief Atomically decrement `atom` by 1.
* @return The old value of `atom`, before the decrement
*/
static __always_inline int atom_put(atom_t *atom)
{
return _atom_sub(&atom->_val, 1);
}
/**
* @brief Atomically exchange the value of `atom` with `val`.
* @return The old value of `atom`
*/
static __always_inline int atom_xchg(atom_t *atom, int val)
{
return _atom_xchg(&atom->_val, val);
}
/**
* @brief Atomically compare the value of `atom` with `cmp` and,
* if found to be equal, exchange it with `val`.
* @param atom atom to perform the operation on
* @param cmp value to compare the atom with
* @param val new value to be written to atom if it is equal to `cmp`
* @return The old value of `atom`
*/
static __always_inline int atom_cmpxchg(atom_t *atom, int cmp, int val)
{
return _atom_cmpxchg(&atom->_val, cmp, val);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
* Copyright (c) 2020, 2021, 2022 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
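The reworked atom API is what the reference-counting helpers later in this diff (kent_get()/kent_put(), device_get()/device_put(), and friends) build on. A minimal usage sketch of that pattern; `struct object` and `object_destroy()` are made up for illustration:

```c
#include <ardix/atom.h>

struct object {
        atom_t refcount;
        /* ... payload ... */
};

void object_destroy(struct object *obj);        /* assumed to exist */

static void object_init(struct object *obj)
{
        atom_init(&obj->refcount, 1);           /* creator holds the first reference   */
}

static void object_get(struct object *obj)
{
        atom_get(&obj->refcount);               /* atomic increment, old value ignored */
}

static void object_put(struct object *obj)
{
        /* atom_put() returns the value *before* the decrement, so an old value
         * of 1 means this call dropped the last reference (cf. kent_put()). */
        if (atom_put(&obj->refcount) == 1)
                object_destroy(obj);
}
```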

@ -2,6 +2,10 @@
#pragma once
#include <arch/atomic.h>
#include <toolchain.h>
/**
* Enter atomic context.
*
@ -9,13 +13,22 @@
* reference counter that is checked in the scheduler interrupt routine before
* performing the context switch.
*/
void atomic_enter(void);
static __always_inline word_t atomic_enter(void)
{
return _atomic_enter();
}
/** Leave atomic context. */
void atomic_leave(void);
static __always_inline void atomic_restore(word_t context)
{
_atomic_restore(context);
}
/** Return a nonzero value if the current process is in atomic context. */
int is_atomic(void);
static __always_inline int is_atomic(void)
{
return _is_atomic();
}
/*
* This file is part of Ardix.

@ -77,13 +77,13 @@ int devices_init(void);
int device_init(struct device *dev);
/** @brief Increment a device's reference counter. */
__always_inline void device_get(struct device *dev)
static __always_inline void device_get(struct device *dev)
{
kent_get(&dev->kent);
}
/** @brief Decrement a device's reference counter. */
__always_inline void device_put(struct device *dev)
static __always_inline void device_put(struct device *dev)
{
kent_put(&dev->kent);
}

@ -25,10 +25,16 @@ struct dmabuf {
struct dmabuf *dmabuf_create(struct device *dev, size_t len);
/** Increment a DMA buffer's reference counter. */
void dmabuf_get(struct dmabuf *buf);
static __always_inline void dmabuf_get(struct dmabuf *buf)
{
kent_get(&buf->kent);
}
/** Decrement a DMA buffer's reference counter. */
void dmabuf_put(struct dmabuf *buf);
static __always_inline void dmabuf_put(struct dmabuf *buf)
{
kent_put(&buf->kent);
}
/*
* This file is part of Ardix.

@ -48,7 +48,10 @@ int kent_init(struct kent *kent);
*
* @param kent: The kent.
*/
void kent_get(struct kent *kent);
static __always_inline void kent_get(struct kent *kent)
{
atom_get(&kent->refcount);
}
/**
* Decrement the reference counter.

@ -104,12 +104,12 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
*/
void kevent_listener_del(struct kevent_listener *listener);
__always_inline void kevent_get(struct kevent *event)
static __always_inline void kevent_get(struct kevent *event)
{
kent_get(&event->kent);
}
__always_inline void kevent_put(struct kevent *event)
static __always_inline void kevent_put(struct kevent *event)
{
kent_put(&event->kent);
}

@ -39,14 +39,12 @@ __always_inline void spin_init(spin_t *spin)
__always_inline void spin_lock(spin_t *spin)
{
atomic_enter();
_spin_lock(&spin->lock);
}
__always_inline void spin_unlock(spin_t *spin)
{
_spin_unlock(&spin->lock);
atomic_leave();
}
__always_inline int spin_trylock(spin_t *spin)

@ -55,17 +55,17 @@ struct task {
pid_t pid;
};
__always_inline void task_get(struct task *task)
static __always_inline void task_get(struct task *task)
{
kent_get(&task->kent);
}
__always_inline void task_put(struct task *task)
static __always_inline void task_put(struct task *task)
{
kent_put(&task->kent);
}
__always_inline struct task *task_parent(struct task *task)
static inline struct task *task_parent(struct task *task)
{
if (task->pid == 0)
return NULL;

@ -16,9 +16,9 @@
/** Process identifier. */
typedef _PID_TYPE_ pid_t;
/** Simple atomic reference counter */
/** Simple atomic integer */
typedef struct {
int count;
volatile int _val;
} atom_t;
#include <arch/hardware.h>

@ -40,16 +40,6 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
return buf;
}
void dmabuf_get(struct dmabuf *buf)
{
kent_get(&buf->kent);
}
void dmabuf_put(struct dmabuf *buf)
{
kent_put(&buf->kent);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

@ -1,9 +1,7 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/atom.h>
#include <ardix/malloc.h>
#include <ardix/kent.h>
#include <ardix/list.h>
#include <errno.h>
#include <stddef.h>
@ -19,8 +17,7 @@ int kent_root_init(void)
kent_root->parent = NULL;
kent_root->destroy = NULL;
atom_init(&kent_root->refcount);
kent_get(kent_root);
atom_init(&kent_root->refcount, 1);
return 0;
}
@ -31,27 +28,20 @@ int kent_init(struct kent *kent)
return -EFAULT;
kent_get(kent->parent);
atom_init(&kent->refcount);
kent_get(kent);
atom_init(&kent->refcount, 1);
return 0;
}
void kent_get(struct kent *kent)
{
atom_get(&kent->refcount);
}
void kent_put(struct kent *kent)
{
struct kent *parent = kent->parent;
if (atom_put(&kent->refcount) == 0) {
do {
struct kent *parent = kent->parent;
if (atom_put(&kent->refcount) != 1)
break;
kent->destroy(kent);
if (parent != NULL)
kent_put(parent);
}
kent = parent;
} while (kent != NULL);
}
/*

@ -48,7 +48,7 @@ void kevents_init(void)
}
/* called from scheduler context only */
static inline void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
static void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
{
struct kevent *event, *tmp_event;

@ -167,7 +167,7 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *bottom, s
long sys_malloc(size_t size)
{
void *ptr = kmalloc(size);
return *(long *)&ptr;
return (long)ptr;
}
void sys_free(void *ptr)
@ -296,7 +296,7 @@ void kfree(void *ptr)
if (!blk_is_alloc(blk))
__breakpoint;
atomic_enter();
word_t context = atomic_enter();
atomic_heap_free += blk_get_size(blk);
blk_clear_alloc(blk);
blk = blk_try_merge(&atomic_heap, blk);
@ -305,7 +305,7 @@ void kfree(void *ptr)
memset(&blk->data[MIN_SIZE], 0xaa, blk_get_size(blk) - MIN_SIZE);
# endif
atomic_leave();
atomic_restore(context);
} else {
__breakpoint;
}
@ -348,8 +348,8 @@ static struct memblk *blk_try_merge(struct list_head *heap, struct memblk *blk)
}
static struct memblk *blk_merge(struct list_head *heap,
struct memblk *bottom,
struct memblk *top)
struct memblk *bottom,
struct memblk *top)
{
size_t bottom_size = blk_get_size(bottom);
size_t top_size = blk_get_size(top);
@ -368,7 +368,8 @@ static struct memblk *blk_merge(struct list_head *heap,
return bottom;
}
static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk, size_t slice_size)
static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk,
size_t slice_size)
{
list_delete(&blk->list);
@ -412,7 +413,7 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk, size
return blk;
}
static inline size_t round_alloc_size_up(size_t size)
static size_t round_alloc_size_up(size_t size)
{
size_t rounded = (size / MIN_SIZE) * MIN_SIZE;
if (rounded < size)
@ -436,7 +437,7 @@ static void blk_set_size(struct memblk *blk, size_t size)
blk->endsz[words] |= size;
}
static inline void blk_set_alloc(struct memblk *blk)
static void blk_set_alloc(struct memblk *blk)
{
size_t words = blk->size / sizeof(blk->size);
@ -444,7 +445,7 @@ static inline void blk_set_alloc(struct memblk *blk)
blk->endsz[words] |= ALLOC_FLAG;
}
static inline void blk_clear_alloc(struct memblk *blk)
static void blk_clear_alloc(struct memblk *blk)
{
size_t words = blk->size / sizeof(blk->size);
@ -472,32 +473,38 @@ static inline int blk_is_border_start(struct memblk *blk)
return blk->size & BORDER_FLAG;
}
static inline void blk_set_border_end(struct memblk *blk)
static void blk_set_border_end(struct memblk *blk)
{
size_t words = blk->size / sizeof(blk->size);
blk->endsz[words] |= BORDER_FLAG;
}
static inline void blk_clear_border_end(struct memblk *blk)
static void blk_clear_border_end(struct memblk *blk)
{
size_t words = blk->size / sizeof(blk->size);
blk->endsz[words] &= ~BORDER_FLAG;
}
static inline int blk_is_border_end(struct memblk *blk)
static int blk_is_border_end(struct memblk *blk)
{
size_t words = blk->size / sizeof(blk->size);
return blk->endsz[words] & BORDER_FLAG;
}
static inline struct memblk *blk_prev(struct memblk *blk)
static struct memblk *blk_prev(struct memblk *blk)
{
if (blk_is_border_start(blk))
return NULL;
/* gcc does not like accessing index -1 of zero-length arrays */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wzero-length-bounds"
return (void *)blk - (blk->prevsz[-1] & SIZE_MSK) - OVERHEAD;
#pragma GCC diagnostic pop
}
static inline struct memblk *blk_next(struct memblk *blk)
static struct memblk *blk_next(struct memblk *blk)
{
if (blk_is_border_end(blk))
return NULL;

@ -24,9 +24,11 @@ void mutex_lock(struct mutex *mutex)
.task = current,
};
word_t context = atomic_enter();
spin_lock(&mutex->wait_queue_lock);
list_insert(&mutex->wait_queue, &entry.link);
spin_unlock(&mutex->wait_queue_lock);
atomic_restore(context);
yield(TASK_LOCKWAIT);
}
@ -36,12 +38,14 @@ void mutex_unlock(struct mutex *mutex)
{
struct mutex_wait *waiter = NULL;
word_t context = atomic_enter();
spin_lock(&mutex->wait_queue_lock);
if (!list_is_empty(&mutex->wait_queue)) {
waiter = list_first_entry(&mutex->wait_queue, struct mutex_wait, link);
list_delete(&waiter->link);
}
spin_unlock(&mutex->wait_queue_lock);
atomic_restore(context);
if (waiter != NULL) {
waiter->task->state = TASK_QUEUE;

@ -79,7 +79,11 @@ int sched_init(void)
memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
kernel_task.bottom = &_estack;
/* gcc thinks &_estack is an array of size 1 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
#pragma GCC diagnostic pop
kernel_task.pid = 0;
kernel_task.state = TASK_RUNNING;
@ -121,10 +125,14 @@ out:
/**
* @brief Determine whether the specified task is a candidate for execution.
*
* This function is only called once, from `schedule()`, and is performance-critical,
* hence the `__always_inline` attribute.
*
* @param task The task
* @returns whether `task` could be run next
*/
static inline bool can_run(const struct task *task)
__always_inline
static bool can_run(const struct task *task)
{
switch (task->state) {
case TASK_SLEEP:
@ -144,7 +152,7 @@ static inline bool can_run(const struct task *task)
void schedule(void)
{
atomic_enter();
word_t context = atomic_enter();
struct task *old = current;
pid_t nextpid = old->pid;
@ -178,7 +186,7 @@ void schedule(void)
new->last_tick = tick;
current = new;
atomic_leave();
atomic_restore(context);
if (old != new)
do_switch(old, new);

@ -135,7 +135,7 @@ static int fmt_handle_uint(struct printf_buf *buf, unsigned int u)
return ret;
}
static inline int fmt_handle_int(struct printf_buf *buf, int i)
static int fmt_handle_int(struct printf_buf *buf, int i)
{
int ret = 0;
char minus = '-';
@ -160,7 +160,7 @@ static inline int fmt_handle_int(struct printf_buf *buf, int i)
* @param args: A pointer to the varargs list. Will be manipulated.
* @returns The amount of bytes written, or a negative POSIX error code.
*/
static inline int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
static int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
{
int ret = 0;
union {

@ -19,7 +19,7 @@ void *malloc(size_t size)
return NULL;
} else {
long int intptr = syscall(SYS_malloc, (sysarg_t)size);
return *(void **)&intptr;
return (void *)intptr;
}
}
@ -29,7 +29,7 @@ void *calloc(size_t nmemb, size_t size)
if (nmemb != 0 && total / nmemb != size)
return NULL; /* overflow check as mandated by POSIX.1 */
long int intptr = syscall(SYS_malloc, (sysarg_t)total);
return *(void **)&intptr;
return (void *)intptr;
}
void free(void *ptr)

@ -27,7 +27,7 @@ set(CONFIG_SERIAL_BUFSZ 256 CACHE STRING "Default serial buffer size in bytes")
set(CONFIG_PRINTF_BUFSZ 64 CACHE STRING "Default buffer size for printf() and friends")
option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" OFF)
option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" ${DEBUG})
# This file is part of Ardix.
# Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
