Compare commits


1 commit
main...paging

Author  SHA1        Message                                Date
anna    0342739ee1  mm: add flags parameter to kmalloc()   3 years ago

.gitignore (vendored)

@@ -1,5 +1,4 @@
 build/
-cmake-build-*/
 .vscode/.*

@@ -44,7 +44,7 @@ To build the EEPROM image, execute the following command.
 Pass any other configuration options you want to set to the first command or use `cmake-gui(1)`.

 ```shell
-# Replace <arch> with one of the target architectures from the list above
+# Replace <target> with one of the target architectures from the list above
 # This will default to at91sam3x8e (Arduino Due)
 cmake -DARCH=<arch> -B build -S .
 cmake --build build

@@ -19,7 +19,9 @@ configure_file(
 target_sources(ardix_arch PRIVATE
 	arch_init.c
-	atom.S
+	atom_get_put.S
+	atom.c
+	atomic.c
 	do_switch.S
 	entry.c
 	handle_fault.c

@@ -1,114 +0,0 @@
/* See the end of this file for copyright, license, and warranty information. */
.include "asm.S"
.text
/* int _atom_add(volatile int *atom, int val) */
func_begin _atom_add
push {r4}
1: ldrex r2, [r0] /* int old = __ldrex(atom) */
add r3, r2, r1 /* int new = old + val */
strex r4, r3, [r0] /* int err = __strex(atom, new) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1 */
dmb /* data memory barrier */
mov r0, r2 /* return old */
pop {r4}
bx lr
func_end _atom_add
/* these are the same as _atom_add except for the instruction
* in the LDREX/STREX pair, so i'm not gonna annotate them */
func_begin _atom_sub
push {r4}
1: ldrex r2, [r0]
sub r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_sub
func_begin _atom_and
push {r4}
1: ldrex r2, [r0]
and r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_and
func_begin _atom_or
push {r4}
1: ldrex r2, [r0]
orr r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_or
func_begin _atom_xor
push {r4}
1: ldrex r2, [r0]
eor r3, r2, r1
strex r4, r3, [r0]
teq r4, #0
bne 1b
dmb
mov r0, r2
pop {r4}
bx lr
func_end _atom_xor
/* int _atom_xchg(volatile int *atom, int val) */
func_begin _atom_xchg
ldrex r2, [r0] /* int old = __ldrex(atom) */
strex r3, r1, [r0] /* int err = __strex(atom, val) */
teq r3, #0 /* if (err) */
bne _atom_xchg /* goto _atom_xchg */
dmb /* data memory barrier */
mov r0, r2 /* return old */
bx lr
func_end _atom_xchg
/* int _atom_cmpxchg(volatile int *atom, int cmp, int val) */
func_begin _atom_cmpxchg
push {r4}
1: mov r4, #0 /* int err = 0 */
ldrex r3, [r0] /* int old = __ldrex(atom) */
teq r3, r1 /* if (old == cmp) */
it eq
strexeq r4, r1, [r0] /* err = __strex(atom, val) */
teq r4, #0 /* if (err) */
bne 1b /* goto 1b */
dmb /* data memory barrier */
mov r0, r3 /* return old */
pop {r4}
bx lr
func_end _atom_cmpxchg
/*
* This file is part of Ardix.
* Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
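
The file deleted above implemented every primitive as a hand-written LDREX/STREX retry loop: load-exclusive the current value, compute the result, store-exclusive, and loop while the exclusive monitor reports interference, with a DMB before handing back the old value. A minimal C sketch of the same contract, using GCC's `__atomic` builtins (illustrative only, not code from this repository):

```c
/* Sketch: like the removed _atom_add/_atom_cmpxchg, these return the
 * value the atom held *before* the operation. */
static int atom_add_sketch(volatile int *atom, int val)
{
	return __atomic_fetch_add(atom, val, __ATOMIC_SEQ_CST);
}

static int atom_cmpxchg_sketch(volatile int *atom, int cmp, int val)
{
	/* on failure, cmp is rewritten with the value actually found */
	__atomic_compare_exchange_n(atom, &cmp, val, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return cmp;
}
```

Incidentally, the deleted `_atom_cmpxchg` stores `r1` (the compare value) rather than `r2` (the replacement) in its `strexeq`, so it could never actually write `val`; removing the whole family also removes that latent bug.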

@@ -0,0 +1,27 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/atom.h>
#include <stddef.h>
void atom_init(atom_t *atom)
{
atom->count = 0;
}
int atom_count(atom_t *atom)
{
return atom->count;
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

@@ -0,0 +1,46 @@
/* See the end of this file for copyright, license, and warranty information. */
.include "asm.S"
.text
/* int _atom_get(int *count); */
func_begin _atom_get
ldrex r1, [r0] /* int tmp = atom->count */
add r2, r1, #1 /* int newval = tmp + 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_get /* -> goto _atom_get to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr
func_end _atom_get
/* int _atom_put(int *count); */
func_begin _atom_put
ldrex r1, [r0] /* int tmp = atom->count */
sub r2, r1, #1 /* int newval = tmp - 1 */
strex r3, r2, [r0] /* atom->count = newval */
teq r3, #0 /* store successful? */
bne _atom_put /* -> goto _atom_put to try again if not */
dmb /* data memory barrier */
mov r0, r2 /* return newval */
bx lr
func_end _atom_put
/*
* This file is part of Ardix.
* Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
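
Note the changed return-value convention: `mov r0, r2` hands back `newval`, whereas the removed `_atom_add`/`_atom_sub` returned the value from before the operation. In C terms, again only as a sketch with GCC builtins rather than project code:

```c
static int atom_get_sketch(int *count)
{
	return __atomic_add_fetch(count, 1, __ATOMIC_SEQ_CST); /* new value */
}

static int atom_put_sketch(int *count)
{
	return __atomic_sub_fetch(count, 1, __ATOMIC_SEQ_CST); /* new value */
}
```

This is why the `kent_put()` rewrite further down tests `atom_put(...) == 0` where the old code tested `!= 1`.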

@@ -0,0 +1,34 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <ardix/atomic.h>
#include <ardix/atom.h>
static ATOM(atomic_context);
void atomic_enter(void)
{
atom_get(&atomic_context);
}
void atomic_leave(void)
{
atom_put(&atomic_context);
}
int is_atomic(void)
{
return atom_count(&atomic_context);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

@@ -30,8 +30,8 @@ void enter_syscall(struct exc_context *context)
 	 * the instructions are always 2-byte aligned.  Additionally, the PC
 	 * points to the instruction *after* the SVC, not SVC itself.
 	 */
-	if (((uintptr_t)context->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
-		sc_set_rval(context, -EACCES);
+	if (((uintptr_t)regs->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
+		sc_set_rval(regs, -EACCES);
 		return;
 	}
 # endif

@@ -17,7 +17,7 @@ static void uart_write_sync(const char *s)
 }

 /** Setup UART to manual byte-by-byte control */
-static void uart_emergency_setup(void)
+static inline void uart_emergency_setup(void)
 {
 	UART->UART_IDR = 0xffffffff;
@@ -34,7 +34,7 @@ static void uart_emergency_setup(void)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wswitch"
-static void print_err_msg(enum irqno irqno)
+static inline void print_err_msg(enum irqno irqno)
 {
 	uart_write_sync("\n\n########## SERIOUS BRUH MOMENT! ##########\n");
@@ -100,9 +100,9 @@ static void print_regs(struct exc_context *context)
 	print_reg("R10", context->r10);
 	print_reg("R11", context->r11);
 	print_reg("R12", context->sp->r12);
-	print_reg("SP", (word_t)context->sp);
-	print_reg("LR", (word_t)context->sp->lr);
-	print_reg("PC", (word_t)context->sp->pc);
+	print_reg("SP", *(word_t *)&context->sp);
+	print_reg("LR", *(word_t *)&context->sp->lr);
+	print_reg("PC", *(word_t *)&context->sp->pc);
 	print_reg("xPSR", context->sp->psr);
 }

@@ -1,28 +0,0 @@
#pragma once
#include <ardix/types.h>
#include <toolchain.h>
static __always_inline word_t _atomic_enter(void)
{
word_t primask;
__asm__ volatile(
" mrs %0, primask \n"
" cpsid i \n"
: "=r"(primask));
return primask;
}
static __always_inline void _atomic_restore(word_t context)
{
if (!(context & 1))
__asm__ volatile("cpsie i");
}
static inline int _is_atomic(void)
{
int primask;
__asm__ volatile("mrs %0, primask" : "=r"(primask));
return primask & 1;
}
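
The deleted header above implemented atomic sections by masking interrupts: `_atomic_enter()` saved PRIMASK and ran `cpsid i`, and `_atomic_restore()` re-enabled interrupts only if they had been enabled before, so nesting worked by threading the saved word through every caller. The replacement in `atomic.c` keeps interrupts enabled and merely bumps a counter that the scheduler consults before switching tasks, so nesting needs no saved state at all. A usage sketch under the new model (the calling functions are invented for illustration):

```c
#include <ardix/atomic.h>

static void inner(void)
{
	atomic_enter();		/* count 1 -> 2 */
	/* ... */
	atomic_leave();		/* count 2 -> 1 */
}

static void outer(void)
{
	atomic_enter();		/* count 0 -> 1: task switches are deferred */
	inner();		/* nests freely, no PRIMASK word to carry around */
	atomic_leave();		/* count 1 -> 0: preemption possible again */
}
```

The trade-off is that interrupt handlers still run inside such a section; only the context switch is suppressed, so code that must exclude IRQs needs a different mechanism.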

@@ -67,8 +67,7 @@ void irq_can1(void) __weak __alias(_stub_handler);
 extern uint32_t _estack;

-__section(.vectors)
-void *const exception_table[] = {
+__section(.vectors) const void *exception_table[] = {
 	&_estack,	/* initial SP value (stack grows down) */
 	handle_reset,	/* reset vector */
 	NULL,		/* reserved */

@@ -5,119 +5,28 @@
 #include <ardix/types.h>
 #include <toolchain.h>

-#define ATOM_DEFINE(val) { ._val = val, }
-#define ATOM(name, val) atom_t name = ATOM_DEFINE(val)
+#define ATOM(name) atom_t name = { .count = 0, }

-static __always_inline void atom_init(atom_t *atom, int val)
-{
-	atom->_val = val;
-}
+void atom_init(atom_t *atom);

-static __always_inline int atom_read(const atom_t *atom)
-{
-	return atom->_val;
-}
-
-/*
- * These are implemented in arch/<arch>/atom.S
- */
-extern int _atom_add(volatile int *atom, int val);
-extern int _atom_sub(volatile int *atom, int val);
-extern int _atom_and(volatile int *atom, int val);
-extern int _atom_or(volatile int *atom, int val);
-extern int _atom_xor(volatile int *atom, int val);
-extern int _atom_xchg(volatile int *atom, int val);
-extern int _atom_cmpxchg(volatile int *atom, int cmp, int val);
+extern int _atom_get(int *count);
+extern int _atom_put(int *count);

-/**
- * @brief Atomically add `val` to `atom`.
- * @return The old value of `atom`, before the addition
- */
-static __always_inline int atom_add(atom_t *atom, int val)
+__always_inline int atom_get(atom_t *atom)
 {
-	return _atom_add(&atom->_val, val);
+	return _atom_get(&atom->count);
 }

-/**
- * @brief Atomically subtract `val` from `atom`.
- * @return The old value of `atom` before the subtraction
- */
-static __always_inline int atom_sub(atom_t *atom, int val)
+__always_inline int atom_put(atom_t *atom)
 {
-	return _atom_sub(&atom->_val, val);
+	return _atom_put(&atom->count);
 }

-/**
- * @brief Atomically do a bitwise AND of `val` and `atom`.
- * @return The old value of `atom`, before the AND
- */
-static __always_inline int atom_and(atom_t *atom, int val)
-{
-	return _atom_and(&atom->_val, val);
-}
-
-/**
- * @brief Atomically do a bitwise OR of `val` and `atom`.
- * @return The old value of `atom`, before the OR
- */
-static __always_inline int atom_or(atom_t *atom, int val)
-{
-	return _atom_or(&atom->_val, val);
-}
-
-/**
- * @brief Atomically do a bitwise XOR of `val` and `atom`.
- * @return The old value of `atom`, before the XOR
- */
-static __always_inline int atom_xor(atom_t *atom, int val)
-{
-	return _atom_xor(&atom->_val, val);
-}
-
-/**
- * @brief Atomically increment `atom` by 1.
- * @return The old value of `atom`, before the increment
- */
-static __always_inline int atom_get(atom_t *atom)
-{
-	return _atom_add(&atom->_val, 1);
-}
-
-/**
- * @brief Atomically decrement `atom` by 1.
- * @return The old value of `atom`, before the decrement
- */
-static __always_inline int atom_put(atom_t *atom)
-{
-	return _atom_sub(&atom->_val, 1);
-}
-
-/**
- * @brief Atomically exchange the value of `atom` with `val`.
- * @return The old value of `atom`
- */
-static __always_inline int atom_xchg(atom_t *atom, int val)
-{
-	return _atom_xchg(&atom->_val, val);
-}
-
-/**
- * @brief Atomically compare the value of `atom` with `cmp` and,
- *	if found to be equal, exchange it with `val`.
- * @param atom atom to perform the operation on
- * @param cmp value to compare the atom with
- * @param val new value to be written to atom if it is equal to `cmp`
- * @return The old value of `atom`
- */
-static __always_inline int atom_cmpxchg(atom_t *atom, int cmp, int val)
-{
-	return _atom_cmpxchg(&atom->_val, cmp, val);
-}
+int atom_count(atom_t *atom);

 /*
  * This file is part of Ardix.
- * Copyright (c) 2020, 2021, 2022 Felix Kopp <owo@fef.moe>.
+ * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
  *
  * Ardix is non-violent software: you may only use, redistribute,
  * and/or modify it under the terms of the CNPLv6+ as found in

@@ -2,10 +2,6 @@
 #pragma once

-#include <arch/atomic.h>
-#include <toolchain.h>
-
 /**
  * Enter atomic context.
  *
@@ -13,22 +9,13 @@
  * reference counter that is checked in the scheduler interrupt routine before
  * performing the context switch.
  */
-static __always_inline word_t atomic_enter(void)
-{
-	return _atomic_enter();
-}
+void atomic_enter(void);

 /** Leave atomic context. */
-static __always_inline void atomic_restore(word_t context)
-{
-	_atomic_restore(context);
-}
+void atomic_leave(void);

 /** Return a nonzero value if the current process is in atomic context. */
-static __always_inline int is_atomic(void)
-{
-	return _is_atomic();
-}
+int is_atomic(void);

 /*
  * This file is part of Ardix.

@@ -77,13 +77,13 @@ int devices_init(void);
 int device_init(struct device *dev);

 /** @brief Increment a device's reference counter. */
-static __always_inline void device_get(struct device *dev)
+__always_inline void device_get(struct device *dev)
 {
 	kent_get(&dev->kent);
 }

 /** @brief Decrement a device's referece counter. */
-static __always_inline void device_put(struct device *dev)
+__always_inline void device_put(struct device *dev)
 {
 	kent_put(&dev->kent);
 }

@@ -25,16 +25,10 @@ struct dmabuf {
 struct dmabuf *dmabuf_create(struct device *dev, size_t len);

 /** Increment a DMA buffer's reference counter. */
-static __always_inline void dmabuf_get(struct dmabuf *buf)
-{
-	kent_get(&buf->kent);
-}
+void dmabuf_get(struct dmabuf *buf);

 /** Decrement a DMA buffer's reference counter. */
-static __always_inline void dmabuf_put(struct dmabuf *buf)
-{
-	kent_put(&buf->kent);
-}
+void dmabuf_put(struct dmabuf *buf);

 /*
  * This file is part of Ardix.

@@ -48,10 +48,7 @@ int kent_init(struct kent *kent);
  *
  * @param kent: The kent.
  */
-static __always_inline void kent_get(struct kent *kent)
-{
-	atom_get(&kent->refcount);
-}
+void kent_get(struct kent *kent);

 /**
  * Decrement the reference counter.

@@ -104,12 +104,12 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
  */
 void kevent_listener_del(struct kevent_listener *listener);

-static __always_inline void kevent_get(struct kevent *event)
+__always_inline void kevent_get(struct kevent *event)
 {
 	kent_get(&event->kent);
 }

-static __always_inline void kevent_put(struct kevent *event)
+__always_inline void kevent_put(struct kevent *event)
 {
 	kent_put(&event->kent);
 }

@@ -11,6 +11,13 @@
  * @{
  */

+enum memflags {
+	MEM_KERNEL = (1 << 0),
+	MEM_USER = (1 << 1),
+	MEM_ATOMIC = (1 << 2),
+	MEM_STACK = (1 << 3),
+};
+
 /**
  * @brief Allocate `size` bytes of memory *w/out initializing it*.
  *
@@ -21,20 +28,7 @@
  * @return A pointer to the beginning of the memory area, or `NULL` if
  *	`size` was 0 or there is not enough free memory left.
  */
-__malloc(kfree, 1) void *kmalloc(size_t size);
-
-/**
- * @brief Allocate `size` bytes of memory *w/out initializing it*.
- *
- * Unlike `kmalloc()`, this method is guaranteed not to sleep.  It does this by
- * using a completely separate, smaller heap.  Only use this if you already are
- * in atomic context, like when in an irq.
- *
- * @param size Amount of bytes to allocate
- * @return A pointer to the beginning of the memory area, or `NULL` if
- *	`size` was 0 or there is not enough free memory left.
- */
-__malloc(kfree, 1) void *atomic_kmalloc(size_t size);
+__malloc(kfree, 1) void *kmalloc(size_t size, enum memflags flags);

 /**
  * @brief Free a previously allocated memory region.
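
With the separate `atomic_kmalloc()` entry point gone, callers now describe the allocation through flags. Judging from the call sites in this commit and the `DEBUG` checks added to `kmalloc()` below, exactly one of `MEM_KERNEL` or `MEM_USER` must be given, `MEM_ATOMIC` routes the request to the non-sleeping atomic heap, and `MEM_USER | MEM_ATOMIC` is rejected. A sketch of the resulting call patterns (illustrative function, abbreviated error handling):

```c
#include <ardix/malloc.h>
#include <stddef.h>

static void flag_examples(size_t len)
{
	void *a = kmalloc(len, MEM_KERNEL);              /* ordinary, may sleep */
	void *b = kmalloc(len, MEM_KERNEL | MEM_ATOMIC); /* irq-safe, never sleeps */
	void *c = kmalloc(len, MEM_USER | MEM_STACK);    /* e.g. a task stack */

	if (a) kfree(a);
	if (b) kfree(b);
	if (c) kfree(c);
}
```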

@@ -39,12 +39,14 @@ __always_inline void spin_init(spin_t *spin)

 __always_inline void spin_lock(spin_t *spin)
 {
+	atomic_enter();
 	_spin_lock(&spin->lock);
 }

 __always_inline void spin_unlock(spin_t *spin)
 {
 	_spin_unlock(&spin->lock);
+	atomic_leave();
 }

 __always_inline int spin_trylock(spin_t *spin)
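
Folding `atomic_enter()`/`atomic_leave()` into the lock and unlock paths guarantees a task cannot be switched out while holding a spinlock, something every call site previously had to arrange by hand; the `mutex.c` hunk further down drops exactly that boilerplate. An illustrative caller (the lock and list names are invented; `spin_t` and `list_insert` are from this codebase):

```c
static spin_t queue_lock; /* assume spin_init() ran during boot */

static void queue_push(struct list_head *queue, struct list_head *item)
{
	spin_lock(&queue_lock);		/* implies atomic_enter() */
	list_insert(queue, item);	/* no task switch can interleave here */
	spin_unlock(&queue_lock);	/* implies atomic_leave() */
}
```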

@@ -55,17 +55,17 @@ struct task {
 	pid_t pid;
 };

-static __always_inline void task_get(struct task *task)
+__always_inline void task_get(struct task *task)
 {
 	kent_get(&task->kent);
 }

-static __always_inline void task_put(struct task *task)
+__always_inline void task_put(struct task *task)
 {
 	kent_put(&task->kent);
 }

-static inline struct task *task_parent(struct task *task)
+__always_inline struct task *task_parent(struct task *task)
 {
 	if (task->pid == 0)
 		return NULL;

@@ -16,9 +16,9 @@
 /** Process identifier. */
 typedef _PID_TYPE_ pid_t;

-/** Simple atomic integer */
+/** Simple atomic reference counter */
 typedef struct {
-	volatile int _val;
+	int count;
 } atom_t;

 #include <arch/hardware.h>

@@ -23,7 +23,7 @@ int devices_init(void)
 	if (devices_kent != NULL)
 		return -EEXIST;

-	devices_kent = kmalloc(sizeof(*devices_kent));
+	devices_kent = kmalloc(sizeof(*devices_kent), MEM_KERNEL);
 	if (devices_kent == NULL)
 		return -ENOMEM;
@@ -59,7 +59,7 @@ static void device_kevent_destroy(struct kent *kent)
 struct device_kevent *device_kevent_create(struct device *device, enum device_kevent_flags flags)
 {
-	struct device_kevent *event = atomic_kmalloc(sizeof(*event));
+	struct device_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL | MEM_ATOMIC);
 	if (event == NULL)
 		return NULL;

@@ -22,7 +22,7 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
 	 * allocation needs to be atomic because the buffer might be
 	 * free()d from within an irq handler which cannot sleep
 	 */
-	struct dmabuf *buf = atomic_kmalloc(sizeof(*buf) + len);
+	struct dmabuf *buf = kmalloc(sizeof(*buf) + len, MEM_KERNEL | MEM_ATOMIC);
 	if (buf == NULL)
 		return NULL;
@@ -40,6 +40,16 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
 	return buf;
 }

+void dmabuf_get(struct dmabuf *buf)
+{
+	kent_get(&buf->kent);
+}
+
+void dmabuf_put(struct dmabuf *buf)
+{
+	kent_put(&buf->kent);
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

@@ -41,7 +41,7 @@ struct file *file_create(struct device *device, enum file_type type, int *err)
 		return NULL;
 	}

-	f = kmalloc(sizeof(*f));
+	f = kmalloc(sizeof(*f), MEM_KERNEL);
 	if (f == NULL) {
 		*err = -ENOMEM;
 		mutex_unlock(&fdtab_lock);
@@ -114,7 +114,7 @@ static int iowait_device(struct file *file, enum device_kevent_flags flags)
 	kent_get(&current->kent);

 	/* this must be atomic because event listeners can't sleep but need to call free() */
-	struct io_device_kevent_extra *extra = atomic_kmalloc(sizeof(*extra));
+	struct io_device_kevent_extra *extra = kmalloc(sizeof(*extra), MEM_KERNEL | MEM_ATOMIC);
 	if (extra == NULL)
 		return -ENOMEM;
@@ -206,7 +206,7 @@ static void file_kevent_destroy(struct kent *kent)
 struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags)
 {
-	struct file_kevent *event = atomic_kmalloc(sizeof(*event));
+	struct file_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL | MEM_ATOMIC);
 	if (event == NULL)
 		return NULL;

@@ -18,7 +18,7 @@ long sys_read(int fd, __user void *buf, size_t len)
 	if (f == NULL)
 		return -EBADF;

-	copy = kmalloc(len);
+	copy = kmalloc(len, MEM_KERNEL);
 	if (copy == NULL)
 		return -ENOMEM;

@@ -18,7 +18,7 @@ long sys_write(int fd, __user const void *buf, size_t len)
 	if (f == NULL)
 		return -EBADF;

-	copy = kmalloc(len);
+	copy = kmalloc(len, MEM_KERNEL);
 	if (copy == NULL) {
 		file_put(f);
 		return -ENOMEM;

@@ -1,7 +1,9 @@
 /* See the end of this file for copyright, license, and warranty information. */

 #include <ardix/atom.h>
+#include <ardix/malloc.h>
 #include <ardix/kent.h>
+#include <ardix/list.h>

 #include <errno.h>
 #include <stddef.h>
@@ -17,7 +19,8 @@ int kent_root_init(void)
 	kent_root->parent = NULL;
 	kent_root->destroy = NULL;

-	atom_init(&kent_root->refcount, 1);
+	atom_init(&kent_root->refcount);
+	kent_get(kent_root);

 	return 0;
 }
@@ -28,20 +31,27 @@ int kent_init(struct kent *kent)
 		return -EFAULT;
 	kent_get(kent->parent);

-	atom_init(&kent->refcount, 1);
+	atom_init(&kent->refcount);
+	kent_get(kent);

 	return 0;
 }

+void kent_get(struct kent *kent)
+{
+	atom_get(&kent->refcount);
+}
+
 void kent_put(struct kent *kent)
 {
-	do {
-		struct kent *parent = kent->parent;
+	struct kent *parent = kent->parent;

-		if (atom_put(&kent->refcount) != 1)
-			break;
+	if (atom_put(&kent->refcount) == 0) {
+		kent->destroy(kent);

-		kent->destroy(kent);
-		kent = parent;
-	} while (kent != NULL);
+		if (parent != NULL)
+			kent_put(parent);
+	}
 }

 /*
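
Two coupled changes meet here. References now start at zero, with the initial reference taken explicitly through `kent_get()` rather than `atom_init(..., 1)`, and since `atom_put()` now returns the new count, the release condition becomes `== 0`. The old iterative parent walk also becomes recursion: destroying a kent drops the reference it held on its parent, which may cascade upward. A sketch of that cascade:

```c
#include <ardix/kent.h>

static void drop_leaf(struct kent *leaf)
{
	/*
	 * If leaf's refcount falls 1 -> 0, leaf->destroy(leaf) runs, then
	 * kent_put(leaf->parent) releases the reference leaf held; if that
	 * was the parent's last reference the cascade continues toward
	 * kent_root.  Recursion depth is bounded by the hierarchy depth.
	 */
	kent_put(leaf);
}
```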

@@ -48,7 +48,7 @@ void kevents_init(void)
 }

 /* called from scheduler context only */
-static void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
+static inline void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
 {
 	struct kevent *event, *tmp_event;
@@ -137,7 +137,7 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
 					    int (*cb)(struct kevent *, void *),
 					    void *extra)
 {
-	struct kevent_listener *listener = kmalloc(sizeof(*listener));
+	struct kevent_listener *listener = kmalloc(sizeof(*listener), MEM_KERNEL);

 	if (listener != NULL) {
 		listener->cb = cb;

@@ -166,8 +166,8 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *bottom, s

 long sys_malloc(size_t size)
 {
-	void *ptr = kmalloc(size);
-	return (long)ptr;
+	void *ptr = kmalloc(size, MEM_USER);
+	return *(long *)&ptr;
 }

 void sys_free(void *ptr)
@@ -202,11 +202,29 @@ void kmalloc_init(void *heap, size_t size)
 	atomic_heap_free = blk_get_size(atomic_block);
 }

-void *kmalloc(size_t size)
+static void *atomic_kmalloc(size_t);
+
+/*
+ * this is still the old algorithm and all flags except atomic are ignored,
+ * so that at least the code still compiles to do some testing
+ */
+void *kmalloc(size_t size, enum memflags flags)
 {
+# ifdef DEBUG
+	if ((flags & MEM_KERNEL) && (flags & MEM_USER))
+		__breakpoint;
+	if ((flags & (MEM_USER | MEM_KERNEL)) == 0)
+		__breakpoint;
+	if ((flags & MEM_USER) && (flags & MEM_ATOMIC))
+		__breakpoint;
+# endif
+
 	if (size == 0)
 		return NULL; /* as per POSIX */

+	if (flags & MEM_ATOMIC)
+		return atomic_kmalloc(size);
+
 	if (size > generic_heap_free)
 		return NULL;
@@ -241,11 +259,8 @@ void *kmalloc(size_t size)
 	return ptr;
 }

-void *atomic_kmalloc(size_t size)
+static void *atomic_kmalloc(size_t size)
 {
-	if (size == 0)
-		return NULL;
-
 	if (size > atomic_heap_free)
 		return NULL;
@@ -296,7 +311,7 @@ void kfree(void *ptr)
 		if (!blk_is_alloc(blk))
 			__breakpoint;

-		word_t context = atomic_enter();
+		atomic_enter();
 		atomic_heap_free += blk_get_size(blk);
 		blk_clear_alloc(blk);
 		blk = blk_try_merge(&atomic_heap, blk);
@@ -305,7 +320,7 @@ void kfree(void *ptr)
 		memset(&blk->data[MIN_SIZE], 0xaa, blk_get_size(blk) - MIN_SIZE);
 # endif

-		atomic_restore(context);
+		atomic_leave();
 	} else {
 		__breakpoint;
 	}
@@ -348,8 +363,8 @@ static struct memblk *blk_try_merge(struct list_head *heap, struct memblk *blk)
 }

 static struct memblk *blk_merge(struct list_head *heap,
-				struct memblk *bottom,
-				struct memblk *top)
+				struct memblk *bottom,
+				struct memblk *top)
 {
 	size_t bottom_size = blk_get_size(bottom);
 	size_t top_size = blk_get_size(top);
@@ -368,8 +383,7 @@ static struct memblk *blk_merge(struct list_head *heap,
 	return bottom;
 }

-static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk,
-				size_t slice_size)
+static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk, size_t slice_size)
 {
 	list_delete(&blk->list);
@@ -413,7 +427,7 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *blk,
 	return blk;
 }

-static size_t round_alloc_size_up(size_t size)
+static inline size_t round_alloc_size_up(size_t size)
 {
 	size_t rounded = (size / MIN_SIZE) * MIN_SIZE;
 	if (rounded < size)
@@ -437,7 +451,7 @@ static void blk_set_size(struct memblk *blk, size_t size)
 	blk->endsz[words] |= size;
 }

-static void blk_set_alloc(struct memblk *blk)
+static inline void blk_set_alloc(struct memblk *blk)
 {
 	size_t words = blk->size / sizeof(blk->size);
@@ -445,7 +459,7 @@ static void blk_set_alloc(struct memblk *blk)
 	blk->endsz[words] |= ALLOC_FLAG;
 }

-static void blk_clear_alloc(struct memblk *blk)
+static inline void blk_clear_alloc(struct memblk *blk)
 {
 	size_t words = blk->size / sizeof(blk->size);
@@ -473,38 +487,32 @@ static inline int blk_is_border_start(struct memblk *blk)
 	return blk->size & BORDER_FLAG;
 }

-static void blk_set_border_end(struct memblk *blk)
+static inline void blk_set_border_end(struct memblk *blk)
 {
 	size_t words = blk->size / sizeof(blk->size);
 	blk->endsz[words] |= BORDER_FLAG;
 }

-static void blk_clear_border_end(struct memblk *blk)
+static inline void blk_clear_border_end(struct memblk *blk)
 {
 	size_t words = blk->size / sizeof(blk->size);
 	blk->endsz[words] &= ~BORDER_FLAG;
 }

-static int blk_is_border_end(struct memblk *blk)
+static inline int blk_is_border_end(struct memblk *blk)
 {
 	size_t words = blk->size / sizeof(blk->size);
 	return blk->endsz[words] & BORDER_FLAG;
 }

-static struct memblk *blk_prev(struct memblk *blk)
+static inline struct memblk *blk_prev(struct memblk *blk)
 {
 	if (blk_is_border_start(blk))
 		return NULL;
-
-	/* gcc does not like accessing index -1 of zero-length arrays */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wzero-length-bounds"
 	return (void *)blk - (blk->prevsz[-1] & SIZE_MSK) - OVERHEAD;
-#pragma GCC diagnostic pop
 }

-static struct memblk *blk_next(struct memblk *blk)
+static inline struct memblk *blk_next(struct memblk *blk)
 {
 	if (blk_is_border_end(blk))
 		return NULL;
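
All of this O(1) neighbor arithmetic relies on boundary tags: every block stores its size twice, in `size` at the front and in `endsz` at the back, with `ALLOC_FLAG` and `BORDER_FLAG` packed into spare bits that `SIZE_MSK` strips off. `blk->prevsz[-1]` is then simply the previous block's end tag sitting directly below the current header. A sketch of the layout as implied by `blk_prev()` (the full `struct memblk` definition is not part of this diff):

```c
/*
 * lower addresses
 *   ...payload... | end tag                       <- previous block
 *   size tag | ...payload... | end tag            <- current block
 *   ^ struct memblk *blk; blk->prevsz[-1] reads the neighbor's end tag:
 *
 *     prev = (void *)blk - (blk->prevsz[-1] & SIZE_MSK) - OVERHEAD;
 *
 * so merging adjacent free blocks in kfree() never has to scan the heap.
 */
```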

@@ -24,11 +24,9 @@ void mutex_lock(struct mutex *mutex)
 		.task = current,
 	};

-	word_t context = atomic_enter();
 	spin_lock(&mutex->wait_queue_lock);
 	list_insert(&mutex->wait_queue, &entry.link);
 	spin_unlock(&mutex->wait_queue_lock);
-	atomic_restore(context);

 	yield(TASK_LOCKWAIT);
 }
@@ -38,14 +36,12 @@ void mutex_unlock(struct mutex *mutex)
 {
 	struct mutex_wait *waiter = NULL;

-	word_t context = atomic_enter();
 	spin_lock(&mutex->wait_queue_lock);
 	if (!list_is_empty(&mutex->wait_queue)) {
 		waiter = list_first_entry(&mutex->wait_queue, struct mutex_wait, link);
 		list_delete(&waiter->link);
 	}
 	spin_unlock(&mutex->wait_queue_lock);
-	atomic_restore(context);

 	if (waiter != NULL) {
 		waiter->task->state = TASK_QUEUE;

@@ -10,7 +10,7 @@
 struct ringbuf *ringbuf_create(size_t size)
 {
-	struct ringbuf *buf = kmalloc(sizeof(*buf) + size);
+	struct ringbuf *buf = kmalloc(sizeof(*buf) + size, MEM_KERNEL);
 	if (buf == NULL)
 		return NULL;

@@ -79,11 +79,7 @@ int sched_init(void)
 	memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
 	kernel_task.bottom = &_estack;
-
-	/* gcc thinks &_estack is an array of size 1 */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
 	kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
-#pragma GCC diagnostic pop
 	kernel_task.pid = 0;
 	kernel_task.state = TASK_RUNNING;
@@ -100,7 +96,7 @@ int sched_init(void)
 	if (err != 0)
 		goto out;

-	idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
+	idle_task.stack = kmalloc(CONFIG_STACK_SIZE, MEM_USER | MEM_STACK);
 	if (idle_task.stack == NULL)
 		goto out;
 	idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
@@ -125,14 +121,10 @@ out:
 /**
  * @brief Determine whether the specified task is a candidate for execution.
  *
- * This function is only called once from `schedule()` and performance critical,
- * hence the `__always_inline` attribute.
- *
  * @param task The task
  * @returns whether `task` could be run next
  */
-__always_inline
-static bool can_run(const struct task *task)
+static inline bool can_run(const struct task *task)
 {
 	switch (task->state) {
 	case TASK_SLEEP:
@@ -152,7 +144,7 @@ static bool can_run(const struct task *task)
 void schedule(void)
 {
-	word_t context = atomic_enter();
+	atomic_enter();

 	struct task *old = current;
 	pid_t nextpid = old->pid;
@@ -186,7 +178,7 @@ void schedule(void)
 	new->last_tick = tick;
 	current = new;

-	atomic_restore(context);
+	atomic_leave();

 	if (old != new)
 		do_switch(old, new);
@@ -230,14 +222,14 @@ long sys_exec(int (*entry)(void))
 		goto out;
 	}

-	child = kmalloc(sizeof(*child));
+	child = kmalloc(sizeof(*child), MEM_KERNEL);
 	if (child == NULL) {
 		pid = -ENOMEM;
 		goto out;
 	}
 	child->pid = pid;

-	child->stack = kmalloc(CONFIG_STACK_SIZE);
+	child->stack = kmalloc(CONFIG_STACK_SIZE, MEM_USER | MEM_STACK);
 	if (child->stack == NULL) {
 		pid = -ENOMEM;
 		goto err_stack_malloc;

@@ -26,7 +26,7 @@ static void task_kevent_destroy(struct kent *kent)
 void task_kevent_create_and_dispatch(struct task *task, int status)
 {
-	struct task_kevent *event = kmalloc(sizeof(*event));
+	struct task_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL);
 	if (event == NULL)
 		return; /* TODO: we're fucked here */
@@ -54,12 +54,12 @@ __noreturn void sys_exit(int status)
 	if (parent->state != TASK_WAITPID) {
 		/*
-		 * atomic_kmalloc wouldn't actually be needed here, but we use
+		 * the atomic flag wouldn't actually be needed here, but we use
 		 * it anyway because it has a separate heap which is more likely
 		 * to have an emergency reserve of memory.  A failing allocation
 		 * would *really* be inconvenient here.
 		 */
-		struct dead_child *entry = atomic_kmalloc(sizeof(*entry));
+		struct dead_child *entry = kmalloc(sizeof(*entry), MEM_KERNEL | MEM_ATOMIC);
 		if (entry == NULL) {
 			schedule(); /* TODO: we're severely fucked here */
 		}

@@ -135,7 +135,7 @@ static int fmt_handle_uint(struct printf_buf *buf, unsigned int u)
 	return ret;
 }

-static int fmt_handle_int(struct printf_buf *buf, int i)
+static inline int fmt_handle_int(struct printf_buf *buf, int i)
 {
 	int ret = 0;
 	char minus = '-';
@@ -160,7 +160,7 @@ static int fmt_handle_int(struct printf_buf *buf, int i)
  * @param args: A pointer to the varargs list.  Will be manipulated.
  * @returns The amount of bytes written, or a negative POSIX error code.
  */
-static int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
+static inline int fmt_handle(struct printf_buf *buf, const char **pos, va_list args)
 {
 	int ret = 0;
 	union {

@@ -19,7 +19,7 @@ void *malloc(size_t size)
 		return NULL;
 	} else {
 		long int intptr = syscall(SYS_malloc, (sysarg_t)size);
-		return (void *)intptr;
+		return *(void **)&intptr;
 	}
 }
@@ -29,7 +29,7 @@ void *calloc(size_t nmemb, size_t size)
 	if (nmemb != 0 && total / nmemb != size)
 		return NULL; /* overflow check as mandated by POSIX.1 */
 	long int intptr = syscall(SYS_malloc, (sysarg_t)total);
-	return (void *)intptr;
+	return *(void **)&intptr;
 }

 void free(void *ptr)
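
Here and in `sys_malloc()` above, the integer/pointer round trip changes from a value conversion (`(void *)intptr`) to type punning through an lvalue (`*(void **)&intptr`). On this 32-bit target `long` and `void *` have the same size, so both forms yield the same bits; a `memcpy`-based pun, sketched below with invented names, expresses the same intent without the aliasing question the cast form raises:

```c
#include <string.h>

static void *long_to_ptr(long v)
{
	void *p;
	/* well-defined pun, assuming sizeof(long) == sizeof(void *) */
	memcpy(&p, &v, sizeof(p));
	return p;
}
```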

@@ -27,7 +27,7 @@ set(CONFIG_SERIAL_BUFSZ 256 CACHE STRING "Default serial buffer size in bytes")
 set(CONFIG_PRINTF_BUFSZ 64 CACHE STRING "Default buffer size for printf() and friends")

-option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" ${DEBUG})
+option(CONFIG_CHECK_SYSCALL_SOURCE "Prohibit inline syscalls" OFF)

 # This file is part of Ardix.
 # Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
