fix atomics (finally)
parent 5d2539fc4a
commit e86ef2acbd
16 changed files with 72 additions and 70 deletions
@@ -20,7 +20,6 @@ configure_file(
 target_sources(ardix_arch PRIVATE
 	arch_init.c
 	atom.S
-	atomic.c
 	do_switch.S
 	entry.c
 	handle_fault.c
@@ -1,34 +0,0 @@
-/* See the end of this file for copyright, license, and warranty information. */
-
-#include <ardix/atomic.h>
-#include <ardix/atom.h>
-
-static ATOM(atomic_context, 0);
-
-void atomic_enter(void)
-{
-	atom_get(&atomic_context);
-}
-
-void atomic_leave(void)
-{
-	atom_put(&atomic_context);
-}
-
-int is_atomic(void)
-{
-	return atom_read(&atomic_context);
-}
-
-/*
- * This file is part of Ardix.
- * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
- *
- * Ardix is non-violent software: you may only use, redistribute,
- * and/or modify it under the terms of the CNPLv6+ as found in
- * the LICENSE file in the source code root directory or at
- * <https://git.pixie.town/thufie/CNPL>.
- *
- * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
- * permitted by applicable law. See the CNPLv6+ for details.
- */
28 arch/at91sam3x8e/include/arch/atomic.h Normal file
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <ardix/types.h>
+
+#include <toolchain.h>
+
+static __always_inline word_t _atomic_enter(void)
+{
+	word_t primask;
+	__asm__ volatile(
+		" mrs %0, primask \n"
+		" cpsid i \n"
+		: "=r"(primask));
+	return primask;
+}
+
+static __always_inline void _atomic_restore(word_t context)
+{
+	if (!(context & 1))
+		__asm__ volatile("cpsie i");
+}
+
+static inline int _is_atomic(void)
+{
+	int primask;
+	__asm__ volatile("mrs %0, primask" : "=r"(primask));
+	return primask & 1;
+}
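The new arch header saves PRIMASK before executing cpsid, and _atomic_restore() only issues cpsie if bit 0 of the saved value was clear, so critical sections nest correctly. A minimal sketch of that behaviour, assuming an at91sam3x8e build with this header on the include path and interrupts enabled on entry; the function name is made up for illustration:

#include <arch/atomic.h>

void nested_critical_sections(void)
{
	word_t outer = _atomic_enter();	/* PRIMASK bit 0 was 0; IRQs are now masked */
	word_t inner = _atomic_enter();	/* saves PRIMASK again; bit 0 is already 1 */
	/* ... innermost critical work ... */
	_atomic_restore(inner);	/* saved bit 0 was 1, so IRQs stay masked */
	_atomic_restore(outer);	/* saved bit 0 was 0, so IRQs are re-enabled */
}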
@@ -13,7 +13,7 @@ static __always_inline void atom_init(atom_t *atom, int val)
 	atom->_val = val;
 }
 
-static __always_inline int atom_read(atom_t *atom)
+static __always_inline int atom_read(const atom_t *atom)
 {
 	return atom->_val;
 }
@@ -2,6 +2,10 @@
 
 #pragma once
 
+#include <arch/atomic.h>
+
+#include <toolchain.h>
+
 /**
  * Enter atomic context.
  *
@@ -9,13 +13,22 @@
  * reference counter that is checked in the scheduler interrupt routine before
  * performing the context switch.
  */
-void atomic_enter(void);
+static __always_inline word_t atomic_enter(void)
+{
+	return _atomic_enter();
+}
 
 /** Leave atomic context. */
-void atomic_leave(void);
+static __always_inline void atomic_restore(word_t context)
+{
+	_atomic_restore(context);
+}
 
 /** Return a nonzero value if the current process is in atomic context. */
-int is_atomic(void);
+static __always_inline int is_atomic(void)
+{
+	return _is_atomic();
+}
 
 /*
  * This file is part of Ardix.
@@ -77,13 +77,13 @@ int devices_init(void);
 int device_init(struct device *dev);
 
 /** @brief Increment a device's reference counter. */
-__always_inline void device_get(struct device *dev)
+static __always_inline void device_get(struct device *dev)
 {
 	kent_get(&dev->kent);
 }
 
 /** @brief Decrement a device's referece counter. */
-__always_inline void device_put(struct device *dev)
+static __always_inline void device_put(struct device *dev)
 {
 	kent_put(&dev->kent);
 }
@@ -25,10 +25,16 @@ struct dmabuf {
 struct dmabuf *dmabuf_create(struct device *dev, size_t len);
 
 /** Increment a DMA buffer's reference counter. */
-void dmabuf_get(struct dmabuf *buf);
+static __always_inline void dmabuf_get(struct dmabuf *buf)
+{
+	kent_get(&buf->kent);
+}
 
 /** Decrement a DMA buffer's reference counter. */
-void dmabuf_put(struct dmabuf *buf);
+static __always_inline void dmabuf_put(struct dmabuf *buf)
+{
+	kent_put(&buf->kent);
+}
 
 /*
  * This file is part of Ardix.
@@ -48,7 +48,10 @@ int kent_init(struct kent *kent);
  *
  * @param kent: The kent.
  */
-void kent_get(struct kent *kent);
+static __always_inline void kent_get(struct kent *kent)
+{
+	atom_get(&kent->refcount);
+}
 
 /**
  * Decrement the reference counter.
@@ -104,12 +104,12 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
  */
 void kevent_listener_del(struct kevent_listener *listener);
 
-__always_inline void kevent_get(struct kevent *event)
+static __always_inline void kevent_get(struct kevent *event)
 {
 	kent_get(&event->kent);
 }
 
-__always_inline void kevent_put(struct kevent *event)
+static __always_inline void kevent_put(struct kevent *event)
 {
 	kent_put(&event->kent);
 }
@@ -39,14 +39,12 @@ __always_inline void spin_init(spin_t *spin)
 
 __always_inline void spin_lock(spin_t *spin)
 {
-	atomic_enter();
 	_spin_lock(&spin->lock);
 }
 
 __always_inline void spin_unlock(spin_t *spin)
 {
 	_spin_unlock(&spin->lock);
-	atomic_leave();
 }
 
 __always_inline int spin_trylock(spin_t *spin)
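With the implicit atomic_enter()/atomic_leave() removed from spin_lock()/spin_unlock(), a spinlock on its own no longer masks interrupts; callers that also need protection against IRQ handlers now pair the lock with an explicit save/restore, which is exactly what the mutex_lock()/mutex_unlock() hunks further down do. A hedged sketch of that pairing (the function name and the <ardix/spinlock.h> header path are assumptions for illustration):

#include <ardix/atomic.h>
#include <ardix/spinlock.h>	/* assumed location of spin_t and spin_lock() */

void update_shared_queue(spin_t *lock)
{
	word_t context = atomic_enter();	/* keep interrupt handlers out */
	spin_lock(lock);			/* keep other execution contexts out */
	/* ... modify the structure protected by lock ... */
	spin_unlock(lock);
	atomic_restore(context);	/* unmask IRQs only if they were unmasked before */
}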
@@ -55,17 +55,17 @@ struct task {
 	pid_t pid;
 };
 
-__always_inline void task_get(struct task *task)
+static __always_inline void task_get(struct task *task)
 {
 	kent_get(&task->kent);
 }
 
-__always_inline void task_put(struct task *task)
+static __always_inline void task_put(struct task *task)
 {
 	kent_put(&task->kent);
 }
 
-__always_inline struct task *task_parent(struct task *task)
+static inline struct task *task_parent(struct task *task)
 {
 	if (task->pid == 0)
 		return NULL;
10 kernel/dma.c
@@ -40,16 +40,6 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
 	return buf;
 }
-
-void dmabuf_get(struct dmabuf *buf)
-{
-	kent_get(&buf->kent);
-}
-
-void dmabuf_put(struct dmabuf *buf)
-{
-	kent_put(&buf->kent);
-}
 
 /*
  * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -33,11 +33,6 @@ int kent_init(struct kent *kent)
 	return 0;
 }
 
-void kent_get(struct kent *kent)
-{
-	atom_get(&kent->refcount);
-}
-
 void kent_put(struct kent *kent)
 {
 	do {
@@ -296,7 +296,7 @@ void kfree(void *ptr)
 		if (!blk_is_alloc(blk))
 			__breakpoint;
 
-		atomic_enter();
+		word_t context = atomic_enter();
 		atomic_heap_free += blk_get_size(blk);
 		blk_clear_alloc(blk);
 		blk = blk_try_merge(&atomic_heap, blk);
@@ -305,7 +305,7 @@ void kfree(void *ptr)
 		memset(&blk->data[MIN_SIZE], 0xaa, blk_get_size(blk) - MIN_SIZE);
 # endif
 
-		atomic_leave();
+		atomic_restore(context);
 	} else {
 		__breakpoint;
 	}
@@ -24,9 +24,11 @@ void mutex_lock(struct mutex *mutex)
 		.task = current,
 	};
 
+	word_t context = atomic_enter();
 	spin_lock(&mutex->wait_queue_lock);
 	list_insert(&mutex->wait_queue, &entry.link);
 	spin_unlock(&mutex->wait_queue_lock);
+	atomic_restore(context);
 
 	yield(TASK_LOCKWAIT);
 }
@@ -36,12 +38,14 @@ void mutex_unlock(struct mutex *mutex)
 {
 	struct mutex_wait *waiter = NULL;
 
+	word_t context = atomic_enter();
 	spin_lock(&mutex->wait_queue_lock);
 	if (!list_is_empty(&mutex->wait_queue)) {
 		waiter = list_first_entry(&mutex->wait_queue, struct mutex_wait, link);
 		list_delete(&waiter->link);
 	}
 	spin_unlock(&mutex->wait_queue_lock);
+	atomic_restore(context);
 
 	if (waiter != NULL) {
 		waiter->task->state = TASK_QUEUE;
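For orientation, a brief usage sketch of the mutex API these hunks modify; only mutex_lock() and mutex_unlock() appear in the commit, the header path, lock name, and its initialisation are made up here:

#include <ardix/mutex.h>	/* assumed header; the diff only shows the implementation hunks */

static struct mutex example_lock;	/* assumed to be initialised elsewhere */

void consumer_task(void)
{
	mutex_lock(&example_lock);	/* may yield with TASK_LOCKWAIT until the lock is free */
	/* ... exclusive access to the shared resource ... */
	mutex_unlock(&example_lock);	/* re-queues the first waiter, if any */
}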
@@ -152,7 +152,7 @@ static bool can_run(const struct task *task)
 
 void schedule(void)
 {
-	atomic_enter();
+	word_t context = atomic_enter();
 
 	struct task *old = current;
 	pid_t nextpid = old->pid;
@@ -186,7 +186,7 @@ void schedule(void)
 	new->last_tick = tick;
 	current = new;
 
-	atomic_leave();
+	atomic_restore(context);
 
 	if (old != new)
 		do_switch(old, new);