@@ -4,56 +4,58 @@
 #include <arch/cpufunc.h>
 
 #include <gay/clist.h>
 #include <gay/irq.h>
 #include <gay/kprintf.h>
 #include <gay/mutex.h>
 #include <gay/sched.h>
 #include <gay/systm.h>
 #include <gay/util.h>
 
 #include <errno.h>
 
+#if CFG_DEBUG_MTX
+#define MTX_ASSERT(x) KASSERT(x)
+#else
+#define MTX_ASSERT(x) ({})
+#endif
+
 void spin_init(spin_t *spin)
 {
-	atom_write(&spin->lock, 0);
+	atom_init(&spin->lock, 0);
 }
 
 void spin_lock(spin_t *spin)
 {
+	MTX_ASSERT(in_critical());
+
 	spin_loop {
-		if (atom_cmp_xchg(&spin->lock, 0, 1) == 0)
+		if (atom_xchg(&spin->lock, 1) == 0)
 			break;
 	}
 }
 
-int spin_trylock(spin_t *spin)
+bool spin_trylock(spin_t *spin)
 {
-	if (atom_cmp_xchg(&spin->lock, 0, 1) != 0)
-		return -EAGAIN;
-	return 0;
+	MTX_ASSERT(in_critical());
+
+	return atom_xchg(&spin->lock, 1) == 0;
 }
 
 void spin_unlock(spin_t *spin)
 {
-	atom_write(&spin->lock, 0);
+	MTX_ASSERT(in_critical());
+
+	atom_init(&spin->lock, 0);
 }
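
Note on the spin changes: for a binary lock, the unconditional atom_xchg() is enough, because the old value already tells us everything — 0 means the lock was free and is now ours, 1 means it was already held and storing 1 again changed nothing. A rough model of the same pattern in portable C11 atomics, with illustrative names rather than this kernel's API:

	#include <stdatomic.h>
	#include <stdbool.h>

	typedef struct {
		atomic_int lock;	/* 0 = free, 1 = held */
	} toy_spin_t;

	static void toy_spin_lock(toy_spin_t *s)
	{
		/* keep exchanging 1 in until the old value was 0 */
		while (atomic_exchange_explicit(&s->lock, 1,
						memory_order_acquire) != 0)
			/* spin */;
	}

	static bool toy_spin_trylock(toy_spin_t *s)
	{
		return atomic_exchange_explicit(&s->lock, 1,
						memory_order_acquire) == 0;
	}

	static void toy_spin_unlock(toy_spin_t *s)
	{
		atomic_store_explicit(&s->lock, 0, memory_order_release);
	}

The bool return for spin_trylock() also reads more naturally at call sites than the old 0/-EAGAIN convention.
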
 
 void mtx_init(struct mtx *mtx)
 {
-	atom_write(&mtx->lock, 1);
+	atom_init(&mtx->lock, 1);
 	spin_init(&mtx->wait_queue_lock);
 	clist_init(&mtx->wait_queue);
 }
 
 void mtx_lock(struct mtx *mtx)
 {
-# ifdef DEBUG
-	if (in_irq()) {
-		kprintf("mtx_lock() called from irq context!\n");
-		spin_loop {
-			if (atom_cmp_xchg(&mtx->lock, 1, 0) == 1)
-				return;
-		}
-	}
-# endif
+	MTX_ASSERT(!in_critical());
+
+	critical_enter();
 
 	/*
 	 * When the mutex is locked, its lock value goes to 0.
@@ -61,79 +63,92 @@ void mtx_lock(struct mtx *mtx)
 	 * nonzero, meaning the lock value has become negative.
 	 */
 	if (atom_dec(&mtx->lock)) {
-		struct task *task = current;
+		/*
+		 * It might not be the smartest idea to allocate this thing on
+		 * the stack because it's gonna blow up if the task somehow dies
+		 * before returning here. Let's see how this turns out.
+		 */
+		struct task *this_task = current;
 		struct lock_waiter waiter = {
-			.task = task,
+			.task = this_task,
 		};
 
 		spin_lock(&mtx->wait_queue_lock);
-		clist_add(&mtx->wait_queue, &waiter.clink);
-		spin_unlock(&mtx->wait_queue_lock);
+		if (atom_cmp_xchg(&mtx->lock, 1, 0) == 1) {
+			/* mutex was unlocked after we failed to claim it, but
+			 * before the other thread claimed wait_queue_lock */
+			spin_unlock(&mtx->wait_queue_lock);
 
-		task->state = TASK_BLOCKED;
-		schedule();
+			critical_leave();
+		} else {
+			this_task->state = TASK_BLOCKED;
+			clist_add(&mtx->wait_queue, &waiter.clink);
+			spin_unlock(&mtx->wait_queue_lock);
+
+			critical_leave();
+
+			schedule();
+		}
+	} else {
+		critical_leave();
 	}
 }
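
Note on the new mtx_lock(): the lock value counts — 1 is unlocked, 0 is locked, and per the comment above each contender pushes it further negative. The important fix is the cmp_xchg recheck under wait_queue_lock: between the failed decrement and taking the queue lock, the holder may already have run mtx_unlock() against a still-empty queue, so the wakeup would be lost and the task would sleep forever; rechecking lets the waiter claim the freshly released mutex directly instead. A compilable sketch of just that recheck, in C11 atomics with an illustrative name:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* 1 = unlocked, 0 = locked; returns true if we stole the lock
	 * that was released while we were queueing up */
	static bool toy_reclaim(atomic_int *lock)
	{
		int expected = 1;
		return atomic_compare_exchange_strong(lock, &expected, 0);
	}

Calling critical_leave() before schedule() in the blocked path also looks deliberate: presumably the task has to drop its critical section before the scheduler is allowed to switch it out.
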
 
-int mtx_trylock(struct mtx *mtx)
+bool mtx_trylock(struct mtx *mtx)
 {
-	if (atom_cmp_xchg(&mtx->lock, 1, 0) != 1)
-		return -EAGAIN;
-	return 0;
+	MTX_ASSERT(!in_critical());
+
+	return atom_cmp_xchg(&mtx->lock, 1, 0) == 1;
 }
 
 void mtx_unlock(struct mtx *mtx)
 {
+	MTX_ASSERT(!in_critical());
+
+	critical_enter();
+
 	if (atom_add(&mtx->lock, 1) < 0) {
 		spin_lock(&mtx->wait_queue_lock);
-		struct lock_waiter *waiter =
-			clist_del_first_entry(&mtx->wait_queue, typeof(*waiter), clink);
+		if (!clist_is_empty(&mtx->wait_queue)) {
+			struct lock_waiter *waiter = clist_del_first_entry(
+				&mtx->wait_queue,
+				typeof(*waiter),
+				clink
+			);
+			waiter->task->state = TASK_READY;
+		}
 		spin_unlock(&mtx->wait_queue_lock);
-		waiter->task->state = TASK_READY;
 	}
+
+	critical_leave();
 }
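
Note on mtx_unlock(): the `atom_add(...) < 0` test detects sleepers, with the lock value roughly meaning (per the counting scheme described in mtx_lock()):

	lock value:   1           0             -N
	meaning:      unlocked    locked, no    locked, about
	                          sleepers      N sleepers

The !clist_is_empty() guard is the counterpart of the recheck in mtx_lock(): a contender that won the cmp_xchg there never enqueued itself, so a negative count no longer guarantees a queued waiter, and the old unconditional clist_del_first_entry() could hand back a dangling pointer. Dequeuing and marking the task TASK_READY now happen inside one wait_queue_lock section, and the waiter — which lives on the sleeper's stack — is never touched afterwards.
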
 
 void sem_init(struct sem *sem, int initial_count)
 {
-	atom_write(&sem->count, initial_count);
+	atom_init(&sem->count, initial_count);
 	spin_init(&sem->wait_queue_lock);
 	clist_init(&sem->wait_queue);
 }
 
 int sem_down(struct sem *sem)
 {
-# ifdef DEBUG
-	if (in_irq()) {
-		kprintf("sem_down() called from IRQ context!\n");
-		spin_loop {
-			int old = atom_sub(&sem->count, 1);
-			if (old >= 0)
-				return old;
-			atom_inc(&sem->count);
-		}
-	}
-# endif
+	MTX_ASSERT(!in_critical());
 
-	int ret = atom_sub(&sem->count, 1);
+	critical_enter();
+
+	int ret = atom_sub(&sem->count, 1);
 	if (ret < 0) {
-		struct task *task = current;
+		struct task *this_task = current;
 		struct lock_waiter waiter = {
-			.task = task,
+			.task = this_task,
 		};
+		this_task->state = TASK_BLOCKED;
 
 		spin_lock(&sem->wait_queue_lock);
 		clist_add(&sem->wait_queue, &waiter.clink);
 		spin_unlock(&sem->wait_queue_lock);
 
-		task->state = TASK_BLOCKED;
+		critical_leave();
+
 		schedule();
 		ret = 0;
+	} else {
+		critical_leave();
 	}
 
 	return ret;
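
Note on sem_down(): the interesting reorder is that the task now flags itself TASK_BLOCKED *before* the waiter becomes visible on the queue. With the old order, this interleaving lost a wakeup (A is the downer, B a concurrent upper):

	A: clist_add(&sem->wait_queue, &waiter.clink); spin_unlock(...)
	B: sem_up() dequeues the waiter, sets TASK_READY
	A: task->state = TASK_BLOCKED
	A: schedule()	<- A sleeps with nobody left to wake it

Setting TASK_BLOCKED first means a concurrent sem_up() can only ever overwrite it with TASK_READY, never the other way around. The critical_enter()/critical_leave() bracket presumably keeps the task from being preempted mid-handshake; the leave happens just before schedule() so the switch itself is permitted.
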
@@ -141,30 +156,45 @@ int sem_down(struct sem *sem)
 
 int sem_up(struct sem *sem)
 {
-	int ret = atom_add(&sem->count, 1);
+	MTX_ASSERT(!in_critical());
 
+	critical_enter();
+
+	int ret = atom_add(&sem->count, 1);
 	if (ret <= 0) {
 		spin_lock(&sem->wait_queue_lock);
-		struct lock_waiter *waiter =
-			clist_del_first_entry(&sem->wait_queue, typeof(*waiter), clink);
+		if (!clist_is_empty(&sem->wait_queue)) {
+			struct lock_waiter *waiter = clist_del_first_entry(
+				&sem->wait_queue,
+				typeof(*waiter),
+				clink
+			);
+			waiter->task->state = TASK_READY;
+		}
 		spin_unlock(&sem->wait_queue_lock);
 
-		waiter->task->state = TASK_READY;
 		ret = 0;
 	}
 
+	critical_leave();
+
 	return ret;
 }
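
Note on sem_up(): `ret <= 0` after the increment says the count cannot have been positive beforehand, i.e. a sleeper may exist — but as in mtx_unlock(), the queue itself is the source of truth, so the dequeue is now guarded. A toy version of the guarded pop, with stand-in types (the kernel's clist is a different structure):

	struct toy_waiter {
		struct toy_waiter *next;
	};

	struct toy_queue {
		struct toy_waiter *head;
	};

	/* mirrors the !clist_is_empty() check: NULL when nobody waits */
	static struct toy_waiter *toy_pop_if_any(struct toy_queue *q)
	{
		struct toy_waiter *w = q->head;
		if (w != NULL)
			q->head = w->next;
		return w;
	}

A transiently negative count with an empty queue is a real state here, because sem_trydown() below dips the count before undoing its decrement.
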
 
 int sem_trydown(struct sem *sem)
 {
-	int ret = atom_sub(&sem->count, 1);
+	MTX_ASSERT(!in_critical());
 
+	critical_enter();
+
+	int ret = atom_sub(&sem->count, 1);
 	if (ret < 0) {
 		atom_inc(&sem->count);
-		ret = -EAGAIN;
+		ret = -1;
 	}
 
+	critical_leave();
+
 	return ret;
 }
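
Note on sem_trydown(): the acquire is optimistic — decrement first, and if the count went negative, put it back and report failure (now a plain -1 instead of -EAGAIN, in line with dropping errno codes from this file). The same shape in C11 atomics, names illustrative:

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool toy_trydown(atomic_int *count)
	{
		/* fetch_sub returns the old value; old <= 0 means we
		 * drove the count negative and must undo */
		if (atomic_fetch_sub(count, 1) <= 0) {
			atomic_fetch_add(count, 1);
			return false;
		}
		return true;
	}

Between the sub and the corrective add, other CPUs briefly observe a count one lower than reality, which is exactly why sem_up() tolerates a negative count with no queued waiter.
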