As of now, everything except the code imported from FreeBSD is proprietary. Of course, it won't stay that way for long, only until we have decided which license we would like to use. The rationale is that it is always easier to release everything under a copyleft license later than to do so immediately and then change it afterwards. Naturally, any changes made before this commit are still subject to the terms of the CNPL.
199 lines
3.5 KiB
C
/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */

#include <arch/atom.h>
#include <arch/cpufunc.h>

#include <gay/clist.h>
#include <gay/mutex.h>
#include <gay/sched.h>
#include <gay/systm.h>
#include <gay/util.h>

#if CFG_DEBUG_MTX
#define MTX_ASSERT(x) KASSERT(x)
#else
#define MTX_ASSERT(x) ({})
#endif

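/*
 * Spinlocks.  These busy-wait until the lock becomes available and must
 * only be used from within a critical section, which the MTX_ASSERT()
 * checks below enforce in debug builds.
 */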
void spin_init(spin_t *spin)
{
	atom_init(&spin->lock, 0);
}

void spin_lock(spin_t *spin)
{
	MTX_ASSERT(in_critical());

	spin_loop {
		if (atom_xchg(&spin->lock, 1) == 0)
			break;
	}
}

bool spin_trylock(spin_t *spin)
{
	MTX_ASSERT(in_critical());

	return atom_xchg(&spin->lock, 1) == 0;
}

void spin_unlock(spin_t *spin)
{
	MTX_ASSERT(in_critical());

	atom_init(&spin->lock, 0);
}

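/*
 * Sleeping mutex.  The lock value is initialized to 1 (unlocked); it drops
 * to 0 when the mutex is taken and goes negative when other tasks are
 * waiting for it.  Tasks that fail to acquire the mutex are put on the
 * wait queue and block instead of spinning.
 */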
void mtx_init(struct mtx *mtx)
{
	atom_init(&mtx->lock, 1);
	spin_init(&mtx->wait_queue_lock);
	clist_init(&mtx->wait_queue);
}

void mtx_lock(struct mtx *mtx)
{
	MTX_ASSERT(!in_critical());

	critical_enter();

	/*
	 * When the mutex is locked, its lock value goes to 0.
	 * atom_dec() returns true if the value *after* the decrement is
	 * nonzero, meaning the lock value has become negative.
	 */
	if (atom_dec(&mtx->lock)) {
		struct task *this_task = current;
		struct lock_waiter waiter = {
			.task = this_task,
		};

		spin_lock(&mtx->wait_queue_lock);
		if (atom_cmp_xchg(&mtx->lock, 1, 0) == 1) {
			/* mutex was unlocked after we failed to claim it, but
			 * before the other thread claimed wait_queue_lock */
			spin_unlock(&mtx->wait_queue_lock);

			critical_leave();
		} else {
			this_task->state = TASK_BLOCKED;
			clist_add(&mtx->wait_queue, &waiter.clink);
			spin_unlock(&mtx->wait_queue_lock);

			critical_leave();

			schedule();
		}
	} else {
		critical_leave();
	}
}

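/*
 * Try to acquire the mutex without blocking.
 * Returns true if the mutex was successfully claimed.
 */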
bool mtx_trylock(struct mtx *mtx)
{
	MTX_ASSERT(!in_critical());

	return atom_cmp_xchg(&mtx->lock, 1, 0) == 1;
}

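/*
 * Release the mutex.  If the lock value indicates that other tasks are
 * waiting for it, the first waiter is removed from the queue and marked
 * ready to run again.
 */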
void mtx_unlock(struct mtx *mtx)
{
	MTX_ASSERT(!in_critical());

	critical_enter();

	if (atom_add(&mtx->lock, 1) < 0) {
		spin_lock(&mtx->wait_queue_lock);
		if (!clist_is_empty(&mtx->wait_queue)) {
			struct lock_waiter *waiter = clist_del_first_entry(
				&mtx->wait_queue,
				typeof(*waiter),
				clink
			);
			waiter->task->state = TASK_READY;
		}
		spin_unlock(&mtx->wait_queue_lock);
	}

	critical_leave();
}

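/*
 * Counting semaphore.  sem_down() decrements the count and blocks the
 * calling task when no resources are left; sem_up() increments it and
 * wakes the first waiting task if there is one.
 */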
void sem_init(struct sem *sem, int initial_count)
{
	atom_init(&sem->count, initial_count);
	spin_init(&sem->wait_queue_lock);
	clist_init(&sem->wait_queue);
}

int sem_down(struct sem *sem)
{
	MTX_ASSERT(!in_critical());

	critical_enter();

	int ret = atom_sub(&sem->count, 1);
	if (ret < 0) {
		struct task *this_task = current;
		struct lock_waiter waiter = {
			.task = this_task,
		};
		this_task->state = TASK_BLOCKED;

		spin_lock(&sem->wait_queue_lock);
		clist_add(&sem->wait_queue, &waiter.clink);
		spin_unlock(&sem->wait_queue_lock);

		critical_leave();

		schedule();
		ret = 0;
	} else {
		critical_leave();
	}

	return ret;
}

int sem_up(struct sem *sem)
{
	MTX_ASSERT(!in_critical());

	critical_enter();

	int ret = atom_add(&sem->count, 1);
	if (ret <= 0) {
		spin_lock(&sem->wait_queue_lock);
		if (!clist_is_empty(&sem->wait_queue)) {
			struct lock_waiter *waiter = clist_del_first_entry(
				&sem->wait_queue,
				typeof(*waiter),
				clink
			);
			waiter->task->state = TASK_READY;
		}
		spin_unlock(&sem->wait_queue_lock);

		ret = 0;
	}

	critical_leave();

	return ret;
}

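/*
 * Non-blocking variant of sem_down().  If the count is already exhausted,
 * the decrement is undone and -1 is returned instead of sleeping.
 */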
int sem_trydown(struct sem *sem)
{
	MTX_ASSERT(!in_critical());

	critical_enter();

	int ret = atom_sub(&sem->count, 1);
	if (ret < 0) {
		atom_inc(&sem->count);
		ret = -1;
	}

	critical_leave();

	return ret;
}