mutex: add wait queue and spinlocks

This commit is contained in:
anna 2021-08-10 00:44:36 +02:00
parent faa99622df
commit d4411fd6b6
Signed by: fef
GPG key ID: EC22E476DC2D3D84
11 changed files with 150 additions and 92 deletions

View file

@ -7,10 +7,6 @@
#include <ardix/serial.h>
#include <ardix/util.h>
#ifndef CONFIG_ARCH_SERIAL_BUFSZ
#define CONFIG_ARCH_SERIAL_BUFSZ 32
#endif /* CONFIG_ARCH_SERIAL_BUFSZ */
/** Architecture-specific extension of `struct serial_device` */
struct arch_serial_device {
/** should always match REG_UART_PDC_TPR */

View file

@ -19,7 +19,8 @@
* Hardware automatically saves r0-r3, r12, pc, lr, and psr onto the
* stack that was used before exception entry. Bit 2 in lr is 1 if the
* psp was used, and 0 if msp was used. We store the appropriate stack
* pointer into r12 and push it onto the stack
* pointer into r12 and push it onto the stack along with the remaining
* registers.
*/
tst lr, #(1 << 2)
ite ne

View file

@ -2,10 +2,10 @@
.include "asm.S"
.section .text.shared
.text
/* void _mutex_lock(uint8_t *lock); */
func_begin _mutex_lock
/* void _spin_lock(uint8_t *lock); */
func_begin _spin_lock
mov r1, #1 /* uint8_t newval = 1; */
@ -21,10 +21,12 @@ func_begin _mutex_lock
dmb
bx lr
func_end _mutex_lock
func_end _spin_lock
/* int _mutex_trylock(uint8_t *lock); */
func_begin _mutex_trylock
/* int _spin_trylock(uint8_t *lock); */
func_begin _spin_trylock
mov r1, #1 /* uint8_t newval = 1; */
/* move lock to r2 to make room in r0 for the return value */
@ -40,16 +42,20 @@ func_begin _mutex_trylock
bx lr /* return tmp; */
func_end _spin_trylock
func_end _mutex_trylock
/* void _mutex_unlock(uint8_t *lock); */
func_begin _mutex_unlock
/* void _spin_unlock(uint8_t *lock); */
func_begin _spin_unlock
mov r1, #0
strb r1, [r0]
dmb
bx lr
func_end _spin_unlock
func_end _mutex_unlock
/*

View file

@ -2,6 +2,8 @@
#pragma once
#include <ardix/atomic.h>
#include <ardix/list.h>
#include <ardix/types.h>
#include <errno.h>
@ -13,23 +15,72 @@
* @{
*/
/**
 * @brief A spinning mutex.
 *
 * This is the most primitive type of mutex.  Locking loops (spins) until the
 * lock is acquired, keeping the CPU busy the whole time.  For this reason,
 * tasks must never sleep while holding a spinlock.  That rule is enforced by
 * `spin_lock()` calling `atomic_enter()` and `spin_unlock()` calling
 * `atomic_leave()`.
 */
typedef struct {
	uint8_t lock;
} spin_t;
/** @brief Low-level spin loop, implemented in assembly. @private */
extern void _spin_lock(uint8_t *lock);
/** @brief Low-level lock release, implemented in assembly. @private */
extern void _spin_unlock(uint8_t *lock);
/** @brief Single low-level lock attempt; returns 0 on success. @private */
extern int _spin_trylock(uint8_t *lock);
/**
 * @brief Initialize a spinlock to the unlocked state.
 *
 * @param spin Spinlock to initialize
 */
__always_inline void spin_init(spin_t *spin)
{
	spin->lock = 0;
}
/**
 * @brief Acquire a spinlock, spinning until it becomes available.
 *
 * Enters an atomic context before spinning; released by `spin_unlock()`.
 * Do not sleep while holding the lock.
 *
 * @param spin Spinlock to acquire
 */
__always_inline void spin_lock(spin_t *spin)
{
	atomic_enter();
	_spin_lock(&spin->lock);
}
/**
 * @brief Release a spinlock acquired with `spin_lock()`.
 *
 * Leaves the atomic context entered by `spin_lock()`.
 *
 * @param spin Spinlock to release
 */
__always_inline void spin_unlock(spin_t *spin)
{
	_spin_unlock(&spin->lock);
	atomic_leave();
}
/**
 * @brief Attempt to acquire a spinlock without spinning.
 *
 * On success, the caller holds the lock inside an atomic context, exactly as
 * with `spin_lock()`, and must release it with `spin_unlock()`.
 *
 * Fix: the original never called `atomic_enter()` on success, yet
 * `spin_unlock()` unconditionally calls `atomic_leave()` — a trylock/unlock
 * pair therefore unbalanced the atomic section.  We now enter the atomic
 * context up front and leave it again if the lock could not be taken.
 *
 * @param spin Spinlock to acquire
 * @returns 0 on success, `-EAGAIN` if the lock is contended
 */
__always_inline int spin_trylock(spin_t *spin)
{
	atomic_enter();
	if (_spin_trylock(&spin->lock) == 0)
		return 0;
	atomic_leave();
	return -EAGAIN;
}
/**
 * @brief Determine whether a spinlock is currently locked.
 *
 * Only a snapshot: the lock may change state immediately after the read.
 *
 * @param spin Spinlock to examine
 * @returns true if the lock is held
 */
__always_inline bool spin_is_locked(spin_t *spin)
{
	return spin->lock != 0;
}
/**
* @brief A simple mutex.
*
* Mutexes can be locked using the `mutex_lock()` and `mutex_unlock()` methods
* respectively. The former will block until the lock is acquired and thus
* should never be used from interrupt context. Use `mutex_trylock()` if you
* don't want blocking.
* Mutexes can be locked and unlocked using the `mutex_lock()` and
* `mutex_unlock()` methods respectively. The former will block until the lock
* is acquired and thus should never be used from interrupt context.
* Use `mutex_trylock()` if you don't want blocking.
*/
struct mutex {
uint8_t lock; /**< Current lock value, don't read directly */
spin_t wait_queue_lock;
struct list_head wait_queue; /**< -> mutex_wait::link */
};
struct mutex_wait {
struct list_head link;
struct task *task;
};
/**
* @brief Internal assembly routine for `mutex_lock()`.
* @private
*/
extern void _mutex_lock(uint8_t *lock);
/**
* @brief Internal assembly routine for `mutex_trylock()`.
* @private
@ -49,28 +100,23 @@ extern void _mutex_unlock(uint8_t *lock);
/**
 * @brief Initialize a mutex to the unlocked state.
 *
 * Fix: the original left `wait_queue_lock` uninitialized; unless the
 * enclosing `struct mutex` happened to be zeroed, the very first
 * `mutex_lock()`/`mutex_unlock()` on a contended mutex would spin on a
 * garbage lock byte.  Initialize it explicitly.
 *
 * @param mutex Mutex to initialize
 */
__always_inline void mutex_init(struct mutex *mutex)
{
	mutex->lock = 0;
	spin_init(&mutex->wait_queue_lock);
	list_init(&mutex->wait_queue);
}
/**
* @brief Acquire an exclusive lock on a mutex.
* This call will block until the lock was acquired and therefore cannot fail.
* This call may sleep if the lock cannot be acquired instantly.
*
* @param mutex Mutex to lock
*/
__always_inline void mutex_lock(struct mutex *mutex)
{
_mutex_lock(&mutex->lock);
}
void mutex_lock(struct mutex *mutex);
/**
* @brief Release an exclusive lock on a mutex.
*
* @param mutex Mutex to unlock
*/
__always_inline void mutex_unlock(struct mutex *mutex)
{
_mutex_unlock(&mutex->lock);
}
void mutex_unlock(struct mutex *mutex);
/**
* @brief Attempt to acquire an exclusive lock on a mutex.
@ -91,11 +137,11 @@ __always_inline int mutex_trylock(struct mutex *mutex)
* @brief Determine whether a mutex is locked.
*
* @param mutex Mutex to get the lock value of
* @returns Nonzero if the mutex is locked, zero otherwise
* @returns true if the mutex is locked
*/
__always_inline int mutex_is_locked(struct mutex *mutex)
__always_inline bool mutex_is_locked(struct mutex *mutex)
{
return mutex->lock;
return mutex->lock != 0;
}
/**

View file

@ -25,6 +25,8 @@ enum task_state {
TASK_SLEEP,
/** Task is waiting for I/O to flush buffers. */
TASK_IOWAIT,
/** Task is waiting for a mutex to be unlocked. */
TASK_LOCKWAIT,
};
/** @brief Core structure holding information about a task. */

View file

@ -14,6 +14,7 @@ target_sources(ardix_kernel PRIVATE
io.c
kent.c
kevent.c
mutex.c
ringbuf.c
sched.c
serial.c

View file

@ -59,7 +59,7 @@ static void device_kevent_destroy(struct kent *kent)
struct device_kevent *device_kevent_create(struct device *device, enum device_channel channel)
{
struct device_kevent *event = malloc(sizeof(*event));
struct device_kevent *event = atomic_malloc(sizeof(*event));
if (event == NULL)
return NULL;

View file

@ -77,19 +77,12 @@ struct file *file_get(int fd)
return f;
}
#include <arch/debug.h>
void file_put(struct file *f)
{
kent_put(&f->kent);
}
struct io_file_kevent_extra {
struct file *file;
struct task *task;
enum file_kevent_flags flags;
};
struct io_device_kevent_extra {
struct file *file;
struct task *task;
@ -115,43 +108,6 @@ static int io_device_kevent_listener(struct kevent *event, void *_extra)
return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
}
static int io_file_kevent_listener(struct kevent *event, void *_extra)
{
struct io_file_kevent_extra *extra = _extra;
struct file *file = kevent_to_file(event);
if (file != extra->file)
return KEVENT_CB_NONE;
struct file_kevent *file_kevent = kevent_to_file_kevent(event);
if ((file_kevent->flags & extra->flags) == 0)
return KEVENT_CB_NONE;
extra->task->state = TASK_QUEUE;
free(extra);
file_put(extra->file);
kent_put(&extra->task->kent);
return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
}
static int iowait_file(struct file *file, enum file_kevent_flags flags)
{
file_get(file->fd);
kent_get(&current->kent);
struct io_file_kevent_extra *extra = malloc(sizeof(*extra));
if (extra == NULL)
return -ENOMEM;
extra->file = file;
extra->task = current;
extra->flags = flags;
kevent_listener_add(KEVENT_FILE, io_file_kevent_listener, extra);
yield(TASK_IOWAIT);
return 0;
}
static int iowait_device(struct file *file, enum device_channel channel)
{
file_get(file->fd);
@ -177,11 +133,7 @@ ssize_t file_read(void *buf, struct file *file, size_t len)
ssize_t ret = 0;
while (mutex_trylock(&file->lock) != 0) {
ret = iowait_file(file, FILE_KEVENT_UNLOCK);
if (ret != 0)
return ret;
}
mutex_lock(&file->lock);
while (ret < (ssize_t)len) {
ssize_t tmp = file->device->read(buf, file->device, len, file->pos);
@ -214,23 +166,17 @@ ssize_t file_write(struct file *file, const void *buf, size_t len)
ssize_t ret = 0;
while (mutex_trylock(&file->lock) != 0) {
ret = iowait_file(file, FILE_KEVENT_UNLOCK);
if (ret != 0)
return ret;
}
mutex_lock(&file->lock);
while (ret < (ssize_t)len) {
ssize_t tmp = file->device->write(file->device, buf, len, file->pos);
if (tmp < 0) {
if (tmp == -EBUSY) {
__breakpoint;
tmp = iowait_device(file, DEVICE_CHANNEL_OUT);
if (tmp < 0) {
ret = tmp;
break;
}
__breakpoint;
} else {
ret = tmp;
break;
@ -259,7 +205,7 @@ static void file_kevent_destroy(struct kent *kent)
struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags)
{
struct file_kevent *event = malloc(sizeof(*event));
struct file_kevent *event = atomic_malloc(sizeof(*event));
if (event == NULL)
return NULL;

63
kernel/mutex.c Normal file
View file

@ -0,0 +1,63 @@
/* See the end of this file for copyright, license, and warranty information. */
#include <arch-generic/do_switch.h>
#include <ardix/list.h>
#include <ardix/malloc.h>
#include <ardix/mutex.h>
#include <ardix/sched.h>
#include <ardix/util.h>
#include <errno.h>
#include <stddef.h>
/**
 * @brief Acquire an exclusive lock on a mutex, sleeping if it is contended.
 *
 * Fix over the original: a lost-wakeup race.  If the holder released the
 * mutex after our first failed trylock but before we were inserted into the
 * wait queue, `mutex_unlock()` saw an empty queue and simply cleared the
 * lock byte — and this task then slept in TASK_LOCKWAIT forever even though
 * the mutex was free.  We close the window by retrying the trylock while
 * holding `wait_queue_lock`, which `mutex_unlock()` must also take before
 * inspecting the queue.
 *
 * Must not be called from interrupt context (may sleep).
 *
 * @param mutex Mutex to lock
 */
void mutex_lock(struct mutex *mutex)
{
	/* fast path: uncontended lock */
	if (mutex_trylock(mutex) == 0)
		return;

	struct mutex_wait *entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		_spin_lock(&mutex->lock); /* out of memory, fall back to spinning */
		return;
	}
	entry->task = current;

	spin_lock(&mutex->wait_queue_lock);

	/*
	 * Retry under wait_queue_lock: if the holder unlocked between the
	 * trylock above and this point, it found the queue empty and will
	 * never wake us — so we must notice the free lock ourselves.
	 */
	if (mutex_trylock(mutex) == 0) {
		spin_unlock(&mutex->wait_queue_lock);
		free(entry);
		return;
	}

	list_insert(&mutex->wait_queue, &entry->link);
	spin_unlock(&mutex->wait_queue_lock);

	/* mutex_unlock() pops us off the queue and hands the lock over */
	yield(TASK_LOCKWAIT);
}
/**
 * @brief Release an exclusive lock on a mutex.
 *
 * If another task is blocked on the mutex, the lock is handed over to it
 * directly and control switches to that task immediately.
 *
 * @param mutex Mutex to unlock
 */
void mutex_unlock(struct mutex *mutex)
{
	struct mutex_wait *waiter = NULL;

	/* pop the first waiter, if any, under the wait queue spinlock */
	spin_lock(&mutex->wait_queue_lock);
	if (!list_is_empty(&mutex->wait_queue)) {
		waiter = list_first_entry(&mutex->wait_queue, struct mutex_wait, link);
		list_delete(&waiter->link);
	}
	spin_unlock(&mutex->wait_queue_lock);

	if (waiter != NULL) {
		struct task *task = waiter->task;
		free(waiter);
		/*
		 * Direct handoff: mutex->lock is intentionally left set, so the
		 * woken task owns the mutex without re-acquiring it.
		 * NOTE(review): we requeue ourselves and switch straight to the
		 * waiter, whose state was TASK_LOCKWAIT — presumably do_switch()
		 * (or the waiter itself) marks it runnable again; confirm, since
		 * can_run() in the scheduler rejects TASK_LOCKWAIT.
		 */
		current->state = TASK_QUEUE;
		do_switch(current, task);
	} else {
		/* nobody is waiting; simply clear the lock byte */
		_mutex_unlock(&mutex->lock);
	}
}
/*
* This file is part of Ardix.
* Copyright (c) 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/

View file

@ -90,6 +90,7 @@ static inline bool can_run(const struct task *task)
return true;
case TASK_DEAD:
case TASK_IOWAIT:
case TASK_LOCKWAIT:
return false;
}

View file

@ -85,9 +85,7 @@ ssize_t serial_read(void *dest, struct serial_device *dev, size_t len)
{
ssize_t ret;
atomic_enter();
ret = (ssize_t)ringbuf_read(dest, dev->rx, len);
atomic_leave();
return ret;
}
@ -96,9 +94,7 @@ ssize_t serial_write(struct serial_device *dev, const void *data, size_t len)
{
ssize_t ret;
atomic_enter();
ret = arch_serial_write(dev, data, len);
atomic_leave();
return ret;
}