file: add file kevent and I/O wait support

This commit is contained in:
anna 2021-08-08 20:58:54 +02:00
parent 364290f192
commit 83ee9603e7
Signed by: fef
GPG key ID: EC22E476DC2D3D84
6 changed files with 244 additions and 50 deletions

View file

@ -32,10 +32,15 @@ enum device_channel {
};
/**
 * @brief Kevent payload for device state changes.
 *
 * Wraps a generic kevent together with the device channel that changed,
 * so listeners can filter for the direction (in/out) they care about.
 */
struct device_kevent {
	/* embedded generic event; kind is set to KEVENT_DEVICE on creation */
	struct kevent kevent;
	/* which channel of the device this event refers to */
	enum device_channel channel;
};
/**
 * @brief Get the containing struct device_kevent from a generic kevent.
 *
 * Only valid for events that are actually embedded in a struct
 * device_kevent, i.e. whose kind is KEVENT_DEVICE.
 */
__always_inline struct device_kevent *kevent_to_device_kevent(struct kevent *event)
{
	return container_of(event, struct device_kevent, kevent);
}
__always_inline struct device *kevent_to_device(struct kevent *event)
{
return container_of(event->kent.parent, struct device, kent);

View file

@ -3,6 +3,7 @@
#pragma once
#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/mutex.h>
#include <ardix/types.h>
@ -28,6 +29,33 @@ void file_put(struct file *file);
ssize_t file_write(struct file *file, const void *buf, size_t len);
ssize_t file_read(void *buf, struct file *file, size_t len);
/** @brief Bit flags describing what happened to a file. */
enum file_kevent_flags {
	/** @brief A read on the file has completed */
	FILE_KEVENT_READ = (1 << 0),
	/** @brief A write to the file has completed */
	FILE_KEVENT_WRITE = (1 << 1),
	/** @brief File was closed (not dispatched in this file — presumably by close paths) */
	FILE_KEVENT_CLOSE = (1 << 2),
	/** @brief End of file was reached (not dispatched in this file) */
	FILE_KEVENT_EOF = (1 << 3),
	/** @brief The file's lock was released; iowait_file() waits on this to retry locking */
	FILE_KEVENT_UNLOCK = (1 << 4),
};
/**
 * @brief Kevent payload for file events: a generic kevent plus the
 * flags describing what happened to the file.
 */
struct file_kevent {
	struct kevent kevent;	/* embedded generic event (kind == KEVENT_FILE) */
	enum file_kevent_flags flags;	/* what happened to the file */
};
/**
 * @brief Get the file a kevent refers to.
 *
 * Follows the kevent's kent parent pointer, which file_kevent_create()
 * points at the file's embedded kent.
 */
__always_inline struct file *kevent_to_file(struct kevent *event)
{
	return container_of(event->kent.parent, struct file, kent);
}
/**
 * @brief Get the containing struct file_kevent from a generic kevent.
 *
 * Only valid for events embedded in a struct file_kevent, i.e. whose
 * kind is KEVENT_FILE.
 */
__always_inline struct file_kevent *kevent_to_file_kevent(struct kevent *event)
{
	return container_of(event, struct file_kevent, kevent);
}
struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags);
void file_kevent_create_and_dispatch(struct file *f, enum file_kevent_flags flags);
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

View file

@ -21,7 +21,10 @@
*/
/** @brief Kinds of kernel events; one dispatch queue exists per kind. */
enum kevent_kind {
	/** @brief Device state has changed */
	KEVENT_DEVICE,
	/** @brief File has changed */
	KEVENT_FILE,
	/** @brief Total number of event kinds (must always be last) */
	KEVENT_KIND_COUNT,
};

View file

@ -53,7 +53,7 @@ int device_init(struct device *dev)
/**
 * @brief kent destructor for struct device_kevent.
 *
 * Walks from the embedded kent back out to the enclosing device_kevent
 * allocation and frees it.  The stale duplicate declaration of the
 * local (left over from the member rename `event` -> `kevent`) is gone.
 */
static void device_kevent_destroy(struct kent *kent)
{
	struct kevent *event = container_of(kent, struct kevent, kent);
	struct device_kevent *device_kevent = container_of(event, struct device_kevent, kevent);
	free(device_kevent);
}
@ -64,11 +64,11 @@ struct device_kevent *device_kevent_create(struct device *device, enum device_ch
return NULL;
event->channel = channel;
event->event.kind = KEVENT_DEVICE;
event->kevent.kind = KEVENT_DEVICE;
event->event.kent.parent = &device->kent;
event->event.kent.destroy = device_kevent_destroy;
int err = kent_init(&event->event.kent);
event->kevent.kent.parent = &device->kent;
event->kevent.kent.destroy = device_kevent_destroy;
int err = kent_init(&event->kevent.kent);
if (err) {
free(event);
event = NULL;
@ -81,7 +81,7 @@ void device_kevent_create_and_dispatch(struct device *device, enum device_channe
{
struct device_kevent *event = device_kevent_create(device, channel);
if (event != NULL)
kevent_dispatch(&event->event);
kevent_dispatch(&event->kevent);
}
/*

View file

@ -3,6 +3,7 @@
#include <ardix/device.h>
#include <ardix/file.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <config.h>
#include <errno.h>
@ -76,42 +77,213 @@ struct file *file_get(int fd)
return f;
}
#include <arch/debug.h>
/**
 * @brief Release a reference to a file previously taken with file_get().
 *
 * Drops the kent reference held on the file.
 */
void file_put(struct file *f)
{
	kent_put(&f->kent);
}
/**
 * @brief Listener context for iowait_file().
 *
 * Heap-allocated by iowait_file() and freed by io_file_kevent_listener()
 * when the awaited event arrives.
 */
struct io_file_kevent_extra {
	struct file *file;	/* file being waited on (a reference is held) */
	struct task *task;	/* sleeping task to wake (a kent reference is held) */
	enum file_kevent_flags flags;	/* any of these flags wakes the task */
};
/**
 * @brief Listener context for iowait_device().
 *
 * Heap-allocated by iowait_device() and freed by io_device_kevent_listener()
 * when the awaited event arrives.
 */
struct io_device_kevent_extra {
	struct file *file;	/* file whose device is being waited on (reference held) */
	struct task *task;	/* sleeping task to wake (kent reference held) */
	enum device_channel channel;	/* device channel that wakes the task */
};
static int io_device_kevent_listener(struct kevent *event, void *_extra)
{
struct io_device_kevent_extra *extra = _extra;
struct device *device = kevent_to_device(event);
if (device != extra->file->device)
return KEVENT_CB_NONE;
struct device_kevent *device_kevent = kevent_to_device_kevent(event);
if (device_kevent->channel != extra->channel)
return KEVENT_CB_NONE;
extra->task->state = TASK_QUEUE;
free(extra);
file_put(extra->file);
kent_put(&extra->task->kent);
return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
}
static int io_file_kevent_listener(struct kevent *event, void *_extra)
{
struct io_file_kevent_extra *extra = _extra;
struct file *file = kevent_to_file(event);
if (file != extra->file)
return KEVENT_CB_NONE;
struct file_kevent *file_kevent = kevent_to_file_kevent(event);
if ((file_kevent->flags & extra->flags) == 0)
return KEVENT_CB_NONE;
extra->task->state = TASK_QUEUE;
free(extra);
file_put(extra->file);
kent_put(&extra->task->kent);
return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
}
/**
 * @brief Put the current task to sleep until a file event occurs.
 *
 * Takes a reference on both the file and the current task; these are
 * released by io_file_kevent_listener() when the task is woken up.
 *
 * @param file file to wait on
 * @param flags any of these file_kevent_flags wakes the task
 * @returns 0 on success, or -ENOMEM if the listener context allocation failed
 */
static int iowait_file(struct file *file, enum file_kevent_flags flags)
{
	struct io_file_kevent_extra *extra = malloc(sizeof(*extra));
	if (extra == NULL)
		return -ENOMEM;

	/*
	 * Take the references only after allocation has succeeded so the
	 * error path doesn't leak them (the listener releases them later).
	 */
	file_get(file->fd);
	kent_get(&current->kent);

	extra->file = file;
	extra->task = current;
	extra->flags = flags;

	kevent_listener_add(KEVENT_FILE, io_file_kevent_listener, extra);
	yield(TASK_IOWAIT);
	return 0;
}
/**
 * @brief Put the current task to sleep until a device event occurs.
 *
 * Takes a reference on both the file and the current task; these are
 * released by io_device_kevent_listener() when the task is woken up.
 *
 * @param file file whose backing device to wait on
 * @param channel device channel (in/out) that wakes the task
 * @returns 0 on success, or -ENOMEM if the listener context allocation failed
 */
static int iowait_device(struct file *file, enum device_channel channel)
{
	struct io_device_kevent_extra *extra = malloc(sizeof(*extra));
	if (extra == NULL)
		return -ENOMEM;

	/*
	 * Take the references only after allocation has succeeded so the
	 * error path doesn't leak them (the listener releases them later).
	 */
	file_get(file->fd);
	kent_get(&current->kent);

	extra->file = file;
	extra->task = current;
	extra->channel = channel;

	kevent_listener_add(KEVENT_DEVICE, io_device_kevent_listener, extra);
	yield(TASK_IOWAIT);
	return 0;
}
/**
 * @brief Read up to len bytes from a file into buf.
 *
 * Sleeps (via iowait) both while the file lock is contended and while the
 * underlying device has no data yet (-EBUSY).  Dispatches a
 * FILE_KEVENT_READ | FILE_KEVENT_UNLOCK event when done.
 *
 * Stale pre-commit lines that were interleaved here (duplicate `ret`
 * declaration, stray mutex_unlock) are removed, and the iowait_device()
 * return value is now checked like in file_write().
 *
 * @returns the number of bytes read, or a negative error number
 */
ssize_t file_read(void *buf, struct file *file, size_t len)
{
	if (len == 0)
		return 0;

	ssize_t ret = 0;

	/* sleep until the lock is released rather than spinning on it */
	while (mutex_trylock(&file->lock) != 0) {
		ret = iowait_file(file, FILE_KEVENT_UNLOCK);
		if (ret != 0)
			return ret;
	}

	while (ret < (ssize_t)len) {
		/*
		 * NOTE(review): passes len rather than len - ret; a device that
		 * returns more than len - ret bytes would overrun buf -- confirm
		 * the device read contract.
		 */
		ssize_t tmp = file->device->read(buf, file->device, len, file->pos);
		if (tmp < 0) {
			if (tmp == -EBUSY) {
				/* no data yet, sleep until the device has some */
				tmp = iowait_device(file, DEVICE_CHANNEL_IN);
				if (tmp < 0) {
					ret = tmp;
					break;
				}
			} else {
				ret = tmp;
				break;
			}
		}

		if (file->type == FILE_TYPE_REGULAR)
			file->pos += tmp;
		ret += tmp;
		buf += tmp;
	}

	mutex_unlock(&file->lock);
	file_kevent_create_and_dispatch(file, FILE_KEVENT_READ | FILE_KEVENT_UNLOCK);
	return ret;
}
/**
 * @brief Write up to len bytes from buf to a file.
 *
 * Sleeps (via iowait) both while the file lock is contended and while the
 * underlying device cannot accept data yet (-EBUSY).  Dispatches a
 * FILE_KEVENT_WRITE | FILE_KEVENT_UNLOCK event when done.
 *
 * Stale pre-commit lines that were interleaved here and the leftover
 * `__breakpoint` debug statements are removed.
 *
 * @returns the number of bytes written, or a negative error number
 */
ssize_t file_write(struct file *file, const void *buf, size_t len)
{
	if (len == 0)
		return 0;

	ssize_t ret = 0;

	/* sleep until the lock is released rather than spinning on it */
	while (mutex_trylock(&file->lock) != 0) {
		ret = iowait_file(file, FILE_KEVENT_UNLOCK);
		if (ret != 0)
			return ret;
	}

	while (ret < (ssize_t)len) {
		/*
		 * NOTE(review): passes len rather than len - ret; confirm the
		 * device write contract never consumes more than len - ret bytes.
		 */
		ssize_t tmp = file->device->write(file->device, buf, len, file->pos);
		if (tmp < 0) {
			if (tmp == -EBUSY) {
				/* device busy, sleep until it can take more data */
				tmp = iowait_device(file, DEVICE_CHANNEL_OUT);
				if (tmp < 0) {
					ret = tmp;
					break;
				}
			} else {
				ret = tmp;
				break;
			}
		}

		if (file->type == FILE_TYPE_REGULAR)
			file->pos += tmp;
		ret += tmp;
		buf += tmp;
	}

	mutex_unlock(&file->lock);
	file_kevent_create_and_dispatch(file, FILE_KEVENT_WRITE | FILE_KEVENT_UNLOCK);
	return ret;
}
/**
 * @brief kent destructor for struct file_kevent.
 *
 * Resolves the enclosing file_kevent allocation from the embedded kent
 * (kent -> kevent -> file_kevent) and frees it.
 */
static void file_kevent_destroy(struct kent *kent)
{
	struct kevent *event = container_of(kent, struct kevent, kent);
	free(container_of(event, struct file_kevent, kevent));
}
/**
 * @brief Allocate and initialize a file kevent.
 *
 * The event's kind is KEVENT_FILE and its kent parent is the file's kent.
 *
 * @param f file the event refers to
 * @param flags what happened to the file
 * @returns the new event, or NULL on allocation or kent_init() failure
 */
struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags)
{
	struct file_kevent *kev = malloc(sizeof(*kev));
	if (kev == NULL)
		return NULL;

	kev->flags = flags;
	kev->kevent.kind = KEVENT_FILE;
	kev->kevent.kent.parent = &f->kent;
	kev->kevent.kent.destroy = file_kevent_destroy;

	if (kent_init(&kev->kevent.kent) != 0) {
		free(kev);
		return NULL;
	}
	return kev;
}
/**
 * @brief Convenience wrapper: create a file kevent and dispatch it.
 *
 * Allocation failure is silently ignored (best-effort notification).
 */
void file_kevent_create_and_dispatch(struct file *f, enum file_kevent_flags flags)
{
	struct file_kevent *event = file_kevent_create(f, flags);
	if (event == NULL)
		return;
	kevent_dispatch(&event->kevent);
}
/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.

View file

@ -53,10 +53,9 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_
struct kevent *event, *tmp_event;
	/*
	 * This method runs from scheduler context which has lower exception
	 * priority than irqs, so in theory this should never fail. Still, we
	 * only use trylock just in case.
	 */
if (mutex_trylock(&queue->lock) == 0) {
list_for_each_entry_safe(&queue->list, event, tmp_event, link) {
@ -86,15 +85,14 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_
void kevents_process(void)
{
	/*
	 * Same thing as for process_single_queue: This should never fail
	 * because scheduling interrupts have the lowest exception priority.
	 */
if (mutex_trylock(&kev_cache_lock) == 0) {
struct kevent *cursor, *tmp;
list_for_each_entry_safe(&kev_cache, cursor, tmp, link) {
list_delete(&cursor->link);
list_insert(&kev_queues[cursor->kind], &cursor->link);
list_insert(&kev_queues[cursor->kind].list, &cursor->link);
}
mutex_unlock(&kev_cache_lock);
@ -114,36 +112,24 @@ void kevent_dispatch(struct kevent *event)
mutex_unlock(&queue->lock);
} else {
		/*
		 * If we got to here it means we preempted the scheduler.
		 * We just toss the event into a temporary pile and let the
		 * scheduler sort out the mess when it calls kevents_process()
		 * the next time.
		 */
if (mutex_trylock(&kev_cache_lock) == 0) {
list_insert(&kev_cache, &event->link);
mutex_unlock(&kev_cache_lock);
} else {
			/*
			 * If we ever make it to here, something of unfathomable stupidity has
			 * happened because there are only two contexts from which we are supposed
			 * to be accessing the event queue--irq and scheduler. That means we always
			 * have either the main queue or the temporary cache available to us, and
			 * if not, we forgot to release a lock during yet another sleep deprived
			 * episode of late night coding. Time to make us pay for what we did then.
			 */
if (mutex_trylock(&queue->lock) == 0) {
list_insert(&queue->list, &event->link);
mutex_unlock(&queue->lock);
} else {
kevent_put(event);
}
}
}
}