From 83ee9603e7caf7b40d3c4532b7aee70b7bacb46e Mon Sep 17 00:00:00 2001 From: fef Date: Sun, 8 Aug 2021 20:58:54 +0200 Subject: [PATCH] file: add file kevent and i/o wait support --- include/ardix/device.h | 7 +- include/ardix/file.h | 28 ++++++ include/ardix/kevent.h | 3 + kernel/device.c | 12 +-- kernel/fs/file.c | 196 ++++++++++++++++++++++++++++++++++++++--- kernel/kevent.c | 48 ++++------ 6 files changed, 244 insertions(+), 50 deletions(-) diff --git a/include/ardix/device.h b/include/ardix/device.h index 689e1e3..4fcbfca 100644 --- a/include/ardix/device.h +++ b/include/ardix/device.h @@ -32,10 +32,15 @@ enum device_channel { }; struct device_kevent { - struct kevent event; + struct kevent kevent; enum device_channel channel; }; +__always_inline struct device_kevent *kevent_to_device_kevent(struct kevent *event) +{ + return container_of(event, struct device_kevent, kevent); +} + __always_inline struct device *kevent_to_device(struct kevent *event) { return container_of(event->kent.parent, struct device, kent); diff --git a/include/ardix/file.h b/include/ardix/file.h index faf73ec..2dd9e90 100644 --- a/include/ardix/file.h +++ b/include/ardix/file.h @@ -3,6 +3,7 @@ #pragma once #include +#include #include #include @@ -28,6 +29,33 @@ void file_put(struct file *file); ssize_t file_write(struct file *file, const void *buf, size_t len); ssize_t file_read(void *buf, struct file *file, size_t len); +enum file_kevent_flags { + FILE_KEVENT_READ = (1 << 0), + FILE_KEVENT_WRITE = (1 << 1), + FILE_KEVENT_CLOSE = (1 << 2), + FILE_KEVENT_EOF = (1 << 3), + FILE_KEVENT_UNLOCK = (1 << 4), +}; + +struct file_kevent { + struct kevent kevent; + enum file_kevent_flags flags; +}; + +__always_inline struct file *kevent_to_file(struct kevent *event) +{ + return container_of(event->kent.parent, struct file, kent); +} + +__always_inline struct file_kevent *kevent_to_file_kevent(struct kevent *event) +{ + return container_of(event, struct file_kevent, kevent); +} + +struct file_kevent 
*file_kevent_create(struct file *f, enum file_kevent_flags flags); + +void file_kevent_create_and_dispatch(struct file *f, enum file_kevent_flags flags); + /* * This file is part of Ardix. * Copyright (c) 2020, 2021 Felix Kopp . diff --git a/include/ardix/kevent.h b/include/ardix/kevent.h index a04b8a4..6ac38a5 100644 --- a/include/ardix/kevent.h +++ b/include/ardix/kevent.h @@ -21,7 +21,10 @@ */ enum kevent_kind { + /** @brief Device state has changed */ KEVENT_DEVICE, + /** @brief File has changed */ + KEVENT_FILE, KEVENT_KIND_COUNT, }; diff --git a/kernel/device.c b/kernel/device.c index 77a304f..aaf64c9 100644 --- a/kernel/device.c +++ b/kernel/device.c @@ -53,7 +53,7 @@ int device_init(struct device *dev) static void device_kevent_destroy(struct kent *kent) { struct kevent *event = container_of(kent, struct kevent, kent); - struct device_kevent *device_kevent = container_of(event, struct device_kevent, event); + struct device_kevent *device_kevent = container_of(event, struct device_kevent, kevent); free(device_kevent); } @@ -64,11 +64,11 @@ struct device_kevent *device_kevent_create(struct device *device, enum device_ch return NULL; event->channel = channel; - event->event.kind = KEVENT_DEVICE; + event->kevent.kind = KEVENT_DEVICE; - event->event.kent.parent = &device->kent; - event->event.kent.destroy = device_kevent_destroy; - int err = kent_init(&event->event.kent); + event->kevent.kent.parent = &device->kent; + event->kevent.kent.destroy = device_kevent_destroy; + int err = kent_init(&event->kevent.kent); if (err) { free(event); event = NULL; @@ -81,7 +81,7 @@ void device_kevent_create_and_dispatch(struct device *device, enum device_channe { struct device_kevent *event = device_kevent_create(device, channel); if (event != NULL) - kevent_dispatch(&event->event); + kevent_dispatch(&event->kevent); } /* diff --git a/kernel/fs/file.c b/kernel/fs/file.c index dbfbe4d..92bc30a 100644 --- a/kernel/fs/file.c +++ b/kernel/fs/file.c @@ -3,6 +3,7 @@ #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -76,42 +77,213 @@ struct file *file_get(int fd)
 	return f;
 }
 
+#include 
 void file_put(struct file *f)
 {
 	kent_put(&f->kent);
 }
 
+struct io_file_kevent_extra {
+	struct file *file;
+	struct task *task;
+	enum file_kevent_flags flags;
+};
+
+struct io_device_kevent_extra {
+	struct file *file;
+	struct task *task;
+	enum device_channel channel;
+};
+
+static int io_device_kevent_listener(struct kevent *event, void *_extra)
+{
+	struct io_device_kevent_extra *extra = _extra;
+
+	struct device *device = kevent_to_device(event);
+	if (device != extra->file->device)
+		return KEVENT_CB_NONE;
+
+	struct device_kevent *device_kevent = kevent_to_device_kevent(event);
+	if (device_kevent->channel != extra->channel)
+		return KEVENT_CB_NONE;
+
+	extra->task->state = TASK_QUEUE;
+	file_put(extra->file);
+	kent_put(&extra->task->kent);
+	free(extra);
+	return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
+}
+
+static int io_file_kevent_listener(struct kevent *event, void *_extra)
+{
+	struct io_file_kevent_extra *extra = _extra;
+
+	struct file *file = kevent_to_file(event);
+	if (file != extra->file)
+		return KEVENT_CB_NONE;
+
+	struct file_kevent *file_kevent = kevent_to_file_kevent(event);
+	if ((file_kevent->flags & extra->flags) == 0)
+		return KEVENT_CB_NONE;
+
+	extra->task->state = TASK_QUEUE;
+	file_put(extra->file);
+	kent_put(&extra->task->kent);
+	free(extra);
+	return KEVENT_CB_LISTENER_DEL | KEVENT_CB_STOP;
+}
+
+static int iowait_file(struct file *file, enum file_kevent_flags flags)
+{
+	struct io_file_kevent_extra *extra = malloc(sizeof(*extra));
+	if (extra == NULL)
+		return -ENOMEM;
+
+	file_get(file->fd);
+	kent_get(&current->kent);
+
+	extra->file = file;
+	extra->task = current;
+	extra->flags = flags;
+
+	kevent_listener_add(KEVENT_FILE, io_file_kevent_listener, extra);
+	yield(TASK_IOWAIT);
+	return 0;
+}
+
+static int iowait_device(struct file *file, enum device_channel channel)
+{
+	struct io_device_kevent_extra *extra = malloc(sizeof(*extra));
+	if (extra == NULL)
+		return -ENOMEM;
+
+	file_get(file->fd);
+	kent_get(&current->kent);
+
+	extra->file = file;
+	extra->task = current;
+	extra->channel = channel;
+
+	kevent_listener_add(KEVENT_DEVICE, io_device_kevent_listener, extra);
+	yield(TASK_IOWAIT);
+	return 0;
+}
+
 ssize_t file_read(void *buf, struct file *file, size_t len)
 {
-	ssize_t ret = mutex_trylock(&file->lock);
+	if (len == 0)
+		return 0;
 
-	if (ret == 0) {
-		ret = file->device->read(buf, file->device, len, file->pos);
-		if (file->type == FILE_TYPE_REGULAR && ret > 0)
-			file->pos += ret;
+	ssize_t ret = 0;
 
-		mutex_unlock(&file->lock);
+	while (mutex_trylock(&file->lock) != 0) {
+		ret = iowait_file(file, FILE_KEVENT_UNLOCK);
+		if (ret != 0)
+			return ret;
 	}
 
+	while (ret < (ssize_t)len) {
+		ssize_t tmp = file->device->read(buf, file->device, len, file->pos);
+		if (tmp < 0) {
+			if (tmp == -EBUSY)
+				tmp = iowait_device(file, DEVICE_CHANNEL_IN);
+			if (tmp < 0) {
+				ret = tmp;
+				break;
+			}
+		}
+
+		if (file->type == FILE_TYPE_REGULAR)
+			file->pos += tmp;
+
+		ret += tmp;
+		buf += tmp;
+	}
+
+	mutex_unlock(&file->lock);
+	file_kevent_create_and_dispatch(file, FILE_KEVENT_READ | FILE_KEVENT_UNLOCK);
+
 	return ret;
 }
 
 ssize_t file_write(struct file *file, const void *buf, size_t len)
 {
-	ssize_t ret = mutex_trylock(&file->lock);
+	if (len == 0)
+		return 0;
 
-	if (ret == 0) {
-		ret = file->device->write(file->device, buf, len, file->pos);
-		if (file->type == FILE_TYPE_REGULAR && ret > 0)
-			file->pos += ret;
+	ssize_t ret = 0;
 
-		mutex_unlock(&file->lock);
+	while (mutex_trylock(&file->lock) != 0) {
+		ret = iowait_file(file, FILE_KEVENT_UNLOCK);
+		if (ret != 0)
+			return ret;
 	}
 
+	while (ret < (ssize_t)len) {
+		ssize_t tmp = file->device->write(file->device, buf, len, file->pos);
+		if (tmp < 0) {
+			if (tmp == -EBUSY) {
+				/* device busy; wait for it to become ready again */
+				tmp = iowait_device(file, DEVICE_CHANNEL_OUT);
+				if (tmp < 0) {
+					ret = tmp;
+					break;
+				}
+				/* tmp is now 0, nothing has been written yet */
+			} else {
+				ret = tmp;
+				break;
+			}
+		}
+
+ if (file->type == FILE_TYPE_REGULAR) + file->pos += tmp; + + ret += tmp; + buf += tmp; + } + + mutex_unlock(&file->lock); + file_kevent_create_and_dispatch(file, FILE_KEVENT_WRITE | FILE_KEVENT_UNLOCK); + return ret; } +static void file_kevent_destroy(struct kent *kent) +{ + struct kevent *kevent = container_of(kent, struct kevent, kent); + struct file_kevent *file_kevent = container_of(kevent, struct file_kevent, kevent); + free(file_kevent); +} + +struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags) +{ + struct file_kevent *event = malloc(sizeof(*event)); + if (event == NULL) + return NULL; + + event->flags = flags; + event->kevent.kind = KEVENT_FILE; + + event->kevent.kent.parent = &f->kent; + event->kevent.kent.destroy = file_kevent_destroy; + int err = kent_init(&event->kevent.kent); + if (err != 0) { + free(event); + event = NULL; + } + + return event; +} + +void file_kevent_create_and_dispatch(struct file *f, enum file_kevent_flags flags) +{ + struct file_kevent *event = file_kevent_create(f, flags); + if (event != NULL) + kevent_dispatch(&event->kevent); +} + /* * This file is part of Ardix. * Copyright (c) 2020, 2021 Felix Kopp . diff --git a/kernel/kevent.c b/kernel/kevent.c index 731a743..0bcb509 100644 --- a/kernel/kevent.c +++ b/kernel/kevent.c @@ -53,10 +53,9 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_ struct kevent *event, *tmp_event; /* - * This method is only invoked from scheduler context which has higher - * exception priority than all irqs, so if claiming the lock on this - * list fails it means we interrupted the irq. The current strategy is - * to just abort and try again during the next system tick. + * This method runs from scheduler context which has lower exception + * priority than irqs, so in theory this should never fail. Still, we + * only use trylock just in case. 
*/ if (mutex_trylock(&queue->lock) == 0) { list_for_each_entry_safe(&queue->list, event, tmp_event, link) { @@ -86,15 +85,14 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_ void kevents_process(void) { /* - * if this fails it means the scheduling interrupt happened while - * processing an irq, just ignore the cache and try again next time - * if that is the case + * Same thing as for process_single_queue: This should never fail + * because scheduling interrupts have the lowest exception priority. */ if (mutex_trylock(&kev_cache_lock) == 0) { struct kevent *cursor, *tmp; list_for_each_entry_safe(&kev_cache, cursor, tmp, link) { list_delete(&cursor->link); - list_insert(&kev_queues[cursor->kind], &cursor->link); + list_insert(&kev_queues[cursor->kind].list, &cursor->link); } mutex_unlock(&kev_cache_lock); @@ -114,36 +112,24 @@ void kevent_dispatch(struct kevent *event) mutex_unlock(&queue->lock); } else { /* - * We shouldn't ever be able to get here because irqs don't interrupt - * each other and if we get interrupted by the scheduler it doesn't - * matter because kevents_process() always releases its lock before - * returning again. If it still happens for whatever stupid reason, - * we insert the event in a temporary unsorted cache that is then - * ordered by the scheduler. + * If we got to here it means we preempted the scheduler. + * We just toss the event into a temporary pile and let the + * scheduler sort out the mess when it calls kevents_process() + * the next time. */ if (mutex_trylock(&kev_cache_lock) == 0) { list_insert(&kev_cache, &event->link); mutex_unlock(&kev_cache_lock); } else { /* - * If we ever make it to here, something *extremely* stupid - * has happened: we couldn't get the lock on the queue (which - * shouldn't ever happen in the first place), *and* the cache - * is locked as well. 
We will have to assume we got suspended - * at some point in these if branches, and just try getting - * the lock on the original queue again. If that fails as - * well, we just give up and discard the event all together. - * - * TODO: This solution is of course far from ideal and has to - * be refactored at some point before the first stable - * release. We'll just deal with that later(TM). + * If we ever make it to here, something of unfathomable stupidity has + * happened because there are only two contexts from which we are supposed + * to be accessing the event queue--irq and scheduler. That means we always + * have either the main queue or the temporary cache available to us, and + * if not, we forgot to release a lock during yet another sleep deprived + * episode of late night coding. Time to make us pay for what we did then. */ - if (mutex_trylock(&queue->lock) == 0) { - list_insert(&queue->list, &event->link); - mutex_unlock(&queue->lock); - } else { - kevent_put(event); - } + } } }