/* See the end of this file for copyright, license, and warranty information. */

/**
 * @file sched.c
 * @brief Simple round-robin scheduler.
 *
 * Tasks are stored in a lookup table, `tasks`, which is indexed by pid.
 * The global `current` variable points to the task that is currently running;
 * it must only be accessed from scheduling context (i.e. from within a
 * syscall or scheduling interrupt handler).
 *
 * When `schedule()` is called, it first processes the kevent queue, in which
 * irq handlers store broadcasts for changes in hardware state, such as a DMA
 * buffer having been fully transmitted. Tasks register an event listener for
 * the event they are waiting for before entering I/O wait, and clear their
 * waiting flag in the listener callback.
 *
 * After all events are processed, `schedule()` iterates over the task table,
 * starting from the task after the one that was just running, and chooses the
 * first one it encounters that is suitable for being woken back up (i.e. one
 * in state `TASK_QUEUE`, or in `TASK_SLEEP` with its sleep period expired).
 * Thus, the previously running task is only executed again if no other task
 * is ready to run. If no task is runnable at all, the idle task is selected.
 *
 * The last step is the in-kernel context switch to the next task to be run,
 * which is performed by `do_switch()`. This routine stores the current
 * register state in the old task's TCB and loads the registers from the new
 * one. Execution then continues where the task being switched to previously
 * called `do_switch()`, and eventually returns to userspace by returning
 * from the exception handler.
 */
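
/*
 * Usage sketch (illustrative only; `example_entry` and `handle_error` are
 * made-up names, not part of the kernel):
 *
 *	static int example_entry(void)
 *	{
 *		sys_sleep(500);		// enter TASK_SLEEP for roughly 500 ms
 *		return 0;
 *	}
 *
 *	long pid = sys_exec(example_entry);
 *	if (pid < 0)
 *		handle_error(pid);	// -EAGAIN or -ENOMEM
 *
 * The spawned task is placed in the `tasks` table in state TASK_QUEUE and is
 * picked up by `schedule()` on a subsequent scheduling interrupt or syscall.
 */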

#include <arch-generic/do_switch.h>
#include <arch-generic/sched.h>
#include <arch-generic/watchdog.h>

#include <ardix/atomic.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/task.h>
#include <ardix/types.h>

#include <errno.h>
#include <stddef.h>
#include <string.h>

extern uint32_t _sstack;
extern uint32_t _estack;

static struct task *tasks[CONFIG_SCHED_MAXTASK];
static MUTEX(tasks_lock);
struct task *volatile current;

static struct task kernel_task;
static struct task idle_task;
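
/**
 * @brief kent destroy callback for tasks.
 *
 * Removes the task from the `tasks` table and frees its stack as well as
 * the task structure itself.
 */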
static void task_destroy(struct kent *kent)
{
	struct task *task = container_of(kent, struct task, kent);

	mutex_lock(&tasks_lock);
	tasks[task->pid] = NULL;
	mutex_unlock(&tasks_lock);

	kfree(task->stack);
	kfree(task);
}
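
/**
 * @brief Initialize the scheduler.
 *
 * Sets up the kernel task (pid 0) and the idle task, initializes the
 * watchdog, and configures the scheduling interrupt via
 * `arch_sched_init(CONFIG_SCHED_FREQ)`.
 *
 * @returns 0 on success, or a negative error number on failure
 */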
int sched_init(void)
{
	int err;

	kernel_task.kent.parent = kent_root;
	kernel_task.kent.destroy = task_destroy;
	err = kent_init(&kernel_task.kent);
	if (err != 0)
		goto out;

	memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
	kernel_task.bottom = &_estack;
	/* gcc thinks &_estack is an array of size 1 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
	kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
#pragma GCC diagnostic pop
	kernel_task.pid = 0;
	kernel_task.state = TASK_RUNNING;

	list_init(&kernel_task.pending_sigchld);
	mutex_init(&kernel_task.pending_sigchld_lock);

	tasks[0] = &kernel_task;
	current = &kernel_task;

	for (unsigned int i = 1; i < ARRAY_SIZE(tasks); i++)
		tasks[i] = NULL;

	err = arch_watchdog_init();
	if (err != 0)
		goto out;

	idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
	if (idle_task.stack == NULL) {
		err = -ENOMEM;
		goto out;
	}
	idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
	idle_task.pid = -1;
	idle_task.state = TASK_QUEUE;
	list_init(&idle_task.pending_sigchld);
	mutex_init(&idle_task.pending_sigchld_lock);
	task_init(&idle_task, _idle);

	err = arch_sched_init(CONFIG_SCHED_FREQ);
	if (err != 0)
		goto out;

	/*
	 * we don't really need to deallocate resources on error because we
	 * are going to panic anyways if the scheduler fails to initialize.
	 */
out:
	return err;
}

/**
 * @brief Determine whether the specified task is a candidate for execution.
 *
 * This function is only called from one place, `schedule()`, and is
 * performance critical, hence the `__always_inline` attribute.
 *
 * @param task The task
 * @returns whether `task` could be run next
 */
__always_inline
static bool can_run(const struct task *task)
{
	switch (task->state) {
	case TASK_SLEEP:
		return tick - task->last_tick >= task->sleep;
	case TASK_QUEUE:
	case TASK_RUNNING:
		return true;
	case TASK_DEAD:
	case TASK_IOWAIT:
	case TASK_LOCKWAIT:
	case TASK_WAITPID:
		return false;
	}

	return false; /* this shouldn't be reached */
}
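
/**
 * @brief Process pending kevents and switch to the next runnable task.
 *
 * See the comment at the top of this file for a description of the selection
 * algorithm.  Must only be called from scheduling context, i.e. from within
 * a syscall or the scheduling interrupt handler.
 */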
void schedule(void)
{
	atomic_enter();

	struct task *old = current;
	pid_t nextpid = old->pid;

	struct task *new = NULL;

	kevents_process();

	if (old->state == TASK_RUNNING)
		old->state = TASK_QUEUE;

	for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {
		/*
		 * increment nextpid before accessing the task table
		 * because it is -1 if the idle task was running
		 */
		nextpid++;
		nextpid %= ARRAY_SIZE(tasks);

		struct task *tmp = tasks[nextpid];
		if (tmp != NULL && can_run(tmp)) {
			new = tmp;
			break;
		}
	}

	if (new == NULL)
		new = &idle_task;

	new->state = TASK_RUNNING;
	new->last_tick = tick;
	current = new;

	atomic_leave();

	if (old != new)
		do_switch(old, new);
}
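
/**
 * @brief Put the current task into `state` and invoke the scheduler.
 */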
void yield(enum task_state state)
{
	current->state = state;
	schedule();
}
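
/**
 * @brief `sleep` syscall: put the current task to sleep for approximately
 * `millis` milliseconds (tick based; see the TODOs in the body for caveats).
 */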
long sys_sleep(unsigned long int millis)
{
	current->sleep = ms_to_ticks(millis);
	yield(TASK_SLEEP);
	/* TODO: return actual milliseconds */
	/*
	 * TODO: actually, use fucking hardware timers which were specifically
	 * invented for this exact kind of feature because (1) the tick
	 * resolution is often less than 1 ms and (2) ticks aren't really
	 * supposed to be guaranteed to happen at regular intervals and
	 * (3) the scheduler doesn't even check whether there is a task
	 * whose sleep period just expired
	 */
	return 0;
}
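
/**
 * @brief `exec` syscall: spawn a new task executing `entry`.
 *
 * The child is assigned the first free slot in the `tasks` table and becomes
 * a kent child of the current task.
 *
 * @param entry Entry point of the new task
 * @returns the new task's pid, `-EAGAIN` if the task table is full, or
 *          `-ENOMEM` if out of memory
 */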
long sys_exec(int (*entry)(void))
{
	pid_t pid;
	struct task *child = NULL;

	mutex_lock(&tasks_lock);

	for (pid = 1; pid < CONFIG_SCHED_MAXTASK; pid++) {
		if (tasks[pid] == NULL)
			break;
	}
	if (pid == CONFIG_SCHED_MAXTASK) {
		pid = -EAGAIN;
		goto out;
	}

	child = kmalloc(sizeof(*child));
	if (child == NULL) {
		pid = -ENOMEM;
		goto out;
	}

	child->pid = pid;
	child->stack = kmalloc(CONFIG_STACK_SIZE);
	if (child->stack == NULL) {
		pid = -ENOMEM;
		goto err_stack_malloc;
	}

	child->kent.parent = &current->kent;
	child->kent.destroy = task_destroy;
	kent_init(&child->kent);

	child->bottom = child->stack + CONFIG_STACK_SIZE;
	task_init(child, entry);

	list_init(&child->pending_sigchld);
	mutex_init(&child->pending_sigchld_lock);

	child->state = TASK_QUEUE;
	tasks[pid] = child;
	goto out;

err_stack_malloc:
	kfree(child);
out:
	mutex_unlock(&tasks_lock);
	return pid;
}

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */