sched: avoid memory allocation for kernel task

anna 2021-08-10 17:49:29 +02:00
parent 4287759c46
commit 10e8b00fb0
Signed by: fef
GPG key ID: EC22E476DC2D3D84

@@ -1,5 +1,35 @@
 /* See the end of this file for copyright, license, and warranty information. */
 
+/**
+ * @file sched.c
+ * @brief Simple round-robin scheduler.
+ *
+ * Tasks are stored in a lookup table, `tasks`, which is indexed by pid.
+ * The global `current` variable points to the task that is currently running;
+ * it must only be accessed from scheduling context (i.e. from within a
+ * syscall or the scheduling interrupt handler).
+ *
+ * When `schedule()` is called, it first processes the kevent queue in which irq
+ * handlers store broadcasts for changes in hardware state, such as a DMA buffer
+ * having been fully transmitted.  Tasks register an event listener for the
+ * event they are waiting for before entering I/O wait, and remove their waiting
+ * flag in the listener callback.
+ *
+ * After all events are processed, `schedule()` iterates over the task table,
+ * starting at the task after the one that was running last, and chooses the
+ * first one it encounters that is suitable for being woken back up (i.e. is
+ * in state `TASK_QUEUE`).  Thus, the previously running task is executed
+ * again only if no other tasks are ready to run.  If no task is runnable,
+ * the idle task is selected.
+ *
+ * The last step is the in-kernel context switch to the next task to be run,
+ * which is performed by `do_switch()`.  This routine stores the current
+ * register state in the old task's TCB and loads the registers from the new
+ * one.  Execution then resumes where the task being switched to had
+ * previously called `do_switch()`, and eventually returns to userspace by
+ * returning from the exception handler.
+ */
+
 #include <arch-generic/do_switch.h>
 #include <arch-generic/sched.h>
 #include <arch-generic/watchdog.h>
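
The I/O wait pattern described in the new header comment can be pictured
with a short sketch. Everything below is illustrative: the listener API
(`kevent_listener_add`, `KEVENT_CB_LISTENER_DEL`), the `KEVENT_DMA_DONE`
event, and the `TASK_IOWAIT` state are assumed names, not identifiers taken
from this diff.

	/* hypothetical one-shot listener, run from schedule()'s kevent pass */
	static int dma_done_listener(struct kevent *event, void *extra)
	{
		struct task *waiter = extra;
		waiter->state = TASK_QUEUE;	/* clear the waiting flag */
		return KEVENT_CB_LISTENER_DEL;	/* deregister after one event */
	}

	/* hypothetical caller: register first, then sleep */
	static void wait_for_dma(void)
	{
		kevent_listener_add(KEVENT_DMA_DONE, dma_done_listener, current);
		yield(TASK_IOWAIT);	/* wakes up after the listener has run */
	}

Registering the listener before yielding matters: if the irq fires in
between, the event still lands in the queue and is delivered on the next
`schedule()` call, so the wakeup cannot be lost.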
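The context switch itself is architecture code behind
`arch-generic/do_switch.h`; at the C level, the tail of `schedule()` can be
imagined roughly as follows (the two-argument signature and the
`new->state` update are assumptions, not code from this diff):

	/* sketch only: hand off the CPU from `old` to `new` */
	struct task *old = current;
	current = new;			/* `new` is the task picked by the loop */
	new->state = TASK_READY;
	do_switch(old, new);	/* saves registers into old->tcb, loads new->tcb;
				 * "returns" only once old is scheduled again */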
@@ -17,15 +47,16 @@
 extern uint32_t _sstack;
 extern uint32_t _estack;
 
-static struct task *tasktab[CONFIG_SCHED_MAXTASK];
+static struct task *tasks[CONFIG_SCHED_MAXTASK];
 struct task *volatile current;
 
+static struct task kernel_task;
 static struct task idle_task;
 
 static void task_destroy(struct kent *kent)
 {
 	struct task *task = container_of(kent, struct task, kent);
-	tasktab[task->pid] = NULL;
+	tasks[task->pid] = NULL;
 	free(task);
 }
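
Because `tasks` is indexed directly by pid, looking up a task is a plain
array access, and a free slot is simply a NULL pointer. A bounds-checked
accessor along these lines (a hypothetical helper, not part of this commit)
makes the invariant explicit:

	/* hypothetical: map a pid to its task, or NULL if the slot is free */
	static inline struct task *task_get(pid_t pid)
	{
		if (pid < 0 || pid >= CONFIG_SCHED_MAXTASK)
			return NULL;
		return tasks[pid];
	}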
@@ -33,26 +64,22 @@ int sched_init(void)
 {
 	int err;
 
-	struct task *ktask = malloc(sizeof(*ktask));
-	if (ktask == NULL)
-		return -ENOMEM;
-
-	ktask->kent.parent = kent_root;
-	ktask->kent.destroy = task_destroy;
-	err = kent_init(&ktask->kent);
+	kernel_task.kent.parent = kent_root;
+	kernel_task.kent.destroy = task_destroy;
+	err = kent_init(&kernel_task.kent);
 	if (err != 0)
 		goto out;
 
-	memset(&ktask->tcb, 0, sizeof(ktask->tcb));
-	ktask->bottom = &_estack;
-	ktask->pid = 0;
-	ktask->state = TASK_READY;
+	memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
+	kernel_task.bottom = &_estack;
+	kernel_task.pid = 0;
+	kernel_task.state = TASK_READY;
 
-	tasktab[0] = ktask;
-	current = ktask;
+	tasks[0] = &kernel_task;
+	current = &kernel_task;
 
-	for (unsigned int i = 1; i < ARRAY_SIZE(tasktab); i++)
-		tasktab[i] = NULL;
+	for (unsigned int i = 1; i < ARRAY_SIZE(tasks); i++)
+		tasks[i] = NULL;
 
 	err = arch_watchdog_init();
 	if (err != 0)
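
The core of the commit is easiest to see in miniature. The kernel task
exists for the system's entire uptime and is never freed, so allocating it
on the heap buys nothing; giving it static storage removes both the heap
dependency and the only early failure path in `sched_init()`:

	/* before: boot could fail if the allocator had no memory to give */
	struct task *ktask = malloc(sizeof(*ktask));
	if (ktask == NULL)
		return -ENOMEM;

	/* after: storage is reserved in .bss at link time and cannot fail */
	static struct task kernel_task;

One subtlety: `task_destroy()` still calls `free()` on its argument, which
would be wrong for `&kernel_task`; that is presumably harmless because the
kernel task's kent is an ancestor of every other task's, so its refcount
should never drop to zero.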
@ -110,15 +137,16 @@ void schedule(void)
if (old->state == TASK_READY)
old->state = TASK_QUEUE;
for (unsigned int i = 0; i < ARRAY_SIZE(tasktab); i++) {
for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {
/*
* increment nextpid before accessing the task table
* because it is -1 if the idle task was running
*/
nextpid++;
nextpid %= ARRAY_SIZE(tasktab);
nextpid %= ARRAY_SIZE(tasks);
struct task *tmp = tasktab[nextpid];
struct task *tmp = tasks[nextpid];
if (tmp != NULL && can_run(tmp)) {
new = tmp;
break;
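
The comment inside this loop is worth unpacking: `nextpid` is -1 right
after the idle task has run, which is not a valid table index, so the
increment has to happen before `tasks[nextpid]` is read. Walking through
the arithmetic with `CONFIG_SCHED_MAXTASK` taken as 8 purely for
illustration:

	/* idle task ran:   nextpid == -1  ->  ++  ->  0  ->  %= 8  ->  0 */
	/* last slot ran:   nextpid ==  7  ->  ++  ->  8  ->  %= 8  ->  0 */
	/* pid 2 ran:       nextpid ==  2  ->  ++  ->  3  ->  %= 8  ->  3 */

The scan therefore always starts one slot after the previously running
task and wraps around the table, visiting that task last, which is what
makes the policy round-robin.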
@@ -140,50 +168,14 @@ void schedule(void)
 void yield(enum task_state state)
 {
-	struct task *task = current;
-
-	task->state = state;
+	current->state = state;
 	schedule();
 }
 
-struct task *sched_fork(struct task *parent)
-{
-	pid_t pid;
-	struct task *child = malloc(sizeof(*child));
-	if (child == NULL)
-		goto err_alloc;
-
-	for (pid = 0; pid < CONFIG_SCHED_MAXTASK; pid++) {
-		if (tasktab[pid] == NULL)
-			break;
-	}
-	if (pid == CONFIG_SCHED_MAXTASK)
-		goto err_maxtask;
-
-	child->kent.parent = &parent->kent;
-	child->kent.destroy = task_destroy;
-	if (kent_init(&child->kent) != 0)
-		goto err_kent;
-
-	child->pid = pid;
-
-	return child;
-
-err_kent:
-err_maxtask:
-	free(child);
-err_alloc:
-	return NULL;
-}
-
-void msleep(unsigned long int ms)
-{
-	current->sleep = ms_to_ticks(ms);
-	yield(TASK_SLEEP);
-}
-
 long sys_sleep(unsigned long int millis)
 {
-	msleep(millis);
+	current->sleep = ms_to_ticks(millis);
+	yield(TASK_SLEEP);
 	/* TODO: return actual milliseconds */
 	return 0;
 }
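
With `msleep()` folded into it, `sys_sleep()` now sets the tick counter and
yields directly. One plausible way to resolve the remaining TODO would be
to report whatever is left of `current->sleep` after wakeup; the sketch
below assumes that the tick handler counts the field down and that a
`ticks_to_ms()` counterpart to `ms_to_ticks()` exists, neither of which is
shown in this diff:

	/* hypothetical: return the unslept remainder in milliseconds */
	long sys_sleep(unsigned long int millis)
	{
		current->sleep = ms_to_ticks(millis);
		yield(TASK_SLEEP);
		/* nonzero only if something woke the task up early */
		return ticks_to_ms(current->sleep);
	}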