From 8b3a5fd68d648c1caec116546ecd08523790372c Mon Sep 17 00:00:00 2001
From: Fefie
Date: Tue, 11 May 2021 14:31:05 +0200
Subject: [PATCH] sched: integrate into kent hierarchy

This is an initial, rudimentary attempt at integrating the scheduler
into the kent hierarchy.  There are likely gonna be drastic changes in
the future, and I haven't even tried running the whole thing.  But hey,
the code compiles again now!
---
 include/ardix/atomic.h |  8 ++++++-
 include/ardix/sched.h  |  6 +++--
 kernel/kent.c          | 23 ++++--------------
 kernel/sched.c         | 53 +++++++++++++++++++++++++++++++-----------
 4 files changed, 56 insertions(+), 34 deletions(-)

diff --git a/include/ardix/atomic.h b/include/ardix/atomic.h
index bed843f..75cab9a 100644
--- a/include/ardix/atomic.h
+++ b/include/ardix/atomic.h
@@ -2,7 +2,13 @@
 
 #pragma once
 
-/** Enter atomic context. */
+/**
+ * Enter atomic context.
+ *
+ * Since Ardix does not support SMP, this method will simply increment a
+ * reference counter that is checked in the scheduler interrupt routine before
+ * performing the context switch.
+ */
 void atomic_enter(void);
 
 /** Leave atomic context. */
diff --git a/include/ardix/sched.h b/include/ardix/sched.h
index 20fdadf..b3874b5 100644
--- a/include/ardix/sched.h
+++ b/include/ardix/sched.h
@@ -2,6 +2,7 @@
 
 #pragma once
 
+#include <ardix/kent.h>
 #include
 #include
 
@@ -39,6 +40,7 @@ enum task_state {
 
 /** Stores an entire process image. */
 struct task {
+	struct kent kent;
 	/** current stack pointer (only gets updated for task switching) */
 	void *sp;
 	/** first address of the stack (highest if the stack grows downwards) */
@@ -61,7 +63,7 @@ int sched_init(void);
 /**
  * Switch to the next task (interrupt context only).
  * Must be called directly from within an interrupt routine.
- * This selects a new task to be run and updates the old and new task's `state` 
+ * This selects a new task to be run and updates the old and new task's `state`
  * field to the appropriate value.
  *
  * @param curr_sp: stack pointer of the current task
@@ -75,7 +77,7 @@ void *sched_switch(void *curr_sp);
  * @param task: the task to make a copy of
  * @returns the new (child) task copy, or `NULL` on failure
  */
-struct task *sched_task_clone(struct task *dest);
+struct task *sched_task_clone(struct task *task);
 
 /**
  * Request the scheduler be invoked early, resulting in the current task to
diff --git a/kernel/kent.c b/kernel/kent.c
index 5204c8a..86b3962 100644
--- a/kernel/kent.c
+++ b/kernel/kent.c
@@ -8,31 +8,18 @@
 #include
 #include
 
-struct kent *kent_root = NULL;
-
-static void kent_root_destroy(struct kent *kent)
-{
-	/*
-	 * this callback should never actually be executed in the first place
-	 * because the kent root lives as long as the kernel is running but hey,
-	 * it's not like our flash memory has a size limit or anything :)
-	 */
-	free(kent);
-	kent_root = NULL;
-}
-
 static struct kent_ops kent_root_ops = {
-	.destroy = &kent_root_destroy,
+	.destroy = NULL,
 };
 
+struct kent _kent_root;
+struct kent *kent_root = NULL;
+
 int kent_root_init(void)
 {
 	if (kent_root != NULL)
 		return -EEXIST;
-
-	kent_root = malloc(sizeof(*kent_root));
-	if (kent_root == NULL)
-		return -ENOMEM;
+	kent_root = &_kent_root;
 
 	kent_root->parent = NULL;
 	kent_root->operations = &kent_root_ops;
diff --git a/kernel/sched.c b/kernel/sched.c
index af6da30..3683d68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -18,6 +18,17 @@ extern uint32_t _estack;
 static struct task *_sched_tasktab[CONFIG_SCHED_MAXTASK];
 struct task *_sched_current_task;
 
+static void sched_kent_destroy(struct kent *kent)
+{
+	struct task *task = container_of(kent, struct task, kent);
+	_sched_tasktab[task->pid] = NULL;
+	free(task);
+}
+
+static struct kent_ops sched_kent_ops = {
+	.destroy = sched_kent_destroy,
+};
+
 int sched_init(void)
 {
 	int i;
@@ -26,19 +37,29 @@ int sched_init(void)
 	if (_sched_current_task == NULL)
 		return -ENOMEM;
 
-	_sched_current_task->sp = &_sstack;
-	_sched_current_task->stack_bottom = &_estack;
-	_sched_current_task->pid = 0;
-	_sched_current_task->state = TASK_READY;
-	_sched_tasktab[0] = _sched_current_task;
+	_sched_current_task->kent.parent = kent_root;
+	_sched_current_task->kent.operations = &sched_kent_ops;
+	i = kent_init(&_sched_current_task->kent);
+	if (i == 0) {
+		_sched_current_task->sp = &_sstack;
+		_sched_current_task->stack_bottom = &_estack;
+		_sched_current_task->pid = 0;
+		_sched_current_task->state = TASK_READY;
+		_sched_tasktab[0] = _sched_current_task;
 
-	for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
-		_sched_tasktab[i] = NULL;
+		for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
+			_sched_tasktab[i] = NULL;
 
-	i = arch_watchdog_init();
+		i = arch_watchdog_init();
 
-	if (i == 0)
-		i = arch_sched_hwtimer_init(CONFIG_SCHED_MAXTASK);
+		if (i == 0)
+			i = arch_sched_hwtimer_init(CONFIG_SCHED_MAXTASK);
+	}
+
+	/*
+	 * we don't really need to deallocate resources on error because we
+	 * are going to panic anyways if the scheduler fails to initialize.
+	 */
 
 	return i;
 }
@@ -71,6 +92,7 @@ void *sched_process_switch(void *curr_sp)
 	while (1) {
 		nextpid++;
 		nextpid %= CONFIG_SCHED_MAXTASK;
 
+		tmp = _sched_tasktab[nextpid];
 		if (tmp != NULL && sched_task_should_run(tmp)) {
 			_sched_current_task = tmp;
@@ -98,10 +120,15 @@ struct task *sched_fork(struct task *parent)
 	if (pid == CONFIG_SCHED_MAXTASK)
 		goto err_maxtask;
 
-	child->pid = pid;
-	list_init(&child->children);
-	list_insert(&parent->children, &child->siblings);
+	child->kent.parent = &parent->kent;
+	child->kent.operations = &sched_kent_ops;
+	if (kent_init(&child->kent) != 0)
+		goto err_kent;
 
+	child->pid = pid;
+	return child;
+
+err_kent:
 err_maxtask:
 	free(child);
 err_alloc:
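
The expanded atomic.h comment describes the whole non-SMP strategy: atomic_enter()
bumps a counter, and the scheduler interrupt refuses to switch tasks while that
counter is nonzero.  As a rough illustration of that contract (this sketch is not
taken from the Ardix sources; the counter name and the atomic_is_atomic() query
helper are made up here):

/* Illustrative only: a non-SMP "atomic context" counter as described in
 * the atomic.h doc comment above.  Names are hypothetical, not Ardix's. */
static volatile int atomic_level = 0;

void atomic_enter(void)
{
	atomic_level++;		/* calls may nest; each enter needs a leave */
}

void atomic_leave(void)
{
	atomic_level--;
}

/* The scheduler tick would check this before switching tasks and, if it
 * returns nonzero, skip the switch and retry on the next tick. */
int atomic_is_atomic(void)
{
	return atomic_level != 0;
}

Because a pending context switch is only deferred rather than canceled under
this scheme, the code between atomic_enter() and atomic_leave() should be kept
as short as possible.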
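sched_kent_destroy() relies on container_of() to get from the embedded kent
member back to its enclosing struct task.  A minimal standalone demonstration
of the pattern, using the classic offsetof-based definition (Ardix's own macro
may be written differently):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kent { struct kent *parent; };

struct task {
	struct kent kent;	/* embedded kent, as added by this patch */
	int pid;
};

int main(void)
{
	struct task task = { .pid = 7 };
	/* a destroy callback only receives the inner kent pointer ... */
	struct kent *kent = &task.kent;
	/* ... and recovers the containing task by subtracting the member's
	 * offset from the pointer */
	struct task *recovered = container_of(kent, struct task, kent);
	printf("pid = %d\n", recovered->pid);	/* prints "pid = 7" */
	return 0;
}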
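The destroy callbacks only make sense together with the release side of the
kent hierarchy, which this patch does not show.  Assuming struct kent carries a
reference counter and that dropping the last reference also releases one
reference of the parent (the refcount field and kent_put() below are
assumptions about that API, not code from this patch), the path that would
eventually invoke sched_kent_destroy() could look roughly like this:

#include <stddef.h>

struct kent;

struct kent_ops {
	void (*destroy)(struct kent *kent);
};

struct kent {
	struct kent *parent;
	struct kent_ops *operations;
	int refcount;	/* assumed field, not visible in this patch; on real
			 * hardware updates would happen in atomic context */
};

/* Hypothetical release path: dropping the last reference destroys the kent,
 * then releases the reference it held on its parent, walking up the
 * hierarchy until a still-referenced ancestor is reached. */
void kent_put(struct kent *kent)
{
	while (kent != NULL && --kent->refcount == 0) {
		struct kent *parent = kent->parent;

		/* kent_root's ops now have .destroy == NULL because the
		 * root is static and must never be freed */
		if (kent->operations->destroy != NULL)
			kent->operations->destroy(kent);

		kent = parent;
	}
}

Under that model, making kent_root static and clearing its .destroy callback
(as this patch does) is safer than the old free()-based callback: even a buggy
extra kent_put() on the root can no longer free memory the kernel depends on.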