sched: integrate into kent hierarchy

This is an initial, rudimentary attempt at integrating the scheduler
into the kent hierarchy.  There will likely be drastic changes
in the future, and I haven't even tried running the whole thing.

But hey, the code compiles again now!
This commit is contained in:
anna 2021-05-11 14:31:05 +02:00
parent f0dc3a3433
commit 8b3a5fd68d
Signed by: fef
GPG key ID: EC22E476DC2D3D84
4 changed files with 56 additions and 34 deletions

View file

@ -2,7 +2,13 @@
#pragma once
/** Enter atomic context. */
/**
* Enter atomic context.
*
* Since Ardix does not support SMP, this method will simply increment a
* reference counter that is checked in the scheduler interrupt routine before
* performing the context switch.
*/
void atomic_enter(void);
/** Leave atomic context. */

View file

@ -2,6 +2,7 @@
#pragma once
#include <ardix/kent.h>
#include <ardix/list.h>
#include <ardix/types.h>
@ -39,6 +40,7 @@ enum task_state {
/** Stores an entire process image. */
struct task {
struct kent kent;
/** current stack pointer (only gets updated for task switching) */
void *sp;
/** first address of the stack (highest if the stack grows downwards) */
@ -61,7 +63,7 @@ int sched_init(void);
/**
* Switch to the next task (interrupt context only).
* Must be called directly from within an interrupt routine.
* This selects a new task to be run and updates the old and new task's `state`
* This selects a new task to be run and updates the old and new task's `state`
* field to the appropriate value.
*
* @param curr_sp: stack pointer of the current task
@ -75,7 +77,7 @@ void *sched_switch(void *curr_sp);
* @param task: the task to make a copy of
* @returns the new (child) task copy, or `NULL` on failure
*/
struct task *sched_task_clone(struct task *dest);
struct task *sched_task_clone(struct task *task);
/**
* Request the scheduler be invoked early, resulting in the current task to

View file

@ -8,31 +8,18 @@
#include <errno.h>
#include <stddef.h>
struct kent *kent_root = NULL;
/**
 * Destroy callback for the kent root: release it and clear the global pointer.
 *
 * NOTE(review): this is the heap-based implementation that this commit
 * removes; the replacement switches the root to static storage
 * (`_kent_root`) and sets the destroy callback to NULL instead.
 */
static void kent_root_destroy(struct kent *kent)
{
/*
 * this callback should never actually be executed in the first place
 * because the kent root lives as long as the kernel is running but hey,
 * it's not like our flash memory has a size limit or anything :)
 */
free(kent);
/* drop the global reference so a stale pointer is never dereferenced */
kent_root = NULL;
}
static struct kent_ops kent_root_ops = {
.destroy = &kent_root_destroy,
.destroy = NULL,
};
struct kent _kent_root;
struct kent *kent_root = NULL;
int kent_root_init(void)
{
if (kent_root != NULL)
return -EEXIST;
kent_root = malloc(sizeof(*kent_root));
if (kent_root == NULL)
return -ENOMEM;
kent_root = &_kent_root;
kent_root->parent = NULL;
kent_root->operations = &kent_root_ops;

View file

@ -18,6 +18,17 @@ extern uint32_t _estack;
static struct task *_sched_tasktab[CONFIG_SCHED_MAXTASK];
struct task *_sched_current_task;
/**
 * Kent destroy callback for tasks.
 *
 * Removes the task from the global task table and frees its memory.
 * Invoked through the kent refcounting machinery once the last reference
 * to the task's kent is released.
 */
static void sched_kent_destroy(struct kent *kent)
{
/* the kent is embedded in struct task, so recover the enclosing task */
struct task *task = container_of(kent, struct task, kent);
/* free the pid slot before releasing the memory backing it */
_sched_tasktab[task->pid] = NULL;
free(task);
}
/* kent operations shared by every task the scheduler creates */
static struct kent_ops sched_kent_ops = {
.destroy = sched_kent_destroy,
};
int sched_init(void)
{
int i;
@ -26,19 +37,29 @@ int sched_init(void)
if (_sched_current_task == NULL)
return -ENOMEM;
_sched_current_task->sp = &_sstack;
_sched_current_task->stack_bottom = &_estack;
_sched_current_task->pid = 0;
_sched_current_task->state = TASK_READY;
_sched_tasktab[0] = _sched_current_task;
_sched_current_task->kent.parent = kent_root;
_sched_current_task->kent.operations = &sched_kent_ops;
i = kent_init(&_sched_current_task->kent);
if (i == 0) {
_sched_current_task->sp = &_sstack;
_sched_current_task->stack_bottom = &_estack;
_sched_current_task->pid = 0;
_sched_current_task->state = TASK_READY;
_sched_tasktab[0] = _sched_current_task;
for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
_sched_tasktab[i] = NULL;
for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
_sched_tasktab[i] = NULL;
i = arch_watchdog_init();
i = arch_watchdog_init();
if (i == 0)
i = arch_sched_hwtimer_init(CONFIG_SCHED_MAXTASK);
if (i == 0)
i = arch_sched_hwtimer_init(CONFIG_SCHED_MAXTASK);
}
/*
* we don't really need to deallocate resources on error because we
* are going to panic anyways if the scheduler fails to initialize.
*/
return i;
}
@ -71,6 +92,7 @@ void *sched_process_switch(void *curr_sp)
while (1) {
nextpid++;
nextpid %= CONFIG_SCHED_MAXTASK;
tmp = _sched_tasktab[nextpid];
if (tmp != NULL && sched_task_should_run(tmp)) {
_sched_current_task = tmp;
@ -98,10 +120,15 @@ struct task *sched_fork(struct task *parent)
if (pid == CONFIG_SCHED_MAXTASK)
goto err_maxtask;
child->pid = pid;
list_init(&child->children);
list_insert(&parent->children, &child->siblings);
child->kent.parent = &parent->kent;
child->kent.operations = &sched_kent_ops;
if (kent_init(&child->kent) != 0)
goto err_kent;
child->pid = pid;
return child;
err_kent:
err_maxtask:
free(child);
err_alloc: