sched: add idle task

anna 2021-08-04 03:25:04 +02:00
parent e291a51d2f
commit f89aa9dc4e
Signed by: fef
GPG key ID: EC22E476DC2D3D84
4 changed files with 74 additions and 26 deletions


@@ -5,15 +5,17 @@
 #include <arch/interrupt.h>
 #include <ardix/atomic.h>
+#include <ardix/malloc.h>
 #include <ardix/sched.h>
+#include <errno.h>
 #include <string.h>
 
 void handle_sys_tick(void)
 {
 	/*
 	 * fire a PendSV exception and do the actual context switching there
-	 * because it is faster that way (according to the docs, at least)
+	 * because the docs say you're supposed to do it that way
	 */
 	if (!is_atomic_context())
 		arch_irq_invoke(IRQNO_PEND_SV);
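
The comment above follows the usual Cortex-M convention of deferring the actual context switch to the lowest-priority PendSV exception. arch_irq_invoke(IRQNO_PEND_SV) itself is not shown in this commit; as a rough sketch (assuming an ARMv7-M core, which the PendSV/SysTick names suggest), pending the exception comes down to setting a single bit in the ICSR system register:

	/* illustrative sketch only, not from this commit: pend the PendSV
	 * exception on an ARMv7-M core by setting ICSR.PENDSVSET (bit 28).
	 * Register address and bit position are from the ARMv7-M manual. */
	#include <stdint.h>

	#define SCB_ICSR	(*(volatile uint32_t *)0xe000ed04u)
	#define ICSR_PENDSVSET	(1u << 28)

	static inline void pend_sv(void)
	{
		SCB_ICSR = ICSR_PENDSVSET;
	}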
@@ -55,7 +57,7 @@ int arch_sched_hwtimer_init(unsigned int freq)
 	return 0;
 }
 
-void arch_sched_task_init(struct task *task, void (*entry)(void))
+void arch_task_init(struct task *task, void (*entry)(void))
 {
 	struct reg_snapshot *regs = task->stack_bottom - sizeof(*regs);
 	task->sp = regs;
@@ -73,6 +75,29 @@ void yield(enum task_state state)
 	arch_irq_invoke(IRQNO_PEND_SV);
 }
 
+__naked __noreturn static void idle_task_entry(void)
+{
+	__asm__ volatile(
+		"1:	b	1b	\n"
+		:::
+	);
+}
+
+int arch_idle_task_init(struct task *task)
+{
+	void *sp = malloc(sizeof(struct reg_snapshot));
+	if (sp == NULL)
+		return -ENOMEM;
+
+	task->stack_bottom = sp + sizeof(struct reg_snapshot);
+	arch_task_init(task, idle_task_entry);
+	task->lastexec = 0;
+	task->sleep_usecs = 0;
+	task->state = TASK_READY;
+	task->pid = -1;
+
+	return 0;
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
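
A note on the idle task's stack: arch_idle_task_init() allocates exactly one struct reg_snapshot, and arch_task_init() then places the initial register snapshot at stack_bottom - sizeof(*regs), which lands right back at the start of that allocation. This only works because idle_task_entry() is naked and never pushes anything. A minimal sketch of the arithmetic, with a placeholder reg_snapshot layout (the real one is arch-specific) and standard char-pointer arithmetic instead of the GNU void-pointer arithmetic used in the diff:

	#include <assert.h>
	#include <stdlib.h>

	/* placeholder layout, only the size matters for this demo */
	struct reg_snapshot { unsigned long regs[17]; };

	static void idle_stack_layout_demo(void)
	{
		/* same steps as arch_idle_task_init() + arch_task_init() above */
		char *base = malloc(sizeof(struct reg_snapshot));        /* idle stack */
		if (base == NULL)
			return;
		char *stack_bottom = base + sizeof(struct reg_snapshot); /* task->stack_bottom */
		char *regs = stack_bottom - sizeof(struct reg_snapshot); /* initial task->sp */
		assert(regs == base); /* the snapshot occupies the entire idle stack */
		free(base);
	}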


@@ -133,6 +133,12 @@ void irq_uart(void)
 	if (state & REG_UART_SR_RXRDY_MASK) {
 		tmp = REG_UART_RHR;
 		ringbuf_write(arch_serial_default_device.device.rx, &tmp, sizeof(tmp));
+
+		/* TODO: we need some error handling mechanism for event creation */
+		struct device_kevent *event = device_kevent_create(&serial_default_device->device,
+								   DEVICE_CHANNEL_IN);
+		if (event != NULL)
+			kevent_dispatch(&event->event);
 	}
 
 	/* REG_UART_PDC_TCR has reached zero */
@@ -146,6 +152,12 @@ void irq_uart(void)
 
 		if (arch_serial_default_device.tx_current == NULL)
 			REG_UART_IDR = REG_UART_IDR_ENDTX_MASK;
+
+		/* TODO: we need some error handling mechanism for event creation */
+		struct device_kevent *event = device_kevent_create(&serial_default_device->device,
+								   DEVICE_CHANNEL_OUT);
+		if (event != NULL)
+			kevent_dispatch(&event->event);
 	}
 
 	/* check for error conditions */


@@ -6,7 +6,7 @@
 #include <toolchain.h>
 
-struct process;	/* see include/ardix/sched.h */
+struct task;	/* see include/ardix/sched.h */
 
 /**
  * Initialize a hardware timer for scheduling.
@@ -23,7 +23,9 @@ int arch_sched_hwtimer_init(unsigned int freq);
  * @param process: The process.
  * @param entry: The process entry point.
  */
-void arch_sched_process_init(struct process *process, void (*entry)(void));
+void arch_task_init(struct task *task, void (*entry)(void));
+
+int arch_idle_task_init(struct task *task);
 
 /*
  * This file is part of Ardix.


@@ -18,6 +18,8 @@ extern uint32_t _estack;
 static struct task *tasktab[CONFIG_SCHED_MAXTASK];
 struct task *current;
 
+static struct task idle_task;
+
 static void task_destroy(struct kent *kent)
 {
 	struct task *task = container_of(kent, struct task, kent);
@@ -36,27 +38,35 @@ int sched_init(void)
 	current->kent.parent = kent_root;
 	current->kent.destroy = task_destroy;
 	i = kent_init(&current->kent);
-	if (i == 0) {
-		current->sp = &_sstack;
-		current->stack_bottom = &_estack;
-		current->pid = 0;
-		current->state = TASK_READY;
-		tasktab[0] = current;
-
-		for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
-			tasktab[i] = NULL;
-
-		i = arch_watchdog_init();
-
-		if (i == 0)
-			i = arch_sched_hwtimer_init(CONFIG_SCHED_FREQ);
-	}
+	if (i != 0)
+		goto out;
+
+	current->sp = &_sstack;
+	current->stack_bottom = &_estack;
+	current->pid = 0;
+	current->state = TASK_READY;
+	tasktab[0] = current;
+
+	for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
+		tasktab[i] = NULL;
+
+	i = arch_watchdog_init();
+	if (i != 0)
+		goto out;
+
+	i = arch_sched_hwtimer_init(CONFIG_SCHED_FREQ);
+	if (i != 0)
+		goto out;
+
+	i = arch_idle_task_init(&idle_task);
+	if (i != 0)
+		goto out;
 
 	/*
 	 * we don't really need to deallocate resources on error because we
 	 * are going to panic anyways if the scheduler fails to initialize.
 	 */
+out:
 	return i;
 }
@@ -66,37 +76,36 @@ int sched_init(void)
  * @param task: the task
  * @returns whether `task` could be run next
  */
-static inline bool sched_task_should_run(const struct task *task)
+static inline bool can_run(const struct task *task)
 {
 	enum task_state state = task->state;
-
-	if (state == TASK_QUEUE || state == TASK_READY || state == TASK_IOWAIT)
-		return true;
-
-	return false;
+	return state == TASK_QUEUE || state == TASK_READY;
 }
 
 void *sched_process_switch(void *curr_sp)
 {
 	struct task *tmp;
+	int i;
 	pid_t nextpid = current->pid;
 	current->sp = curr_sp;
 
 	if (current->state != TASK_SLEEP && current->state != TASK_IOWAIT)
 		current->state = TASK_QUEUE;
 
-	while (1) {
+	for (i = 0; i < CONFIG_SCHED_MAXTASK; i++) {
 		nextpid++;
 		nextpid %= CONFIG_SCHED_MAXTASK;
 		tmp = tasktab[nextpid];
 
-		if (tmp != NULL && sched_task_should_run(tmp)) {
+		if (tmp != NULL && can_run(tmp)) {
 			current = tmp;
 			break;
 		}
-
-		/* TODO: Add idle thread */
 	}
 
+	if (i == CONFIG_SCHED_MAXTASK)
+		current = &idle_task;
+
 	current->state = TASK_READY;
 	return current->sp;
 }
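
For context on how the returned stack pointer is consumed: the actual switch happens in the PendSV handler, which is not part of this diff. A hypothetical caller might look roughly like the sketch below; save_context() and restore_context() are invented names standing in for the arch-specific assembly, so this only illustrates the data flow, not the real mechanism.

	/* hypothetical PendSV handler sketch; helper names are made up for
	 * illustration, the real implementation is arch-specific assembly. */
	extern void *save_context(void);      /* invented: save the current task's registers */
	extern void restore_context(void *);  /* invented: restore registers from a stack pointer */
	extern void *sched_process_switch(void *curr_sp);  /* from kernel/sched.c above */

	void handle_pend_sv(void)
	{
		void *old_sp = save_context();                /* snapshot the outgoing task */
		void *new_sp = sched_process_switch(old_sp);  /* may return the idle task's sp */
		restore_context(new_sp);                      /* resume the incoming task */
	}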