sched: refactor and implement sleep

parent 0e6d0057a8, commit 6e269e0217
10 changed files with 130 additions and 44 deletions
(changed paths under arch/at91sam3x8e, include, and kernel)
@@ -15,7 +15,7 @@ void atomic_leave(void)
 	atom_put(&atomic_context);
 }
 
-int is_atomic_context(void)
+int is_atomic(void)
 {
 	return atom_count(&atomic_context);
 }
@@ -3,8 +3,9 @@
 #include <arch-generic/entry.h>
 #include <arch/hardware.h>
 
-#include <ardix/syscall.h>
-#include <ardix/types.h>
+#include <ardix/sched.h>
+#include <ardix/syscall.h>
+#include <ardix/types.h>
 
 #include <errno.h>
 #include <stddef.h>
@@ -16,7 +17,7 @@
 extern uint16_t __syscall_return_point;
 #endif
 
-void arch_enter(void *sp)
+int arch_enter(void *sp)
 {
 	struct reg_snapshot *regs = sp;
 	enum syscall sc_num = arch_syscall_num(regs);
@@ -39,13 +40,13 @@ void arch_enter(void *sp)
 
 	if (sc_num > NSYSCALLS) {
 		arch_syscall_set_rval(regs, -ENOSYS);
-		return;
+		return 0;
 	}
 
 	handler = sys_table[sc_num];
 	if (handler == NULL) {
 		arch_syscall_set_rval(regs, -ENOSYS);
-		return;
+		return 0;
 	}
 
 	/* TODO: not every syscall uses the max amount of parameters (duh) */
@@ -53,6 +54,9 @@ void arch_enter(void *sp)
 			   arch_syscall_arg4(regs), arch_syscall_arg5(regs), arch_syscall_arg6(regs));
 
 	arch_syscall_set_rval(regs, sc_ret);
+	int ret = need_resched;
+	need_resched = 0;
+	return ret;
 }
 
 /*
@@ -4,7 +4,8 @@
 
 .text
 
-.extern sched_process_switch
+/* void *sched_switch(void *curr_sp); */
+.extern sched_switch
 
 /* void handle_pend_sv(void); */
 func_begin handle_pend_sv
@@ -27,7 +28,7 @@ func_begin handle_pend_sv
 	 */
 	/* TODO: Implement banked stack pointer */
 	mov	r0,	sp
-	bl	sched_process_switch	/* sp = sched_process_switch(sp); */
+	bl	sched_switch		/* sp = sched_switch(sp); */
 	mov	sp,	r0
 
 	/*
@@ -4,8 +4,12 @@
 
 .text
 
+/* int arch_enter(void *sp); */
 .extern arch_enter
+
+/* void *sched_switch(void *sp); */
+.extern sched_switch
 
 /* void handle_svc(void); */
 func_begin handle_svc
 	/*
@@ -28,8 +32,18 @@ func_begin handle_svc
 	push	{r4-r11,lr}
 
 	mov	r0,	sp
-	bl	arch_enter	/* arch_enter(sp); */
+	bl	arch_enter	/* int need_resched = arch_enter(sp); */
+
+	cmp	r0,	#0
+	beq	svc_out
+
+	mov	r0,	sp
+	bl	sched_switch	/* sp = sched_switch(sp); */
+	mov	sp,	r0
+
 	clrex
+
+svc_out:
 	pop	{r4-r11,lr}
 
 	bx	lr
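Read as C, the new SVC path does roughly the following. This is an illustrative sketch only (the function name is hypothetical); the real implementation is the assembly above, which additionally saves and restores r4-r11 and clears the exclusive monitor with clrex.

extern int arch_enter(void *sp);	/* declared in this commit */
extern void *sched_switch(void *sp);

void handle_svc_in_c(void *sp)		/* hypothetical name, sketch only */
{
	int resched = arch_enter(sp);	/* run the syscall, fetch need_resched */

	if (resched)
		sp = sched_switch(sp);	/* switch to the next task's stack */

	/* the real handler then restores r4-r11 and returns via bx lr */
}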
@@ -11,13 +11,18 @@
 #include <errno.h>
 #include <string.h>
 
+volatile unsigned long int tick = 0;
+unsigned int systick_reload;
+
 void handle_sys_tick(void)
 {
+	tick++;
+
 	/*
 	 * fire a PendSV exception and do the actual context switching there
 	 * because the docs say you're supposed to do it that way
 	 */
-	if (!is_atomic_context())
+	if (!is_atomic())
 		arch_irq_invoke(IRQNO_PEND_SV);
 }
 
@@ -41,14 +46,14 @@ static inline void sched_nvic_set_prio_group(uint32_t prio_group)
 
 int arch_sched_hwtimer_init(unsigned int freq)
 {
-	uint32_t ticks = sys_core_clock / freq;
-	if (ticks > REG_SYSTICK_LOAD_RELOAD_MASK)
+	systick_reload = sys_core_clock / freq;
+	if (systick_reload > REG_SYSTICK_LOAD_RELOAD_MASK)
 		return 1;
 
 	/* Ensure SysTick and PendSV are preemptive */
 	sched_nvic_set_prio_group(0b011);
 
-	REG_SYSTICK_LOAD = (ticks & REG_SYSTICK_LOAD_RELOAD_MASK) - 1;
+	REG_SYSTICK_LOAD = (systick_reload & REG_SYSTICK_LOAD_RELOAD_MASK) - 1;
 	REG_SYSTICK_VAL = 0U;
 	REG_SYSTICK_CTRL = REG_SYSTICK_CTRL_CLKSOURCE_BIT /* MCK */
 			 | REG_SYSTICK_CTRL_TICKINT_BIT /* trigger exception */
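The `- 1` in the LOAD register is because SysTick counts from LOAD down to 0 inclusive, so a reload value of N fires an interrupt every N + 1 clock cycles; storing systick_reload - 1 gives a period of exactly systick_reload core clocks. A numeric check under assumed values:

/* Assumed for illustration: sys_core_clock = 84000000 (the SAM3X8E's 84 MHz)
 * and freq = 1000 Hz, so systick_reload = 84000000 / 1000 = 84000.
 * REG_SYSTICK_LOAD = 84000 - 1 = 83999, so the counter wraps (and the
 * exception fires) every 84000 clocks: 84000000 / 84000 = 1000 times/s. */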
@@ -70,7 +75,6 @@ void arch_task_init(struct task *task, void (*entry)(void))
 
 void yield(enum task_state state)
 {
-	REG_SYSTICK_VAL = 0U; /* Reset timer (TODO: don't do this lmao) */
 	current->state = state;
 	arch_irq_invoke(IRQNO_PEND_SV);
 }
@@ -91,13 +95,18 @@ int arch_idle_task_init(struct task *task)
 
 	task->stack_bottom = sp + sizeof(struct reg_snapshot);
 	arch_task_init(task, idle_task_entry);
-	task->lastexec = 0;
-	task->sleep_usecs = 0;
+	task->sleep = 0;
+	task->last_tick = 0;
 	task->state = TASK_READY;
 	task->pid = -1;
 	return 0;
 }
 
+unsigned long int ms_to_ticks(unsigned long int ms)
+{
+	return (unsigned long int)systick_reload * ms / sys_core_clock;
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
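A quick numeric check of the new conversion and its documented round-to-zero behavior, under assumed values (the 84 MHz clock and 1000 Hz tick frequency are assumptions for illustration, not taken from this commit). Note that if unsigned long is 32 bits wide here, the intermediate multiply can overflow for large ms.

/* Assumed for illustration: sys_core_clock = 84000000 (84 MHz) and
 * freq = 1000 Hz, so systick_reload = 84000000 / 1000 = 84000.
 *
 *   ms_to_ticks(1000) == 84000UL * 1000 / 84000000 == 1
 *   ms_to_ticks(500)  == 84000UL * 500  / 84000000 == 0   (rounds to zero)
 */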
@@ -27,6 +27,14 @@ void arch_task_init(struct task *task, void (*entry)(void));
 
 int arch_idle_task_init(struct task *task);
 
+/**
+ * @brief Convert milliseconds to system ticks, rounding to zero.
+ *
+ * @param ms Amount of milliseconds
+ * @returns Equivalent time in system ticks
+ */
+unsigned long int ms_to_ticks(unsigned long ms);
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -15,7 +15,7 @@ void atomic_enter(void);
 void atomic_leave(void);
 
 /** Return a nonzero value if the current process is in atomic context. */
-int is_atomic_context(void);
+int is_atomic(void);
 
 /*
  * This file is part of Ardix.
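A minimal usage sketch of the API this header declares (the caller is hypothetical): while the counter behind atomic_context is nonzero, is_atomic() returns nonzero and handle_sys_tick() above skips firing PendSV, so the critical section is not preempted.

extern void atomic_enter(void);
extern void atomic_leave(void);

void update_shared_state(void)	/* hypothetical caller, sketch only */
{
	atomic_enter();		/* handle_sys_tick() skips PendSV from here on */
	/* ... modify data shared with other tasks ... */
	atomic_leave();		/* preemption possible again */
}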
@@ -12,11 +12,6 @@
 #warning "CONFIG_SCHED_MAXTASK is > 64, this could have a significant performance impact"
 #endif
 
-#ifndef CONFIG_STACKSZ
-/** Per-task stack size in bytes */
-#define CONFIG_STACKSZ 4096U
-#endif
-
 enum task_state {
 	/** Task is dead / doesn't exist */
 	TASK_DEAD,
@@ -24,60 +19,84 @@ enum task_state {
 	TASK_READY,
 	/** Task is waiting for its next time share. */
 	TASK_QUEUE,
-	/** Task is sleeping, `sleep_until` specifies when to wake it up. */
+	/** Task is sleeping, `task::sleep` specifies for how many ticks. */
 	TASK_SLEEP,
 	/** Task is waiting for I/O to flush buffers. */
 	TASK_IOWAIT,
 };
 
-/** Stores an entire process image. */
+/** @brief Core structure holding information about a task. */
 struct task {
 	struct kent kent;
 	/** current stack pointer (only gets updated for task switching) */
 	void *sp;
 	/** first address of the stack (highest if the stack grows downwards) */
 	void *stack_bottom;
-	/** if `state` is `TASK_SLEEP`, the last execution time */
-	unsigned long int lastexec;
-	/** if `state` is `TASK_SLEEP`, the amount of us to sleep in total */
-	unsigned long int sleep_usecs;
+	/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
+	unsigned long int sleep;
+	/** @brief Last execution in ticks */
+	unsigned long int last_tick;
 
 	enum task_state state;
 	pid_t pid;
 };
 
+/** @brief Current task (access from syscall context only) */
 extern struct task *current;
 
+/** @brief Global system tick counter (may overflow) */
+extern volatile unsigned long int tick;
+
 /**
- * Initialize the scheduler subsystem.
+ * @brief If nonzero, the scheduler is invoked after the current syscall.
+ * This is checked and then cleared after every syscall.  If it has a nonzero
+ * value, `sched_switch()` is called after `arch_enter()`.
+ */
+extern int need_resched;
+
+/**
+ * @brief Initialize the scheduler subsystem.
+ * This sets up a hardware interrupt timer (SysTick for Cortex-M3).
  */
 int sched_init(void);
 
 /**
- * Switch to the next task (interrupt context only).
- * Must be called directly from within an interrupt routine.
+ * @brief Switch to the next task (scheduler context only).
  * This selects a new task to be run and updates the old and new task's `state`
- * field to the appropriate value.
+ * field to the appropriate value.  Called from the scheduler exception handler.
  *
- * @param curr_sp: stack pointer of the current task
- * @returns stack pointer of the new task
+ * @param curr_sp Stack pointer of the current task
+ * @returns Stack pointer of the new task
  */
 void *sched_switch(void *curr_sp);
 
 /**
- * Create a copy of the current process image and return it.
+ * @brief Create a copy of the `current` task and return it.
+ * The new task becomes a child of the `current` task and is inserted into the
+ * process table so that it can be executed by the scheduler after its state
+ * is set to `TASK_QUEUE`.  When the task is returned, its initial state is
+ * `TASK_UNKNOWN` so that the caller has time to do any additional required
+ * setup work.
  *
- * @param task: the task to make a copy of
- * @returns the new (child) task copy, or `NULL` on failure
+ * @param task Task to make a copy of
+ * @returns The new (child) task copy, or `NULL` on failure
  */
-struct task *sched_task_clone(struct task *task);
+struct task *task_clone(struct task *task);
 
 /**
- * Request the scheduler be invoked early, resulting in the current task to
- * be suspended.
+ * @brief Sleep for an approximate amount of milliseconds.
+ * Must not be invoked from atomic or irq context.
  *
- * @param state: State the task should enter.
+ * @param ms Amount of milliseconds
  */
+void msleep(unsigned long int ms);
+
+/**
+ * @brief Suspend the `current` task and invoke the scheduler early.
+ * May only be called from syscall context.
+ *
+ * @param state State the task should enter.
+ *        Allowed values are `TASK_SLEEP` and `TASK_IOWAIT`.
+ */
 void yield(enum task_state state);
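With the new declarations in place, kernel code running in syscall context can block itself. A hedged sketch (the task body and its LED helper are hypothetical, not part of this commit):

extern void msleep(unsigned long int ms);

void blink(void)	/* hypothetical task body, running in syscall context */
{
	for (;;) {
		/* led_toggle();  -- hypothetical helper, elided */
		msleep(500);	/* sets current->sleep, then yield(TASK_SLEEP) */
	}
}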
@@ -53,6 +53,10 @@
 /** Function attribute for hinting this function has malloc-like behavior. */
 #define __malloc(deallocator, argn) __attribute__(( malloc ))
 
+#define __preinit_call(fn) __section(.preinit_array) void (*fn##_ptr)(void) = fn
+
+#define __init_call(fn) __section(.init_array) void (*fn##_ptr)(void) = fn
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
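The two new macros plant a function pointer into the .preinit_array / .init_array linker sections, which startup code can then walk to call every registered function before main. A usage sketch (the registered function is hypothetical):

static void uart_init(void)	/* hypothetical init routine */
{
	/* ... bring up the UART ... */
}
/* expands to: __section(.init_array) void (*uart_init_ptr)(void) = uart_init; */
__init_call(uart_init);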
@@ -21,6 +21,8 @@ struct task *current;
 
 static struct task idle_task;
 
+int need_resched = 0;
+
 static void task_destroy(struct kent *kent)
 {
 	struct task *task = container_of(kent, struct task, kent);
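Nothing in this hunk sets the flag yet; the intended pattern, per the header comment, is that a syscall handler requests a reschedule and handle_svc acts on it after arch_enter() returns. A hypothetical handler (illustration only, not part of this commit) might look like:

/* Hypothetical syscall handler -- name and behavior are illustrative. */
long sys_sleep_example(unsigned long ms)
{
	current->sleep = ms_to_ticks(ms);
	current->state = TASK_SLEEP;
	need_resched = 1;	/* handle_svc calls sched_switch() afterwards */
	return 0;
}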
@@ -71,31 +73,46 @@ out:
 	return i;
 }
 
+#include <arch/debug.h>
+
 /**
- * Determine whether the specified task is a candidate for execution.
+ * @brief Determine whether the specified task is a candidate for execution.
  *
- * @param task: the task
+ * @param task The task
  * @returns whether `task` could be run next
  */
 static inline bool can_run(const struct task *task)
 {
-	enum task_state state = task->state;
-	return state == TASK_QUEUE || state == TASK_READY;
+	switch (task->state) {
+	case TASK_SLEEP:
+		return tick - task->last_tick > task->sleep;
+	case TASK_QUEUE:
+	case TASK_READY:
+		return true;
+	case TASK_DEAD:
+	case TASK_IOWAIT:
+		return false;
+	}
+
+	return false; /* this shouldn't be reached */
 }
 
-void *sched_process_switch(void *curr_sp)
+void *sched_switch(void *curr_sp)
 {
 	struct task *tmp;
 	int i;
 	pid_t nextpid = current->pid;
 	current->sp = curr_sp;
 
+	//__breakpoint;
+
 	kevents_process();
 
 	if (current->state != TASK_SLEEP && current->state != TASK_IOWAIT)
 		current->state = TASK_QUEUE;
 
 	for (i = 0; i < CONFIG_SCHED_MAXTASK; i++) {
+		//__breakpoint;
 		nextpid++;
 		nextpid %= CONFIG_SCHED_MAXTASK;
 
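The TASK_SLEEP case relies on unsigned wraparound: tick - task->last_tick is evaluated modulo ULONG_MAX + 1, so the elapsed count stays correct even after the tick counter overflows (which the header explicitly permits), as long as no task sleeps for a duration close to the counter's full range. A small self-contained check:

#include <limits.h>	/* ULONG_MAX */

int sleep_elapsed_demo(void)	/* sketch only */
{
	unsigned long last_tick = ULONG_MAX - 1; /* slept just before wraparound */
	unsigned long now = 2;			 /* tick has since wrapped */
	return (now - last_tick) == 4;		 /* true, by modular arithmetic */
}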
@@ -110,6 +127,8 @@ void *sched_process_switch(void *curr_sp)
 		current = &idle_task;
 
 	current->state = TASK_READY;
+	current->last_tick = tick;
+	//__breakpoint;
 	return current->sp;
 }
 
@@ -143,6 +162,14 @@ err_alloc:
 	return NULL;
 }
 
+void msleep(unsigned long int ms)
+{
+	//__breakpoint;
+	current->sleep = ms_to_ticks(ms);
+	yield(TASK_SLEEP);
+	//__breakpoint;
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.