sched: complete rework of context switching
The old strategy was to perform context switching only from within the PendSV handler. That worked fine until now because every syscall either completed atomically or simply returned -EAGAIN if the resource was locked or busy. With the introduction of I/O wait, however, a syscall must be able to put its task to sleep directly from within the kernel, so context switching is moved entirely into the kernel itself.
parent 60f1ebea8a
commit 7e6dbad05f
18 changed files with 389 additions and 318 deletions
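Before the diff, a rough sketch of the new control flow may help. The following is my own simplified, host-compilable model, not Ardix source (MAXTASK, the printf stand-in for _do_switch(), and the reduced runnability check are placeholders): a blocking syscall calls yield(), yield() calls schedule(), and schedule() picks the next runnable task round-robin and switches to it directly via do_switch() instead of deferring the switch to PendSV.

/* flow_sketch.c -- editorial illustration only, not part of the commit */
#include <stdio.h>
#include <stddef.h>

enum task_state { TASK_READY, TASK_QUEUE, TASK_SLEEP, TASK_IOWAIT };

struct context { unsigned long regs[10]; }; /* stands in for the arch's register save area */
struct task {
	struct context tcb;      /* simplified stand-in for struct tcb */
	enum task_state state;
	int pid;
};

#define MAXTASK 4                /* placeholder for CONFIG_SCHED_MAXTASK */
static struct task *tasktab[MAXTASK];
static struct task idle_task = { .state = TASK_QUEUE, .pid = -1 };
static struct task *current;

/* stand-in for the assembly routine _do_switch(&old->tcb.context, &new->tcb.context) */
static void do_switch(struct task *old, struct task *new)
{
	printf("switch: pid %d -> pid %d\n", old->pid, new->pid);
}

/* mirrors the round-robin selection in the new schedule()
 * (can_run() reduced to a plain TASK_QUEUE check) */
static void schedule(void)
{
	struct task *old = current;
	struct task *new = NULL;
	int nextpid = old->pid;

	if (old->state == TASK_READY)
		old->state = TASK_QUEUE;

	for (int i = 0; i < MAXTASK; i++) {
		nextpid = (nextpid + 1) % MAXTASK;
		struct task *tmp = tasktab[nextpid];
		if (tmp != NULL && tmp->state == TASK_QUEUE) {
			new = tmp;
			break;
		}
	}
	if (new == NULL)
		new = &idle_task;       /* nothing runnable */

	new->state = TASK_READY;
	current = new;
	if (old != new)
		do_switch(old, new);    /* returns when old gets scheduled again */
}

/* a blocking syscall can now sleep inside the kernel instead of returning -EAGAIN */
static void yield(enum task_state state)
{
	current->state = state;
	schedule();
}

int main(void)
{
	struct task t0 = { .state = TASK_READY, .pid = 0 };
	struct task t1 = { .state = TASK_QUEUE, .pid = 1 };
	tasktab[0] = &t0;
	tasktab[1] = &t1;
	current = &t0;

	yield(TASK_IOWAIT);     /* e.g. sys_read() waiting for serial input */
	return 0;
}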
@@ -22,12 +2,14 @@ target_sources(ardix_arch PRIVATE
atom_get_put.S
atom.c
atomic.c
do_switch.S
entry.c
handle_fault.c
handle_fault.S
handle_pend_sv.S
handle_reset.c
handle_svc.S
leave.S
mutex.S
sched.c
serial.c
@@ -1,20 +1,23 @@
/* See the end of this file for copyright, license, and warranty information. */

#pragma once
.include "asm.S"

/**
* @brief Perform a syscall.
*
* This is called by the syscall exception handler. It is responsible for
* finishing the context switch, obtaining the syscall number and arguments,
* and invoking the respective system call. If the return value of this
* function is nonzero, the scheduler is invoked after the call and before
* returning to userspace.
*
* @param sp current stack pointer
* @returns Whether rescheduling is required
*/
int arch_enter(void *sp);
.text

/* void _do_switch(struct context *old, struct context *new); */
func_begin _do_switch

/* ldm/stm can't use sp in the reglist, we need to store it individually */

stmia r0!, {r4-r11}
str sp, [r0]
str lr, [r0, #4] /* this becomes pc when we are switched back to */

ldmia r1!, {r4-r11}
ldr sp, [r1]
ldr pc, [r1, #4] /* this is the lr that we stored */

func_end _do_switch

/*
* This file is part of Ardix.
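As a reading aid (a host-compilable sketch I added, not part of the commit): the register save order in _do_switch only works because struct context declares its members in exactly that order. The struct below is copied from the arch/hardware.h hunk further down; the offsets are what the stmia/str sequence relies on.

/* context_layout.c -- editorial illustration only */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t word_t;

/* stmia r0!, {r4-r11}  -> r4..r11 fill the first eight words
 * str   sp, [r0]       -> sp comes right after r11
 * str   lr, [r0, #4]   -> pc comes right after sp (restored as pc on switch-back) */
struct context {
	word_t r4, r5, r6, r7, r8, r9, r10, r11;
	void *sp;
	void *pc;
};

int main(void)
{
	printf("r4..r11 occupy offsets 0..%zu\n",
	       offsetof(struct context, sp) - 1);
	printf("sp at offset %zu, pc at offset %zu\n",
	       offsetof(struct context, sp), offsetof(struct context, pc));
	return 0;
}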
@@ -1,6 +1,5 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <arch-generic/entry.h>
#include <arch/hardware.h>

#include <ardix/types.h>

@@ -17,13 +16,12 @@
extern uint16_t __syscall_return_point;
#endif

int arch_enter(void *sp)
void arch_enter(struct exc_context *context)
{
struct reg_snapshot *regs = sp;
enum syscall sc_num = arch_syscall_num(regs);
int (*handler)(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sysarg_t arg4, sysarg_t arg5, sysarg_t arg6);
int sc_ret;
enum syscall number = sc_num(context);
long (*handler)(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sysarg_t arg4, sysarg_t arg5, sysarg_t arg6);
long sc_ret;

# ifdef CONFIG_CHECK_SYSCALL_SOURCE
/*

@@ -32,31 +30,28 @@ int arch_enter(void *sp)
* the instructions are always 2-byte aligned. Additionally, the PC
* points to the instruction *after* the SVC, not SVC itself.
*/
if (((uintptr_t)regs->hw.pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
arch_syscall_set_rval(regs, -EACCES);
if (((uintptr_t)regs->sp->pc & 0xfffffffe) != (uintptr_t)&__syscall_return_point) {
sc_set_rval(regs, -EACCES);
return;
}
# endif

if (sc_num > NSYSCALLS) {
arch_syscall_set_rval(regs, -ENOSYS);
return 0;
if (number > NSYSCALLS) {
sc_set_rval(context, -ENOSYS);
return;
}

handler = sys_table[sc_num];
handler = sys_table[number];
if (handler == NULL) {
arch_syscall_set_rval(regs, -ENOSYS);
return 0;
sc_set_rval(context, -ENOSYS);
return;
}

/* TODO: not every syscall uses the max amount of parameters (duh) */
sc_ret = handler(arch_syscall_arg1(regs), arch_syscall_arg2(regs), arch_syscall_arg3(regs),
arch_syscall_arg4(regs), arch_syscall_arg5(regs), arch_syscall_arg6(regs));
sc_ret = handler(sc_arg1(context), sc_arg2(context), sc_arg3(context),
sc_arg4(context), sc_arg5(context), sc_arg6(context));

arch_syscall_set_rval(regs, sc_ret);
int ret = need_resched;
need_resched = 0;
return ret;
sc_set_rval(context, sc_ret);
}

/*
@@ -4,35 +4,35 @@

.text

/* __naked __noreturn void arch_handle_fault(struct reg_snapshot *regs, int irqnum); */
.extern arch_handle_fault
/* __naked __noreturn void handle_fault(struct exc_context *context, int irqnum); */
.extern handle_fault

func_begin handle_hard_fault
push {r4-r11,lr}
prepare_entry
mov r0, sp
mov r1, #-13 /* IRQNO_HARD_FAULT */
b arch_handle_fault
b handle_fault
func_end handle_hard_fault

func_begin handle_mm_fault
push {r4-r11,lr}
prepare_entry
mov r0, sp
mov r1, #-12 /* IRQNO_MM_FAULT */
b arch_handle_fault
b handle_fault
func_end handle_mm_fault

func_begin handle_bus_fault
push {r4-r11,lr}
prepare_entry
mov r0, sp
mov r1, #-11 /* IRQNO_BUS_FAULT */
b arch_handle_fault
b handle_fault
func_end handle_bus_fault

func_begin handle_usage_fault
push {r4-r11,lr}
prepare_entry
mov r0, sp
mov r1, #-10 /* IRQNO_USAGE_FAULT */
b arch_handle_fault
b handle_fault
func_end handle_usage_fault

/*
@@ -7,19 +7,6 @@

#include <toolchain.h>

/** Setup UART to manual byte-by-byte control */
static inline void uart_emergency_setup(void)
{
UART->UART_PTCR = UART_PTCR_RXTDIS | UART_PTCR_TXTDIS;

UART->UART_CR = UART_CR_RXDIS | UART_CR_RSTRX
| UART_CR_TXDIS | UART_CR_RSTTX;

UART->UART_IDR = 0xffffffff;

UART->UART_CR = UART_CR_RXEN | UART_CR_TXEN;
}

static void uart_write_sync(const char *s)
{
char c;

@@ -29,10 +16,28 @@ static void uart_write_sync(const char *s)
}
}

/** Setup UART to manual byte-by-byte control */
static inline void uart_emergency_setup(void)
{
UART->UART_IDR = 0xffffffff;

mom_are_we_there_yet(UART->UART_SR & UART_SR_TXRDY);
UART->UART_PTCR = UART_PTCR_RXTDIS | UART_PTCR_TXTDIS;

UART->UART_CR = UART_CR_RXDIS | UART_CR_RSTRX
| UART_CR_TXDIS | UART_CR_RSTTX;

UART->UART_CR = UART_CR_TXEN;

UART->UART_THR = '\0';
}

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
static inline void print_err_msg(enum irqno irqno)
{
uart_write_sync("\n\n########## SERIOUS BRUH MOMENT! ##########\n");

switch (irqno) {
case IRQNO_HARD_FAULT:
uart_write_sync("Hard");

@@ -48,11 +53,11 @@ static inline void print_err_msg(enum irqno irqno)
break;
}

uart_write_sync(" Fault encountered, system halted.\n\n");
uart_write_sync(" Fault encountered, cannot continue\nRegister dump:\n");
}
#pragma GCC diagnostic pop /* -Wswitch */

static void reg_to_str(char *dest, uint32_t val)
static void reg_to_str(char *dest, word_t val)
{
for (int i = 28; i >= 0; i -= 4) {
uint8_t digit = (val >> i) & 0x0f;

@@ -66,91 +71,56 @@ static void reg_to_str(char *dest, uint32_t val)
}
}

static void print_regs(struct reg_snapshot *regs)
static void print_reg(const char *name, word_t val)
{
static char reg_line[] = "r0 = 0x????????\n";
char *reg_val = &reg_line[8]; /* first question mark */
/* static saves stack space, which might be limited */
static char line[] = "???? = 0x????????\n";
char c;
char *name_pos = line;
while ((c = *name++) != '\0')
*name_pos++ = c;
while (name_pos < &line[4])
*name_pos++ = ' ';
reg_to_str(&line[9], val);
uart_write_sync(line);
}

reg_to_str(reg_val, regs->hw.r0);
uart_write_sync(reg_line);

reg_line[1] = '1';
reg_to_str(reg_val, regs->hw.r1);
uart_write_sync(reg_line);

reg_line[1] = '2';
reg_to_str(reg_val, regs->hw.r2);
uart_write_sync(reg_line);

reg_line[1] = '3';
reg_to_str(reg_val, regs->hw.r3);
uart_write_sync(reg_line);

reg_line[1] = '4';
reg_to_str(reg_val, regs->sw.r4);
uart_write_sync(reg_line);

reg_line[1] = '5';
reg_to_str(reg_val, regs->sw.r5);
uart_write_sync(reg_line);

reg_line[1] = '6';
reg_to_str(reg_val, regs->sw.r6);
uart_write_sync(reg_line);

reg_line[1] = '7';
reg_to_str(reg_val, regs->sw.r7);
uart_write_sync(reg_line);

reg_line[1] = '8';
reg_to_str(reg_val, regs->sw.r8);
uart_write_sync(reg_line);

reg_line[1] = '9';
reg_to_str(reg_val, regs->sw.r9);
uart_write_sync(reg_line);

reg_line[1] = '1';
reg_line[2] = '0';
reg_to_str(reg_val, regs->sw.r10);
uart_write_sync(reg_line);

reg_line[2] = '1';
reg_to_str(reg_val, regs->sw.r11);
uart_write_sync(reg_line);

reg_line[2] = '2';
reg_to_str(reg_val, regs->hw.r12);
uart_write_sync(reg_line);

reg_line[0] = 's';
reg_line[1] = 'p';
reg_line[2] = ' ';
reg_to_str(reg_val, (uint32_t)(regs + 1)); /* where SP was before reg save */
uart_write_sync(reg_line);

reg_line[0] = 'l';
reg_line[1] = 'r';
reg_to_str(reg_val, (uint32_t)regs->hw.lr);
uart_write_sync(reg_line);

reg_line[0] = 'p';
reg_line[1] = 'c';
reg_to_str(reg_val, (uint32_t)regs->hw.pc);
uart_write_sync(reg_line);
static void print_regs(struct exc_context *context)
{
print_reg("R0", context->sp->r0);
print_reg("R1", context->sp->r1);
print_reg("R2", context->sp->r2);
print_reg("R3", context->sp->r3);
print_reg("R4", context->r4);
print_reg("R5", context->r5);
print_reg("R6", context->r6);
print_reg("R7", context->r7);
print_reg("R8", context->r8);
print_reg("R9", context->r9);
print_reg("R10", context->r10);
print_reg("R11", context->r11);
print_reg("R12", context->sp->r12);
print_reg("SP", *(word_t *)&context->sp);
print_reg("LR", *(word_t *)&context->sp->lr);
print_reg("PC", *(word_t *)&context->sp->pc);
print_reg("xPSR", context->sp->psr);
}

#include <arch/debug.h>

__naked __noreturn void arch_handle_fault(struct reg_snapshot *regs, enum irqno irqno)
__naked __noreturn void handle_fault(struct exc_context *context, enum irqno irqno)
{
uart_emergency_setup();
print_err_msg(irqno);
print_regs(regs);

/* give developers a chance to inspect the system */
print_err_msg(irqno);
print_regs(context);

if (SCB->HFSR & SCB_HFSR_FORCED_Msk)
print_reg("CFSR", SCB->CFSR);

uart_write_sync("\nSystem halted, goodbye\n\n");

__breakpoint;
/* but never leave this function */
while (1);
}
@@ -4,42 +4,15 @@

.text

/* void *sched_switch(void *curr_sp); */
.extern sched_switch
/* void schedule(void); */
.extern schedule

/* void handle_pend_sv(void); */
func_begin handle_pend_sv
/*
* Some registers have already been saved by hardware at this point,
* we only need to take care of r4-r11 and lr (the latter of which is
* required because lr is overwritten when entering the irq).
* The stuff we push onto the stack manually looks about like this:
*
* <<< stack grow direction (decreasing addresses) <<<
* r4 r5 r6 r7 r8 r9 r10 r11 lr
*/

push {r4-r11,lr}

/*
* Now that our stack is completely saved, we can proceed to call the
* Kernel's scheduler. This updates `_current_process` to the process
* we want to execute next.
*/
/* TODO: Implement banked stack pointer */
mov r0, sp
bl sched_switch /* sp = sched_switch(sp); */
mov sp, r0

/*
* The new stack pointer contains the state of the new process, so we
* load it into our registers using the same procedure as above,
* just in reverse order.
*/

pop {r4-r11,lr}

clrex
prepare_entry
bl schedule
prepare_leave

bx lr
@@ -5,9 +5,9 @@
#include <config.h>

#ifdef DEBUG
#define __breakpoint __asm__ volatile("\tbkpt\n" ::: )
# define __breakpoint __asm__ volatile("bkpt")
#else
#define __breakpoint
# define __breakpoint
#endif

/*
@@ -3,17 +3,20 @@
#pragma once

#include <stdint.h>
#include <toolchain.h>

typedef uint32_t word_t;
typedef uint32_t sysarg_t;

/** Current system frequency in Hertz. */
extern volatile uint32_t SystemCoreClock;

/**
* All registers that are automatically saved by hardware routines when entering
* an IRQ, in the correct order.
* @brief Hardware context save upon entering kernel space.
* This is stored on the stack of the thread that ran before exception entry,
* i.e. PSP if user space and MSP if kernel space.
*/
struct reg_hw_snapshot {
struct hw_context {
word_t r0;
word_t r1;
word_t r2;

@@ -24,11 +27,12 @@ struct reg_hw_snapshot {
word_t psr;
};

struct reg_snapshot {
word_t r0;
word_t r1;
word_t r2;
word_t r3;
/**
* @brief Software context save from an exception handler upon entering kernel space.
* This is always stored on the main stack.
* The `prepare_entry` macro in `arch/include/asm.S` creates this snapshot.
*/
struct exc_context {
word_t r4;
word_t r5;
word_t r6;

@@ -37,34 +41,92 @@ struct reg_snapshot {
word_t r9;
word_t r10;
word_t r11;

/*
* ATTENTION: the following registers might actually be stored on the
* other stack; don't access them unless you know exactly
* what you're doing
* Old stack pointer used before exception entry.
* Bit 2 in lr defines which stack was used.
*/

word_t _r12;
void *_lr; /* alias r14 */
void *_pc; /* alias r15 */
word_t _psr;
struct hw_context *sp;
void *lr;
};

#define arch_syscall_num(reg_snap) ((reg_snap)->r7)
#define arch_syscall_arg1(reg_snap) ((reg_snap)->r0)
#define arch_syscall_arg2(reg_snap) ((reg_snap)->r1)
#define arch_syscall_arg3(reg_snap) ((reg_snap)->r2)
#define arch_syscall_arg4(reg_snap) ((reg_snap)->r3)
#define arch_syscall_arg5(reg_snap) ((reg_snap)->r4)
#define arch_syscall_arg6(reg_snap) ((reg_snap)->r5)
/**
* @brief Used for in-kernel context switching.
* This is where `do_switch()` stores the register values.
*/
struct context {
word_t r4;
word_t r5;
word_t r6;
word_t r7;
word_t r8;
word_t r9;
word_t r10;
word_t r11;
void *sp;
void *pc;
};

#define arch_syscall_set_rval(reg_snap, val) ((reg_snap)->r0 = (word_t)(val));
/**
* @brief Task Control Block.
* This is a low level structure used by `do_switch()` to do the actual context
* switching,
*/
struct tcb {
struct context context;
struct hw_context *hw_context;
};

__always_inline sysarg_t sc_num(const struct exc_context *ctx)
{
return ctx->r7;
}

__always_inline sysarg_t sc_arg1(const struct exc_context *ctx)
{
return ctx->sp->r0;
}

__always_inline sysarg_t sc_arg2(const struct exc_context *ctx)
{
return ctx->sp->r1;
}

__always_inline sysarg_t sc_arg3(const struct exc_context *ctx)
{
return ctx->sp->r2;
}

__always_inline sysarg_t sc_arg4(const struct exc_context *ctx)
{
return ctx->sp->r3;
}

__always_inline sysarg_t sc_arg5(const struct exc_context *ctx)
{
return ctx->r4;
}

__always_inline sysarg_t sc_arg6(const struct exc_context *ctx)
{
return ctx->r5;
}

__always_inline void sc_set_rval(struct exc_context *ctx, long rval)
{
/* raw cast */
*(long *)&ctx->sp->r0 = rval;
}

#ifdef ARDIX_ARCH
# define __SAM3X8E__
# define DONT_USE_CMSIS_INIT
# define __PROGRAM_START

# include <sam3x8e.h>

# undef __PROGRAM_START
# undef DONT_USE_CMSIS_INIT
# undef __SAM3X8E__
#endif /* ARDIX_ARCH */

/*
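The split between the two snapshots above determines where each syscall argument comes from: r0-r3 (arguments 1-4) are stacked by hardware into the hw_context, while r4/r5 (arguments 5-6) and the syscall number in r7 live in the software-saved exc_context. The snippet below is my own host-compilable illustration of that mapping with mocked structs and an invented syscall number; it is not Ardix code and sc_set_rval is simplified to a plain assignment.

/* sysarg_mapping.c -- editorial illustration only */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t word_t;
typedef uint32_t sysarg_t;

struct hw_context { word_t r0, r1, r2, r3, r12; void *lr; void *pc; word_t psr; };
struct exc_context {
	word_t r4, r5, r6, r7, r8, r9, r10, r11;
	struct hw_context *sp;
	void *lr;
};

/* same idea as the sc_* inlines: args 1-4 from the hardware frame,
 * args 5-6 and the syscall number from the software frame */
static sysarg_t sc_num(const struct exc_context *ctx)  { return ctx->r7; }
static sysarg_t sc_arg1(const struct exc_context *ctx) { return ctx->sp->r0; }
static sysarg_t sc_arg5(const struct exc_context *ctx) { return ctx->r4; }
static void sc_set_rval(struct exc_context *ctx, long rval) { ctx->sp->r0 = (word_t)rval; }

int main(void)
{
	/* pretend: syscall number 1, first argument 2, fifth argument 42 */
	struct hw_context hw = { .r0 = 2 };
	struct exc_context exc = { .r4 = 42, .r7 = 1, .sp = &hw };

	printf("syscall %u, arg1 = %u, arg5 = %u\n",
	       (unsigned)sc_num(&exc), (unsigned)sc_arg1(&exc), (unsigned)sc_arg5(&exc));
	sc_set_rval(&exc, -5 /* some negative errno */);
	printf("saved r0 now reads back as %ld\n", (long)(int32_t)hw.r0);
	return 0;
}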
26 arch/at91sam3x8e/leave.S Normal file
@@ -0,0 +1,26 @@
/* See the end of this file for copyright, license, and warranty information. */

.include "asm.S"

.text

/* void _leave(void); */
func_begin _leave
pop {r4-r12,lr}
cpsie i
msr psp, r12
bx lr
func_end _leave

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
@@ -1,19 +1,24 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <arch-generic/do_switch.h>
#include <arch-generic/sched.h>
#include <arch/hardware.h>
#include <arch/interrupt.h>
#include <arch/linker.h>

#include <ardix/atomic.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/serial.h>

#include <errno.h>
#include <string.h>

volatile unsigned long int tick = 0;
unsigned int systick_reload;
unsigned int tick_freq;

static unsigned int systick_reload;
static unsigned int tick_freq;

void handle_sys_tick(void)
{

@@ -24,61 +29,42 @@ void handle_sys_tick(void)
* because the docs say you're supposed to do it that way
*/
if (!is_atomic())
arch_irq_invoke(IRQNO_PEND_SV);
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
}

/**
* Set the NVIC priority grouping field in `AIRCR`.
* Only values from 0..7 are allowed, see the SAM3X documentation.
*
* @param prio_group: The new priority grouping value.
*/
static inline void sched_nvic_set_prio_group(uint32_t prio_group)
{
uint32_t reg_val = REG_SCB_AIRCR;

reg_val &= ~(REG_SCB_AIRCR_VECTKEY_MASK | REG_SCB_AIRCR_PRIGROUP_MASK);
reg_val = reg_val
| REG_SCB_AIRCR_VECTKEY_VAL(REG_SCB_AIRCR_VECTKEY_MAGIC)
| REG_SCB_AIRCR_PRIGROUP_VAL(prio_group);

REG_SCB_AIRCR = reg_val;
}

int arch_sched_hwtimer_init(unsigned int freq)
int arch_sched_init(unsigned int freq)
{
tick_freq = freq;
systick_reload = sys_core_clock / freq;
if (systick_reload > REG_SYSTICK_LOAD_RELOAD_MASK)
systick_reload = SystemCoreClock / freq;
if ((systick_reload & SysTick_LOAD_RELOAD_Msk) != systick_reload)
return 1;

/* Ensure SysTick and PendSV are preemptive */
sched_nvic_set_prio_group(0b011);
/* no subgrouping */
NVIC_SetPriorityGrouping(0b011);

REG_SYSTICK_LOAD = (systick_reload & REG_SYSTICK_LOAD_RELOAD_MASK) - 1;
REG_SYSTICK_VAL = 0U;
REG_SYSTICK_CTRL = REG_SYSTICK_CTRL_CLKSOURCE_BIT /* MCK */
| REG_SYSTICK_CTRL_TICKINT_BIT /* trigger exception */
| REG_SYSTICK_CTRL_ENABLE_BIT; /* enable SysTick */
SysTick_Config(systick_reload);

return 0;
}

void arch_task_init(struct task *task, void (*entry)(void))
{
struct reg_snapshot *regs = task->stack_bottom - sizeof(*regs);
task->sp = regs;
struct hw_context *hw_context = task->bottom - sizeof(*hw_context);
struct exc_context *exc_context = (void *)hw_context - sizeof(*exc_context);

memset(regs, 0, sizeof(*regs));
regs->hw.pc = entry;
regs->hw.psr = 0x01000000;
regs->sw.lr = (void *)0xfffffff9;
}
memset(hw_context, 0, task->bottom - (void *)hw_context);

void yield(enum task_state state)
{
current->state = state;
arch_irq_invoke(IRQNO_PEND_SV);
exc_context->sp = hw_context;
exc_context->lr = (void *)0xfffffff9; /* return to thread mode, use MSP */

hw_context->pc = entry;
hw_context->psr = 0x01000000; /* Thumb state bit set, unprivileged */
hw_context->lr = (void *)0xffffffff;
task->tcb.hw_context = hw_context;

memset(&task->tcb.context, 0, sizeof(task->tcb.context));
task->tcb.context.pc = _leave;
task->tcb.context.sp = exc_context;
}

__naked __noreturn static void idle_task_entry(void)

@@ -92,8 +78,7 @@ int arch_idle_task_init(struct task *task)
if (stack == NULL)
return -ENOMEM;

task->stack_bottom = stack + CONFIG_STACK_SIZE - 4;
task->sp = task->stack_bottom - sizeof(struct reg_snapshot);
task->bottom = stack + CONFIG_STACK_SIZE; /* full-descending stack */
arch_task_init(task, idle_task_entry);
task->sleep = 0;
task->last_tick = 0;
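To make the new arch_task_init() easier to follow, here is a hedged, host-compilable sketch I added of the initial stack it builds: a zeroed hw_context at the very bottom of the stack, an exc_context just below it, and a TCB whose saved pc points at _leave so the first do_switch() into the task unwinds through the normal exception-return path. Struct sizes differ on a 64-bit host, but the carving order is the same; this is not Ardix code.

/* init_stack_sketch.c -- editorial illustration only */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t word_t;

struct hw_context { word_t r0, r1, r2, r3, r12; void *lr; void *pc; word_t psr; };
struct exc_context {
	word_t r4, r5, r6, r7, r8, r9, r10, r11;
	struct hw_context *sp;
	void *lr;
};
struct context { word_t regs[8]; void *sp; void *pc; };

static void _leave(void) { }      /* stand-in for the assembly stub above */
static void task_entry(void) { }  /* stand-in for the task's entry point */

#define STACK_SIZE 2048

int main(void)
{
	static uint8_t stack[STACK_SIZE];
	void *bottom = stack + STACK_SIZE;                /* full-descending */

	/* hardware frame at the very bottom, software frame right below it */
	struct hw_context *hw = (struct hw_context *)bottom - 1;
	struct exc_context *exc = (struct exc_context *)hw - 1;

	memset(hw, 0, sizeof(*hw));
	hw->pc = (void *)task_entry;                      /* task starts here */
	hw->psr = 0x01000000;                             /* Thumb bit */

	exc->sp = hw;
	exc->lr = (void *)(uintptr_t)0xfffffff9;          /* EXC_RETURN: thread mode, MSP */

	/* the first do_switch() into this task "returns" through _leave,
	 * which pops exc and lets the exception-return sequence consume hw */
	struct context tcb = { .sp = exc, .pc = (void *)_leave };

	printf("stack bottom %p\n", bottom);
	printf("hw_context  %p (%zu bytes)\n", (void *)hw, sizeof(*hw));
	printf("exc_context %p (%zu bytes)\n", (void *)exc, sizeof(*exc));
	printf("tcb: pc=%p (_leave), sp=%p\n", tcb.pc, (void *)tcb.sp);
	return 0;
}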
41 include/arch-generic/do_switch.h Normal file
@@ -0,0 +1,41 @@
/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <arch/hardware.h>
#include <ardix/sched.h>

#include <toolchain.h>

/**
* @brief New tasks point their initial program counter here.
*/
extern void _leave(void);

extern void _do_switch(struct context *old, struct context *new);

/**
* @brief Perform an in-kernel context switch.
* This is called from `schedule()`, and will suspend the current task.
* The call returns when the current task is woken back up.
*
* @param old Currently running task in whose TCB the context will be stored
* @param new Task to switch to, must not be the same as `old`
*/
__always_inline void do_switch(struct task *old, struct task *new)
{
_do_switch(&old->tcb.context, &new->tcb.context);
}

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
*
* Ardix is non-violent software: you may only use, redistribute,
* and/or modify it under the terms of the CNPLv6+ as found in
* the LICENSE file in the source code root directory or at
* <https://git.pixie.town/thufie/CNPL>.
*
* Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPLv6+ for details.
*/
@@ -2,6 +2,8 @@

#pragma once

#include <arch-generic/sched.h>

#include <ardix/kent.h>
#include <ardix/list.h>
#include <ardix/types.h>

@@ -27,11 +29,14 @@ enum task_state {

/** @brief Core structure holding information about a task. */
struct task {
struct tcb tcb;

struct kent kent;
/** current stack pointer (only gets updated for task switching) */
void *sp;
/** first address of the stack (highest if the stack grows downwards) */
void *stack_bottom;
/**
* @brief Points to the bottom of the stack.
* In a full-descending stack, this is one word after the highest stack address.
*/
void *bottom;
/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
unsigned long int sleep;
/** @brief Last execution in ticks */

@@ -42,18 +47,11 @@ struct task {
};

/** @brief Current task (access from syscall context only) */
extern struct task *current;
extern struct task *volatile current;

/** @brief Global system tick counter (may overflow) */
extern volatile unsigned long int tick;

/**
* @brief If nonzero, the scheduler is invoked after the current syscall.
* This is checked and then cleared after every syscall. If it has a nonzero
* value, `sched_switch()` is called after `arch_enter()`.
*/
extern int need_resched;

/**
* @brief Initialize the scheduler subsystem.
* This sets up a hardware interrupt timer (SysTick for Cortex-M3).

@@ -61,15 +59,12 @@ extern int need_resched;
int sched_init(void);

/**
* @brief Switch to the next task (scheduler context only).
* Must be called directly from within an interrupt routine.
* This selects a new task to be run and updates the old and new task's `state`
* field to the appropriate value. Called from the scheduler exception handler.
*
* @param curr_sp Stack pointer of the current task
* @returns Stack pointer of the new task
* @brief Main scheduler routine.
* This will iterate over the process table and choose a new task to be run,
* which `current` is then updated to. If the old task was in state
* `TASK_READY`, it is set to `TASK_QUEUE`.
*/
void *sched_switch(void *curr_sp);
void schedule(void);

/**
* @brief Create a copy of the `current` task and return it.

@@ -93,11 +88,13 @@ struct task *task_clone(struct task *task);
void msleep(unsigned long int ms);

/**
* @brief Suspend the `current` task and invoke the scheduler early.
* May only be called from syscall context.
* @brief Invoke the scheduler early and switch tasks if required.
* May only be called from syscall context. Attention: If `state`
* is `TASK_QUEUE`, this call is not guaranteed to suspend the
* current task at all.
*
* @param state State the task should enter.
* Allowed values are `TASK_SLEEP` and `TASK_IOWAIT`.
* @param state State the current task should enter.
* Allowed values are `TASK_QUEUE`, `TASK_SLEEP` and `TASK_IOWAIT`.
*/
void yield(enum task_state state);
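This is the piece the commit message is about: a driver-level read path can now block inside the kernel instead of returning -EAGAIN. The snippet below is a hypothetical illustration of such a path; the ring buffer functions and the yield() stub are made up so the file compiles and runs on a host, and only yield() and the task states correspond to the header above.

/* iowait_sketch.c -- hypothetical usage example, not Ardix code */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

enum task_state { TASK_READY, TASK_QUEUE, TASK_SLEEP, TASK_IOWAIT };

static char rx_buf[16] = "hello";
static size_t rx_len = 5;

/* made-up ring buffer API standing in for the serial driver */
static size_t ringbuf_avail(void) { return rx_len; }
static size_t ringbuf_read(void *buf, size_t len)
{
	size_t n = len < rx_len ? len : rx_len;
	memcpy(buf, rx_buf, n);
	rx_len -= n;
	return n;
}

/* stand-in for the real yield(): pretend the irq handler delivered data */
static void yield(enum task_state state)
{
	(void)state;            /* TASK_IOWAIT in the real call */
}

/*
 * Old world: with no buffered data the syscall had to return -EAGAIN,
 * because going to sleep was only possible via PendSV.  New world: the
 * syscall handler parks the task in TASK_IOWAIT, and yield() simply
 * returns here once the irq handler has marked the task runnable again.
 */
static long serial_read_blocking(void *buf, size_t len)
{
	while (ringbuf_avail() == 0)
		yield(TASK_IOWAIT);
	return (long)ringbuf_read(buf, len);
}

int main(void)
{
	char buf[8];
	long n = serial_read_blocking(buf, sizeof(buf));
	printf("read %ld bytes\n", n);
	return 0;
}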
@@ -23,9 +23,6 @@ typedef struct {

#include <arch/hardware.h>

/* Syscall argument */
typedef word_t sysarg_t;

/*
* This file is part of Ardix.
* Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
@@ -9,9 +9,9 @@
#include <stddef.h>
#include <toolchain.h>

ssize_t sys_read(int fd, __user void *buf, size_t len)
long sys_read(int fd, __user void *buf, size_t len)
{
ssize_t ret;
long ret;
void *copy;

struct file *f = file_get(fd);
@@ -9,9 +9,9 @@
#include <stddef.h>
#include <toolchain.h>

ssize_t sys_write(int fd, __user const void *buf, size_t len)
long sys_write(int fd, __user const void *buf, size_t len)
{
ssize_t ret;
long ret = 0;
void *copy;

struct file *f = file_get(fd);

103 kernel/sched.c
@@ -1,5 +1,6 @@
/* See the end of this file for copyright, license, and warranty information. */

#include <arch-generic/do_switch.h>
#include <arch-generic/sched.h>
#include <arch-generic/watchdog.h>

@@ -17,12 +18,10 @@ extern uint32_t _sstack;
extern uint32_t _estack;

static struct task *tasktab[CONFIG_SCHED_MAXTASK];
struct task *current;
struct task *volatile current;

static struct task idle_task;

int need_resched = 0;

static void task_destroy(struct kent *kent)
{
struct task *task = container_of(kent, struct task, kent);

@@ -32,37 +31,39 @@ static void task_destroy(struct kent *kent)

int sched_init(void)
{
int i;
int err;

current = malloc(sizeof(*current));
if (current == NULL)
struct task *ktask = malloc(sizeof(*ktask));
if (ktask == NULL)
return -ENOMEM;

current->kent.parent = kent_root;
current->kent.destroy = task_destroy;
i = kent_init(&current->kent);
if (i != 0)
ktask->kent.parent = kent_root;
ktask->kent.destroy = task_destroy;
err = kent_init(&ktask->kent);
if (err != 0)
goto out;

current->sp = &_sstack;
current->stack_bottom = &_estack;
current->pid = 0;
current->state = TASK_READY;
tasktab[0] = current;
memset(&ktask->tcb, 0, sizeof(ktask->tcb));
ktask->bottom = &_estack;
ktask->pid = 0;
ktask->state = TASK_READY;

for (i = 1; i < CONFIG_SCHED_MAXTASK; i++)
tasktab[0] = ktask;
current = ktask;

for (unsigned int i = 1; i < ARRAY_SIZE(tasktab); i++)
tasktab[i] = NULL;

i = arch_watchdog_init();
if (i != 0)
err = arch_watchdog_init();
if (err != 0)
goto out;

i = arch_sched_init(CONFIG_SCHED_FREQ);
if (i != 0)
err = arch_sched_init(CONFIG_SCHED_FREQ);
if (err != 0)
goto out;

i = arch_idle_task_init(&idle_task);
if (i != 0)
err = arch_idle_task_init(&idle_task);
if (err != 0)
goto out;

/*

@@ -70,7 +71,7 @@ int sched_init(void)
* are going to panic anyways if the scheduler fails to initialize.
*/
out:
return i;
return err;
}

/**

@@ -95,40 +96,52 @@ static inline bool can_run(const struct task *task)
return false; /* this shouldn't be reached */
}

void *sched_switch(void *curr_sp)
void schedule(void)
{
struct task *tmp;
int i;
/*
* this is -1 if the idle task was running which would normally be a problem
* because it is used as an index in tasktab, but the for loop always
* increments it by 1 before doing actuall array accesses so it's okay here
*/
pid_t nextpid = current->pid;
current->sp = curr_sp;
atomic_enter();

struct task *old = current;
pid_t nextpid = old->pid;

struct task *new = NULL;

kevents_process();

if (current->state == TASK_READY)
current->state = TASK_QUEUE;

for (i = 0; i < CONFIG_SCHED_MAXTASK; i++) {
if (old->state == TASK_READY)
old->state = TASK_QUEUE;
for (unsigned int i = 0; i < ARRAY_SIZE(tasktab); i++) {
/*
* increment nextpid before accessing the task table
* because it is -1 if the idle task was running
*/
nextpid++;
nextpid %= CONFIG_SCHED_MAXTASK;
nextpid %= ARRAY_SIZE(tasktab);

tmp = tasktab[nextpid];
struct task *tmp = tasktab[nextpid];
if (tmp != NULL && can_run(tmp)) {
current = tmp;
new = tmp;
break;
}
}

if (i == CONFIG_SCHED_MAXTASK)
current = &idle_task;
if (new == NULL)
new = &idle_task;

current->state = TASK_READY;
current->last_tick = tick;
return current->sp;
new->state = TASK_READY;
new->last_tick = tick;
current = new;

atomic_leave();

if (old != new)
do_switch(old, new);
}

void yield(enum task_state state)
{
struct task *task = current;
task->state = state;
schedule();
}

struct task *sched_fork(struct task *parent)
@@ -5,16 +5,16 @@
#include <toolchain.h>

#define sys_table_entry(number, func) \
[number] (int (*)(sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t))(func)
[number] (long (*)(sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t))(func)

__rodata
int (*const sys_table[NSYSCALLS])(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sysarg_t arg4, sysarg_t arg5, sysarg_t arg6) = {
long (*const sys_table[NSYSCALLS])(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
sysarg_t arg4, sysarg_t arg5, sysarg_t arg6) = {
sys_table_entry(SYS_read, sys_read),
sys_table_entry(SYS_write, sys_write),
};

int sys_stub(void)
long sys_stub(void)
{
return -ENOSYS;
}
@@ -2,6 +2,8 @@

#include <ardix/list.h>

#include <config.h>

void list_insert(struct list_head *head, struct list_head *new)
{
new->next = head->next;

@@ -24,6 +26,11 @@ void list_delete(struct list_head *head)
{
head->next->prev = head->prev;
head->prev->next = head->next;

# ifdef DEBUG
head->next = NULL;
head->prev = NULL;
# endif
}

/*