sched: minor refactor

Felix Kopp 2020-11-30 02:26:17 +01:00
parent e690a6824e
commit 570f036752
GPG key ID: C478BA0A85F75728
4 changed files with 60 additions and 97 deletions


@@ -17,6 +17,7 @@ void irq_sys_tick(void)
	 * fire a PendSV interrupt and do the actual context switching there
	 * because it is faster that way (according to the docs, at least)
	 */
-	arch_irq_invoke(IRQNO_PEND_SV);
+	if (!_is_atomic_context)
+		arch_irq_invoke(IRQNO_PEND_SV);
 }

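On the Cortex-M3, pending the PendSV exception comes down to setting the PENDSVSET bit (bit 28) in the Interrupt Control and State Register at 0xE000ED04. The sketch below shows what arch_irq_invoke(IRQNO_PEND_SV) presumably boils down to; the macro and helper names are illustrative, not taken from this commit.

#include <stdint.h>

/* ICSR lives at 0xE000ED04 on Cortex-M3; writing 1 to bit 28 pends PendSV. */
#define SCB_ICSR	(*(volatile uint32_t *)0xE000ED04u)
#define ICSR_PENDSVSET	((uint32_t)1 << 28)

/* Hypothetical equivalent of arch_irq_invoke(IRQNO_PEND_SV). */
static inline void pend_context_switch(void)
{
	/* PendSV is usually given the lowest priority, so the actual switch
	 * only runs once every other exception has returned. */
	SCB_ICSR = ICSR_PENDSVSET;
}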

@@ -3,19 +3,22 @@
 #pragma once
 
 #include <ardix/sched.h>
 #include <arch/at91sam3x8e/interrupt.h>
+#include <stdbool.h>
 #include <toolchain.h>
 
 /** Enter atomic context, i.e. disable preemption */
 __always_inline void sched_atomic_enter(void)
 {
 	arch_irq_disable(IRQNO_PEND_SV);
+	_is_atomic_context = true;
 }
 
 /** Leave atomic context, i.e. re-enable preemption */
 __always_inline void sched_atomic_leave(void)
 {
 	arch_irq_enable(IRQNO_PEND_SV);
+	_is_atomic_context = false;
 }
 
 /*

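Together with the SysTick guard in the first file, these two inlines form a cheap critical section: while _is_atomic_context is set, a tick that fires mid-section will not pend a context switch. A minimal usage sketch, assuming a made-up piece of shared state:

#include <arch/sched.h>

static unsigned int shared_counter;	/* hypothetical shared state */

static void bump_counter(void)
{
	sched_atomic_enter();	/* masks PendSV and raises the flag */
	shared_counter++;	/* cannot be preempted here */
	sched_atomic_leave();	/* preemption is possible again */
}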

@@ -3,9 +3,12 @@
 #pragma once
 
 #include <ardix/list.h>
 #include <ardix/types.h>
+#include <arch/hardware.h>
+
+#include <stdbool.h>
 
 #ifndef CONFIG_SCHED_MAXPROC
 /** The maximum number of processes. */
 #define CONFIG_SCHED_MAXPROC 8
@@ -40,8 +43,6 @@ enum proc_state {
 /** Stores an entire process image. */
 struct process {
-	/** Next process in the (circular) list. */
-	struct process *next;
 	/** Stack pointer. */
 	void *sp;
 	/** Bottom of the stack (i.e. the highest address). */
@@ -59,6 +60,8 @@ struct process {
 /** The currently executing process. */
 extern struct process *_current_process;
 
+extern bool _is_atomic_context;
+
 /**
  * Initialize the scheduler subsystem.
  * This sets up a hardware interrupt timer (SysTick for Cortex-M3).
@@ -102,38 +105,6 @@ struct process *sched_process_create(void (*entry)(void));
  */
 void sched_switch_early(enum proc_state state);
 
-/**
- * Suspend the current process for the specified number of milliseconds.
- * Note that the actual sleep time may deviate slightly from this interval
- * because of the round-robin scheduling algorithm.
- * If the sleep time must be exact, use `atomic_udelay()`.
- * Note, however, that this will block *all* other processes, even including
- * I/O, for the entire time period.
- *
- * @param msecs: The number of milliseconds to (approximately) sleep for.
- */
-void msleep(unsigned long int msecs);
-
-/**
- * Block the entire CPU from execution for the specified number of microseconds.
- * Note that this will temporarily disable the scheduler, meaning that *nothing*
- * (not even I/O) will be executed. The only reason you would ever want to use
- * this is for mission-critical, very short (<= 100 us) periods of time.
- *
- * @param usecs: The number of microseconds to halt the CPU for.
- */
-void atomic_udelay(unsigned long int usecs);
-
-/**
- * Attempt to acquire an atomic lock.
- *
- * @param mutex: The pointer to the mutex.
- * @returns `0` if the lock could be acquired, and `-EAGAIN` if not.
- */
-int atomic_lock(atomic_t *mutex);
-
-void atomic_unlock(atomic_t *mutex);
 
 /*
  * Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>
  *

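For context, the declarations removed above were the old sleeping and locking surface. A sketch of how they were meant to be used, based purely on the doc comments (the worker function and the zero-initialized lock are made up):

#include <ardix/sched.h>
#include <errno.h>

static atomic_t lock;	/* assumed to start out unlocked */

static void worker(void)
{
	/* atomic_lock() does not block, so spin with approximate sleeps */
	while (atomic_lock(&lock) == -EAGAIN)
		msleep(1);

	/* ... critical section ... */

	atomic_unlock(&lock);
}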

@@ -3,45 +3,42 @@
 #include <arch/hardware.h>
 #include <arch/sched.h>
 
 #include <ardix/list.h>
+#include <ardix/malloc.h>
 #include <ardix/sched.h>
 #include <ardix/string.h>
 #include <ardix/types.h>
 
+#include <errno.h>
 #include <stddef.h>
 
 extern uint32_t _sstack;
 extern uint32_t _estack;
 
+struct process *proc_table[CONFIG_SCHED_MAXPROC];
 struct process *_current_process;
 
-/**
- * An array of all processes.
- * The `pid` not only identifies each process, it is also the index of the
- * struct in this array. However, unused slots have a `pid` of `-1`.
- */
-static struct process procs[CONFIG_SCHED_MAXPROC];
+bool _is_atomic_context = false;
 
 int sched_init(void)
 {
 	int i;
 
-	_current_process = &procs[0];
-	_current_process->next = _current_process;
+	_current_process = malloc(sizeof(*_current_process));
+	if (_current_process == NULL)
+		return -ENOMEM;
+
 	_current_process->sp = &_sstack;
 	_current_process->stack_bottom = &_estack;
 	_current_process->pid = 0;
 	_current_process->state = PROC_READY;
+	proc_table[0] = _current_process;
 
-	for (i = 1; i < CONFIG_SCHED_MAXPROC; i++) {
-		procs[i].next = NULL;
-		procs[i].sp = NULL;
-		procs[i].stack_bottom = &_estack - (CONFIG_STACKSZ * (unsigned int)i);
-		procs[i].pid = -1;
-		procs[i].state = PROC_DEAD;
-	}
+	for (i = 1; i < CONFIG_SCHED_MAXPROC; i++)
+		proc_table[i] = NULL;
 
-	i = arch_sched_hwtimer_init(CONFIG_SCHED_INTR_FREQ);
-	return i;
+	return arch_sched_hwtimer_init(CONFIG_SCHED_INTR_FREQ);
 }
 
 /**
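
A subtlety shared by the removed loop above and the new sched_process_create() further down: _estack is declared as uint32_t, so &_estack - (CONFIG_STACKSZ * i) steps in 4-byte words rather than bytes. A standalone illustration, assuming CONFIG_STACKSZ is 2048 (the actual value is not part of this diff):

#include <stdint.h>
#include <stdio.h>

#define CONFIG_STACKSZ 2048u	/* assumed value, for illustration only */

static uint32_t estack_demo;	/* stand-in for the linker symbol _estack */

int main(void)
{
	uint32_t *top = &estack_demo;
	uint32_t *bottom = top - CONFIG_STACKSZ * 1u;	/* slot for pid 1 */

	/* Pointer arithmetic on uint32_t * scales by sizeof(uint32_t), so each
	 * slot actually spans CONFIG_STACKSZ * 4 = 8192 bytes here. */
	printf("%zu bytes per stack slot\n", (size_t)((char *)top - (char *)bottom));
	return 0;
}
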
@@ -62,67 +59,58 @@ static inline bool sched_proc_should_run(const struct process *proc)
 void *sched_process_switch(void *curr_sp)
 {
-	struct process *nextproc = _current_process;
+	pid_t nextpid = _current_process->pid;
 
 	_current_process->sp = curr_sp;
-	if (_current_process->state != PROC_SLEEP)
+	if (_current_process->state != PROC_SLEEP && _current_process->state != PROC_IOWAIT)
 		_current_process->state = PROC_QUEUE;
 
-	while (true) {
-		nextproc = nextproc->next;
-		if (sched_proc_should_run(nextproc)) {
-			nextproc->state = PROC_READY;
-			_current_process = nextproc;
+	while (1) {
+		nextpid++;
+		nextpid %= CONFIG_SCHED_MAXPROC;
+		if (proc_table[nextpid] != NULL && proc_table[nextpid]->state == PROC_QUEUE) {
+			_current_process = proc_table[nextpid];
 			break;
 		}
-		/* TODO: Let the CPU sleep if there is nothing to do */
+		/* TODO: Add idle thread */
 	}
 
+	_current_process = proc_table[nextpid];
+	_current_process->state = PROC_READY;
 	return _current_process->sp;
 }
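
The new selection logic is easier to follow in isolation: start one PID after the current one and scan the table modulo CONFIG_SCHED_MAXPROC until a queued slot turns up. A self-contained sketch; unlike the kernel loop it gives up after one full lap instead of spinning, since the idle thread from the TODO does not exist yet:

#include <stddef.h>

#define SCHED_MAXPROC 8		/* stands in for CONFIG_SCHED_MAXPROC */

enum state { ST_QUEUE, ST_READY, ST_SLEEP, ST_DEAD };

struct proc_stub {
	int pid;
	enum state state;
};

static struct proc_stub *table[SCHED_MAXPROC];

/* Return the next queued pid after current, or -1 if nothing is runnable. */
static int pick_next(int current)
{
	int pid = current;

	for (int i = 0; i < SCHED_MAXPROC; i++) {
		pid = (pid + 1) % SCHED_MAXPROC;
		if (table[pid] != NULL && table[pid]->state == ST_QUEUE)
			return pid;
	}
	return -1;	/* the kernel would switch to an idle thread here */
}
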
-/**
- * Find an unused process slot in the `procs` array, insert that process into
- * the scheduler's ring queue and return it. Must run in atomic context.
- *
- * @returns A pointer to the new process slot, or `NULL` if none are available.
- */
-static struct process *proclist_find_free_slot_and_link(void)
-{
-	pid_t i;
-	struct process *newproc = NULL;
-
-	/* PID 0 is always reserved for the Kernel process, so start counting from 1 */
-	for (i = 1; i < CONFIG_SCHED_MAXPROC; i++) {
-		if (procs[i].pid == -1 && procs[i].state == PROC_DEAD) {
-			newproc = &procs[i];
-			newproc->next = procs[i - 1].next;
-			procs[i - 1].next = newproc;
-			newproc->pid = i;
-			break;
-		}
-	}
-
-	return newproc;
-}
 struct process *sched_process_create(void (*entry)(void))
 {
-	struct process *proc;
+	pid_t pid;
+	struct process *proc = malloc(sizeof(*proc));
+
+	if (proc == NULL)
+		return NULL;
 
 	sched_atomic_enter();
 
-	proc = proclist_find_free_slot_and_link();
-	if (proc != NULL) {
-		proc->sp = proc->stack_bottom;
-		proc->lastexec = 0;
-		proc->sleep_usecs = 0;
-		proc->state = PROC_QUEUE;
-		arch_sched_process_init(proc, entry);
-	}
+	for (pid = 1; pid < CONFIG_SCHED_MAXPROC; pid++) {
+		if (proc_table[pid] == NULL)
+			break;
	}
 
+	if (pid == CONFIG_SCHED_MAXPROC) {
+		/* max number of processes exceeded */
+		free(proc);
+		sched_atomic_leave();
+		return NULL;
+	}
+
+	proc->pid = pid;
+	proc->stack_bottom = &_estack - (pid * CONFIG_STACKSZ);
+	proc->lastexec = 0;
+	proc->sleep_usecs = 0;
+	proc->state = PROC_QUEUE;
+	arch_sched_process_init(proc, entry);
+
+	proc_table[pid] = proc;
 
 	sched_atomic_leave();
 	return proc;
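
From a caller's perspective the API is unchanged by this refactor; only the slot allocation behind it moved from the static procs array to malloc() plus proc_table. A minimal usage sketch, with a placeholder entry function:

#include <ardix/sched.h>

static void worker_main(void)	/* hypothetical process entry point */
{
	for (;;) {
		/* ... do work; preempted via SysTick/PendSV ... */
	}
}

static int spawn_worker(void)
{
	struct process *proc = sched_process_create(worker_main);

	if (proc == NULL)
		return -1;	/* out of memory, or the process table is full */
	return proc->pid;
}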