sched: make sleeping actually work

parent b4046795c8
commit e96ee14e68

4 changed files with 23 additions and 30 deletions
@@ -13,6 +13,7 @@
 volatile unsigned long int tick = 0;
 unsigned int systick_reload;
+unsigned int tick_freq;
 
 void handle_sys_tick(void)
 {
@@ -46,6 +47,7 @@ static inline void sched_nvic_set_prio_group(uint32_t prio_group)
 int arch_sched_hwtimer_init(unsigned int freq)
 {
+	tick_freq = freq;
 	systick_reload = sys_core_clock / freq;
 	if (systick_reload > REG_SYSTICK_LOAD_RELOAD_MASK)
 		return 1;
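A quick sanity check on the reload computation above, as a standalone sketch. The 84 MHz core clock and 1 kHz tick are assumed example values, not taken from the diff; the mask corresponds to the 24-bit SysTick reload field on Cortex-M:

#include <stdio.h>

#define SYSTICK_RELOAD_MASK 0x00FFFFFFu /* SysTick LOAD is 24 bits wide on Cortex-M */

int main(void)
{
	unsigned long sys_core_clock = 84000000; /* assumed: SAM3X8E at 84 MHz */
	unsigned int freq = 1000;                /* assumed: 1 kHz system tick */
	unsigned long reload = sys_core_clock / freq;

	/* 84000000 / 1000 = 84000, well below the 24-bit limit of 16777215 */
	printf("reload = %lu (max %u)\n", reload, SYSTICK_RELOAD_MASK);
	return 0;
}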
@@ -81,19 +83,17 @@ void yield(enum task_state state)
 __naked __noreturn static void idle_task_entry(void)
 {
+	__asm__ volatile(
+"1:	b	1b	\n"
+	:::
+	);
 	while (1);
 }
 
 int arch_idle_task_init(struct task *task)
 {
-	void *sp = malloc(sizeof(struct reg_snapshot));
-	if (sp == NULL)
+	void *stack = malloc(CONFIG_STACK_SIZE);
+	if (stack == NULL)
 		return -ENOMEM;
 
-	task->stack_bottom = sp + sizeof(struct reg_snapshot);
+	task->stack_bottom = stack + CONFIG_STACK_SIZE - 4;
+	task->sp = task->stack_bottom - sizeof(struct reg_snapshot);
 	arch_task_init(task, idle_task_entry);
 	task->sleep = 0;
 	task->last_tick = 0;
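The pointer arithmetic in arch_idle_task_init is easy to misread, so here is a host-side sketch of the same layout. CONFIG_STACK_SIZE and struct reg_snapshot are project-specific; the sizes below are invented for illustration. ARM stacks grow downward, so the "bottom" of the task stack is the highest usable address of the allocation, and the initial stack pointer leaves room for one register snapshot that the context switcher will pop:

#include <stdio.h>
#include <stdlib.h>

#define CONFIG_STACK_SIZE 2048                  /* assumed stack size in bytes */
struct reg_snapshot { unsigned int r[17]; };    /* stand-in for the real layout */

int main(void)
{
	void *stack = malloc(CONFIG_STACK_SIZE);
	if (stack == NULL)
		return 1;

	/* highest usable address of the allocation, minus 4 bytes of headroom */
	void *stack_bottom = (char *)stack + CONFIG_STACK_SIZE - 4;

	/* leave room for one register snapshot below the stack bottom */
	void *sp = (char *)stack_bottom - sizeof(struct reg_snapshot);

	printf("alloc=%p bottom=%p sp=%p\n", stack, stack_bottom, sp);
	free(stack);
	return 0;
}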
@@ -104,7 +104,7 @@ int arch_idle_task_init(struct task *task)
 unsigned long int ms_to_ticks(unsigned long int ms)
 {
-	return (unsigned long int)systick_reload * ms / sys_core_clock;
+	return ( ms * (unsigned long int)tick_freq ) / 1000lu /* 1 s = 1000 ms */;
 }
 
 /*
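This is the heart of the fix: since systick_reload == sys_core_clock / tick_freq, the old expression algebraically collapses to ms / tick_freq, so with a 1 kHz tick msleep(1000) slept a single tick, i.e. one millisecond. A standalone demonstration with assumed example values (84 MHz clock, 1 kHz tick):

#include <stdio.h>

int main(void)
{
	unsigned long sys_core_clock = 84000000;                   /* assumed */
	unsigned long tick_freq = 1000;                            /* assumed */
	unsigned long systick_reload = sys_core_clock / tick_freq; /* 84000 */
	unsigned long ms = 1000;                                   /* msleep(1000) */

	unsigned long old_ticks = systick_reload * ms / sys_core_clock; /* = 1    */
	unsigned long new_ticks = ms * tick_freq / 1000lu;              /* = 1000 */

	printf("msleep(1000): old=%lu tick(s), new=%lu ticks\n", old_ticks, new_ticks);
	return 0;
}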
@@ -28,7 +28,6 @@
  */
 int main(void)
 {
-	volatile uint32_t count = 0;
 	volatile unsigned int print_count = 0;
 
 	int err = kent_root_init();
@@ -56,18 +55,15 @@ int main(void)
 	REG_PIOB_CODR = 1 << 27;
 
 	while (true) {
-		if (count++ != 1000000)
-			continue;
-
 		printf("hello, world (%u)\n", print_count);
 
+		msleep(1000);
 
 		print_count++;
		if (print_count % 2)
 			REG_PIOB_CODR = 1 << 27;
 		else
 			REG_PIOB_SODR = 1 << 27;
 
-		count = 0;
 	}
 }
@@ -22,7 +22,7 @@ static struct list_head kev_listeners[KEVENT_KIND_COUNT];
 static MUTEX(kev_listeners_lock);
 
 struct kevent_queue {
-	struct list_head list;	/* -> kevent_listener::link */
+	struct list_head list;	/* -> kevent::link */
 	struct mutex lock;
 };
 
@@ -50,7 +50,7 @@ void kevents_init(void)
 /* called from scheduler context only */
 static inline void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
 {
-	struct kevent *event, *tmp;
+	struct kevent *event, *tmp_event;
 
 	/*
 	 * This method is only invoked from scheduler context which has higher
@@ -59,10 +59,10 @@ static inline void process_single_queue(struct kevent_queue *queue, struct list_head *listeners)
 	 * to just abort and try again during the next system tick.
 	 */
 	if (mutex_trylock(&queue->lock) == 0) {
-		list_for_each_entry_safe(&queue->list, event, tmp, link) {
-			struct kevent_listener *listener;
+		list_for_each_entry_safe(&queue->list, event, tmp_event, link) {
+			struct kevent_listener *listener, *tmp_listener;
 
-			list_for_each_entry(listeners, listener, link) {
+			list_for_each_entry_safe(listeners, listener, tmp_listener, link) {
 				int cb_ret = listener->cb(event, listener->extra);
 
 				if (cb_ret & KEVENT_CB_LISTENER_DEL) {
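The switch to the _safe iterators matters because a callback may return KEVENT_CB_LISTENER_DEL, meaning the node under the cursor can be unlinked and freed mid-walk; a plain iterator would then read the next pointer from freed memory. The general pattern, illustrated with a hand-rolled singly linked list rather than the project's actual list API:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int unsubscribe; /* stand-in for a KEVENT_CB_LISTENER_DEL result */
};

static void walk_and_delete(struct node **head)
{
	struct node *cur = *head;
	struct node **prev = head;

	while (cur != NULL) {
		struct node *next = cur->next; /* saved before cur may be freed */

		if (cur->unsubscribe) {
			*prev = next; /* unlink and release the current node */
			free(cur);
		} else {
			prev = &cur->next;
		}
		cur = next; /* safe: taken from the saved lookahead */
	}
}

int main(void)
{
	struct node *head = NULL;

	/* build a three-element list; the middle element asks to be removed */
	for (int i = 2; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->unsubscribe = (i == 1);
		n->next = head;
		head = n;
	}

	walk_and_delete(&head);

	for (struct node *cur = head; cur != NULL; cur = cur->next)
		printf("node survives: unsubscribe=%d\n", cur->unsubscribe);
	return 0;
}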
@@ -93,8 +93,8 @@ void kevents_process(void)
 	if (mutex_trylock(&kev_cache_lock) == 0) {
 		struct kevent *cursor, *tmp;
 		list_for_each_entry_safe(&kev_cache, cursor, tmp, link) {
+			list_insert(&kev_queues[cursor->kind].list, &cursor->link);
 			list_delete(&cursor->link);
-			list_insert(&kev_queues[cursor->kind], &cursor->link);
 		}
 
 		mutex_unlock(&kev_cache_lock);
@@ -73,8 +73,6 @@ out:
 	return i;
 }
 
-#include <arch/debug.h>
-
 /**
  * @brief Determine whether the specified task is a candidate for execution.
 *
@@ -85,7 +83,7 @@ static inline bool can_run(const struct task *task)
 {
 	switch (task->state) {
 	case TASK_SLEEP:
-		return tick - task->last_tick > task->sleep;
+		return tick - task->last_tick >= task->sleep;
 	case TASK_QUEUE:
 	case TASK_READY:
 		return true;
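The comparison change removes an off-by-one: with the strict >, a task that asked to sleep N ticks was not runnable until N+1 ticks had elapsed. A standalone sketch with invented tick values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long last_tick = 100; /* tick at which the task went to sleep */
	unsigned long sleep = 5;       /* requested sleep duration in ticks */

	for (unsigned long tick = 104; tick <= 106; tick++) {
		bool old_ready = tick - last_tick > sleep;  /* first true at tick 106 */
		bool new_ready = tick - last_tick >= sleep; /* first true at tick 105 */
		printf("tick %lu: old=%d new=%d\n", tick, old_ready, new_ready);
	}
	return 0;
}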
@@ -101,18 +99,20 @@ void *sched_switch(void *curr_sp)
 {
-	struct task *tmp;
 	int i;
+	/*
+	 * this is -1 if the idle task was running which would normally be a problem
+	 * because it is used as an index in tasktab, but the for loop always
+	 * increments it by 1 before doing actuall array accesses so it's okay here
+	 */
 	pid_t nextpid = current->pid;
 	current->sp = curr_sp;
 
-	//__breakpoint;
 
 	kevents_process();
 
-	if (current->state != TASK_SLEEP && current->state != TASK_IOWAIT)
+	if (current->state == TASK_READY)
 		current->state = TASK_QUEUE;
 
 	for (i = 0; i < CONFIG_SCHED_MAXTASK; i++) {
-		//__breakpoint;
 		nextpid++;
 		nextpid %= CONFIG_SCHED_MAXTASK;
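Note the idle task's pid of -1 described by the new comment: the walk increments before indexing, so the first candidate examined is always pid 0 and the modulo keeps the cursor inside the task table. A standalone sketch (the CONFIG_SCHED_MAXTASK value is assumed):

#include <stdio.h>

#define CONFIG_SCHED_MAXTASK 8 /* assumed table size */

int main(void)
{
	int nextpid = -1; /* the idle task was running */

	/* increment happens before any array access, so starting at -1 is safe */
	for (int i = 0; i < CONFIG_SCHED_MAXTASK; i++) {
		nextpid++;
		nextpid %= CONFIG_SCHED_MAXTASK;
		printf("candidate pid %d\n", nextpid); /* 0, 1, ..., 7 */
	}
	return 0;
}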
@@ -128,7 +128,6 @@ void *sched_switch(void *curr_sp)
 
 	current->state = TASK_READY;
 	current->last_tick = tick;
-	//__breakpoint;
 	return current->sp;
 }
 
@@ -164,10 +163,8 @@ err_alloc:
 
 void msleep(unsigned long int ms)
 {
-	//__breakpoint;
 	current->sleep = ms_to_ticks(ms);
 	yield(TASK_SLEEP);
-	//__breakpoint;
 }
 
 /*
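Putting the pieces together: msleep() converts the duration to ticks and yields, sched_switch() stamps last_tick when the task is switched out, and can_run() wakes the task once enough ticks have elapsed. A host-side simulation under the same assumptions as above (1 kHz tick, scheduler reduced to a single task; the function internals are simplified from the diff, not copied verbatim):

#include <stdbool.h>
#include <stdio.h>

static unsigned long tick;                       /* incremented by the SysTick handler */
static unsigned long task_sleep, task_last_tick; /* per-task fields in the real code */

static unsigned long ms_to_ticks(unsigned long ms)
{
	return ms * 1000ul / 1000ul; /* tick_freq assumed to be 1000 Hz */
}

static bool can_run(void)
{
	return tick - task_last_tick >= task_sleep;
}

int main(void)
{
	task_sleep = ms_to_ticks(25); /* msleep(25) */
	task_last_tick = tick;        /* stamped by sched_switch() */

	while (!can_run())
		tick++; /* stands in for handle_sys_tick() firing */

	printf("woke after %lu ticks\n", tick - task_last_tick); /* 25 */
	return 0;
}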