fix compiler errors/include conflicts

What a hell of a nightmare this codebase has become over the past few
weeks; it fascinates even me
pull/1/head
Felix Kopp 4 years ago
parent 63f78d7b2b
commit 99a51a5dd2

@@ -26,9 +26,11 @@
ARDIX_ARCH_PWD = $(PWD)/arch/at91sam3x8e
ARDIX_SOURCES += \
$(ARDIX_ARCH_PWD)/atomic.c \
$(ARDIX_ARCH_PWD)/interrupt.c \
$(ARDIX_ARCH_PWD)/sched.c \
$(ARDIX_ARCH_PWD)/serial.c \
$(ARDIX_ARCH_PWD)/spinlock.c \
$(ARDIX_ARCH_PWD)/startup.c \
$(ARDIX_ARCH_PWD)/sys.c

@@ -1,25 +1,24 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#pragma once
#include <ardix/sched.h>
#include <arch/at91sam3x8e/interrupt.h>
#include <arch/at91sam3x8e/spinlock.h>
#include <ardix/atomic.h>
static SPINLOCK_DEFINE(atomic_context);
#include <stdbool.h>
#include <toolchain.h>
void atomic_enter(void)
{
arch_spin_lock(&atomic_context);
}
/** Enter atomic context, i.e. disable preemption */
__always_inline void sched_atomic_enter(void)
void atomic_leave(void)
{
arch_spin_lock(&_in_atomic_context);
arch_spin_unlock(&atomic_context);
}
/** Leave atomic context, i.e. re-enable preemption */
__always_inline void sched_atomic_leave(void)
int is_atomic_context(void)
{
arch_spin_unlock(&_in_atomic_context);
return arch_spinlock_count(&atomic_context);
}
/*

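Note: since atomic_context is a counting spinlock rather than a boolean flag, atomic sections now nest cleanly; preemption is only re-enabled when the outermost section is left. A minimal usage sketch (the helper name is made up for illustration):

#include <ardix/atomic.h>

/* Hypothetical helper: touch shared state without being preempted.
 * Nesting is safe because atomic_context counts enters and leaves. */
static void update_shared_state(void)
{
	atomic_enter();		/* count 0 -> 1, preemption disabled */
	atomic_enter();		/* count 1 -> 2, still disabled (nested) */
	/* ... modify data shared with other processes here ... */
	atomic_leave();		/* count 2 -> 1, still disabled */
	atomic_leave();		/* count 1 -> 0, preemption re-enabled */
}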
@@ -5,6 +5,7 @@
#include <arch/at91sam3x8e/hardware.h>
#include <arch/at91sam3x8e/interrupt.h>
#include <ardix/atomic.h>
#include <ardix/string.h>
#include <ardix/sched.h>
@@ -19,7 +20,7 @@ void irq_sys_tick(void)
* fire a PendSV interrupt and do the actual context switching there
* because it is faster that way (according to the docs, at least)
*/
if (!spinlock_count(&_in_atomic_context))
if (!is_atomic_context())
arch_irq_invoke(IRQNO_PEND_SV);
}
@@ -70,7 +71,7 @@ void arch_sched_process_init(struct process *process, void (*entry)(void))
regs->sw.lr = (void *)0xFFFFFFF9U;
}
void sched_switch_early(enum proc_state state)
void sched_yield(enum proc_state state)
{
REG_SYSTICK_VAL = 0U; /* Reset timer */
_current_process->state = state;

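The rename from sched_switch_early() to sched_yield() matches what the call actually does: put the calling process into the given state and give up the rest of its time slice immediately. A hypothetical wait loop showing the intended pattern (mirroring what serial_write() does further down):

#include <ardix/sched.h>
#include <stdbool.h>

/* Hypothetical example: wait politely for a flag set by an ISR. */
static volatile bool data_ready;

static void wait_for_data(void)
{
	while (!data_ready)
		sched_yield(PROC_IOWAIT); /* give the CPU away until rescheduled */
}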
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#include <ardix/atomic.h>
#include <ardix/io.h>
#include <ardix/ringbuf.h>
#include <ardix/serial.h>
@@ -9,7 +10,6 @@
#include <arch/at91sam3x8e/hardware.h>
#include <arch/at91sam3x8e/interrupt.h>
#include <arch/at91sam3x8e/sched.h>
#include <arch/serial.h>
#include <errno.h>
@@ -91,16 +91,16 @@ void io_serial_buf_update(struct serial_interface *interface)
struct arch_serial_interface *arch_iface = to_arch_serial_interface(interface);
if (arch_iface->hw_txrdy) {
sched_atomic_enter();
atomic_enter();
len = (uint16_t)ringbuf_read(&arch_iface->txbuf[0], interface->tx,
CONFIG_ARCH_SERIAL_BUFSZ);
sched_atomic_leave();
atomic_leave();
if (len) {
arch_iface->hw_txrdy = false;
REG_UART_IER = REG_UART_IER_TXBUFE_MASK;
REG_UART_PDC_TPR = (uint32_t)&arch_iface->txbuf[0];
REG_UART_PDC_TCR = len;
REG_UART_IER = REG_UART_IER_TXBUFE_MASK;
}
}
}

@@ -1,73 +1,58 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
/*
* An over-simplified approach to implementing locks on a system that
* doesn't even support SMP. Describing the realization as "unfortunate"
* would be an understatement; it is straight-up horrible because it
* can lead to two threads failing to acquire a lock at the same time
* (but it is platform-agnostic thanks to C11, yay!).
*/
#pragma once
#include <errno.h>
#include <stdatomic.h>
#include <toolchain.h>
#include <arch/at91sam3x8e/spinlock.h>
struct atom {
atomic_int lock;
};
/* This code is basically stolen from arch/arm/include/asm/spinlock.h in Linux 5.9 */
/** Initialize an atom to be used as a lock. */
__always_inline void atom_init(struct atom *atom)
void arch_spinlock_init(spinlock_t *lock)
{
atom->lock = 0;
lock->lock = 0;
}
/**
* Destroy this atom or fail if it is currently locked.
* If successful, this will make any subsequent locking attempts fail.
*
* @param atom: The atom to be destroyed.
* @returns 0 on success, and `-EAGAIN` if the atom is currently locked.
*/
__always_inline int atom_destroy(struct atom *atom)
int arch_spin_lock(spinlock_t *lock)
{
if (atom->lock != 0)
return -EAGAIN;
atom->lock = -1;
return 0;
int tmp;
int newval;
spinlock_t lockval;
__asm__ volatile(
"1: ldrex %0, [%3] \n" /* lockval = *lock */
" add %1, %0, #1 \n" /* newval = lockval.lock + 1 */
" strex %2, %1, [%3] \n" /* *lock = newval */
" teq %2, #0 \n" /* store successful? */
" bne 1b \n" /* -> goto 1 if not */
" dmb " /* memory barrier */
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
: "r" (lock)
: "cc");
return newval;
}
/**
* Attempt to acquire a lock on an atom.
*
* @param atom: The atom to get the lock on.
* @returns 0 on success, and `-EAGAIN` if the atom was already locked by
* another process.
*/
__always_inline int atom_lock(struct atom *atom)
int arch_spin_unlock(spinlock_t *lock)
{
atom->lock++;
if (atom->lock != 1) {
atom->lock--;
return -EAGAIN;
} else {
return 0;
}
int tmp;
int newval;
spinlock_t lockval;
__asm__ volatile(
"1: ldrex %0, [%3] \n"
" sub %1, %0, #1 \n"
" strex %2, %1, [%3] \n"
" teq %2, #0 \n"
" bne 1b \n"
" dmb "
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
: "r" (lock)
: "cc");
return newval;
}
/**
* Release the lock on an atom.
* Even though it is possible with the current implementation, releasing a
* lock that isn't yours is a bad idea for obvious reasons.
*
* @param atom: The atom to release the lock from.
*/
__always_inline void atom_unlock(struct atom *atom)
int arch_spinlock_count(spinlock_t *lock)
{
atom->lock--;
return lock->lock;
}
/*

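For readers unfamiliar with the ARM exclusive-access pair: LDREX loads the lock word and tags the address for exclusive monitoring, STREX only succeeds (writes 0 to its result register) if nothing else touched the address in between, `bne 1b` retries until the store sticks, and the trailing DMB orders later memory accesses after the lock operation. On a C11 toolchain the same increment-and-return could be written portably; a comparison sketch only, not part of this commit (it would require the lock word to be declared _Atomic):

#include <stdatomic.h>

/* Portable equivalent of the arch_spin_lock() LDREX/STREX loop above. */
static int spin_lock_c11(atomic_int *lock)
{
	/* atomic_fetch_add() returns the old value; add 1 for the new count */
	return atomic_fetch_add(lock, 1) + 1;
}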
@@ -3,24 +3,20 @@
#pragma once
#include <stdint.h>
#include <toolchain.h>
#include <arch/at91sam3x8e/spinlock_type.h>
typedef struct spinlock {
int lock;
} spinlock_t;
#include <toolchain.h>
/* This code is basically stolen from arch/arm/include/asm/spinlock.h in Linux 5.9 */
#define SPINLOCK_DEFINE(name) spinlock_t name = { .lock = 0 }
/**
* Initialize a spinlock.
*
* @param lock: Pointer to the spinlock.
*/
inline void arch_spinlock_init(spinlock_t *lock)
{
lock->lock = 0;
}
void arch_spinlock_init(spinlock_t *lock);
/**
* Increment the lock count on a spinlock.
@@ -28,25 +24,7 @@ inline void arch_spinlock_init(spinlock_t *lock)
* @param lock: Pointer to the spinlock.
* @returns The new lock count.
*/
inline int arch_spin_lock(spinlock_t *lock)
{
int tmp;
int newval;
spinlock_t lockval;
__asm__ volatile(
"1: ldrex %0, [%3] \n" /* lockval = *lock */
" add %1, %0, #1 \n" /* newval = lockval.lock + 1 */
" strex %2, %1, [%3] \n" /* *lock = newval */
" teq %2, #0 \n" /* store successful? */
" bne 1b \n" /* -> goto 1 if not */
" dmb " /* memory barrier */
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
: "r" (lock)
: "cc");
return newval;
}
int arch_spin_lock(spinlock_t *lock);
/**
* Decrement the lock count on a spinlock.
@@ -54,35 +32,14 @@ inline int arch_spin_lock(spinlock_t *lock)
* @param lock: Pointer to the spinlock.
* @returns The new lock count.
*/
inline int arch_spin_unlock(spinlock_t *lock)
{
int tmp;
int newval;
spinlock_t lockval;
__asm__ volatile(
"1: ldrex %0, [%3] \n"
" sub %1, %0, #1 \n"
" strex %2, %1, [%3] \n"
" teq %2, #0 \n"
" bne 1b \n"
" dmb "
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
: "r" (lock)
: "cc");
return newval;
}
int arch_spin_unlock(spinlock_t *lock);
/**
* Get the lock count on a spinlock.
*
* @param lock: Pointer to the spinlock.
*/
__always_inline int arch_spinlock_count(spinlock_t *lock)
{
return lock->lock;
}
int arch_spinlock_count(spinlock_t *lock);
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>

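With the bodies moved out of the header into spinlock.c, the header now carries only declarations plus the SPINLOCK_DEFINE initializer macro, which is presumably part of the "compiler errors/include conflicts" the commit title refers to (inline definitions in a header included by several translation units). A minimal caller of the resulting API (the counter is hypothetical):

#include <arch/at91sam3x8e/spinlock.h>

static SPINLOCK_DEFINE(counter_lock);
static int counter;

/* Hypothetical example: serialize increments of a shared counter. */
static int counter_inc(void)
{
	int val;

	arch_spin_lock(&counter_lock);
	val = ++counter;
	arch_spin_unlock(&counter_lock);
	return val;
}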
@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#pragma once
typedef struct spinlock {
int lock;
} spinlock_t;
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be
* used to endorse or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

@@ -3,7 +3,6 @@
#pragma once
#include <arch/arch_include.h>
#include <arch/hardware.h>
#include <stdbool.h>
@@ -28,8 +27,6 @@ int arch_sched_hwtimer_init(unsigned int freq);
*/
void arch_sched_process_init(struct process *process, void (*entry)(void));
#include ARCH_INCLUDE(sched.h)
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>
*

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#pragma once
#include <arch/arch_include.h>
#include ARCH_INCLUDE(spinlock_type.h)
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be
* used to endorse or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

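ARCH_INCLUDE comes from arch/arch_include.h and lets arch-agnostic headers pull in the matching per-arch file, so this wrapper stays a two-line shim. The real macro is not shown in this diff; a common way to build one, under the assumption that the target is passed in as a macro like ARCH=at91sam3x8e:

/* Illustrative sketch only -- the actual arch/arch_include.h may differ. */
#define _ARCH_STR(x)		#x
#define _ARCH_PATH(dir, file)	_ARCH_STR(dir/file)
#define ARCH_INCLUDE(file)	_ARCH_PATH(arch/ARCH, file)

/* #include ARCH_INCLUDE(spinlock_type.h) then expands to
 * #include "arch/at91sam3x8e/spinlock_type.h" */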
@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#pragma once
/** Enter atomic context. */
void atomic_enter(void);
/** Leave atomic context. */
void atomic_leave(void);
/** Return a nonzero value if the current process is in atomic context. */
int is_atomic_context(void);
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be
* used to endorse or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

@@ -4,6 +4,7 @@
#pragma once
#include <ardix/list.h>
#include <ardix/spinlock.h>
#include <ardix/types.h>
#include <arch/hardware.h>
@@ -60,8 +61,6 @@ struct process {
/** The currently executing process. */
extern struct process *_current_process;
extern spinlock_t _in_atomic_context;
/**
* Initialize the scheduler subsystem.
* This sets up a hardware interrupt timer (SysTick for Cortex-M3).
@@ -103,7 +102,7 @@ struct process *sched_process_create(void (*entry)(void));
* @param state The state the process should enter.
* Allowed values are `PROC_SLEEP` and `PROC_IOWAIT`.
*/
void sched_switch_early(enum proc_state state);
void sched_yield(enum proc_state state);
/*
* Copyright (c) 2020 Felix Kopp <sandtler@sandtler.club>

@@ -3,14 +3,15 @@
#pragma once
#include <arch/spinlock.h>
/*
* Spinlocks in Ardix work pretty much the same as they do on Linux
* (this is basically just a ripoff). See The Linux Kernel documentation
* for details.
*/
#include <arch/spinlock.h>
#include <toolchain.h>
/**
* Initialize a spinlock.
*

@@ -14,7 +14,7 @@ static __naked void io_thread_entry(void)
while (1) {
io_serial_buf_update(serial_default_interface);
sched_switch_early(PROC_QUEUE);
sched_yield(PROC_QUEUE);
}
}

@@ -1,10 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#include <arch/hardware.h>
#include <arch/sched.h>
#include <ardix/list.h>
#include <ardix/atomic.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/string.h>
@@ -19,8 +18,6 @@ extern uint32_t _estack;
struct process *proc_table[CONFIG_SCHED_MAXPROC];
struct process *_current_process;
bool _is_atomic_context = false;
int sched_init(void)
{
int i;
@@ -86,7 +83,7 @@ struct process *sched_process_create(void (*entry)(void))
if (proc == NULL)
return NULL;
sched_atomic_enter();
atomic_enter();
for (pid = 1; pid < CONFIG_SCHED_MAXPROC; pid++) {
if (proc_table[pid] == NULL)
@@ -96,7 +93,7 @@ struct process *sched_process_create(void (*entry)(void))
if (pid == CONFIG_SCHED_MAXPROC) {
/* max number of processes exceeded */
free(proc);
sched_atomic_leave();
atomic_leave();
return NULL;
}
@@ -110,7 +107,7 @@ struct process *sched_process_create(void (*entry)(void))
proc_table[pid] = proc;
sched_atomic_leave();
atomic_leave();
return proc;
}

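sched_process_create() now guards the whole proc_table scan with the atomic section. A hypothetical call site, e.g. spawning a worker thread during kernel init:

#include <ardix/sched.h>
#include <stddef.h>

/* Hypothetical entry point; entries must have this signature. */
static void example_thread(void)
{
	while (1)
		sched_yield(PROC_SLEEP);
}

static int spawn_example_thread(void)
{
	struct process *proc = sched_process_create(example_thread);
	if (proc == NULL)
		return -1; /* process table full or allocation failed */
	return 0;
}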
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* See the end of this file for copyright, licensing, and warranty information. */
#include <ardix/atomic.h>
#include <ardix/ringbuf.h>
#include <ardix/serial.h>
#include <ardix/sched.h>
#include <ardix/serial.h>
#include <arch/sched.h>
#include <arch/serial.h>
#include <stddef.h>
@@ -50,9 +50,9 @@ ssize_t serial_read(void *dest, struct serial_interface *interface, size_t len)
{
ssize_t ret;
sched_atomic_enter();
atomic_enter();
ret = (ssize_t)ringbuf_read(dest, interface->rx, len);
sched_atomic_leave();
atomic_leave();
return ret;
}
@@ -63,15 +63,15 @@ ssize_t serial_write(struct serial_interface *interface, const void *data, size_t len)
size_t tmp;
while (1) {
sched_atomic_enter();
atomic_enter();
tmp = ringbuf_write(interface->tx, data, len);
sched_atomic_leave();
atomic_leave();
ret += tmp;
if (ret != len) { /* buffer full, suspend until I/O is ready */
len -= tmp;
data += tmp;
sched_switch_early(PROC_IOWAIT);
sched_yield(PROC_IOWAIT);
} else {
break;
}

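One portability nit in the loop above predates this commit: `data += tmp` does arithmetic on a `const void *`, which is a GNU extension (GCC treats sizeof(void) as 1). A strictly conforming spelling, identical in behavior on this toolchain:

data = (const uint8_t *)data + tmp; /* portable void-pointer advance */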