x86: add atomic primitives

main
anna 3 years ago
parent c3847487be
commit 582758e868
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -0,0 +1,238 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#include <gay/types.h>
/**
* @brief Read an atom's current value.
* You usually shouldn't need this function because all the other atomic
* primitives return the value before the operation, and we are only really
* interested in how values *compare* between operations.
* Don't use `atom_read()` followed by another atomic operation; it defeats the
* whole purpose of using atomics in the first place.
*
* @param atom Atom to read the value of
* @return The atom's "current" value (at the time of reading it)
*/
inline int atom_read(const atom_t *atom)
{
return atom->_value;
}
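To illustrate the warning above: reading and then writing back loses atomicity, while a single read-modify-write primitive does not. A minimal sketch, assuming a hypothetical `counter` atom that is initialized elsewhere (not part of this commit):

extern atom_t counter;

/* racy: another CPU can change counter between the read and the write */
atom_write(&counter, atom_read(&counter) + 1);

/* atomic: the whole read-modify-write is one locked instruction */
atom_add(&counter, 1);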
/**
* @brief Write a new value to an atom.
*
* @param atom Atom to write to
* @param val New value
* @return The value of `atom` *before* the operation
*/
inline int atom_write(atom_t *atom, int val)
{
int eax;
__asm__ volatile(
" mov (%2), %0 \n" /* eax = atom->_value */
"1: lock \n"
" cmpxchgl %1, (%2) \n" /* if (atom->_value == eax) atom->_value = val */
" jne 1b \n" /* else eax was reloaded with the current value, try again */
: "=&a"(eax) /* early clobber: eax is written before the inputs are used again */
: "r"(val), "r"(&atom->_value)
: "cc", "memory"
);
return eax;
}
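For readers less familiar with `cmpxchg`: when the comparison fails, the CPU reloads `%eax` with the value currently in memory, so jumping back to the `cmpxchg` retries with fresh data. Roughly the same loop expressed with the GCC/Clang builtin, in a hypothetical `atom_write_builtin` helper shown only for comparison (not part of this commit):

static inline int atom_write_builtin(atom_t *atom, int val)
{
	int expected = atom->_value;
	while (!__atomic_compare_exchange_n(&atom->_value, &expected, val, false,
	                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		; /* on failure, expected is reloaded with the current value */
	return expected; /* the value before the write, like the asm returns in eax */
}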
/**
* @brief Perform an atomic load/add/store.
*
* @param atom Atom to add to
* @param val Value to add
* @return The value of `atom` *before* the operation
*/
inline int atom_add(atom_t *atom, int val)
{
__asm__ volatile(
" lock \n"
" xaddl %0, (%1) \n"
: "+r"(val)
: "r"(&atom->_value)
: "cc", "memory"
);
return val;
}
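Because the *old* value is returned, `lock xaddl` hands every caller a distinct result even under contention, which is handy for ticket counters and statistics. A small sketch with a hypothetical `next_ticket` atom (not part of this commit):

extern atom_t next_ticket; /* hypothetical, starts at 0 */

int my_ticket = atom_add(&next_ticket, 1); /* callers get 0, 1, 2, ... exactly once each */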
/**
* @brief Perform an atomic load/subtract/store.
*
* @param atom Atom to subtract from
* @param val Value to subtract
* @return The value of `atom` *before* the operation
*/
inline int atom_sub(atom_t *atom, int val)
{
return atom_add(atom, -val);
}
/**
* @brief Increment an atom by one.
*
* @param atom Atom to increment
* @return `true` if the value *after* the operation is nonzero
*/
inline bool atom_inc(atom_t *atom)
{
bool nonzero = false;
__asm__ volatile(
" lock \n"
" incl (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initial value isn't optimized out */
: "r"(&atom->_value)
: "cc", "memory"
);
return nonzero;
}
/**
* @brief Decrement an atom by one.
*
* @param atom Atom to decrement
* @return `true` if the value *after* the operation is nonzero
*/
inline bool atom_dec(atom_t *atom)
{
bool nonzero = false;
__asm__ volatile(
" lock \n"
" decl (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initial value isn't optimized out */
: "r"(&atom->_value)
: "cc", "memory"
);
return nonzero;
}
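The boolean returned by these two is what makes them useful for reference counting: incrementing takes a reference, and the caller that sees `atom_dec()` return `false` knows it dropped the last one. A sketch with a hypothetical `refcount` atom and `release_object()` helper (neither is part of this commit):

extern atom_t refcount; /* hypothetical, starts at 1 for the initial owner */
void release_object(void); /* hypothetical cleanup */

atom_inc(&refcount); /* take another reference */
/* ... */
if (!atom_dec(&refcount)) /* false: the counter hit zero */
	release_object(); /* we held the last reference */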
/**
* @brief Perform an atomic bitwise AND and write the result back to the atom.
*
* @param atom Atom to perform the AND on
* @param val Value to AND with
* @return The value of `atom` *before* the operation
*/
inline int atom_and(atom_t *atom, int val)
{
int eax;
int tmp;
__asm__ volatile(
" movl (%3), %0 \n" /* eax = atom->_value */
"1: movl %2, %1 \n" /* tmp = val */
" andl %0, %1 \n" /* tmp &= eax */
" lock \n"
" cmpxchgl %1, (%3) \n" /* if (atom->_value == eax) atom->_value = tmp */
" jne 1b \n" /* else eax was reloaded, recompute tmp and retry */
: "=&a"(eax), "=&r"(tmp)
: "r"(val), "r"(&atom->_value)
: "cc", "memory"
);
return eax;
}
/**
* @brief Perform an atomic bitwise OR and write the result back to the atom.
*
* @param atom Atom to perform the OR on
* @param val Value to OR with
* @return The value of `atom` *before* the operation
*/
inline int atom_or(atom_t *atom, int val)
{
int eax;
int tmp;
__asm__ volatile(
" movl (%3), %0 \n" /* eax = atom->_value */
"1: movl %2, %1 \n" /* tmp = val */
" orl %0, %1 \n" /* tmp |= eax */
" lock \n"
" cmpxchgl %1, (%3) \n" /* if (atom->_value == eax) atom->_value = tmp */
" jne 1b \n" /* else eax was reloaded, recompute tmp and retry */
: "=&a"(eax), "=&r"(tmp)
: "r"(val), "r"(&atom->_value)
: "cc", "memory"
);
return eax;
}
/**
* @brief Perform an atomic bitwise XOR and write the result back to the atom.
*
* @param atom Atom to perform the XOR on
* @param val Value to XOR with
* @return The value of `atom` *before* the operation
*/
inline int atom_xor(atom_t *atom, int val)
{
int eax;
int tmp;
__asm__ volatile(
" movl (%3), %0 \n" /* eax = atom->_value */
"1: movl %2, %1 \n" /* tmp = val */
" xorl %0, %1 \n" /* tmp ^= eax */
" lock \n"
" cmpxchgl %1, (%3) \n" /* if (atom->_value == eax) atom->_value = tmp */
" jne 1b \n" /* else eax was reloaded, recompute tmp and retry */
: "=&a"(eax), "=&r"(tmp)
: "r"(val), "r"(&atom->_value)
: "cc", "memory"
);
return eax;
}
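A usage sketch for the three bitwise primitives, with a hypothetical `flags` atom and flag constants (none of these names are part of this commit):

extern atom_t flags;
#define FLAG_DIRTY   (1 << 0)
#define FLAG_ENABLED (1 << 1)

int old = atom_or(&flags, FLAG_DIRTY); /* set FLAG_DIRTY */
if (old & FLAG_DIRTY)
	; /* it was already set before we got here */
atom_and(&flags, ~FLAG_ENABLED); /* clear FLAG_ENABLED */
atom_xor(&flags, FLAG_ENABLED); /* toggle FLAG_ENABLED */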
/**
* @brief Atomically set a bit.
*
* @param atom Atom to set a bit of
* @param pos Bit position (starting from 0 for the LSB)
* @return `true` if the bit was clear *before* the operation
*/
inline bool arch_atom_set_bit(atom_t *atom, int pos)
{
int mask = 1 << pos;
int oldval = atom_or(atom, mask);
return (oldval & mask) == 0;
}
/**
* @brief Atomically clear a bit.
*
* @param atom Atom to clear a bit of
* @param pos Bit position (starting from 0 for the LSB)
* @return `true` if the bit was set *before* the operation
*/
inline bool arch_atom_clr_bit(atom_t *atom, int pos)
{
int mask = 1 << pos;
int oldval = atom_and(atom, ~mask);
return (oldval & mask) != 0;
}
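The return values turn these two into a tiny try-lock over a bitmap: whoever flips the bit from clear to set owns the slot until they clear it again. A sketch with a hypothetical `slot_map` atom (not part of this commit):

extern atom_t slot_map; /* hypothetical bitmap of 32 busy/free slots */

if (arch_atom_set_bit(&slot_map, 5)) {
	/* bit 5 was clear before, so we own slot 5 now */
	/* ... use the slot ... */
	arch_atom_clr_bit(&slot_map, 5); /* release it */
} else {
	/* somebody else already holds slot 5 */
}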
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -1,6 +1,7 @@
# See the end of this file for copyright and license terms.
target_sources(gay_arch PRIVATE
atom.S
idt.S
interrupt.c
irq.c

@@ -0,0 +1,216 @@
/* See the end of this file for copyright and license terms. */
#include <asm/common.h>
/* int arch_atom_read(const atom_t *atom) */
ASM_ENTRY(arch_atom_read)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %ecx
mov (%ecx), %eax
pop %ebp
ret
ASM_END(arch_atom_read)
/* int arch_atom_write(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_write)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov 12(%ebp), %ecx
mov (%edx), %eax
1: lock
cmpxchgl %ecx, (%edx)
jne 1b
pop %ebp
ret
ASM_END(arch_atom_write)
/* bool arch_atom_inc(atom_t *atom) */
ASM_ENTRY(arch_atom_inc)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
xor %eax, %eax /* clear the result register first: xor would destroy the flags set by incl */
lock
incl (%edx)
setne %al
pop %ebp
ret
ASM_END(arch_atom_inc)
/* bool arch_atom_dec(atom_t *atom) */
ASM_ENTRY(arch_atom_dec)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
xor %eax, %eax /* clear the result register first: xor would destroy the flags set by decl */
lock
decl (%edx)
setne %al
pop %ebp
ret
ASM_END(arch_atom_dec)
/* int arch_atom_add(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_add)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov 12(%ebp), %eax
lock
xaddl %eax, (%edx)
pop %ebp
ret
ASM_END(arch_atom_add)
/* int arch_atom_sub(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_sub)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov 12(%ebp), %eax
/* there is no xsubl, so we add the two's complement */
not %eax
inc %eax
lock
xaddl %eax, (%edx)
pop %ebp
ret
ASM_END(arch_atom_sub)
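The negation above relies on the usual two's-complement identity; as a quick reminder (plain arithmetic, nothing specific to this commit):

/* -val == ~val + 1, e.g. val = 5: ~5 == 0xfffffffa, +1 == 0xfffffffb == -5 in 32 bits */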
/* int arch_atom_and(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_and)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov (%edx), %eax
1: mov %eax, %ecx
and 12(%ebp), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
pop %ebp
ret
ASM_END(arch_atom_and)
/* int arch_atom_or(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_or)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov (%edx), %eax
1: mov %eax, %ecx
or 12(%ebp), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
pop %ebp
ret
ASM_END(arch_atom_or)
/* int arch_atom_xor(atom_t *atom, int val) */
ASM_ENTRY(arch_atom_xor)
push %ebp
mov %esp, %ebp
mov 8(%ebp), %edx
mov (%edx), %eax
1: mov %eax, %ecx
xor 12(%ebp), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
pop %ebp
ret
ASM_END(arch_atom_xor)
/* bool arch_atom_set_bit(atom_t *atom, int bit) */
ASM_ENTRY(arch_atom_set_bit)
push %ebp
mov %esp, %ebp
push %ebx
mov 8(%ebp), %edx
mov 12(%ebp), %ecx
mov $1, %ebx
shl %cl, %ebx
mov (%edx), %eax
1: mov %eax, %ecx
or %ebx, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
/* return true if the bit was clear before (%ecx was clobbered in the loop, so test against the mask) */
test %ebx, %eax
sete %al
movzbl %al, %eax
pop %ebx
pop %ebp
ret
ASM_END(arch_atom_set_bit)
/* bool arch_atom_clr_bit(atom_t *atom, int bit) */
ASM_ENTRY(arch_atom_clr_bit)
push %ebp
mov %esp, %ebp
push %ebx
mov 8(%ebp), %edx
mov 12(%ebp), %ecx
mov $0xfffffffe, %ebx
rol %cl, %ebx
mov (%edx), %eax
1: mov %eax, %ecx
and %ebx, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
/* return true if the bit was set before (%ecx was clobbered in the loop, so test against the mask) */
not %ebx /* %ebx = 1 << bit */
test %ebx, %eax
setne %al
movzbl %al, %eax
pop %ebx
pop %ebp
ret
ASM_END(arch_atom_clr_bit)
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/