mutex: avoid wait queue lock if possible
This also tidies up the atomic operations a little and adds a new atom_cmp_xchg() as well as the same APIs for longs and pointers.
parent
c66b05216d
commit
c36b03d97c
@ -0,0 +1,193 @@
|
||||
/* See the end of this file for copyright and license terms. */
|
||||
|
||||
#pragma once
|
||||
#ifndef _ARCH_ATOM_H_
|
||||
#error "This file is not meant to be included directly, use <arch/atom.h>"
|
||||
#endif
|
||||
|
||||
#include <gay/cdefs.h>
|
||||
#include <gay/types.h>
|
||||
|
||||
#ifndef __LP64__
|
||||
#error "__LP64__ must be defined on amd64"
|
||||
#endif
|
||||
|
||||
static inline long latom_read(const latom_t *latom)
|
||||
{
|
||||
return latom->_value;
|
||||
}
|
||||
|
||||
static inline long latom_write(latom_t *latom, long val)
|
||||
{
|
||||
long rax;
|
||||
|
||||
__asm__ volatile(
|
||||
" movq (%2), %0 \n" /* rax = atom->_value */
|
||||
"1: lock \n"
|
||||
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
|
||||
" pause \n" /* intel says you're supposed to do this in spin loops */
|
||||
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
|
||||
: "=a"(rax)
|
||||
: "r"(val), "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return rax;
|
||||
}
|
||||
|
||||
static inline long latom_cmp_xchg(latom_t *latom, long compare, long val)
|
||||
{
|
||||
long rax = compare;
|
||||
|
||||
__asm__ volatile(
|
||||
" lock \n"
|
||||
" cmpxchlq %1, (%2) \n" /* if ((rax = latom->_value) == compare) latom->_value = val */
|
||||
: "+a"(rax)
|
||||
: "r"(val), "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return rax;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Perform an atomic load/add/store.
|
||||
*
|
||||
* @param atom Atom to add to
|
||||
* @param val Value to add
|
||||
* @return The value of `atom` *before* the operation
|
||||
*/
|
||||
static inline long latom_add(latom_t *latom, long val)
|
||||
{
|
||||
__asm__ volatile(
|
||||
" lock \n"
|
||||
" xaddq %0, (%1) \n"
|
||||
: "+r"(val)
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline long latom_sub(latom_t *latom, long val)
|
||||
{
|
||||
return latom_add(latom, -val);
|
||||
}
|
||||
|
||||
static inline bool latom_inc(latom_t *latom)
|
||||
{
|
||||
bool nonzero = false;
|
||||
|
||||
__asm__ volatile(
|
||||
" lock \n"
|
||||
" incq (%1) \n"
|
||||
" setne %0 \n"
|
||||
: "+r"(nonzero) /* read+write to ensure the initial value isn't optimized out */
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return nonzero;
|
||||
}
|
||||
|
||||
static inline bool latom_dec(latom_t *latom)
|
||||
{
|
||||
bool nonzero = false;
|
||||
|
||||
__asm__ volatile(
|
||||
" lock \n"
|
||||
" decq (%1) \n"
|
||||
" setne %0 \n"
|
||||
: "+r"(nonzero) /* read+write to ensure the initializer isn't optimized out */
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return nonzero;
|
||||
}
|
||||
|
||||
static inline long latom_and(latom_t *latom, long val)
|
||||
{
|
||||
long rax;
|
||||
|
||||
__asm__ volatile(
|
||||
" movq (%2), %0 \n" /* rax = latom->_value */
|
||||
"1: andq %0, %1 \n" /* val &= rax */
|
||||
" lock \n"
|
||||
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
|
||||
" pause \n" /* intel says you're supposed to do this in spin loops */
|
||||
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
|
||||
: "=a"(rax), "+r"(val)
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return rax;
|
||||
}
|
||||
|
||||
static inline long latom_or(latom_t *latom, long val)
|
||||
{
|
||||
long rax;
|
||||
|
||||
__asm__ volatile(
|
||||
" movq (%2), %0 \n" /* rax = latom->_value */
|
||||
"1: orq %0, %1 \n" /* val |= rax */
|
||||
" lock \n"
|
||||
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
|
||||
" pause \n" /* intel says you're supposed to do this in spin loops */
|
||||
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
|
||||
: "=a"(rax), "+r"(val)
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return rax;
|
||||
}
|
||||
|
||||
static inline long latom_xor(latom_t *latom, long val)
|
||||
{
|
||||
long rax;
|
||||
|
||||
__asm__ volatile(
|
||||
" movq (%2), %0 \n" /* rax = latom->_value */
|
||||
"1: xorq %0, %1 \n" /* val ^= rax */
|
||||
" lock \n"
|
||||
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
|
||||
" pause \n" /* intel says you're supposed to do this in spin loops */
|
||||
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
|
||||
: "=a"(rax), "+r"(val)
|
||||
: "r"(&latom->_value)
|
||||
: "cc", "memory"
|
||||
);
|
||||
|
||||
return rax;
|
||||
}
|
||||
|
||||
static inline bool latom_set_bit(latom_t *latom, int pos)
|
||||
{
|
||||
int mask = 1 << pos;
|
||||
long oldval = latom_or(latom, mask);
|
||||
return (oldval & mask) == 0;
|
||||
}
|
||||
|
||||
static inline bool latom_clr_bit(latom_t *latom, int pos)
|
||||
{
|
||||
int mask = 1 << pos;
|
||||
long oldval = latom_and(latom, ~mask);
|
||||
return (oldval & mask) != 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This file is part of GayBSD.
|
||||
* Copyright (c) 2021 fef <owo@fef.moe>.
|
||||
*
|
||||
* GayBSD is nonviolent software: you may only use, redistribute, and/or
|
||||
* modify it under the terms of the Cooperative Nonviolent Public License
|
||||
* (CNPL) as found in the LICENSE file in the source code root directory
|
||||
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
|
||||
* of the license, or (at your option) any later version.
|
||||
*
|
||||
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
|
||||
* permitted by applicable law. See the CNPL for details.
|
||||
*/
|
@ -0,0 +1,91 @@
|
||||
/* See the end of this file for copyright and license terms. */
|
||||
|
||||
#pragma once
|
||||
#ifndef _ARCH_ATOM_H_
|
||||
#error "This file is not meant to be included directly, use <arch/atom.h>"
|
||||
#endif
|
||||
|
||||
#include <gay/cdefs.h>
|
||||
#include <gay/types.h>
|
||||
|
||||
/*
|
||||
* we use ILP32 on i386, long is the same as int
|
||||
*/
|
||||
|
||||
#ifndef __ILP32__
|
||||
#error "__ILP32__ must be defined on i386"
|
||||
#endif
|
||||
|
||||
static inline long latom_read(const latom_t *latom)
|
||||
{
|
||||
return latom->_value;
|
||||
}
|
||||
|
||||
static __always_inline long latom_write(latom_t *latom, long val)
|
||||
{
|
||||
return atom_write((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline long latom_cmp_xchg(latom_t *latom, long compare, long val)
|
||||
{
|
||||
return atom_cmp_xchg((atom_t *)latom, compare, val);
|
||||
}
|
||||
|
||||
static __always_inline long latom_add(latom_t *latom, long val)
|
||||
{
|
||||
return atom_add((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline long latom_sub(latom_t *latom, long val)
|
||||
{
|
||||
return atom_sub((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline bool latom_inc(latom_t *latom)
|
||||
{
|
||||
return atom_inc((atom_t *)latom);
|
||||
}
|
||||
|
||||
static __always_inline bool latom_dec(latom_t *latom)
|
||||
{
|
||||
return atom_dec((atom_t *)latom);
|
||||
}
|
||||
|
||||
static __always_inline long latom_and(latom_t *latom, long val)
|
||||
{
|
||||
return atom_and((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline long latom_or(latom_t *latom, long val)
|
||||
{
|
||||
return atom_or((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline long latom_xor(latom_t *latom, long val)
|
||||
{
|
||||
return atom_xor((atom_t *)latom, val);
|
||||
}
|
||||
|
||||
static __always_inline bool latom_set_bit(latom_t *latom, int pos)
|
||||
{
|
||||
return atom_set_bit((atom_t *)latom, pos);
|
||||
}
|
||||
|
||||
static __always_inline bool latom_clr_bit(latom_t *latom, int pos)
|
||||
{
|
||||
return atom_clr_bit((atom_t *)latom, pos);
|
||||
}
|
||||
|
||||
/*
|
||||
* This file is part of GayBSD.
|
||||
* Copyright (c) 2021 fef <owo@fef.moe>.
|
||||
*
|
||||
* GayBSD is nonviolent software: you may only use, redistribute, and/or
|
||||
* modify it under the terms of the Cooperative Nonviolent Public License
|
||||
* (CNPL) as found in the LICENSE file in the source code root directory
|
||||
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
|
||||
* of the license, or (at your option) any later version.
|
||||
*
|
||||
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
|
||||
* permitted by applicable law. See the CNPL for details.
|
||||
*/
|
@ -1,185 +0,0 @@
|
||||
/* See the end of this file for copyright and license terms. */
|
||||
|
||||
#include <asm/common.h>
|
||||
|
||||
/* int atom_read(const atom_t *atom) */
|
||||
ASM_ENTRY(atom_read)
|
||||
mov 4(%esp), %ecx
|
||||
mov (%ecx), %eax
|
||||
|
||||
ret
|
||||
ASM_END(atom_read)
|
||||
|
||||
/* int atom_write(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_write)
|
||||
mov 4(%esp), %edx
|
||||
mov 8(%esp), %ecx
|
||||
mov (%edx), %eax
|
||||
|
||||
1: lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
ret
|
||||
ASM_END(atom_write)
|
||||
|
||||
/* bool atom_inc(atom_t *atom) */
|
||||
ASM_ENTRY(atom_inc)
|
||||
mov 4(%esp), %edx
|
||||
|
||||
lock
|
||||
incl (%edx)
|
||||
xor %eax, %eax
|
||||
setne %al
|
||||
|
||||
ret
|
||||
ASM_END(atom_inc)
|
||||
|
||||
/* bool atom_dec(atom_t *atom) */
|
||||
ASM_ENTRY(atom_dec)
|
||||
mov 4(%esp), %edx
|
||||
|
||||
lock
|
||||
decl (%edx)
|
||||
xor %eax, %eax
|
||||
setne %al
|
||||
|
||||
ret
|
||||
ASM_END(atom_dec)
|
||||
|
||||
/* int atom_add(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_add)
|
||||
mov 4(%esp), %edx
|
||||
mov 8(%esp), %eax
|
||||
|
||||
lock
|
||||
xadd %eax, (%edx)
|
||||
|
||||
ret
|
||||
ASM_END(atom_add)
|
||||
|
||||
/* int atom_sub(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_sub)
|
||||
mov 4(%esp), %edx
|
||||
mov 8(%esp), %eax
|
||||
|
||||
/* there is no xsubl, so we add the two's complement */
|
||||
neg %eax
|
||||
lock
|
||||
xadd %eax, (%edx)
|
||||
|
||||
ret
|
||||
ASM_END(atom_sub)
|
||||
|
||||
/* int atom_and(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_and)
|
||||
mov 4(%esp), %edx
|
||||
mov (%edx), %eax
|
||||
|
||||
1: mov %eax, %ecx
|
||||
and 8(%esp), %ecx
|
||||
lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
ret
|
||||
ASM_END(atom_and)
|
||||
|
||||
/* int atom_or(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_or)
|
||||
mov 4(%esp), %edx
|
||||
mov (%edx), %eax
|
||||
|
||||
1: mov %eax, %ecx
|
||||
and 8(%esp), %ecx
|
||||
lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
ret
|
||||
ASM_END(atom_or)
|
||||
|
||||
/* int atom_xor(atom_t *atom, int val) */
|
||||
ASM_ENTRY(atom_xor)
|
||||
mov 4(%esp), %edx
|
||||
mov (%edx), %eax
|
||||
|
||||
1: mov %eax, %ecx
|
||||
xor 8(%esp), %ecx
|
||||
lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
ret
|
||||
ASM_END(atom_xor)
|
||||
|
||||
/* bool atom_set_bit(atom_t *atom, int bit) */
|
||||
ASM_ENTRY(atom_set_bit)
|
||||
mov 4(%esp), %edx
|
||||
mov 8(%esp), %ecx
|
||||
|
||||
push %ebx
|
||||
mov $1, %ebx
|
||||
shl %cl, %ebx
|
||||
|
||||
mov (%edx), %eax
|
||||
|
||||
1: mov %eax, %ecx
|
||||
or %ebx, %ecx
|
||||
lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
/* return true if bit was clear before */
|
||||
not %eax
|
||||
and %ebx, %eax
|
||||
shr %cl, %eax
|
||||
|
||||
pop %ebx
|
||||
ret
|
||||
ASM_END(atom_set_bit)
|
||||
|
||||
/* bool atom_clr_bit(atom_t *atom, int bit) */
|
||||
ASM_ENTRY(atom_clr_bit)
|
||||
mov 4(%esp), %edx
|
||||
mov 8(%esp), %ecx
|
||||
|
||||
push %ebx
|
||||
mov $0xfffffffe, %ebx
|
||||
rol %cl, %ebx
|
||||
mov (%edx), %eax
|
||||
|
||||
1: mov %eax, %ecx
|
||||
and %ebx, %ecx
|
||||
lock
|
||||
cmpxchg %ecx, (%edx)
|
||||
pause
|
||||
jne 1b
|
||||
|
||||
/* return true if bit was set before */
|
||||
not %ebx
|
||||
and %ebx, %eax
|
||||
shr %cl, %eax
|
||||
|
||||
pop %ebx
|
||||
ret
|
||||
ASM_END(atom_clr_bit)
|
||||
|
||||
/*
|
||||
* This file is part of GayBSD.
|
||||
* Copyright (c) 2021 fef <owo@fef.moe>.
|
||||
*
|
||||
* GayBSD is nonviolent software: you may only use, redistribute, and/or
|
||||
* modify it under the terms of the Cooperative Nonviolent Public License
|
||||
* (CNPL) as found in the LICENSE file in the source code root directory
|
||||
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
|
||||
* of the license, or (at your option) any later version.
|
||||
*
|
||||
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
|
||||
* permitted by applicable law. See the CNPL for details.
|
||||
*/
|
Loading…
Reference in New Issue