/* See the end of this file for copyright and license terms. */

/*
 * NOTE(review): the header names of these #include directives were lost
 * (the file appears to have been mangled in transit); presumably they
 * pulled in the atom, spinlock/mutex, clist, sched/task and errno headers
 * used below -- TODO restore the original header names.
 */
#include
#include
#include
#include
#include
#include
#include

/*
 * Spinlocks, sleeping mutexes and counting semaphores.
 *
 * Lock word conventions as used by this file:
 *   spinlock:   0 = unlocked, 1 = locked
 *   mutex:      1 = unlocked, 0 = locked, negative = locked with waiters
 *   semaphore:  count > 0 units available, negative = tasks waiting
 */

/* Initialize a spinlock to the unlocked state (lock word = 0). */
void spin_init(spin_t *spin)
{
        atom_write(&spin->lock, 0);
}

/*
 * Busy-wait until the lock word can be atomically flipped 0 -> 1.
 * Never sleeps, so it is usable where blocking is impossible.
 */
void spin_lock(spin_t *spin)
{
        spin_loop {
                /* cmp_xchg yields the previous value; 0 means we won */
                if (atom_cmp_xchg(&spin->lock, 0, 1) == 0)
                        break;
        }
}

/*
 * Attempt to take the spinlock exactly once.
 * Returns 0 on success, -EAGAIN if it is already held.
 */
int spin_trylock(spin_t *spin)
{
        if (atom_cmp_xchg(&spin->lock, 0, 1) != 0)
                return -EAGAIN;
        return 0;
}

/* Release the spinlock by resetting the lock word to 0. */
void spin_unlock(spin_t *spin)
{
        atom_write(&spin->lock, 0);
}

/*
 * Initialize a mutex to the unlocked state (lock word = 1)
 * with an empty waiter queue.
 */
void mtx_init(struct mtx *mtx)
{
        atom_write(&mtx->lock, 1);
        spin_init(&mtx->wait_queue_lock);
        clist_init(&mtx->wait_queue);
}

/*
 * Acquire a mutex, putting the current task to sleep until it becomes
 * available if necessary.  Must not be called from IRQ context because
 * it may block; DEBUG builds detect that case, complain, and degrade
 * to a busy-wait instead of sleeping.
 */
void mtx_lock(struct mtx *mtx)
{
# ifdef DEBUG
        if (in_irq()) {
                kprintf("mtx_lock() called from irq context!\n");
                /* can't sleep in an irq handler, so spin until we win */
                spin_loop {
                        if (atom_cmp_xchg(&mtx->lock, 1, 0) == 1)
                                return;
                }
        }
# endif

        /*
         * When the mutex is locked, its lock value goes to 0.
         * atom_dec() returns true if the value *after* the decrement is
         * nonzero, meaning the lock value has become negative.
         */
        if (atom_dec(&mtx->lock)) {
                struct task *task = current;
                /*
                 * It might not be the smartest idea to allocate this thing on
                 * the stack because it's gonna blow up if the task somehow dies
                 * before returning here.  Let's see how this turns out.
                 */
                struct lock_waiter waiter = {
                        .task = task,
                };
                spin_lock(&mtx->wait_queue_lock);
                clist_add(&mtx->wait_queue, &waiter.clink);
                spin_unlock(&mtx->wait_queue_lock);
                /*
                 * NOTE(review): the wait queue lock is dropped before the
                 * state change below, so an unlock on another CPU could set
                 * TASK_READY before TASK_BLOCKED is written here -- confirm
                 * the scheduler tolerates this ordering (lost wakeup risk).
                 */
                task->state = TASK_BLOCKED;
                schedule();
        }
}

/*
 * Try to acquire the mutex without blocking.
 * Returns 0 on success, -EAGAIN if it is currently locked.
 */
int mtx_trylock(struct mtx *mtx)
{
        if (atom_cmp_xchg(&mtx->lock, 1, 0) != 1)
                return -EAGAIN;
        return 0;
}

/*
 * Release the mutex and wake the first waiter if the lock value
 * indicates there is one.
 */
void mtx_unlock(struct mtx *mtx)
{
        /*
         * NOTE(review): sem_up() below treats atom_add() as yielding the
         * *post*-add value (it wakes on <= 0), while this test wakes on < 0.
         * If atom_add() indeed returns the new value, an unlock with exactly
         * one waiter (lock going -1 -> 0) would never wake it -- verify
         * atom_add()'s return semantics against its header.
         */
        if (atom_add(&mtx->lock, 1) < 0) {
                spin_lock(&mtx->wait_queue_lock);
                /* presumes the queue is non-empty whenever the count says so */
                struct lock_waiter *waiter =
                        clist_del_first_entry(&mtx->wait_queue, typeof(*waiter), clink);
                spin_unlock(&mtx->wait_queue_lock);
                waiter->task->state = TASK_READY;
        }
}

/*
 * Initialize a counting semaphore with initial_count available units
 * and an empty waiter queue.
 */
void sem_init(struct sem *sem, int initial_count)
{
        atom_write(&sem->count, initial_count);
        spin_init(&sem->wait_queue_lock);
        clist_init(&sem->wait_queue);
}

/*
 * Take one unit from the semaphore, sleeping if none is available.
 * Returns the number of units left (>= 0); returns 0 after having
 * slept, since the waker hands its unit directly to us.
 * Must not be called from IRQ context; DEBUG builds busy-wait instead.
 */
int sem_down(struct sem *sem)
{
# ifdef DEBUG
        if (in_irq()) {
                kprintf("sem_down() called from IRQ context!\n");
                spin_loop {
                        /*
                         * NOTE(review): despite the name, the non-debug path
                         * below only works if atom_sub() yields the *post*-sub
                         * value; "old" here is presumably that value too.
                         */
                        int old = atom_sub(&sem->count, 1);
                        if (old >= 0)
                                return old;
                        /* went negative: no unit was available, undo and retry */
                        atom_inc(&sem->count);
                }
        }
# endif

        int ret = atom_sub(&sem->count, 1);
        if (ret < 0) {
                struct task *task = current;
                /* stack-allocated waiter: same caveat as in mtx_lock() */
                struct lock_waiter waiter = {
                        .task = task,
                };
                spin_lock(&sem->wait_queue_lock);
                clist_add(&sem->wait_queue, &waiter.clink);
                spin_unlock(&sem->wait_queue_lock);
                task->state = TASK_BLOCKED;
                schedule();
                /* woken by sem_up(), which consumed a unit on our behalf */
                ret = 0;
        }
        return ret;
}

/*
 * Return one unit to the semaphore and wake the first waiter if any
 * task was blocked on it (count was negative before the increment).
 * Returns the new count, clamped to 0 when the unit went to a waiter.
 */
int sem_up(struct sem *sem)
{
        int ret = atom_add(&sem->count, 1);
        if (ret <= 0) {
                spin_lock(&sem->wait_queue_lock);
                /* presumes the queue is non-empty whenever the count says so */
                struct lock_waiter *waiter =
                        clist_del_first_entry(&sem->wait_queue, typeof(*waiter), clink);
                spin_unlock(&sem->wait_queue_lock);
                waiter->task->state = TASK_READY;
                ret = 0;
        }
        return ret;
}

/*
 * Try to take one unit without blocking.
 * Returns the remaining count on success, -EAGAIN if none was available.
 */
int sem_trydown(struct sem *sem)
{
        int ret = atom_sub(&sem->count, 1);
        if (ret < 0) {
                /* nothing was available: undo the decrement */
                atom_inc(&sem->count);
                ret = -EAGAIN;
        }
        return ret;
}

/*
 * This file is part of GayBSD.
 * Copyright (c) 2021 fef .
 *
 * GayBSD is nonviolent software: you may only use, redistribute, and/or
 * modify it under the terms of the Cooperative Nonviolent Public License
 * (CNPL) as found in the LICENSE file in the source code root directory
 * or at ; either version 7
 * of the license, or (at your option) any later version.
 *
 * GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law.  See the CNPL for details.
 */