mm: add basic slab allocator

This still needs some work and integration into
kmalloc(), but hey, I've tested it and it doesn't
immediately fall apart, so why not commit it.
main
anna 3 years ago
parent e561adbb6f
commit 2b3eaf4ff7
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@ -26,6 +26,14 @@ struct x86_page_table_entry {
unsigned _reserved1:3;
uintptr_t shifted_address:20; /**< Aligned pointer to the physical page */
} __packed;
#define __PFLAG_PRESENT (1 << 0)
#define __PFLAG_RW (1 << 1)
#define __PFLAG_USER (1 << 2)
#define __PFLAG_WRITE_THROUGH (1 << 3)
#define __PFLAG_NOCACHE (1 << 4)
#define __PFLAG_ACCESSED (1 << 5)
#define __PFLAG_DIRTY (1 << 6)
#define __PFLAG_GLOBAL (1 << 8)
struct x86_page_table {
struct x86_page_table_entry entries[1024];
@ -56,6 +64,7 @@ struct x86_page_directory_entry {
unsigned _ignored2:3;
uintptr_t shifted_address:20; /**< Aligned pointer to `struct x86_page_table` */
} __packed;
#define __PFLAG_HUGE (1 << 7)
struct x86_page_directory {
struct x86_page_directory_entry entries[1024];
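
The new __PFLAG_* constants mirror the hardware bit positions that the entry bitfields above describe (bits 0 through 11 are flags, bits 12 through 31 hold the page frame number). A minimal userspace sketch, not part of this commit, of how a raw 32-bit entry is composed from a page-aligned physical address and these flags; make_pte() is a hypothetical helper:

/* Standalone demo; the three macros are copied from the header above. */
#include <stdint.h>
#include <stdio.h>

#define __PFLAG_PRESENT (1 << 0)
#define __PFLAG_RW      (1 << 1)
#define __PFLAG_GLOBAL  (1 << 8)

static uint32_t make_pte(uint32_t phys, uint32_t flags)
{
	/* shifted_address:20 holds phys >> PAGE_SHIFT, i.e. the top 20 bits */
	return (phys & 0xfffff000u) | (flags & 0xfffu);
}

int main(void)
{
	uint32_t pte = make_pte(0x00100000u,
	                        __PFLAG_PRESENT | __PFLAG_RW | __PFLAG_GLOBAL);
	printf("pte = 0x%08x\n", pte); /* pte = 0x00100103 */
	return 0;
}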

@ -39,7 +39,7 @@ __asmlink struct x86_page_table pt0;
/** @brief First page directory for low memory. */
__asmlink struct x86_page_directory pd0;
int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
int map_page(uintptr_t phys, void *virt, enum pflags flags)
{
# ifdef DEBUG
if (phys != PAGE_ALIGN(phys))
@ -52,7 +52,7 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
if (flags & MM_PAGE_HUGE) {
if (flags & PFLAG_HUGE) {
# ifdef DEBUG
if (phys != HUGEPAGE_ALIGN(phys)) {
kprintf("map_page(): unaligned physical address %p!\n",
@ -68,10 +68,10 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
*(unsigned long *)pde = 0;
pde->present = 1;
pde->huge = 1;
pde->rw = (flags & MM_PAGE_RW) != 0;
pde->user = (flags & MM_PAGE_USER) != 0;
pde->accessed = (flags & MM_PAGE_ACCESSED) != 0;
pde->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pde->rw = (flags & PFLAG_RW) != 0;
pde->user = (flags & PFLAG_USER) != 0;
pde->accessed = (flags & PFLAG_ACCESSED) != 0;
pde->cache_disabled = (flags & PFLAG_NOCACHE) != 0;
pde->shifted_address = phys >> PAGE_SHIFT;
return 0;
}
@ -98,9 +98,9 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
struct x86_page_table_entry *pte = &pt->entries[pt_index];
*(unsigned long *)pte = 0; /* zero out the entire entry first */
pte->rw = (flags & MM_PAGE_RW) != 0;
pte->user = (flags & MM_PAGE_USER) != 0;
pte->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pte->rw = (flags & PFLAG_RW) != 0;
pte->user = (flags & PFLAG_USER) != 0;
pte->cache_disabled = (flags & PFLAG_NOCACHE) != 0;
pte->shifted_address = phys >> PAGE_SHIFT;
pte->present = 1;
@ -210,10 +210,13 @@ uintptr_t vtophys(void *virt)
void vm_flush(void)
{
register_t tmp;
__asm__ volatile(
" mov %%cr3, %%eax \n"
" mov %%eax, %%cr3 \n"
::: "eax", "memory"
" mov %%cr3, %0 \n"
" mov %0, %%cr3 \n"
: "=r"(tmp)
:
: "memory"
);
}
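
The rewritten vm_flush() lets the compiler pick the scratch register instead of hard-clobbering %eax, but reloading CR3 still drops the entire (non-global) TLB. A hedged sketch of a narrower helper, not part of this commit, that only invalidates a single translation using the invlpg instruction (available since the 486):

static inline void vm_flush_page(void *virt)
{
	__asm__ volatile("invlpg (%0)" : : "r"(virt) : "memory");
}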

@ -18,6 +18,12 @@ option(CFG_DEBUG_IRQ "Debug IRQs" ON)
option(CFG_DEBUG_PAGE_ALLOCS "Debug page frame allocations" OFF)
option(CFG_DEBUG_PAGE_ALLOCS_NOISY "Debug page frame allocations in full detail (VERY noisy)" OFF)
option(CFG_DEBUG_SLAB_ALLOCS "Debug slab allocations" OFF)
option(CFG_DEBUG_SLAB_ALLOCS_NOISY "Debug slab allocations in full detail (VERY noisy)" OFF)
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.
#

@ -37,6 +37,15 @@
/** @brief Debug page frame allocations */
#cmakedefine01 CFG_DEBUG_PAGE_ALLOCS
/** @brief Spit out the full details of page allocations */
#cmakedefine01 CFG_DEBUG_PAGE_ALLOCS_NOISY
/** @brief Debug slab allocations */
#cmakedefine01 CFG_DEBUG_SLAB_ALLOCS
/** @brief Spit out the full details of slab allocations */
#cmakedefine01 CFG_DEBUG_SLAB_ALLOCS_NOISY
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.

@ -9,6 +9,13 @@
* To avoid possible confusion, physical memory addresses always use type
* `uintptr_t` and virtual ones are `void *`. This should give us at least some
* type of compiler warning if they are accidentally mixed up.
*
* GayBSD uses a classic slab algorithm for its own data structures, which is
* backed by a buddy page frame allocator. The latter is also used for getting
* bigger areas of memory that are not physically contiguous (for regular user
* allocations). The high memory is statically mapped to the area after the
* kernel image, which starts at `CFG_KERN_ORIGIN + KERN_OFFSET`. On i386,
* this results in a kernel image mapping to `0xf0100000`.
*/
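
A small arithmetic sketch of the layout described above. The concrete constants are placeholders picked for illustration; the real values of CFG_KERN_ORIGIN, KERN_OFFSET and DMAP_OFFSET come from the build configuration and may differ:

#include <stdint.h>
#include <stdio.h>

#define CFG_KERN_ORIGIN 0xf0000000u /* assumed start of kernel space */
#define KERN_OFFSET     0x00100000u /* assumed 1 MiB physical load offset */
#define DMAP_OFFSET     0xf0400000u /* hypothetical direct-map base */

int main(void)
{
	/* "a kernel image mapping to 0xf0100000" from the comment above */
	printf("image: 0x%08x\n", CFG_KERN_ORIGIN + KERN_OFFSET);
	/* direct-map translation in the style of __v(): virt = phys + DMAP_OFFSET */
	uint32_t phys = 0x00200000u;
	printf("__v(): 0x%08x\n", phys + DMAP_OFFSET);
	return 0;
}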
#ifdef _KERNEL
@ -53,16 +60,16 @@ void *kmalloc(size_t size, enum mm_flags flags) __malloc_like __alloc_size(1);
*/
void kfree(void *ptr);
enum mm_page_flags {
MM_PAGE_PRESENT = (1 << 0),
MM_PAGE_RW = (1 << 1),
MM_PAGE_USER = (1 << 2),
MM_PAGE_ACCESSED = (1 << 3),
MM_PAGE_DIRTY = (1 << 4),
MM_PAGE_GLOBAL = (1 << 5),
MM_PAGE_NOCACHE = (1 << 6),
enum pflags {
PFLAG_PRESENT = __PFLAG_PRESENT,
PFLAG_RW = __PFLAG_RW,
PFLAG_USER = __PFLAG_USER,
PFLAG_ACCESSED = __PFLAG_ACCESSED,
PFLAG_DIRTY = __PFLAG_DIRTY,
PFLAG_GLOBAL = __PFLAG_GLOBAL,
PFLAG_NOCACHE = __PFLAG_NOCACHE,
#ifdef __HAVE_HUGEPAGES
MM_PAGE_HUGE = (1 << 7),
PFLAG_HUGE = __PFLAG_HUGE,
#endif
};
@ -77,7 +84,7 @@ enum mm_page_flags {
* @param flags Flags to apply to the page
* @returns 0 on success, or `-ENOMEM` if OOM (for allocating new page tables)
*/
int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags);
int map_page(uintptr_t phys, void *virt, enum pflags flags);
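
A usage sketch for the renamed flag type; the call site below is hypothetical and not part of this commit, but the -ENOMEM error path matches the contract documented above:

#include <gay/mm.h>
#include <gay/types.h>

/* Hypothetical example: map a device register page as uncached kernel memory. */
static int demo_map_mmio(uintptr_t mmio_phys, void *mmio_virt)
{
	int err = map_page(mmio_phys, mmio_virt,
	                   PFLAG_RW | PFLAG_NOCACHE | PFLAG_GLOBAL);
	if (err)
		return err; /* -ENOMEM if a new page table could not be allocated */
	vm_flush();
	return 0;
}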
/**
* @brief Remove a page mapping.
@ -129,17 +136,37 @@ int pages_init(void);
* @param flags
* @return
*/
void *get_pages(usize count, enum mm_flags flags);
void *get_pages(usize count, enum mm_flags flags) __malloc_like;
void free_pages(void *ptr);
/**
* @brief Initialize the slab caches.
* This is called only once by `kmalloc_init()` after the buddy page frame
* allocator is initialized.
*/
void slab_init(void);
void free_pages(void *ptr, usize count);
/**
* @brief Allocate contiguous memory from the slab caches.
* This is only used internally by `kmalloc()` and for relatively small
* objects (<< PAGE_SIZE). If you need memory, use `kmalloc()` instead.
*
* @param size Requested memory size
* @param flags Flags that are passed to `get_pages` for creating new caches
* @return The allocated pointer, or `nil` if OOM or `size` was too big
*/
void *slab_alloc(usize size, enum mm_flags flags) __malloc_like __alloc_size(1);
void slab_free(void *ptr);
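
The commit message notes that kmalloc() is not wired up to this yet; a rough sketch of how that glue might look (the PAGE_SIZE / 2 threshold and the demo_kmalloc name are made up for illustration):

#include <arch/page.h>
#include <gay/mm.h>
#include <gay/types.h>

static void *demo_kmalloc(usize size, enum mm_flags flags)
{
	if (size == 0)
		return nil;
	/* small requests go to the slab caches ... */
	if (size <= PAGE_SIZE / 2)
		return slab_alloc(size, flags);
	/* ... everything else gets whole pages from the buddy allocator */
	usize pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	return get_pages(pages, flags);
}

A real kfree() would additionally have to remember which of the two allocators handed out the pointer, so it can route the free to slab_free() or free_pages() accordingly.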
/**
* @brief Return where a physical address maps to in the direct memory area.
* The returned pointer will be within the range `DMAP_START` (inclusive)
* and `DMAP_END` (exclusive).
*
* @param phys
* @return
* @param phys Physical address
* @return Virtual address
*/
static __always_inline void *__v(uintptr_t phys)
{

@ -3,6 +3,7 @@
target_sources(gay_kernel PRIVATE
kmalloc.c
page.c
slab.c
)
# This file is part of GayBSD.

@ -14,7 +14,12 @@ void *kheap_end;
int kmalloc_init(uintptr_t _phys_start, uintptr_t _phys_end)
{
kprintf("kmalloc_init(%p, %p)\n", (void *)_phys_start, (void *)_phys_end);
/*
* The kernel image is very likely gonna be within the physical memory
* range, so we're gonna need to do some cropping in order to not hand
* out pages that actually contain kernel code.
* Furthermore, somebody should probably clean up this mess somehow.
*/
uintptr_t image_start_phys = (uintptr_t)&_image_start_phys;
uintptr_t image_end_phys = (uintptr_t)&_image_end_phys;
if (_phys_start < image_start_phys && _phys_end > image_start_phys) {
@ -32,7 +37,13 @@ int kmalloc_init(uintptr_t _phys_start, uintptr_t _phys_end)
phys_start = uintptr_align(_phys_start, +HUGEPAGE_SHIFT);
phys_end = uintptr_align(_phys_end, -HUGEPAGE_SHIFT);
kprintf("Aligning physical memory to 0x%08x-0x%08x\n", phys_start, phys_end);
return pages_init();
int err = pages_init();
if (err)
return err;
slab_init();
return 0;
}
/*

@ -32,10 +32,20 @@
#error "PAGE_SIZE must be an integral multiple of LONG_BIT"
#endif
#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
#error "long must be as wide as a pointer"
#endif
#if CFG_DEBUG_PAGE_ALLOCS
#define page_debug(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# define page_debug(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# if CFG_DEBUG_PAGE_ALLOCS_NOISY
# define page_debug_noisy(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# else
# define page_debug_noisy(msg, ...)
# endif
#else
#define page_debug(msg, ...)
# define page_debug(msg, ...)
# define page_debug_noisy(msg, ...)
#endif
/**
@ -46,6 +56,8 @@
*/
#define CACHE_LEVELS (HUGEPAGE_SHIFT - PAGE_SHIFT + 1)
#define LEVEL_SHIFT(level) (PAGE_SHIFT + (level))
/** @brief There is one of these for every cache level. */
struct cache_pool {
/**
@ -110,8 +122,8 @@ int pages_init(void)
* map entire physical memory into the direct contiguous area
*/
for (uintptr_t physptr = phys_start; physptr < phys_end; physptr += HUGEPAGE_SIZE) {
const enum mm_page_flags pflags = MM_PAGE_HUGE | MM_PAGE_RW | MM_PAGE_GLOBAL;
map_page(physptr, (void *)(physptr + DMAP_OFFSET), pflags);
const enum pflags pflags = PFLAG_HUGE | PFLAG_RW | PFLAG_GLOBAL;
map_page(physptr, __v(physptr), pflags);
}
vm_flush();
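
With the usual i386 constants, PAGE_SHIFT = 12 and HUGEPAGE_SHIFT = 22 (assumed here, i.e. 4 KiB pages and 4 MiB huge pages), the level arithmetic used throughout this file works out like this:

#include <stdio.h>

#define PAGE_SHIFT     12 /* assumed */
#define HUGEPAGE_SHIFT 22 /* assumed */
#define CACHE_LEVELS   (HUGEPAGE_SHIFT - PAGE_SHIFT + 1)
#define LEVEL_SHIFT(level) (PAGE_SHIFT + (level))

int main(void)
{
	printf("CACHE_LEVELS = %d\n", CACHE_LEVELS); /* 11 */
	for (int level = 0; level < CACHE_LEVELS; level++)
		printf("level %2d: block size %8lu bytes\n",
		       level, 1ul << LEVEL_SHIFT(level)); /* 4096 ... 4194304 */
	return 0;
}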
@ -120,7 +132,7 @@ int pages_init(void)
*/
usize bitmap_bytes = 0;
for (int i = 0; i < CACHE_LEVELS; i++) {
usize bits = phys_size >> (PAGE_SHIFT + i);
usize bits = phys_size >> LEVEL_SHIFT(i);
/* round up to the next full long */
if (bits & ~LONG_BIT_MASK) {
bits &= LONG_BIT_MASK;
@ -145,9 +157,9 @@ int pages_init(void)
unsigned long *bitmap_pos = bitmap_start;
for (int i = 0; i < CACHE_LEVELS; i++) {
/* total amount of entries on this level */
usize total_bits = phys_size >> (PAGE_SHIFT + i);
usize total_bits = phys_size >> LEVEL_SHIFT(i);
/* number of entries on this level that the bitmap itself takes up */
usize wasted_bits = bitmap_bytes >> (PAGE_SHIFT + i);
usize wasted_bits = bitmap_bytes >> LEVEL_SHIFT(i);
if (wasted_bits == 0)
wasted_bits = 1;
bit_set_range(bitmap_pos, total_bits - wasted_bits, wasted_bits);
@ -168,7 +180,7 @@ int pages_init(void)
* stay empty until one of the large blocks gets split up
*/
struct cache_pool *high_pool = &caches[CACHE_LEVELS - 1];
usize step = 1 << (PAGE_SHIFT + CACHE_LEVELS - 1);
usize step = 1 << LEVEL_SHIFT(CACHE_LEVELS - 1);
for (void *pos = kheap_start; pos < kheap_end; pos += step) {
struct clist *entry = pos;
clist_add(&high_pool->freelist, entry);
@ -192,8 +204,7 @@ static void *split_buddy(void *ptr, int level);
/**
* @brief Attempt to coalesce a block with its buddy.
* If coalescing is possible, the buddy is removed from its freelist at
* `level` and the union block is inserted at `level + 1`.
* If coalescing is possible, the buddy is removed from its freelist at `level`.
*
* @param ptr Pointer to the block
* @param level Cache level, must be less than `CACHE_LEVELS - 1` (because you
@ -202,21 +213,18 @@ static void *split_buddy(void *ptr, int level);
*/
static void *try_join_buddy(void *ptr, int level);
static usize get_bit_number(void *ptr, int level);
static inline usize get_bit_number(void *ptr, int level)
{
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> LEVEL_SHIFT(level);
}
static int get_level(usize count)
void *get_pages(usize count, enum mm_flags flags)
{
int level;
for (level = 0; level < CACHE_LEVELS; level++) {
if ((1 << level) >= count)
break;
}
return level;
}
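
A worked example of the two helpers above as userspace stand-ins; kheap_start is faked and PAGE_SHIFT = 12 is assumed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed */
#define LEVEL_SHIFT(level) (PAGE_SHIFT + (level))

static const uintptr_t fake_kheap_start = 0x00400000u; /* made up for the demo */

static size_t demo_bit_number(uintptr_t ptr, int level)
{
	return (ptr - fake_kheap_start) >> LEVEL_SHIFT(level);
}

static int demo_level(size_t count, int cache_levels)
{
	int level;
	for (level = 0; level < cache_levels; level++) {
		if ((1u << level) >= count)
			break;
	}
	return level;
}

int main(void)
{
	/* 3 pages need a level-2 block (4 pages), 5 pages a level-3 block (8 pages) */
	printf("levels: %d, %d\n", demo_level(3, 11), demo_level(5, 11));
	/* the 20th page after kheap_start is bit 20 on level 0, bit 10 on level 1 */
	uintptr_t p = fake_kheap_start + (20u << PAGE_SHIFT);
	printf("bits: %zu, %zu\n", demo_bit_number(p, 0), demo_bit_number(p, 1));
	return 0;
}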
void *get_pages(usize count, enum mm_flags flags)
{
int level = get_level(count);
if (level == CACHE_LEVELS) {
page_debug("get_pages(%zu, %08x): count too large!\n", count, flags);
return nil;
@ -228,7 +236,7 @@ void *get_pages(usize count, enum mm_flags flags)
}
mtx_lock(&caches_lock);
struct clist *entry;
struct clist *entry = nil;
int entry_level;
for (entry_level = level; entry_level < CACHE_LEVELS; entry_level++) {
if (caches[entry_level].free_entries > 0) {
@ -236,10 +244,8 @@ void *get_pages(usize count, enum mm_flags flags)
break;
}
}
if (entry_level == CACHE_LEVELS) {
mtx_unlock(&caches_lock);
return nil;
}
if (entry_level == CACHE_LEVELS)
goto unlock;
clist_del(entry);
caches[entry_level].free_entries--;
@ -253,53 +259,58 @@ void *get_pages(usize count, enum mm_flags flags)
}
bit_set(caches[level].bitmap, bit_number);
unlock:
mtx_unlock(&caches_lock);
return (void *)entry;
}
void free_pages(void *ptr, usize count)
void free_pages(void *ptr)
{
int level = get_level(count);
if (level == CACHE_LEVELS) {
page_debug("free_pages(%p, %zu): count too large!\n", ptr, count);
return;
}
mtx_lock(&caches_lock);
usize bit_number = get_bit_number(ptr, level);
# if CFG_DEBUG_PAGE_ALLOCS
if (!bit_tst(caches[level].bitmap, bit_number)) {
kprintf("free_pages(%p, %zu): double free!\n", ptr, count);
mtx_unlock(&caches_lock);
if ((uintptr_t)ptr % PAGE_SIZE) {
kprintf("free_pages(%p): unaligned ptr!\n", ptr);
return;
}
# endif
bit_clr(caches[level].bitmap, bit_number);
mtx_lock(&caches_lock);
int level = 0;
usize bit_number = get_bit_number(ptr, level);
for (; level < CACHE_LEVELS; level++) {
if (bit_tst(caches[level].bitmap, bit_number))
break;
bit_number >>= 1;
}
if (level == CACHE_LEVELS) {
page_debug("free_pages(%p): double free!\n", ptr);
goto unlock;
}
while (level < CACHE_LEVELS - 1) {
bit_clr(ptr, bit_number);
ptr = try_join_buddy(ptr, level);
if (ptr == nil)
bit_clr(caches[level].bitmap, bit_number);
void *tmp = try_join_buddy(ptr, level);
if (tmp == nil)
break;
ptr = tmp;
level++;
bit_number >>= 1;
}
mtx_unlock(&caches_lock);
}
clist_add(&caches[level].freelist, (struct clist *)ptr);
caches[level].free_entries++;
static inline usize get_bit_number(void *ptr, int level)
{
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> (PAGE_SHIFT + level);
unlock:
mtx_unlock(&caches_lock);
}
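
The reworked free_pages() no longer takes a count: it recovers the allocation order by walking up the bitmaps, halving the bit number at each level until it finds the bit that get_pages() set. A small standalone illustration of that walk (the bitmap contents and the allocation pattern are invented for the demo):

#include <stddef.h>
#include <stdio.h>

/* Invented state: a level-1 block (2 pages) was allocated at page index 4,
 * so level 1 has bit 4 >> 1 == 2 set and the other levels are clear. */
static const int demo_bitmap[3][8] = {
	{ 0 },       /* level 0 */
	{ [2] = 1 }, /* level 1 */
	{ 0 },       /* level 2 */
};

int main(void)
{
	size_t bit_number = 4; /* level-0 bit number of the pointer being freed */
	int level;
	for (level = 0; level < 3; level++) {
		if (demo_bitmap[level][bit_number])
			break;
		bit_number >>= 1;
	}
	printf("allocation found on level %d (bit %zu)\n", level, bit_number);
	return 0;
}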
static inline void *split_buddy(void *ptr, int level)
{
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % (1 << (PAGE_SHIFT + level))) {
if ((uintptr_t)ptr % (1 << LEVEL_SHIFT(level))) {
kprintf("split_buddy(ptr = %p, level = %d): unaligned ptr!\n", ptr, level);
return nil;
}
@ -309,18 +320,18 @@ static inline void *split_buddy(void *ptr, int level)
}
# endif
struct clist *high_buddy = ptr + (1 << (PAGE_SHIFT + level - 1));
struct clist *high_buddy = ptr + (1 << LEVEL_SHIFT(level - 1));
clist_add(&caches[level - 1].freelist, high_buddy);
caches[level - 1].free_entries++;
page_debug("split (%p:%p), lvl=%d\n", ptr, (void *)high_buddy, level);
page_debug_noisy("split (%p:%p), lvl=%d\n", ptr, (void *)high_buddy, level);
return ptr;
}
static void *try_join_buddy(void *ptr, int level)
{
const usize entry_size = 1 << (PAGE_SHIFT + level);
const usize entry_size = 1 << LEVEL_SHIFT(level);
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % entry_size) {
@ -346,20 +357,18 @@ static void *try_join_buddy(void *ptr, int level)
if (bit_tst(caches[level].bitmap, buddy_bitnum))
return nil;
page_debug("join (%p:%p), lvl=%d\n", ptr, (void *)buddy, level);
page_debug_noisy("join (%p:%p), lvl=%d\n", ptr, (void *)buddy, level);
/* If the buddy is free, we remove it from the freelist ... */
clist_del((struct clist *)buddy);
caches[level].free_entries--;
/*
* ... and add the coalesced block to the freelist one level above.
* ... and return a pointer to the coalesced block.
* We use the same trick as above to get to the even (lower) block, just
* that this time we're zeroing the bit out rather than flipping it.
*/
uintptr_t even = (uintptr_t)ptr & ~entry_size;
clist_add(&caches[level + 1].freelist, (struct clist *)even);
caches[level + 1].free_entries++;
return (void *)even;
}
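
The buddy arithmetic works because every block is aligned to its own size: XORing the pointer with the block size flips between the two buddies, and clearing that bit always yields the lower (even) one. A quick worked example with an assumed level-1 block size of 8 KiB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uintptr_t entry_size = 0x2000;   /* assumed: 8 KiB level-1 blocks */
	uintptr_t block = 0x00406000;          /* made up, entry_size aligned */

	uintptr_t buddy = block ^ entry_size;  /* 0x00404000 */
	uintptr_t even  = block & ~entry_size; /* 0x00404000, the lower buddy */
	uintptr_t back  = even ^ entry_size;   /* 0x00406000 again */

	printf("buddy 0x%08lx, even 0x%08lx, back 0x%08lx\n",
	       (unsigned long)buddy, (unsigned long)even, (unsigned long)back);
	return 0;
}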

@ -0,0 +1,196 @@
/* See the end of this file for copyright and license terms. */
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
#include <string.h>
/**
* @brief This header sits at the beginning of each slab.
* The individual entries follow immediately after the struct itself.
*/
struct slab {
struct clist clink; /* -> pools[entry_size / SLAB_STEP - 1] (see below) */
/** @brief The individual clist nodes sit at the beginning of each free entry */
struct clist freelist;
/**
* @brief Number of free entries.
* The slabs are sorted within their pool by this value, so that we
* always hand out entries from the fullest slabs (increases locality
* and thus decreases the stress on the TLB).
*
* This is intentionally not a `usize` because entry sizes are really
* small anyway (we currently refuse to allocate anything bigger than
* `PAGE_SIZE`), so this saves a couple of bytes on systems where `int`
* is smaller than `usize`.
*/
unsigned int free_entries;
/**
* @brief Size of a single slab entry in bytes.
* Sizes must always be an integral multiple of `sizeof(void *)` and
* at least `sizeof(struct clist)`, because that's the data structure
* used for tracking what entries are free (`freelist`).
*
* Like `free_entries`, this is intentionally not a `usize`.
*/
unsigned int entry_size;
/* here would come the individual entries */
};
/** @brief All slabs currently have the same size of one full page. */
#define SLAB_SIZE PAGE_SIZE
/**
* @brief All slab entry sizes are an integral multiple of this.
* When allocating memory, the requested size gets rounded upwards.
*/
#define SLAB_STEP (sizeof(struct clist))
#define SLAB_OVERHEAD (sizeof(struct slab))
#define SLAB_EFFECTIVE_SIZE (SLAB_SIZE - SLAB_OVERHEAD)
#define SLAB_MAX_ALLOC (SLAB_SIZE - SLAB_OVERHEAD)
/* slabs are always aligned ... */
#define SLAB_PTR_MASK (~(SLAB_SIZE - 1))
/* ... so we can do this */
#define GET_SLAB(ptr) ( (struct slab *)((uintptr_t)(ptr) & SLAB_PTR_MASK) )
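
A worked example of the GET_SLAB() trick with an assumed 4 KiB SLAB_SIZE: since every slab comes straight from the page allocator and is therefore page aligned, masking off the low bits of any entry pointer lands back on the slab header.

#include <stdint.h>
#include <stdio.h>

#define DEMO_SLAB_SIZE 4096u /* assumed PAGE_SIZE */
#define DEMO_SLAB_PTR_MASK (~((uintptr_t)DEMO_SLAB_SIZE - 1))

int main(void)
{
	uintptr_t entry = 0x00403a40; /* made-up pointer into some slab entry */
	uintptr_t slab  = entry & DEMO_SLAB_PTR_MASK;
	printf("entry 0x%08lx -> slab 0x%08lx\n",
	       (unsigned long)entry, (unsigned long)slab); /* slab 0x00403000 */
	return 0;
}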
#if CFG_DEBUG_SLAB_ALLOCS
# define slab_debug(msg, ...) kprintf("[slab] " msg, ##__VA_ARGS__)
# if CFG_DEBUG_SLAB_ALLOCS_NOISY
# define slab_debug_noisy(msg, ...) kprintf("[slab] " msg, ##__VA_ARGS__)
# else
# define slab_debug_noisy(msg, ...)
# endif
#else
# define slab_debug(msg, ...)
# define slab_debug_noisy(msg, ...)
#endif
/** @brief All slab pools, indexed by `entry_size / SLAB_STEP - 1` */
struct clist pools[SLAB_MAX_ALLOC / SLAB_STEP];
static struct slab *slab_create(unsigned int entry_size, enum mm_flags flags);
static usize round_size(usize size);
void slab_init(void)
{
slab_debug("Initializing %zu cache pools (%zu~%zu bytes)\n",
ARRAY_SIZE(pools), SLAB_STEP, SLAB_MAX_ALLOC);
for (int i = 0; i < ARRAY_SIZE(pools); i++)
clist_init(&pools[i]);
}
void *slab_alloc(usize size, enum mm_flags flags)
{
size = round_size(size);
if (size == 0)
return nil;
struct clist *pool = &pools[size / SLAB_STEP - 1];
struct slab *slab = nil;
struct slab *cursor;
clist_foreach_entry(pool, cursor, clink) {
if (cursor->free_entries > 0) {
slab = cursor;
break;
}
}
if (slab == nil) {
slab = slab_create(size, flags);
if (slab == nil)
return nil; /* OOM */
clist_add_first(pool, &slab->clink);
}
/* list must have at least one entry, otherwise
* we would have created a completely new slab */
struct clist *ret = slab->freelist.next;
clist_del(ret);
slab->free_entries--;
# if CFG_POISON_HEAP
memset(ret, 'a', size);
# endif
return (void *)ret;
}
void slab_free(void *ptr)
{
# if CFG_DEBUG_SLAB_ALLOCS
if (ptr < kheap_start || ptr >= kheap_end) {
kprintf("slab_free(%p): invalid ptr!\n", ptr);
return;
}
if ((uintptr_t)ptr % SLAB_STEP) {
kprintf("slab_free(%p): unaligned ptr!\n", ptr);
}
# endif
struct slab *slab = GET_SLAB(ptr);
slab->free_entries++;
# if CFG_POISON_HEAP
memset(ptr, 'A', slab->entry_size);
# endif
if (slab->free_entries * slab->entry_size + slab->entry_size > SLAB_EFFECTIVE_SIZE) {
/* none of the entries are in use, free the slab */
slab_debug_noisy("Destroying empty cache of size %zu\n", slab->entry_size);
free_pages(slab);
} else {
clist_add(&slab->freelist, (struct clist *)ptr);
}
}
static struct slab *slab_create(unsigned int entry_size, enum mm_flags flags)
{
slab_debug_noisy("Creating new cache for size %zu\n", entry_size);
struct slab *slab = get_pages(SLAB_SIZE / PAGE_SIZE, flags);
if (slab != nil) {
clist_init(&slab->freelist);
slab->free_entries = 0;
slab->entry_size = entry_size;
void *startptr = (void *)slab + sizeof(*slab);
void *endptr = (void *)slab + SLAB_SIZE - entry_size;
for (void *pos = startptr; pos <= endptr; pos += entry_size) {
clist_add(&slab->freelist, (struct clist *)pos);
slab->free_entries++;
}
}
return slab;
}
static inline usize round_size(usize size)
{
if (size > SLAB_MAX_ALLOC)
return 0;
/* SLAB_STEP is a power of 2, so clang will (hopefully)
* replace these with fancy bit banging tricks */
if (size % SLAB_STEP)
size = (size / SLAB_STEP) * SLAB_STEP + SLAB_STEP;
return size;
}
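
Since SLAB_STEP is a power of two, the division and multiplication in round_size() are equivalent to the usual mask trick. A quick standalone check, with SLAB_STEP assumed to be 8 bytes (two 32-bit pointers in a struct clist):

#include <stddef.h>
#include <stdio.h>

#define DEMO_SLAB_STEP 8u /* assumed sizeof(struct clist) on i386 */

static size_t round_div(size_t size)
{
	if (size % DEMO_SLAB_STEP)
		size = (size / DEMO_SLAB_STEP) * DEMO_SLAB_STEP + DEMO_SLAB_STEP;
	return size;
}

static size_t round_mask(size_t size)
{
	return (size + DEMO_SLAB_STEP - 1) & ~(size_t)(DEMO_SLAB_STEP - 1);
}

int main(void)
{
	for (size_t size = 1; size <= 24; size += 5)
		printf("%2zu -> %2zu / %2zu (pool index %zu)\n",
		       size, round_div(size), round_mask(size),
		       round_div(size) / DEMO_SLAB_STEP - 1);
	return 0;
}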
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/