kern/kernel/mm/slab.c


/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
#include <string.h>

/**
 * @brief This header sits at the beginning of each slab.
 * The individual entries follow immediately after the struct itself.
 */
struct slab {
	struct clist clink;	/* -> pools[entry_size / SLAB_STEP - 1] (see below) */
	/** @brief The individual clist nodes sit at the beginning of each free entry */
	struct clist freelist;
	/**
	 * @brief Number of free entries.
	 * The slabs are sorted within their pool by this value, so that we
	 * always hand out entries from the fullest slabs (this increases
	 * locality and thus reduces TLB pressure).
	 *
	 * This is intentionally not a `usize` because entry sizes are really
	 * small anyway (we currently refuse to allocate anything bigger than
	 * `PAGE_SIZE`), so this saves a couple of bytes on systems where `int`
	 * is smaller than `usize`.
	 */
	unsigned int free_entries;
	/**
	 * @brief Size of a single slab entry in bytes.
	 * Sizes must always be an integral multiple of `sizeof(void *)` and
	 * at least `sizeof(struct clist)`, because that's the data structure
	 * used for tracking which entries are free (`freelist`).
	 *
	 * Like `free_entries`, this is intentionally not a `usize`.
	 */
	unsigned int entry_size;
	/* here would come the individual entries */
};

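/*
 * Rough in-memory layout of a single slab (illustration only, not to scale):
 *
 *   +-------------+---------+---------+---------+-----+
 *   | struct slab | entry 0 | entry 1 | entry 2 | ... |
 *   +-------------+---------+---------+---------+-----+
 *                   ^
 *                   free entries start with an embedded struct clist that
 *                   links them into slab->freelist; entries that are in use
 *                   belong entirely to the caller.
 */
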
/** @brief All slabs currently have the same size of one full page. */
#define SLAB_SIZE PAGE_SIZE
/**
 * @brief All slab entry sizes are an integral multiple of this.
 * When allocating memory, the requested size gets rounded upwards.
 */
#define SLAB_STEP (sizeof(struct clist))
#define SLAB_OVERHEAD (sizeof(struct slab))
#define SLAB_MAX_ALLOC (SLAB_SIZE - SLAB_OVERHEAD)
/* slabs are always aligned ... */
#define SLAB_PTR_MASK (~(SLAB_SIZE - 1))
/* ... so we can do this */
#define GET_SLAB(ptr) ( (struct slab *)((uintptr_t)(ptr) & SLAB_PTR_MASK) )
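
/*
 * Example (assuming PAGE_SIZE == 0x1000, i.e. 4 KiB pages): an entry at
 * 0xc0105230 belongs to the slab whose header starts at
 * 0xc0105230 & ~0xfff == 0xc0105000, because slabs always begin at a
 * page boundary.
 */
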
#if CFG_DEBUG_SLAB_ALLOCS
# define slab_debug(msg, ...) kprintf("[slab] " msg, ##__VA_ARGS__)
# if CFG_DEBUG_SLAB_ALLOCS_NOISY
# define slab_debug_noisy(msg, ...) kprintf("[slab] " msg, ##__VA_ARGS__)
# else
# define slab_debug_noisy(msg, ...) ({})
# endif
#else
# define slab_debug(msg, ...) ({})
# define slab_debug_noisy(msg, ...) ({})
#endif
/** @brief All slabs grouped by entry_size, indexed by `entry_size / SLAB_STEP - 1` */
struct clist pools[SLAB_MAX_ALLOC / SLAB_STEP];
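
/*
 * Worked example for the pool index (assuming sizeof(struct clist) == 16,
 * i.e. two pointers on a 64-bit system): a kmalloc(24, ...) request is
 * rounded up to align_ceil(24, SLAB_STEP) == 32 and therefore served from
 * pools[32 / 16 - 1] == pools[1].
 */
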
static void *slab_alloc(usize size, enum mflags flags);
static void slab_free(void *ptr);
static struct slab *slab_create(unsigned int entry_size, enum mflags flags);

static inline int get_order(usize size)
{
	int order;
	usize order_size = PAGE_SIZE;

	for (order = 0; order <= GET_PAGE_MAX_ORDER; order++) {
		if (order_size >= size)
			break;
		order_size <<= 1;
	}

	return order;
}

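/*
 * Example: a request of 3 * PAGE_SIZE bytes yields order 2 (four pages),
 * because order 0 covers one page, order 1 two pages, and order 2 four.
 * For oversized requests the loop falls through and returns
 * GET_PAGE_MAX_ORDER + 1, which kmalloc() rejects below.
 */
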
void *kmalloc(usize size, enum mflags flags)
{
	if (size > SLAB_MAX_ALLOC) {
		if (flags & M_CONTIG) {
			int order = get_order(size);
			if (order > GET_PAGE_MAX_ORDER) {
				slab_debug("Requested alloc size %zu too large for get_pages()\n",
					   size);
				return nil;
			} else {
				return get_pages(order, flags);
			}
		} else {
			slab_debug("Refusing to allocate %zu bytes as slabs\n", size);
			return nil;
		}
	} else {
		return slab_alloc(size, flags);
	}
}

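/*
 * Usage sketch (illustrative only; M_ATOMIC is a hypothetical flag standing
 * in for whatever mm.h provides, only M_CONTIG appears in this file):
 *
 *	void *small = kmalloc(64, M_ATOMIC);
 *	void *big   = kmalloc(4 * PAGE_SIZE, M_CONTIG);
 *
 * The 64-byte request is rounded up to a multiple of SLAB_STEP and served
 * from the slab pools; the multi-page request bypasses the slabs and goes
 * straight to get_pages(), but only because M_CONTIG is set; without it,
 * kmalloc() refuses the oversized allocation and returns nil.
 */
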
void kfree(void *ptr)
{
	kprintf("kfree() is not implemented yet lmao\n");
}

void slab_init(void)
{
	slab_debug("Initializing %zu cache pools (%zu~%zu bytes)\n",
		   ARRAY_SIZE(pools), SLAB_STEP, SLAB_MAX_ALLOC);

	for (int i = 0; i < ARRAY_SIZE(pools); i++)
		clist_init(&pools[i]);
}

static inline void *slab_alloc(usize size, enum mflags flags)
{
	size = align_ceil(size, SLAB_STEP);
	if (size == 0 || size > SLAB_MAX_ALLOC)
		return nil;

	struct clist *pool = &pools[size / SLAB_STEP - 1];
	struct slab *slab = nil;
	struct slab *cursor;
	clist_foreach_entry(pool, cursor, clink) {
		if (cursor->free_entries > 0) {
			slab = cursor;
			break;
		}
	}
	if (slab == nil) {
		slab = slab_create(size, flags);
		if (slab == nil)
			return nil; /* OOM */
		clist_add_first(pool, &slab->clink);
	}

	/* list must have at least one entry, otherwise
	 * we would have created a completely new slab */
	struct clist *ret = slab->freelist.next;
	clist_del(ret);
	slab->free_entries--;
# if CFG_POISON_HEAP
	memset(ret, 'a', size);
# endif
	return (void *)ret;
}

static inline void slab_free(void *ptr)
{
# if CFG_DEBUG_SLAB_ALLOCS
	if (ptr < kheap_start || ptr >= kheap_end) {
		kprintf("slab_free(%p): invalid ptr!\n", ptr);
		return;
	}
	if ((uintptr_t)ptr % SLAB_STEP) {
		kprintf("slab_free(%p): unaligned ptr!\n", ptr);
	}
# endif

	struct slab *slab = GET_SLAB(ptr);
	slab->free_entries++;
# if CFG_POISON_HEAP
	memset(ptr, 'A', slab->entry_size);
# endif

	if (slab->free_entries * slab->entry_size + slab->entry_size > SLAB_MAX_ALLOC) {
		/* none of the entries are in use, free the slab */
		slab_debug_noisy("Destroying empty cache of size %u\n", slab->entry_size);
		/* unlink the slab from its pool before the backing page goes away */
		clist_del(&slab->clink);
		free_pages(slab);
	} else {
		clist_add(&slab->freelist, (struct clist *)ptr);
	}
}

static struct slab *slab_create(unsigned int entry_size, enum mflags flags)
{
	slab_debug_noisy("Creating new cache for size %u\n", entry_size);
	struct slab *slab = get_pages(SLAB_SIZE / PAGE_SIZE, flags);

	if (slab != nil) {
		clist_init(&slab->freelist);
		slab->free_entries = 0;
		slab->entry_size = entry_size;

		void *startptr = (void *)slab + sizeof(*slab);
		void *endptr = (void *)slab + SLAB_SIZE - entry_size;
		for (void *pos = startptr; pos <= endptr; pos += entry_size) {
			clist_add(&slab->freelist, (struct clist *)pos);
			slab->free_entries++;
		}
	}

	return slab;
}
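
/*
 * Capacity example (assuming PAGE_SIZE == 4096 and, purely for the sake of
 * the arithmetic, sizeof(struct slab) == 64): a slab for 64-byte entries
 * holds (4096 - 64) / 64 == 63 entries; larger entry sizes waste whatever
 * slack does not divide evenly into the space after the header.
 */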