mm: minor page management refactor

This seems like a huge commit, but it's really
just renaming a bunch of symbols.  The entire mm
subsystem is probably going to have to go through
some major changes in the near future, so it's
best to start off with something that isn't too
chaotic, I guess.
anna 2021-11-09 20:34:35 +01:00
parent 03f31df67f
commit a3941b6dc4
Signed by: fef
GPG key ID: EC22E476DC2D3D84
7 changed files with 307 additions and 222 deletions


@ -58,6 +58,21 @@ int kmalloc_init(uintptr_t _phys_start, uintptr_t _phys_end)
return 0;
}
__weak void *malloc(usize size)
{
return kmalloc(size, M_KERN);
}
__weak void free(void *ptr)
{
kfree(ptr);
}
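For context: __weak presumably wraps the compiler's weak-symbol attribute, so these defaults only apply until some other translation unit provides a strong malloc()/free(). A minimal sketch of that assumption (the header location is hypothetical):

/* assumed definition, e.g. in one of the kernel's cdefs headers: */
#define __weak __attribute__((weak))

/* a strong definition of malloc() elsewhere silently overrides the
 * __weak default above; linking both together is not an error. */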
/*
* Looking for kmalloc() and kfree()?
* Those two are in slab.c for purely organizational reasons.
*/
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.


@ -8,6 +8,7 @@
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/mutex.h>
#include <gay/systm.h>
#include <gay/types.h>
#include <gay/util.h>
@ -37,6 +38,7 @@
#endif
#if CFG_DEBUG_PAGE_ALLOCS
# define PAGE_ASSERT(x) KASSERT(x)
# define page_debug(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# if CFG_DEBUG_PAGE_ALLOCS_NOISY
# define page_debug_noisy(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
@ -44,6 +46,7 @@
# define page_debug_noisy(msg, ...) ({})
# endif
#else
# define PAGE_ASSERT(x) ({})
# define page_debug(msg, ...) ({})
# define page_debug_noisy(msg, ...) ({})
#endif
@ -54,14 +57,14 @@
* the one below it, starting at one page per entry. The effective result is
* that a single entry in the cache on level L covers `(1 << L)` pages.
*/
#define CACHE_LEVELS GET_PAGE_LEVELS
#define CACHE_ORDERS GET_PAGE_ORDERS
#define LEVEL_SHIFT(level) (PAGE_SHIFT + (level))
#define ORDER_SHIFT(order) (PAGE_SHIFT + (order))
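As a quick cross-check of the renamed macros: an entry of order N still covers `(1 << N)` pages, i.e. `1 << ORDER_SHIFT(N)` bytes. A tiny standalone sketch, assuming PAGE_SHIFT == 12 (4 KiB pages); the DEMO_ names are placeholders, not kernel symbols:

#include <assert.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_ORDER_SHIFT(order) (DEMO_PAGE_SHIFT + (order))

int main(void)
{
	/* an entry of order N covers (1 << N) pages */
	assert((1ul << DEMO_ORDER_SHIFT(0)) == 4096);     /* 1 page  */
	assert((1ul << DEMO_ORDER_SHIFT(3)) == 8 * 4096); /* 8 pages */
	return 0;
}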
/** @brief There is one of these for every cache level. */
/** @brief There is one of these for every cache order. */
struct cache_pool {
/**
* @brief List of free blocks on this level of granularity.
* @brief List of free blocks on this order of granularity.
* The individual entries sit right at the beginning of each free block,
* and are always aligned to `entry_size` bytes.
*/
@ -74,11 +77,9 @@ struct cache_pool {
/** @brief Number of items in `freelist`. */
usize free_entries;
};
static struct cache_pool caches[CACHE_LEVELS];
static struct cache_pool caches[CACHE_ORDERS];
static MTX(caches_lock);
#define LONG_BIT_MASK (~(LONG_BIT - 1))
/* these get set in kmalloc_init() */
uintptr_t phys_start;
uintptr_t phys_end;
@ -91,28 +92,48 @@ uintptr_t __early_get_page(void)
static int sanity_check(void)
{
KASSERT(phys_start < phys_end);
KASSERT(phys_start == HUGEPAGE_ALIGN(phys_start));
/* phys_end is only page aligned, see kmalloc_init() */
if (phys_end != PAGE_ALIGN(phys_end) || phys_start != HUGEPAGE_ALIGN(phys_start)) {
kprintf("Unaligned memory, this should never be possible\n");
return 1;
}
KASSERT(phys_end == PAGE_ALIGN(phys_end));
if ((phys_end - phys_start) < (32 * 1024 * 1024)) {
kprintf("Less than 32 MB of usable RAM, this wouldn't go well\n");
return 1;
}
if (phys_start > phys_end) {
kprintf("Hey, this is funny. pages_init() was called with parameters "
"such that phys_start > phys_end (%p > %p), which "
"should absolutely never be possible. I can't really continue "
"like this, so have a nice day.\n", (void *)phys_start, (void *)phys_end);
return 1;
}
return 0;
}
/*
* Map the entire physical memory into the direct contiguous area.
* __early_map_page() might call __early_get_page() in order to allocate
* new page table structures, which in turn shrinks the physical memory
* size (see above).
*/
static inline void map_direct_area(void)
{
#ifdef __HAVE_HUGEPAGES
const usize step = HUGEPAGE_SIZE;
const enum pflags flags = P_PRESENT | P_RW | P_HUGE;
#else
const usize step = PAGE_SIZE;
const enum pflags flags = P_PRESENT | P_RW;
#endif
/*
* It might be necessary to use a volatile pointer to phys_end for this
* loop in case clang does The Optimization and caches its value for
* whatever reason, even though at least for x86 this is not the case
* (and I don't even think the C standard allows it when calling
* external functions in between, but still, Never Trust The Compiler).
*/
for (uintptr_t pos = phys_start; pos <= phys_end - step; pos += step)
__early_map_page(pos, __v(pos), flags);
vm_flush();
}
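For readers unfamiliar with __v(): judging from the comment above, it translates a physical address into its alias inside the direct-mapped region (DMAP_START - DMAP_END). A purely illustrative sketch of that idea; DEMO_DMAP_BASE is a made-up constant, not the kernel's actual layout:

#include <stdint.h>

#define DEMO_DMAP_BASE 0xffff800000000000ull /* hypothetical base of the direct map */

/* illustrative counterpart of __v(): physical address -> direct-mapped virtual */
static inline void *demo_v(uintptr_t phys)
{
	return (void *)(phys + DEMO_DMAP_BASE);
}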
/*
* This function maps the entire physical memory into the direct region
* (DMAP_START - DMAP_END) and sets up the caches.
@ -124,18 +145,7 @@ int pages_init(void)
if (sanity_check() != 0)
return 1;
/*
* Map the entire physical memory into the direct contiguous area.
* __early_map_page() might call __early_get_page() in order to allocate
* new page table structures, which in turn shrinks the physical memory
* size (see above).
* It might be necessary to use a volatile pointer to phys_end for this
* loop in case clang does The Optimization and caches its value for
* whatever reason, even though at least for x86 this is not the case.
*/
for (uintptr_t physptr = phys_start; physptr < phys_end; physptr += HUGEPAGE_SIZE)
__early_map_page(physptr, __v(physptr), PFLAG_HUGE | PFLAG_RW | PFLAG_GLOBAL);
vm_flush();
map_direct_area();
/* phys_end gets aligned, as promised by the comment in kmalloc_init() */
phys_end = align_floor(phys_end, HUGEPAGE_SIZE);
@ -145,13 +155,9 @@ int pages_init(void)
* calculate the size of each bitmap, as well as their combined size
*/
usize bitmap_bytes = 0;
for (int i = 0; i < CACHE_LEVELS; i++) {
usize bits = phys_size >> LEVEL_SHIFT(i);
/* round up to the next full long */
if (bits & ~LONG_BIT_MASK) {
bits &= LONG_BIT_MASK;
bits += LONG_BIT;
}
for (int i = 0; i < CACHE_ORDERS; i++) {
usize bits = phys_size >> ORDER_SHIFT(i);
bits = align_ceil(bits, LONG_BIT);
bitmap_bytes += bits / 8;
}
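To get a feel for the numbers, here is a small standalone version of that size calculation; the 128 MiB of RAM, 4 KiB pages, 10 orders and 64-bit longs are all assumptions made purely for the example:

#include <stdio.h>

int main(void)
{
	const unsigned long phys_size = 128ul << 20; /* 128 MiB, assumed           */
	const int page_shift = 12;                   /* 4 KiB pages, assumed       */
	const int orders = 10;                       /* stand-in for CACHE_ORDERS  */
	const unsigned long long_bit = 64;

	unsigned long bitmap_bytes = 0;
	for (int i = 0; i < orders; i++) {
		unsigned long bits = phys_size >> (page_shift + i);
		/* equivalent of align_ceil(bits, LONG_BIT) */
		bits = (bits + long_bit - 1) / long_bit * long_bit;
		bitmap_bytes += bits / 8;
	}
	printf("combined bitmap size: %lu bytes\n", bitmap_bytes); /* just under 8 KiB */
	return 0;
}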
@ -169,11 +175,11 @@ int pages_init(void)
* preallocate entries that can't be handed out (i.e. the cache bitmaps)
*/
unsigned long *bitmap_pos = bitmap_start;
for (int i = 0; i < CACHE_LEVELS; i++) {
for (int i = 0; i < CACHE_ORDERS; i++) {
/* total amount of entries on this level */
usize total_bits = phys_size >> LEVEL_SHIFT(i);
usize total_bits = phys_size >> ORDER_SHIFT(i);
/* number of entries on this level that the bitmap itself takes up */
usize wasted_bits = bitmap_bytes >> LEVEL_SHIFT(i);
usize wasted_bits = bitmap_bytes >> ORDER_SHIFT(i);
if (wasted_bits == 0)
wasted_bits = 1;
bit_set_range(bitmap_pos, total_bits - wasted_bits, wasted_bits);
@ -190,11 +196,11 @@ int pages_init(void)
kheap_end = align_floor(bitmap_start, HUGEPAGE_SIZE);
/*
* populate the freelist on the highest level, all levels beneath it
* populate the freelist on the highest order, all orders beneath it
* stay empty until one of the large blocks gets split up
*/
struct cache_pool *high_pool = &caches[CACHE_LEVELS - 1];
usize step = 1 << LEVEL_SHIFT(CACHE_LEVELS - 1);
struct cache_pool *high_pool = &caches[CACHE_ORDERS - 1];
usize step = 1 << ORDER_SHIFT(CACHE_ORDERS - 1);
for (void *pos = kheap_start; pos < kheap_end; pos += step) {
struct clist *entry = pos;
clist_add(&high_pool->freelist, entry);
@ -218,62 +224,62 @@ static void *split_buddy(void *ptr, int level);
/**
* @brief Attempt to coalesce a block with its buddy.
* If coalescing is possible, the buddy is removed from its freelist at `level`.
* If coalescing is possible, the buddy is removed from its freelist at `order`.
*
* @param ptr Pointer to the block
* @param level Cache level, must be less than `CACHE_LEVELS - 1` (because you
* can't join blocks at the highest cache level)
* @param order Cache order, must be less than `CACHE_ORDERS - 1` (because you
* can't join blocks at the highest cache order)
* @return The joined block, or `nil` if coalescing was not possible
*/
static void *try_join_buddy(void *ptr, int level);
static void *try_join_buddy(void *ptr, int order);
static inline usize get_bit_number(void *ptr, int level)
static inline usize get_bit_number(void *ptr, int order)
{
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> LEVEL_SHIFT(level);
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> ORDER_SHIFT(order);
}
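A quick worked example of the bit numbering (the offset is hypothetical, 4 KiB pages assumed): a block 48 KiB past kheap_start is entry 12 on order 0, entry 6 on order 1 and entry 3 on order 2; each step up halves the index, which is exactly what the bit_number shifts in get_pages() and free_pages() below rely on.

#include <assert.h>

int main(void)
{
	unsigned long offset = 48ul * 1024; /* hypothetical offset from kheap_start */
	assert((offset >> 12) == 12); /* order 0 */
	assert((offset >> 13) == 6);  /* order 1 */
	assert((offset >> 14) == 3);  /* order 2 */
	return 0;
}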
void *get_pages(usize count, enum mm_flags flags)
void *get_pages(int order, enum mflags flags)
{
int level;
for (level = 0; level < CACHE_LEVELS; level++) {
if ((1 << level) >= count)
break;
}
if (level == CACHE_LEVELS) {
page_debug("get_pages(%zu, %08x): count too large!\n", count, flags);
PAGE_ASSERT(order >= 0);
if (order >= GET_PAGE_ORDERS) {
page_debug("get_pages(%d, %#08x): Order too high!\n", order, flags);
return nil;
}
if (flags & MM_NOSLEEP) {
kprintf("get_pages(): MM_NOSLEEP requested, this is not implemented yet :(\n");
if (flags & M_NOSLEEP) {
kprintf("get_pages(): M_NOSLEEP requested, this is not implemented yet :(\n");
return nil;
}
mtx_lock(&caches_lock);
struct clist *entry = nil;
int entry_level;
for (entry_level = level; entry_level < CACHE_LEVELS; entry_level++) {
if (caches[entry_level].free_entries > 0) {
entry = caches[entry_level].freelist.next;
int entry_order;
for (entry_order = order; entry_order < CACHE_ORDERS; entry_order++) {
if (caches[entry_order].free_entries > 0) {
entry = caches[entry_order].freelist.next;
break;
}
}
if (entry_level == CACHE_LEVELS)
goto unlock;
clist_del(entry);
caches[entry_level].free_entries--;
if (entry_order != CACHE_ORDERS) {
clist_del(entry);
caches[entry_order].free_entries--;
usize bit_number = get_bit_number(entry, entry_level);
while (entry_level > level) {
entry = split_buddy(entry, entry_level);
bit_set(caches[entry_level].bitmap, bit_number);
entry_level--;
bit_number <<= 1;
usize bit_number = get_bit_number(entry, entry_order);
while (entry_order > order) {
entry = split_buddy(entry, entry_order);
bit_set(caches[entry_order].bitmap, bit_number);
entry_order--;
bit_number <<= 1;
}
bit_set(caches[order].bitmap, bit_number);
# if CFG_POISON_PAGES
memset(entry, 'a', 1 << ORDER_SHIFT(order));
# endif
}
bit_set(caches[level].bitmap, bit_number);
unlock:
mtx_unlock(&caches_lock);
return (void *)entry;
}
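Callers now pass an order rather than a page count. A hedged usage sketch of the new signature; the flag value is the M_KERN seen in the malloc() shim earlier, and kfree()/free_pages() semantics are taken from this file:

#include <gay/mm.h>    /* get_pages(), free_pages(), enum mflags */
#include <gay/types.h> /* nil */

static void demo(void)
{
	/* order 3 == 2^3 == 8 contiguous pages */
	void *block = get_pages(3, M_KERN);
	if (block != nil)
		free_pages(block);
}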
@ -287,54 +293,66 @@ void free_pages(void *ptr)
}
# endif
mtx_lock(&caches_lock);
if (sus_nil(ptr)) {
page_debug("free_pages(%p): tried to free NULL!\n", ptr);
return;
}
int level = 0;
usize bit_number = get_bit_number(ptr, level);
for (; level < CACHE_LEVELS; level++) {
if (bit_tst(caches[level].bitmap, bit_number))
int order = 0;
usize bit_number = get_bit_number(ptr, order);
for (; order < CACHE_ORDERS; order++) {
if (bit_tst(caches[order].bitmap, bit_number))
break;
bit_number >>= 1;
}
if (level == CACHE_LEVELS) {
if (order == CACHE_ORDERS) {
page_debug("free_pages(%p): double free!\n", ptr);
goto unlock;
return;
}
int original_order = order;
while (level < CACHE_LEVELS - 1) {
bit_clr(caches[level].bitmap, bit_number);
mtx_lock(&caches_lock);
void *tmp = try_join_buddy(ptr, level);
while (order < CACHE_ORDERS - 1) {
bit_clr(caches[order].bitmap, bit_number);
void *tmp = try_join_buddy(ptr, order);
if (tmp == nil)
break;
ptr = tmp;
level++;
order++;
bit_number >>= 1;
}
clist_add(&caches[level].freelist, (struct clist *)ptr);
caches[level].free_entries++;
if (order == CACHE_ORDERS - 1 && original_order != CACHE_ORDERS - 1)
set_pflags(HUGEPAGE_ALIGN(ptr), P_HUGE | P_RW);
#if CFG_POISON_PAGES
memset(ptr, 'A', 1 << ORDER_SHIFT(order));
#endif
clist_add(&caches[order].freelist, (struct clist *)ptr);
caches[order].free_entries++;
unlock:
mtx_unlock(&caches_lock);
}
static inline void *split_buddy(void *ptr, int level)
{
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % (1 << LEVEL_SHIFT(level))) {
if ((uintptr_t)ptr % (1 << ORDER_SHIFT(level))) {
kprintf("split_buddy(ptr = %p, level = %d): unaligned ptr!\n", ptr, level);
return nil;
}
if (level < 1 || level >= CACHE_LEVELS) {
if (level < 1 || level >= CACHE_ORDERS) {
kprintf("split_buddy(ptr = %p, level = %d): invalid level!\n", ptr, level);
return nil;
}
# endif
struct clist *high_buddy = ptr + (1 << LEVEL_SHIFT(level - 1));
struct clist *high_buddy = ptr + (1 << ORDER_SHIFT(level - 1));
clist_add(&caches[level - 1].freelist, high_buddy);
caches[level - 1].free_entries++;
@ -343,19 +361,19 @@ static inline void *split_buddy(void *ptr, int level)
return ptr;
}
static void *try_join_buddy(void *ptr, int level)
static void *try_join_buddy(void *ptr, int order)
{
const usize entry_size = 1 << LEVEL_SHIFT(level);
const usize entry_size = 1 << ORDER_SHIFT(order);
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % entry_size) {
kprintf("try_join_buddy(%p, %d): unaligned ptr!\n", ptr, level);
kprintf("try_join_buddy(%p, %d): unaligned ptr!\n", ptr, order);
return nil;
}
/* level must be < CACHE_LEVELS - 1 because you
* can't join blocks on the topmost level */
if (level >= CACHE_LEVELS - 1) {
kprintf("try_join_buddy(%p, %d): level >= CACHE_LEVELS - 1!\n", ptr, level);
/* order must be < CACHE_ORDERS - 1 because you
* can't join blocks on the topmost order */
if (order >= CACHE_ORDERS - 1) {
kprintf("try_join_buddy(%p, %d): order >= CACHE_ORDERS - 1!\n", ptr, order);
return nil;
}
# endif
@ -367,15 +385,15 @@ static void *try_join_buddy(void *ptr, int level)
* for any if branches.
*/
uintptr_t buddy = (uintptr_t)ptr ^ entry_size;
usize buddy_bitnum = get_bit_number((void *)buddy, level);
if (bit_tst(caches[level].bitmap, buddy_bitnum))
usize buddy_bitnum = get_bit_number((void *)buddy, order);
if (bit_tst(caches[order].bitmap, buddy_bitnum))
return nil;
page_debug_noisy("join (%p:%p), lvl=%d\n", ptr, (void *)buddy, level);
page_debug_noisy("join (%p:%p), order=%d\n", ptr, (void *)buddy, order);
/* If the buddy is free, we remove it from the freelist ... */
clist_del((struct clist *)buddy);
caches[level].free_entries--;
caches[order].free_entries--;
/*
* ... and return a pointer to the coalesced block.
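The buddy lookup in try_join_buddy() relies on the fact that two buddies of a given order differ in exactly one address bit, so XOR with the entry size flips between them. A standalone check with made-up numbers (order 2 with 4 KiB pages assumed):

#include <assert.h>

int main(void)
{
	const unsigned long entry_size = 1ul << 14; /* order 2: 16 KiB                      */
	unsigned long lower = 0x40000;              /* hypothetical, aligned to entry_size  */
	unsigned long upper = lower ^ entry_size;   /* 0x44000, the block right after       */
	assert(upper - lower == entry_size);        /* the lower buddy's partner is adjacent */
	assert((upper ^ entry_size) == lower);      /* XOR flips back again                  */
	return 0;
}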


@ -53,7 +53,6 @@ struct slab {
#define SLAB_STEP (sizeof(struct clist))
#define SLAB_OVERHEAD (sizeof(struct slab))
#define SLAB_EFFECTIVE_SIZE (SLAB_SIZE - SLAB_OVERHEAD)
#define SLAB_MAX_ALLOC (SLAB_SIZE - SLAB_OVERHEAD)
/* slabs are always aligned ... */
#define SLAB_PTR_MASK (~(SLAB_SIZE - 1))
@ -65,18 +64,60 @@ struct slab {
# if CFG_DEBUG_SLAB_ALLOCS_NOISY
# define slab_debug_noisy(msg, ...) kprintf("[slab] " msg, ##__VA_ARGS__)
# else
# define slab_debug_noisy(msg, ...)
# define slab_debug_noisy(msg, ...) ({})
# endif
#else
# define slab_debug(msg, ...)
# define slab_debug_noisy(msg, ...)
# define slab_debug(msg, ...) ({})
# define slab_debug_noisy(msg, ...) ({})
#endif
/** @brief All slab pools, indexed by `entry_size / SLAB_STEP - 1` */
/** @brief All slabs grouped by entry_size, indexed by `entry_size / SLAB_STEP - 1` */
struct clist pools[SLAB_MAX_ALLOC / SLAB_STEP];
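For illustration, assuming SLAB_STEP (sizeof(struct clist)) comes out to 16 bytes on a 64-bit build: a 40-byte request is rounded up to 48 and lands in pools[48 / 16 - 1], i.e. pools[2]. A tiny standalone version of that index math:

#include <assert.h>

int main(void)
{
	const unsigned long slab_step = 16; /* assumed sizeof(struct clist) on 64-bit */
	unsigned long size = 40;            /* hypothetical request size              */
	/* round up to the next multiple of slab_step, like align_ceil() */
	size = (size + slab_step - 1) / slab_step * slab_step;
	assert(size == 48);
	assert(size / slab_step - 1 == 2); /* index into pools[] */
	return 0;
}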
static struct slab *slab_create(unsigned int entry_size, enum mm_flags flags);
static usize round_size(usize size);
static void *slab_alloc(usize size, enum mflags flags);
static void slab_free(void *ptr);
static struct slab *slab_create(unsigned int entry_size, enum mflags flags);
static inline int get_order(usize size)
{
int order;
usize order_size = PAGE_SIZE;
for (order = 0; order <= GET_PAGE_MAX_ORDER; order++) {
if (order_size >= size)
break;
order_size <<= 1;
}
return order;
}
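In other words, get_order() returns the smallest order whose block size can hold the request. A standalone mirror of that loop with a few sample values; PAGE_SIZE == 4096 and a maximum order of 10 are assumptions for the example:

#include <assert.h>

static int demo_get_order(unsigned long size)
{
	int order;
	unsigned long order_size = 4096; /* assumed PAGE_SIZE */
	for (order = 0; order <= 10; order++) {
		if (order_size >= size)
			break;
		order_size <<= 1;
	}
	return order;
}

int main(void)
{
	assert(demo_get_order(1) == 0);     /* a single page suffices        */
	assert(demo_get_order(4097) == 1);  /* just over one page, needs two */
	assert(demo_get_order(20000) == 3); /* next power of two is 32 KiB   */
	return 0;
}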
void *kmalloc(usize size, enum mflags flags)
{
if (size > SLAB_MAX_ALLOC) {
if (flags & M_CONTIG) {
int order = get_order(size);
if (order > GET_PAGE_MAX_ORDER) {
slab_debug("Requested alloc size %zu too large for get_pages()\n",
size);
return nil;
} else {
return get_pages(order, flags);
}
} else {
slab_debug("Refusing to allocate %zu bytes as slabs\n", size);
return nil;
}
} else {
return slab_alloc(size, flags);
}
}
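A rough usage sketch of the routing above; the 64 KiB figure is only an assumption that SLAB_MAX_ALLOC is smaller than that, and kmalloc() is assumed to be declared in <gay/mm.h>:

#include <gay/mm.h> /* kmalloc(), enum mflags */

static void demo(void)
{
	void *small = kmalloc(64, M_KERN);                   /* served by the slab layer        */
	void *large = kmalloc(64 * 1024, M_KERN | M_CONTIG); /* falls through to get_pages()    */
	/* without M_CONTIG, an oversized request is refused and nil is returned */
	(void)small;
	(void)large;
}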
void kfree(void *ptr)
{
kprintf("kfree() is not implemented yet lmao\n");
}
void slab_init(void)
{
@ -86,10 +127,10 @@ void slab_init(void)
clist_init(&pools[i]);
}
void *slab_alloc(usize size, enum mm_flags flags)
static inline void *slab_alloc(usize size, enum mflags flags)
{
size = round_size(size);
if (size == 0)
size = align_ceil(size, SLAB_STEP);
if (size == 0 || size > SLAB_MAX_ALLOC)
return nil;
struct clist *pool = &pools[size / SLAB_STEP - 1];
@ -119,7 +160,7 @@ void *slab_alloc(usize size, enum mm_flags flags)
return (void *)ret;
}
void slab_free(void *ptr)
static inline void slab_free(void *ptr)
{
# if CFG_DEBUG_SLAB_ALLOCS
if (ptr < kheap_start || ptr >= kheap_end) {
@ -138,7 +179,7 @@ void slab_free(void *ptr)
memset(ptr, 'A', slab->entry_size);
# endif
if (slab->free_entries * slab->entry_size + slab->entry_size > SLAB_EFFECTIVE_SIZE) {
if (slab->free_entries * slab->entry_size + slab->entry_size > SLAB_MAX_ALLOC) {
/* none of the entries are in use, free the slab */
slab_debug_noisy("Destroying empty cache of size %zu\n", slab->entry_size);
free_pages(slab);
@ -147,7 +188,7 @@ void slab_free(void *ptr)
}
}
static struct slab *slab_create(unsigned int entry_size, enum mm_flags flags)
static struct slab *slab_create(unsigned int entry_size, enum mflags flags)
{
slab_debug_noisy("Creating new cache for size %zu\n", entry_size);
struct slab *slab = get_pages(SLAB_SIZE / PAGE_SIZE, flags);
@ -168,19 +209,6 @@ static struct slab *slab_create(unsigned int entry_size, enum mm_flags flags)
return slab;
}
static inline usize round_size(usize size)
{
if (size > SLAB_MAX_ALLOC)
return 0;
/* SLAB_STEP is a power of 2, so clang will (hopefully)
* replace these with fancy bit banging tricks */
if (size % SLAB_STEP)
size = (size / SLAB_STEP) * SLAB_STEP + SLAB_STEP;
return size;
}
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.