/* See the end of this file for copyright and license terms. */
#include <arch/page.h>
#include <gay/bits.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/mutex.h>
#include <gay/systm.h>
#include <gay/types.h>
#include <gay/util.h>
#include <limits.h>
#include <string.h>
#ifndef __HAVE_HUGEPAGES
#error "Systems without huge pages are currently unsupported because i'm a dumb bitch"
#endif
#if DMAP_OFFSET % HUGEPAGE_SIZE != 0
#error "DMAP_OFFSET must be an integral multiple of HUGEPAGE_SIZE"
#endif
/* this should be impossible because arch/page.h must also define PAGE_SHIFT
* and HUGEPAGE_SHIFT, meaning the two are powers of 2 by definition */
#if HUGEPAGE_SIZE % PAGE_SIZE != 0
#error "HUGEPAGE_SIZE must be an integral multiple of PAGE_SIZE"
#endif
#if PAGE_SIZE % LONG_BIT != 0
#error "PAGE_SIZE must be an integral multiple of LONG_BIT"
#endif
#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
#error "long must be as wide as a pointer"
#endif
#if CFG_DEBUG_PAGE_ALLOCS
# define PAGE_ASSERT(x) KASSERT(x)
# define page_debug(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# if CFG_DEBUG_PAGE_ALLOCS_NOISY
# define page_debug_noisy(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
# else
# define page_debug_noisy(msg, ...) ({})
# endif
#else
# define PAGE_ASSERT(x) ({})
# define page_debug(msg, ...) ({})
# define page_debug_noisy(msg, ...) ({})
#endif
/**
* We have cache levels for areas ranging from a single page up to a huge page,
* on a logarithmic scale. Every level covers twice as many pages per entry as
* the one below it, starting at one page per entry. The effective result is
* that a single entry in the cache on level L covers `(1 << L)` pages.
*/
#define CACHE_ORDERS GET_PAGE_ORDERS
#define ORDER_SHIFT(order) (PAGE_SHIFT + (order))
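/*
* Example (assuming the usual x86 values PAGE_SHIFT == 12 and
* HUGEPAGE_SHIFT == 21, i.e. 4 KiB pages and 2 MiB huge pages, which
* yields 10 cache orders):
*
*	order 0:  ORDER_SHIFT(0) == 12  ->  block size 1 << 12 ==  4 KiB (1 page)
*	order 1:  ORDER_SHIFT(1) == 13  ->  block size 1 << 13 ==  8 KiB (2 pages)
*	...
*	order 9:  ORDER_SHIFT(9) == 21  ->  block size 1 << 21 ==  2 MiB (1 huge page)
*/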
/** @brief There is one of these for every cache order. */
struct cache_pool {
/**
* @brief List of free blocks on this order of granularity.
* The individual entries sit right at the beginning of each free block,
* and are always aligned to `entry_size` bytes.
*/
struct clist freelist;
/**
* @brief Bitmap that stores the allocated status of each entry.
* 1 means allocated, 0 means not.
*/
unsigned long *bitmap;
/** @brief Number of items in `freelist`. */
usize free_entries;
};
static struct cache_pool caches[CACHE_ORDERS];
static MTX(caches_lock);
/* these get set in kmalloc_init() */
uintptr_t phys_start;
uintptr_t phys_end;
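/**
* @brief Hand out a single physical page during early boot.
* Pages are taken from the top of usable physical memory by shrinking
* `phys_end` one PAGE_SIZE at a time; the returned address is physical.
* This is only meant for bootstrapping, e.g. the page table structures
* allocated by __early_map_page() (see map_direct_area() below).
*/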
uintptr_t __early_get_page(void)
{
phys_end -= PAGE_SIZE;
return phys_end;
}
static int sanity_check(void)
{
KASSERT(phys_start < phys_end);
KASSERT(phys_start == HUGEPAGE_ALIGN(phys_start));
/* phys_end is only page aligned, see kmalloc_init() */
KASSERT(phys_end == PAGE_ALIGN(phys_end));
if ((phys_end - phys_start) < (32 * 1024 * 1024)) {
kprintf("Less than 32 MB of usable RAM, this wouldn't go well\n");
return 1;
}
return 0;
}
/*
* Map the entire physical memory into the direct contiguous area.
* __early_map_page() might call __early_get_page() in order to allocate
* new page table structures, which in turn shrinks the physical memory
* size (see above).
*/
static inline void map_direct_area(void)
{
#ifdef __HAVE_HUGEPAGES
const usize step = HUGEPAGE_SIZE;
const enum pflags flags = P_PRESENT | P_RW | P_HUGE;
#else
const usize step = PAGE_SIZE;
const enum pflags flags = P_PRESENT | P_RW;
#endif
/*
* It might be necessary to use a volatile pointer to phys_end for this
* loop in case clang does The Optimization and caches its value for
* whatever reason, even though at least for x86 this is not the case
* (and i don't even think the C standard allows it when calling
* external functions in between, but still, Never Trust The Compiler).
*/
for (uintptr_t pos = phys_start; pos <= phys_end - step; pos += step)
__early_map_page(pos, __v(pos), flags);
vm_flush();
}
/*
* This function maps the entire physical memory into the direct region
* (DMAP_START - DMAP_END) and sets up the caches.
* The bitmaps are stored one after another at the end of physical memory,
* and the area they occupy is cut off from the heap (kheap_end is rounded
* down to a huge page boundary just below them).
*/
int pages_init(void)
{
if (sanity_check() != 0)
return 1;
map_direct_area();
/* phys_end gets aligned, as promised by the comment in kmalloc_init() */
phys_end = align_floor(phys_end, HUGEPAGE_SIZE);
usize phys_size = phys_end - phys_start;
/*
* calculate the size of each bitmap, as well as their combined size
*/
usize bitmap_bytes = 0;
for (int i = 0; i < CACHE_ORDERS; i++) {
usize bits = phys_size >> ORDER_SHIFT(i);
bits = align_ceil(bits, LONG_BIT);
bitmap_bytes += bits / 8;
}
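/*
* Example: with 1 GiB of usable RAM and the usual x86 values (4 KiB pages,
* 2 MiB huge pages, 10 orders), order 0 needs 1 GiB / 4 KiB = 262144 bits
* (32 KiB), order 1 needs 16 KiB, and so on, roughly halving per order;
* the bitmaps add up to just under 64 KiB in total.
*/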
page_debug("Page frame overhead = %zu bytes, %zu bytes total\n", bitmap_bytes, phys_size);
/*
* zero out all bitmaps
*/
uintptr_t bitmap_start_phys = phys_end - bitmap_bytes;
unsigned long *bitmap_start = __v(bitmap_start_phys);
memset(bitmap_start, 0, bitmap_bytes);
/*
* populate the remaining members of the cache_pool structures and
* preallocate entries that can't be handed out (i.e. the cache bitmaps)
*/
unsigned long *bitmap_pos = bitmap_start;
for (int i = 0; i < CACHE_ORDERS; i++) {
/* total amount of entries on this level */
usize total_bits = phys_size >> ORDER_SHIFT(i);
/* number of entries on this level that the bitmaps themselves occupy */
usize wasted_bits = bitmap_bytes >> ORDER_SHIFT(i);
if (wasted_bits == 0)
wasted_bits = 1;
bit_set_range(bitmap_pos, total_bits - wasted_bits, wasted_bits);
caches[i].bitmap = bitmap_pos;
bitmap_pos += total_bits / LONG_BIT;
clist_init(&caches[i].freelist);
caches[i].free_entries = 0;
}
/* kheap_start and kheap_end are globals */
kheap_start = __v(phys_start);
kheap_end = align_floor(bitmap_start, HUGEPAGE_SIZE);
/*
* populate the freelist on the highest order; all orders beneath it
* stay empty until one of the large blocks gets split up
*/
struct cache_pool *high_pool = &caches[CACHE_ORDERS - 1];
usize step = 1 << ORDER_SHIFT(CACHE_ORDERS - 1);
for (void *pos = kheap_start; pos < kheap_end; pos += step) {
struct clist *entry = pos;
clist_add(&high_pool->freelist, entry);
high_pool->free_entries++;
}
return 0;
}
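/*
* Resulting layout in the direct mapping (addresses via __v()):
*
*   kheap_start                 kheap_end       bitmap_start    __v(phys_end)
*   |<-------- kernel heap -------->|   unused   |<--- bitmaps --->|
*
* kheap_end is bitmap_start rounded down to a huge page boundary, so the
* unused gap in between is always smaller than one huge page.
*/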
/**
* @brief Split a block and return the lower half.
* The block is assumed to already have been removed from its freelist.
* The high half (i.e. the block that is *not* returned) is inserted into the
* freelist one level below `level`.
*
* @param ptr Pointer to the block
* @param level Current level of the block
* (`ptr` must be aligned to `1 << level` pages)
*/
static void *split_buddy(void *ptr, int level);
/**
* @brief Attempt to coalesce a block with its buddy.
* If coalescing is possible, the buddy is removed from its freelist at `order`.
*
* @param ptr Pointer to the block
* @param order Cache order, must be less than `CACHE_ORDERS - 1` (because you
* can't join blocks at the highest cache order)
* @return The joined block, or `nil` if coalescing was not possible
*/
static void *try_join_buddy(void *ptr, int order);
static inline usize get_bit_number(void *ptr, int order)
{
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> ORDER_SHIFT(order);
}
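/*
* Bit i in caches[order].bitmap corresponds to the block starting at
* kheap_start + (i << ORDER_SHIFT(order)). Example (assuming 4 KiB pages):
* for ptr == kheap_start + 0x6000, get_bit_number(ptr, 0) == 6 and
* get_bit_number(ptr, 1) == 3.
*/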
void *get_pages(int order, enum mflags flags)
{
PAGE_ASSERT(order >= 0);
if (order >= GET_PAGE_ORDERS) {
page_debug("get_pages(%d, %#08x): Order too high!\n", order, flags);
return nil;
}
if (flags & M_NOSLEEP) {
kprintf("get_pages(): M_NOSLEEP requested, this is not implemented yet :(\n");
return nil;
}
mtx_lock(&caches_lock);
struct clist *entry = nil;
int entry_order;
for (entry_order = order; entry_order < CACHE_ORDERS; entry_order++) {
if (caches[entry_order].free_entries > 0) {
entry = caches[entry_order].freelist.next;
break;
}
}
if (entry_order != CACHE_ORDERS) {
clist_del(entry);
caches[entry_order].free_entries--;
usize bit_number = get_bit_number(entry, entry_order);
while (entry_order > order) {
entry = split_buddy(entry, entry_order);
bit_set(caches[entry_order].bitmap, bit_number);
entry_order--;
bit_number <<= 1;
}
bit_set(caches[order].bitmap, bit_number);
# if CFG_POISON_PAGES
memset(entry, 'a', 1 << ORDER_SHIFT(order));
# endif
}
mtx_unlock(&caches_lock);
return (void *)entry;
}
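/*
* Usage sketch (illustrative only -- M_KERN stands in for whatever default
* allocation flag gay/mm.h actually provides, it is not defined in this file):
*
*	void *buf = get_pages(2, M_KERN);	// 1 << 2 == 4 contiguous pages
*	if (buf != nil) {
*		// ... use the block ...
*		free_pages(buf);
*	}
*/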
void free_pages(void *ptr)
{
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % PAGE_SIZE) {
kprintf("free_pages(%p): unaligned ptr!\n", ptr);
return;
}
# endif
if (sus_nil(ptr)) {
page_debug("free_pages(%p): tried to free NULL!\n", ptr);
return;
}
int order = 0;
usize bit_number = get_bit_number(ptr, order);
for (; order < CACHE_ORDERS; order++) {
if (bit_tst(caches[order].bitmap, bit_number))
break;
bit_number >>= 1;
}
if (order == CACHE_ORDERS) {
page_debug("free_pages(%p): double free!\n", ptr);
return;
}
int original_order = order;
mtx_lock(&caches_lock);
while (order < CACHE_ORDERS - 1) {
bit_clr(caches[order].bitmap, bit_number);
void *tmp = try_join_buddy(ptr, order);
if (tmp == nil)
break;
ptr = tmp;
order++;
bit_number >>= 1;
}
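/*
* Assumption: once the block has been coalesced all the way back up to the
* highest order (a whole huge page), the direct mapping for it can be
* switched back to a huge page mapping, hence the P_HUGE below.
*/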
if (order == CACHE_ORDERS - 1 && original_order != CACHE_ORDERS - 1)
set_pflags(HUGEPAGE_ALIGN(ptr), P_HUGE | P_RW);
#if CFG_POISON_PAGES
memset(ptr, 'A', 1 << ORDER_SHIFT(order));
#endif
clist_add(&caches[order].freelist, (struct clist *)ptr);
caches[order].free_entries++;
mtx_unlock(&caches_lock);
}
static inline void *split_buddy(void *ptr, int level)
{
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % (1 << ORDER_SHIFT(level))) {
kprintf("split_buddy(ptr = %p, level = %d): unaligned ptr!\n", ptr, level);
return nil;
}
if (level < 1 || level >= CACHE_ORDERS) {
kprintf("split_buddy(ptr = %p, level = %d): invalid level!\n", ptr, level);
return nil;
}
# endif
struct clist *high_buddy = ptr + (1 << ORDER_SHIFT(level - 1));
clist_add(&caches[level - 1].freelist, high_buddy);
caches[level - 1].free_entries++;
page_debug_noisy("split (%p:%p), lvl=%d\n", ptr, (void *)high_buddy, level);
return ptr;
}
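/*
* Example (4 KiB pages assumed): split_buddy(ptr, 3) takes a 32 KiB block,
* puts its upper half (ptr + 0x4000) on caches[2].freelist, and returns ptr,
* which is now a 16 KiB block at level 2.
*/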
static void *try_join_buddy(void *ptr, int order)
{
const usize entry_size = 1 << ORDER_SHIFT(order);
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % entry_size) {
kprintf("try_join_buddy(%p, %d): unaligned ptr!\n", ptr, order);
return nil;
}
/* order must be < CACHE_ORDERS - 1 because you
* can't join blocks on the topmost order */
if (order >= CACHE_ORDERS - 1) {
kprintf("try_join_buddy(%p, %d): order >= CACHE_ORDERS - 1!\n", ptr, order);
return nil;
}
# endif
/*
* Test whether the buddy block is allocated and return nil if it is.
* entry_size is a power of 2, so we can quickly get to the buddy block
* with a cheap XOR of the address and the entry size without the need
* for any if branches.
*/
uintptr_t buddy = (uintptr_t)ptr ^ entry_size;
usize buddy_bitnum = get_bit_number((void *)buddy, order);
if (bit_tst(caches[order].bitmap, buddy_bitnum))
return nil;
page_debug_noisy("join (%p:%p), order=%d\n", ptr, (void *)buddy, order);
/* If the buddy is free, we remove it from the freelist ... */
clist_del((struct clist *)buddy);
caches[order].free_entries--;
/*
* ... and return a pointer to the coalesced block.
* We use the same trick as above to get to the even (lower) block, except
* that this time we're zeroing the bit out rather than flipping it.
*/
uintptr_t even = (uintptr_t)ptr & ~entry_size;
return (void *)even;
}
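/*
* Worked example (4 KiB pages assumed), order 1 so entry_size == 0x2000:
* for ptr == kheap_start + 0x4000 the buddy is at kheap_start + 0x6000
* (0x4000 ^ 0x2000), and for ptr == kheap_start + 0x6000 the buddy is at
* kheap_start + 0x4000; in both cases the coalesced block returned is the
* lower one at kheap_start + 0x4000 (ptr & ~0x2000).
*/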
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/