/* See the end of this file for copyright and license terms. */
#include <arch/page.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
#include <gay/util.h>
#include <limits.h>
#include <string.h>
#ifndef __HAVE_HUGEPAGES
#error "Systems without huge pages are currently unsupported because i'm a dumb bitch"
#endif
#if DMAP_OFFSET % HUGEPAGE_SIZE != 0
#error "DMAP_OFFSET must be an integral multiple of HUGEPAGE_SIZE"
#endif
/* this should be impossible because arch/page.h must also define PAGE_SHIFT
 * and HUGEPAGE_SHIFT, meaning the two sizes are necessarily powers of 2 */
#if HUGEPAGE_SIZE % PAGE_SIZE != 0
#error "HUGEPAGE_SIZE must be an integral multiple of PAGE_SIZE"
#endif
#if PAGE_SIZE % LONG_BIT != 0
#error "PAGE_SIZE must be an integral multiple of LONG_BIT"
#endif
#if CFG_DEBUG_PAGE_ALLOCS
#define page_debug(msg, ...) kprintf("[page] " msg, ##__VA_ARGS__)
#else
#define page_debug(msg, ...)
#endif
/**
* We have cache levels for areas ranging from a single page up to a huge page
 * on a logarithmic scale. Every level holds twice as many pages per entry as
 * the one below it, starting at one page per entry. The effective result is
* that a single entry in the cache on level L covers `(1 << L)` pages.
*/
#define CACHE_LEVELS (HUGEPAGE_SHIFT - PAGE_SHIFT + 1)
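/*
 * For example (illustrative values only, the real ones come from arch/page.h):
 * with PAGE_SHIFT == 12 (4 KiB pages) and HUGEPAGE_SHIFT == 21 (2 MiB huge
 * pages) there are 10 levels, where level 0 holds 1 page (4 KiB) per entry,
 * level 1 holds 2 pages (8 KiB), ..., and level 9 holds 512 pages (one full
 * huge page) per entry.
 */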
struct cache_pool {
struct clist freelist;
unsigned long *bitmap;
usize free_entries;
usize bitmap_len;
};
static struct cache_pool caches[CACHE_LEVELS];
#define LONG_MASK ( ~(usize)(LONG_BIT - 1) )
uintptr_t phys_start;
uintptr_t phys_end;
/**
* @brief Split a block and return the lower half.
* The block is assumed to already have been removed from its freelist.
* The high half (i.e. the block that is *not* returned) is inserted into the
* freelist one level below `level`.
*/
static void *split_buddy(void *ptr, int level);
/**
* @brief Attempt to coalesce a block with its buddy.
 * If coalescing is possible, the buddy is removed from its freelist at
 * `level` and the merged block is inserted at `level + 1`.
*
* @param ptr Pointer to the block
* @param level Cache level, must be less than `CACHE_LEVELS - 1` (because you
* can't join blocks at the highest cache level)
 * @return The joined block, or `nil` if coalescing was not possible
*/
static void *try_join_buddy(void *ptr, int level);
static usize get_bit_number(void *ptr, int level);
static void set_bits(unsigned long *bitfield, usize first, usize count);
static void clr_bits(unsigned long *bitfield, usize first, usize count);
static bool get_bit(const unsigned long *bitfield, usize bit_number);
static void set_bit(unsigned long *bitfield, usize bit_number);
static int sanity_check(void)
{
if (phys_end != HUGEPAGE_ALIGN(phys_end) || phys_start != HUGEPAGE_ALIGN(phys_start)) {
kprintf("Unaligned memory, this should never be possible\n");
return 1;
}
if ((phys_end - phys_start) < (32 * 1024 * 1024)) {
kprintf("Less than 32 MB of usable RAM, this wouldn't go well\n");
return 1;
}
if (phys_start > phys_end) {
kprintf("Hey, this is funny. pages_init() was called with parameters "
"such that phys_start > phys_end (%p > %p), which "
"should absolutely never be possible. I can't really continue "
"like this, so have a nice day.\n", (void *)phys_start, (void *)phys_end);
return 1;
}
return 0;
}
static void init_freelist(void)
{
for (int i = 0; i < CACHE_LEVELS; i++) {
clist_init(&caches[i].freelist);
caches[i].free_entries = 0;
}
struct cache_pool *pool = &caches[CACHE_LEVELS - 1];
	/* size of one entry on the topmost level (equal to HUGEPAGE_SIZE) */
	const usize step = 1 << (CACHE_LEVELS - 1 + PAGE_SHIFT);
for (void *pos = kheap_start; pos < kheap_end; pos += step) {
struct clist *entry = pos;
clist_add(&pool->freelist, entry);
pool->free_entries += 1;
}
}
int pages_init(void)
{
usize phys_size = phys_end - phys_start;
if (sanity_check() != 0)
return 1;
/*
* map entire physical memory into the direct contiguous area
*/
for (uintptr_t physptr = phys_start; physptr < phys_end; physptr += HUGEPAGE_SIZE) {
const enum mm_page_flags pflags = MM_PAGE_HUGE | MM_PAGE_RW | MM_PAGE_GLOBAL;
map_page(physptr, (void *)(physptr + DMAP_OFFSET), pflags);
}
vm_flush();
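	/*
	 * From here on, any physical address p in [phys_start, phys_end) is
	 * reachable through the direct map at p + DMAP_OFFSET (which is
	 * presumably what __v() below translates to).
	 */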
/*
* calculate the size of each bitmap, as well as their combined size
*/
usize cache_bytes = 0;
for (int i = 0; i < CACHE_LEVELS; i++) {
usize bits = phys_size >> (PAGE_SHIFT + i);
/* round up to the next full long */
if (bits % LONG_BIT) {
bits &= LONG_MASK;
bits += LONG_BIT;
}
cache_bytes += bits / 8;
caches[i].bitmap_len = bits / LONG_BIT;
}
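	/*
	 * Rough illustration (numbers are examples only): with 4 KiB pages and
	 * 128 MiB of usable RAM, level 0 needs 32768 bits (4 KiB), level 1 needs
	 * 16384 bits (2 KiB) and so on, so the combined overhead stays a little
	 * under 8 KiB plus per-level rounding.
	 */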
/* smol buffer in case we overshoot for whatever reason */
cache_bytes += sizeof(long);
page_debug("Page frame overhead = %zu bytes\n", cache_bytes);
/*
* zero out all bitmaps
*/
uintptr_t cache_start_phys = phys_end - cache_bytes;
unsigned long *cache_start = __v(cache_start_phys);
memset(cache_start, 0, cache_bytes);
/*
* populate the caches array and preallocate pages that can't be handed
* out (i.e. the cache bitmaps)
*/
unsigned long *cache_pos = cache_start;
for (int i = 0; i < CACHE_LEVELS; i++) {
		/* total number of entries this level's bitmap can describe */
		usize total_bits = caches[i].bitmap_len * LONG_BIT;
		/* number of entries at this level that the bitmaps themselves occupy */
		usize wasted_bits = cache_bytes >> (PAGE_SHIFT + i);
		if (wasted_bits == 0)
			wasted_bits = 1;
		/* mark the bitmap area at the very top of memory as allocated */
		set_bits(cache_pos, total_bits - wasted_bits, wasted_bits);
caches[i].bitmap = cache_pos;
cache_pos += caches[i].bitmap_len;
}
/* kheap_start and kheap_end are globals */
kheap_start = __v(phys_start);
kheap_end = cache_start;
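	/*
	 * Resulting layout in the direct map: [kheap_start, kheap_end) is handed
	 * to the allocator, and the level 0..CACHE_LEVELS-1 bitmaps are stored
	 * back to back starting at kheap_end, i.e. in the topmost part of
	 * physical memory.
	 */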
init_freelist();
return 0;
}
static int get_level(usize count)
{
int level;
for (level = 0; level < CACHE_LEVELS; level++) {
if ((1 << level) >= count)
break;
}
return level;
}
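/*
 * Illustrative usage of the allocator pair below (MM_KERN is an assumed flag
 * name for this sketch, the real mm_flags values are declared in <gay/mm.h>):
 *
 *	void *buf = get_pages(3, MM_KERN);	// rounded up to a level 2 block (4 pages)
 *	if (buf != nil) {
 *		// ... use the pages ...
 *		free_pages(buf, 3);
 *	}
 */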
void *get_pages(usize count, enum mm_flags flags)
{
int level = get_level(count);
if (level == CACHE_LEVELS)
return nil;
struct clist *entry;
int entry_level;
for (entry_level = level; entry_level < CACHE_LEVELS; entry_level++) {
if (caches[entry_level].free_entries > 0) {
entry = caches[entry_level].freelist.next;
break;
}
}
if (entry_level == CACHE_LEVELS)
return nil;
clist_del(entry);
caches[entry_level].free_entries--;
	usize bit_number = get_bit_number(entry, entry_level);
while (entry_level > level) {
entry = split_buddy(entry, entry_level);
set_bit(caches[entry_level].bitmap, bit_number);
entry_level--;
bit_number <<= 1;
}
	do {
		usize bit_count = 1 << (level - entry_level);
		set_bits(caches[entry_level].bitmap, bit_number, bit_count);
		bit_number <<= 1;
	} while (entry_level-- != 0);
return (void *)entry;
}
void free_pages(void *ptr, usize count)
{
int level = get_level(count);
if (level == CACHE_LEVELS)
return;
usize bit_number = get_bit_number(ptr, level);
usize bit_count = 1;
for (int i = level; i >= 0; i--) {
clr_bits(caches[i].bitmap, bit_number, bit_count);
bit_number <<= 1;
bit_count <<= 1;
}
while (ptr != nil && level < CACHE_LEVELS - 1) {
ptr = try_join_buddy(ptr, level);
level++;
}
}
static inline usize get_bit_number(void *ptr, int level)
{
return ((uintptr_t)ptr - (uintptr_t)kheap_start) >> (PAGE_SHIFT + level);
}
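/*
 * For instance, for the page right after the heap base (ptr == kheap_start +
 * PAGE_SIZE), get_bit_number(ptr, 0) is 1 while get_bit_number(ptr, 1) is 0,
 * because a single level 1 entry spans two pages.
 */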
/**
* @brief Set a range of bits in a bitfield.
*
* @param bitfield Pointer to the beginning of the bitfield
* @param first Number of the first bit to set, counting from 0
* @param count Amount of bits to set
*/
static void set_bits(unsigned long *bitfield, usize first, usize count)
{
	bitfield += first / LONG_BIT;
	unsigned int bit = first % LONG_BIT;
	/* partial long at the beginning of the range */
	if (bit != 0) {
		usize leading = LONG_BIT - bit;
		if (leading > count)
			leading = count;
		unsigned long mask = ((1lu << leading) - 1) << bit;
		*bitfield++ |= mask;
		count -= leading;
	}
	/* full longs in the middle */
	while (count >= LONG_BIT) {
		*bitfield++ = ULONG_MAX;
		count -= LONG_BIT;
	}
	/* partial long at the end of the range */
	if (count != 0) {
		unsigned long mask = (1lu << count) - 1;
		*bitfield |= mask;
	}
}
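/*
 * Example: with LONG_BIT == 64, set_bits(bitfield, 62, 4) sets bits 62 and 63
 * of bitfield[0] and bits 0 and 1 of bitfield[1].
 */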
/**
* @brief Clear a range of bits in a bitfield.
*
* The algorithm is similar to `set_bits()`, it just does the inverse.
*
* @param bitfield Pointer to the beginning of the bitfield
* @param first Number of the first bit to clear, counting from 0
* @param count Amount of bits to clear
*/
static void clr_bits(unsigned long *bitfield, usize first, usize count)
{
	bitfield += first / LONG_BIT;
	unsigned int bit = first % LONG_BIT;
	/* partial long at the beginning of the range */
	if (bit != 0) {
		usize leading = LONG_BIT - bit;
		if (leading > count)
			leading = count;
		unsigned long mask = ((1lu << leading) - 1) << bit;
		*bitfield++ &= ~mask;
		count -= leading;
	}
	/* full longs in the middle */
	while (count >= LONG_BIT) {
		*bitfield++ = 0;
		count -= LONG_BIT;
	}
	/* partial long at the end of the range */
	if (count != 0) {
		unsigned long mask = (1lu << count) - 1;
		*bitfield &= ~mask;
	}
}
static inline bool get_bit(const unsigned long *bitfield, usize bit_number)
{
unsigned long longword = bitfield[bit_number / LONG_BIT];
unsigned long mask = 1lu << (bit_number % LONG_BIT);
return (longword & mask) != 0;
}
static inline void set_bit(unsigned long *bitfield, usize bit_number)
{
unsigned long mask = 1lu << (bit_number % LONG_BIT);
bitfield[bit_number / LONG_BIT] |= mask;
}
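/*
 * Illustrative example: split_buddy(kheap_start, 2) splits a level 2 block
 * (4 pages) in two, pushes the upper half (kheap_start + 2 * PAGE_SIZE) onto
 * the level 1 freelist and returns the lower half (kheap_start itself).
 */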
static inline void *split_buddy(void *ptr, int level)
{
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % (1 << (PAGE_SHIFT + level))) {
kprintf("split_buddy(%p, %d): unaligned ptr!\n", ptr, level);
return nil;
}
if (level < 1 || level >= CACHE_LEVELS) {
kprintf("split_buddy(%p, %d): invalid level!\n", ptr, level);
return nil;
}
# endif
struct clist *high_buddy = ptr + (1 << (PAGE_SHIFT + level - 1));
clist_add(&caches[level - 1].freelist, high_buddy);
caches[level - 1].free_entries++;
return ptr;
}
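/*
 * Illustrative example: if the level 0 page at kheap_start + PAGE_SIZE is
 * freed while its buddy at kheap_start is also free, try_join_buddy() pulls
 * the buddy off the level 0 freelist and inserts the combined level 1 block
 * (starting at kheap_start) into the level 1 freelist.
 */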
static void *try_join_buddy(void *ptr, int level)
{
const usize entry_size = 1 << (PAGE_SHIFT + level);
# if CFG_DEBUG_PAGE_ALLOCS
if ((uintptr_t)ptr % entry_size) {
kprintf("try_join_buddy(%p, %d): unaligned ptr!\n", ptr, level);
return nil;
}
/* level must be < CACHE_LEVELS - 1 because you
* can't join blocks on the topmost level */
if (level >= CACHE_LEVELS - 1) {
kprintf("try_join_buddy(%p, %d): level >= CACHE_LEVELS - 1!\n", ptr, level);
return nil;
}
# endif
/* test if the buddy block is allocated and return nil if it is */
uintptr_t buddy = (uintptr_t)ptr ^ entry_size;
usize buddy_bitnum = get_bit_number((void *)buddy, level);
if (get_bit(caches[level].bitmap, buddy_bitnum))
return nil;
/* if it is not, remove it from the freelist */
clist_del((struct clist *)buddy);
caches[level].free_entries--;
/* add the coalesced block to the freelist one level above */
struct clist *even = (struct clist *)((uintptr_t)ptr & ~entry_size);
clist_add(&caches[level + 1].freelist, even);
caches[level + 1].free_entries++;
return even;
}
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/