You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

465 lines
13 KiB
C

/* See the end of this file for copyright and license terms. */
#include <arch/page.h>
#include <gay/arith.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/errno.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
#include <string.h>
/*
* This allocator is based on the popular design by Doug Lea:
* <http://gee.cs.oswego.edu/dl/html/malloc.html>
* For a more in-depth description of how the individual parts work together,
* see also my implementation for Ardix which is very similar except that it
* doesn't have paging:
* <https://git.bsd.gay/fef/ardix/src/commit/c767d551d3301fc30f9fce30eda8f04e2f9a42ab/kernel/mm.c>
* As a matter of fact, this allocator is merely an extension of the one from
* Ardix with the only difference being that the heap can be extended upwards.
*/
/**
 * Memory block header.
 * This sits at the beginning of every memory block (duh).
 */
struct memblk {
	/**
	 * @brief The usable size in bytes, i.e. the total block size minus `OVERHEAD`.
	 *
	 * This size will also be written to the very end of the block, just after
	 * the last usable address.  Additionally, since blocks are always aligned
	 * to at least 4 bytes anyways, we can use the low bits of this size as
	 * flags: `ALLOC_FLAG` marks the block as currently allocated, and
	 * `BORDER_FLAG` marks it as sitting at an edge of the heap.  This is
	 * going to make it much easier to detect two free neighboring blocks when
	 * `kfree()`ing one.
	 *
	 * Declared as a one-element array (rather than a plain usize) so that
	 * `blk_prev()` can read the previous block's end-size copy through a
	 * negative index.
	 */
	usize low_size[1];
	union {
		/** @brief Free-list node; if the block is allocated, this will be overwritten */
		struct clist clink;
		/** @brief Start of the usable area; used as the return value for `kmalloc()` */
		u8 data[0];
		/**
		 * @brief Used to get the copy of the size at the end of
		 * the block, right after the last byte of `data`
		 */
		usize high_size[0];
	};
};
/* overhead per memory block in bytes (the two block sizes at the beginning and end) */
#define OVERHEAD (2 * sizeof(usize))
/* every allocation is padded to a multiple of this */
#define MIN_SIZE (sizeof(struct clist))
/* memory blocks, sorted by increasing size */
static CLIST(blocks);
/*
* We play it *really* simple: Start at an arbitrary (page aligned, preferably
* even page table aligned) address in virtual memory and extend the area as
* needed as the heap grows. Efficiency doesn't matter for now; we always make
* the heap a contiguous area without holes. There isn't even a mechanism for
* releasing physical pages yet, i really just want to get to anything that is
* at all usable so i can finally work on the core system architecture.
*/
static void *heap_start = (void *)0xd0000000;
/*
* Points to the first address that is not part of the heap anymore, such that
* sizeof(heap) == heap_end - heap_start
* Thus, the heap initially has a size of zero.
*/
static void *heap_end = (void *)0xd0000000;
/**
* @brief Increase `heap_end` by up to `num_pages * PAGE_SIZE`.
*
* @param num_pages Number of pages to increase the heap by
* @returns The actual number of pages the heap was increased by; this may be
* less than `num_pages` if there were not enough free pages left
*/
static usize grow_heap(usize num_pages);
/**
* @brief Add a new block at the end of the heap by downloading more RAM (`grow_heap()`, actually). */
static struct memblk *blk_create(usize num_pages);
/** @brief Get the usable block size in bytes, without flags or overhead. */
static usize blk_get_size(struct memblk *blk);
/** @brief Set the usable block size without overhead and without affecting flags. */
static void blk_set_size(struct memblk *blk, usize size);
/** @brief Flag a block as allocated. */
static void blk_set_alloc(struct memblk *blk);
/** @brief Remove the allocated flag from a block. */
static void blk_clear_alloc(struct memblk *blk);
/** @brief Return nonzero if the block is allocated. */
static bool blk_is_alloc(struct memblk *blk);
/** @brief Set the border flag at the start of a block. */
static void blk_set_border_start(struct memblk *blk);
/** @brief Remove the border flag from the start of a block. */
static void blk_clear_border_start(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the start. */
static bool blk_is_border_start(struct memblk *blk);
/** @brief Set the border flag at the end of a block. */
static void blk_set_border_end(struct memblk *blk);
/** @brief Remove the border flag from the end of a block. */
static void blk_clear_border_end(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the end. */
static bool blk_is_border_end(struct memblk *blk);
/** @brief Get a block's immediate lower neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_prev(struct memblk *blk);
/** @brief Get a block's immediate higher neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_next(struct memblk *blk);
/** @brief Merge two contiguous free blocks into one, resort the list, and return the block. */
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top);
/** @brief Attempt to merge both the lower and higher neighbors of a free block. */
static struct memblk *blk_try_merge(struct memblk *blk);
/** @brief Cut a slice from a free block and return the slice. */
static struct memblk *blk_slice(struct memblk *blk, usize slice_size);
/**
 * @brief Initialize the page allocator and seed the heap with one free block.
 *
 * @param phys_start First physical address handed to `mem_init()`
 * @param phys_end Last physical address handed to `mem_init()`
 * @returns 0 on success, a negative error code otherwise
 */
int kmalloc_init(uintptr_t phys_start, uintptr_t phys_end)
{
	int err = mem_init(phys_start, phys_end);
	if (err != 0)
		return err;

	/* the heap starts out as a single free block covering exactly one page */
	if (grow_heap(1) != 1)
		return -ENOMEM;

	struct memblk *first = heap_start;
	blk_set_size(first, PAGE_SIZE - OVERHEAD);
	blk_clear_alloc(first);
	/* the only block is simultaneously the bottom and the top of the heap */
	blk_set_border_start(first);
	blk_set_border_end(first);
	clist_add(&blocks, &first->clink);
	return 0;
}
/**
 * @brief Allocate `size` bytes of kernel memory.
 *
 * @param size Requested allocation size in bytes
 * @param flags Must be `MM_KERN`; anything else is rejected
 * @returns A pointer to the usable area, or NULL on failure
 */
void *kmalloc(usize size, enum mm_flags flags)
{
	if (flags != MM_KERN) {
		kprintf("Invalid flags passed to kmalloc()\n");
		return NULL;
	}
	if (size == 0)
		return NULL;

	/* pad the request up to the next multiple of the minimum allocation unit */
	if (size % MIN_SIZE != 0)
		size = (size / MIN_SIZE) * MIN_SIZE + MIN_SIZE;

	/* the free list is sorted by size, so the first fit is also the best fit */
	struct memblk *found = NULL;
	struct memblk *pos;
	clist_foreach_entry(&blocks, pos, clink) {
		if (blk_get_size(pos) >= size) {
			found = pos;
			break;
		}
	}

	if (found == NULL) {
		/* nothing big enough on the free list, grab fresh pages instead */
		usize pages = (size + OVERHEAD) / PAGE_SIZE + 1;
		found = blk_create(pages);
		if (found == NULL) {
			kprintf("Kernel OOM qwq\n");
			return NULL;
		}
		clist_add(&blocks, &found->clink);
	}

	/* cut off exactly what we need; the remainder goes back on the list */
	found = blk_slice(found, size);
	blk_set_alloc(found);
# if CFG_POISON_HEAP
	memset(found->data, 'a', blk_get_size(found));
# endif
	return found->data;
}
/**
 * @brief Release a block previously returned by `kmalloc()`.
 *
 * Freeing NULL is a harmless no-op, matching standard `free()` semantics
 * (the `free()` wrapper below forwards here directly).
 *
 * @param ptr Pointer obtained from `kmalloc()`, or NULL
 */
void kfree(void *ptr)
{
	if (ptr == NULL)
		return;
# ifdef DEBUG
	/*
	 * heap_end is the first address that is *not* part of the heap anymore,
	 * so ptr == heap_end is out of bounds too (the old check used `>` and
	 * let that exact value slip through).
	 */
	if (ptr < heap_start || ptr >= heap_end) {
		kprintf("Tried to free %p which is outside the heap!\n", ptr);
		return;
	}
# endif
	/* step back from the data area to the block header */
	struct memblk *blk = ptr - sizeof(blk->low_size);
# ifdef DEBUG
	if (!blk_is_alloc(blk)) {
		kprintf("Double free of %p!\n", ptr);
		return;
	}
# endif
# if CFG_POISON_HEAP
	memset(blk->data, 'A', blk_get_size(blk));
# endif
	blk_clear_alloc(blk);
	/* coalesce with free neighbors and put the result back on the free list */
	blk_try_merge(blk);
}
/*
* These wrappers are used for linking libc against the kernel itself.
* This is a "temporary" hack because i haven't figured out the whole C flags
* thingy for properly producing two versions of libc (one static one for the
* kernel and a shared one for user space).
*/
/** @brief libc-compatible allocation entry point; simply defers to `kmalloc()`. */
__weak void *malloc(usize size)
{
	void *ptr = kmalloc(size, MM_KERN);
	return ptr;
}
/** @brief libc-compatible release entry point; simply defers to `kfree()`. */
__weak void free(void *ptr)
{
	kfree(ptr);
}
/*
 * Append a fresh block of `num_pages` pages to the top of the heap and, if
 * the previous top block is free, fuse the two into one.  Returns the new
 * (possibly merged) block, which is NOT inserted into the free list here --
 * the caller does that.
 */
static inline struct memblk *blk_create(usize num_pages)
{
	usize blksize;
	/* blksize = num_pages * PAGE_SIZE, bailing out on overflow */
	if (mul_overflow(&blksize, num_pages, PAGE_SIZE))
		return NULL;
	/*
	 * heap_end points to the first address that is not part of the heap
	 * anymore, so that's where the new block starts when we add pages
	 */
	struct memblk *blk = heap_end;
	/*
	 * NOTE(review): on partial growth the already-mapped pages stay mapped
	 * but unmanaged (heap_end has advanced) -- they are leaked until a
	 * release mechanism exists; see the comment above heap_start.
	 */
	if (grow_heap(num_pages) != num_pages)
		return NULL; /* OOM :( */
	blk_set_size(blk, blksize - OVERHEAD);
	blk_clear_alloc(blk);
	/* the new block is now the topmost one in the heap */
	blk_set_border_end(blk);
	/*
	 * The old top block loses its end-border flag.  NOTE(review): this
	 * assumes the heap is never empty here (kmalloc_init() created the
	 * first block) and that the fresh block's start-border bit is clear --
	 * blk_set_size() preserves flag bits from the newly mapped page, so
	 * blk_prev() would misbehave on garbage; confirm get_page() zeroes
	 * pages or that the bit is otherwise guaranteed clear.
	 */
	struct memblk *old_high = blk_prev(blk);
	blk_clear_border_end(old_high);
	if (!blk_is_alloc(old_high)) {
		/* old top block is free: unlink it and absorb the new area into it */
		clist_del(&old_high->clink);
		blk = blk_merge(old_high, blk);
	}
	return blk;
}
/*
 * Map up to `num_pages` fresh pages at heap_end, advancing it as we go.
 * Returns how many pages were actually added (may be short of the request
 * if the page allocator runs dry or mapping fails).
 */
static inline usize grow_heap(usize num_pages)
{
	usize added = 0;
	while (added < num_pages) {
		uintptr_t phys = get_page();
		if (!phys)
			break;
		if (map_page(phys, heap_end, MM_PAGE_RW) != 0) {
			/* mapping failed, hand the physical page back */
			put_page(phys);
			break;
		}
		heap_end += PAGE_SIZE;
		added++;
	}
	/* flush even on partial success; some mappings may have gone through */
	vm_flush();
	return added;
}
#define ALLOC_FLAG ((usize)1 << 0)
#define BORDER_FLAG ((usize)1 << 1)
#define SIZE_MASK ( ~(ALLOC_FLAG | BORDER_FLAG) )
static struct memblk *blk_try_merge(struct memblk *blk)
{
	/* absorb a free lower neighbor first, keeping `blk` at the bottom */
	struct memblk *neighbor = blk_prev(blk);
	if (neighbor != NULL && !blk_is_alloc(neighbor)) {
		clist_del(&neighbor->clink);
		blk = blk_merge(neighbor, blk);
	}
	/* then absorb a free upper neighbor into the (possibly grown) block */
	neighbor = blk_next(blk);
	if (neighbor != NULL && !blk_is_alloc(neighbor)) {
		clist_del(&neighbor->clink);
		blk = blk_merge(blk, neighbor);
	}
	/*
	 * Re-insert into the size-sorted free list: stop at the first block at
	 * least as big as ours and insert relative to it.  NOTE(review): when
	 * the loop runs to completion (blk is the biggest, or the list is
	 * empty), `cursor` presumably ends up at the list head sentinel so the
	 * clist_add() appends at the end -- confirm against the clist
	 * implementation that the cursor is usable after the loop.
	 */
	struct memblk *cursor;
	clist_foreach_entry(&blocks, cursor, clink) {
		if (blk_get_size(cursor) >= blk_get_size(blk))
			break;
	}
	clist_add(&cursor->clink, &blk->clink);
	return blk;
}
/*
 * Fuse two physically adjacent free blocks into one.  The boundary between
 * them (one end-size word plus one start-size word, i.e. OVERHEAD bytes)
 * becomes part of the usable area.  The free list is NOT touched here.
 */
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top)
{
	usize combined = blk_get_size(bottom) + blk_get_size(top) + OVERHEAD;
	blk_set_size(bottom, combined);
	return bottom;
}
/*
 * Cut a `slice_size`-byte slice off the bottom of free block `blk`, mark it
 * allocated, and re-insert the remainder (if any) into the size-sorted free
 * list.  Returns the allocated slice.
 */
static struct memblk *blk_slice(struct memblk *blk, usize slice_size)
{
	/* remember our list position before unlinking so we can walk backwards */
	struct memblk *cursor = clist_prev_entry(blk, clink);
	clist_del(&blk->clink);
	/*
	 * If the remaining size is less than the minimum allocation unit, we
	 * hand out the entire block.  The subtraction can also underflow when
	 * slice_size is less than OVERHEAD smaller than the full block size,
	 * which sub_underflow() reports via its return value (the old code
	 * additionally precomputed rest_size by hand -- a dead store that
	 * sub_underflow() immediately overwrote).
	 */
	usize rest_size;
	bool carry = sub_underflow(&rest_size, blk_get_size(blk), slice_size + OVERHEAD);
	if (rest_size < MIN_SIZE || carry) {
		blk_set_alloc(blk);
		return blk;
	}
	/* the remainder starts right after the slice's data area and end-size word */
	usize slice_words = slice_size / sizeof(blk->low_size);
	struct memblk *rest = (void *)&blk->high_size[slice_words + 1];
	blk_set_size(rest, rest_size);
	blk_clear_alloc(rest);
	/* rest sits in the middle of the heap, so it cannot be a bottom border */
	blk_clear_border_start(rest);
	blk_set_size(blk, slice_size);
	blk_set_alloc(blk);
	blk_clear_border_end(blk);
	/* walk backwards from the original position to keep the list sorted */
	clist_foreach_entry_rev_continue(&blocks, cursor, clink) {
		if (blk_get_size(cursor) <= rest_size)
			break;
	}
	clist_add_first(&cursor->clink, &rest->clink);
	return blk;
}
static inline struct memblk *blk_prev(struct memblk *blk)
{
	/* a block at the very bottom of the heap has no lower neighbor */
	if (blk_is_border_start(blk))
		return NULL;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Warray-bounds" /* trust me bro, this is fine */
	/*
	 * low_size[-1] is the end-size copy of the block immediately below;
	 * stepping back by that size plus both blocks' size words (OVERHEAD)
	 * lands exactly on the neighbor's header.
	 */
	return (void *)blk - (blk->low_size[-1] & SIZE_MASK) - OVERHEAD;
#pragma clang diagnostic pop
}
/* Return the block immediately above `blk`, or NULL if it is the topmost one. */
static inline struct memblk *blk_next(struct memblk *blk)
{
	if (blk_is_border_end(blk))
		return NULL;
	/* flag bits in the low word are truncated away by the word-size division */
	usize words = blk->low_size[0] / sizeof(blk->low_size[0]);
	/* +1 skips this block's end-size word, landing on the neighbor's header */
	return (void *)&blk->high_size[words + 1];
}
/* Return the usable size of `blk` in bytes, with the flag bits masked off. */
static inline usize blk_get_size(struct memblk *blk)
{
	usize size = blk->low_size[0] & SIZE_MASK;
# ifdef DEBUG
	/* the size is stored at both ends; a mismatch means something scribbled over the block */
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	if (size != (blk->high_size[end] & SIZE_MASK))
		kprintf("Memory corruption in block %p detected!\n", blk);
# endif
	return size;
}
/* Store `size` in both size copies of `blk` while preserving the flag bits. */
static void blk_set_size(struct memblk *blk, usize size)
{
	usize end = size / sizeof(blk->low_size[0]);
# ifdef DEBUG
	if (size & ~SIZE_MASK)
		kprintf("Unaligned size in blk_set_size()\n");
# endif
	/* rewrite only the size bits, leaving the alloc/border flags intact */
	blk->low_size[0] = (blk->low_size[0] & ~SIZE_MASK) | (size & SIZE_MASK);
	blk->high_size[end] = (blk->high_size[end] & ~SIZE_MASK) | (size & SIZE_MASK);
}
/* Mark `blk` as allocated in both size copies. */
static inline void blk_set_alloc(struct memblk *blk)
{
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	blk->high_size[end] |= ALLOC_FLAG;
	blk->low_size[0] |= ALLOC_FLAG;
}
/* Mark `blk` as free in both size copies. */
static inline void blk_clear_alloc(struct memblk *blk)
{
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	blk->high_size[end] &= ~ALLOC_FLAG;
	blk->low_size[0] &= ~ALLOC_FLAG;
}
/* Return true if `blk` is currently allocated. */
static inline bool blk_is_alloc(struct memblk *blk)
{
	return blk->low_size[0] & ALLOC_FLAG;
}
/* Flag `blk` as the bottommost block; the flag lives in the low size copy. */
static inline void blk_set_border_start(struct memblk *blk)
{
	*blk->low_size |= BORDER_FLAG;
}
/* Remove the bottom-of-heap flag from `blk`'s low size copy. */
static inline void blk_clear_border_start(struct memblk *blk)
{
	*blk->low_size &= ~BORDER_FLAG;
}
/* Return true if `blk` is the bottommost block in the heap. */
static inline bool blk_is_border_start(struct memblk *blk)
{
	return *blk->low_size & BORDER_FLAG;
}
/* Flag `blk` as the topmost block; the flag lives in the high size copy. */
static inline void blk_set_border_end(struct memblk *blk)
{
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	blk->high_size[end] |= BORDER_FLAG;
}
/* Remove the top-of-heap flag from `blk`'s high size copy. */
static inline void blk_clear_border_end(struct memblk *blk)
{
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	blk->high_size[end] &= ~BORDER_FLAG;
}
/* Return true if `blk` is the topmost block in the heap. */
static inline bool blk_is_border_end(struct memblk *blk)
{
	usize end = blk->low_size[0] / sizeof(blk->low_size[0]);
	return blk->high_size[end] & BORDER_FLAG;
}
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/