kmalloc: add actual memory allocator

Now that memory allocation kind of works, we can
finally start focusing on the core system
architecture.  This commit also fixes some bugs in
get_page() and friends, and improves performance
because the page map is now addressed as unsigned
longs rather than individual bytes.
anna 3 years ago
parent f1922723f0
commit 5c0fa715a4
Signed by: fef
GPG Key ID: EC22E476DC2D3D84
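To illustrate the word-wise page map mentioned in the message above, here is a minimal sketch of scanning an allocation bitmap one unsigned long at a time instead of byte by byte; the names (first_free_bit, map, len) are made up for this illustration and are not part of the commit.

#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/*
 * Return the index of the first zero (= free) bit in the map, or
 * len * BITS_PER_LONG if every bit is set.  Comparing a whole word
 * against ~0 skips 32/64 pages per iteration instead of 8.
 */
static size_t first_free_bit(const unsigned long *map, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (~map[i] == 0)
			continue; /* all pages tracked by this word are taken */
		for (size_t bit = 0; bit < BITS_PER_LONG; bit++) {
			if ((map[i] & (1ul << bit)) == 0)
				return i * BITS_PER_LONG + bit;
		}
	}
	return len * BITS_PER_LONG;
}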

@ -11,9 +11,6 @@
.section .multiboot.data, "aw"
.global mb2_load_start
mb2_load_start:
.align MB2_HEADER_ALIGN
header_start: /* struct mb2_header */
/* magic */
@ -31,7 +28,7 @@ address_tag_start: /* struct mb2_header_tag_address */
.short MB2_HEADER_TAG_ADDRESS
/* flags */
.short MB2_HEADER_TAG_OPTIONAL
/* size */
.long address_tag_end - address_tag_start
/* header_addr */
.long header_start
@ -132,10 +129,18 @@ ASM_ENTRY(_start)
1: cmp $_image_start_phys, %esi
jl 2f /* skip the pages that are below the kernel image */
/* TODO: grub stores the multiboot tags right after the kernel image,
so we might need to map more than just what we do here */
cmp $_image_end_phys, %esi
jge 3f /* exit the loop when we have mapped the entire kernel image */
/*
* GRUB stores the multiboot tags right after the kernel image (afaik).
* The previous strategy was to stop the loop after having reached the
* end of the kernel image (including bss), and keep our fingers crossed
* that the multiboot tags all fit into the space between the end of the
* kernel image and the end of that last page so it's still mapped.
* Now, we just continue mapping until we have reached the last slot in
* the page table and exit the loop only then (the last slot is for the
* BIOS character framebuffer, see below).
*/
cmp $(PAGE_SIZE * 1023), %esi
je 3f /* exit the loop when we have mapped every PT entry but the last one */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
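Expressed as C, the new loop corresponds roughly to the following sketch (illustration only, not code from the commit; pt0_entries stands in for the low page table the assembly fills in, and _image_start_phys is the linker symbol used above):

/* Identity-map 4 KiB pages from the start of the kernel image up to,
 * but not including, page table entry 1023, which stays reserved for
 * the BIOS character framebuffer. */
for (unsigned long phys = 0; phys != PAGE_SIZE * 1023; phys += PAGE_SIZE) {
	if (phys < (unsigned long)&_image_start_phys)
		continue; /* skip pages below the kernel image */
	pt0_entries[phys / PAGE_SIZE] = phys | 0x003; /* present + rw */
}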
@ -175,7 +180,7 @@ ASM_ENTRY(_start)
* because they are page aligned so they are used as flags for the MMU,
* see ../include/arch/page.h).
*
* The offset added to pd0 is the page number multiplied by the size
* of a single entry (a pointer size):
* (0x00000000 / PAGE_SIZE) / 1024 entries in a page table = 0
* (0xc0000000 / PAGE_SIZE) / 1024 entries in a page table = 768
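Put differently, the directory index is just the top 10 bits of the virtual address; a hypothetical helper macro (matching the pd_index computation map_page() uses further down) would be:

/* Illustration only: page directory index on i386
 * (4 KiB pages, 1024 entries per page table). */
#define PD_INDEX(vaddr) (((uintptr_t)(vaddr) >> PAGE_SIZE_LOG2) / 1024)
/* PD_INDEX(0x00000000) == 0, PD_INDEX(0xc0000000) == 768 */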

@ -74,9 +74,9 @@ typedef struct x86_page_directory vm_info_t;
* @brief Get the physical address a virtual one is currently mapped to.
*
* @param virt virtual address
* @returns The physical address, or `NULL` if there is no mapping
* @returns The physical address, or `0` if there is no mapping
*/
void *virt_to_phys(void *virt);
uintptr_t virt_to_phys(void *virt);
/*
* This file is part of GayBSD.

@ -35,12 +35,13 @@ extern void _image_end_phys;
* @brief Page allocation bitmap.
* 0 = free, 1 = allocated.
*/
static u8 *pagemap;
static unsigned long *pagemap;
/** @brief Pagemap length as in number of `unsigned long`s, *not* bytes! */
static size_t pagemap_len;
/* first and last dynamic page address (watch out, these are physical) */
static void *dynpage_start;
static void *dynpage_end;
static uintptr_t dynpage_start;
static uintptr_t dynpage_end;
/**
* @brief First page table for low memory (0 - 4 M).
@ -53,7 +54,7 @@ struct x86_page_directory pd0;
static void setup_pagemap(void);
int mem_init(void *start_phys, void *end_phys)
int mem_init(uintptr_t start_phys, uintptr_t end_phys)
{
/*
* if the kernel image is loaded within the paging region (which is
@ -61,11 +62,11 @@ int mem_init(void *start_phys, void *end_phys)
* to the end of the kernel image so we won't hand out pages that
* actually store kernel data
*/
if (&_image_start_phys >= start_phys && &_image_start_phys <= end_phys)
start_phys = &_image_end_phys;
if ((uintptr_t)&_image_start_phys >= start_phys && (uintptr_t)&_image_start_phys <= end_phys)
start_phys = (uintptr_t)&_image_end_phys;
dynpage_start = ptr_align(start_phys, PAGE_SIZE_LOG2);
dynpage_end = ptr_align(end_phys, -PAGE_SIZE_LOG2);
dynpage_start = (uintptr_t)ptr_align((void *)start_phys, PAGE_SIZE_LOG2);
dynpage_end = (uintptr_t)ptr_align((void *)end_phys, -PAGE_SIZE_LOG2);
if (dynpage_end - dynpage_start < 1024 * PAGE_SIZE) {
kprintf("We have < 1024 pages for kmalloc(), this wouldn't go well\n");
@ -88,47 +89,52 @@ int mem_init(void *start_phys, void *end_phys)
return 0;
}
int map_page(void *phys, void *virt, enum mm_page_flags flags)
int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
{
# ifdef DEBUG
if (phys != PAGE_ALIGN(phys))
kprintf("map_page(): unaligned physical address %p!\n", phys);
kprintf("map_page(): unaligned physical address %p!\n", (void *)phys);
if (virt != PAGE_ALIGN(virt))
kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
/*
* warning: pt might not be present yet before the if block below,
* we only define it here already so we can easily call memset() in
* the if block
*/
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
if (!pd_entry->present) {
void *page = get_page();
if (page == NULL)
uintptr_t pt_phys = get_page();
if (!pt_phys)
return -ENOMEM;
memset(page, 0, PAGE_SIZE);
*(unsigned long *)pd_entry = 0;
pd_entry->shifted_address = (uintptr_t)page >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->shifted_address = pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->rw = 1;
pd_entry->present = 1;
vm_flush();
memset(pt, 0, sizeof(*pt));
}
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
*(unsigned long *)pt_entry = 0;
*(unsigned long *)pt_entry = 0; /* zero out the entire entry first */
pt_entry->rw = (flags & MM_PAGE_RW) != 0;
pt_entry->user = (flags & MM_PAGE_USER) != 0;
pt_entry->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pt_entry->shifted_address = (uintptr_t)virt >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->shifted_address = phys >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->present = 1;
return 0;
}
void *unmap_page(void *virt)
uintptr_t unmap_page(void *virt)
{
# ifdef DEBUG
if (virt != PAGE_ALIGN(virt))
@ -142,41 +148,42 @@ void *unmap_page(void *virt)
struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
if (!pd_entry->present)
return NULL;
return 0;
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
if (!pt_entry->present)
return NULL;
return 0;
uintptr_t phys_shifted = pt_entry->shifted_address;
*(unsigned long *)pt_entry = 0;
return (void *)(phys_shifted << X86_PAGE_TABLE_ADDRESS_SHIFT);
return phys_shifted << X86_PAGE_TABLE_ADDRESS_SHIFT;
}
static inline int find_zero_bit(u8 bitfield)
static inline int find_zero_bit(unsigned long bitfield)
{
int i;
for (i = 0; i < 8; i++) {
if ((bitfield & (1 << i)) == 0)
for (i = 0; i < sizeof(bitfield) * 8; i++) {
if ((bitfield & (1lu << i)) == 0)
break;
}
return i;
}
void *get_page(void)
uintptr_t get_page(void)
{
void *page = NULL;
uintptr_t page = 0;
for (size_t i = 0; i < pagemap_len; i++) {
if (pagemap[i] != 0xff) {
if (~pagemap[i] != 0) {
int bit = find_zero_bit(pagemap[i]);
if (bit <= 8) {
page = dynpage_start + (i * 8 + bit) * PAGE_SIZE;
pagemap[i] |= (1 << bit);
if (bit < sizeof(*pagemap) * 8) {
unsigned long page_number = i * sizeof(*pagemap) * 8 + bit;
page = dynpage_start + page_number * PAGE_SIZE;
pagemap[i] |= (1lu << bit);
} else {
kprintf("Throw your computer in the garbage\n");
}
@ -185,55 +192,48 @@ void *get_page(void)
}
}
# ifdef CFG_POISON_PAGES
if (page != NULL)
memset(page, 'a', PAGE_SIZE);
# endif
return page;
}
void put_page(void *page)
void put_page(uintptr_t phys)
{
# ifdef DEBUG
if ((uintptr_t)page % PAGE_SIZE != 0) {
kprintf("Unaligned ptr %p passed to put_page()!\n", page);
if (phys % PAGE_SIZE != 0) {
kprintf("Unaligned ptr %p passed to put_page()!\n", (void *)phys);
return;
}
if (page < dynpage_start || page >= dynpage_end) {
kprintf("Page %p passed to put_page() is not in the dynamic area!\n", page);
if (phys < dynpage_start || phys >= dynpage_end) {
kprintf("Page %p passed to put_page() is not in the dynamic area!\n",
(void *)phys);
return;
}
# endif
# ifdef CFG_POISON_PAGES
memset(page, 'A', PAGE_SIZE);
# endif
size_t page_number = (page - dynpage_start) / PAGE_SIZE;
size_t index = page_number / 8;
int bit = page_number % 8;
if ((pagemap[index] & (1 << bit)) == 0)
kprintf("Double free of page %p!\n", page);
size_t page_number = (phys - dynpage_start) >> PAGE_SIZE_LOG2;
size_t index = page_number / (sizeof(*pagemap) * 8);
int bit = page_number % (sizeof(*pagemap) * 8);
if ((pagemap[index] & (1lu << bit)) == 0)
kprintf("Double free of page %p!\n", (void *)phys);
pagemap[index] &= ~(1 << bit);
pagemap[index] &= ~(1lu << bit);
}
void *virt_to_phys(void *virt)
uintptr_t virt_to_phys(void *virt)
{
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
if (!pd->entries[pd_index].present)
return NULL;
return 0;
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
if (!pt->entries[pt_index].present)
return NULL;
return 0;
uintptr_t address = pt->entries[pt_index].shifted_address << X86_PAGE_TABLE_ADDRESS_SHIFT;
return (void *)(address + ((uintptr_t)virt & ~PAGE_MASK));
uintptr_t phys = pt->entries[pt_index].shifted_address << X86_PAGE_TABLE_ADDRESS_SHIFT;
/* if the virtual address wasn't page aligned, add the offset into the page */
return phys | ((uintptr_t)virt & ~PAGE_MASK);
}
void vm_flush(void)
@ -259,7 +259,7 @@ static void setup_pagemap(void)
* that away from the usable dynamic page area. So these two lines are
* basically a replacement for a call to get_page().
*/
void *pt_phys = dynpage_start;
uintptr_t pt_phys = dynpage_start;
dynpage_start += PAGE_SIZE;
/*
@ -270,7 +270,7 @@ static void setup_pagemap(void)
* If you do the math, that page table therefore maps addresses
* 0xff800000-0xffbfffff, which is where we start off with the bitmap.
*/
pagemap = (u8 *)0xff800000;
pagemap = (unsigned long *)0xff800000;
/*
* Now that we have a physical page for the page table, we need to
@ -294,26 +294,26 @@ static void setup_pagemap(void)
* until there is enough space. We also need to map those pages to the
* virtual address, of course.
*/
void *pagemap_phys = dynpage_start;
uintptr_t pagemap_phys = dynpage_start;
size_t pt_index = 0;
do {
/*
* take one page away from the dynamic area and reserve it for
* the bitmap, and recalculate the required bitmap size in bytes
* the bitmap, and recalculate the required bitmap length
*/
dynpage_start += PAGE_SIZE;
pagemap_len = ((dynpage_end - dynpage_start) / PAGE_SIZE) / 8;
pagemap_len = (dynpage_end - dynpage_start) / (PAGE_SIZE * sizeof(*pagemap) * 8);
/* now add a page table entry for that page */
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
*(unsigned long *)pt_entry = 0;
uintptr_t address = (uintptr_t)pagemap_phys + pt_index * PAGE_SIZE;
uintptr_t address = pagemap_phys + pt_index * PAGE_SIZE;
pt_entry->shifted_address = address >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->present = 1;
pt_entry->rw = 1;
pt_index++;
} while (pagemap_len > (dynpage_start - pagemap_phys) / 8);
} while (pagemap_len * sizeof(*pagemap) * 8 > (dynpage_start - pagemap_phys));
/*
* Great! We have enough space for the bitmap, and it is mapped
@ -322,7 +322,7 @@ static void setup_pagemap(void)
* clear the bitmap.
*/
vm_flush();
memset(pagemap, 0, pagemap_len);
memset(pagemap, 0, pagemap_len * sizeof(*pagemap));
}
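A worked sizing example for the bootstrap loop above (illustrative figures, not from the commit):

/*
 * Worked example (illustration only):
 *   128 MiB dynamic area / 4 KiB page size          = 32768 pages to track
 *   32768 pages = 32768 bits                        = 4096 bytes of bitmap
 *   4096 bytes / sizeof(unsigned long) (4 on i386)  = 1024 array entries
 * so a single reserved 4 KiB page is enough to hold the whole bitmap.
 */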
/*

@ -12,7 +12,11 @@ set(KERNEL_ORIGIN "0x100000" CACHE STRING "Physical address where the kernel is
set(KERNEL_RELOCATE "0xc0000000" CACHE STRING "Virtual address the kernel is mapped to (don't touch this)")
option(POISON_PAGES "Poison pages after allocate and free" ON)
option(CFG_POISON_PAGES "Poison pages after allocate and free" ON)
option(CFG_POISON_HEAP "Poison heap memory after kmalloc() and kfree()" ON)
set(SCHED_MAX_TASKS "128" CACHE STRING "Maximum number of tasks")
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.

@ -100,11 +100,22 @@ void clist_del(struct clist *node);
* @param head The `struct clist *` that is the head node
* @param type Type of the structure embedding the list nodes
* @param member Name of the `struct clist` within the embedding structure
* @returns The `struct *` the list is embedded in
* @returns The first `struct *` in the list
*/
#define clist_first_entry(head, type, member) \
clist_entry((head)->next, type, member)
/**
* @brief Get the last entry in a list.
*
* @param head The `struct clist *` that is the head node
* @param type Type of the structure embedding the list nodes
* @param member Name of the `struct clist` within the embedding structure
* @returns The last `struct *` in the list
*/
#define clist_last_entry(head, type, member) \
clist_entry((head)->prev, type, member)
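A short usage sketch of the new macro (the struct task embedding type and newest_task helper are hypothetical, only for illustration):

struct task {
	struct clist link; /* embedded list node */
	int pid;
};

static struct task *newest_task(struct clist *run_queue)
{
	/* head->prev is the last node in the ring, i.e. the entry that
	 * was appended most recently if tasks are added at the end */
	return clist_last_entry(run_queue, struct task, link);
}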
/**
* @brief Get the next entry in a clist.
*

@ -29,7 +29,13 @@
#define CFG_KERNEL_RELOCATE @KERNEL_RELOCATE@
/** @brief Poison dynamic pages when allocating and freeing them */
#define CFG_POISON_PAGES @POISON_PAGES@
#cmakedefine01 CFG_POISON_PAGES
/** @brief Poison heap areas after `kmalloc()` and `kfree()` */
#cmakedefine01 CFG_POISON_HEAP
/** @brief Maximum number of tasks */
#define CFG_SCHED_MAX_TASKS @SCHED_MAX_TASKS@
/*
* This file is part of GayBSD.

@ -5,6 +5,10 @@
/**
* @file include/gay/mm.h
* @brief Header for dynamic memory management
*
* To avoid possible confusion, physical memory addresses always use type
* `uintptr_t` and virtual ones are `void *`. This should give at least some
* type of compiler warning if they are accidentally mixed up.
*/
#include <arch/page.h>
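A sketch of what that convention buys (example_mapping is hypothetical, not from the commit): mixing the two now trips the usual integer/pointer conversion warnings.

static void example_mapping(void)
{
	uintptr_t phys = get_page();      /* physical address: plain integer */
	if (!phys)
		return;
	void *virt = (void *)0xd0000000;  /* virtual address: pointer */

	map_page(phys, virt, MM_PAGE_RW); /* correct order compiles cleanly */

	/*
	 * Swapping the arguments, map_page(virt, phys, MM_PAGE_RW), would
	 * produce integer/pointer conversion warnings for both of them.
	 */
}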
@ -63,16 +67,16 @@ enum mm_page_flags {
*
* @returns The physical address of the beginning of the page, or `0` if OOM
*/
void *get_page(void);
uintptr_t get_page(void);
/**
* @brief Release a memory page.
*
* This is only called internally by `kmalloc()`, don't use.
*
* @param page The pointer returned by `get_page()`
* @param phys The address returned by `get_page()`
*/
void put_page(void *page);
void put_page(uintptr_t phys);
/**
* @brief Map a page in physical memory to a virtual address.
@ -85,7 +89,7 @@ void put_page(void *page);
* @param flags Flags to apply to the page
* @returns 0 on success, or `-ENOMEM` if OOM (for allocating new page tables)
*/
int map_page(void *phys, void *virt, enum mm_page_flags flags);
int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags);
/**
* @brief Remove a page mapping.
@ -93,7 +97,7 @@ int map_page(void *phys, void *virt, enum mm_page_flags flags);
* @param virt Virtual address the page is mapped to, must be page aligned
* @returns The physical page address that was being mapped
*/
void *unmap_page(void *virt);
uintptr_t unmap_page(void *virt);
/** @brief Flush the TLB. */
void vm_flush(void);
@ -102,18 +106,18 @@ void vm_flush(void);
* @brief Called internally by `kmalloc_init()` to set up the page frame
* allocator and other low level paging related stuff.
*/
int mem_init(void *start, void *end);
int mem_init(uintptr_t start, uintptr_t end);
/**
* @brief Initialize the memory allocator.
*
* This can only be called once, from the early `_boot()` routine.
*
* @param start Start of the page area
* @param end End of the page area
* @param start Physical start address of the page area
* @param end Physical end address of the page area
* @returns 0 on success, or -1 if the pointers were garbage
*/
int kmalloc_init(void *start, void *end);
int kmalloc_init(uintptr_t start, uintptr_t end);
/*
* This file is part of GayBSD.

@ -18,7 +18,7 @@ void clist_add(struct clist *head, struct clist *new)
head->next = new;
}
void clist_add_end(struct clist *head, struct clist *new)
void clist_add_first(struct clist *head, struct clist *new)
{
head->prev->next = new;
new->next = head;

@ -2,37 +2,422 @@
#include <arch/page.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/errno.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
/* yeah this is probably the most stupid memory allocator there is */
#include <string.h>
int kmalloc_init(void *phys_start, void *phys_end)
/*
* This allocator is based on the popular design by Doug Lea:
* <http://gee.cs.oswego.edu/dl/html/malloc.html>
* For a more in-depth description of how the individual parts work together,
* see also my implementation for Ardix which is very similar except that it
* doesn't have paging:
* <https://git.bsd.gay/fef/ardix/src/commit/c767d551d3301fc30f9fce30eda8f04e2f9a42ab/kernel/mm.c>
* As a matter of fact, this allocator is merely an extension of the one from
* Ardix with the only difference being that the heap can grow upwards.
*/
/**
* Memory block header.
* This sits at the beginning of every memory block (duh).
*/
struct memblk {
/**
* @brief The usable size, i.e. the total block size minus `OVERHEAD`.
*
* This size is also written to the very end of the block, just after
* the last usable address. Additionally, since blocks are always aligned
* to at least 4 bytes anyway, we can use the LSB of this size as a flag
* for whether the block is currently allocated (1) or not (0). This
* makes it much easier to detect two free neighboring blocks when
* `kfree()`ing one.
*/
usize low_size[1];
union {
/** @brief If the block is allocated, this will be overwritten */
struct clist clink;
/** @brief Used as the return value for `kmalloc()` */
u8 data[0];
/**
* @brief Used to get the copy of the size field at the end of
* the block, right after the last byte of `data`
*/
usize high_size[0];
};
};
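To make the boundary-tag scheme concrete, an illustrative layout of one block with a 16-byte usable area on a 32-bit system (usize = 4 bytes); this diagram is an editorial addition, not part of the commit:

/*
 * offset  0: low_size  = 16 | flags   (LSBs carry the alloc/border flags)
 * offset  4: data[0..15]              (returned by kmalloc(); overlaps
 *                                      clink while the block is free)
 * offset 20: high_size = 16 | flags   (copy of the size; read as
 *                                      low_size[-1] by the next block's
 *                                      blk_prev())
 * offset 24: start of the next block
 *
 * Total footprint: 16 usable bytes + OVERHEAD (8) = 24 bytes.
 */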
/* overhead per allocation in bytes */
#define OVERHEAD (2 * sizeof(usize))
/* every allocation is padded to a multiple of this */
#define MIN_SIZE (sizeof(struct clist))
/* memory blocks, sorted by increasing size */
static CLIST(blocks);
/*
* We play it *really* simple: Start at an arbitrary (page aligned, preferably
* even page table aligned) address in virtual memory and extend the area as
* needed as the heap grows. Efficiency doesn't matter for now; we always make
* the heap a contiguous area without holes. There isn't even a mechanism for
* releasing physical pages yet, i really just want to get to anything that is
* at all usable so i can finally work on the core system architecture.
*/
static void *heap_start = (void *)0xd0000000;
/*
* Points to the first address that is not part of the heap anymore, such that
* sizeof(heap) == heap_end - heap_start
* Thus, the heap initially has a size of zero.
*/
static void *heap_end = (void *)0xd0000000;
/**
* @brief Increase `heap_end` by up to `num_pages * PAGE_SIZE`.
*
* @param num_pages Number of pages to increase the heap by
* @returns The actual number of pages the heap was increased by; this may be
* less than `num_pages` if there were not enough free pages left
*/
static usize grow_heap(usize num_pages);
/**
* @brief Add a new block at the end of the heap by downloading more RAM (`grow_heap()`, actually). */
static struct memblk *blk_create(usize num_pages);
/** @brief Get the usable block size in bytes, without flags or overhead. */
static usize blk_get_size(struct memblk *blk);
/** @brief Set the usable block size without overhead and without affecting flags. */
static void blk_set_size(struct memblk *blk, usize size);
/** @brief Flag a block as allocated. */
static void blk_set_alloc(struct memblk *blk);
/** @brief Remove the allocated flag from a block. */
static void blk_clear_alloc(struct memblk *blk);
/** @brief Return nonzero if the block is allocated. */
static bool blk_is_alloc(struct memblk *blk);
/** @brief Set the border flag at the start of a block. */
static void blk_set_border_start(struct memblk *blk);
/** @brief Remove the border flag from the start of a block. */
static void blk_clear_border_start(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the start. */
static bool blk_is_border_start(struct memblk *blk);
/** @brief Set the border flag at the end of a block. */
static void blk_set_border_end(struct memblk *blk);
/** @brief Remove the border flag from the end of a block. */
static void blk_clear_border_end(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the end. */
static bool blk_is_border_end(struct memblk *blk);
/** @brief Get a block's immediate lower neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_prev(struct memblk *blk);
/** @brief Get a block's immediate higher neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_next(struct memblk *blk);
/** @brief Merge two contiguous free blocks into one and return the merged block. */
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top);
/** @brief Attempt to merge both the lower and higher neighbors of a free block. */
static struct memblk *blk_try_merge(struct memblk *blk);
/** @brief Cut a slice from a free block and return the slice. */
static struct memblk *blk_slice(struct memblk *blk, usize slice_size);
int kmalloc_init(uintptr_t phys_start, uintptr_t phys_end)
{
int err = mem_init(phys_start, phys_end);
if (err)
return err;
if (grow_heap(1) != 1)
return -ENOMEM;
struct memblk *blk = heap_start;
blk_set_size(blk, PAGE_SIZE - OVERHEAD);
blk_clear_alloc(blk);
blk_set_border_start(blk);
blk_set_border_end(blk);
clist_add(&blocks, &blk->clink);
return 0;
}
void *kmalloc(size_t size, enum mm_flags flags)
void *kmalloc(usize size, enum mm_flags flags)
{
if (flags != MM_KERNEL) {
kprintf("invalild flags passed to kmalloc()\n");
kprintf("Invalid flags passed to kmalloc()\n");
return NULL;
}
if (size > PAGE_SIZE) {
kprintf("Requested alloc size of %u > PAGE_SIZE, i can't do that yet qwq\n", size);
if (size == 0)
return NULL;
if (size % MIN_SIZE != 0)
size = (size / MIN_SIZE) * MIN_SIZE + MIN_SIZE;
struct memblk *cursor;
struct memblk *blk = NULL;
clist_foreach_entry(&blocks, cursor, clink) {
if (blk_get_size(cursor) >= size) {
blk = cursor;
break;
}
}
return get_page();
if (blk == NULL) {
usize required_pages = ((size + OVERHEAD) / PAGE_SIZE) + 1;
blk = blk_create(required_pages);
if (blk == NULL) {
kprintf("Kernel OOM qwq\n");
return NULL;
}
clist_add(&blocks, &blk->clink);
}
blk = blk_slice(blk, size);
blk_set_alloc(blk);
# if CFG_POISON_HEAP
memset(blk->data, 'a', blk_get_size(blk));
# endif
return blk->data;
}
void kfree(void *ptr)
{
put_page(ptr);
# ifdef DEBUG
if (ptr < heap_start || ptr > heap_end) {
kprintf("Tried to free %p which is outside the heap!\n", ptr);
return;
}
# endif
struct memblk *blk = ptr - sizeof(blk->low_size);
# ifdef DEBUG
if (!blk_is_alloc(blk)) {
kprintf("Double free of %p!\n", ptr);
return;
}
# endif
# if CFG_POISON_HEAP
memset(blk->data, 'A', blk_get_size(blk));
# endif
blk_clear_alloc(blk);
blk_try_merge(blk);
}
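A minimal usage sketch of the allocator (struct foo and make_foo are hypothetical; this assumes kmalloc_init() already ran during boot):

struct foo {
	int value;
	char name[16];
};

static int make_foo(void)
{
	struct foo *f = kmalloc(sizeof(*f), MM_KERNEL);
	if (f == NULL)
		return -ENOMEM;

	f->value = 42;
	/* ... use f ... */
	kfree(f);
	return 0;
}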
static inline struct memblk *blk_create(usize num_pages)
{
/*
* heap_end points to the first address that is not part of the heap
* anymore, so that's where the new block starts when we add pages
*/
struct memblk *blk = heap_end;
if (grow_heap(num_pages) != num_pages)
return NULL; /* OOM :( */
blk_set_size(blk, num_pages * PAGE_SIZE - OVERHEAD);
blk_clear_alloc(blk);
blk_set_border_end(blk);
struct memblk *old_high = blk_prev(blk);
blk_clear_border_end(old_high);
if (!blk_is_alloc(old_high)) {
clist_del(&old_high->clink);
blk = blk_merge(old_high, blk);
}
return blk;
}
static inline usize grow_heap(usize num_pages)
{
usize i;
for (i = 0; i < num_pages; i++) {
uintptr_t page_phys = get_page();
if (!page_phys)
break;
if (map_page(page_phys, heap_end, MM_PAGE_RW) != 0) {
put_page(page_phys);
break;
}
heap_end += PAGE_SIZE;
}
vm_flush();
return i;
}
#define ALLOC_FLAG ((usize)1 << 0)
#define BORDER_FLAG ((usize)1 << 1)
#define SIZE_MASK ( ~(ALLOC_FLAG | BORDER_FLAG) )
static struct memblk *blk_try_merge(struct memblk *blk)
{
struct memblk *neighbor = blk_prev(blk);
if (neighbor != NULL && !blk_is_alloc(neighbor)) {
clist_del(&neighbor->clink);
blk = blk_merge(neighbor, blk);
}
neighbor = blk_next(blk);
if (neighbor != NULL && !blk_is_alloc(neighbor)) {
clist_del(&neighbor->clink);
blk = blk_merge(blk, neighbor);
}
struct memblk *cursor;
clist_foreach_entry(&blocks, cursor, clink) {
if (blk_get_size(cursor) >= blk_get_size(blk))
break;
}
clist_add_first(&cursor->clink, &blk->clink);
return blk;
}
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top)
{
usize bottom_size = blk_get_size(bottom);
usize top_size = blk_get_size(top);
usize total_size = bottom_size + top_size + OVERHEAD;
blk_set_size(bottom, total_size);
return bottom;
}
static struct memblk *blk_slice(struct memblk *blk, usize slice_size)
{
clist_del(&blk->clink);
/*
* If the remaining size is less than the minimum allocation unit, we
* hand out the entire block. Additionally, we must check for an
* underflow, which happens if the requested slice is less than OVERHEAD
* bytes smaller than the full block (e.g. slicing 20 bytes out of a
* 24-byte block when OVERHEAD is 8).
*/
usize rest_size = blk_get_size(blk) - slice_size - OVERHEAD;
if (rest_size < MIN_SIZE || rest_size + OVERHEAD < rest_size) {
blk_set_alloc(blk);
return blk;
}
usize slice_words = slice_size / sizeof(blk->low_size);
struct memblk *rest = (void *)&blk->high_size[slice_words + 1];
blk_set_size(rest, rest_size);
blk_clear_alloc(rest);
blk_clear_border_start(rest);
blk_set_size(blk, slice_size);
blk_set_alloc(blk);
blk_clear_border_end(blk);
struct memblk *cursor;
clist_foreach_entry(&blocks, cursor, clink) {
if (blk_get_size(cursor) <= rest_size)
break;
}
clist_add_first(&cursor->clink, &rest->clink);
return blk;
}
static inline struct memblk *blk_prev(struct memblk *blk)
{
if (blk_is_border_start(blk))
return NULL;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Warray-bounds" /* trust me bro, this is fine */
return (void *)blk - (blk->low_size[-1] & SIZE_MASK) - OVERHEAD;
#pragma clang diagnostic pop
}
static inline struct memblk *blk_next(struct memblk *blk)
{
if (blk_is_border_end(blk))
return NULL;
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
return (void *)&blk->high_size[index + 1];
}
static inline usize blk_get_size(struct memblk *blk)
{
return blk->low_size[0] & SIZE_MASK;
}
static void blk_set_size(struct memblk *blk, usize size)
{
/* don't affect flags */
blk->low_size[0] &= ~SIZE_MASK;
# ifdef DEBUG
if (size & ~SIZE_MASK)
kprintf("Unaligned size in blk_set_size()\n");
# endif
blk->low_size[0] |= size & SIZE_MASK;
usize index = size / sizeof(blk->low_size[0]);
blk->high_size[index] &= ~SIZE_MASK;
blk->high_size[index] |= size & SIZE_MASK;
}
static inline void blk_set_alloc(struct memblk *blk)
{
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
blk->low_size[0] |= ALLOC_FLAG;
blk->high_size[index] |= ALLOC_FLAG;
}
static inline void blk_clear_alloc(struct memblk *blk)
{
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
blk->low_size[0] &= ~ALLOC_FLAG;
blk->high_size[index] &= ~ALLOC_FLAG;
}
static inline bool blk_is_alloc(struct memblk *blk)
{
return (blk->low_size[0] & ALLOC_FLAG) != 0;
}
static inline void blk_set_border_start(struct memblk *blk)
{
blk->low_size[0] |= BORDER_FLAG;
}
static inline void blk_clear_border_start(struct memblk *blk)
{
blk->low_size[0] &= ~BORDER_FLAG;
}
static inline bool blk_is_border_start(struct memblk *blk)
{
return (blk->low_size[0] & BORDER_FLAG) != 0;
}
static inline void blk_set_border_end(struct memblk *blk)
{
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
blk->high_size[index] |= BORDER_FLAG;
}
static inline void blk_clear_border_end(struct memblk *blk)
{
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
blk->high_size[index] &= ~BORDER_FLAG;
}
static inline bool blk_is_border_end(struct memblk *blk)
{
usize index = blk->low_size[0] / sizeof(blk->low_size[0]);
return (blk->high_size[index] & BORDER_FLAG) != 0;
}
/*
