@ -2,37 +2,422 @@
# include <arch/page.h>
# include <gay/clist.h>
# include <gay/config.h>
# include <gay/errno.h>
# include <gay/kprintf.h>
# include <gay/mm.h>
# include <gay/types.h>
/* yeah this is probably the most stupid memory allocator there is */
# include <string.h>
int kmalloc_init ( void * phys_start , void * phys_end )
/*
* This allocator is based on the popular design by Doug Lea :
* < http : //gee.cs.oswego.edu/dl/html/malloc.html>
* For a more in - depth description of how the individual parts work together ,
* see also my implementation for Ardix which is very similar except that it
* doesn ' t have paging :
* < https : //git.bsd.gay/fef/ardix/src/commit/c767d551d3301fc30f9fce30eda8f04e2f9a42ab/kernel/mm.c>
* As a matter of fact , this allocator is merely an extension of the one from
* Ardix with the only difference being that the heap can grow upwards .
*/
/**
 * @brief Header that sits at the beginning of every heap memory block.
 *
 * The usable size is stored twice: once here in `low_size`, and once in the
 * word directly after the last usable byte (reached through `high_size`).
 * Since blocks are always aligned to at least 4 bytes, the two lowest bits
 * of each size copy are free to hold the ALLOC and BORDER flags; having the
 * size at both ends makes it cheap to find free neighbors from `kfree()`.
 */
struct memblk {
	/**
	 * @brief Usable size in bytes, i.e. the total block size minus
	 * `MEMBLK_OVERHEAD`.  The LSB doubles as the allocated flag
	 * (1 = allocated, 0 = free); see above for details.
	 */
	usize low_size[1];
	union {
		/** @brief Freelist link; only meaningful while the block is free */
		struct clist clink;
		/** @brief First usable byte, i.e. the `kmalloc()` return value */
		u8 data[0];
		/**
		 * @brief Base for indexing to the size copy stored at the end
		 * of the block, right after the last byte of `data`
		 */
		usize high_size[0];
	};
};
/** @brief Bookkeeping bytes per allocation: one size word at each end. */
#define OVERHEAD (2 * sizeof(usize))
/** @brief Every allocation is padded to a multiple of this. */
#define MIN_SIZE (sizeof(struct clist))

/** @brief Free block list, sorted by increasing usable size. */
static CLIST(blocks);
/*
 * We play it *really* simple: start at an arbitrary (page aligned, preferably
 * even page table aligned) address in virtual memory and extend the area as
 * needed while the heap grows.  Efficiency doesn't matter for now; the heap
 * is always one contiguous area without holes.  There isn't even a mechanism
 * for releasing physical pages yet, i really just want to get to anything
 * that is at all usable so i can finally work on the core system architecture.
 */
static void *heap_start = (void *)0xd0000000;

/*
 * First address that is NOT part of the heap anymore, i.e.
 *   sizeof(heap) == heap_end - heap_start
 * which means the heap initially has a size of zero.
 */
static void *heap_end = (void *)0xd0000000;
/**
 * @brief Increase `heap_end` by up to `num_pages * PAGE_SIZE`.
 *
 * @param num_pages Number of pages to increase the heap by
 * @returns The actual number of pages the heap was increased by; this may be
 *	less than `num_pages` if there were not enough free pages left
 */
static usize grow_heap(usize num_pages);

/** @brief Add a new block at the end of the heap by downloading more RAM (`grow_heap()`, actually). */
static struct memblk *blk_create(usize num_pages);

/** @brief Get the usable block size in bytes, without flags or overhead. */
static usize blk_get_size(struct memblk *blk);
/** @brief Set the usable block size without overhead and without affecting flags. */
static void blk_set_size(struct memblk *blk, usize size);

/** @brief Flag a block as allocated. */
static void blk_set_alloc(struct memblk *blk);
/** @brief Remove the allocated flag from a block. */
static void blk_clear_alloc(struct memblk *blk);
/** @brief Return nonzero if the block is allocated. */
static bool blk_is_alloc(struct memblk *blk);

/** @brief Set the border flag at the start of a block. */
static void blk_set_border_start(struct memblk *blk);
/** @brief Remove the border flag from the start of a block. */
static void blk_clear_border_start(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the start. */
static bool blk_is_border_start(struct memblk *blk);

/** @brief Set the border flag at the end of a block. */
static void blk_set_border_end(struct memblk *blk);
/** @brief Remove the border flag from the end of a block. */
static void blk_clear_border_end(struct memblk *blk);
/** @brief Return nonzero if a block has the border flag set at the end. */
static bool blk_is_border_end(struct memblk *blk);

/** @brief Get a block's immediate lower neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_prev(struct memblk *blk);
/** @brief Get a block's immediate higher neighbor, or NULL if it doesn't have one. */
static struct memblk *blk_next(struct memblk *blk);

/** @brief Merge two contiguous free blocks into one, resort the list, and return the block. */
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top);
/** @brief Attempt to merge both the lower and higher neighbors of a free block. */
static struct memblk *blk_try_merge(struct memblk *blk);

/** @brief Cut a slice from a free block and return the slice. */
static struct memblk *blk_slice(struct memblk *blk, usize slice_size);
/**
 * @brief Initialize the page allocator and seed the heap with one free page.
 *
 * @param phys_start Physical start address handed to `mem_init()`
 * @param phys_end Physical end address handed to `mem_init()`
 * @returns 0 on success, a negative error code from `mem_init()`,
 *	or -ENOMEM if the initial heap page could not be mapped
 */
int kmalloc_init(uintptr_t phys_start, uintptr_t phys_end)
{
	int err = mem_init(phys_start, phys_end);
	if (err != 0)
		return err;

	if (grow_heap(1) != 1)
		return -ENOMEM;

	/* the entire first page becomes one big free block */
	struct memblk *first = heap_start;
	blk_set_size(first, PAGE_SIZE - OVERHEAD);
	blk_clear_alloc(first);
	blk_set_border_start(first);
	blk_set_border_end(first);
	clist_add(&blocks, &first->clink);

	return 0;
}
/**
 * @brief Allocate `size` bytes of kernel heap memory.
 *
 * The free list is sorted by ascending size, so the first block that fits is
 * also the tightest fit.  If no block is large enough, the heap is grown by
 * `blk_create()`.
 *
 * @param size Requested usable size in bytes; rounded up to a multiple of
 *	MIN_SIZE.  A size of 0 yields NULL.
 * @param flags Must be MM_KERNEL; anything else is rejected
 * @returns Pointer to the usable memory, or NULL on invalid arguments or OOM.
 *	Ownership transfers to the caller, who releases it with `kfree()`.
 */
void *kmalloc(usize size, enum mm_flags flags)
{
	/* fixed: stray duplicate `void *kmalloc(size_t ...)` signature and a
	 * leftover `return get_page();` from an earlier revision were removed */
	if (flags != MM_KERNEL) {
		kprintf("Invalid flags passed to kmalloc()\n");
		return NULL;
	}
	if (size == 0)
		return NULL;

	/* pad to the next multiple of the minimum allocation unit */
	if (size % MIN_SIZE != 0)
		size = (size / MIN_SIZE) * MIN_SIZE + MIN_SIZE;

	/* first fit: list is sorted by size, so the first match is the best */
	struct memblk *blk = NULL;
	struct memblk *cursor;
	clist_foreach_entry(&blocks, cursor, clink) {
		if (blk_get_size(cursor) >= size) {
			blk = cursor;
			break;
		}
	}

	if (blk == NULL) {
		/* no free block is big enough, grow the heap */
		usize required_pages = ((size + OVERHEAD) / PAGE_SIZE) + 1;
		blk = blk_create(required_pages);
		if (blk == NULL) {
			kprintf("Kernel OOM qwq\n");
			return NULL;
		}
		clist_add(&blocks, &blk->clink);
	}

	blk = blk_slice(blk, size);
	blk_set_alloc(blk);
#if CFG_POISON_HEAP
	memset(blk->data, 'a', blk_get_size(blk));
#endif
	return blk->data;
}
void kfree ( void * ptr )
{
put_page ( ptr ) ;
# ifdef DEBUG
if ( ptr < heap_start | | ptr > heap_end ) {
kprintf ( " Tried to free %p which is outside the heap! \n " , ptr ) ;
return ;
}
# endif
struct memblk * blk = ptr - sizeof ( blk - > low_size ) ;
# ifdef DEBUG
if ( ! blk_is_alloc ( blk ) ) {
kprintf ( " Double free of %p! \n " , ptr ) ;
return ;
}
# endif
# if CFG_POISON_HEAP
memset ( blk - > data , ' A ' , blk_get_size ( blk ) ) ;
# endif
blk_clear_alloc ( blk ) ;
blk_try_merge ( blk ) ;
}
/**
 * @brief Append a fresh block to the top of the heap.
 *
 * @param num_pages Number of pages to map for the new block
 * @returns The new (free, unlisted) block, possibly merged with the previous
 *	top block if that one was free; NULL if the pages could not be mapped
 */
static inline struct memblk *blk_create(usize num_pages)
{
	/*
	 * heap_end is the first address that is not part of the heap yet,
	 * which is exactly where the new block will live once the extra
	 * pages are mapped.
	 */
	struct memblk *blk = heap_end;
	if (grow_heap(num_pages) != num_pages)
		return NULL; /* OOM :( */

	blk_set_size(blk, num_pages * PAGE_SIZE - OVERHEAD);
	blk_clear_alloc(blk);
	blk_set_border_end(blk);

	/* the previous top block no longer sits at the heap's upper border */
	struct memblk *old_high = blk_prev(blk);
	blk_clear_border_end(old_high);
	if (!blk_is_alloc(old_high)) {
		/* it is free, so fuse it with the new block */
		clist_del(&old_high->clink);
		blk = blk_merge(old_high, blk);
	}

	return blk;
}
/**
 * @brief Map up to `num_pages` fresh pages at `heap_end`, advancing it.
 *
 * Stops early if `get_page()` runs dry or `map_page()` fails; a page that
 * could not be mapped is returned to the page allocator.
 *
 * @param num_pages Number of pages to grow the heap by
 * @returns How many pages were actually mapped (may be less than requested)
 */
static inline usize grow_heap(usize num_pages)
{
	usize mapped;

	for (mapped = 0; mapped < num_pages; mapped++) {
		uintptr_t page_phys = get_page();
		if (!page_phys)
			break;

		if (map_page(page_phys, heap_end, MM_PAGE_RW) != 0) {
			/* give the unmappable page back */
			put_page(page_phys);
			break;
		}

		heap_end += PAGE_SIZE;
	}

	vm_flush();
	return mapped;
}
/** @brief Bit 0 of each size copy: the block is currently allocated. */
#define ALLOC_FLAG ((usize)1 << 0)
/** @brief Bit 1 of each size copy: the block sits at a heap border. */
#define BORDER_FLAG ((usize)1 << 1)
/** @brief Mask selecting the actual size bits of a size copy. */
#define SIZE_MASK ( ~(ALLOC_FLAG | BORDER_FLAG) )
static struct memblk * blk_try_merge ( struct memblk * blk )
{
struct memblk * neighbor = blk_prev ( blk ) ;
if ( neighbor ! = NULL & & ! blk_is_alloc ( neighbor ) ) {
clist_del ( & neighbor - > clink ) ;
blk = blk_merge ( neighbor , blk ) ;
}
neighbor = blk_next ( blk ) ;
if ( neighbor ! = NULL & & ! blk_is_alloc ( neighbor ) ) {
clist_del ( & neighbor - > clink ) ;
blk = blk_merge ( blk , neighbor ) ;
}
struct memblk * cursor ;
clist_foreach_entry ( & blocks , cursor , clink ) {
if ( blk_get_size ( cursor ) > = blk_get_size ( blk ) )
break ;
}
clist_add_first ( & cursor - > clink , & blk - > clink ) ;
return blk ;
}
/**
 * @brief Fuse two physically adjacent blocks into one.
 *
 * @param bottom Lower of the two blocks; becomes the merged block
 * @param top Upper block, directly after `bottom` in memory
 * @returns `bottom`, now spanning both blocks
 */
static struct memblk *blk_merge(struct memblk *bottom, struct memblk *top)
{
	/* the two headers in the middle collapse, freeing OVERHEAD bytes */
	usize merged_size = blk_get_size(bottom) + blk_get_size(top) + OVERHEAD;
	blk_set_size(bottom, merged_size);
	return bottom;
}
/**
 * @brief Cut `slice_size` usable bytes off the bottom of a free block.
 *
 * The slice is removed from the free list and marked allocated; whatever
 * remains is turned into a new free block and re-inserted into the list.
 * If the remainder would be too small to be useful, the whole block is
 * handed out instead.
 *
 * @param blk Free block on the free list to slice from
 * @param slice_size Usable size of the slice, a multiple of MIN_SIZE
 * @returns The allocated slice
 */
static struct memblk *blk_slice(struct memblk *blk, usize slice_size)
{
	clist_del(&blk->clink);

	/*
	 * If the remainder after the cut is smaller than the minimum
	 * allocation unit, hand out the entire block.  The second condition
	 * catches the unsigned underflow that occurs when the block is less
	 * than OVERHEAD bytes larger than the slice.
	 */
	usize rest_size = blk_get_size(blk) - slice_size - OVERHEAD;
	if (rest_size < MIN_SIZE || rest_size + OVERHEAD < rest_size) {
		blk_set_alloc(blk);
		return blk;
	}

	/* the remainder becomes a new free block above the slice */
	usize slice_words = slice_size / sizeof(blk->low_size);
	struct memblk *rest = (void *)&blk->high_size[slice_words + 1];
	blk_set_size(rest, rest_size);
	blk_clear_alloc(rest);
	blk_clear_border_start(rest);

	blk_set_size(blk, slice_size);
	blk_set_alloc(blk);
	blk_clear_border_end(blk);

	/*
	 * NOTE(review): this insertion compares with `<=` while blk_try_merge()
	 * uses `>=` for the same sorted list -- verify the intended order.
	 */
	struct memblk *cursor;
	clist_foreach_entry(&blocks, cursor, clink) {
		if (blk_get_size(cursor) <= rest_size)
			break;
	}
	clist_add_first(&cursor->clink, &rest->clink);

	return blk;
}
static inline struct memblk * blk_prev ( struct memblk * blk )
{
if ( blk_is_border_start ( blk ) )
return NULL ;
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Warray-bounds" /* trust me bro, this is fine */
return ( void * ) blk - ( blk - > low_size [ - 1 ] & SIZE_MASK ) - OVERHEAD ;
# pragma clang diagnostic pop
}
static inline struct memblk * blk_next ( struct memblk * blk )
{
if ( blk_is_border_end ( blk ) )
return NULL ;
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
return ( void * ) & blk - > high_size [ index + 1 ] ;
}
static inline usize blk_get_size ( struct memblk * blk )
{
return blk - > low_size [ 0 ] & SIZE_MASK ;
}
/**
 * @brief Store the usable size in both size copies without touching flags.
 *
 * @param blk Block whose size to set
 * @param size Usable size in bytes; must be aligned so the low flag bits
 *	stay clear
 */
static void blk_set_size(struct memblk *blk, usize size)
{
	/* clear the old size bits while keeping the flags */
	blk->low_size[0] &= ~SIZE_MASK;
#ifdef DEBUG
	/*
	 * fixed: this used to test `size & SIZE_MASK`, which is nonzero for
	 * every valid size and fired on all calls; an unaligned size is one
	 * that bleeds into the flag bits, i.e. `size & ~SIZE_MASK`
	 */
	if (size & ~SIZE_MASK)
		kprintf("Unaligned size in blk_set_size()\n");
#endif
	blk->low_size[0] |= size & SIZE_MASK;

	/* mirror the size into the copy at the far end of the block */
	usize index = size / sizeof(blk->low_size[0]);
	blk->high_size[index] &= ~SIZE_MASK;
	blk->high_size[index] |= size & SIZE_MASK;
}
static inline void blk_set_alloc ( struct memblk * blk )
{
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
blk - > low_size [ 0 ] | = ALLOC_FLAG ;
blk - > high_size [ index ] | = ALLOC_FLAG ;
}
static inline void blk_clear_alloc ( struct memblk * blk )
{
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
blk - > low_size [ 0 ] & = ~ ALLOC_FLAG ;
blk - > high_size [ index ] & = ~ ALLOC_FLAG ;
}
static inline bool blk_is_alloc ( struct memblk * blk )
{
return ( blk - > low_size [ 0 ] & ALLOC_FLAG ) ! = 0 ;
}
static inline void blk_set_border_start ( struct memblk * blk )
{
blk - > low_size [ 0 ] | = BORDER_FLAG ;
}
static inline void blk_clear_border_start ( struct memblk * blk )
{
blk - > low_size [ 0 ] & = ~ BORDER_FLAG ;
}
static inline bool blk_is_border_start ( struct memblk * blk )
{
return ( blk - > low_size [ 0 ] & BORDER_FLAG ) ! = 0 ;
}
static inline void blk_set_border_end ( struct memblk * blk )
{
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
blk - > high_size [ index ] | = BORDER_FLAG ;
}
static inline void blk_clear_border_end ( struct memblk * blk )
{
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
blk - > high_size [ index ] & = ~ BORDER_FLAG ;
}
static inline bool blk_is_border_end ( struct memblk * blk )
{
usize index = blk - > low_size [ 0 ] / sizeof ( blk - > low_size [ 0 ] ) ;
return ( blk - > high_size [ index ] & BORDER_FLAG ) ! = 0 ;
}
/*