mm: implement runtime page mapping
parent 17320f2571, commit 3d6258a06e
6 changed files with 253 additions and 58 deletions
@@ -65,8 +65,6 @@ static void fb_init(enum vga_color fg, enum vga_color bg);
static void print_gay_propaganda(void);
/** @brief Translate a physical memory address to a virtual (mapped) one. */
#define phys_to_virt(ptr) ( (typeof(ptr))( (void *)(ptr) + CFG_KERNEL_RELOCATE ) )
static struct mb2_tag *next_tag(struct mb2_tag *tag);
static void handle_tag(struct mb2_tag *tag);
static void handle_mmap_tag(struct mb2_tag_mmap *tag);
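As a side note on the phys_to_virt macro above: it simply adds the kernel's relocation offset to a pointer. A standalone sketch of the same arithmetic (the CFG_KERNEL_RELOCATE value is assumed to match the KERNEL_RELOCATE default in the CMake hunk near the end of this commit, and the multiboot info address is made up):

#include <stdint.h>
#include <stdio.h>

/* assumed to equal the KERNEL_RELOCATE default (0xc0000000) from the build config */
#define CFG_KERNEL_RELOCATE 0xc0000000u

/* same shape as the macro in the hunk above */
#define phys_to_virt(ptr) ( (typeof(ptr))( (void *)(ptr) + CFG_KERNEL_RELOCATE ) )

int main(void)
{
	void *mb2_info_phys = (void *)0x10000;               /* hypothetical address from GRUB */
	void *mb2_info_virt = phys_to_virt(mb2_info_phys);
	printf("%p -> %p\n", mb2_info_phys, mb2_info_virt);  /* 0x10000 -> 0xc0010000 */
	return 0;
}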
@@ -85,8 +83,6 @@ void _boot(u32 magic, void *address)
		return;
	}
	//kprintf("%p\n", address);
	print_gay_propaganda();
	/*
@@ -94,7 +90,7 @@ void _boot(u32 magic, void *address)
	 * so we need to be careful to translate all pointers to virtual
	 * addresses before accessing them.
	 */
	address = phys_to_virt(address);
	address += CFG_KERNEL_RELOCATE;
	for (struct mb2_tag *tag = address + 8; tag != NULL; tag = next_tag(tag))
		handle_tag(tag);
@@ -126,7 +122,7 @@ static inline void handle_mmap_tag(struct mb2_tag_mmap *tag)
	while ((void *)entry < (void *)tag + tag->tag.size) {
		kprintf("[%p-%p] %s\n",
			(void *)entry->addr,
			(void *)entry->len,
			(void *)entry->addr + entry->len - 1,
			mmap_type_name(entry->type));
		if (entry->type == 1 && entry->len > region_len) {
@@ -142,10 +138,10 @@ static inline void handle_mmap_tag(struct mb2_tag_mmap *tag)
		while (1);
	}
	// if (kmalloc_init(region, region + region_len) != 0) {
	// kprintf("kmalloc_init() failed! Aborting.\n");
	// while (1);
	// }
	if (kmalloc_init(region, region + region_len) != 0) {
		kprintf("kmalloc_init() failed! Aborting.\n");
		while (1);
	}
}

static inline struct mb2_tag *next_tag(struct mb2_tag *tag)
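To make the mmap handling above easier to follow, here is a small standalone sketch of the same walk: print each region's inclusive end address (addr + len - 1) and remember the largest type-1 (usable) region, which is what handle_mmap_tag() hands to kmalloc_init(). The struct is a simplified stand-in for the real multiboot2 entry layout, and the map values are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the real mb2 mmap entry (assumption) */
struct mmap_entry {
	uint64_t addr;
	uint64_t len;
	uint32_t type;   /* 1 = usable RAM */
};

int main(void)
{
	struct mmap_entry entries[] = {          /* hypothetical GRUB memory map */
		{ 0x00000000, 0x0009fc00, 1 },
		{ 0x00100000, 0x07ee0000, 1 },
		{ 0xfffc0000, 0x00040000, 2 },
	};
	uint64_t region = 0, region_len = 0;

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		/* same arithmetic as the kprintf() above: last byte is addr + len - 1 */
		printf("[%#llx-%#llx] type %u\n",
		       (unsigned long long)entries[i].addr,
		       (unsigned long long)(entries[i].addr + entries[i].len - 1),
		       (unsigned)entries[i].type);
		if (entries[i].type == 1 && entries[i].len > region_len) {
			region = entries[i].addr;
			region_len = entries[i].len;
		}
	}
	printf("largest usable region: %#llx, %llu bytes\n",
	       (unsigned long long)region, (unsigned long long)region_len);
	return 0;
}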
@@ -119,9 +119,8 @@ header_end:
asmfn_begin(_start)
	/*
	 * 1023 of the 1024 pages in the page table are mapped to the low memory
	 * starting at 1 MiB, the address where the kernel image is loaded
	 * ($_image_start_phys). We currently assume the kernel is < 4 MiB
	 * The kernel image starts at 1 MiB into physical memory.
	 * We currently assume the kernel is < 3 MiB
	 * and therefore can be mapped within a single page table.
	 * As the kernel gets more and more bloated, this might not be the case
	 * in the future anymore, so we should ideally add support for multiple
@@ -147,8 +146,8 @@ asmfn_begin(_start)
	loop 1b

	/*
	 * Conveniently, the VGA character framebuffer fits exactly into one
	 * page. The physical address range
	 * Conveniently, the full VGA character framebuffer fits into one page
	 * and even starts at a page aligned address. The physical range
	 * 0x000b8000 - 0x000b8fff
	 * gets mapped to the virtual address range
	 * 0xc03ff000 - 0xc03fffff
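The 0x000b8000 to 0xc03ff000 mapping mentioned above can be sanity-checked with the same index arithmetic that map_page() uses later in this commit. A standalone sketch, assuming PAGE_SIZE_LOG2 is 12 (4 KiB pages):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_LOG2 12   /* assumed: 4 KiB pages */

int main(void)
{
	uintptr_t virt = 0xc03ff000;   /* virtual home of the VGA framebuffer */
	/* same index math as map_page() further down in this commit */
	size_t pd_index = (virt >> PAGE_SIZE_LOG2) / 1024;
	size_t pt_index = (virt >> PAGE_SIZE_LOG2) % 1024;
	/* prints 768 and 1023: the last entry of the kernel's page table at 0xc0000000 */
	printf("pd_index = %zu, pt_index = %zu\n", pd_index, pt_index);
	return 0;
}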
@@ -184,6 +183,17 @@ asmfn_begin(_start)
	movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 0 * 4 /* 0x00000000 */
	movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 768 * 4 /* 0xc0000000 */

	/*
	 * The last entry in the page directory points to itself.
	 * This has the effect of mapping all page tables in the page directory to
	 * 0xffc00000 - 0xffffefff
	 * and the page directory itself to
	 * 0xfffff000 - 0xffffffff
	 * because the page directory is being interpreted as a page table.
	 * This allows us to manipulate the table while we are in virtual memory.
	 */
	movl $(phys_addr(pd0) + 0x003), phys_addr(pd0) + 1023 * 4 /* 0xffc00000 */

	/* put the (physical) address of pd0 into cr3 so it will be used */
	mov $phys_addr(pd0), %ecx
	mov %ecx, %cr3
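The address ranges in the comment above follow directly from that self-referencing entry: once page directory slot 1023 points back at the directory, page table n becomes visible at 0xffc00000 + n * 4096, and slot 1023 itself (the directory) lands at 0xfffff000. A standalone arithmetic check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned n = 0; n < 1024; n += 1023) {   /* sample the first and last slot */
		uintptr_t va = 0xffc00000u + (uintptr_t)n * 0x1000u;
		printf("page table %4u appears at %#lx\n", n, (unsigned long)va);
	}
	/* output: table 0 at 0xffc00000, table 1023 (the directory itself) at 0xfffff000 */
	return 0;
}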
@@ -194,9 +204,9 @@ asmfn_begin(_start)
	mov %ecx, %cr0

	/*
	 * Alright, we are on virtual addresses!
	 * Now, we are going to do an absolute jump to the mapped kernel code
	 * somewhere at 0xc01*****.
	 * Alright, we are in virtual address space! But %eip still points to
	 * low memory (making use of the identity mapping), so we are going to
	 * do an absolute jump to the mapped kernel code somewhere at 0xc01*****.
	 */
	lea 4f, %ecx
	jmp *%ecx
@@ -210,6 +220,8 @@ asmfn_end(_start)
.text

asmfn_begin(_start_virtual)
	/*
	 * Now that we've completely transitioned to high memory, we can remove
	 * the identity mapping because we don't need it anymore.
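The hunk cuts off before the instructions that actually drop the identity mapping, but conceptually the step amounts to clearing page directory entry 0 and reloading cr3. A hedged C-level sketch using names from elsewhere in this commit (the real work happens in assembly inside _start_virtual, and drop_identity_mapping() is a hypothetical helper, not part of the commit):

#include <arch/page.h>

/* pd0 is defined in the mm C file of this commit; vm_flush() comes from the mm
 * header, and memset() is assumed available the same way it is in that file */
extern struct x86_page_directory pd0;

static void drop_identity_mapping(void)
{
	/* entry 0 held the 0x00000000 identity mapping set up by _start */
	memset(&pd0.entries[0], 0, sizeof(pd0.entries[0]));
	vm_flush();   /* reload cr3 so the stale TLB entries go away */
}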
@@ -35,7 +35,10 @@ struct x86_page_table_entry {
 * This may be used outside of `/arch/x86` for ensuring page alignment.
 * Regular code, except for the memory allocator, should never need this.
 */
#define PAGE_SIZE (1 << PAGE_SIZE_LOG2)
#define PAGE_SIZE (1lu << PAGE_SIZE_LOG2)
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))

struct x86_page_table {
	struct x86_page_table_entry entries[1024];
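For clarity, a standalone sketch of what the PAGE_* macros above evaluate to. The macros are copied from the hunk; PAGE_SIZE_LOG2 = 12 is an assumed value, and the pointer is made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_LOG2 12                      /* assumed, matching 4 KiB x86 pages */
#define PAGE_SIZE (1lu << PAGE_SIZE_LOG2)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))

int main(void)
{
	void *p = (void *)0xc0103abc;
	/* PAGE_ALIGN rounds down to the containing page boundary: 0xc0103000 */
	printf("PAGE_SIZE = %lu, %p -> %p\n", PAGE_SIZE, p, PAGE_ALIGN(p));
	return 0;
}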
@@ -67,6 +70,14 @@ struct x86_page_directory {
 */
typedef struct x86_page_directory vm_info_t;

/**
 * @brief Get the physical address a virtual one is currently mapped to.
 *
 * @param virt virtual address
 * @returns The physical address, or `NULL` if there is no mapping
 */
void *virt_to_phys(void *virt);

/*
 * This file is part of GayBSD.
 * Copyright (c) 2021 fef <owo@fef.moe>.
@@ -6,6 +6,13 @@
 * If the bit is set, the page is in use. kmalloc() also just always hands
 * out entire pages, so let's just hope we never need more than PAGE_SIZE bytes
 * of contiguous memory lmao
 *
 * To manipulate the page directory once paging is enabled, we abuse the
 * structural similarity between page directory and page table by mapping the
 * last entry in the page directory to itself. This makes the MMU interpret the
 * page directory as if it were a page table, giving us access to the individual
 * directory entries at `0xffc00000-0xffffffff` virtual. The last page, at
 * address `0xfffff000-0xffffffff`, then points to the page directory itself.
 */

#include <arch/page.h>
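As a reminder of how the bitmap described above is indexed (one byte covers eight pages), here is a standalone sketch of the same byte/bit arithmetic that put_page() uses further down in this file; the page number is made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t page_number = 42;          /* hypothetical page index into the dynamic area */
	size_t index = page_number / 8;   /* which byte of pagemap */
	int bit = page_number % 8;        /* which bit inside that byte */

	uint8_t byte = 0;
	byte |= (uint8_t)(1 << bit);      /* mark allocated */
	byte &= (uint8_t)~(1 << bit);     /* mark free again */
	printf("page %zu -> pagemap[%zu] bit %d, byte ends up %#x\n",
	       page_number, index, bit, (unsigned)byte);
	return 0;
}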
@@ -23,22 +30,33 @@
extern void _image_start_phys;
extern void _image_end_phys;

/* 0 = free, 1 = allocated */
/**
 * @brief Page allocation bitmap.
 * 0 = free, 1 = allocated.
 *
 * The pagemap manipulation code below is specifically kept agnostic to
 * the type of the page map (u8/u16/u32 etc) so we can easily change it later
 * if it has performance benefits (which it almost certainly has)
 */
static u8 *pagemap;
static size_t pagemap_len;
static void *page_start;
static void *page_end;

/* first and last dynamic page address (watch out, these are physical) */
static void *dynpage_start;
static void *dynpage_end;

/**
 * @brief First page table for low memory (0 - 4 M).
 * This is initialized by the early boot routine in assembly so that paging
 * can be enabled (the kernel itself is mapped to `0xc0000000` by default).
 * can be enabled (the kernel itself is mapped to `0xc0100000` by default).
 */
struct x86_page_table pt0;
/** @brief First page directory for low memory. */
struct x86_page_directory pd0;

int kmalloc_init(void *start, void *end)
static void setup_pagemap(void);

int kmalloc_init(void *start_phys, void *end_phys)
{
	/*
	 * if the kernel image is loaded within the paging region (which is
@@ -46,46 +64,74 @@ int kmalloc_init(void *start, void *end)
	 * to the end of the kernel image so we won't hand out pages that
	 * actually store kernel data
	 */
	if (&_image_start_phys >= start && &_image_start_phys <= end)
		start = &_image_end_phys;
	if (&_image_start_phys >= start_phys && &_image_start_phys <= end_phys)
		start_phys = &_image_end_phys;

	page_start = ptr_align(start, PAGE_SIZE_LOG2);
	page_end = ptr_align(end, -PAGE_SIZE_LOG2);
	dynpage_start = ptr_align(start_phys, PAGE_SIZE_LOG2);
	dynpage_end = ptr_align(end_phys, -PAGE_SIZE_LOG2);

	if (page_end - page_start < 1024 * PAGE_SIZE) {
	if (dynpage_end - dynpage_start < 1024 * PAGE_SIZE) {
		kprintf("We have < 1024 pages for kmalloc(), this wouldn't go well\n");
		return -1;
	}
	/*
	 * Add an arbitrary offset to where dynpages actually start.
	 * I have no idea if this is necessary, but i think it might be possible
	 * that grub stores its info tags right after the kernel image which
	 * would blow up _boot(). Until this is resolved, we just throw away
	 * a couple KiB of RAM to be on the safe side. Techbros cope.
	 */
	dynpage_start += 32 * PAGE_SIZE;

	pagemap = start;
	pagemap_len = ((page_end - page_start) / PAGE_SIZE) / 8;
	while (page_start - (void *)pagemap < pagemap_len) {
		page_start += 8 * PAGE_SIZE;
		pagemap_len--;
	}
	setup_pagemap();

	kprintf("Kernel image: %p - %p\n", &_image_start_phys, &_image_end_phys);
	kprintf("Page bitmap: %p - %p\n", pagemap, pagemap + pagemap_len);
	kprintf("Paging area: %p - %p\n", page_start, page_end);
	kprintf("Available memory: %u bytes (%u pages)\n",
		page_end - page_start, (page_end - page_start) / PAGE_SIZE);
		dynpage_end - dynpage_start, (dynpage_end - dynpage_start) / PAGE_SIZE);

	return 0;
}

// int mem_init(void)
// {
// struct x86_page_directory *map = get_page();
// if (map == NULL)
// return -ENOMEM;
// memset(map, 0, sizeof(*map));
// }

void map_page(vm_info_t *map, void *physical, void *virtual, enum mm_flags flags)
int map_page(void *phys, void *virt, enum mm_page_flags flags)
{
# ifdef DEBUG
	if (phys != PAGE_ALIGN(phys))
		kprintf("map_page(): unaligned physical address %p!\n", phys);
	if (virt != PAGE_ALIGN(virt))
		kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif

	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;

	size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
	size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;

	struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
	if (!pd_entry->present) {
		void *page = get_page();
		if (page == NULL)
			return -ENOMEM;
		memset(page, 0, PAGE_SIZE);
		pd_entry->shifted_address = (uintptr_t)page >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
		pd_entry->rw = 1;
		pd_entry->present = 1;
		vm_flush();
	}

	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
	struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
	pt_entry->rw = (flags & MM_PAGE_RW) != 0;
	pt_entry->user = (flags & MM_PAGE_USER) != 0;
	pt_entry->write_through = 0;
	pt_entry->cache_disabled = 0;
	pt_entry->accessed = 0;
	pt_entry->dirty = 0;
	pt_entry->global = 0;
	pt_entry->shifted_address = (uintptr_t)virt >> X86_PAGE_TABLE_ADDRESS_SHIFT;

	pt_entry->present = 1;
	vm_flush();

	return 0;
}

static inline int find_zero_bit(u8 bitfield)
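From the caller's side, the new map_page() pairs naturally with get_page(). A hedged sketch of backing one virtual page with a fresh physical page; the mm header path and the helper name are assumptions, and the errno constant is assumed to be available the same way it is inside map_page() itself:

#include <arch/page.h>      /* vm_flush(), page structures */
#include <mm/kmalloc.h>     /* assumed path of the mm header shown at the end of this commit */

/* hypothetical helper, not part of this commit */
static int map_one_page_rw(void *virt)
{
	void *phys = get_page();           /* physical page from the bitmap */
	if (phys == NULL)
		return -ENOMEM;

	int err = map_page(phys, virt, MM_PAGE_RW);
	if (err != 0) {
		put_page(phys);            /* give the page back on failure */
		return err;
	}
	/* map_page() already calls vm_flush(), so the mapping is usable here */
	return 0;
}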
@@ -107,13 +153,14 @@ void *get_page(void)
	for (size_t i = 0; i < pagemap_len; i++) {
		if (pagemap[i] != 0xff) {
			int bit = find_zero_bit(pagemap[i]);
			if (bit == 8) {
			if (bit <= 8) {
				page = dynpage_start + (i * 8 + bit) * PAGE_SIZE;
				pagemap[i] |= (1 << bit);
			} else {
				kprintf("Throw your computer in the garbage\n");
				break;
			}
			page = page_start + (i * 8 + bit) * PAGE_SIZE;
			pagemap[i] |= (1 << bit);
			break;
		}
	}
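The body of find_zero_bit() falls outside the hunk above. Judging from how get_page() uses it, it presumably returns the index of the lowest clear bit, or 8 when every bit is set (get_page() only calls it when pagemap[i] != 0xff, so that case should not be reached). A sketch of that presumed behavior, not the committed code:

/* presumed behavior of find_zero_bit(): lowest 0 bit, or 8 if the byte is full */
static inline int find_zero_bit(unsigned char bitfield)
{
	int bit;
	for (bit = 0; bit < 8; bit++) {
		if ((bitfield & (1u << bit)) == 0)
			break;
	}
	return bit;
}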
@@ -132,7 +179,7 @@ void put_page(void *page)
		kprintf("Unaligned ptr %p passed to put_page()!\n", page);
		return;
	}
	if (page < page_start || page >= page_end) {
	if (page < dynpage_start || page >= dynpage_end) {
		kprintf("Page %p passed to put_page() is not in the dynamic area!\n", page);
		return;
	}
@@ -142,7 +189,7 @@ void put_page(void *page)
	memset(page, 'A', PAGE_SIZE);
# endif

	size_t page_number = (page - page_start) / PAGE_SIZE;
	size_t page_number = (page - dynpage_start) / PAGE_SIZE;
	size_t index = page_number / 8;
	int bit = page_number % 8;
	if ((pagemap[index] & (1 << bit)) == 0)
@@ -151,6 +198,109 @@ void put_page(void *page)
	pagemap[index] &= ~(1 << bit);
}

void *virt_to_phys(void *virt)
{
	size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
	size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;

	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
	if (!pd->entries[pd_index].present)
		return NULL;

	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
	if (!pt->entries[pt_index].present)
		return NULL;

	uintptr_t address = pt->entries[pt_index].shifted_address << X86_PAGE_TABLE_ADDRESS_SHIFT;
	return (void *)(address + ((uintptr_t)virt & ~PAGE_MASK));
}

void vm_flush(void)
{
	__asm__ volatile(
		"	mov %%cr3, %%eax	\n"
		"	mov %%eax, %%cr3	\n"
		::: "eax"
	);
}

/**
 * So, this is going to be a little awkward. Pretty much the entire mm code
 * depends on the page bitmap, so we can't use any of it before the bitmap is
 * actually present. This means we have to do *everything* by hand here.
 */
static void setup_pagemap(void)
{
	/*
	 * If we blow up the pagemap we blow up the entire system, so we give
	 * it its very own page table and map it somewhere far, far away from
	 * anything else. A page table takes up exactly one page, so we cut
	 * that away from the usable dynamic page area. So these two lines are
	 * basically a replacement for a call to get_page().
	 */
	void *pt_phys = dynpage_start;
	dynpage_start += PAGE_SIZE;

	/*
	 * As described in multiboot.S, the entry in the page directory points
	 * to the page directory itself so we can still manipulate it while we
	 * are in virtual address space. The second-last entry in the page
	 * directory is still free, so we put the page table for the bitmap there.
	 * If you do the math, the page table therefore maps addresses
	 * 0xff800000-0xffbfffff, which is where we start off with the bitmap.
	 */
	pagemap = (u8 *)0xff800000;

	/*
	 * Now that we have a physical page for the page table, we need to
	 * map it to a virtual address so we can fill its entries.
	 * So this is basically a replacement for a call to map_page().
	 */
	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
	pd->entries[1022].shifted_address = (uintptr_t)pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
	pd->entries[1022].rw = 1;
	pd->entries[1022].present = 1;
	vm_flush();

	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[1022];
	memset(pt, 0, sizeof(*pt));

	/*
	 * Alright, now we can actually fill the page table with entries for
	 * the bitmap. Again, we just take away pages from the dynpage area,
	 * until there is enough space. We also need to map those pages to the
	 * virtual address, of course.
	 */
	void *pagemap_phys = dynpage_start;
	size_t pt_index = 0;
	do {
		/*
		 * take one page away from the dynamic area and reserve it for
		 * the bitmap, and recalculate the required bitmap size in bytes
		 */
		dynpage_start += PAGE_SIZE;
		pagemap_len = ((dynpage_end - dynpage_start) / PAGE_SIZE) / 8;

		/* now add a page table entry for that page */
		struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
		uintptr_t address = (uintptr_t)pagemap_phys + pt_index * PAGE_SIZE;
		pt_entry->shifted_address = address >> X86_PAGE_TABLE_ADDRESS_SHIFT;
		pt_entry->present = 1;
		pt_entry->rw = 1;

		pt_index++;
	} while (pagemap_len > (dynpage_start - pagemap_phys) / 8);

	/*
	 * Great! We have enough space for the bitmap, and it is mapped
	 * correctly (at least i hope so). Now all that's left is to flush
	 * the TLB once again to make the updated entries take effect, and
	 * clear the bitmap.
	 */
	vm_flush();
	memset(pagemap, 0, pagemap_len);
}

/*
 * This file is part of GayBSD.
 * Copyright (c) 2021 fef <owo@fef.moe>.
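To get a feel for the do/while sizing loop in setup_pagemap() above, here is a standalone run of the same arithmetic on a made-up 32 MiB dynamic area. The addresses and PAGE_SIZE are assumptions; the loop body and condition mirror the committed code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumed 4 KiB pages */

int main(void)
{
	uintptr_t dynpage_start = 0x00400000, dynpage_end = 0x02400000;  /* 32 MiB area */
	uintptr_t pagemap_phys = dynpage_start;
	size_t pagemap_len = 0, pages_reserved = 0;

	do {
		dynpage_start += PAGE_SIZE;   /* steal one page for the bitmap */
		pages_reserved++;
		pagemap_len = ((dynpage_end - dynpage_start) / PAGE_SIZE) / 8;
	} while (pagemap_len > (dynpage_start - pagemap_phys) / 8);

	/* for this example: 2 pages reserved for a bitmap of 1023 bytes */
	printf("%zu page(s) reserved, pagemap_len = %zu bytes\n",
	       pages_reserved, pagemap_len);
	return 0;
}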
@@ -8,7 +8,7 @@ set_property(CACHE ARCH PROPERTY STRINGS
)
include("${CMAKE_CURRENT_LIST_DIR}/config-${ARCH}.cmake")

set(KERNEL_ORIGIN "0x100000" CACHE STRING "Physical address where the kernel is loaded")
set(KERNEL_ORIGIN "0x100000" CACHE STRING "Physical address where the kernel is loaded (don't touch this)")

set(KERNEL_RELOCATE "0xc0000000" CACHE STRING "Virtual address the kernel is mapped to (don't touch this)")
@@ -45,13 +45,23 @@ void *kmalloc(size_t size, enum mm_flags flags);
 */
void kfree(void *ptr);

enum mm_page_flags {
	MM_PAGE_PRESENT = (1 << 0),
	MM_PAGE_RW = (1 << 1),
	MM_PAGE_USER = (1 << 2),
	MM_PAGE_ACCESSED = (1 << 3),
	MM_PAGE_DIRTY = (1 << 4),
	MM_PAGE_GLOBAL = (1 << 5),
	MM_PAGE_NOCACHE = (1 << 6),
};

/**
 * @brief Get a free memory page.
 *
 * This is only called internally by `kmalloc()`, don't use.
 * Must be deallocated with `put_page()` after use.
 *
 * @returns A pointer to the beginning of the page, or `NULL` if OOM
 * @returns A pointer to the beginning of the (physical) page address, or `NULL` if OOM
 */
void *get_page(void);
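The flag values above are plain single-bit constants, so they can be combined with bitwise OR when calling map_page(); as implemented in this commit, map_page() only inspects MM_PAGE_RW and MM_PAGE_USER. A standalone check of the combined values (the enum is copied from the hunk above):

#include <stdio.h>

enum mm_page_flags {
	MM_PAGE_PRESENT = (1 << 0),
	MM_PAGE_RW = (1 << 1),
	MM_PAGE_USER = (1 << 2),
	MM_PAGE_ACCESSED = (1 << 3),
	MM_PAGE_DIRTY = (1 << 4),
	MM_PAGE_GLOBAL = (1 << 5),
	MM_PAGE_NOCACHE = (1 << 6),
};

int main(void)
{
	/* a kernel read-write mapping vs. a user-accessible one: 0x2 and 0x6 */
	printf("MM_PAGE_RW = %#x, MM_PAGE_RW|MM_PAGE_USER = %#x\n",
	       (unsigned)MM_PAGE_RW, (unsigned)(MM_PAGE_RW | MM_PAGE_USER));
	return 0;
}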
@@ -64,6 +74,22 @@ void *get_page(void);
 */
void put_page(void *page);

/**
 * @brief Map a page in physical memory to a virtual address.
 * Remember that if `vm` is the memory map currently in use, you will most
 * likely need to call `vm_update()` when you've finished mapping everything
 * to flush the TLB.
 *
 * @param phys Physical address of the page
 * @param virt Virtual address to map the page to
 * @param flags Flags to apply to the page
 * @returns 0 on success, or `-ENOMEM?` if OOM (for allocating new page tables)
 */
int map_page(void *phys, void *virt, enum mm_page_flags flags);

/** @brief Flush the TLB. */
void vm_flush(void);

/**
 * @brief Initialize the memory allocator.
 *