/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
#pragma once
/**
* @file include/gay/mm.h
* @brief Header for dynamic memory management
*
* To avoid possible confusion (and not break systems where virtual addresses
* are narrower than physical ones, like IA-32 with PAE), physical memory
* addresses always use type `vm_paddr_t`, and virtual ones are `void *`.
* This should give us at least some kind of compiler warning if the two are
* accidentally mixed up.
*
* GayBSD uses a classic slab allocator for its own data structures, which is
* backed by a buddy page frame allocator. The latter is also used for
* building larger memory areas that need not be physically contiguous (for
* regular user allocations). The entire physical memory is statically mapped
* into the range `DMAP_START - DMAP_END`, which makes it easier to clear
* pages that have no specific mapping, even though regular code outside the
* mm subsystem should be completely oblivious to this fact.
*
* Memory is split up into (currently) two zones: `MM_ZONE_NORMAL` and
* `MM_ZONE_DMA`. As their names suggest, the former is for general purpose
* allocations and the latter for getting memory suitable for DMA transfers.
* Zones are further divided into pools, each of which holds a list of groups
* of free pages. The size of these page groups is determined by the pool's
* order, where the pool of order `n` holds groups of `1 << n` pages.
*
* The mm subsystem needs to allocate memory for initializing itself.
* Therefore, there is an additional boot page frame allocator, which gets its
* free areas from architecture-dependent code (`arch/mm/.../init.c`).
*/
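/*
 * To illustrate the zone/pool layout described above: a pool of order n
 * manages groups of (1 << n) physically contiguous pages. A sketch of the
 * resulting group size (illustration only, not part of the API;
 * pool_group_bytes() is hypothetical, PAGE_SIZE comes from <arch/page.h>):
 *
 *	static inline usize pool_group_bytes(u_int order)
 *	{
 *		// order 0 = 1 page, ..., order 9 (MM_MAX_ORDER) = 512 pages
 *		return ((usize)1 << order) * PAGE_SIZE;
 *	}
 */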
#ifdef _KERNEL
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mutex.h>
#include <gay/types.h>
#include <string.h>
#define _M_ZONE_DMA 0
#define _M_ZONE_NORMAL 1
/* we use 2 bits because there will likely be additional zones in the future */
#define _M_ZONE_INDEX(flags) ((flags) & 3)
#define _M_EMERG (1 << 2)
#define _M_NOWAIT (1 << 3)
#ifndef _HAVE_VM_PAGE_T
#define _HAVE_VM_PAGE_T 1
struct vm_page;
typedef struct vm_page *vm_page_t;
#endif
enum mm_zone_type {
	MM_ZONE_DMA = _M_ZONE_DMA,
	MM_ZONE_NORMAL = _M_ZONE_NORMAL,
	MM_NR_ZONES = 2
};
/** @brief Boot memory area. */
struct _bmem_area {
	struct clist link;	/* -> struct mm_zone::_bmem_areas */
	vm_paddr_t start;
	vm_paddr_t end;
};
struct mm_pool {
	struct clist freelist;	/* -> vm_page_t::link */
	/** @brief Number of items in `freelist`. */
	usize free_entries;
	/** @brief One bit per buddy *pair*, 1 if exactly one is allocated. */
	latom_t *bitmap;
	spin_t lock;
};
#define MM_NR_ORDERS 10
#define MM_MAX_ORDER (MM_NR_ORDERS - 1)
struct mm_zone {
	/** @brief Current number of free pages in all pools */
	latom_t free_count;
	/** @brief Thresholds for OOM behavior */
	struct {
		/** @brief Minimum number of pages reserved for emergency allocations */
		long emerg;
	} thrsh;
	struct mm_pool pools[MM_NR_ORDERS];
	struct clist _bmem_areas;	/* -> struct _bmem_area */
};
/**
* @brief Map of all memory zones.
*
* Memory is currently divided into two zones: DMA and normal.
* The mm subsystem isn't NUMA-aware because NUMA isn't really a thing on
* desktop-grade machines anyway and would only complicate things unnecessarily.
*/
extern struct mm_zone mm_zones[MM_NR_ZONES]; /* kernel/mm/page.c */
/**
* @brief Memory allocation flags commonly used by all allocators.
* All of them are eventually passed down to `page_alloc()`, the physical page
* frame allocator.
*/
enum mflags {
	/** @brief Use emergency memory reserves if necessary */
	M_EMERG = _M_EMERG,
	/** @brief Don't sleep during the allocation (required for atomic context) */
	M_NOWAIT = _M_NOWAIT,
	/** @brief Regular kernel memory */
	M_KERN = _M_ZONE_NORMAL,
	/** @brief Don't sleep, and use emergency reserves if necessary */
	M_ATOMIC = _M_EMERG | _M_NOWAIT,
	/** @brief Allocate low memory suitable for DMA transfers */
	M_DMA = _M_ZONE_DMA,
};
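/*
 * Illustration (not part of the API): allocation flags are OR'ed together,
 * with the two low bits selecting the zone (see _M_ZONE_INDEX() above).
 * A hypothetical driver needing a DMA-capable buffer in atomic context
 * might do:
 *
 *	void *buf = kmalloc(512, M_DMA | M_NOWAIT);
 */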
/** @brief Initialize the slab allocator. */
void kmalloc_init(void);
/**
* @brief Allocate memory.
*
* Memory must be released with `kfree()` after use.
*
* @param size Memory size in bytes
* @param flags Allocation flags
* @return The allocated memory area, or `NULL` if OOM
*/
void *kmalloc(size_t size, enum mflags flags) __malloc_like __alloc_size(1);
/**
* @brief Release memory.
*
* @param ptr The pointer returned by `kmalloc()`.
*/
void kfree(void *ptr);
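/*
 * Minimal usage sketch for kmalloc()/kfree() (struct foo is hypothetical):
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), M_KERN);
 *	if (foo == NULL)
 *		return;		// OOM
 *	// ... use foo ...
 *	kfree(foo);
 */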
/**
* @brief Initialize the buddy page frame allocator.
* This is called only once, from the arch-dependent counterpart, after it has
* reserved memory for and mapped `vm_page_array`, as well as mapped the
* direct mapping area.
*/
void paging_init(vm_paddr_t phys_end);
/**
* @brief Allocate a physically contiguous region of memory.
* The returned region will be `(1 << order) * PAGE_SIZE` bytes long.
* Unlike `get_pages()`, the pages are *not* initialized with zeroes.
*
* @param order Allocation order (as in `1 << order` pages)
* @param flags How to allocate
* @return The page group that was allocated (evaluates false on failure)
*/
vm_page_t page_alloc(u_int order, enum mflags flags) __malloc_like;
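/*
 * Sketch: allocate a group of 4 physically contiguous pages (order 2) and
 * release it again; as documented above, the return value evaluates false
 * on failure:
 *
 *	vm_page_t group = page_alloc(2, M_KERN);
 *	if (!group)
 *		return;		// OOM
 *	// ... use the pages ...
 *	page_free(group);
 */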
/**
* @brief Allocate and map a physically contiguous region of memory.
* The returned region will be `(1 << order) * PAGE_SIZE` bytes long
* and initialized with zeroes.
*
* If filling the pages with zeroes takes too much time, use `page_alloc()`
* instead, but only if you're careful and the memory is not destined for
* user space.
*
* @param order Allocation order (as in `1 << order` pages)
* @param flags How to allocate
* @return A pointer to the beginning of the region in the direct mapping area,
* or `nil` if the allocation failed
*/
void *get_pages(u_int order, enum mflags flags) __malloc_like;
/** @brief Alias for `get_pages(0, flags)`. */
void *get_page(enum mflags flags) __malloc_like;
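/*
 * Sketch: get a zeroed, physically contiguous region of 2 pages (order 1,
 * i.e. 2 * PAGE_SIZE bytes) through the direct mapping area:
 *
 *	void *buf = get_pages(1, M_KERN);
 *	if (buf == nil)
 *		return;		// OOM
 */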
/** @brief Free a page group obtained from `page_alloc()`. */
void page_free(vm_page_t page);
/**
* @brief Initialize the slab caches.
* This is called only once by `kmalloc_init()` after the buddy page frame
* allocator is initialized.
*/
void slab_init(void);
/**
* @brief Return where a physical address maps to in the direct mapping area.
* The returned pointer will be within the range `DMAP_START` (inclusive)
* and `DMAP_END` (exclusive).
*
* @param phys Physical address
* @return Virtual address
*/
static inline void *__v(vm_paddr_t phys)
{
	return (void *)phys + DMAP_OFFSET;
}
/**
* @brief Return where a virtual address in the direct mapping region is in
* physical memory. This does **not** perform a lookup in the page table
* structures, and should generally only be used from within mm code (hence the
* two underscores). The reliable way of determining where any virtual address
* maps to is `vtophys()`.
*
* @param virt Virtual address, must be within `DMAP_START - DMAP_END`
* @return The physical address, i.e. `virt - DMAP_OFFSET`
* @see vtophys()
*/
static inline vm_paddr_t __p(void *virt)
{
# ifdef DEBUG
	if (virt < DMAP_START || virt >= DMAP_END) {
		kprintf("__p(%p): virt ptr out of range!\n", virt);
		return 0;
	}
# endif
	return (uintptr_t)virt - DMAP_OFFSET;
}
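/*
 * Note (illustrative): within the direct mapping area, __v() and __p() are
 * exact inverses of each other:
 *
 *	void *virt = __v(phys);		// phys: any mapped physical address
 *	vm_paddr_t back = __p(virt);
 *	// back == phys holds here
 */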
/*
* Boot page frame allocator stuff, don't use these in regular code
*/
/** @brief Initialize the boot page frame allocator (called from `<arch>_paging_init()`) */
void __boot_pmalloc_init(void);
/**
* @brief Tell the boot page frame allocator about a free area in RAM.
* The area may overlap with the kernel image; this is checked automatically.
*/
void __boot_register_mem_area(vm_paddr_t start, vm_paddr_t end, enum mm_zone_type zone_type);
/**
* @brief Allocate a physical memory area.
*
* @param log2 Binary logarithm of the desired allocation size (must be `>= PAGE_SHIFT`)
* @param zone_type What zone to allocate from (you always want `MM_ZONE_NORMAL`)
* @return Allocated region (will be aligned to at least its own size),
* or `BOOT_PMALLOC_ERR` if the request could not be satisfied, either
* due to OOM or because the alignment constraints could not be met
*/
vm_paddr_t __boot_pmalloc(u_int log2, enum mm_zone_type zone_type);
#define BOOT_PMALLOC_ERR ((vm_paddr_t)0 - 1)
/**
* @brief Zero out a single physical page.
* @param addr Physical address of the page in memory (must be page aligned, obviously)
*/
void __boot_clear_page(vm_paddr_t addr); /* implemented in arch dependent code */
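/*
 * Illustrative early-boot sequence (normally only arch init code does this):
 * allocate one page worth of physical memory and clear it before the regular
 * allocators are up. PAGE_SHIFT is assumed to come from <arch/page.h>.
 *
 *	vm_paddr_t page = __boot_pmalloc(PAGE_SHIFT, MM_ZONE_NORMAL);
 *	if (page != BOOT_PMALLOC_ERR)
 *		__boot_clear_page(page);
 */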
#endif /* _KERNEL */