/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
#pragma once
/**
* @file include/gay/mm.h
* @brief Header for dynamic memory management
*
* To avoid possible confusion (and not break 32-bit systems, even though they
* aren't really supported anyway), physical memory addresses always use the
* type `vm_paddr_t`, and virtual ones are `void *`. This should give us at
* least some kind of compiler warning if the two are accidentally mixed up.
*
* GayBSD uses a classic slab algorithm for its own data structures, which is
* backed by a buddy page frame allocator. The latter is also used for getting
* bigger areas of memory that need not be physically contiguous (e.g. for
* regular user allocations). The entire physical memory is statically mapped
* in the range `DMAP_START - DMAP_END`.
*
* Memory is split up into (currently) two zones: `MM_ZONE_NORMAL` and
* `MM_ZONE_DMA`. As their names suggest, the former is for general purpose
* allocations and the latter for getting memory suitable for DMA transfers.
* Zones are further divided into pools, each of which holds a list of groups of
* free pages. The size of these page groups is determined by the pool's order,
* where the pool of order `n` holds groups of `1 << n` pages.
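*
* For example, with a `PAGE_SIZE` of 4096 bytes, the pool of order 3 holds
* groups of `1 << 3 = 8` contiguous free pages, i.e. 32 KiB chunks.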
*/
#ifdef _KERNEL
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mutex.h>
#include <gay/types.h>
#include <string.h>
#define _M_ZONE_NORMAL 0
#define _M_ZONE_DMA 1
#define _M_ZONE_INDEX(flags) ((flags) & 1)
#define _M_EMERG (1 << 1)
#define _M_NOWAIT (1 << 2)
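
/*
 * The zone index lives in the lowest bit and the behavioral flags sit above
 * it; for example, `_M_ZONE_INDEX(_M_ZONE_DMA | _M_EMERG)` yields `_M_ZONE_DMA`.
 */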
enum mm_zone_type {
MM_ZONE_NORMAL = _M_ZONE_NORMAL,
MM_ZONE_DMA = _M_ZONE_DMA,
MM_NR_ZONES
};
/** @brief Boot memory area. */
struct _bmem_area {
struct clist link; /* -> struct mm_zone::_bmem_areas */
vm_paddr_t start;
vm_paddr_t end;
};
struct mm_pool {
struct clist freelist; /* -> vm_page_t::link */
/** @brief Number of items in `freelist`. */
usize free_entries;
/** @brief One bit per buddy *pair*, 1 if exactly one is allocated. */
latom_t *bitmap;
spin_t lock;
};
#define MM_NR_ORDERS 10
#define MM_MAX_ORDER (MM_NR_ORDERS - 1)
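
/* With 4 KiB pages, the largest pool (order 9) thus holds 2 MiB page groups. */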
struct mm_zone {
/** @brief Current number of free pages in all pools */
latom_t free_count;
/** @brief Thresholds for OOM behavior */
struct {
/** @brief Minimum number of pages reserved for emergency allocations */
u_long emerg;
} thrsh;
struct mm_pool pools[MM_NR_ORDERS];
struct clist _bmem_areas; /* -> struct _bmem_area */
};
/**
* @brief Map of all memory zones.
*
* Memory is currently divided into two zones: DMA and normal.
* The mm subsystem isn't NUMA aware, because NUMA isn't really a thing on
* desktop-grade machines anyway and would only complicate things unnecessarily.
*/
extern struct mm_zone mm_zones[MM_NR_ZONES]; /* kernel/mm/page.c */
/**
* @brief Memory allocation flags passed to `kmalloc()`.
*/
enum mflags {
/** @brief Use emergency memory reserves if necessary */
M_EMERG = _M_EMERG,
/** @brief Don't sleep during the allocation (required for atomic context) */
M_NOWAIT = _M_NOWAIT,
/** @brief Regular kernel memory */
M_KERN = _M_ZONE_NORMAL,
/** @brief Don't sleep, and use emergency reserves if necessary */
M_ATOMIC = _M_EMERG | _M_NOWAIT,
/** @brief Allocate low memory suitable for DMA transfers */
M_DMA = _M_ZONE_DMA,
};
/**
* @brief Allocate memory.
*
* Memory must be released with `kfree()` after use.
*
* @param size Memory size in bytes
* @param flags Allocation flags
* @returns The allocated memory area, or `NULL` if OOM
*/
void *kmalloc(size_t size, enum mflags flags) __malloc_like __alloc_size(1);
/**
* @brief Release memory.
*
* @param ptr The pointer returned by `kmalloc()`.
*/
void kfree(void *ptr);
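
/*
 * A minimal usage sketch; `struct foo` and the `-ENOMEM` error path are
 * hypothetical, not part of this API:
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), M_KERN);
 *	if (foo == NULL)
 *		return -ENOMEM;
 *	...
 *	kfree(foo);
 *
 * In atomic context, use `M_ATOMIC` (no sleeping, emergency reserves allowed)
 * rather than plain `M_KERN`.
 */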
/**
* @brief Flags for the paging structures.
*
* The macros with two underscores in front of them are defined in `arch/page.h`
* and match the respective bit positions in the platform's native hardware
* layout for better performance (no shifting around required).
*/
enum pflags {
P_PRESENT = __P_PRESENT, /**< @brief Page exists */
P_RW = __P_RW, /**< @brief Page is writable */
P_USER = __P_USER, /**< @brief Page is accessible from ring 3 */
P_ACCESSED = __P_ACCESSED, /**< @brief Page has been accessed */
P_DIRTY = __P_DIRTY, /**< @brief Page has been written */
P_GLOBAL = __P_GLOBAL, /**< @brief The entry survives `vm_flush()` */
P_NOCACHE = __P_NOCACHE, /**< @brief The TLB won't cache this entry */
P_SLAB = __P_SLAB, /**< @brief Page is used by the slab allocator */
P_NOSLEEP = __P_ATOMIC, /**< @brief Page is atomic */
#ifdef __HAVE_HUGEPAGES
/** @brief This page is `HUGEPAGE_SIZE` bytes long, rather than `PAGE_SIZE` */
P_HUGE = __P_HUGE,
#endif
#ifdef __HAVE_NOEXEC
/** @brief No instructions can be fetched from this page */
P_NOEXEC = __P_NOEXEC,
#endif
};
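
/*
 * For example, a writable kernel mapping that survives `vm_flush()` would
 * combine `P_PRESENT | P_RW | P_GLOBAL`. The `vm_map_page()` helper below is
 * hypothetical; this header only defines the flag bits themselves:
 *
 *	vm_map_page(virt, phys, P_PRESENT | P_RW | P_GLOBAL);
 */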
/**
* @brief Initialize the buddy page frame allocator.
* This is only called once, from the arch-dependent counterpart, after it has
* reserved memory for and mapped `vm_page_array`, as well as mapped the
* direct mapping area.
*/
void paging_init(vm_paddr_t phys_end);
/**
* @brief Allocate a contiguous region in physical memory.
* The returned region will be `(1 << order) * PAGE_SIZE` bytes long.
*
* **The pages are not initialized.**
* If you want zeroed pages, use `get_zero_pages()`.
*
* @param order Order of the allocation (the region will span `1 << order` pages)
* @param flags How to allocate
* @return A pointer to the beginning of the region in the direct mapping area,
* or `NULL` if the allocation failed
*/
void *get_pages(u_int order, enum mflags flags) __malloc_like;
void *get_page(enum mflags flags) __malloc_like;
void *get_zero_pages(u_int order, enum mflags flags) __malloc_like;
void *get_zero_page(enum mflags flags) __malloc_like;
void free_pages(void *ptr);
#define free_page(ptr) free_pages(ptr)
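
/*
 * A sketch of allocating and releasing a zeroed, physically contiguous
 * region of `1 << 2 = 4` pages (the error handling is hypothetical):
 *
 *	void *buf = get_zero_pages(2, M_KERN);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	// buf points into the direct mapping area
 *	free_pages(buf);
 */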
/**
* @brief Initialize the slab caches.
* This is called only once by `kmalloc_init()` after the buddy page frame
* allocator is initialized.
*/
void slab_init(void);
/**
* @brief Return where a physical address maps to in the direct memory area.
* The returned pointer will be within the range `DMAP_START` (inclusive)
* and `DMAP_END` (exclusive).
*
* @param phys Physical address
* @return Virtual address
*/
static inline void *__v(vm_paddr_t phys)
{
return (void *)(phys + DMAP_OFFSET);
}
/**
* @brief Return where a virtual address in the direct mapping region is in
* physical memory. This does **not** perform a lookup in the page table
* structures, and should generally only be used from within mm code (hence the
* two underscores). The reliable way of determining where any virtual address
* maps to is `vtophys()`.
*
* @param virt Virtual address, must be within `DMAP_START - DMAP_END`
* @return The physical address, i.e. `virt - DMAP_OFFSET`
* @see vtophys()
*/
static inline vm_paddr_t __p(void *virt)
{
# ifdef DEBUG
if (virt < DMAP_START || virt >= DMAP_END) {
kprintf("__p(%p): virt ptr out of range!\n", virt);
return 0;
}
# endif
return (uintptr_t)virt - DMAP_OFFSET;
}
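
/*
 * Within the direct mapping area, the two helpers are exact inverses.
 * A round-trip sketch (`some_paddr` is a hypothetical physical address):
 *
 *	void *virt = __v(some_paddr);
 *	vm_paddr_t back = __p(virt); // back == some_paddr
 */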
/*
* Boot page frame allocator stuff, don't use these in regular code
*/
/** @brief Initialize the boot page frame allocator (called from `<arch>_paging_init()`) */
void __boot_pmalloc_init(void);
/**
* @brief Tell the boot page frame allocator about a free area in RAM.
* The area may overlap with the kernel image; this is checked automatically.
*/
void __boot_register_mem_area(vm_paddr_t start, vm_paddr_t end, enum mm_zone_type zone_type);
/**
* @brief Allocate a physical memory area.
*
* @param log2 Binary logarithm of the desired allocation size (must be `>= PAGE_SHIFT`)
* @param zone_type What zone to allocate from (you always want `MM_ZONE_NORMAL`)
* @return Allocated region (will be aligned to at least its own size),
* or `BOOT_PMALLOC_ERR` if the request could not be satisfied, either
* due to OOM or because the alignment constraint could not be met
*/
vm_paddr_t __boot_pmalloc(u_int log2, enum mm_zone_type zone_type);
#define BOOT_PMALLOC_ERR ((vm_paddr_t)0 - 1)
/**
* @brief Zero out a single physical page.
* @param addr Physical address of the page in memory (must be page aligned, obviously)
*/
void __boot_clear_page(vm_paddr_t addr); /* implemented in arch dependent code */
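
/*
 * Early boot usage sketch; the address range is hypothetical, and the real
 * callers live in the arch-dependent paging setup:
 *
 *	__boot_pmalloc_init();
 *	__boot_register_mem_area(0x100000, 0x7fe0000, MM_ZONE_NORMAL);
 *	vm_paddr_t page = __boot_pmalloc(PAGE_SHIFT, MM_ZONE_NORMAL);
 *	if (page != BOOT_PMALLOC_ERR)
 *		__boot_clear_page(page);
 */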
#endif /* _KERNEL */