/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
|
|
|
|
#pragma once
|
|
|
|
/**
|
|
* @file include/gay/mm.h
|
|
* @brief Header for dynamic memory management
|
|
*
|
|
 * To avoid possible confusion (and not break 32-bit systems, even though they
|
|
* aren't really supported anyway), physical memory addresses always use type
|
|
* `vm_paddr_t` and virtual ones are `void *`. This should give us at least
|
|
* some type of compiler warning if they are accidentally mixed up.
|
|
*
|
|
* GayBSD uses a classic slab algorithm for its own data structures, which is
|
|
* backed by a buddy page frame allocator. The latter is also used for getting
|
|
* bigger areas of memory that are not physically contiguous (for regular user
|
|
* allocations). The entire physical memory is mapped statically in the range
|
|
* `DMAP_START - DMAP_END`.
|
|
*/
|
|
|
|
#ifdef _KERNEL
|
|
|
|
#include <arch/page.h>
|
|
|
|
#include <gay/cdefs.h>
|
|
#include <gay/kprintf.h>
|
|
#include <gay/types.h>
|
|
|
|
#define _M_ZONE_NORMAL 0
|
|
#define _M_ZONE_DMA 1
|
|
#define _M_ZONE_INDEX(flags) ((flags) & 1)
|
|
|
|
#define _M_EMERG (1 << 1)
|
|
#define _M_NOWAIT (1 << 2)
|
|
|
|
#define MM_ZONE_NORMAL 0
|
|
#define MM_ZONE_DMA 1
|
|
|
|
/**
 * @brief Per-zone state for the buddy page frame allocator.
 *
 * One instance exists per memory zone (see `mm_zones[]` below, indexed by
 * `MM_ZONE_NORMAL` / `MM_ZONE_DMA`).
 */
struct mm_zone {
	/* Head of the free page list; atomic because allocations may race. */
	patom_t freelist;	/* -> struct page */
	/* NOTE(review): unit not shown here -- presumably a page count,
	 * but confirm against the allocator implementation. */
	usize length;
};
|
|
|
|
/**
|
|
* @brief Map of all memory zones.
|
|
*
|
|
* Memory is currently divided into two zones: DMA and normal.
|
|
* The mm subsystem isn't NUMA aware, because it's not really a thing on desktop
|
|
* grade machines anyway and would only complicate things unnecessarily.
|
|
*/
|
|
extern struct mm_zone mm_zones[2];
|
|
|
|
/**
|
|
* @brief Memory allocation flags passed to `kmalloc()`.
|
|
*/
|
|
/*
 * Encoding: bit 0 selects the memory zone (see _M_ZONE_INDEX above, which
 * masks with 1); the remaining bits (_M_EMERG, _M_NOWAIT) are behavior flags
 * that may be OR'ed with a zone selector.
 */
enum mflags {
	/** @brief Use emergency memory reserves if necessary */
	M_EMERG = _M_EMERG,
	/** @brief Don't sleep during the allocation (required for atomic context) */
	M_NOWAIT = _M_NOWAIT,
	/** @brief Regular kernel memory */
	M_KERN = _M_ZONE_NORMAL,
	/** @brief Don't sleep, and use emergency reserves if necessary */
	M_ATOMIC = _M_EMERG | _M_NOWAIT,
	/** @brief Allocate low memory suitable for DMA transfers */
	M_DMA = _M_ZONE_DMA,
};
|
|
|
|
/**
|
|
* @brief Allocate memory.
|
|
*
|
|
* Memory must be released with `kfree()` after use.
|
|
*
|
|
* @param size Memory size in bytes
|
|
* @param flags Allocation flags
|
|
* @returns The allocated memory area, or `NULL` if OOM
|
|
*/
|
|
void *kmalloc(size_t size, enum mflags flags) __malloc_like __alloc_size(1);
|
|
|
|
/**
|
|
* @brief Release memory.
|
|
*
|
|
* @param ptr The pointer returned by `kmalloc()`.
|
|
*/
|
|
void kfree(void *ptr);
|
|
|
|
/**
|
|
* @brief Flags for the paging structures.
|
|
*
|
|
* The macros with two underscores in front of them are defined in `arch/page.h`
|
|
* and match the respective bit positions in the platform's native hardware
|
|
* layout for better performance (no shifting around required).
|
|
*/
|
|
/*
 * Each value aliases a __P_* constant from arch/page.h so that no bit
 * shuffling is needed when writing page table entries.  The __HAVE_*
 * feature macros gate flags that not every platform provides.
 */
enum pflags {
	P_PRESENT = __P_PRESENT,	/**< @brief Page exists */
	P_RW = __P_RW,			/**< @brief Page is writable */
	P_USER = __P_USER,		/**< @brief Page is accessible from ring 3 */
	P_ACCESSED = __P_ACCESSED,	/**< @brief Page has been accessed */
	P_DIRTY = __P_DIRTY,		/**< @brief Page has been written */
	P_GLOBAL = __P_GLOBAL,		/**< @brief The entry survives `vm_flush()` */
	P_NOCACHE = __P_NOCACHE,	/**< @brief The TLB won't cache this entry */
	P_SLAB = __P_SLAB,		/**< @brief Page is used by the slab allocator */
	P_NOSLEEP = __P_ATOMIC,		/**< @brief Page is atomic */
#ifdef __HAVE_HUGEPAGES
	/** @brief This page is `HUGEPAGE_SIZE` bytes long, rather than `PAGE_SIZE` */
	P_HUGE = __P_HUGE,
#endif
#ifdef __HAVE_NOEXEC
	/** @brief No instructions can be fetched from this page */
	P_NOEXEC = __P_NOEXEC,
#endif
};
|
|
|
|
/*
|
|
* Terrible hack that allows us to map pages before the page frame allocator is
|
|
* set up. Don't ever use these anywhere, because they *will* break everything.
|
|
*/
|
|
void __early_map_page(uintptr_t phys, void *virt, enum pflags flags);
|
|
/* This just shrinks phys_end by PAGE_SIZE and returns the page */
|
|
uintptr_t __early_get_page(void);
|
|
|
|
/**
|
|
* @brief Map a page in physical memory to a virtual address.
|
|
* Remember that if `vm` is the memory map currently in use, you will most
|
|
* likely need to call `vm_update()` when you've finished mapping everything
|
|
* to flush the TLB.
|
|
*
|
|
* @param phys Physical address of the page
|
|
* @param virt Virtual address to map the page to
|
|
* @param flags Flags to apply to the page
|
|
* @returns 0 on success, or `-ENOMEM` if OOM (for allocating new page tables)
|
|
*/
|
|
int map_page(uintptr_t phys, void *virt, enum pflags flags);
|
|
|
|
/**
|
|
* @brief Remove a page mapping.
|
|
*
|
|
* @param virt Virtual address the page is mapped to, must be page aligned
|
|
* @returns The physical page address that was being mapped
|
|
*/
|
|
uintptr_t unmap_page(void *virt);
|
|
|
|
/**
|
|
* @brief Get a page's flags in the page tables.
|
|
*
|
|
* @param page Page to get the flags of (if the page is in a hugepage area,
|
|
* the flags for that hugepage will be returned with `P_HUGE = 1`)
|
|
* @return The flags, as currently stored in the page table structures
|
|
* (but not necessarily applied if they have been modified and `vm_flush()`
|
|
* has not been called yet!)
|
|
*/
|
|
enum pflags get_pflags(void *page);
|
|
|
|
/**
|
|
* @brief Update a page's flags in the page tables.
|
|
* You should always use this in conjunction with `get_pflags()`, as in getting
|
|
* the flags first, then toggling the flags you need to, and then setting them
|
|
* in the tables again. This is because this method will clear *any* previous
|
|
* flags.
|
|
*
|
|
* @param page Page to set flags for (if flags has `P_HUGE` set, must be
|
|
* `HUGEPAGE_SIZE` aligned, otherwise `PAGE_SIZE` aligned)
|
|
* @param flags Flags to set
|
|
 * @return 0 on success, or a negative value if a page table allocation
 * failed (TODO: document any other failure modes of this routine)
 */
|
|
int set_pflags(void *page, enum pflags flags);
|
|
|
|
/**
|
|
* @brief Initialize the memory allocator.
|
|
*
|
|
* This can only be called once, from the early `_boot()` routine.
|
|
*
|
|
* @param _phys_start Physical start address of the page area
|
|
* @param _phys_end Physical end address of the page area
|
|
* @returns 0 on success, or -1 if the pointers were garbage
|
|
*/
|
|
int kmalloc_init(uintptr_t _phys_start, uintptr_t _phys_end);
|
|
|
|
/** @brief Start of the mapped, physically contiguous kernel heap */
|
|
extern void *kheap_start;
|
|
/** @brief End of the mapped, physically contiguous kernel heap */
|
|
extern void *kheap_end;
|
|
|
|
/** @brief Start of the kernel heap in physical memory */
|
|
extern uintptr_t phys_start;
|
|
/** @brief End of the kernel heap in physical memory */
|
|
extern uintptr_t phys_end;
|
|
|
|
/**
|
|
* @brief Initialize the buddy page frame allocator.
|
|
* This is only called once, internally from `kmalloc_init()`.
|
|
*
|
|
* @return 0 on success, or -1 if it messed up
|
|
*/
|
|
int pages_init(void);
|
|
|
|
/**
|
|
* @brief Allocate a contiguous region in physical memory.
|
|
* The returned region will be `(1 << order) * PAGE_SIZE` bytes long.
|
|
*
|
|
* @param order Order of magnitude (as in `1 << order`) for the region size
|
|
* @param flags How to allocate (`order` must be 0 if `M_NOWAIT` is specified)
|
|
* @return A pointer to the beginning of the region in the direct mapping area,
|
|
* or `nil` if the allocation failed
|
|
*/
|
|
void *get_pages(int order, enum mflags flags) __malloc_like;
|
|
#ifdef __HAVE_HUGEPAGES
|
|
#define GET_PAGE_ORDERS (HUGEPAGE_SHIFT - PAGE_SHIFT + 1)
|
|
#else
|
|
#define GET_PAGE_ORDERS 10
|
|
#endif
|
|
#define GET_PAGE_MAX_ORDER (GET_PAGE_ORDERS - 1)
|
|
|
|
void free_pages(void *ptr);
|
|
|
|
/**
|
|
* @brief Initialize the slab caches.
|
|
* This is called only once by `kmalloc_init()` after the buddy page frame
|
|
* allocator is initialized.
|
|
*/
|
|
void slab_init(void);
|
|
|
|
/**
|
|
* @brief Return where a physical address maps to in the direct memory area.
|
|
* The returned pointer will be within the range `DMAP_START` (inclusive)
|
|
* and `DMAP_END` (exclusive).
|
|
*
|
|
* @param phys Physical address
|
|
* @return Virtual address
|
|
*/
|
|
static inline void *__v(uintptr_t phys)
|
|
{
|
|
# ifdef DEBUG
|
|
if (phys > phys_end) {
|
|
kprintf("__v(%p): phys ptr out of range!\n", (void *)phys);
|
|
return nil;
|
|
}
|
|
# endif
|
|
return (void *)phys + DMAP_OFFSET;
|
|
}
|
|
|
|
/**
|
|
* @brief Return where a virtual address in the direct mapping region is in
|
|
* physical memory. This does **not** perform a lookup in the page table
|
|
* structures, and should generally only be used from within mm code (hence the
|
|
* two underscores). The reliable way of determining where any virtual address
|
|
* maps to is `vtophys()`.
|
|
*
|
|
* @param virt Virtual address, must be within `DMAP_START - DMAP_END`
|
|
* @return The physical address, i.e. `virt - DMAP_OFFSET`
|
|
* @see vtophys()
|
|
*/
|
|
static inline uintptr_t __p(void *virt)
{
	/* Reverse of __v(): direct-map virtual address -> physical address. */
# ifdef DEBUG
	/*
	 * Debug-only range check: plain subtraction is only meaningful for
	 * addresses inside the direct mapping window.
	 * NOTE(review): returns 0 on failure -- if physical address 0 can
	 * ever be a legitimate result, callers cannot distinguish the error
	 * case; confirm against the callers of __p().
	 */
	if (virt < DMAP_START || virt >= DMAP_END) {
		kprintf("__p(%p): virt ptr out of range!\n", virt);
		return 0;
	}
# endif
	return (uintptr_t)virt - DMAP_OFFSET;
}
|
|
|
|
#endif /* _KERNEL */
|