/* See the end of this file for copyright and license terms. */
#pragma once
/**
* @file include/gay/mm.h
* @brief Header for dynamic memory management
*
* To avoid possible confusion, physical memory addresses always use type
* `uintptr_t` and virtual ones are `void *`. This should give us at least some
* type of compiler warning if they are accidentally mixed up.
*
* GayBSD uses a classic slab algorithm for its own data structures, which is
* backed by a buddy page frame allocator. The latter is also used for getting
* bigger areas of memory that are not physically contiguous (for regular user
* allocations). The entire physical memory is mapped statically in the range
* `DMAP_START - DMAP_END`.
*/
#ifdef _KERNEL
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/types.h>
/**
* @brief Memory allocation flags passed to `kmalloc()`.
*/
/**
 * @brief Memory allocation flags passed to `kmalloc()`.
 *
 * Individual flags may be OR'ed together; `M_KERN` and `M_ATOMIC` are
 * convenience aliases for the most common combinations.
 */
enum mflags {
	/** @brief Request physically contiguous memory (needed for DMA). */
	M_CONTIG = (1 << 0),
	/** @brief Permit dipping into the emergency memory reserves. */
	M_EMERG = (1 << 1),
	/** @brief The allocation must not sleep. */
	M_NOSLEEP = (1 << 2),
	/** @brief Allocate userspace memory. */
	M_USER = (1 << 4),
	/** @brief Regular kernel memory (alias for `M_CONTIG`). */
	M_KERN = M_CONTIG,
	/** @brief Allocation happens in atomic (irq) context: no sleeping, reserves allowed. */
	M_ATOMIC = M_EMERG | M_NOSLEEP,
};
/**
* @brief Allocate memory.
*
* Memory must be released with `kfree()` after use.
*
* @param size Memory size in bytes
* @param flags Allocation flags
* @returns The allocated memory area, or `NULL` if OOM
*/
void *kmalloc(size_t size, enum mflags flags) __malloc_like __alloc_size(1);
/**
* @brief Release memory.
*
* @param ptr The pointer returned by `kmalloc()`.
*/
void kfree(void *ptr);
/**
* @brief Flags for the paging structures.
*
* The macros with two underscores in front of them are defined in `arch/page.h`
* and match the respective bit positions in the platform's native hardware
* layout for better performance (no shifting around required).
*/
/**
 * @brief Flags for the paging structures.
 *
 * The double-underscore macros come from `arch/page.h` and match the bit
 * positions of the platform's native hardware page table layout, so no
 * shifting is required when translating between the two.
 */
enum pflags {
	P_PRESENT = __PFLAG_PRESENT,	/**< @brief Page exists */
	P_RW = __PFLAG_RW,		/**< @brief Page is writable */
	P_USER = __PFLAG_USER,		/**< @brief Page is accessible from ring 3 */
	P_ACCESSED = __PFLAG_ACCESSED,	/**< @brief Page has been accessed */
	P_DIRTY = __PFLAG_DIRTY,	/**< @brief Page has been written */
	P_GLOBAL = __PFLAG_GLOBAL,	/**< @brief The entry survives `vm_flush()` */
	P_NOCACHE = __PFLAG_NOCACHE,	/**< @brief The TLB won't cache this entry */
	P_SLAB = __PFLAG_SLAB,		/**< @brief Page is used by the slab allocator */
	P_NOSLEEP = __PFLAG_ATOMIC,	/**< @brief Page is atomic */
#ifdef __HAVE_HUGEPAGES
	/** @brief This page is `HUGEPAGE_SIZE` bytes long, rather than `PAGE_SIZE` */
	P_HUGE = __PFLAG_HUGE,
#endif
#ifdef __HAVE_NOEXEC
	/** @brief No instructions can be fetched from this page */
	P_NOEXEC = __PFLAG_NOEXEC,
#endif
};
/*
* Terrible hack that allows us to map pages before the page frame allocator is
* set up. Don't ever use these anywhere, because they *will* break everything.
*/
void __early_map_page(uintptr_t phys, void *virt, enum pflags flags);
/* This just shrinks phys_end by PAGE_SIZE and returns the page */
uintptr_t __early_get_page(void);
/**
* @brief Map a page in physical memory to a virtual address.
* Remember that if `vm` is the memory map currently in use, you will most
* likely need to call `vm_update()` when you've finished mapping everything
* to flush the TLB.
*
* @param phys Physical address of the page
* @param virt Virtual address to map the page to
* @param flags Flags to apply to the page
* @returns 0 on success, or `-ENOMEM` if OOM (for allocating new page tables)
*/
int map_page(uintptr_t phys, void *virt, enum pflags flags);
/**
* @brief Remove a page mapping.
*
* @param virt Virtual address the page is mapped to, must be page aligned
* @returns The physical page address that was being mapped
*/
uintptr_t unmap_page(void *virt);
/**
* @brief Get a page's flags in the page tables.
*
* @param page Page to get the flags of (if the page is in a hugepage area,
* the flags for that hugepage will be returned with `P_HUGE = 1`)
* @return The flags, as currently stored in the page table structures
* (but not necessarily applied if they have been modified and `vm_flush()`
* has not been called yet!)
*/
enum pflags get_pflags(void *page);
/**
* @brief Update a page's flags in the page tables.
* You should always use this in conjunction with `get_pflags()`, as in getting
* the flags first, then toggling the flags you need to, and then setting them
* in the tables again. This is because this method will clear *any* previous
* flags.
*
* @param page Page to set flags for (if flags has `P_HUGE` set, must be
* `HUGEPAGE_SIZE` aligned, otherwise `PAGE_SIZE` aligned)
* @param flags Flags to set
 * @return 0 on success, or a negative value if either a page table allocation
 * failed or the new flags could not be applied
 */
int set_pflags(void *page, enum pflags flags);
/** @brief Flush the TLB. */
void vm_flush(void);
/**
* @brief Initialize the memory allocator.
*
* This can only be called once, from the early `_boot()` routine.
*
* @param _phys_start Physical start address of the page area
* @param _phys_end Physical end address of the page area
* @returns 0 on success, or -1 if the pointers were garbage
*/
int kmalloc_init(uintptr_t _phys_start, uintptr_t _phys_end);
/** @brief Start of the mapped, physically contiguous kernel heap */
extern void *kheap_start;
/** @brief End of the mapped, physically contiguous kernel heap */
extern void *kheap_end;
/** @brief Start of the kernel heap in physical memory */
extern uintptr_t phys_start;
/** @brief End of the kernel heap in physical memory */
extern uintptr_t phys_end;
/**
* @brief Initialize the buddy page frame allocator.
* This is only called once, internally from `kmalloc_init()`.
*
* @return 0 on success, or -1 if it messed up
*/
int pages_init(void);
/**
* @brief Allocate a contiguous region in physical memory.
* The returned region will be `(1 << order) * PAGE_SIZE` bytes long.
*
* @param order Order of magnitude (as in `1 << order`) for the region size
* @param flags How to allocate (`order` must be 0 if `M_NOSLEEP` is specified)
* @return A pointer to the beginning of the region in the direct mapping area,
* or `nil` if the allocation failed
*/
void *get_pages(int order, enum mflags flags) __malloc_like;
#ifdef __HAVE_HUGEPAGES
#define GET_PAGE_ORDERS (HUGEPAGE_SHIFT - PAGE_SHIFT + 1)
#else
#define GET_PAGE_ORDERS 10
#endif
#define GET_PAGE_MAX_ORDER (GET_PAGE_ORDERS - 1)
void free_pages(void *ptr);
/**
* @brief Initialize the slab caches.
* This is called only once by `kmalloc_init()` after the buddy page frame
* allocator is initialized.
*/
void slab_init(void);
/**
* @brief Return where a physical address maps to in the direct memory area.
* The returned pointer will be within the range `DMAP_START` (inclusive)
* and `DMAP_END` (exclusive).
*
* @param phys Physical address
* @return Virtual address
*/
static inline void *__v(uintptr_t phys)
{
# ifdef DEBUG
	/*
	 * NOTE(review): only the upper bound is validated; a phys address
	 * below phys_start would also be outside the direct map -- confirm
	 * whether that case can occur here.
	 */
	if (phys > phys_end) {
		kprintf("__v(%p): phys ptr out of range!\n", (void *)phys);
		return nil;
	}
# endif
	/* arithmetic on void * is a GNU C extension (byte granularity) */
	return (void *)phys + DMAP_OFFSET;
}
/**
* @brief Return where a virtual address in the direct mapping region is in
* physical memory. This does **not** perform a lookup in the page table
* structures, and should generally only be used from within mm code (hence the
* two underscores). The reliable way of determining where any virtual address
* maps to is `vtophys()`.
*
* @param virt Virtual address, must be within `DMAP_START - DMAP_END`
* @return The physical address, i.e. `virt - DMAP_OFFSET`
* @see vtophys()
*/
static inline uintptr_t __p(void *virt)
{
# ifdef DEBUG
if (virt < DMAP_START || virt >= DMAP_END) {
kprintf("__p(%p): virt ptr out of range!\n", virt);
return 0;
}
# endif
return (uintptr_t)virt - DMAP_OFFSET;
}
#endif /* _KERNEL */
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/