/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
#pragma once
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
#include <gay/systm.h>
#include <gay/types.h>

/*
* I'm trying really hard to keep the size of struct vm_page a power of two
* on LP64 systems, because that way we can quickly get to the page frame number
* by shifting the byte offset of the vm_page_t in vm_page_array to the right
* rather than doing a costly divide instruction (or storing the page frame
* number within the structure itself, which takes up precious space).
*
* There is insane pressure on the size of this structure, because a typical
* system will have millions of instances of it. Every additional byte makes
* a significant difference in memory management overhead.
*/
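
/*
 * To make the point above concrete (the numbers are only an example and not
 * part of the API): if sizeof(struct vm_page) comes out at 32 bytes on LP64,
 * the pointer subtraction in pg2pfn() below boils down to a cheap shift,
 *
 *	pfn = ((uintptr_t)page - (uintptr_t)vm_page_array) >> 5;
 *
 * whereas a size that is not a power of two would require an integer division.
 */
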
/**
* @brief Stores information about a single page in physical memory.
* There is exactly one of these for every physical page, no matter what that
* page is used for or whether it is usable at all.
*/
struct vm_page {
/** @brief Reference count (0 = unused) */
atom_t count;
/** @brief Allocation order (log2 of the number of pages in the block) */
unsigned order:8;
/** @brief Various flags describing how and for what the page is used, see the `PG_*` constants below */
unsigned flags:24;
/** @brief List link, e.g. for the freelist or per-cpu cache the page is currently on */
struct clist link;
/**
* @brief Optional extra data pointer, reserved for private use.
* The current owner of the page may use this to track the underlying
* object in memory (or pretty much anything else), for example the
* `struct slab` if this page is currently used by the slab allocator.
*/
void *extra;
};

typedef struct vm_page *vm_page_t;

/* values for struct vm_page::flags */
/** @brief Page must never be accessed */
#define PG_RESERVED (1 << 0)
/** @brief Page is in a per-cpu cache */
#define PG_PCPU (1 << 1)
/** @brief Page is used by the slab allocator */
#define PG_SLAB (1 << 2)
/** @brief Page is in `MM_ZONE_DMA`, rather than `MM_ZONE_NORMAL` */
#define PG_DMA (1u << 3)
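
/*
 * Example usage (a sketch, not taken from an actual caller): the PG_* values
 * are plain bit masks within vm_page::flags and can be tested and combined
 * directly, e.g. to check whether a page may be handed out by an allocator:
 *
 *	if (page->flags & (PG_RESERVED | PG_SLAB))
 *		return false;
 */
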
/** @brief Array of every single page in physical memory, indexed by page frame number. */
extern struct vm_page *const vm_page_array;
#if CFG_DEBUG_PGADDRS
extern vm_page_t _vm_page_array_end;
#define PGADDR_ASSERT(x) KASSERT(x)
#else
#define PGADDR_ASSERT(x) ({})
#endif

/** @brief Acquire a reference to a page. */
static inline bool page_get(vm_page_t page)
{
return atom_inc(&page->count);
}

/** @brief Release a reference to a page. */
static inline bool page_put(vm_page_t page)
{
return atom_dec(&page->count);
}
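
/*
 * Sketch of the intended reference counting pattern (do_something_with() is
 * just a placeholder here, and what the boolean return values mean exactly
 * depends on atom_inc()/atom_dec(), which are not defined in this header):
 *
 *	page_get(page);
 *	do_something_with(page);
 *	page_put(page);
 */
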
/** @brief Get the page frame number of a page. */
__pure2
static inline u_long pg2pfn(vm_page_t page)
{
PGADDR_ASSERT(page < _vm_page_array_end);
return page - vm_page_array;
}

/** @brief Get the page that a virtual address in the direct mapping region points into. */
__pure2
static inline vm_page_t vaddr2pg(void *vaddr)
{
PGADDR_ASSERT(vaddr >= DMAP_START && vaddr < (void *)_vm_page_array_end);
uintptr_t offset = (uintptr_t)vaddr - DMAP_OFFSET;
return &vm_page_array[offset >> PAGE_SHIFT];
}

/** @brief Get the page frame number for a virtual address in the direct mapping region. */
__pure2
static inline u_long vaddr2pfn(void *vaddr)
{
u_long pfn = ((uintptr_t)vaddr - DMAP_OFFSET) >> PAGE_SHIFT;
PGADDR_ASSERT(vaddr >= DMAP_START && &vm_page_array[pfn] < _vm_page_array_end);
return pfn;
}

/** @brief Get the page frame number of a physical address. */
__pure2
static inline u_long paddr2pfn(vm_paddr_t paddr)
{
PGADDR_ASSERT(&vm_page_array[paddr >> PAGE_SHIFT] < _vm_page_array_end);
return paddr >> PAGE_SHIFT;
}

/** @brief Get the page that a physical address points into. */
__pure2
static inline vm_page_t paddr2pg(vm_paddr_t paddr)
{
vm_page_t page = vm_page_array + (paddr >> PAGE_SHIFT);
PGADDR_ASSERT(page < _vm_page_array_end);
return page;
}

/** @brief Get the direct-mapped virtual address of a page frame number. */
__pure2
static inline void *pfn2vaddr(u_long pfn)
{
PGADDR_ASSERT(&vm_page_array[pfn] < _vm_page_array_end);
return DMAP_START + (pfn << PAGE_SHIFT);
}