/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
|
|
|
|
#pragma once
|
|
|
|
#include <arch/atom.h>
|
|
#include <arch/page.h>
|
|
|
|
#include <gay/cdefs.h>
|
|
#include <gay/clist.h>
|
|
#include <gay/config.h>
|
|
#include <gay/systm.h>
|
|
#include <gay/types.h>
|
|
|
|
/**
 * @brief Attribute bits of a single physical page, as stored in `vm_page::attr`.
 *
 * The union lets the whole attribute set be read and written as one plain
 * `int` (`_val`), which is what the atomic accessors further down operate on,
 * while the anonymous struct provides convenient bitfield access.
 * The `_PGA_*_SHIFT`/`_PGA_*_MASK` macros below must stay in sync with this
 * layout.
 */
union vm_page_attr {
	int _val;	/**< @brief All attributes packed into one integer */
	struct {
		unsigned order:8;	/**< @brief Index into `mm_zones[zone].pools` */
		bool lock:1;	/**< @brief Page is locked */
		bool rsvd:1;	/**< @brief Page is reserved and must never be touched */
		bool pcpu:1;	/**< @brief Page is in a per-cpu cache */
		bool slab:1;	/**< @brief Page is used by the slab allocator */
		unsigned zone:2;	/**< @brief Index into `mm_zones` */
	};
};
/*
 * Shift/mask pairs for manipulating the attribute word as a plain integer.
 * These must stay in sync with the bitfield layout of union vm_page_attr
 * above: order occupies bits 0-7 (hence _PGA_LOCK_SHIFT == 8), followed by
 * the four flag bits and the 2-bit zone index.
 */
#define _PGA_ORDER_SHIFT	0
/* Fix: `order` is an 8-bit field (lock starts at bit 8), so the mask must
 * cover all 8 bits; the previous 0xf silently truncated orders > 15. */
#define _PGA_ORDER_MASK		(0xff << _PGA_ORDER_SHIFT)
#define _PGA_LOCK_SHIFT		8
#define _PGA_LOCK_MASK		(1 << _PGA_LOCK_SHIFT)
#define _PGA_RSVD_SHIFT		9
#define _PGA_RSVD_MASK		(1 << _PGA_RSVD_SHIFT)
#define _PGA_PCPU_SHIFT		10
#define _PGA_PCPU_MASK		(1 << _PGA_PCPU_SHIFT)
#define _PGA_SLAB_SHIFT		11
#define _PGA_SLAB_MASK		(1 << _PGA_SLAB_SHIFT)
#define _PGA_ZONE_SHIFT		12
#define _PGA_ZONE_MASK		(3 << _PGA_ZONE_SHIFT)

typedef union vm_page_attr vm_page_attr_t;
/* defined in kernel/mm/slab.c */
struct slab_pool;

/**
 * @brief Stores information about a single page in physical memory.
 * There is exactly one of these for every physical page, no matter what that
 * page is used for or whether it is usable at all.
 */
struct vm_page {
	/** @brief Reference count (0 = unused, < 0 = locked) */
	atom_t count;
	/** @brief Page attributes, use the macros below to access this */
	atom_t attr;
	/** @brief Page frame number */
	u_long pfn;
	/**
	 * @brief If the page is free, this is its freelist.
	 * If the page is used in the slab allocator, this is the list for the
	 * pool in which it currently resides.
	 */
	struct clist link;
	union {
		/* NOTE(review): presumably only valid while pga_slab(page)
		 * holds — confirm against kernel/mm/slab.c */
		struct {
			void **freelist;	/* head of the object freelist? — confirm */
			struct slab_pool *pool;	/* owning pool, see struct slab_pool */
			u_int entry_size;	/* size of each object in this slab? — confirm */
			u_int free_count;	/* number of free objects? — confirm */
		} slab;
	};
};
/** @brief Sentinel for "no page" (presumably the project's NULL — see gay/cdefs.h). */
#define INVALID_PAGE nil
/** @brief Access the slab allocator's per-page bookkeeping (`vm_page::slab`). */
#define SLAB(page) (&(page)->slab)

/* Guarded so other headers can forward-declare the handle type themselves. */
#ifndef _HAVE_VM_PAGE_T
#define _HAVE_VM_PAGE_T 1
typedef struct vm_page *vm_page_t;
#endif
/** @brief Array of every single page in physical memory, indexed by page frame number. */
extern struct vm_page *const vm_page_array;

#if CFG_DEBUG_PGADDRS
/* One past the last valid entry of vm_page_array (all checks below use `<`). */
extern vm_page_t _vm_page_array_end;
#define PGADDR_ASSERT(x) KASSERT(x)
#else
/* Address sanity checks compile to nothing on non-debug builds
 * (GNU statement expression so the macro stays usable as an expression). */
#define PGADDR_ASSERT(x) ({})
#endif

/** @brief Fill a page with zeroes (size depends on the current page order). */
void page_clear(vm_page_t page);
/**
 * @brief Atomically read the buddy order of a page.
 * @return Index into `mm_zones[zone].pools`.
 */
static inline u8 pga_order(vm_page_t page)
{
	union vm_page_attr attr;
	attr._val = atom_read(&page->attr);
	return attr.order;
}
/**
 * @brief Atomically check whether a page is marked reserved.
 * Reserved pages must never be touched by the allocator.
 */
static inline bool pga_rsvd(vm_page_t page)
{
	union vm_page_attr attr;
	attr._val = atom_read(&page->attr);
	return attr.rsvd;
}
/** @brief Atomically check whether a page currently sits in a per-cpu cache. */
static inline bool pga_pcpu(vm_page_t page)
{
	union vm_page_attr attr;
	attr._val = atom_read(&page->attr);
	return attr.pcpu;
}
/** @brief Atomically check whether a page is owned by the slab allocator. */
static inline bool pga_slab(vm_page_t page)
{
	union vm_page_attr attr;
	attr._val = atom_read(&page->attr);
	return attr.slab;
}
/**
 * @brief Atomically read the memory zone a page belongs to.
 * @return Index into `mm_zones`.
 */
static inline enum mm_zone_type pga_zone(vm_page_t page)
{
	union vm_page_attr attr;
	attr._val = atom_read(&page->attr);
	return attr.zone;
}
/**
 * @brief Atomically update the order field of a page's attributes.
 * Retries the compare/exchange until no other CPU raced with the update.
 * @return The previous order.
 */
static inline u8 pga_set_order(vm_page_t page, u8 order)
{
	spin_loop {
		union vm_page_attr expected;
		union vm_page_attr desired;
		expected._val = atom_read(&page->attr);
		desired = expected;
		desired.order = order;
		if (atom_cmp_xchg(&page->attr, expected._val, desired._val) == expected._val)
			return expected.order;
	}
}
/**
 * @brief Atomically set or clear the per-cpu cache flag of a page.
 * @return Result of the underlying bit op — presumably the previous flag
 *	state; confirm against arch/atom.h.
 */
static inline bool pga_set_pcpu(vm_page_t page, bool pcpu)
{
	if (!pcpu)
		return atom_clr_bit(&page->attr, _PGA_PCPU_SHIFT);
	return atom_set_bit(&page->attr, _PGA_PCPU_SHIFT);
}
/**
 * @brief Atomically set or clear the slab flag of a page.
 * @return Result of the underlying bit op — presumably the previous flag
 *	state; confirm against arch/atom.h.
 */
static inline bool pga_set_slab(vm_page_t page, bool slab)
{
	if (!slab)
		return atom_clr_bit(&page->attr, _PGA_SLAB_SHIFT);
	return atom_set_bit(&page->attr, _PGA_SLAB_SHIFT);
}
/**
 * @brief Atomically update the zone index of a page's attributes.
 * Retries the compare/exchange until no other CPU raced with the update.
 * @return The previous zone.
 */
static inline enum mm_zone_type pga_set_zone(vm_page_t page, enum mm_zone_type zone)
{
	spin_loop {
		union vm_page_attr expected;
		union vm_page_attr desired;
		expected._val = atom_read(&page->attr);
		desired = expected;
		desired.zone = zone;
		if (atom_cmp_xchg(&page->attr, expected._val, desired._val) == expected._val)
			return expected.zone;
	}
}
/**
 * @brief Grab a reference to a page (atomically increments `count`).
 * @return Whatever `atom_inc()` yields, converted to bool —
 *	NOTE(review): exact semantics depend on arch/atom.h; confirm.
 */
static __always_inline bool page_get(vm_page_t page)
{
	return atom_inc(&page->count);
}
/**
 * @brief Drop a reference to a page (atomically decrements `count`).
 * @return Whatever `atom_dec()` yields, converted to bool —
 *	NOTE(review): exact semantics depend on arch/atom.h; confirm.
 */
static __always_inline bool page_put(vm_page_t page)
{
	return atom_dec(&page->count);
}
/* XXX we should probably use a wait queue for these rather than a spinlock like thing */

/**
 * @brief Spin until the page's lock bit could be acquired.
 * NOTE(review): this relies on `atom_set_bit()` returning truthy when the
 * bit was successfully taken (cf. `page_trylock()`) — confirm in arch/atom.h.
 */
static inline void page_lock(vm_page_t page)
{
	spin_loop {
		if (atom_set_bit(&page->attr, _PGA_LOCK_SHIFT))
			break;
	}
}
/** @brief Release the page lock bit taken by `page_lock()`/`page_trylock()`. */
static __always_inline void page_unlock(vm_page_t page)
{
	atom_clr_bit(&page->attr, _PGA_LOCK_SHIFT);
}
/**
 * @brief Attempt to take the page lock without spinning.
 * @return Result of `atom_set_bit()` — by the way `page_lock()` uses it,
 *	truthy presumably means the lock was acquired; confirm in arch/atom.h.
 */
static __always_inline bool page_trylock(vm_page_t page)
{
	return atom_set_bit(&page->attr, _PGA_LOCK_SHIFT);
}
/** @brief Atomically OR @p flag (one of the `_PGA_*_MASK`s) into a page's attributes. */
static inline void __page_set_flag(vm_page_t page, unsigned flag)
{
	atom_or(&page->attr, (int)flag);
}
/** @brief Atomically clear the attribute bits selected by @p mask. */
static inline void __page_clr_flag(vm_page_t page, unsigned mask)
{
	atom_and(&page->attr, (int)~mask);
}
/** @brief Atomically snapshot a page's attribute word into @p attr. */
static __always_inline void page_attr_load(vm_page_attr_t *attr, vm_page_t page)
{
	attr->_val = atom_read(&page->attr);
}
/** @brief Copy one attribute snapshot to another (plain copy, no atomics). */
static __always_inline void page_attr_copy(vm_page_attr_t *dest, const vm_page_attr_t *src)
{
	dest->_val = src->_val;
}
/**
 * @brief Atomically replace a page's attributes if they still equal @p cmp.
 *
 * @param page Page whose attribute word to update
 * @param cmp Expected current attributes
 * @param val New attributes to store on success
 * @return true if the exchange succeeded, false if the attributes changed
 *	in the meantime (caller should reload and retry)
 *
 * Fix: `atom_cmp_xchg()` returns the *previous* value (see how
 * `pga_set_order()`/`pga_set_zone()` above test success with `==`), so the
 * result must be compared against `cmp->_val`.  The old implicit bool
 * conversion reported "success" whenever the previous attribute word
 * happened to be nonzero, regardless of whether the exchange took place.
 */
static __always_inline bool page_attr_cmp_xchg(vm_page_t page, const vm_page_attr_t *cmp,
					       const vm_page_attr_t *val)
{
	return atom_cmp_xchg(&page->attr, cmp->_val, val->_val) == cmp->_val;
}
/**
 * @brief Get the page frame number of a page.
 * @param page Must point into `vm_page_array`
 * @return The page frame number, i.e. the index within `vm_page_array`
 */
__pure2
static inline u_long pg2pfn(vm_page_t page)
{
	PGADDR_ASSERT(page < _vm_page_array_end);
	return page->pfn;
}
/**
|
|
* @brief Get the page that a virtual address points to.
|
|
* The address must point to the DMAP region (i.e. an address that is returned
|
|
* by either `get_pages()` and friends, or `kmalloc()` and friends).
|
|
*/
|
|
__pure2
|
|
static inline vm_page_t vaddr2pg(void *vaddr)
|
|
{
|
|
PGADDR_ASSERT(vaddr >= DMAP_START && vaddr < (void *)_vm_page_array_end);
|
|
uintptr_t offset = (uintptr_t)vaddr - DMAP_OFFSET;
|
|
struct vm_page *page = &vm_page_array[offset >> PAGE_SHIFT];
|
|
return page - page->pfn % (1 << pga_order(page));
|
|
}
|
|
|
|
/**
|
|
* @brief Get the page frame number for a virtual address.
|
|
* The address must point to the DMAP region (i.e. an address that is returned
|
|
* by either `get_pages()` and friends, or `kmalloc()` and friends).
|
|
*/
|
|
__pure2
|
|
static inline u_long vaddr2pfn(void *vaddr)
|
|
{
|
|
u_long pfn = ((uintptr_t)vaddr - DMAP_OFFSET) >> PAGE_SHIFT;
|
|
PGADDR_ASSERT(vaddr >= DMAP_START && &vm_page_array[pfn] < _vm_page_array_end);
|
|
return pfn;
|
|
}
|
|
|
|
/** @brief Get the page frame number for a physical address. */
|
|
__pure2
|
|
static inline u_long paddr2pfn(vm_paddr_t paddr)
|
|
{
|
|
PGADDR_ASSERT(&vm_page_array[paddr >> PAGE_SHIFT] < _vm_page_array_end);
|
|
return paddr >> PAGE_SHIFT;
|
|
}
|
|
|
|
/** @brief Get the page that a physical address belongs to. */
|
|
__pure2
|
|
static inline vm_page_t paddr2pg(vm_paddr_t paddr)
|
|
{
|
|
vm_page_t page = vm_page_array + (paddr >> PAGE_SHIFT);
|
|
PGADDR_ASSERT(page < _vm_page_array_end);
|
|
return page - page->pfn % (1 << pga_order(page));
|
|
}
|
|
|
|
/**
|
|
* @brief Translate a page frame number to its corresponding virtual address
|
|
* in the DMAP region.
|
|
*/
|
|
__pure2
|
|
static inline void *pfn2vaddr(u_long pfn)
|
|
{
|
|
PGADDR_ASSERT(&vm_page_array[pfn] < _vm_page_array_end);
|
|
return DMAP_START + (pfn << PAGE_SHIFT);
|
|
}
|