/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
#pragma once
#ifndef _ARCH_PAGE_H_
#error "This file is not meant to be included directly, use <arch/page.h>"
#endif
/*
* Common abbreviations used throughout the entire x86 vm code base:
* PT - Page Table
* PDT - Page Directory Table
* PDPT - Page Directory Pointer Table
* PML4T - Page Map Level 4 Table
* PTE - Page Table Entry
* PDTE - Page Directory Table Entry
* PDPTE - Page Directory Pointer Table Entry
* PML4TE - Page Map Level 4 entry
* PTI - Page Table Index (range 0 - 511)
* PDTI - Page Directory Table Index (range 0 - 511)
* PDPTI - Page Directory Pointer Table Index (range 0 - 511)
* PML4TI - Page Map Level 4 Index (range 0 - 511)
*
* Quick recap on how the x86 transes virtual to physical addresses:
*
* |63 48|47 39|38 30|29 21|20 12|11 0|
* +------------------+-----------+-----------+-----------+-----------+--------------+
* | 16 bits | 9 bits | 9 bits | 9 bits | 9 bits | 12 bits |
* +------------------+-----------+-----------+-----------+-----------+--------------+
* (1) | (copy of bit 47) | PML4T | PDPT | PDT | PT | offset (4 K) |
* +------------------+-----------+-----------+-----------+-----------+--------------+
* (2) | (copy of bit 47) | PML4T | PDPT | PDT | offset (2 M) |
* +------------------+-----------+-----------+-----------+--------------------------+
* (3) | (copy of bit 47) | PML4T | PDPT | offset (1 G) |
* +------------------+-----------+-----------+--------------------------------------+
*
* %CR3: pointer to PML4T, 256 TB (2^36 pages)
* PML4T: 512 entries, 512 GB per entry (2^27 pages)
* PDPT: 512 entries, 1 GB per entry (2^18 pages)
* PDT: 512 entries, 2 MB per entry (2^9 pages)
* PT: 512 entries, 4 KB per entry (1 page)
*
* PDPT entries can either reference a PDT or a 1 GB region directly (if __P_HUGE is set)
* PDT entries can either reference a PT or a 2 MB region directly (if __P_HUGE is set)
*
* (1) shows a PML4T -> PDPT -> PDT -> PT regular mapping
* (2) shows a PML4T -> PDPT -> PDT hugepage mapping
* (3) shows a PML4T -> PDPT gigapage mapping
*
* Since the lowest 12 bits are always zero in any page map entry, they are
* used for flags. Additionally, bit 63 stores the NX (no execute) flag.
*/
#include <arch/vmparam.h>
/*
 * Right-shift amounts that move each paging level's 9-bit table index down
 * to bit 0 of a virtual address.  Each level covers 9 more address bits
 * than the one below it because every table holds 512 (2^9) entries.
 */
#define X86_PT_SHIFT PAGE_SHIFT
#define X86_PDT_SHIFT (X86_PT_SHIFT + 9)
#define X86_PDPT_SHIFT (X86_PDT_SHIFT + 9)
#define X86_PML4T_SHIFT (X86_PDPT_SHIFT + 9)
/*
 * Capability flags advertised to the arch-independent vm layer:
 * this arch supports the NX bit and 1 GB gigapages.
 * NOTE(review): presumably tested with #ifdef by generic vm code -- the
 * consumers are outside this file, confirm against the vm subsystem.
 */
#define __HAVE_NOEXEC
#define __HAVE_GIGAPAGES
/** @brief Binary logarithm of `HUGEPAGE_SIZE` (2 MB pages, one PDT entry). */
#define HUGEPAGE_SHIFT X86_PDT_SHIFT
/** @brief Binary logarithm of `GIGAPAGE_SIZE` (1 GB pages, one PDPT entry). */
#define GIGAPAGE_SHIFT X86_PDPT_SHIFT
#ifndef _ASM_SOURCE
#include <gay/cdefs.h>
#include <gay/types.h>
/**
 * @brief A single 64-bit page map entry, split up into its individual bit flags.
 * The layout matches that of the Intel SDM, vol 3, sect 4.3, fig 4-4.
 * The same layout is used for all four paging levels; `huge` is only
 * meaningful in PDPT and PDT entries (see the file header comment).
 * NOTE(review): correctness depends on the compiler allocating bit-fields
 * LSB-first within the 64-bit unit, as GCC/Clang do on x86 -- the layout of
 * mixed-type bit-fields is implementation-defined; confirm if another
 * compiler is ever used.
 */
struct x86_pmap_flags {
/* 0 */bool present:1; /**< Page Fault on access if 0 */
/* 1 */bool rw:1; /**< Page Fault on write if 0 */
/* 2 */bool user:1; /**< Page Fault on user mode access if 0 */
/* 3 */bool write_through:1; /**< Enable write-through caching */
/* 4 */bool cache_disabled:1; /**< Disable caching in TLB */
/* 5 */bool accessed:1; /**< 1 if page has been accessed */
/* 6 */bool dirty:1; /**< 1 if page has been written to */
/* 7 */bool huge:1; /**< Only valid for PDPTEs and PDTEs */
/* 8 */bool global:1; /**< Entry survives `vm_flush()` if 1 */
/* 9 */unsigned _unused:3; /**< Ignored by the MMU; bits 9/10 carry the software flags `__P_SLAB`/`__P_ATOMIC` below */
/* 12 */vm_paddr_t shifted_address:51; /**< Physical page number, i.e. physical address >> 12 (bits 12-62) */
/* 63 */bool noexec:1; /**< Prevent instruction fetches */
} __packed;
/* bitmasks for the structure above (bit positions match the struct fields) */
#define __P_PRESENT (1 << 0)
#define __P_RW (1 << 1)
#define __P_USER (1 << 2)
#define __P_WRITE_THROUGH (1 << 3)
#define __P_NOCACHE (1 << 4)
#define __P_ACCESSED (1 << 5)
#define __P_DIRTY (1 << 6)
#define __P_HUGE (1 << 7)
#define __P_GLOBAL (1 << 8)
/* bits 9 and 10 sit in the MMU-ignored `_unused` range and are software-defined */
#define __P_SLAB (1 << 9)
#define __P_ATOMIC (1 << 10)
/* bit 63; the `1ul` suffix keeps the shift in 64-bit arithmetic (plain `1 << 63` would be UB) */
#define __P_NOEXEC (1ul << 63)
/** @brief Bitmask for extracting the physical address from a page map entry. */
/* covers bits 12-62: everything except the low flag bits and the NX bit */
#define X86_PMAP_MASK 0x7ffffffffffff000
/*
 * these types are deliberately not merged into one so that the
 * compiler can catch accidental assignments to the wrong type
 */
/*
 * Every page map entry can be viewed either as decomposed bit flags
 * (`.flags`) or as the raw 64-bit value (`.val`) for whole-entry
 * reads/writes and mask arithmetic.
 */
#define __pmap_entry_union union { \
struct x86_pmap_flags flags; \
vm_paddr_t val; \
}
/** @brief x86 Page Table Entry. */
typedef __pmap_entry_union x86_pte_t;
/** @brief x86 Page Directory Table Entry. */
typedef __pmap_entry_union x86_pdte_t;
/** @brief x86 Page Directory Pointer Table Entry. */
typedef __pmap_entry_union x86_pdpte_t;
/** @brief x86 Page Map Level 4 Table Entry. */
typedef __pmap_entry_union x86_pml4te_t;
/*
 * Whole tables: 512 entries x 8 bytes = 4096 bytes, i.e. exactly one page,
 * and page-aligned as the MMU requires.
 */
/** @brief x86 Page Table. */
typedef struct { x86_pte_t entries[512]; } __aligned(PAGE_SIZE) x86_pt_t;
/** @brief x86 Page Directory Table. */
typedef struct { x86_pdte_t entries[512]; } __aligned(PAGE_SIZE) x86_pdt_t;
/** @brief x86 Page Directory Pointer Table. */
typedef struct { x86_pdpte_t entries[512]; } __aligned(PAGE_SIZE) x86_pdpt_t;
/** @brief x86 Page Map Level 4 Table. */
typedef struct { x86_pml4te_t entries[512]; } __aligned(PAGE_SIZE) x86_pml4t_t;
/* you aren't expected to understand any of these, they're just nasty offset calculations */
#define __V48_MASK ( ((uintptr_t)1 << 48) - 1 )
/** @brief Get the linear 48-bit address (strips the sign-extended bits 48-63) */
#define __V48(ptr) ((uintptr_t)(ptr) & __V48_MASK)
/**
 * @brief Generate a 48-bit virtual address in user space, based on its pmap indices.
 * Every index must be less than 512, or you'll get a garbage address.
 * `pml4i` must be less than 256, or you'll hurt the MMU's feelings.
 * This is because bits 63-48 of the virtual address must all match bit 47.
 * NOTE(review): the casts use vm_paddr_t even though the result is a virtual
 * address -- presumably vm_paddr_t is a 64-bit unsigned type wide enough for
 * this; confirm against <arch/vmparam.h>.
 */
#define UV48ADDR(pml4ti, pdpti, pdti, pti) ( \
(vm_paddr_t)(pml4ti) << X86_PML4T_SHIFT | \
(vm_paddr_t)(pdpti) << X86_PDPT_SHIFT | \
(vm_paddr_t)(pdti) << X86_PDT_SHIFT | \
(vm_paddr_t)(pti) << X86_PT_SHIFT \
)
/**
 * @brief Generate a 48-bit virtual address in kernel space, based on its pmap indices.
 * Every index must be less than 512, or you'll get a garbage address.
 * `pml4i` must be at least 256, or you'll hurt the MMU's feelings.
 * This is because bits 63-48 of the virtual address must all match bit 47.
 * The constant ORs in the sign extension of bit 47 (canonical high-half address).
 */
#define KV48ADDR(pml4ti, pdpti, pdti, pti) ( \
(vm_paddr_t)0xffff000000000000 | \
UV48ADDR(pml4ti, pdpti, pdti, pti) \
)
/*
 * Index extractors: shift the level's index field down, then `% 512`
 * (= & 0x1ff) to mask it to 9 bits.  The PML4T index needs no masking
 * because __V48 already cleared everything above bit 47.
 */
/** @brief Get the Page Table index for a given virtual address. */
#define X86_PTI(ptr) ((__V48(ptr) >> X86_PT_SHIFT ) % 512)
/** @brief Get the Page Directory Table index for a given virtual address. */
#define X86_PDTI(ptr) ((__V48(ptr) >> X86_PDT_SHIFT ) % 512)
/** @brief Get the Page Directory Pointer Table index for a given virtual address. */
#define X86_PDPTI(ptr) ((__V48(ptr) >> X86_PDPT_SHIFT ) % 512)
/** @brief Get the Page Map Level 4 Table index for a given virtual address. */
#define X86_PML4TI(ptr) (__V48(ptr) >> X86_PML4T_SHIFT)
/* Page Map Level 4 Table index for the recursive page map */
#define __PML4TI (X86_PML4TI(X86_PMAP_OFFSET)) /* = 256 */
/*
 * Base addresses of the recursively mapped page tables: feeding the
 * recursive PML4T slot back into the address once per level makes the MMU
 * resolve to the tables themselves rather than the mapped memory.
 */
#define __PT_BASE ( (x86_pt_t *)KV48ADDR(__PML4TI, 0, 0, 0) )
#define __PDT_BASE ( (x86_pdt_t *)KV48ADDR(__PML4TI, __PML4TI, 0, 0) )
#define __PDPT_BASE ( (x86_pdpt_t *)KV48ADDR(__PML4TI, __PML4TI, __PML4TI, 0) )
#define __PML4T_BASE ( (x86_pml4t_t *)KV48ADDR(__PML4TI, __PML4TI, __PML4TI, __PML4TI) )
/*
 * Entry lookups: the index is deliberately NOT masked to 511 here -- the
 * recursive mapping makes all tables of a level appear as one contiguous
 * array starting at the respective *_BASE, so the full shifted address
 * selects both the table and the entry within it.
 */
/** @brief Get the Page Table Entry for a given virtual address. */
#define X86_PTE(ptr) ( &__PT_BASE->entries[__V48(ptr) >> X86_PT_SHIFT] )
/** @brief Get the Page Directory Table Entry for a given virtual address. */
#define X86_PDTE(ptr) ( &__PDT_BASE->entries[__V48(ptr) >> X86_PDT_SHIFT] )
/** @brief Get the Page Directory Pointer Table Entry for a given virtual address. */
#define X86_PDPTE(ptr) ( &__PDPT_BASE->entries[__V48(ptr) >> X86_PDPT_SHIFT] )
/** @brief Get the Page Map Level 4 Table Entry for a given virtual address. */
#define X86_PML4TE(ptr) ( &__PML4T_BASE->entries[__V48(ptr) >> X86_PML4T_SHIFT] )
#endif /* not _ASM_SOURCE */