/* Copyright (C) 2021,2022 fef. All rights reserved. */

#pragma once
#define _ARCH_PAGE_H_

/**
 * @brief Data structures and constants for paging on x86 (please end my suffering).
 */

#include

#define __HAVE_HUGEPAGES

/** @brief Binary logarithm of `PAGE_SIZE`. */
#define PAGE_SHIFT 12
/** @brief Page size in bytes. */
#define PAGE_SIZE (1 << PAGE_SHIFT)

#ifdef __x86_64__
#include /* architecture specific constants, including HUGEPAGE_SHIFT */
#else
#include /* architecture specific constants, including HUGEPAGE_SHIFT */
#endif

/** @brief Huge page size in bytes. */
#define HUGEPAGE_SIZE (1 << HUGEPAGE_SHIFT)

#ifdef __HAVE_GIGAPAGES
/** @brief Giga page size in bytes. */
#define GIGAPAGE_SIZE (1 << GIGAPAGE_SHIFT)
#endif

#ifndef _ASM_SOURCE

#include /* declarations used below (vm_paddr_t, register_t, struct mb2_tag_mmap) */

/** @brief Initialize `vm_page_array` based on the multiboot memory map. */
void x86_paging_init(struct mb2_tag_mmap *mmap);

/** @brief Bitmask to get the base address of the page a pointer points into. */
#define PAGE_MASK ( ~((unsigned long)PAGE_SIZE - 1) )
/** @brief Bitmask to get the base address of the huge page a pointer points into. */
#define HUGEPAGE_MASK ( ~((unsigned long)HUGEPAGE_SIZE - 1) )

/** @brief Align a pointer down to the base of the page it points into. */
#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))
/** @brief Align a pointer down to the base of the huge page it points into. */
#define HUGEPAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & HUGEPAGE_MASK ))

#ifdef __HAVE_GIGAPAGES
/** @brief Bitmask to get the base address of the giga page a pointer points into. */
#define GIGAPAGE_MASK ( ~((unsigned long)GIGAPAGE_SIZE - 1) )
/** @brief Align a pointer down to the base of the giga page it points into. */
#define GIGAPAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & GIGAPAGE_MASK ))
#endif

/* page fault status code bits */
#define X86_PF_PRESENT		(1u << 0)
#define X86_PF_WRITE		(1u << 1)
#define X86_PF_USER		(1u << 2)
#define X86_PF_RESERVED		(1u << 3)
#define X86_PF_INSTR		(1u << 4)
#define X86_PF_PKEY		(1u << 5)
#define X86_PF_SHADOW_STACK	(1u << 6)
#define X86_PF_SGX		(1u << 15)

/**
 * @brief Get the physical address a virtual one is currently mapped to.
 *
 * @param virt virtual address
 * @returns The physical address, or -1 cast to `vm_paddr_t` if there is no mapping
 */
vm_paddr_t vtophys(void *virt);

/** @brief Flush the TLB by reloading the %cr3 register (global pages are not flushed). */
static inline void vm_flush(void)
{
	register_t tmp;
	__asm__ volatile(
		"mov %%cr3, %0	\n"
		"mov %0, %%cr3	\n"
		: "=r"(tmp)
		:
		: "memory"
	);
}

#endif /* not _ASM_SOURCE */
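
/*
 * Usage sketch (illustrative only, not declared by this header): how the
 * alignment macros, vtophys() and vm_flush() compose.  The address value,
 * the `fault_handler` function and its `uint32_t` error code parameter
 * below are hypothetical names chosen for the example.
 *
 *	void *vaddr = (void *)0x00203abc;
 *	void *base = PAGE_ALIGN(vaddr);			// 0x00203000
 *	uintptr_t off = (uintptr_t)vaddr & ~PAGE_MASK;	// 0xabc
 *
 *	vm_paddr_t phys = vtophys(vaddr);
 *	if (phys == (vm_paddr_t)-1)
 *		return;		// no mapping for vaddr
 *
 *	// after changing page table entries, flush stale translations:
 *	vm_flush();		// reloads %cr3; global pages stay cached
 *
 * Decoding a page fault status code with the X86_PF_* bits, which mirror
 * the error code the CPU pushes on a page fault:
 *
 *	void fault_handler(uint32_t error)
 *	{
 *		if (!(error & X86_PF_PRESENT))
 *			;	// page was not present
 *		if (error & X86_PF_WRITE)
 *			;	// faulting access was a write
 *		if (error & X86_PF_USER)
 *			;	// fault happened in user mode
 *		if (error & X86_PF_INSTR)
 *			;	// fault on instruction fetch (NX)
 *	}
 */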