/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */

/*
 * To manipulate the page directory while paging is enabled, we abuse the
 * structural similarity between page directory and page table by mapping the
 * last entry in the page directory to itself.  This makes the MMU interpret
 * the page directory as if it were a page table, giving us access to the
 * individual directory entries at `0xffc00000-0xffffffff` virtual.  The last
 * page, at address `0xfffff000-0xffffffff`, then points to the page directory
 * itself.
 */
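
/*
 * Illustration (a sketch, not compiled as part of this file): with the
 * self-referencing last slot, the paging structures themselves appear at
 * fixed virtual addresses.  The real X86_CURRENT_PD/X86_CURRENT_PT macros
 * live in arch/page.h; assuming 4 KiB pages, plausible definitions are:
 *
 *	// the page table for directory slot pd_index lives at
 *	// 0xffc00000 + pd_index * PAGE_SIZE
 *	#define CURRENT_PT(pd_index) \
 *		((struct x86_page_table *)(0xffc00000 + ((pd_index) << PAGE_SHIFT)))
 *	// the page directory itself occupies the very last page
 *	#define CURRENT_PD ((struct x86_page_directory *)0xfffff000)
 */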

#include <arch/page.h>
#include <arch/trap.h>

#include <gay/cdefs.h>
#include <gay/config.h>
#include <gay/errno.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/systm.h>
#include <gay/types.h>

#include <string.h>

/* from linker script */
extern void _image_start_phys;
extern void _image_end_phys;

/**
 * @brief First page table for low memory (0 - 4 MiB).
 * This is initialized by the early boot routine in assembly so that paging
 * can be enabled (the kernel itself is mapped to `0xf0100000` by default).
 */
__asmlink struct x86_page_table pt0;
/** @brief First page directory for low memory. */
__asmlink struct x86_page_directory pd0;

int map_page(uintptr_t phys, void *virt, enum pflags flags)
{
# ifdef DEBUG
	if (phys != PAGE_ALIGN(phys))
		kprintf("map_page(): unaligned physical address %p!\n", (void *)phys);
	if (virt != PAGE_ALIGN(virt))
		kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif

	usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;

	struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
	if (flags & P_HUGE) {
# ifdef DEBUG
		if (phys != HUGEPAGE_ALIGN(phys)) {
			kprintf("map_page(): unaligned physical address %p!\n",
				(void *)phys);
			phys = HUGEPAGE_ALIGN(phys);
		}
		if (virt != HUGEPAGE_ALIGN(virt)) {
			/* pd_index already discards the offset bits, so a warning must do */
			kprintf("map_page(): unaligned virtual address %p!\n",
				virt);
		}
# endif

		/* if the PDE already refers to a page table, release it first */
		if (pde->present && !pde->huge) {
			void *pt = __v(pde->shifted_address << PAGE_SHIFT);
			free_pages(pt);
		}

		*(unsigned long *)pde = 0;
		pde->present = 1;
		pde->huge = 1;
		pde->rw = (flags & P_RW) != 0;
		pde->user = (flags & P_USER) != 0;
		pde->accessed = (flags & P_ACCESSED) != 0;
		pde->cache_disabled = (flags & P_NOCACHE) != 0;
		pde->shifted_address = phys >> PAGE_SHIFT;
		return 0;
	}

	/*
	 * Warning: the page table behind pt might not be present yet before
	 * the if block below; it is only declared this early so that the
	 * memset() call inside the block stays simple.
	 */
	struct x86_page_table *pt = X86_CURRENT_PT(pd_index);

	if (!pde->present) {
		uintptr_t pt_phys = vtophys(get_pages(1, M_ATOMIC));
		if (!pt_phys)
			return -ENOMEM;

		*(unsigned long *)pde = 0;
		pde->shifted_address = pt_phys >> PAGE_SHIFT;
		pde->rw = 1;
		pde->present = 1;
		vm_flush();
		memset(pt, 0, sizeof(*pt));
	}

	struct x86_page_table_entry *pte = &pt->entries[pt_index];
	*(unsigned long *)pte = 0; /* zero out the entire entry first */
	pte->rw = (flags & P_RW) != 0;
	pte->user = (flags & P_USER) != 0;
	pte->cache_disabled = (flags & P_NOCACHE) != 0;
	pte->shifted_address = phys >> PAGE_SHIFT;
	pte->present = 1;

	return 0;
}
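
/*
 * Example usage (a sketch; the addresses are made up for illustration):
 * map one physical page read/write and make the change visible.
 *
 *	int err = map_page(0x00200000, (void *)0xf0400000, P_RW);
 *	if (err == 0)
 *		vm_flush();
 */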

/*
 * The only difference between this and map_page() is that we can't allocate
 * new pages using get_pages() here but have to use __early_get_page() instead.
 * So all we need to do is ensure that map_page() doesn't need to allocate a
 * new page table when we call it, which it only does if P_HUGE is not set in
 * pflags and the page table doesn't exist yet (i.e. the present bit in the
 * page directory is clear).  Therefore, we just need to make sure that, if
 * P_HUGE is *not* set, the page table is already allocated and marked as
 * present in the page directory.
 */
void __early_map_page(uintptr_t phys, void *virt, enum pflags pflags)
{
	if (!(pflags & P_HUGE)) {
		usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
		struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
		if (!pde->present) {
			uintptr_t pt_phys = __early_get_page();
			*(unsigned long *)pde = P_PRESENT | P_RW;
			pde->shifted_address = pt_phys >> PAGE_SHIFT;
		}
	}

	map_page(phys, virt, pflags);
}
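
/*
 * Illustration (hypothetical; KERNBASE stands in for wherever the kernel is
 * linked): an early boot routine could map the kernel image with this before
 * the page frame allocator is initialized:
 *
 *	uintptr_t phys = (uintptr_t)&_image_start_phys;
 *	uintptr_t virt = KERNBASE;
 *	while (phys < (uintptr_t)&_image_end_phys) {
 *		__early_map_page(phys, (void *)virt, P_RW);
 *		phys += PAGE_SIZE;
 *		virt += PAGE_SIZE;
 *	}
 */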

uintptr_t unmap_page(void *virt)
{
# ifdef DEBUG
	if (virt != PAGE_ALIGN(virt))
		kprintf("unmap_page(): unaligned virtual address %p!\n", virt);
# endif

	struct x86_page_directory *pd = X86_CURRENT_PD;

	usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;

	struct x86_page_directory_entry *pde = &pd->entries[pd_index];
	if (!pde->present)
		return 0;

	uintptr_t phys = 0;
	if (pde->huge) {
		phys = pde->shifted_address << PAGE_SHIFT;
		pde->present = 0;
	} else {
		struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
		struct x86_page_table_entry *pte = &pt->entries[pt_index];
		if (pte->present) {
			phys = pte->shifted_address << PAGE_SHIFT;
			pte->present = 0;
		}
	}

	return phys;
}
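
/*
 * Note that unmap_page() only clears the paging structures; stale TLB
 * entries survive until the caller flushes them, e.g.:
 *
 *	uintptr_t phys = unmap_page(virt);
 *	if (phys)
 *		vm_flush();
 */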

enum pflags get_pflags(void *page)
{
	usize pd_index = ((uintptr_t)page >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)page >> PAGE_SHIFT) % 1024;

	struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
	if (pde->huge) {
		return *(unsigned long *)pde & ~PAGE_MASK;
	} else if (pde->present) {
		struct x86_page_table_entry *pte = &X86_CURRENT_PT(pd_index)->entries[pt_index];
		return *(unsigned long *)pte & ~PAGE_MASK;
	} else {
		return 0;
	}
}

int set_pflags(void *page, enum pflags pflags)
{
	usize pd_index = ((uintptr_t)page >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)page >> PAGE_SHIFT) % 1024;

	struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
	if (pflags & P_HUGE) {
		/*
		 * If the PDE referred to a page table, free it first.
		 * As in map_page() above, the physical address must be
		 * translated back to a virtual one for free_pages().
		 */
		if (pde->present && !pde->huge)
			free_pages(__v((uintptr_t)pde->shifted_address << PAGE_SHIFT));

		unsigned long pde_raw = *(unsigned long *)pde;
		pde_raw &= PAGE_MASK;
		pde_raw |= (pflags & ~PAGE_MASK);
		*(unsigned long *)pde = pde_raw;
	} else if (pde->present) {
		struct x86_page_table_entry *pte = X86_CURRENT_PTE(pd_index, pt_index);
		unsigned long pte_raw = *(unsigned long *)pte;
		pte_raw &= PAGE_MASK;
		pte_raw |= (pflags & ~PAGE_MASK);
		*(unsigned long *)pte = pte_raw;
	} else {
		return -ENOENT; /* no page table to modify */
	}

	return 0;
}
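
/*
 * Example (a sketch): write-protect a mapped page while keeping its other
 * flags, then flush the TLB so the change takes effect.
 *
 *	enum pflags flags = get_pflags(page);
 *	if (flags & P_PRESENT) {
 *		set_pflags(page, flags & ~P_RW);
 *		vm_flush();
 *	}
 */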

void x86_isr_page_fault(trap_frame_t *frame, u32 error_code)
{
	void *address;
	__asm__ volatile(
"	mov %%cr2, %0	\n"
	: "=r"(address)
	:
	);

	const char *space;
	if (error_code & X86_PF_USER)
		space = "user";
	else
		space = "kernel";

	const char *rwx;
	if (error_code & X86_PF_WRITE)
		rwx = "write to";
	else if (error_code & X86_PF_INSTR)
		rwx = "exec at";
	else
		rwx = "read from";

	const char *present;
	if (error_code & X86_PF_PRESENT)
		present = "";
	else
		present = " non-mapped";

	kprintf("\n########## B O N K ##########\n");
	kprintf("Illegal %s %s%s address %p!\n", space, rwx, present, address);
	print_regs(frame);
	kprintf("system halted\n");
	__asm__ volatile(
"	cli	\n"
"1:	hlt	\n"
"	jmp 1b	\n"
	);
}

uintptr_t vtophys(void *virt)
{
	usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;

	struct x86_page_directory_entry *pde = X86_CURRENT_PDE(pd_index);
	if (!pde->present)
		return 0;

	uintptr_t phys = 0;
	if (pde->huge) {
		phys = pde->shifted_address;
		phys <<= PAGE_SHIFT; /* attention, this is not HUGEPAGE_SHIFT */
		phys |= (uintptr_t)virt & ~HUGEPAGE_MASK;
	} else {
		struct x86_page_table_entry *pte = X86_CURRENT_PTE(pd_index, pt_index);
		if (pte->present) {
			phys = pte->shifted_address;
			phys <<= PAGE_SHIFT;
			phys |= (uintptr_t)virt & ~PAGE_MASK;
		}
	}

	return phys;
}
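
/*
 * Worked example (illustrative numbers): for a 4 MiB page whose PDE stores
 * shifted_address = 0x00400 (i.e. physical base 0x00400000), translating
 * virt = 0xf0123456 yields:
 *
 *	phys = (0x00400 << PAGE_SHIFT)		// 0x00400000
 *	     | (0xf0123456 & ~HUGEPAGE_MASK);	// offset 0x123456
 *	// => 0x00523456
 */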

void vm_flush(void)
{
	register_t tmp;
	__asm__ volatile(
"	mov %%cr3, %0	\n"
"	mov %0, %%cr3	\n"
	: "=r"(tmp)
	:
	: "memory"
	);
}
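
/*
 * Note: reloading CR3 flushes the entire (non-global) TLB.  On i486 and
 * later, a single page can be flushed more cheaply with invlpg; a helper
 * (hypothetical, not part of this file) could look like:
 *
 *	static inline void vm_flush_page(void *virt)
 *	{
 *		__asm__ volatile("invlpg (%0)" : : "r"(virt) : "memory");
 *	}
 */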