You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
233 lines
6.1 KiB
C
233 lines
6.1 KiB
C
/* See the end of this file for copyright and license terms. */
|
|
|
|
/*
 * To manipulate the page directory while paging is enabled, we abuse the
 * structural similarity between page directory and page table by mapping the
 * last entry in the page directory to itself.  This makes the MMU interpret the
 * page directory as if it were a page table, giving us access to the individual
 * directory entries at `0xffc00000-0xffffffff` virtual.  The last page, at
 * address `0xfffff000-0xffffffff`, then points to the page directory itself.
 */
|
|
|
|
#include <arch/page.h>
|
|
#include <arch/trap.h>
|
|
|
|
#include <gay/cdefs.h>
|
|
#include <gay/config.h>
|
|
#include <gay/errno.h>
|
|
#include <gay/kprintf.h>
|
|
#include <gay/mm.h>
|
|
#include <gay/types.h>
|
|
#include <gay/util.h>
|
|
|
|
#include <string.h>
|
|
|
|
/* from linker script: physical bounds of the kernel image */
extern void _image_start_phys;
extern void _image_end_phys;

/* first and last dynamic page address (watch out, these are physical) */
/* NOTE(review): not referenced in this chunk -- presumably initialized by
 * the page-frame allocator setup elsewhere in the file; confirm. */
static uintptr_t dynpage_start;
static uintptr_t dynpage_end;

/**
 * @brief First page table for low memory (0 - 4 M).
 * This is initialized by the early boot routine in assembly so that paging
 * can be enabled (the kernel itself is mapped to `0xf0100000` by default).
 */
__asmlink struct x86_page_table pt0;
/** @brief First page directory for low memory. */
__asmlink struct x86_page_directory pd0;
|
|
|
|
/**
 * @brief Map a single page frame into the current virtual address space.
 *
 * @param phys Physical address of the page frame (must be page aligned, or
 *	hugepage aligned if `MM_PAGE_HUGE` is set in `flags`)
 * @param virt Virtual address to map the frame to (same alignment rules)
 * @param flags Mapping attributes (`MM_PAGE_RW`, `MM_PAGE_USER`,
 *	`MM_PAGE_HUGE`, `MM_PAGE_ACCESSED`, `MM_PAGE_NOCACHE`)
 * @returns 0 on success, or `-ENOMEM` if a page table had to be allocated
 *	and no memory was available
 *
 * NOTE(review): no TLB invalidation of the final mapping is performed here
 * (only the intermediate page-table allocation is followed by vm_flush()) --
 * the caller presumably flushes; confirm against call sites.  Also, mapping
 * a huge page over a PDE that previously pointed to a page table overwrites
 * the entry without freeing that table -- possible page leak; verify.
 */
int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
{
# ifdef DEBUG
	if (phys != PAGE_ALIGN(phys))
		kprintf("map_page(): unaligned physical address %p!\n", (void *)phys);
	if (virt != PAGE_ALIGN(virt))
		kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif

	/* i386 two-level paging: top 10 address bits select the page
	 * directory entry, the next 10 bits select the page table entry */
	usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
	usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;

	/* the recursive last PD entry makes the current page directory
	 * itself addressable (see the comment at the top of this file) */
	struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
	if (flags & MM_PAGE_HUGE) {
# ifdef DEBUG
		if (phys != HUGEPAGE_ALIGN(phys)) {
			kprintf("map_page(): unaligned physical address %p!\n",
				(void *)phys);
			phys = HUGEPAGE_ALIGN(phys);
		}
		/* NOTE(review): unlike phys above, virt is warned about but
		 * not realigned here -- confirm this asymmetry is intended */
		if (virt != HUGEPAGE_ALIGN(virt)) {
			kprintf("map_page(): unaligned virtual address %p!\n",
				virt);
		}
# endif

		/* clear the whole entry first so no stale flag bits survive */
		*(unsigned long *)pde = 0;
		pde->present = 1;
		pde->huge = 1;
		pde->rw = (flags & MM_PAGE_RW) != 0;
		pde->user = (flags & MM_PAGE_USER) != 0;
		pde->accessed = (flags & MM_PAGE_ACCESSED) != 0;
		pde->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
		/* convention: the address field always stores phys >> PAGE_SHIFT,
		 * even for huge pages (vtophys() relies on this, see its comment) */
		pde->shifted_address = phys >> PAGE_SHIFT;
		return 0;
	}

	/*
	 * warning: pt might not be present yet before the if block below,
	 * we only define it here already so we can easily call memset() in
	 * the if block
	 */
	struct x86_page_table *pt = X86_CURRENT_PT(pd_index);

	if (!pde->present) {
		/* no page table for this 4 M region yet -- allocate one */
		uintptr_t pt_phys = vtophys(get_pages(1, MM_ATOMIC));
		if (!pt_phys)
			return -ENOMEM;

		*(unsigned long *)pde = 0;
		pde->shifted_address = pt_phys >> PAGE_SHIFT;
		pde->rw = 1;
		pde->present = 1;
		/* flush BEFORE the memset: pt is reached through the recursive
		 * mapping, which only resolves to the new table once the TLB
		 * has picked up the directory entry written above */
		vm_flush();
		memset(pt, 0, sizeof(*pt));
	}

	struct x86_page_table_entry *pte = &pt->entries[pt_index];
	*(unsigned long *)pte = 0; /* zero out the entire entry first */
	pte->rw = (flags & MM_PAGE_RW) != 0;
	pte->user = (flags & MM_PAGE_USER) != 0;
	pte->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
	pte->shifted_address = phys >> PAGE_SHIFT;
	pte->present = 1;

	return 0;
}
|
|
|
|
uintptr_t unmap_page(void *virt)
|
|
{
|
|
# ifdef DEBUG
|
|
if (virt != PAGE_ALIGN(virt))
|
|
kprintf("map_page(): unaligned virtual address %p!\n", virt);
|
|
# endif
|
|
|
|
struct x86_page_directory *pd = X86_CURRENT_PD;
|
|
|
|
usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
|
|
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
|
|
|
|
struct x86_page_directory_entry *pde = &pd->entries[pd_index];
|
|
if (!pde->present)
|
|
return 0;
|
|
|
|
uintptr_t phys = 0;
|
|
if (pde->huge) {
|
|
phys = pde->shifted_address;
|
|
phys <<= HUGEPAGE_SHIFT;
|
|
*(unsigned long *)pde = 0;
|
|
} else {
|
|
struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
|
|
struct x86_page_table_entry *pte = &pt->entries[pt_index];
|
|
if (pte->present) {
|
|
phys = pte->shifted_address;
|
|
phys <<= PAGE_SHIFT;
|
|
*(unsigned long *)pte = 0;
|
|
}
|
|
}
|
|
|
|
return phys;
|
|
}
|
|
|
|
/**
 * @brief Page fault interrupt service routine.
 *
 * Decodes the error code pushed by the CPU, prints a diagnostic message with
 * the faulting address (read from cr2) and the saved register state, then
 * halts the system.  Does not return.
 *
 * @param frame Trap frame as saved by the interrupt entry stub
 * @param error_code Page fault error code pushed by the CPU
 */
void x86_isr_page_fault(struct x86_trap_frame *frame, u32 error_code)
{
	/* cr2 holds the linear address that caused the fault */
	void *address;
	__asm__ volatile(
"	mov	%%cr2,	%0	\n"
	: "=r"(address)
	:
	);

	const char *space;
	if (error_code & X86_PF_USER)
		space = "user";
	else
		space = "kernel";

	const char *rwx;
	if (error_code & X86_PF_WRITE)
		rwx = "write to";
	else if (error_code & X86_PF_INSTR)
		rwx = "exec at";
	else
		rwx = "read from";

	const char *present;
	if (error_code & X86_PF_PRESENT)
		present = "";
	else
		present = " non-mapped";

	kprintf("\n########## B O N K ##########\n");
	kprintf("Illegal %s %s%s address %p!\n", space, rwx, present, address);
	x86_print_regs(frame);
	/* fix: terminate the final log line with a newline */
	kprintf("system halted\n");
	/* disable interrupts and spin on hlt forever */
	__asm__ volatile(
"	cli		\n"
"1:	hlt		\n"
"	jmp	1b	\n"
	);
}
|
|
|
|
uintptr_t vtophys(void *virt)
|
|
{
|
|
usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
|
|
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
|
|
|
|
struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
|
|
if (!pde->present)
|
|
return 0;
|
|
|
|
uintptr_t phys = 0;
|
|
if (pde->huge) {
|
|
phys = pde->shifted_address;
|
|
phys <<= PAGE_SHIFT; /* attention, this is not HUGEPAGE_SHIFT */
|
|
phys |= (uintptr_t)virt & ~HUGEPAGE_MASK;
|
|
} else {
|
|
struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
|
|
struct x86_page_table_entry *pte = &pt->entries[pt_index];
|
|
if (pte->present) {
|
|
phys = pte->shifted_address;
|
|
phys <<= PAGE_SHIFT;
|
|
phys |= (uintptr_t)virt & ~PAGE_MASK;
|
|
}
|
|
}
|
|
|
|
return phys;
|
|
}
|
|
|
|
/**
 * @brief Flush the TLB.
 *
 * Reloads cr3 with its current value, which invalidates the cached
 * translations so the MMU re-reads the page directory on the next access.
 */
void vm_flush(void)
{
	/* round-trip cr3 through eax; the "memory" clobber keeps the
	 * compiler from reordering page table writes across the flush */
	__asm__ volatile(
"	mov	%%cr3,	%%eax	\n"
"	mov	%%eax,	%%cr3	\n"
	::: "eax", "memory"
	);
}
|
|
|
|
/*
|
|
* This file is part of GayBSD.
|
|
* Copyright (c) 2021 fef <owo@fef.moe>.
|
|
*
|
|
* GayBSD is nonviolent software: you may only use, redistribute, and/or
|
|
* modify it under the terms of the Cooperative Nonviolent Public License
|
|
* (CNPL) as found in the LICENSE file in the source code root directory
|
|
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
|
|
* of the license, or (at your option) any later version.
|
|
*
|
|
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
|
|
* permitted by applicable law. See the CNPL for details.
|
|
*/
|