/* Copyright (C) 2021,2022 fef. All rights reserved. */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Initial Page Directory Pointer Table and Page Map Level 4 Table for the
 * assembly startup routine (see setup64.S).  Used for statically mapping the
 * lowest 2 GB of physical memory into the -2 GB virtual area.
 */
__asmlink x86_pdpt_t _pdpt0;
__asmlink x86_pml4t_t _pml4t;

void x86_isr_page_fault(trap_frame_t *frame, u32 error_code)
{
	void *address;
	__asm__ volatile(
"	mov	%%cr2,	%0	\n"
		: "=r"(address)
		:
	);

	const char *space;
	if (error_code & X86_PF_USER)
		space = "user";
	else
		space = "kernel";

	const char *rwx;
	if (error_code & X86_PF_WRITE)
		rwx = "write to";
	else if (error_code & X86_PF_INSTR)
		rwx = "exec at";
	else
		rwx = "read from";

	const char *present;
	if (error_code & X86_PF_PRESENT)
		present = "";
	else
		present = " non-mapped";

	kprintf("\n########## B O N K ##########\n");
	kprintf("Illegal %s %s%s address %p!\n", space, rwx, present, address);
	print_regs(frame);

	/* print a stack trace if this came from kernel space */
	if (frame->hw_frame.cs == X86_64_KERN_CS)
		ktrace_print_from((void *)frame->rbp);

	panic_notrace("Page fault");
}

vm_paddr_t vtophys(void *virt)
{
	x86_pml4te_t *pml4te = X86_PML4TE(virt);
	if (!pml4te->flags.present)
		return (vm_paddr_t)-1;

	x86_pdpte_t *pdpte = X86_PDPTE(virt);
	if (!pdpte->flags.present)
		return (vm_paddr_t)-1;
	if (pdpte->flags.huge) {
		vm_paddr_t phys_base = pdpte->val & X86_PMAP_MASK;
		return phys_base + ((vm_paddr_t)virt % (1 << X86_PDPT_SHIFT));
	}

	x86_pdte_t *pdte = X86_PDTE(virt);
	if (!pdte->flags.present)
		return (vm_paddr_t)-1;
	if (pdte->flags.huge) {
		vm_paddr_t phys_base = pdte->val & X86_PMAP_MASK;
		return phys_base + ((vm_paddr_t)virt % (1 << X86_PDT_SHIFT));
	}

	x86_pte_t *pte = X86_PTE(virt);
	if (!pte->flags.present)
		return (vm_paddr_t)-1;

	vm_paddr_t phys_base = pte->val & X86_PMAP_MASK;
	return phys_base + ((vm_paddr_t)virt % (1 << X86_PT_SHIFT));
}

void page_clear(vm_page_t page)
{
	register_t cpuflags = intr_disable();

	page_lock(page);
	u64 *dest = DMAP_START + (pg2pfn(page) << PAGE_SHIFT);
	usize nbyte = (usize)1 << (pga_order(page) + PAGE_SHIFT);
	memset64(dest, 0, nbyte);
	page_unlock(page);

	intr_restore(cpuflags);
}
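
/*
 * Usage sketch for vtophys() above, kept in a comment because it is only an
 * illustration and not part of the original file: the helper name
 * dump_mapping and its variable names are made up.  It relies on the fact
 * that vtophys() returns (vm_paddr_t)-1 when the address is not mapped, so
 * callers must check for that value before using the result.
 *
 *	static void dump_mapping(void *virt)
 *	{
 *		vm_paddr_t phys = vtophys(virt);
 *
 *		if (phys == (vm_paddr_t)-1)
 *			kprintf("%p is not mapped\n", virt);
 *		else
 *			kprintf("%p -> phys %p\n", virt, (void *)phys);
 *	}
 */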