x86/mm: disable page caching in direct map

Branch: main
Author: anna (2 years ago)
Parent: bd23d2cbc8
Commit: 79033fbc8b
Signed by: fef
GPG key ID: EC22E476DC2D3D84
@@ -19,8 +19,6 @@ struct vm_page *const vm_page_array = (vm_page_t)VM_PAGE_ARRAY_OFFSET;
 vm_page_t _vm_page_array_end = (vm_page_t)(VM_PAGE_ARRAY_OFFSET + VM_PAGE_ARRAY_LENGTH);
 #endif
 /** @brief Initialize the members of `vm_page_array` within the given range. */
 static void init_page_range(vm_paddr_t start, vm_paddr_t end, u_int flags);
 static void print_mem_area(struct mb2_mmap_entry *entry);
 static void register_area(struct mb2_mmap_entry *entry)
@@ -131,7 +129,7 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	vm_paddr_t pml4te_val = __boot_pmalloc(PAGE_SHIFT, MM_ZONE_NORMAL);
 	panic_if(pml4te_val == BOOT_PMALLOC_ERR, "cannot reserve memory for vm_page_array");
 	__boot_clear_page(pml4te_val);
-	pml4te_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
+	pml4te_val |= __P_PRESENT | __P_RW | __P_NOCACHE | __P_GLOBAL | __P_NOEXEC;
 	pml4te->val = pml4te_val;
 	vm_flush();
@@ -147,8 +145,8 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	 * that is not the case. I've checked the disassembly with -O2,
 	 * and clang is emitting the check. So it's fine, i guess. */
 	if (pdpte_val != BOOT_PMALLOC_ERR) {
-		pdpte_val |= __P_PRESENT | __P_RW | __P_HUGE
-			     | __P_GLOBAL | __P_NOEXEC;
+		pdpte_val |= __P_PRESENT | __P_RW | __P_NOCACHE
+			     | __P_HUGE | __P_GLOBAL | __P_NOEXEC;
 		pdpte->val = pdpte_val;
 		map_pos += GIGAPAGE_SIZE;
 		if (map_pos >= map_end)
@@ -162,7 +160,7 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	panic_if(pdpte_val == BOOT_PMALLOC_ERR,
 		 "cannot reserve memory for vm_page_array");
 	__boot_clear_page(pdpte_val);
-	pdpte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
+	pdpte_val |= __P_PRESENT | __P_RW | __P_NOCACHE | __P_GLOBAL | __P_NOEXEC;
 	pdpte->val = pdpte_val;
 	vm_flush();
@@ -175,8 +173,8 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	if (map_end - map_pos >= HUGEPAGE_SIZE) {
 		pdte_val = __boot_pmalloc(X86_PDT_SHIFT, MM_ZONE_NORMAL);
 		if (pdte_val != BOOT_PMALLOC_ERR) {
-			pdte_val |= __P_PRESENT | __P_RW | __P_GLOBAL
-				    | __P_HUGE | __P_NOEXEC;
+			pdte_val |= __P_PRESENT | __P_RW | __P_NOCACHE
+				    | __P_GLOBAL | __P_HUGE | __P_NOEXEC;
 			pdte->val = pdte_val;
 			map_pos += HUGEPAGE_SIZE;
 			if (map_pos >= map_end)
@@ -190,7 +188,8 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	panic_if(pdte_val == BOOT_PMALLOC_ERR,
 		 "cannot reserve memory for vm_page_array");
 	__boot_clear_page(pdpte_val);
-	pdte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
+	pdte_val |= __P_PRESENT | __P_RW | __P_NOCACHE
+		    | __P_GLOBAL | __P_NOEXEC;
 	pdte->val = pdte_val;
 	vm_flush();
@@ -200,7 +199,8 @@ void x86_paging_init(struct mb2_tag_mmap *mmap)
 	vm_paddr_t pte_val = __boot_pmalloc(X86_PT_SHIFT, MM_ZONE_NORMAL);
 	panic_if(pte_val == BOOT_PMALLOC_ERR,
 		 "cannot reserve memory for vm_page_array");
-	pte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
+	pte_val |= __P_PRESENT | __P_RW | __P_NOCACHE
+		   | __P_GLOBAL | __P_NOEXEC;
 	pte->val = pte_val;
 	map_pos += PAGE_SIZE;
@@ -229,10 +229,10 @@ void __boot_clear_page(vm_paddr_t paddr)
 	vm_offset_t offset = paddr - pbase;
 	void *vbase = (void *)KERNBASE - (1 << X86_PDPT_SHIFT);
 	x86_pdpte_t *pdpe = X86_PDPTE(vbase);
-	pdpe->val = pbase | __P_PRESENT | __P_RW | __P_HUGE | __P_NOEXEC;
+	pdpe->val = pbase | __P_PRESENT | __P_RW | __P_NOCACHE | __P_HUGE | __P_NOEXEC;
 	vm_flush();
-	memset(vbase + offset, 0, PAGE_SIZE);
-	pdpe->flags.present = false;
+	memset64(vbase + offset, 0, PAGE_SIZE);
+	pdpe->val = 0;
 	vm_flush();
 }
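
Besides adding __P_NOCACHE, the last hunk clears the temporarily mapped page with memset64() instead of memset() and tears the mapping down by zeroing the whole PDPT entry rather than only its present flag. memset64() itself is not part of this diff; below is a hypothetical sketch of such a helper, assuming it takes a byte count like the memset() call it replaces:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical 64-bit-wide memset; the real helper in this tree is not
 * shown in the commit.  Assumes len is a byte count and a multiple of 8,
 * which holds for PAGE_SIZE. */
static inline void *memset64(void *dest, uint64_t val, size_t len)
{
	uint64_t *p = dest;
	for (size_t i = 0; i < len / sizeof(*p); i++)
		p[i] = val;
	return dest;
}

Clearing the page in 64-bit stores presumably matters more now that the mapping is uncacheable, since every store to an uncacheable page is an individual memory transaction.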
