/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
#include <arch/atom.h>
#include <arch/dma.h>
#include <arch/multiboot.h>
#include <arch/vmparam.h>
#include <gay/linker.h>
#include <gay/mm.h>
#include <gay/systm.h>
#include <gay/util.h>
#include <gay/vm/page.h>
#include <inttypes.h>
#include <string.h>
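
/** @brief One entry for every physical page frame in the system (mapped in x86_paging_init() below). */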
struct vm_page *const vm_page_array = (vm_page_t)VM_PAGE_ARRAY_OFFSET;
#ifdef DEBUG
/* this gets updated in x86_paging_init() once we know how big the array is */
vm_page_t _vm_page_array_end = (vm_page_t)(VM_PAGE_ARRAY_OFFSET + VM_PAGE_ARRAY_LENGTH);
#endif
/** @brief Initialize the members of `vm_page_array` within the given range. */
static void init_page_range(vm_paddr_t start, vm_paddr_t end, u_int flags);
static void print_mem_area(struct mb2_mmap_entry *entry);
static void register_area(struct mb2_mmap_entry *entry)
{
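	/* split the area along DMA_LIMIT so that DMA-capable memory gets its own zone */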
vm_paddr_t start = entry->addr;
vm_paddr_t end = start + entry->len;
if (start >= DMA_LIMIT) {
__boot_register_mem_area(start, end, MM_ZONE_NORMAL);
} else if (start < DMA_LIMIT && end > DMA_LIMIT) {
__boot_register_mem_area(start, DMA_LIMIT, MM_ZONE_DMA);
__boot_register_mem_area(DMA_LIMIT, end, MM_ZONE_NORMAL);
} else if (start < DMA_LIMIT && end <= DMA_LIMIT) {
__boot_register_mem_area(start, end, MM_ZONE_DMA);
} else {
panic("congratulations, you reached an unreachable branch");
}
}
/*
* "Oh cool another deeply nested 100-liner that nobody understands"
*/
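/*
 * Strategy: walk the paging hierarchy top down and map vm_page_array with
 * the largest page size that still fits -- 1 GB gigapages where possible,
 * then 2 MB hugepages, then regular 4 KB pages.  As a rough example
 * (assuming sizeof(struct vm_page) == 32 purely for illustration): 4 GiB
 * of RAM is 1 Mi page frames, i.e. a 32 MiB array, which maps as sixteen
 * 2 MB hugepages.
 */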
void x86_paging_init(struct mb2_tag_mmap *mmap)
{
__boot_pmalloc_init();
/*
* insert all free areas and find the end of physical memory
*/
struct mb2_mmap_entry *entry = mmap->entries;
vm_paddr_t end = 0;
kprintf("Memory map:\n");
while ((void *)entry - (void *)mmap < mmap->tag.size) {
vm_paddr_t entry_end = entry->addr + entry->len;
end = max(end, entry_end);
print_mem_area(entry);
if (entry->type == MB2_MEMORY_AVAILABLE)
register_area(entry);
entry = (void *)entry + mmap->entry_size;
}
/*
* allocate and map vm_page_array into virtual memory at VM_PAGE_ARRAY_OFFSET
* (this is gonna be a long one)
*/
struct vm_page *vm_page_array_end = vm_page_array + (end >> PAGE_SHIFT);
#ifdef DEBUG
_vm_page_array_end = vm_page_array_end;
#endif
void *map_pos = vm_page_array;
usize remaining_size = (void *)vm_page_array_end - (void *)vm_page_array;
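	/* we can only map whole pages, so round the array size up to the next page boundary */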
remaining_size = align_ceil(remaining_size, PAGE_SIZE);
kprintf("Mapping %zu bytes for vm_page_array\n", remaining_size);
/* PML4T loop */
while (remaining_size != 0) {
/* Is vm_page_array so huge that it spans almost the entire 2 TB
* kernel region? If that's the case, something has gone terribly
* wrong, unless we somehow happen to have about an Exabyte of RAM
* (which is not physically addressable by the CPU's 40-bit bus). */
KASSERT(map_pos < (void *)KERNBASE);
x86_pml4te_t *pml4te = X86_PML4TE(map_pos);
vm_paddr_t pml4te_val = __boot_pmalloc(PAGE_SHIFT);
panic_if(pml4te_val == BOOT_PMALLOC_ERR, "cannot reserve memory for vm_page_array");
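		/* page tables must start out empty, and __boot_pmalloc() does not
		 * guarantee zeroed pages (see __boot_clear_page() below) */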
__boot_clear_page(pml4te_val);
pml4te_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
pml4te->val = pml4te_val;
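		/* flush so the fresh PML4 entry takes effect; X86_PDPTE() presumably
		 * resolves through the recursive page table mapping, which only works
		 * once the entry is visible */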
vm_flush();
/* PDPT loop */
for (int pdpt_index = 0; pdpt_index < 512; pdpt_index++) {
x86_pdpte_t *pdpte = X86_PDPTE(map_pos);
vm_paddr_t pdpte_val;
/* try allocating a 1 GB gigapage first */
if (remaining_size >= 1 << X86_PDPT_SHIFT) {
pdpte_val = __boot_pmalloc(X86_PDPT_SHIFT);
			/* CLion warns that this condition is always true, but it isn't:
			 * I've checked the disassembly with -O2, and clang does emit the
			 * check.  So it's fine, I guess. */
if (pdpte_val != BOOT_PMALLOC_ERR) {
pdpte_val |= __P_PRESENT | __P_RW | __P_HUGE
| __P_GLOBAL | __P_NOEXEC;
pdpte->val = pdpte_val;
remaining_size -= 1 << X86_PDPT_SHIFT;
map_pos += 1 << X86_PDPT_SHIFT;
if (remaining_size == 0)
goto map_done;
continue;
}
}
/* couldn't use a gigapage, continue in hugepage steps */
pdpte_val = __boot_pmalloc(PAGE_SHIFT);
panic_if(pdpte_val == BOOT_PMALLOC_ERR,
"cannot reserve memory for vm_page_array");
__boot_clear_page(pdpte_val);
pdpte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
pdpte->val = pdpte_val;
vm_flush();
/* PDT loop */
for (int pdt_index = 0; pdt_index < 512; pdt_index++) {
x86_pdte_t *pdte = X86_PDTE(map_pos);
vm_paddr_t pdte_val;
/* try allocating a 2 MB hugepage first */
if (remaining_size >= (1 << X86_PDT_SHIFT)) {
pdte_val = __boot_pmalloc(X86_PDT_SHIFT);
if (pdte_val != BOOT_PMALLOC_ERR) {
pdte_val |= __P_PRESENT | __P_RW | __P_GLOBAL
| __P_HUGE | __P_NOEXEC;
pdte->val = pdte_val;
remaining_size -= 1 << X86_PDT_SHIFT;
map_pos += 1 << X86_PDT_SHIFT;
if (remaining_size == 0)
goto map_done;
continue;
}
}
/* couldn't use a hugepage, continue in page steps */
pdte_val = __boot_pmalloc(PAGE_SHIFT);
panic_if(pdte_val == BOOT_PMALLOC_ERR,
"cannot reserve memory for vm_page_array");
				__boot_clear_page(pdte_val);
pdte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
pdte->val = pdte_val;
vm_flush();
/* PT loop */
for (int pt_index = 0; pt_index < 512; pt_index++) {
x86_pte_t *pte = X86_PTE(map_pos);
vm_paddr_t pte_val = __boot_pmalloc(X86_PT_SHIFT);
panic_if(pte_val == BOOT_PMALLOC_ERR,
"cannot reserve memory for vm_page_array");
pte_val |= __P_PRESENT | __P_RW | __P_GLOBAL | __P_NOEXEC;
pte->val = pte_val;
remaining_size -= 1 << X86_PT_SHIFT;
map_pos += 1 << X86_PT_SHIFT;
if (remaining_size == 0)
goto map_done;
} /* end of PT loop */
} /* end of PDT loop */
} /* end of PDPT loop */
} /* end of PML4T loop */
map_done:
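	/* one final flush so the last mappings become visible */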
vm_flush();
}
static void init_page_range(vm_paddr_t start, vm_paddr_t end, u_int flags)
{
KASSERT(start <= end);
vm_page_t cursor = vm_page_array + (start >> PAGE_SHIFT);
usize count = (end - start) >> PAGE_SHIFT;
if (flags == 0) {
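		/* an all-zero struct vm_page is a valid free page (count 0, no flags,
		 * nil pointers), assuming a zeroed atom reads back as zero */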
memset(cursor, 0, count * sizeof(*cursor));
} else {
while (count--) {
atom_init(&cursor->count, 0);
cursor->flags = flags;
cursor->try_free = nil;
cursor->extra = nil;
cursor++;
}
}
}
/*
* It's really unfortunate that we have to zero a page before we can use it as
* a page table, yet also need to reference it in the page table structures
* (thereby mapping it into virtual memory) before we can zero it out.
* This little hack temporarily maps the area at one PDP entry before KERNBASE
* (meaning index 1022 of _pdp0), zeroes the area, and then unmaps it again.
*/
void __boot_clear_page(vm_paddr_t paddr)
{
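	/* align down to a gigapage boundary so a single PDPT entry covers paddr */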
vm_paddr_t pbase = align_floor(paddr, 1 << X86_PDPT_SHIFT);
vm_offset_t offset = paddr - pbase;
void *vbase = (void *)KERNBASE - (1 << X86_PDPT_SHIFT);
x86_pdpte_t *pdpe = X86_PDPTE(vbase);
pdpe->val = pbase | __P_PRESENT | __P_RW | __P_HUGE | __P_NOEXEC;
vm_flush();
memset(vbase + offset, 0, PAGE_SIZE);
pdpe->flags.present = false;
vm_flush();
}
static void print_mem_area(struct mb2_mmap_entry *entry)
{
const char *name;
switch (entry->type) {
case MB2_MEMORY_AVAILABLE:
name = "Available";
break;
case MB2_MEMORY_RESERVED:
name = "Reserved";
break;
case MB2_MEMORY_ACPI_RECLAIMABLE:
name = "ACPI (reclaimable)";
break;
case MB2_MEMORY_NVS:
name = "Non-Volatile Storage";
break;
case MB2_MEMORY_BADRAM:
name = "Bad RAM";
		break;
	default:
		name = "Unknown";
		break;
	}
kprintf(" [0x%016"PRIxVM_PADDR"-0x%016"PRIxVM_PADDR"] %s\n",
entry->addr, entry->addr + entry->len - 1, name);
}