x86: fix page allocator once and for all

anna 2021-10-14 21:06:40 +02:00
parent 65899b35f1
commit 14e673b8dd
Signed by: fef
GPG key ID: EC22E476DC2D3D84
2 changed files with 24 additions and 18 deletions

x86 paging header:

@@ -10,11 +10,12 @@
 #define PAGE_SHIFT 12
 /** @brief Page size in bytes. */
 #define PAGE_SIZE (1 << PAGE_SHIFT)
-/** @brief Pointer bitmask to get the base address of their page. */
-#define PAGE_MASK (~(PAGE_SIZE - 1))
 
 #ifndef _ASM_SOURCE
 
+/** @brief Pointer bitmask to get the base address of their page. */
+#define PAGE_MASK (~((unsigned long)PAGE_SIZE - 1))
+
 #include <gay/cdefs.h>
 #include <gay/types.h>
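
The PAGE_MASK fix is twofold: the mask is now built from an `unsigned long`, so it gets full word width instead of plain `int`, and it moves below the `#ifndef _ASM_SOURCE` guard, presumably because the cast is not valid in assembler sources. For illustration, a minimal host-side sketch of what the macro does, reusing the header's own definitions (standalone, not part of the commit; `typeof` is the GCC extension the header already relies on):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~((unsigned long)PAGE_SIZE - 1))
#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))

int main(void)
{
	void *p = (void *)0xc0103abc;
	/* strip the offset bits within the page: 0xc0103abc -> 0xc0103000 */
	printf("%p\n", PAGE_ALIGN(p));
	return 0;
}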
@@ -30,7 +31,7 @@ struct x86_page_table_entry {
 	unsigned global:1;		/**< Don't update the TLB on table swap if 1 */
 	unsigned _reserved1:3;
 	uintptr_t shifted_address:20;	/**< Aligned pointer to the physical page */
-};
+} __packed;
 
 #define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))
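
`__packed` pins the bitfield struct to exactly 32 bits, so an array of entries overlays a hardware page table with no padding. A sketch of the guarantee, assuming `__packed` expands to `__attribute__((packed))` (as `gay/cdefs.h` presumably defines it); the first eight field names are reconstructed from the standard i386 PTE layout rather than copied from the header, so treat them as illustrative:

#include <stdint.h>

#define __packed __attribute__((packed))

struct x86_page_table_entry {
	unsigned present:1;
	unsigned rw:1;
	unsigned user:1;
	unsigned write_through:1;
	unsigned cache_disabled:1;
	unsigned accessed:1;
	unsigned dirty:1;
	unsigned _reserved0:1;
	unsigned global:1;		/* as in the diff above */
	unsigned _reserved1:3;
	uintptr_t shifted_address:20;
} __packed;

/* 9*1 + 3 + 20 = 32 bits; compile with -m32 to match the i386 target */
_Static_assert(sizeof(struct x86_page_table_entry) == 4,
	       "PTE must be exactly 4 bytes");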
@@ -39,14 +40,16 @@ struct x86_page_table {
 } __aligned(PAGE_SIZE);
 
 /**
- * @brief Currently active page table.
+ * @brief Currently active page table at position `index` in the page directory.
  * The last entry in the page directory is mapped to itself, therefore being
  * interpreted by the MMU as a page table. This has the effect that the last
  * page table, i.e. the page directory again, maps the entire page directory
  * structure so it can be manipulated while paging is active. See the comment
  * at the beginning of `arch/x86/mm/page.c` for a more detailed explanation.
+ *
+ * @param index Table index in the page directory
  */
-#define X86_CURRENT_PT_BASE ((struct x86_page_table *)0xffc00000)
+#define X86_CURRENT_PT(index) ( &((struct x86_page_table *)0xffc00000)[index] )
 
 struct x86_page_directory_entry {
 	unsigned present:1;	/**< Page Fault on access if 0 */
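
Because entry 1023 of the page directory points back at the directory itself, the MMU walks the directory twice for addresses at 0xffc00000 and above: page table n shows up at 0xffc00000 + n * PAGE_SIZE, which is exactly the indexing the new macro performs. The arithmetic as a standalone sketch, with plain integers instead of the kernel types:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u

/* each x86_page_table is exactly one page, so indexing the array at
 * 0xffc00000 steps through the self-mapped page tables page by page */
static uintptr_t x86_current_pt(unsigned index)
{
	return 0xffc00000u + (uintptr_t)index * PAGE_SIZE;
}

int main(void)
{
	printf("%#lx\n", (unsigned long)x86_current_pt(0));    /* 0xffc00000: table 0 */
	printf("%#lx\n", (unsigned long)x86_current_pt(768));  /* 0xfff00000: table for 0xc0000000 */
	printf("%#lx\n", (unsigned long)x86_current_pt(1023)); /* 0xfffff000: the directory itself */
	return 0;
}

Index 1023 yields 0xfffff000, the page directory itself, which is what the X86_CURRENT_PD definition below builds on.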
@@ -60,7 +63,7 @@ struct x86_page_directory_entry {
 	unsigned _reserved1:1;
 	unsigned _ignored2:3;
 	uintptr_t shifted_address:20;	/**< Aligned pointer to `struct x86_page_table` */
-};
+} __packed;
 
 struct x86_page_directory {
 	struct x86_page_directory_entry entries[1024];
@@ -72,7 +75,7 @@ struct x86_page_directory {
  * interpreted by the MMU as a page table. See the comment at the start of
  * `arch/x86/mm/page.c` for a more detailed explanation.
  */
-#define X86_CURRENT_PD ((struct x86_page_directory *)&X86_CURRENT_PT_BASE[1023])
+#define X86_CURRENT_PD ((struct x86_page_directory *)X86_CURRENT_PT(1023))
 
 /* page fault status code bits */
 #define X86_PF_PRESENT (1u << 0)

arch/x86/mm/page.c:

@@ -104,15 +104,14 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
 	usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
 	usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
 
-	struct x86_page_directory *pd = X86_CURRENT_PD;
-
 	/*
 	 * warning: pt might not be present yet before the if block below,
 	 * we only define it here already so we can easily call memset() in
 	 * the if block
 	 */
-	struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
-	struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
+	struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
+	struct x86_page_directory_entry *pd_entry = &X86_CURRENT_PD->entries[pd_index];
 
 	if (!pd_entry->present) {
 		uintptr_t pt_phys = get_page();
 		if (!pt_phys)
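
On i386, a virtual address decomposes into 10 bits of directory index, 10 bits of table index, and a 12-bit page offset; the two `usize` lines above recover the first two with a shift, a division, and a modulo. A worked host-side sketch of that split:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uintptr_t virt = 0xc0102000;
	/* top 10 bits select the page directory entry ... */
	uintptr_t pd_index = (virt >> PAGE_SHIFT) / 1024;
	/* ... the next 10 bits select the entry within that page table */
	uintptr_t pt_index = (virt >> PAGE_SHIFT) % 1024;
	printf("pd_index = %lu, pt_index = %lu\n",
	       (unsigned long)pd_index, (unsigned long)pt_index);
	/* prints pd_index = 768, pt_index = 258 */
	return 0;
}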
@@ -153,7 +152,7 @@ uintptr_t unmap_page(void *virt)
 	if (!pd_entry->present)
 		return 0;
 
-	struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
+	struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
 	struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
 	if (!pt_entry->present)
 		return 0;
@@ -170,8 +169,13 @@ uintptr_t get_page(void)
 
 	for (usize i = 0; i < pagemap_len; i++) {
 		if (~pagemap[i] != 0) {
-			int bit = ffsl((long)~pagemap[i]);
-			if (bit < sizeof(*pagemap) * 8) {
+			/*
+			 * for some stupid reason, the bit index returned by
+			 * ffsl() starts at 1 rather than 0
+			 * (and is 0 if there is no bit set)
+			 */
+			int bit = ffsl((long)~pagemap[i]) - 1;
+			if (bit >= 0) {
 				unsigned long page_number = i * sizeof(*pagemap) * 8 + bit;
 				page = dynpage_start + page_number * PAGE_SIZE;
 				pagemap[i] |= (1lu << bit);
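
This hunk is the actual allocator bug: `ffsl()` returns 1-based bit positions, with 0 meaning no bit set, so using its result directly as a shift count marked and returned the wrong page, and the old `bit < sizeof(*pagemap) * 8` test never caught the off-by-one. A host-side sketch of the corrected pattern, assuming the GNU/BSD `ffsl()` (glibc declares it with `_GNU_SOURCE`):

#define _GNU_SOURCE	/* for ffsl() on glibc */
#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
	/* ffsl() numbers bits starting at 1; 0 means "no bit set" */
	printf("%d %d %d\n", ffsl(0x0), ffsl(0x1), ffsl(0x8)); /* 0 1 4 */

	unsigned long pagemap = 0xff;	/* pages 0-7 allocated, page 8 free */
	/* the commit's fix: subtract 1 to get a 0-based bit index */
	int bit = ffsl((long)~pagemap) - 1;
	if (bit >= 0)
		printf("first free page bit: %d\n", bit); /* 8 */
	return 0;
}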
@@ -258,7 +262,7 @@ uintptr_t virt_to_phys(void *virt)
 	if (!pd->entries[pd_index].present)
 		return 0;
 
-	struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
+	struct x86_page_table *pt = X86_CURRENT_PT(pd_index);
 	if (!pt->entries[pt_index].present)
 		return 0;
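
Once both levels are present, the rest of the translation is just gluing the 20-bit frame number from the entry's `shifted_address` field back onto the low 12 offset bits. A sketch of that final step in plain arithmetic (the frame value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
	uintptr_t virt = 0xc0102abc;
	uintptr_t shifted_address = 0x1337;	/* frame number from the PTE */
	/* frame number back to a byte address, plus the offset within the page */
	uintptr_t phys = (shifted_address << PAGE_SHIFT) | (virt & (PAGE_SIZE - 1));
	printf("%#lx\n", (unsigned long)phys);	/* 0x1337abc */
	return 0;
}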
@@ -308,15 +312,14 @@ static void setup_pagemap(void)
 	 * map it to a virtual address so we can fill its entries.
	 * So this is basically a replacement for a call to map_page().
 	 */
-	struct x86_page_directory *pd = X86_CURRENT_PD;
-	struct x86_page_directory_entry *pd_entry = &pd->entries[1022];
+	struct x86_page_directory_entry *pd_entry = &X86_CURRENT_PD->entries[1022];
 	*(unsigned long *)pd_entry = 0;
-	pd_entry->shifted_address = (uintptr_t)pt_phys >> PAGE_SHIFT;
+	pd_entry->shifted_address = pt_phys >> PAGE_SHIFT;
 	pd_entry->rw = 1;
 	pd_entry->present = 1;
 	vm_flush();
 
-	struct x86_page_table *pt = &X86_CURRENT_PT_BASE[1022];
+	struct x86_page_table *pt = X86_CURRENT_PT(1022);
 	memset(pt, 0, sizeof(*pt));
 
 	/*
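
At this point in boot, `map_page()` would itself need `get_page()`, whose bitmap this function is about to create, so the directory entry for slot 1022 is crafted by hand: clear all 32 bits through the `unsigned long` alias, store the frame number, set `rw` and `present`, then `vm_flush()` so the new table can be zeroed through its self-mapped virtual address. A host-side sketch of the entry manipulation (the struct is reduced and the frame address hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12
#define __packed __attribute__((packed))

/* reduced stand-in for the header's struct x86_page_directory_entry */
struct pd_entry {
	unsigned present:1;
	unsigned rw:1;
	unsigned _other:10;		/* remaining flag bits, collapsed */
	uintptr_t shifted_address:20;
} __packed;

int main(void)
{
	struct pd_entry entry;
	uintptr_t pt_phys = 0x00400000;	/* hypothetical page table frame */

	/* same sequence as the commit: wipe the whole 32-bit entry first,
	 * then set only the fields that matter */
	memset(&entry, 0, sizeof(entry));
	entry.shifted_address = pt_phys >> PAGE_SHIFT;
	entry.rw = 1;
	entry.present = 1;

	uint32_t raw;
	memcpy(&raw, &entry, sizeof(raw));	/* inspect the raw PDE bits */
	printf("%#x\n", (unsigned)raw);		/* 0x400003: frame | RW | P */
	return 0;
}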