x86: minor page management refactor

Even Uranium-223 ages better than my code
main
anna 3 years ago
parent d8e7939093
commit 65899b35f1
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -31,7 +31,13 @@ enum vga_color {
VGA_COLOR_WHITE = 15,
};
#define FB_ADDRESS 0xc03ff000 /* mapped from 0x000b8000 */
/*
* The character framebuffer sits at physical address 0x000b8000 and gets
* mapped in the last entry of the initial page table. With 1024 (0x400)
* entries per page table, each mapping 4096 (0x1000) bytes, this gives us an
* offset of 0x3ff * 0x1000 = 0x003ff000 for the virtual address.
*/
#define FB_ADDRESS (CFG_KERNEL_RELOCATE + 0x003ff000)
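/* e.g. with CFG_KERNEL_RELOCATE = 0xc0000000 (the value implied by the old
 * hardcoded constant above) this still evaluates to 0xc03ff000 */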
#define FB_LINES 24
#define FB_COLS 80
@@ -55,9 +61,9 @@ enum vga_color fb_foreground;
#define cell_at(line, col) ( &framebuffer[(line) * FB_COLS + (col)] )
#define current_cell (cell_at(fb_line, fb_col))
static ssize_t fb_write(struct kprintf_printer *renderer, const void *buf, size_t size);
static ssize_t fb_flush(struct kprintf_printer *renderer);
static struct kprintf_printer fb_kprintf_renderer = {
static isize fb_write(struct kprintf_printer *printer, const void *buf, usize size);
static isize fb_flush(struct kprintf_printer *printer);
static struct kprintf_printer fb_kprintf_printer = {
.write = fb_write,
.flush = fb_flush,
};
@@ -68,15 +74,15 @@ static void fb_init(enum vga_color fg, enum vga_color bg);
static void print_gay_propaganda(void);
static struct mb2_tag *next_tag(struct mb2_tag *tag);
static void handle_tag(struct mb2_tag *tag);
static void handle_mmap_tag(struct mb2_tag_mmap *tag);
static int handle_tag(struct mb2_tag *tag);
static int handle_mmap_tag(struct mb2_tag_mmap *tag);
static const char *mmap_type_name(u32 type);
extern int main(int argc, char *argv[]);
__asmlink void _boot(u32 magic, void *address) /* NOLINT */
{
kprintf_set_printer(&fb_kprintf_renderer);
kprintf_set_printer(&fb_kprintf_printer);
fb_init(VGA_COLOR_LIGHT_GREY, VGA_COLOR_BLACK);
x86_setup_interrupts();
@@ -94,14 +100,21 @@ __asmlink void _boot(u32 magic, void *address) /* NOLINT */
* addresses before accessing them.
*/
address += CFG_KERNEL_RELOCATE;
for (struct mb2_tag *tag = address + 8; tag != NULL; tag = next_tag(tag))
handle_tag(tag);
int err = 0;
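/* the tags start 8 bytes into the boot information structure, right
 * after its fixed total_size and reserved u32 fields (per the
 * multiboot2 spec) */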
for (struct mb2_tag *tag = address + 8; tag != NULL; tag = next_tag(tag)) {
err = handle_tag(tag);
if (err)
break;
}
main(0, NULL);
if (!err)
main(0, NULL);
}
static inline void handle_tag(struct mb2_tag *tag)
static inline int handle_tag(struct mb2_tag *tag)
{
int ret = 0;
switch (tag->type) {
case MB2_TAG_TYPE_END:
break;
@@ -109,46 +122,50 @@ static inline void handle_tag(struct mb2_tag *tag)
kprintf("Kernel command line: %s\n", ((struct mb2_tag_string *)tag)->string);
break;
case MB2_TAG_TYPE_MMAP:
handle_mmap_tag((struct mb2_tag_mmap *)tag);
ret = handle_mmap_tag((struct mb2_tag_mmap *)tag);
break;
default:
//kprintf("Unknown tag %u\n", tag->type);
break;
}
return ret;
}
static inline void handle_mmap_tag(struct mb2_tag_mmap *tag)
static inline int handle_mmap_tag(struct mb2_tag_mmap *tag)
{
kprintf("Memory map:\n");
void *region = NULL;
size_t region_len = 0;
uintptr_t region = 0;
usize region_len = 0;
struct mb2_mmap_entry *entry = &tag->entries[0];
while ((void *)entry < (void *)tag + tag->tag.size) {
kprintf("[%p-%p] %s\n",
kprintf(" [%p-%p] %s\n",
(void *)entry->addr,
(void *)entry->addr + entry->len - 1,
mmap_type_name(entry->type));
if (entry->type == 1 && entry->len > region_len) {
region = (void *)entry->addr;
region = entry->addr;
region_len = entry->len;
}
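/* advance by entry_size rather than sizeof(*entry); the multiboot2
 * spec allows bootloaders to use a larger per-entry stride */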
entry = (void *)entry + tag->entry_size;
}
if (region == NULL) {
if (region == 0) {
kprintf("No memory available! Aborting.\n");
while (1);
return 1;
}
int err = kmalloc_init((uintptr_t)region, (uintptr_t)(region + region_len));
int err = kmalloc_init(region, region + region_len);
if (err) {
kprintf("kmalloc_init() failed! Aborting.\n");
while (1);
return 1;
}
return 0;
}
static inline struct mb2_tag *next_tag(struct mb2_tag *tag)
@@ -173,9 +190,9 @@ static void fb_newline(void)
}
}
static ssize_t fb_write(struct kprintf_printer *renderer, const void *buf, size_t size)
static isize fb_write(struct kprintf_printer *printer, const void *buf, usize size)
{
ssize_t ret = 0;
isize ret = 0;
const u8 *s = buf;
while (size > s - (const u8 *)buf) {
@@ -200,7 +217,7 @@ static ssize_t fb_write(struct kprintf_printer *renderer, const void *buf, size_
return ret;
}
static ssize_t fb_flush(struct kprintf_printer *renderer)
static isize fb_flush(struct kprintf_printer *printer)
{
return 0;
}
@@ -241,7 +258,7 @@ static void print_gay_propaganda(void)
enum vga_color fg_before = fb_foreground;
for (int i = 0; i < ARRAY_SIZE(rainbow); i++) {
fb_background = rainbow[i];
for (int i = 0; i < FB_COLS; i++)
for (int j = 0; j < FB_COLS; j++)
fb_write(NULL, " ", 1);
}
fb_background = bg_before;
@@ -265,7 +282,7 @@ static const char *mmap_type_name(u32 type)
return "Reserved";
case MB2_MEMORY_ACPI_RECLAIMABLE:
return "ACPI";
case MB2_MEMORY_NVS:
case MB2_MEMORY_NVS: /* non-volatile storage */
return "NVS";
case MB2_MEMORY_BADRAM:
return "Bad RAM";

@@ -3,6 +3,7 @@
#include <asm/common.h>
#include <gay/config.h>
#include <arch/multiboot.h>
#include <arch/page.h>
/* see arch/x86/config/kernel.ld */
.extern _image_start_phys
@@ -107,7 +108,6 @@ header_end:
.section .multiboot.text, "a"
#define PAGE_SIZE 4096
/*
* referencing symbols from C requires subtracting the relocation offset
first, because the C code is linked at virtual addresses

@@ -6,6 +6,15 @@
* @brief Data structures and constants for paging on x86 (please end my suffering).
*/
/** @brief Binary logarithm of `PAGE_SIZE`. */
#define PAGE_SHIFT 12
/** @brief Page size in bytes. */
#define PAGE_SIZE (1 << PAGE_SHIFT)
/** @brief Bitmask applied to a pointer to get the base address of its page. */
#define PAGE_MASK (~(PAGE_SIZE - 1))
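/* example: (0xc0103abc & PAGE_MASK) == 0xc0103000, the base of its page,
 * and (0xc0103abc & ~PAGE_MASK) == 0xabc, the offset within it */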
#ifndef _ASM_SOURCE
#include <gay/cdefs.h>
#include <gay/types.h>
@@ -20,30 +29,25 @@ struct x86_page_table_entry {
unsigned _reserved0:1;
unsigned global:1; /**< Don't update the TLB on table swap if 1 */
unsigned _reserved1:3;
unsigned shifted_address:20; /**< Aligned pointer to the physical page */
#define X86_PAGE_TABLE_ADDRESS_SHIFT 12
uintptr_t shifted_address:20; /**< Aligned pointer to the physical page */
};
/**
* @brief The binary logarithm of `PAGE_SIZE`.
* This is useful for alignment checking and such.
*/
#define PAGE_SIZE_LOG2 X86_PAGE_TABLE_ADDRESS_SHIFT
/**
* @brief Size of a memory page in bytes (x86 version).
* This may be used outside of `/arch/x86` for ensuring page alignment.
* Regular code, except for the memory allocator, should never need this.
*/
#define PAGE_SIZE (1lu << PAGE_SIZE_LOG2)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))
struct x86_page_table {
struct x86_page_table_entry entries[1024];
} __aligned(PAGE_SIZE);
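/* 1024 entries of 4 bytes each, so one page table occupies exactly
 * PAGE_SIZE bytes */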
/**
 * @brief Base address of the currently active page tables.
 * The last entry in the page directory points to the page directory itself,
 * so the MMU interprets the directory as just another page table. As a
 * result, the last 4 MiB of virtual memory map all 1024 page tables (with
 * the directory reappearing as the last one), allowing the paging structures
 * to be manipulated while paging is active. See the comment at the
 * beginning of `arch/x86/mm/page.c` for a more detailed explanation.
 */
#define X86_CURRENT_PT_BASE ((struct x86_page_table *)0xffc00000)
#define X86_CURRENT_PT_BASE ((struct x86_page_table *)0xffc00000)
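/*
 * Illustration (assuming the self-map described above): the page table
 * covering page directory index i appears at X86_CURRENT_PT_BASE + i *
 * PAGE_SIZE. For example, the table for the kernel region at 0xc0000000
 * (pd index 0x300) can be inspected at 0xfff00000 with:
 *
 *	struct x86_page_table *pt = &X86_CURRENT_PT_BASE[0x300];
 */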
struct x86_page_directory_entry {
unsigned present:1; /**< Page Fault on access if 0 */
unsigned rw:1; /**< Page Fault on write if 0 */
@@ -55,14 +59,21 @@ struct x86_page_directory_entry {
unsigned large:1; /**< 0 = 4K, 1 = 4M */
unsigned _reserved1:1;
unsigned _ignored2:3;
unsigned shifted_address:20; /**< Aligned pointer to `struct x86_page_table` */
#define X86_PAGE_DIRECTORY_ADDRESS_SHIFT 12
uintptr_t shifted_address:20; /**< Aligned pointer to `struct x86_page_table` */
};
struct x86_page_directory {
struct x86_page_directory_entry entries[1024];
} __aligned(PAGE_SIZE);
/**
 * @brief Currently active page directory.
 * Because the last page directory entry points to the directory itself, the
 * directory also appears as the last of the page tables mapped at
 * `X86_CURRENT_PT_BASE`. See the comment at the start of
 * `arch/x86/mm/page.c` for a more detailed explanation.
 */
#define X86_CURRENT_PD ((struct x86_page_directory *)&X86_CURRENT_PT_BASE[1023])
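/* this evaluates to 0xffc00000 + 1023 * 0x1000 = 0xfffff000: the directory
 * shows up as the very last page of the virtual address space */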
/* page fault status code bits */
#define X86_PF_PRESENT (1u << 0)
#define X86_PF_WRITE (1u << 1)
@@ -88,6 +99,8 @@ typedef struct x86_page_directory vm_info_t;
*/
uintptr_t virt_to_phys(void *virt);
#endif /* not _ASM_SOURCE */
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.

@@ -27,6 +27,7 @@
#include <gay/util.h>
#include <string.h>
#include <strings.h>
/* from linker script */
extern void _image_start_phys;
@@ -38,7 +39,7 @@ extern void _image_end_phys;
*/
static unsigned long *pagemap;
/** @brief Pagemap length as in number of `unsigned long`s, *not* bytes! */
static size_t pagemap_len;
static usize pagemap_len;
/* first and last dynamic page address (watch out, these are physical) */
static uintptr_t dynpage_start;
@@ -66,8 +67,8 @@ int mem_init(uintptr_t start_phys, uintptr_t end_phys)
if ((uintptr_t)&_image_start_phys >= start_phys && (uintptr_t)&_image_start_phys <= end_phys)
start_phys = (uintptr_t)&_image_end_phys;
dynpage_start = (uintptr_t)ptr_align((void *)start_phys, PAGE_SIZE_LOG2);
dynpage_end = (uintptr_t)ptr_align((void *)end_phys, -PAGE_SIZE_LOG2);
dynpage_start = (uintptr_t)ptr_align((void *)start_phys, PAGE_SHIFT);
dynpage_end = (uintptr_t)ptr_align((void *)end_phys, -PAGE_SHIFT);
if (dynpage_end - dynpage_start < 1024 * PAGE_SIZE) {
kprintf("We have < 1024 pages for kmalloc(), this wouldn't go well\n");
@@ -85,7 +86,8 @@ int mem_init(uintptr_t start_phys, uintptr_t end_phys)
setup_pagemap();
kprintf("Available memory: %zu bytes (%lu pages)\n",
dynpage_end - dynpage_start, (dynpage_end - dynpage_start) / PAGE_SIZE);
dynpage_end - dynpage_start,
(unsigned long)(dynpage_end - dynpage_start) / PAGE_SIZE);
return 0;
}
@@ -99,16 +101,16 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
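/* worked example: virt = 0xc03ff000 (the framebuffer constant from above)
 * gives page number 0xc03ff, hence pd_index = 0x300 (768) and
 * pt_index = 0x3ff (1023) */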
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
struct x86_page_directory *pd = X86_CURRENT_PD;
/*
 * warning: the table behind pt might not be present yet before the
 * if block below; we compute the pointer this early only so the if
 * block can conveniently call memset() on it
 */
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
if (!pd_entry->present) {
@@ -117,7 +119,7 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
return -ENOMEM;
*(unsigned long *)pd_entry = 0;
pd_entry->shifted_address = pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->shifted_address = pt_phys >> PAGE_SHIFT;
pd_entry->rw = 1;
pd_entry->present = 1;
vm_flush();
@@ -129,7 +131,7 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
pt_entry->rw = (flags & MM_PAGE_RW) != 0;
pt_entry->user = (flags & MM_PAGE_USER) != 0;
pt_entry->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pt_entry->shifted_address = phys >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->shifted_address = phys >> PAGE_SHIFT;
pt_entry->present = 1;
return 0;
@@ -142,16 +144,16 @@ uintptr_t unmap_page(void *virt)
kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
struct x86_page_directory *pd = X86_CURRENT_PD;
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
if (!pd_entry->present)
return 0;
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
if (!pt_entry->present)
return 0;
@@ -159,28 +161,16 @@ uintptr_t unmap_page(void *virt)
uintptr_t phys_shifted = pt_entry->shifted_address;
*(unsigned long *)pt_entry = 0;
return phys_shifted << X86_PAGE_TABLE_ADDRESS_SHIFT;
}
static inline int find_zero_bit(unsigned long bitfield)
{
int i;
for (i = 0; i < sizeof(bitfield) * 8; i++) {
if ((bitfield & (1lu << i)) == 0)
break;
}
return i;
return phys_shifted << PAGE_SHIFT;
}
uintptr_t get_page(void)
{
uintptr_t page = 0;
for (size_t i = 0; i < pagemap_len; i++) {
for (usize i = 0; i < pagemap_len; i++) {
if (~pagemap[i] != 0) {
int bit = find_zero_bit(pagemap[i]);
int bit = ffsl((long)~pagemap[i]);
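/* ffsl() from <strings.h> returns the 1-based index of the least
 * significant set bit in ~pagemap[i] (i.e. the lowest clear bit of
 * pagemap[i]), or 0 if every bit is set */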
if (bit < sizeof(*pagemap) * 8) {
unsigned long page_number = i * sizeof(*pagemap) * 8 + bit;
page = dynpage_start + page_number * PAGE_SIZE;
@@ -210,8 +200,8 @@ void put_page(uintptr_t phys)
}
# endif
size_t page_number = (phys - dynpage_start) >> PAGE_SIZE_LOG2;
size_t index = page_number / (sizeof(*pagemap) * 8);
usize page_number = (phys - dynpage_start) >> PAGE_SHIFT;
usize index = page_number / (sizeof(*pagemap) * 8);
int bit = page_number % (sizeof(*pagemap) * 8);
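/* e.g. the sixth dynamic page (phys = dynpage_start + 5 * PAGE_SIZE)
 * gives page_number = 5, index = 0, bit = 5; each pagemap word tracks
 * sizeof(unsigned long) * 8 = 32 pages on i386 */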
if ((pagemap[index] & (1lu << bit)) == 0)
kprintf("Double free of page %p!\n", (void *)phys);
@@ -261,18 +251,18 @@ void x86_isr_page_fault(struct x86_trap_frame *frame, u32 error_code)
uintptr_t virt_to_phys(void *virt)
{
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
usize pd_index = ((uintptr_t)virt >> PAGE_SHIFT) / 1024;
usize pt_index = ((uintptr_t)virt >> PAGE_SHIFT) % 1024;
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
struct x86_page_directory *pd = X86_CURRENT_PD;
if (!pd->entries[pd_index].present)
return 0;
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table *pt = &X86_CURRENT_PT_BASE[pd_index];
if (!pt->entries[pt_index].present)
return 0;
uintptr_t phys = pt->entries[pt_index].shifted_address << X86_PAGE_TABLE_ADDRESS_SHIFT;
uintptr_t phys = pt->entries[pt_index].shifted_address << PAGE_SHIFT;
/* if the virtual address wasn't page aligned, add the offset into the page */
return phys | ((uintptr_t)virt & ~PAGE_MASK);
}
@@ -318,15 +308,15 @@ static void setup_pagemap(void)
* map it to a virtual address so we can fill its entries.
* So this is basically a replacement for a call to map_page().
*/
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
struct x86_page_directory *pd = X86_CURRENT_PD;
struct x86_page_directory_entry *pd_entry = &pd->entries[1022];
*(unsigned long *)pd_entry = 0;
pd_entry->shifted_address = (uintptr_t)pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->shifted_address = (uintptr_t)pt_phys >> PAGE_SHIFT;
pd_entry->rw = 1;
pd_entry->present = 1;
vm_flush();
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[1022];
struct x86_page_table *pt = &X86_CURRENT_PT_BASE[1022];
memset(pt, 0, sizeof(*pt));
/*
@@ -336,7 +326,7 @@ static void setup_pagemap(void)
* virtual address, of course.
*/
uintptr_t pagemap_phys = dynpage_start;
size_t pt_index = 0;
usize pt_index = 0;
do {
/*
* take one page away from the dynamic area and reserve it for
@@ -349,7 +339,7 @@ static void setup_pagemap(void)
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
*(unsigned long *)pt_entry = 0;
uintptr_t address = pagemap_phys + pt_index * PAGE_SIZE;
pt_entry->shifted_address = address >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->shifted_address = address >> PAGE_SHIFT;
pt_entry->present = 1;
pt_entry->rw = 1;
