mm: replace GRUB's GDT with our own

Branch: main
Committed by anna, 3 years ago
Parent: 66a1f8726e
Commit: 8129518640
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -117,7 +117,7 @@ header_end:
*/
#define phys_addr(c_symbol) (c_symbol - CFG_KERNEL_RELOCATE)
asmfn_begin(_start)
ASM_ENTRY(_start)
/*
* The kernel image starts at 1 MiB into physical memory.
* We currently assume the kernel is < 3 MiB
@@ -159,10 +159,10 @@ asmfn_begin(_start)
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x00400000
* 0x00000000 - 0x003fffff
* becomes available at two virtual address ranges:
* 0x00000000 - 0x00400000 (identity mapping)
* 0xc0000000 - 0xc0400000 (relocated mapping)
* 0x00000000 - 0x003fffff (identity mapping)
* 0xc0000000 - 0xc03fffff (relocated mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
@@ -208,7 +208,7 @@ asmfn_begin(_start)
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xc01*****.
*/
lea 4f, %ecx
lea _start_virtual, %ecx
jmp *%ecx
/*
@@ -216,17 +216,19 @@ asmfn_begin(_start)
* the actual code flow makes a jump from low (.multiboot.text section)
* to high memory (.text section).
*/
asmfn_end(_start)
ASM_END(_start)
.text
asmfn_begin(_start_virtual)
.extern x86_replace_gdt
ASM_ENTRY(_start_virtual)
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
4: movl $0, pd0 + 0 * 4
movl $0, pd0 + 0 * 4
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
@@ -240,12 +242,17 @@ asmfn_begin(_start_virtual)
pushl $0 /* zero out EFLAGS */
popf
/* these are set by GRUB, see the comment at the beginning of _start */
/* parameter 2 for _boot() is header address */
push %ebx
/* parameter 1 for _boot() is MB2_BOOTLOADER_MAGIC */
push %eax
/* call _boot() from boot.c */
/*
* but before we call _boot(), we replace the GDT provided by GRUB
* with our own (see arch/mm/segment.S)
*/
call x86_replace_gdt
call _boot
/* this should never(TM) be reached */
@@ -253,6 +260,8 @@ asmfn_begin(_start_virtual)
5: hlt
jmp 5b
ASM_END(_start_virtual)
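Since %eax is pushed last and therefore becomes the first cdecl parameter, the calling sequence above implies a _boot() signature roughly like the following (inferred from the pushes and their comments, not quoted from boot.c):

	/* inferred sketch, see the parameter comments in _start_virtual */
	void _boot(u32 magic, void *header);	/* magic == MB2_BOOTLOADER_MAGIC */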
/* TODO: There are probably nicer ways of reserving stack memory. */
.section .bootstrap_stack, "aw", @nobits
stack_bottom:

@@ -0,0 +1,111 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#include <gay/config.h>
/** @brief Kernel code segment GDT selector */
#define X86_KERN_CS 0x08
/** @brief Kernel data segment GDT selector */
#define X86_KERN_DS 0x10
/** @brief Userland code segment GDT selector */
#define X86_USER_CS 0x18
/** @brief Userland data segment GDT selector */
#define X86_USER_DS 0x20
#ifndef _ASM_SOURCE
/*
* This code is mostly obsolete (until we need to actually manipulate the GDT
* when there is long mode support and we want to enable 32-bit compatibility
mode). I wrote this because I initially wanted to use it for an encoder that
* would dynamically initialize the GDT entries at runtime, but fuck that.
* I've lost way more than enough time on the utterly useless GDT, so we'll just
* leave this here as it is until we actually need it. See arch/mm/segment.S
* for the actual GDT entries and how the GDT is loaded.
*/
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/types.h>
/** @brief x86 Global Descriptor Table entry as used by the kernel. */
struct x86_gdt_entry {
u32 base;
unsigned limit:20;
unsigned flags:4;
#define X86_GDT_SIZE (1u << 2)
#define X86_GDT_GRANULARITY (1u << 3)
u8 access;
#define X86_GDT_ACCESSED (1u << 0)
#define X86_GDT_RW (1u << 1)
#define X86_GDT_DIRECTION (1u << 2) /* for data selectors (executable not set) */
#define X86_GDT_CONFORMING (1u << 2) /* for code selectors (executable set) */
#define X86_GDT_EXEC (1u << 3)
#define X86_GDT_TYPE (1u << 4) /* 1 for code/data, 0 for system (e.g. TSS) */
#define X86_GDT_PRIVL_SHIFT (5)
#define X86_GDT_PRIVL_MASK (3u << X86_GDT_PRIVL_SHIFT)
#define X86_GDT_PRIVL(n) (((n) << X86_GDT_PRIVL_SHIFT) & X86_GDT_PRIVL_MASK)
#define X86_GDT_PRESENT (1u << 7)
};
/** @brief x86 Global Descriptor Table entry as laid out in hardware. */
struct x86_insane_gdt_entry {
u16 limit0;
u16 base0;
u8 base1;
u8 access;
unsigned limit1:4;
unsigned flags:4;
u8 base2;
} __packed;
/** @brief The main GDT (defined in `arch/mm/segment.S`) */
extern struct x86_insane_gdt_entry x86_gdt[5];
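The encoder mentioned in the comment above would only be a handful of shifts anyway; here is a minimal sketch (not part of this commit, u64 assumed to come from <gay/types.h>) that packs the fields into the hardware layout and reproduces the hardcoded descriptors in arch/mm/segment.S:

	/* x86_gdt_encode(0, 0xfffff, 0x9a, 0xc) == 0x00cf9a000000ffff (kernel code) */
	static inline u64 x86_gdt_encode(u32 base, u32 limit, u8 access, u8 flags)
	{
		u64 desc = 0;
		desc |= limit & 0xffff;				/* limit bits 0-15  */
		desc |= (u64)(base & 0xffffff) << 16;		/* base bits 0-23   */
		desc |= (u64)access << 40;			/* access byte      */
		desc |= (u64)((limit >> 16) & 0xf) << 48;	/* limit bits 16-19 */
		desc |= (u64)(flags & 0xf) << 52;		/* flags nibble     */
		desc |= (u64)(base >> 24) << 56;		/* base bits 24-31  */
		return desc;
	}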
struct x86_tss {
u16 link; u16 _reserved0;
u32 esp0;
u16 ss0; u16 _reserved1;
u32 esp1;
u16 ss1; u16 _reserved2;
u32 esp2;
u16 ss2; u16 _reserved3;
u32 cr3;
u32 eip;
u32 eflags;
u32 eax;
u32 ecx;
u32 edx;
u32 ebx;
u32 esp;
u32 ebp;
u32 esi;
u32 edi;
u16 es; u16 _reserved4;
u16 cs; u16 _reserved5;
u16 ss; u16 _reserved6;
u16 ds; u16 _reserved7;
u16 fs; u16 _reserved8;
u16 gs; u16 _reserved9;
u16 ldtr; u16 _reserved10;
u16 _reserved11; u16 iopb_offset;
};
#endif /* not _ASM_SOURCE */
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -2,6 +2,7 @@
target_sources(gay_arch PRIVATE
page.c
segment.S
)
# This file is part of GayBSD.

@@ -27,16 +27,13 @@
#include <string.h>
/* from linker script */
extern void _image_start_phys;
extern void _image_end_phys;
/**
* @brief Page allocation bitmap.
* 0 = free, 1 = allocated.
*
* The pagemap manipulation code below is specifically kept agnostic to
* the type of the page map (u8/u16/u32 etc) so we can easily change it later
* if it has performance benefits (which it almost certainly has)
*/
static u8 *pagemap;
static size_t pagemap_len;
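With the current u8 map, the bookkeeping boils down to one bit per page frame; a sketch of the helpers this implies (hypothetical names, not the actual allocator code below):

	/* one bit per page frame: 0 = free, 1 = allocated */
	static inline void pagemap_set(size_t pfn)
	{
		pagemap[pfn / 8] |= (u8)(1 << (pfn % 8));
	}

	static inline int pagemap_get(size_t pfn)
	{
		return (pagemap[pfn / 8] >> (pfn % 8)) & 1;
	}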
@@ -56,7 +53,7 @@ struct x86_page_directory pd0;
static void setup_pagemap(void);
int kmalloc_init(void *start_phys, void *end_phys)
int mem_init(void *start_phys, void *end_phys)
{
/*
* if the kernel image is loaded within the paging region (which is
@@ -111,6 +108,8 @@ int map_page(void *phys, void *virt, enum mm_page_flags flags)
if (page == NULL)
return -ENOMEM;
memset(page, 0, PAGE_SIZE);
*(unsigned long *)pd_entry = 0;
pd_entry->shifted_address = (uintptr_t)page >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->rw = 1;
pd_entry->present = 1;
@@ -119,21 +118,43 @@ int map_page(void *phys, void *virt, enum mm_page_flags flags)
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
*(unsigned long *)pt_entry = 0;
pt_entry->rw = (flags & MM_PAGE_RW) != 0;
pt_entry->user = (flags & MM_PAGE_USER) != 0;
pt_entry->write_through = 0;
pt_entry->cache_disabled = 0;
pt_entry->accessed = 0;
pt_entry->dirty = 0;
pt_entry->global = 0;
pt_entry->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pt_entry->shifted_address = (uintptr_t)phys >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->present = 1;
vm_flush();
return 0;
}
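The two magic addresses above fall out of the recursive mapping: because the last page directory entry points back at the directory itself, all page tables (and the directory) become visible at fixed virtual addresses. Shown as C constants for reference (the macro names are made up, the code uses the raw values inline):

	/* 1023 << 22 == 0xffc00000: window onto all 1024 page tables */
	#define X86_PT_BASE ((struct x86_page_table *)0xffc00000)
	/* (1023 << 22) | (1023 << 12) == 0xfffff000: the directory itself */
	#define X86_PD_BASE ((struct x86_page_directory *)0xfffff000)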
void *unmap_page(void *virt)
{
# ifdef DEBUG
if (virt != PAGE_ALIGN(virt))
kprintf("map_page(): unaligned virtual address %p!\n", virt);
# endif
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
if (!pd_entry->present)
return NULL;
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
if (!pt_entry->present)
return NULL;
uintptr_t phys_shifted = pt_entry->shifted_address;
*(unsigned long *)pt_entry = 0;
return (void *)(phys_shifted << X86_PAGE_TABLE_ADDRESS_SHIFT);
}
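Putting the two together, a typical call sequence might look like this (hypothetical usage; the get_page() signature is assumed, see the allocator below):

	void *phys = get_page(MM_KERNEL);	/* assumed allocator call */
	if (phys != NULL && map_page(phys, (void *)0xdeadb000, MM_PAGE_RW) == 0) {
		/* ... use the mapping at 0xdeadb000 ... */
		void *phys_again = unmap_page((void *)0xdeadb000);
		/* phys_again == phys */
	}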
static inline int find_zero_bit(u8 bitfield)
{
int i;
@@ -220,7 +241,7 @@ void vm_flush(void)
__asm__ volatile(
" mov %%cr3, %%eax \n"
" mov %%eax, %%cr3 \n"
::: "eax"
::: "eax", "memory"
);
}
@@ -242,11 +263,11 @@ static void setup_pagemap(void)
dynpage_start += PAGE_SIZE;
/*
* As described in multiboot.S, the entry in the page directory points
* As described in multiboot.S, the last entry in the page directory points
* to the page directory itself so we can still manipulate it while we
* are in virtual address space. The second-last entry in the page
* directory is still free, so we put the page table for the bitmap there.
* If you do the math, the page table therefore maps addresses
* If you do the math, that page table therefore maps addresses
* 0xff800000-0xffbfffff, which is where we start off with the bitmap.
*/
pagemap = (u8 *)0xff800000;
@@ -257,9 +278,11 @@ static void setup_pagemap(void)
* So this is basically a replacement for a call to map_page().
*/
struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
pd->entries[1022].shifted_address = (uintptr_t)pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd->entries[1022].rw = 1;
pd->entries[1022].present = 1;
struct x86_page_directory_entry *pd_entry = &pd->entries[1022];
*(unsigned long *)pd_entry = 0;
pd_entry->shifted_address = (uintptr_t)pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
pd_entry->rw = 1;
pd_entry->present = 1;
vm_flush();
struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[1022];
@@ -283,6 +306,7 @@ static void setup_pagemap(void)
/* now add a page table entry for that page */
struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
*(unsigned long *)pt_entry = 0;
uintptr_t address = (uintptr_t)pagemap_phys + pt_index * PAGE_SIZE;
pt_entry->shifted_address = address >> X86_PAGE_TABLE_ADDRESS_SHIFT;
pt_entry->present = 1;

@@ -0,0 +1,65 @@
/* See the end of this file for copyright and license terms. */
#include <arch/segment.h>
#include <asm/common.h>
ASM_ENTRY(x86_replace_gdt)
push %ebp
mov %esp, %ebp
lgdt x86_gdt_desc /* load the new GDT */
ljmp $(X86_KERN_CS), $1f /* far jump to reload %cs with our code selector */
1: movl $(X86_KERN_DS), %eax /* reload all data segment registers */
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
pop %ebp
ret
ASM_END(x86_replace_gdt)
.data
.align 4
.word 0 /* padding */
x86_gdt_desc:
.word x86_gdt_end - x86_gdt - 1 /* limit */
.long x86_gdt /* base */
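For reference, the operand of lgdt is this packed 6-byte pseudo-descriptor, shown as a C struct (illustration only, not part of the commit):

	struct x86_gdt_pseudo_desc {
		u16 limit;	/* sizeof(gdt) - 1, in bytes */
		u32 base;	/* linear address of the first entry */
	} __packed;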
/*
* TODO: The GDT entry structure is so fundamentally fucked up that I gave
* up writing an encoder for it half way through, so we just use these
* hardcoded values for now. They were generated using the code on
* <https://wiki.osdev.org/GDT_Tutorial#Some_stuff_to_make_your_life_easy>
* and assign the entire 4 GiB region for both code and data as well as
* kernel and user mode. Even the Intel manual says you're not supposed
* to use segmentation anymore and just rely on paging for memory
* protection instead, so we gladly accept their advice.
*/
.align 8
x86_gdt:
.quad 0x0000000000000000 /* 0x00 null descriptor */
.quad 0x00cf9a000000ffff /* 0x08 kernel code, full 4 GiB */
.quad 0x00cf92000000ffff /* 0x10 kernel data, full 4 GiB */
.quad 0x00cffa000000ffff /* 0x18 user code, full 4 GiB */
.quad 0x00cff2000000ffff /* 0x20 user data, full 4 GiB */
x86_gdt_end:
.size x86_gdt, . - x86_gdt
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -6,12 +6,12 @@
#error "This header is only intended to be included from assembly files"
#endif
#define asmfn_begin(name) \
#define ASM_ENTRY(name) \
.global name; \
.type name, @function; \
name:
#define asmfn_end(name) \
#define ASM_END(name) \
.size name, . - name
/*

@@ -14,6 +14,8 @@
#define __aligned(bytes) __attribute__(( aligned(bytes) ))
#define __packed __attribute__(( packed ))
#define __section(name) __attribute__(( section(#name) ))
#ifndef NULL

@@ -7,7 +7,7 @@
#pragma once
#include <gay/cdefs.h>
#include <gay/util.h>
/**
* @brief Simple circular list header.

@@ -83,13 +83,27 @@ void put_page(void *page);
* @param phys Physical address of the page
* @param virt Virtual address to map the page to
* @param flags Flags to apply to the page
* @returns 0 on success, or `-ENOMEM?` if OOM (for allocating new page tables)
* @returns 0 on success, or `-ENOMEM` if OOM (for allocating new page tables)
*/
int map_page(void *phys, void *virt, enum mm_page_flags flags);
/**
* @brief Remove a page mapping.
*
* @param virt Virtual address the page is mapped to, must be page aligned
* @returns The physical page address that was being mapped
*/
void *unmap_page(void *virt);
/** @brief Flush the TLB. */
void vm_flush(void);
/**
* @brief Called internally by `kmalloc_init()` to set up the page frame
* allocator and other low level paging related stuff.
*/
int mem_init(void *start, void *end);
/**
* @brief Initialize the memory allocator.
*

@@ -9,6 +9,12 @@
/* yeah this is probably the most stupid memory allocator there is */
int kmalloc_init(void *phys_start, void *phys_end)
{
int err = mem_init(phys_start, phys_end);
return err;
}
void *kmalloc(size_t size, enum mm_flags flags)
{
if (flags != MM_KERNEL) {
