boot: enable paging in boot sequence

Branch: main
Author: anna (3 years ago)
parent d436d9b203
commit 17320f2571
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -4,6 +4,7 @@
#include <arch/multiboot.h>
#include <gay/config.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
@@ -28,7 +29,7 @@ enum vga_color {
VGA_COLOR_WHITE = 15,
};
#define FB_ADDRESS 0xb8000
#define FB_ADDRESS 0xc03ff000 /* mapped from 0x000b8000 */
#define FB_LINES 24
#define FB_COLS 80
@@ -64,12 +65,15 @@ static void fb_init(enum vga_color fg, enum vga_color bg);
static void print_gay_propaganda(void);
/** @brief Translate a physical memory address to a virtual (mapped) one. */
#define phys_to_virt(ptr) ( (typeof(ptr))( (void *)(ptr) + CFG_KERNEL_RELOCATE ) )
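A quick worked example of the arithmetic (editorial aside; the address is made up for illustration, and CFG_KERNEL_RELOCATE is assumed to be its default of 0xc0000000):

	void *phys = (void *)0x00104000;  /* a physical pointer handed over by grub */
	void *virt = phys_to_virt(phys);  /* == (void *)0xc0104000 */

Dereferencing the result is only safe if that physical page is actually mapped at the relocated range -- see the TODO about the multiboot tag location in the boot assembly below.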
static struct mb2_tag *next_tag(struct mb2_tag *tag);
static void handle_tag(struct mb2_tag *tag);
static void handle_mmap_tag(struct mb2_tag_mmap *tag);
static const char *mmap_type_name(u32 type);
extern int main(int argc, char *argv[]);
extern void _start(void);
void _boot(u32 magic, void *address)
{
@@ -81,8 +85,16 @@ void _boot(u32 magic, void *address)
return;
}
//kprintf("%p\n", address);
print_gay_propaganda();
/*
* all data structures passed from grub are referenced by physical
* addresses, so we need to be careful to translate every pointer to a
* virtual address before accessing it.
*/
address = phys_to_virt(address);
for (struct mb2_tag *tag = address + 8; tag != NULL; tag = next_tag(tag))
handle_tag(tag);
@@ -110,9 +122,9 @@ static inline void handle_mmap_tag(struct mb2_tag_mmap *tag)
void *region = NULL;
size_t region_len = 0;
struct mb2_mmap_entry *entry = tag->entries;
struct mb2_mmap_entry *entry = &tag->entries[0];
while ((void *)entry < (void *)tag + tag->tag.size) {
kprintf("start = %p, len = %p, type = %s\n",
kprintf("[%p-%p] %s\n",
(void *)entry->addr,
(void *)entry->len,
mmap_type_name(entry->type));
@@ -130,10 +142,10 @@ static inline void handle_mmap_tag(struct mb2_tag_mmap *tag)
while (1);
}
if (kmalloc_init(region, region + region_len) != 0) {
kprintf("kmalloc_init() failed! Aborting.\n");
while (1);
}
// if (kmalloc_init(region, region + region_len) != 0) {
// kprintf("kmalloc_init() failed! Aborting.\n");
// while (1);
// }
}
static inline struct mb2_tag *next_tag(struct mb2_tag *tag)
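The body of next_tag() falls outside the hunks shown here. For orientation, a minimal sketch of what a multiboot2 tag iterator looks like, based on the multiboot2 specification (tags are padded to 8-byte alignment and the list ends with a tag of type 0); the field names `type` and `size` follow the spec and may differ from the project's struct mb2_tag:

	static inline struct mb2_tag *next_tag_sketch(struct mb2_tag *tag)
	{
		/* advance past this tag, rounding its size up to the 8-byte tag alignment */
		tag = (void *)tag + ((tag->size + 7) & ~7u);
		if (tag->type == 0) /* end tag: type 0, size 8 */
			return NULL; /* matches the loop condition in _boot() */
		return tag;
	}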

@@ -5,14 +5,13 @@
#include <arch/multiboot.h>
/* see arch/x86/config/kernel.ld */
.extern _kernel_end
.extern _image_end
.extern _stack_end
.extern _image_start_phys
.extern _kernel_end_phys
.extern _image_end_phys
.extern _boot
.section .multiboot
.section .multiboot.data, "aw"
.global mb2_load_start
mb2_load_start:
.align MB2_HEADER_ALIGN
@@ -37,11 +36,11 @@ address_tag_start: /* struct mb2_header_tag_address */
/* header_addr */
.long header_start
/* load_addr */
.long mb2_load_start
.long _image_start_phys
/* load_end_addr */
.long _kernel_end
.long _kernel_end_phys
/* bss_end_addr */
.long _image_end
.long _image_end_phys
address_tag_end:
.align MB2_TAG_ALIGN
@@ -85,11 +84,145 @@ end_tag_start: /* struct mb2_header_tag */
end_tag_end:
header_end:
.size header_start, . - header_start
.text
/*
* Actual boot sequence comes here.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging, map the lower 4 MiB (including the VGA
* console framebuffer) to 0xc0000000 where the kernel image itself is linked,
* and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We have to be careful not to touch eax and ebx during the entire routine,
* because they hold the multiboot2 magic number and the tag table pointer
* respectively, both of which need to be passed on to _boot(). We can't push
* them onto the stack, because the stack doesn't exist yet.
*/
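Before the assembly itself, an editorial sketch (not part of the commit) of roughly the same sequence written as C, assuming plain 32-bit non-PAE paging with 1024 32-bit entries per table; raw uint32_t arrays stand in for the real pt0/pd0 from ../mm/page.c, and the helper name is made up:

	#include <stdint.h>

	#define PAGE_PRESENT 0x001u
	#define PAGE_RW      0x002u

	static uint32_t pt0_sketch[1024] __attribute__((aligned(4096))); /* stand-in for pt0 */
	static uint32_t pd0_sketch[1024] __attribute__((aligned(4096))); /* stand-in for pd0 */

	static void early_map_sketch(uint32_t image_start_phys, uint32_t image_end_phys)
	{
		/* map each 4 KiB page of the kernel image; for the low 4 MiB the
		 * table index is simply the physical page frame number */
		for (uint32_t phys = image_start_phys; phys < image_end_phys; phys += 4096)
			pt0_sketch[phys / 4096] = phys | PAGE_PRESENT | PAGE_RW;

		/* last entry of the table: the VGA text framebuffer */
		pt0_sketch[1023] = 0x000b8000u | PAGE_PRESENT | PAGE_RW;

		/* point directory slots 0 (identity) and 768 (0xc0000000) at the same
		 * table; the real code stores the *physical* address of pt0 here */
		pd0_sketch[0]   = (uint32_t)(uintptr_t)pt0_sketch | PAGE_PRESENT | PAGE_RW;
		pd0_sketch[768] = (uint32_t)(uintptr_t)pt0_sketch | PAGE_PRESENT | PAGE_RW;

		/* loading cr3 and setting PG/WP in cr0 can only be done in assembly */
	}

The assembly below performs the same steps, with the physical addresses of pt0 and pd0 obtained through phys_addr().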
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/page.c */
.extern pt0 /* first page table -- see ../mm/page.c */
.section .multiboot.text, "a"
#define PAGE_SIZE 4096
/*
* referencing symbols defined in C requires subtracting the relocation
* offset first, because the C code is linked at virtual addresses
*/
#define phys_addr(c_symbol) (c_symbol - CFG_KERNEL_RELOCATE)
asmfn_begin(_start)
mov $_stack_end, %esp
/*
* 1023 of the 1024 pages in the page table are mapped to the low memory
* starting at 1 MiB, the address where the kernel image is loaded
* ($_image_start_phys). We currently assume the kernel is < 4 MiB
* and therefore can be mapped within a single page table.
* As the kernel gets more and more bloated, this might no longer be the
* case at some point, so we should ideally add support for multiple
* page tables soon.
*/
mov $phys_addr(pt0), %edi
xor %esi, %esi
1: cmp $_image_start_phys, %esi
jl 2f /* skip the pages that are below the kernel image */
/* TODO: grub stores the multiboot tags right after the kernel image,
so we might need to map more than just what we do here */
cmp $_image_end_phys, %esi
jge 3f /* exit the loop when we have mapped the entire kernel image */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
mov %edx, (%edi)
2: add $PAGE_SIZE, %esi /* advance to next physical page address */
add $4, %edi /* advance to next pointer in the page table */
loop 1b
/*
* Conveniently, the VGA character framebuffer fits exactly into one
* page. The physical address range
* 0x000b8000 - 0x000b8fff
* gets mapped to the virtual address range
* 0xc03ff000 - 0xc03fffff
* which is the last page of our 4 MiB page table (index 1023).
* We also set the Present and RW bits by OR'ing with 0x003.
*/
3: movl $(0x000b8000 | 0x003), phys_addr(pt0) + 1023 * 4
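To double-check the index arithmetic (editorial aside, using the standard 10/10/12-bit split of a 32-bit virtual address):

	0xc03ff000 >> 22           = 768   (page directory index, same slot as the kernel mapping)
	(0xc03ff000 >> 12) & 0x3ff = 1023  (page table index, i.e. the last entry of pt0)
	0xc03ff000 & 0xfff         = 0     (offset within the page)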
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x00400000
* becomes available at two virtual address ranges:
* 0x00000000 - 0x00400000 (identity mapping)
* 0xc0000000 - 0xc0400000 (relocated mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
* and would thus immediately cause a page fault.
*
* The address of pt0 is OR'd with 0x003 (we actually have to use addition
* here because the assembler can't apply a bitwise OR to a relocated
* symbol, but the result is the same) to set the Present and RW flags in
* the page directory entry.  The lower 12 bits of page table addresses
* are always zero because they are page aligned, so the MMU uses them as
* flags (see ../include/arch/page.h).
*
* The offset added to pd0 is the page directory index multiplied by the
* size of a single entry (a pointer size):
* (0x00000000 / PAGE_SIZE) / 1024 entries per page table = 0
* (0xc0000000 / PAGE_SIZE) / 1024 entries per page table = 768
*/
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 0 * 4 /* 0x00000000 */
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 768 * 4 /* 0xc0000000 */
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $phys_addr(pd0), %ecx
mov %ecx, %cr3
/* set the paging and write-protect bit in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
/*
* Alright, we are on virtual addresses!
* Now, we are going to do an absolute jump to the mapped kernel code
* somewhere at 0xc01*****.
*/
lea 4f, %ecx
jmp *%ecx
/*
* this is only here because a function can't span multiple sections;
* the actual code flow jumps from low memory (.multiboot.text section)
* to high memory (.text section).
*/
asmfn_end(_start)
.text
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
4: movl $0, pd0 + 0 * 4
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
/* set up the initial stack frame */
mov $stack_top, %ebp
mov %ebp, %esp
/* reset EFLAGS */
pushl $0
@@ -104,10 +237,15 @@ asmfn_begin(_start)
call _boot
/* this should never(TM) be reached */
halt_loop:
hlt
jmp halt_loop
asmfn_end(_start)
cli
5: hlt
jmp 5b
/* TODO: There are probably nicer ways of reserving stack memory. */
.section .bootstrap_stack, "aw", @nobits
stack_bottom:
.skip 16384 /* 16 KiB for the stack should be plenty for now */
stack_top:
/*
* This file is part of GayBSD.

@@ -3,59 +3,67 @@
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH(i386)
KERNEL_ORIGIN = DEFINED(KERNEL_ORIGIN) ? KERNEL_ORIGIN : 0x00100000;
STACK_SIZE = 0x1000;
/* not strictly needed because we produce a binary image but can't hurt */
ENTRY(_start)
SECTIONS {
. = KERNEL_ORIGIN;
_image_start = .;
_kernel_start = .;
_image_start = . + KERNEL_RELOCATE;
_image_start_phys = .;
_kernel_start = . + KERNEL_RELOCATE;
_kernel_start_phys = .;
.multiboot.data : {
. = ALIGN(8);
KEEP(*(.multiboot.data))
}
.multiboot.text : {
. = ALIGN(8);
KEEP(*(.multiboot.text))
}
.text BLOCK(4K) : ALIGN(4K) {
. += KERNEL_RELOCATE;
/*
* All sections from here on are page aligned so we can
* set different access permissions for each of them
*/
.text ALIGN(4K) : AT(ADDR(.text) - KERNEL_RELOCATE) {
_text_start = .;
/*
* This is where the multiboot header is stored which grub2
* or any other compliant bootloader picks up, don't move away
*/
KEEP(*(.multiboot))
*(.text .text.* .gnu.linkonce.t.*)
_text_end = .;
}
.rodata BLOCK(4K) : ALIGN(4K) {
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_RELOCATE) {
_rodata_start = .;
*(.rodata .rodata.* .gnu.linkonce.r.*)
_rodata_end = .;
}
.data BLOCK(4K) : ALIGN(4K) {
.data ALIGN(4K) : AT(ADDR(.data) - KERNEL_RELOCATE) {
_data_start = .;
*(.data .data.*)
_data_end = .;
}
_kernel_end = .;
_kernel_end_phys = . - KERNEL_RELOCATE;
.bss BLOCK(4K) : ALIGN(4K) {
_bss_start = . ;
.bss ALIGN(4K) : AT(ADDR(.bss) - KERNEL_RELOCATE) {
_bss_start = .;
*(COMMON)
*(.bss .bss.*)
_bss_end = . ;
}
.stack(NOLOAD) : {
. = ALIGN(8);
_stack_start = .;
. = . + STACK_SIZE;
. = ALIGN(8);
*(.bss)
_bss_end = .;
_stack_start = .;
*(.bootstrap_stack)
_stack_end = .;
}
_image_end = .;
_image_end_phys = . - KERNEL_RELOCATE;
}
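As a quick sanity check of the resulting address layout (editorial aside; KERNEL_ORIGIN = 0x00100000 and KERNEL_RELOCATE = 0xc0000000 as set in the build configuration further down in this commit):

	.multiboot.data / .multiboot.text:  VMA = LMA, starting at 0x00100000
	.text and everything after it:      VMA starts near 0xc0100000,
	                                    LMA = VMA - 0xc0000000, i.e. near 0x00100000

So the whole image is loaded at 1 MiB physical, while the kernel proper is linked to run at 0xc01xxxxx once paging is enabled.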
/*

@@ -1,6 +1,7 @@
/* See the end of this file for copyright and license terms. */
KERNEL_ORIGIN = @KERNEL_ORIGIN@;
KERNEL_RELOCATE = @KERNEL_RELOCATE@;
/*
* This file is part of GayBSD.

@@ -10,7 +10,9 @@
#include <arch/page.h>
#include <gay/cdefs.h>
#include <gay/config.h>
#include <gay/errno.h>
#include <gay/kprintf.h>
#include <gay/mm.h>
#include <gay/types.h>
@@ -18,8 +20,8 @@
#include <string.h>
extern void _image_start;
extern void _image_end;
extern void _image_start_phys;
extern void _image_end_phys;
/* 0 = free, 1 = allocated */
static u8 *pagemap;
@@ -27,6 +29,15 @@ static size_t pagemap_len;
static void *page_start;
static void *page_end;
/**
* @brief First page table for low memory (0 - 4 MiB).
* This is initialized by the early boot routine in assembly so that paging
* can be enabled (the kernel itself is mapped to `0xc0000000` by default).
*/
struct x86_page_table pt0;
/** @brief First page directory for low memory. */
struct x86_page_directory pd0;
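The real definitions of struct x86_page_table and struct x86_page_directory live in arch/page.h, which is not part of this diff. For orientation, a plausible minimal layout for 32-bit non-PAE paging could look like the sketch below; all names and the exact bitfield split are illustrative, not the project's:

	#include <stdint.h>

	/* one 32-bit entry; the low 12 bits are flag bits because the stored
	 * address is always page aligned */
	struct pte_sketch {
		uint32_t present : 1;
		uint32_t rw      : 1;
		uint32_t user    : 1;
		uint32_t misc    : 9;   /* write-through, cache, accessed, ... */
		uint32_t frame   : 20;  /* physical page frame number */
	};

	struct page_table_sketch {
		struct pte_sketch entries[1024];
	} __attribute__((aligned(4096)));

	/* the directory has the same shape, its entries point at page tables */
	struct page_directory_sketch {
		struct pte_sketch entries[1024];
	} __attribute__((aligned(4096)));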
int kmalloc_init(void *start, void *end)
{
/*
@@ -35,8 +46,8 @@ int kmalloc_init(void *start, void *end)
* to the end of the kernel image so we won't hand out pages that
* actually store kernel data
*/
if (&_image_start >= start && &_image_start <= end)
start = &_image_end;
if (&_image_start_phys >= start && &_image_start_phys <= end)
start = &_image_end_phys;
page_start = ptr_align(start, PAGE_SIZE_LOG2);
page_end = ptr_align(end, -PAGE_SIZE_LOG2);
@@ -53,7 +64,7 @@ int kmalloc_init(void *start, void *end)
pagemap_len--;
}
kprintf("Kernel image: %p - %p\n", &_image_start, &_image_end);
kprintf("Kernel image: %p - %p\n", &_image_start_phys, &_image_end_phys);
kprintf("Page bitmap: %p - %p\n", pagemap, pagemap + pagemap_len);
kprintf("Paging area: %p - %p\n", page_start, page_end);
kprintf("Available memory: %u bytes (%u pages)\n",
@@ -62,6 +73,21 @@ int kmalloc_init(void *start, void *end)
return 0;
}
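A worked example of the bookkeeping overhead (editorial aside; this assumes one bit per page, which is what find_zero_bit() further down suggests): managing 64 MiB of RAM means

	64 MiB / 4 KiB per page        = 16384 pages
	16384 pages / 8 bits per byte  = 2048 bytes of pagemap

so the bitmap for such a region fits comfortably within a single 4 KiB page.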
// int mem_init(void)
// {
// struct x86_page_directory *map = get_page();
// if (map == NULL)
// return -ENOMEM;
// memset(map, 0, sizeof(*map));
// }
void map_page(vm_info_t *map, void *physical, void *virtual, enum mm_flags flags)
{
}
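The body of map_page() is still empty in this commit. As a rough idea of what a 32-bit non-PAE implementation eventually has to do (purely illustrative; vm_info_t and enum mm_flags are the project's own types whose layout isn't visible in this diff, so the sketch works on raw 32-bit entries and made-up names instead):

	#include <stdint.h>

	/* hypothetical sketch only -- not the project's map_page() */
	static void map_page_sketch(uint32_t *page_directory, uint32_t phys,
	                            uint32_t virt, uint32_t flags)
	{
		uint32_t pd_index = virt >> 22;            /* top 10 bits */
		uint32_t pt_index = (virt >> 12) & 0x3ff;  /* middle 10 bits */

		/* a real implementation must allocate a page table first if the
		 * directory slot is not present yet; that step is omitted here */
		uint32_t *page_table = (uint32_t *)(page_directory[pd_index] & ~0xfffu);

		page_table[pt_index] = (phys & ~0xfffu) | (flags & 0xfffu) | 0x001u;
	}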
static inline int find_zero_bit(u8 bitfield)
{
int i;

@@ -10,6 +10,8 @@ include("${CMAKE_CURRENT_LIST_DIR}/config-${ARCH}.cmake")
set(KERNEL_ORIGIN "0x100000" CACHE STRING "Physical address where the kernel is loaded")
set(KERNEL_RELOCATE "0xc0000000" CACHE STRING "Virtual address the kernel is mapped to (don't touch this)")
set(POISON_PAGES ON CACHE BOOL "Poison pages after allocate and free")
# This file is part of GayBSD.

@@ -8,7 +8,7 @@
#define asmfn_begin(name) \
.global name; \
.type name, function; \
.type name, @function; \
name:
#define asmfn_end(name) \

@@ -25,8 +25,8 @@
/** @brief Physical address where the kernel is loaded */
#define CFG_KERNEL_ORIGIN @KERNEL_ORIGIN@
/** @brief Initial kernel stack size in bytes */
#define CFG_STACK_SIZE @STACK_SIZE@
/** @brief Virtual address the kernel is mapped to */
#define CFG_KERNEL_RELOCATE @KERNEL_RELOCATE@
/** @brief Poison dynamic pages when allocating and freeing them */
#define CFG_POISON_PAGES @POISON_PAGES@
