|
|
|
@ -5,14 +5,13 @@
|
|
|
|
|
#include <arch/multiboot.h>
|
|
|
|
|
|
|
|
|
|
/* see arch/x86/config/kernel.ld */
|
|
|
|
|
.extern _kernel_end
|
|
|
|
|
.extern _image_end
|
|
|
|
|
.extern _stack_end
|
|
|
|
|
.extern _image_start_phys
|
|
|
|
|
.extern _kernel_end_phys
|
|
|
|
|
.extern _image_end_phys
|
|
|
|
|
|
|
|
|
|
.extern _boot
|
|
|
|
|
|
|
|
|
|
.section .multiboot
|
|
|
|
|
.section .multiboot.data, "aw"
|
|
|
|
|
|
|
|
|
|
.global mb2_load_start
|
|
|
|
|
mb2_load_start:
|
|
|
|
|
|
|
|
|
|
.align MB2_HEADER_ALIGN
|
|
|
|
@ -37,11 +36,11 @@ address_tag_start: /* struct mb2_header_tag_address */
|
|
|
|
|
/* header_addr */
|
|
|
|
|
.long header_start
|
|
|
|
|
/* load_addr */
|
|
|
|
|
.long mb2_load_start
|
|
|
|
|
.long _image_start_phys
|
|
|
|
|
/* load_end_addr */
|
|
|
|
|
.long _kernel_end
|
|
|
|
|
.long _kernel_end_phys
|
|
|
|
|
/* bss_end_addr */
|
|
|
|
|
.long _image_end
|
|
|
|
|
.long _image_end_phys
|
|
|
|
|
address_tag_end:
|
|
|
|
|
|
|
|
|
|
.align MB2_TAG_ALIGN
|
|
|
|
@ -85,11 +84,145 @@ end_tag_start: /* struct mb2_header_tag */
|
|
|
|
|
end_tag_end:
|
|
|
|
|
|
|
|
|
|
header_end:
|
|
|
|
|
.size header_start, . - header_start
|
|
|
|
|
|
|
|
|
|
.text
|
|
|
|
|
/*
|
|
|
|
|
* Actual boot sequence comes here.
|
|
|
|
|
*
|
|
|
|
|
* This is based on the example code from the OSDev.org wiki:
|
|
|
|
|
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
|
|
|
|
|
*
|
|
|
|
|
* We basically just turn on paging and map the lower 4 MB (including the VGA
|
|
|
|
|
* console framebuffer) to 0xc0000000 where the kernel image itself is linked
|
|
|
|
|
 * to, and then jump to that region and set up the stack. From there, _boot()
|
|
|
|
|
* takes care of everything.
|
|
|
|
|
*
|
|
|
|
|
* We need to pay special attention to not touch eax and ebx during the entire
|
|
|
|
|
* routine, because those store the multiboot2 magic number and tag table
|
|
|
|
|
* pointer respectively which need to be passed on to _boot(). We can't push
|
|
|
|
|
* them on the stack, because the stack doesn't exist yet.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
.extern _boot /* main boot routine -- see ./boot.c */
|
|
|
|
|
|
|
|
|
|
.extern pd0 /* initial page directory -- see ../mm/page.c */
|
|
|
|
|
.extern pt0 /* first page table -- see ../mm/page.c */
|
|
|
|
|
|
|
|
|
|
.section .multiboot.text, "a"
|
|
|
|
|
|
|
|
|
|
#define PAGE_SIZE 4096
|
|
|
|
|
/*
|
|
|
|
|
* referencing symbols from C requires subtracting the relocation offset
|
|
|
|
|
* first because the C code is linked against virtual address space
|
|
|
|
|
*/
|
|
|
|
|
#define phys_addr(c_symbol) (c_symbol - CFG_KERNEL_RELOCATE)
|
|
|
|
|
|
|
|
|
|
asmfn_begin(_start)
	/*
	 * NOTE(review): %esp is loaded with the *virtual* _stack_end before
	 * paging is enabled, so it points at unmapped memory at this point.
	 * Nothing is pushed before %esp is reloaded after the jump to high
	 * memory, so it is harmless, but confirm this line is not pre-image
	 * residue of the old (pre-paging) boot path.
	 */
	mov	$_stack_end, %esp

	/*
	 * Fill pt0 so every page of the kernel image is mapped at its own
	 * physical address.  Entries below _image_start_phys (1 MiB, where
	 * the image is loaded) are skipped and left untouched; entry 1023 is
	 * claimed for the VGA framebuffer below.  We currently assume the
	 * kernel is < 4 MiB and therefore fits within this single page
	 * table; as the kernel gets more and more bloated this might not
	 * hold anymore, so we should ideally support multiple page tables.
	 *
	 * Register roles: %edi = current pt0 entry, %esi = physical address.
	 */
	mov	$phys_addr(pt0), %edi
	xor	%esi, %esi

1:	cmp	$_image_start_phys, %esi
	jb	2f			/* skip pages below the kernel image
					   (unsigned: addresses have no sign) */

	/* TODO: grub stores the multiboot tags right after the kernel image,
	   so we might need to map more than just what we do here */
	cmp	$_image_end_phys, %esi
	jae	3f			/* entire image mapped -- leave loop */

	mov	%esi, %edx
	or	$0x003, %edx		/* Present | RW, see below */
	mov	%edx, (%edi)

2:	add	$PAGE_SIZE, %esi	/* advance to next physical page */
	add	$4, %edi		/* advance to next pt0 entry */
	jmp	1b			/* fixed: was `loop 1b`, but %ecx is
					   never initialized (multiboot only
					   defines %eax/%ebx); termination is
					   handled by the jae above */

	/*
	 * Conveniently, the VGA character framebuffer fits exactly into one
	 * page.  Map physical 0x000b8000-0x000b8fff into the last entry of
	 * the table (index 1023), i.e. virtual 0xc03ff000-0xc03fffff, with
	 * the Present and RW bits set by OR'ing with 0x003.
	 */
3:	movl	$(0x000b8000 | 0x003), phys_addr(pt0) + 1023 * 4

	/*
	 * Point page directory entries 0 and 768 at pt0 so the low 4 MiB of
	 * physical memory appears both at 0x00000000 (identity mapping) and
	 * at 0xc0000000 (relocated mapping).  The identity mapping is needed
	 * because the program counter still holds a physical address when
	 * paging is switched on below and would otherwise fault immediately.
	 *
	 * Adding 0x003 (equivalent to OR: the low 12 bits of a page-aligned
	 * address are zero, they hold the MMU flags -- see
	 * ../include/arch/page.h) sets Present | RW on the entry.
	 * Entry index = vaddr / PAGE_SIZE / 1024 entries per table:
	 *   0x00000000 -> index 0, 0xc0000000 -> index 768 (not 786!),
	 * each entry being 4 bytes wide.
	 */
	movl	$(phys_addr(pt0) + 0x003), phys_addr(pd0) + 0 * 4   /* 0x00000000 */
	movl	$(phys_addr(pt0) + 0x003), phys_addr(pd0) + 768 * 4 /* 0xc0000000 */

	/* put the (physical) address of pd0 into cr3 so it will be used */
	mov	$phys_addr(pd0), %ecx
	mov	%ecx, %cr3

	/* set Paging (bit 31) and Write Protect (bit 16) in cr0 */
	mov	%cr0, %ecx
	or	$0x80010000, %ecx
	mov	%ecx, %cr0

	/*
	 * Alright, we are on virtual addresses!  Do an absolute jump to the
	 * mapped kernel code somewhere at 0xc01***** -- a relative jump
	 * would keep executing in low memory.
	 */
	lea	4f, %ecx
	jmp	*%ecx

/*
 * this is just because you can't span functions across multiple sections,
 * the actual code flow makes a jump from low (.multiboot.text section)
 * to high memory (.text section).
 */
asmfn_end(_start)
|
|
|
|
|
|
|
|
|
|
.text

	/*
	 * High-memory landing point: execution resumes here (at the
	 * relocated 0xc01***** addresses) after the absolute jump at the
	 * end of the .multiboot.text stub.  The identity mapping is no
	 * longer needed now that we run fully in high memory, so drop it.
	 */
4:	movl	$0, pd0 + 0 * 4

	/* bonk the TLB by reloading cr3 so the dropped mapping takes effect */
	mov	%cr3, %ecx
	mov	%ecx, %cr3

	/* set up the initial stack frame for _boot() */
	mov	$stack_top, %ebp
	mov	%ebp, %esp

	/* reset EFLAGS */
	pushl	$0
|
|
|
|
@ -104,10 +237,15 @@ asmfn_begin(_start)
|
|
|
|
|
call _boot
|
|
|
|
|
|
|
|
|
|
/* this should never(TM) be reached */
|
|
|
|
|
/* NOTE(review): the two halt sequences below (halt_loop and the numeric-
   label loop after asmfn_end) look like the pre- and post-image lines of a
   flattened diff; only one of them can be the real epilogue.  Confirm
   against repository history before cleaning up. */
halt_loop:
|
|
|
|
|
hlt
|
|
|
|
|
jmp halt_loop
|
|
|
|
|
asmfn_end(_start)
|
|
|
|
|
/* NOTE(review): presumably the post-image variant -- `cli` keeps interrupts
   from waking the CPU back out of `hlt`. */
cli
|
|
|
|
|
5: hlt
|
|
|
|
|
jmp 5b
|
|
|
|
|
|
|
|
|
|
/* TODO: There are probably nicer ways of reserving stack memory. */
.section .bootstrap_stack, "aw", @nobits

/* boot stack grows downward from stack_top toward stack_bottom */
stack_bottom:
	.skip	16384			/* 16 KiB is plenty for now */
stack_top:
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* This file is part of GayBSD.
|
|
|
|
|