kern/arch/x86/boot/multiboot.S

/* See the end of this file for copyright and license terms. */
#include <asm/common.h>
#include <gay/config.h>
#include <arch/multiboot.h>
/* see arch/x86/config/kernel.ld */
.extern _image_start_phys
.extern _kernel_end_phys
.extern _image_end_phys
.section .multiboot.data, "aw"
.global mb2_load_start
mb2_load_start:
.align MB2_HEADER_ALIGN
header_start: /* struct mb2_header */
/* magic */
.long MB2_HEADER_MAGIC
/* architecture */
.long MB2_ARCHITECTURE_I386
/* header_length */
.long header_end - header_start
/* checksum: magic, architecture, header_length and checksum must sum to zero (mod 2^32) */
.long (1 << 32) - MB2_HEADER_MAGIC - MB2_ARCHITECTURE_I386 - (header_end - header_start)
.align MB2_TAG_ALIGN
address_tag_start: /* struct mb2_header_tag_address */
/* type */
.short MB2_HEADER_TAG_ADDRESS
/* flags */
.short MB2_HEADER_TAG_OPTIONAL
/* size */
.long address_tag_end - address_tag_start
/* header_addr */
.long header_start
/* load_addr */
.long _image_start_phys
/* load_end_addr */
.long _kernel_end_phys
/* bss_end_addr */
.long _image_end_phys
address_tag_end:
.align MB2_TAG_ALIGN
entry_address_tag_start: /* struct mb2_header_tag_entry_address */
/* type */
.short MB2_HEADER_TAG_ENTRY_ADDRESS
/* flags */
.short MB2_HEADER_TAG_OPTIONAL
/* size */
.long entry_address_tag_end - entry_address_tag_start
/* entry_addr */
.long _start
entry_address_tag_end:
#if 0 /* TODO: implement graphics */
.align MB2_TAG_ALIGN
framebuffer_tag_start: /* struct mb2_header_tag_framebuffer */
/* type */
.short MB2_HEADER_TAG_FRAMEBUFFER
/* flags */
.short MB2_HEADER_TAG_OPTIONAL
/* size */
.long framebuffer_tag_end - framebuffer_tag_start
/* width */
.long 1024
/* height */
.long 768
/* depth */
.long 32
framebuffer_tag_end:
#endif /* framebuffer disabled */
.align MB2_TAG_ALIGN
end_tag_start: /* struct mb2_header_tag */
/* type */
.short MB2_HEADER_TAG_END
/* flags */
.short 0
/* size */
.long end_tag_end - end_tag_start
end_tag_end:
header_end:
.size header_start, . - header_start
/*
* Actual boot sequence comes here.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging and map the lower 4 MiB (including the VGA
* console framebuffer) to 0xc0000000, where the kernel image itself is linked,
* and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We need to pay special attention not to touch %eax and %ebx during the
* entire routine, because those store the multiboot2 magic number and tag
* table pointer respectively, which need to be passed on to _boot(). We can't
* push them on the stack, because the stack doesn't exist yet.
*/
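/*
* For illustration only: in C terms, the handoff at the end of
* _start_virtual below amounts to something like
*
*     _boot(magic, mb2_info);
*
* with magic taken from %eax and mb2_info from %ebx. The parameter names
* here are placeholders; the exact prototype is whatever ./boot.c declares.
*/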
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/page.c */
.extern pt0 /* first page table -- see ../mm/page.c */
.section .multiboot.text, "a"
#define PAGE_SIZE 4096
/*
* referencing symbols from C requires subtracting the relocation offset
* first because the C code is linked against virtual address space
*/
#define phys_addr(c_symbol) (c_symbol - CFG_KERNEL_RELOCATE)
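/*
* Example (assuming CFG_KERNEL_RELOCATE is 0xc0000000, the virtual base
* address the kernel is linked at): a C symbol living at virtual address
* 0xc0104000 resolves to
*     phys_addr(symbol) = 0xc0104000 - 0xc0000000 = 0x00104000
* in physical memory.
*/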
ASM_ENTRY(_start)
/*
* The kernel image starts at 1 MiB into physical memory.
* We currently assume the kernel is < 3 MiB
* and therefore can be mapped within a single page table.
* As the kernel gets more and more bloated, this might no longer hold at
* some point, so we should ideally add support for multiple page tables
* soon.
*/
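/*
* (A single page table maps 1024 entries * 4096 bytes = 4 MiB, and the
* image is loaded 1 MiB in, which is where the 3 MiB limit comes from.)
*/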
mov $phys_addr(pt0), %edi /* %edi = physical address of the current page table entry */
xor %esi, %esi /* %esi = physical address of the current page, starting at 0 */
1: cmp $_image_start_phys, %esi
jl 2f /* skip the pages that are below the kernel image */
/* TODO: grub stores the multiboot tags right after the kernel image,
so we might need to map more than just what we do here */
cmp $_image_end_phys, %esi
jge 3f /* exit the loop when we have mapped the entire kernel image */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
mov %edx, (%edi)
2: add $PAGE_SIZE, %esi /* advance to next physical page address */
add $4, %edi /* advance to next pointer in the page table */
jmp 1b /* keep going; the loop exits via the jge above once the whole image is mapped */
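/*
* To illustrate with concrete numbers: the page at physical address
* 0x00100000 (1 MiB, where the image starts) ends up as the entry
*     pt0[0x00100000 / PAGE_SIZE] = pt0[256] = 0x00100000 | 0x003
* i.e. the page frame address with the Present and RW bits set.
*/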
/*
* Conveniently, the full VGA character framebuffer fits into one page
* and even starts at a page aligned address. The physical range
* 0x000b8000 - 0x000b8fff
* gets mapped to the virtual address range
* 0xc03ff000 - 0xc03fffff
* which is the last page of our 4 MiB page table (index 1023).
* We also set the Present and RW bits by OR'ing with 0x003.
*/
3: movl $(0x000b8000 | 0x003), phys_addr(pt0) + 1023 * 4
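/*
* (Index check: (0xc03ff000 - 0xc0000000) / PAGE_SIZE = 0x3ff = 1023,
* so the VGA page really is the very last entry in pt0.)
*/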
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x003fffff
* becomes available at two virtual address ranges:
* 0x00000000 - 0x003fffff (identity mapping)
* 0xc0000000 - 0xc03fffff (relocated mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
* and would thus immediately cause a page fault.
*
* The address of pt0 is OR'd with 0x003 (we actually have to add here
* because the assembler can't OR a symbol that is only resolved at link
* time, but the result is the same since the lower bits are zero anyway)
* to set the Present and RW flags in the page table entry (the lower
* 12 bits in page table addresses are always zero because they are page
* aligned, so they are used as flags for the MMU, see
* ../include/arch/page.h).
*
* The offset added to pd0 is the page directory index multiplied by the
* size of a single entry (4 bytes):
* (0x00000000 / PAGE_SIZE) / 1024 entries per page table = 0
* (0xc0000000 / PAGE_SIZE) / 1024 entries per page table = 768
*/
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 0 * 4 /* 0x00000000 */
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + 768 * 4 /* 0xc0000000 */
/*
* The last entry in the page directory points to itself.
* This has the effect of mapping all page tables in the page directory to
* 0xffc00000 - 0xffffefff
* and the page directory itself to
* 0xfffff000 - 0xffffffff
* because the page directory is being interpreted as a page table.
* This allows us to manipulate the table while we are in virtual memory.
*/
movl $(phys_addr(pd0) + 0x003), phys_addr(pd0) + 1023 * 4 /* 0xffc00000 */
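/*
* For illustration, with this recursive entry in place the page table
* entry covering any virtual address V can itself be accessed from
* virtual memory at
*     0xffc00000 + (V >> 22) * PAGE_SIZE + ((V >> 12) & 0x3ff) * 4
* and the page directory entry for V at
*     0xfffff000 + (V >> 22) * 4
*/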
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $phys_addr(pd0), %ecx
mov %ecx, %cr3
/* set the Paging (PG, bit 31) and Write Protect (WP, bit 16) bits in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
/*
* Alright, we are in virtual address space! But %eip still points to
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xc01*****.
*/
lea _start_virtual, %ecx
jmp *%ecx
/*
* _start only ends here because a function can't span multiple sections;
* the actual code flow continues with the jump from low memory
* (the .multiboot.text section) to high memory (_start_virtual in .text).
*/
ASM_END(_start)
.text
.extern x86_replace_gdt
ASM_ENTRY(_start_virtual)
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
movl $0, pd0 + 0 * 4
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
/* set up the initial stack frame */
mov $stack_top, %ebp
mov %ebp, %esp
/* reset EFLAGS */
pushl $0
popf
/* these are set by GRUB, see the comment at the beginning of _start */
/* parameter 2 for _boot() is the address of the multiboot2 boot information structure */
push %ebx
/* parameter 1 for _boot() is MB2_BOOTLOADER_MAGIC */
push %eax
/*
* but before we call _boot(), we replace the GDT provided by GRUB
* with our own (see arch/mm/segment.S)
*/
call x86_replace_gdt
call _boot
/* this should never(TM) be reached */
cli
5: hlt
jmp 5b
ASM_END(_start_virtual)
/* TODO: There are probably nicer ways of reserving stack memory. */
.section .bootstrap_stack, "aw", @nobits
stack_bottom:
.skip 16384 /* 16 KiB for the stack should be plenty for now */
stack_top:
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/