/* See the end of this file for copyright and license terms. */
#include <asm/common.h>
#include <arch/page.h>
#include <arch/sched.h>
#include <arch/vmparam.h>
#include <gay/config.h>
/*
* Early boot sequence on i386.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging and map the lower 4 MiB (including the VGA
* console framebuffer) to 0xf0000000, where the kernel image itself is linked
* to, and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We need to pay special attention not to touch %eax and %ebx during the
* entire routine, because those store the multiboot2 magic number and tag
* table pointer respectively, which need to be passed on to _boot(). We
* can't push them onto the stack, because the stack doesn't exist yet.
*/
.extern _image_start_phys
.extern _image_end_phys
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/page.c */
.extern pt0 /* first page table -- see ../mm/page.c */
.section .multiboot.text, "ax", @progbits
/*
* referencing symbols from C requires subtracting the relocation offset
* first because the C code is linked against virtual address space
*/
#define phys_addr(c_symbol) (c_symbol - KERN_OFFSET)
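/*
* For illustration, assuming KERN_OFFSET is 0xf0000000 (matching the mapping
* described at the top of this file): a C symbol linked at the virtual
* address 0xf0100000 resolves to the physical address 0x00100000.
*/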
ASM_ENTRY(_setup)
/*
* The kernel image starts at 1 MiB into physical memory.
* We currently assume the kernel is < 3 MiB
* and therefore can be mapped within a single page table.
* As the kernel gets more and more bloated, this might no longer be the
* case at some point, so we should ideally add support for multiple
* page tables soon.
*/
mov $phys_addr(pt0), %edi
xor %esi, %esi
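/*
* From here on, %edi walks the page table entries (4 bytes each) and %esi
* holds the physical address of the page currently being mapped, starting
* at physical address 0.
*/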
/*
* GRUB stores the multiboot tags right after the kernel image (afaik).
* The previous strategy was to stop the loop after reaching the end of the
* kernel image (including bss) and to keep our fingers crossed that the
* multiboot tags would all fit into the space between the end of the image
* and the end of that last page, so they would still be mapped.
* Now, we just keep mapping until we have reached the last slot in the
* page table and only exit the loop then (the last slot is reserved for
* the VGA character framebuffer, see below).
*/
mov $1023, %ecx
1: cmp $_image_start_phys, %esi
jl 2f /* don't map pages below the start of the kernel image */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
mov %edx, (%edi)
2: add $PAGE_SIZE, %esi /* advance to next physical page address */
add $4, %edi /* advance to next pointer in the page table */
loop 1b
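/*
* Worked example, assuming PAGE_SIZE is 4096: the first kernel page at
* physical 0x00100000 (1 MiB) corresponds to slot 0x00100000 / 4096 = 256,
* so the loop stores 0x00100003 (frame | Present | RW) at pt0 + 256 * 4,
* which maps it at virtual 0xf0100000 once paging is enabled.
*/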
/*
* Conveniently, the full VGA character framebuffer fits into one page
* and even starts at a page aligned address. The physical range
* 0x000b8000 - 0x000b8fff
* gets mapped to the virtual address range
* 0xf03ff000 - 0xf03fffff
* which is the last page of our 4 MiB page table (index 1023).
* We also set the Present and RW bits by OR'ing with 0x003.
*/
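/*
* No extra offset is needed here: after 1023 iterations of the loop above,
* %edi has already advanced to pt0 + 1023 * 4, i.e. the last entry.
*/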
movl $(0x000b8000 | 0x003), (%edi)
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x003fffff
* becomes available at two virtual address ranges:
* 0x00000000 - 0x003fffff (identity mapping)
* 0xf0000000 - 0xf03fffff (highmem mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
* and would thus immediately cause a page fault.
*
* The address of pt0 is OR'd with 0x003 (we actually have to use + rather
* than | here because the relocation for pt0 can only encode an added
* offset, but that doesn't matter because the result is the same) in order
* to set the Present and RW flags in the page directory entry (the lower
* 12 bits of page table addresses are always zero because the tables are
* page aligned, so the MMU uses them as flags, see ../include/arch/page.h).
*
* The offset added to pd0 is the page table number multiplied by the size
* of a page directory entry (4 bytes). The page table number is the page
* number of the first page it maps, divided by the number of pages a
* single page table can map (1024).
*/
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + (( 0x00000000 / PAGE_SIZE) / 1024) * 4
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + ((KERN_OFFSET / PAGE_SIZE) / 1024) * 4
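/*
* Worked out, assuming PAGE_SIZE is 4096 and KERN_OFFSET is 0xf0000000:
* (0xf0000000 / 4096) / 1024 = 960, so the second entry lands at byte
* offset 960 * 4 = 0xf00 into pd0, while the identity mapping uses index 0.
*/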
/*
* The last entry in the page directory points to itself.
* This has the effect of mapping all page tables in the page directory to
* 0xffc00000 - 0xffffefff
* and the page directory itself to
* 0xfffff000 - 0xffffffff
* because the page directory is being interpreted as a page table.
* This allows us to manipulate the table while we are in virtual memory.
*/
movl $(phys_addr(pd0) + 0x003), phys_addr(pd0) + 1023 * 4 /* 0xffc00000 */
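/*
* In other words: page directory index 1023 covers the virtual range
* 0xffc00000 - 0xffffffff, and because that slot points back at pd0, page
* number i within that range shows the page table referenced by pd0[i];
* i = 1023 yields pd0 itself at 0xfffff000.
*/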
/* set the Page Size Extensions (4) and Page Global Enable (7) bits in cr4 */
mov %cr4, %ecx
or $0x00000090, %ecx
mov %ecx, %cr4
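/* (0x90 above = (1 << 4) | (1 << 7), i.e. CR4.PSE | CR4.PGE) */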
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $phys_addr(pd0), %ecx
mov %ecx, %cr3
/* set the Paging (31) and Write Protect (16) bits in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
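/* (0x80010000 above = (1 << 31) | (1 << 16), i.e. CR0.PG | CR0.WP) */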
/*
* Alright, we are in virtual address space! But %eip still points to
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xf01*****.
*/
lea _setup_highmem, %ecx
jmp *%ecx
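/*
* A relative jump would keep executing through the identity mapping in low
* memory; loading the link-time (virtual) address of _setup_highmem into a
* register and jumping through it is what actually moves %eip to 0xf01*****.
*/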
/*
* This is just because you can't span a single function across multiple
* sections; the actual code flow makes a jump from low memory (the
* .multiboot.text section) to high memory (the .text section).
*/
ASM_END(_setup)
.text
.extern x86_replace_gdt
.extern x86_load_idt
ASM_ENTRY(_setup_highmem)
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
movl $0, pd0 + 0 * 4
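/*
* pd0[0] covered the virtual range 0x00000000 - 0x003fffff, i.e. exactly
* the identity-mapped window set up in _setup above.
*/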
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
/* set up the initial stack frame */
mov $stack_top, %ebp
mov %ebp, %esp
pushl $0 /* cpu number, see smp_cpuid() in arch/smp.h */
/* reset EFLAGS */
pushl $0
popf
/* %eax and %ebx are set by GRUB, see the comment at the top of this file */
/* parameter 2 for _boot() is the multiboot2 tag table address */
push %ebx
/* parameter 1 for _boot() is MB2_BOOTLOADER_MAGIC */
push %eax
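/*
* _boot() is plain C, so with the usual i386 cdecl calling convention the
* arguments are pushed from right to left: the last value pushed (%eax)
* becomes the first parameter.
*/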
/*
* but before we call _boot(), we replace the GDT provided by GRUB with
* our own (see arch/mm/segment.S).
*/
call x86_replace_gdt
call _boot
/* this should never(TM) be reached */
cli
3: hlt
jmp 3b
ASM_END(_setup_highmem)
.section .bootstrap_stack, "aw", @nobits
.align KERN_STACK_SIZE
stack_bottom:
.skip KERN_STACK_SIZE
stack_top:
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/