/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
#include <asm/common.h>
#include <arch/page.h>
#include <arch/sched.h>
#include <arch/segment.h>
#include <arch/vmparam.h>
#include <gay/config.h>
/*
* Early boot sequence on i386.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging and map the lower 4 MB (including the VGA
* console framebuffer) to 0xf0000000, where the kernel image itself is linked,
* and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We need to pay special attention not to touch %eax and %ebx during the
* entire routine, because they hold the multiboot2 magic number and the tag
* table pointer respectively, both of which need to be passed on to _boot().
* We can't push them to the stack, because the stack doesn't exist yet.
*/
.extern _image_start_phys
.extern _image_end_phys
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/i386/page.c */
.extern pt0 /* first page table -- see ../mm/i386/page.c */
.extern _x86_gdt_desc /* GDT Descriptor -- see ../mm/segment.S */
.extern _x86_write_tss_base
.extern _x86_check_multiboot
.extern _x86_check_cpuid
.section .multiboot.text, "ax", @progbits
/*
* referencing symbols defined in C requires subtracting the relocation offset
* first, because the C code is linked at virtual (high memory) addresses
*/
#define PADDR(c_symbol) (c_symbol - KERNBASE)
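/*
* e.g. if pd0 happens to be linked at virtual address 0xf0104000 (an address
* chosen purely for illustration), PADDR(pd0) yields the physical address
* 0x00104000
*/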
ENTRY(_setup)
/*
* set up initial stack frame
*/
movl $PADDR(stack_top), %esp
/* "previous" %eip for ktrace */
pushl $0
/* "previous" %ebp for ktrace */
pushl $0
movl %esp, %ebp
pushl %ebx /* save multiboot tag address */
/* check multiboot magic number */
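/* (a multiboot2-compliant bootloader passes the magic value 0x36d76289 in %eax) */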
pushl %eax
call _x86_check_multiboot
addl $4, %esp
/*
* check if the CPUID instruction is supported
* (prints an error and halts if not)
*/
call _x86_check_cpuid
/*
* load the base values for kernel and user TSS into the corresponding GDT entries
*/
pushl $PADDR(_x86_kern_tss)
pushl $PADDR(_x86_gdt + X86_KERN_TSS)
call _x86_write_tss_base
pushl $PADDR(_x86_user_tss)
pushl $PADDR(_x86_gdt + X86_USER_TSS)
call _x86_write_tss_base
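/*
* the addl below pops the four argument dwords pushed for the two calls
* above; the TSS base is split across several fields of an 8-byte GDT
* descriptor, which is presumably why it is patched in at run time rather
* than expressed as a link-time constant
*/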
addl $16, %esp
/*
* identity map all of low memory (~ 256 MB)
*/
movl $(KERN_LENGTH >> HUGEPAGE_SHIFT), %ecx
movl $0x083, %esi /* page flags: present, writeable, huge */
movl $PADDR(pd0), %edi /* pd entry */
1: movl %esi, (%edi)
addl $HUGEPAGE_SIZE, %esi
addl $4, %edi
loop 1b
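/*
* after this loop, pd0[0] = 0x00000083, pd0[1] = 0x00400083 and so on:
* each entry identity-maps one huge (4 MB) page of physical memory
*/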
/*
* mirror that mapping to high memory (KERNBASE)
*/
movl $(KERN_LENGTH >> HUGEPAGE_SHIFT), %ecx
movl $0x183, %esi /* page flags: present, writeable, huge, global */
movl $PADDR(pd0 + (KERNBASE >> HUGEPAGE_SHIFT) * 4), %edi /* pd entry */
2: movl %esi, (%edi)
addl $HUGEPAGE_SIZE, %esi
addl $4, %edi
loop 2b
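/*
* this fills pd0[960] onwards (KERNBASE >> HUGEPAGE_SHIFT = 960), so physical
* 0x00000000.. now also appears at 0xf0000000.., this time with the global
* bit set so the TLB entries survive later cr3 reloads
*/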
/*
* The last entry in the page directory points back to the page directory
* itself.  This has the effect of mapping all page tables to
* 0xffc00000 - 0xffffefff
* and the page directory itself to
* 0xfffff000 - 0xffffffff
* because the page directory is then interpreted as a page table.
* This allows us to manipulate the page tables while we are in virtual memory
* (the classic recursive page directory trick).
*/
movl $(PADDR(pd0) + 0x003), PADDR(pd0) + 1023 * 4 /* 0xffc00000 */
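/*
* the value written is the physical address of pd0 with flags 0x003
* (present | writable); the huge-page bit is deliberately clear, so the CPU
* treats pd0 as a page table when translating through this entry.
* 1023 * 4 = 0xffc is simply the byte offset of the last directory slot.
*/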
/* set the Page Size Extensions (4) and Page Global Enable (7) bits in cr4 */
mov %cr4, %ecx
or $0x00000090, %ecx
mov %ecx, %cr4
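/*
* 0x90 = (1 << 4) | (1 << 7): PSE is required for the 4 MB entries written
* above to be honored, and PGE makes the global bit in the KERNBASE mapping
* effective
*/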
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $PADDR(pd0), %ecx
mov %ecx, %cr3
/* set the Paging (31) and Write Protect (16) bits in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
/*
* Alright, we are in virtual address space! But %eip still points to
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xf01*****.
*/
lea _setup_highmem, %ecx
jmp *%ecx
/*
* This split is only needed because a function can't span multiple sections;
* the actual code flow jumps from low memory (.multiboot.text section)
* straight into high memory (.text section).
*/
END(_setup)
.text
ENTRY(_setup_highmem)
addl $KERNBASE, %esp
addl $KERNBASE, %ebp
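/*
* the boot stack lives inside the kernel image, which is mapped at KERNBASE
* as well, so shifting %esp and %ebp up by KERNBASE moves us off the
* identity mapping before it is torn down below
*/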
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
movl $(KERN_LENGTH >> HUGEPAGE_SHIFT), %ecx
xor %esi, %esi
movl $pd0, %edi /* pd entry */
1: movl %esi, (%edi)
addl $4, %edi
loop 1b
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
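/*
* writing to cr3 flushes all non-global TLB entries, which gets rid of the
* stale identity-mapped translations (they were created without the global
* bit); the KERNBASE mappings are global and therefore survive
*/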
/* replace the GDT provided by the bootloader (if any) with our own */
lgdt _x86_gdt_desc
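/*
* the new descriptors only take effect once the segment registers are
* reloaded: the far jump below reloads %cs, and the moves after it reload
* the data segment registers
*/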
ljmp $(X86_32_KERN_CS), $1f
1: movl $(X86_KERN_DS), %ecx
movw %cx, %ds
movw %cx, %es
movw %cx, %fs
movw %cx, %gs
movw %cx, %ss
/* reset EFLAGS */
pushl $0
popf
addl $KERNBASE, %ebx
push %ebx
call _boot
/* this should never(TM) be reached */
cli
2: hlt
jmp 2b
END(_setup_highmem)
.section .bootstrap_stack, "aw", @nobits
.align KERN_STACK_SIZE
stack_bottom:
.skip KERN_STACK_SIZE
stack_top:
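/*
* the stack grows downwards on x86, so stack_top is the address loaded into
* %esp by _setup above; aligning to KERN_STACK_SIZE makes the stack region
* start on a KERN_STACK_SIZE boundary
*/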