x86: begin preparations for amd64 support

This is a huge commit, but it mainly just moves
some files around and doesn't change their
contents much.
A lot of stuff works the same on amd64 as it does
on i386, so i'm moving the parts that are specific
to the latter into separate subdirectories while
the rest can be shared with the amd64 codebase.
main
anna 3 years ago
parent 4e770a6e58
commit 3fee893f21
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@ -9,6 +9,12 @@ implemented at the moment.
The kernel compiles to a binary i686 image that implements the GNU
[Multiboot 2](https://www.gnu.org/software/grub/manual/multiboot2/multiboot.html)
specification.
amd64 support is currently being implemented, and will be the main focus for
further development.
32-bit platforms are likely to be abandoned altogether in the near future,
simply because the additional housekeeping needed to maintain correct
memory mappings in this constrained address space has turned out to be
far too much to be feasible at the current size of this project.
### Required Tools

@ -5,6 +5,12 @@ target_sources(gay_arch PRIVATE
boot.c
)
if(X86_ARCH STREQUAL "amd64")
target_sources(gay_arch PRIVATE setup64.S)
else()
target_sources(gay_arch PRIVATE setup32.S)
endif()
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.
#

@ -4,6 +4,7 @@
#include <arch/interrupt.h>
#include <arch/multiboot.h>
#include <arch/vmparam.h>
#include <gay/cdefs.h>
#include <gay/config.h>
@ -37,7 +38,7 @@ enum vga_color {
* entries per page table, each mapping 4096 (0x1000) bytes, this gives us an
* offset of 0x3ff * 0x1000 = 0x003ff000 for the virtual address.
*/
#define FB_ADDRESS (CFG_KERN_OFFSET + 0x003ff000)
#define FB_ADDRESS (KERN_OFFSET + 0x003ff000)
#define FB_LINES 24
#define FB_COLS 80
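As an aside, that arithmetic can be checked at compile time; an illustrative sketch using the two constants from this hunk (not part of the commit):

    /* 1023 entries of 0x1000 bytes each put the last slot at offset 0x3ff000 */
    _Static_assert(FB_ADDRESS == KERN_OFFSET + 1023 * 0x1000,
                   "VGA framebuffer sits in the last page table slot");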
@ -80,7 +81,7 @@ static const char *mmap_type_name(u32 type);
extern int main(int argc, char *argv[]);
__asmlink void _boot(u32 magic, void *address) /* NOLINT */
__asmlink void _boot(u32 magic, uintptr_t phys_address) /* NOLINT */
{
kprintf_set_printer(&fb_kprintf_printer);
fb_init(VGA_COLOR_LIGHT_GREY, VGA_COLOR_BLACK);
@ -99,7 +100,7 @@ __asmlink void _boot(u32 magic, void *address) /* NOLINT */
* so we need to be careful to translate all pointers to virtual
* addresses before accessing them.
*/
address += CFG_KERN_OFFSET;
void *address = (void *)phys_address + KERN_OFFSET;
int err = 0;
for (struct mb2_tag *tag = address + 8; tag != NULL; tag = next_tag(tag)) {
err = handle_tag(tag);

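For context, a hedged sketch of what `next_tag()` presumably does, going by the Multiboot 2 spec (tags start at 8-byte boundaries and the list ends with a type-0 tag); the real helper is defined elsewhere in boot.c and may differ:

    static inline struct mb2_tag *next_tag(struct mb2_tag *tag)
    {
        if (tag->type == 0) /* the end tag (type 0, size 8) terminates the list */
            return NULL;
        /* tag sizes are not padded, but each tag starts 8-byte aligned */
        return (struct mb2_tag *)((uintptr_t)tag + ((tag->size + 7) & ~(uintptr_t)7));
    }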
@ -7,14 +7,17 @@
.code32
/* see arch/x86/config/kernel.ld */
/* see ../config/kernel{32,64}.ld */
.extern _image_start_phys
.extern _kernel_end_phys
.extern _image_end_phys
.section .multiboot.data, "aw"
.extern _setup /* setup{32,64}.S */
.section .multiboot.data, "a", @progbits
.align MB2_HEADER_ALIGN
.global header_start
header_start: /* struct mb2_header */
/* magic */
.long MB2_HEADER_MAGIC
@ -86,203 +89,18 @@ end_tag_end:
header_end:
.size header_start, . - header_start
/*
* Actual boot sequence comes here.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging and map the lower 4 MB (including the VGA
* console framebuffer) to 0xc0000000 where the kernel image itself is linked
* to, and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We need to be careful not to touch eax and ebx during the entire
* routine, because those store the multiboot2 magic number and tag table
* pointer respectively which need to be passed on to _boot(). We can't push
* them on the stack, because the stack doesn't exist yet.
*/
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/page.c */
.extern pt0 /* first page table -- see ../mm/page.c */
.section .multiboot.text, "a"
/*
* referencing symbols from C requires subtracting the relocation offset
* first because the C code is linked against virtual address space
*/
#define phys_addr(c_symbol) (c_symbol - CFG_KERN_OFFSET)
ASM_ENTRY(_start)
/* interrupts are disabled until we set up the IDT in x86_setup_idt() */
cli
/*
* The kernel image starts at 1 MiB into physical memory.
* We currently assume the kernel is < 3 MiB
* and therefore can be mapped within a single page table.
* As the kernel gets more and more bloated, this might not be the case
* in the future anymore, so we should ideally add support for multiple
* page tables soon.
*/
mov $phys_addr(pt0), %edi
xor %esi, %esi
1: cmp $_image_start_phys, %esi
jl 2f /* skip the pages that are below the kernel image */
/*
* GRUB stores the multiboot tags right after the kernel image (afaik).
* The previous strategy was to stop the loop after having reached the
* end of the kernel image (including bss), and keep our fingers crossed
* that the multiboot tags all fit into the space between the end of the
* kernel image and the end of that last page so it's still mapped.
* Now, we just continue mapping until we have reached the last slot in
* the page table and exit the loop only then (the last slot is for the
* BIOS character framebuffer, see below).
* So, this is weird.
* For some reason, the multiboot header is omitted in the compiled
* binary if the entry point isn't in the same file (i've tried way
* too many times). But, since the header is the same for both i386
* and amd64, we simply add this wrapper.
*/
cmp $(PAGE_SIZE * 1023), %esi
je 3f /* exit the loop when we have mapped every PT entry but the last one */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
mov %edx, (%edi)
2: add $PAGE_SIZE, %esi /* advance to next physical page address */
add $4, %edi /* advance to next pointer in the page table */
loop 1b
/*
* Conveniently, the full VGA character framebuffer fits into one page
* and even starts at a page aligned address. The physical range
* 0x000b8000 - 0x000b8fff
* gets mapped to the virtual address range
* 0xc03ff000 - 0xc03fffff
* which is the last page of our 4 MiB page table (index 1023).
* We also set the Present and RW bits by OR'ing with 0x003.
*/
3: movl $(0x000b8000 | 0x003), phys_addr(pt0) + 1023 * 4
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x003fffff
* becomes available at two virtual address ranges:
* 0x00000000 - 0x003fffff (identity mapping)
* 0xc0000000 - 0xc03fffff (relocated mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
* and would thus immediately cause a page fault.
*
* The address of pt0 is OR'd with 0x003 (we actually have to add here
* because of dumb compiler stuff but that doesn't matter because it's
* the same result) to set the Present and RW flags in the page table
* entry (the lower 12 bits in page table addresses are always zero
* because they are page aligned so they are used as flags for the MMU,
* see ../include/arch/page.h).
*
* The offset added to pd0 is the page table number multiplied by the
* size of a page directory entry (4 bytes). The page table number can
* be calculated by dividing the number of the first page it maps
* by the total number of entries in a page table (1024).
*/
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + (( 0x00000000 / PAGE_SIZE) / 1024) * 4
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + ((CFG_KERN_OFFSET / PAGE_SIZE) / 1024) * 4
/*
* The last entry in the page directory points to itself.
* This has the effect of mapping all page tables in the page directory to
* 0xffc00000 - 0xffffefff
* and the page directory itself to
* 0xfffff000 - 0xffffffff
* because the page directory is being interpreted as a page table.
* This allows us to manipulate the table while we are in virtual memory.
*/
movl $(phys_addr(pd0) + 0x003), phys_addr(pd0) + 1023 * 4 /* 0xffc00000 */
/* set the Page Size Extensions bit in cr4 */
mov %cr4, %ecx
or $0x00000010, %ecx
mov %ecx, %cr4
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $phys_addr(pd0), %ecx
mov %ecx, %cr3
/* set the paging and write-protect bit in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
/*
* Alright, we are in virtual address space! But %eip still points to
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xc01*****.
*/
lea _start_virtual, %ecx
jmp *%ecx
/*
* this is just because you can't span functions across multiple sections,
* the actual code flow makes a jump from low (.multiboot.text section)
* to high memory (.text section).
*/
ASM_END(_start)
.text
.extern x86_replace_gdt
.extern x86_load_idt
ASM_ENTRY(_start_virtual)
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
movl $0, pd0 + 0 * 4
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
/* set up the initial stack frame */
mov $stack_top, %ebp
mov %ebp, %esp
/* reset EFLAGS */
pushl $0
popf
/* these are set by GRUB, see the comment at the beginning of _start */
/* parameter 2 for _boot() is header address */
push %ebx
/* parameter 1 for _boot() is MB2_BOOTLOADER_MAGIC */
push %eax
/*
* but before we call _boot(), we replace the GDT provided by GRUB
* with our own (arch/mm/segment.S) and load the IDT (arch/sys/idt.S).
*/
call x86_replace_gdt
call _boot
/* this should never(TM) be reached */
.section .multiboot.text, "ax", @progbits
ASM_ENTRY(_start)
cli
5: hlt
jmp 5b
ASM_END(_start_virtual)
/* TODO: There are probably nicer ways of reserving stack memory. */
.section .bootstrap_stack, "aw", @nobits
stack_bottom:
.skip 16384 /* 16 KiB for the stack should be plenty for now */
stack_top:
jmp _setup
ASM_END(_start)
/*
* This file is part of GayBSD.

@ -0,0 +1,219 @@
/* See the end of this file for copyright and license terms. */
#include <asm/common.h>
#include <arch/page.h>
#include <arch/vmparam.h>
#include <gay/config.h>
/*
* Early boot sequence on i386.
*
* This is based on the example code from the OSDev.org wiki:
* <https://wiki.osdev.org/Higher_Half_x86_Bare_Bones>
*
* We basically just turn on paging and map the lower 4 MB (including the VGA
* console framebuffer) to 0xf0000000 where the kernel image itself is linked
* to, and then jump to that region and set up the stack. From there, _boot()
* takes care of everything.
*
* We need to be careful not to touch eax and ebx during the entire
* routine, because those store the multiboot2 magic number and tag table
* pointer respectively which need to be passed on to _boot(). We can't push
* them on the stack, because the stack doesn't exist yet.
*/
.extern _image_start_phys
.extern _image_end_phys
.extern _boot /* main boot routine -- see ./boot.c */
.extern pd0 /* initial page directory -- see ../mm/page.c */
.extern pt0 /* first page table -- see ../mm/page.c */
.section .multiboot.text, "ax", @progbits
/*
* referencing symbols from C requires subtracting the relocation offset
* first because the C code is linked against virtual address space
*/
#define phys_addr(c_symbol) (c_symbol - KERN_OFFSET)
ASM_ENTRY(_setup)
cli
/*
* The kernel image starts at 1 MiB into physical memory.
* We currently assume the kernel is < 3 MiB
* and therefore can be mapped within a single page table.
* As the kernel gets more and more bloated, this might not be the case
* in the future anymore, so we should ideally add support for multiple
* page tables soon.
*/
mov $phys_addr(pt0), %edi
xor %esi, %esi
/*
* GRUB stores the multiboot tags right after the kernel image (afaik).
* The previous strategy was to stop the loop after having reached the
* end of the kernel image (including bss), and keep our fingers crossed
* that the multiboot tags all fit into the space between the end of the
* kernel image and the end of that last page so it's still mapped.
* Now, we just continue mapping until we have reached the last slot in
* the page table and exit the loop only then (the last slot is for the
* BIOS character framebuffer, see below).
*/
mov $1023, %ecx
1: cmp $_image_start_phys, %esi
jl 2f /* don't map pages below the start of the kernel image */
mov %esi, %edx
or $0x003, %edx /* set present and rw flags, see below */
mov %edx, (%edi)
2: add $PAGE_SIZE, %esi /* advance to next physical page address */
add $4, %edi /* advance to next pointer in the page table */
loop 1b
/*
* Conveniently, the full VGA character framebuffer fits into one page
* and even starts at a page aligned address. The physical range
* 0x000b8000 - 0x000b8fff
* gets mapped to the virtual address range
* 0xf03ff000 - 0xf03fffff
* which is the last page of our 4 MiB page table (index 1023).
* We also set the Present and RW bits by OR'ing with 0x003.
*/
movl $(0x000b8000 | 0x003), (%edi)
/*
* We are mapping the lowest 4 MiB of physical memory both to itself and
* to the relocated region. Thus, the physical address range:
* 0x00000000 - 0x003fffff
* becomes available at two virtual address ranges:
* 0x00000000 - 0x003fffff (identity mapping)
* 0xf0000000 - 0xf03fffff (highmem mapping)
*
* The identity mapping is necessary because when we turn on paging in
* the next lines, the program counter still refers to physical memory
* and would thus immediately cause a page fault.
*
* The address of pt0 is OR'd with 0x003 (we actually have to add here
* because of dumb compiler stuff but that doesn't matter because the
* result is the same) to set the Present and RW flags in the page table
* entry (the lower 12 bits in page table addresses are always zero
* because they are page aligned so they are used as flags for the MMU,
* see ../include/arch/page.h).
*
* The offset added to pd0 is the page table number multiplied by the
* size of a page directory entry (4 bytes). The page table number can
* be calculated by dividing the number of the first page it maps
* by the total number of entries in a page table (1024).
*/
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + (( 0x00000000 / PAGE_SIZE) / 1024) * 4
movl $(phys_addr(pt0) + 0x003), phys_addr(pd0) + ((KERN_OFFSET / PAGE_SIZE) / 1024) * 4
/*
* The last entry in the page directory points to itself.
* This has the effect of mapping all page tables in the page directory to
* 0xffc00000 - 0xffffefff
* and the page directory itself to
* 0xfffff000 - 0xffffffff
* because the page directory is being interpreted as a page table.
* This allows us to manipulate the table while we are in virtual memory.
*/
movl $(phys_addr(pd0) + 0x003), phys_addr(pd0) + 1023 * 4 /* 0xffc00000 */
/* set the Page Size Extensions bit in cr4 */
mov %cr4, %ecx
or $0x00000010, %ecx
mov %ecx, %cr4
/* put the (physical) address of pd0 into cr3 so it will be used */
mov $phys_addr(pd0), %ecx
mov %ecx, %cr3
/* set the paging and write-protect bit in cr0 */
mov %cr0, %ecx
or $0x80010000, %ecx
mov %ecx, %cr0
/*
* Alright, we are in virtual address space! But %eip still points to
* low memory (making use of the identity mapping), so we are going to
* do an absolute jump to the mapped kernel code somewhere at 0xf01*****.
*/
lea _setup_highmem, %ecx
jmp *%ecx
/*
* this is just because you can't span functions across multiple sections,
* the actual code flow makes a jump from low (.multiboot.text section)
* to high memory (.text section).
*/
ASM_END(_setup)
.text
.extern x86_replace_gdt
.extern x86_load_idt
ASM_ENTRY(_setup_highmem)
/*
* Now that we've completely transitioned to high memory, we can remove
* the identity mapping because we don't need it anymore.
*/
movl $0, pd0 + 0 * 4
/* bonk the TLB by reloading cr3 to apply the updated page table */
mov %cr3, %ecx
mov %ecx, %cr3
/* set up the initial stack frame */
mov $stack_top, %ebp
mov %ebp, %esp
/* reset EFLAGS */
pushl $0
popf
/* these are set by GRUB, see the comment at the beginning of _start */
/* parameter 2 for _boot() is header address */
push %ebx
/* parameter 1 for _boot() is MB2_BOOTLOADER_MAGIC */
push %eax
/*
* but before we call _boot(), we replace the GDT provided by GRUB
* with our own (arch/mm/segment.S) and load the IDT (arch/sys/idt.S).
*/
call x86_replace_gdt
call _boot
/* this should never(TM) be reached */
cli
3: hlt
jmp 3b
ASM_END(_setup_highmem)
.section .bootstrap_stack, "aw", @nobits
stack_bottom:
.skip 16384 /* 16 KiB for the stack should be plenty for now */
stack_top:
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

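An illustrative C rendering of the two `movl` instructions that wire up the page directory in the setup code above (sketch only; `pd` and `pt_phys` are hypothetical names for the physical views of `pd0` and `pt0`):

    static void map_kernel_window(u32 *pd, u32 pt_phys)
    {
        pd[0x00000000u >> 22] = pt_phys | 0x003; /* identity mapping, slot 0 */
        pd[0xf0000000u >> 22] = pt_phys | 0x003; /* highmem mapping, slot 960 (KERN_OFFSET) */
        pd[1023] = (u32)(uintptr_t)pd | 0x003;   /* recursive slot, see the comment above */
    }

Both slots carry Present | RW (0x003), exactly like the OR'd constants in the assembly.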
@ -6,12 +6,14 @@ OUTPUT_ARCH(i386)
/* not strictly needed because we produce a binary image but can't hurt */
ENTRY(_start)
KERN_OFFSET = 0xf0000000;
SECTIONS {
. = CFG_KERN_ORIGIN;
_image_start = . + CFG_KERN_OFFSET;
_image_start = . + KERN_OFFSET;
_image_start_phys = .;
_kernel_start = . + CFG_KERN_OFFSET;
_kernel_start = . + KERN_OFFSET;
_kernel_start_phys = .;
.multiboot.data : {
@ -24,35 +26,35 @@ SECTIONS {
KEEP(*(.multiboot.text))
}
. += CFG_KERN_OFFSET;
. += KERN_OFFSET;
/*
* All sections from here on are page aligned so we can
* set different access permissions for each of them
*/
.text ALIGN(4K) : AT(ADDR(.text) - CFG_KERN_OFFSET) {
.text ALIGN(4K) : AT(ADDR(.text) - KERN_OFFSET) {
_text_start = .;
*(.text .text.* .gnu.linkonce.t.*)
_text_end = .;
}
.rodata ALIGN(4K) : AT(ADDR(.rodata) - CFG_KERN_OFFSET) {
.rodata ALIGN(4K) : AT(ADDR(.rodata) - KERN_OFFSET) {
_rodata_start = .;
*(.rodata .rodata.* .gnu.linkonce.r.*)
_rodata_end = .;
}
.data ALIGN(4K) : AT(ADDR(.data) - CFG_KERN_OFFSET) {
.data ALIGN(4K) : AT(ADDR(.data) - KERN_OFFSET) {
_data_start = .;
*(.data .data.*)
_data_end = .;
}
_kernel_end = .;
_kernel_end_phys = . - CFG_KERN_OFFSET;
_kernel_end_phys = . - KERN_OFFSET;
.bss ALIGN(4K) : AT(ADDR(.bss) - CFG_KERN_OFFSET) {
.bss ALIGN(4K) : AT(ADDR(.bss) - KERN_OFFSET) {
_bss_start = .;
*(COMMON)
*(.bss)
@ -63,7 +65,7 @@ SECTIONS {
}
_image_end = .;
_image_end_phys = . - CFG_KERN_OFFSET;
_image_end_phys = . - KERN_OFFSET;
}
/*

@ -1,7 +1,6 @@
/* See the end of this file for copyright and license terms. */
CFG_KERN_ORIGIN = @CFG_KERN_ORIGIN@;
CFG_KERN_OFFSET = @CFG_KERN_OFFSET@;
/*
* This file is part of GayBSD.

@ -3,9 +3,19 @@
set(TOOLCHAIN_PATH "/usr/bin" CACHE STRING "Toolchain directory")
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_SYSTEM_PROCESSOR i686)
set(CMAKE_CROSSCOMPILING 1)
set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
if (CFG_X86_64)
set(CMAKE_SYSTEM_PROCESSOR x86-64)
set(_toolchain_common_flags "-m64 -march=x86-64")
set(_toolchain_triple x86_64-pc-none-elf)
set(X86_TARGET "elf64-amd64")
else()
set(CMAKE_SYSTEM_PROCESSOR i686)
set(_toolchain_common_flags "-m32 -march=i686")
set(_toolchain_triple i686-pc-none-elf)
set(_toolchain_bits 32)
endif()
set(CMAKE_AR ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}ar${CMAKE_EXECUTABLE_SUFFIX})
set(CMAKE_ASM_COMPILER ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}clang${CMAKE_EXECUTABLE_SUFFIX})
@ -16,12 +26,10 @@ set(CMAKE_RANLIB ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}ranlib${CMAK
set(CMAKE_SIZE ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}size${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_STRIP ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}strip${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32 -march=i686")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -m32 -march=i686")
set(triple i686-pc-none-elf)
set(CMAKE_C_COMPILER_TARGET ${triple})
set(CMAKE_ASM_COMPILER_TARGET ${triple})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_toolchain_common_flags}")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${_toolchain_common_flags}")
set(CMAKE_C_COMPILER_TARGET ${_toolchain_triple})
set(CMAKE_ASM_COMPILER_TARGET ${_toolchain_triple})
set(CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld")
set(CMAKE_MODULE_LINKER_FLAGS_INIT "-fuse-ld=lld")
@ -31,7 +39,7 @@ configure_file(
"${CMAKE_CURRENT_LIST_DIR}/kernel_config.ld.in"
"${CMAKE_BINARY_DIR}/config/kernel_config.ld"
)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -T${CMAKE_BINARY_DIR}/config/kernel_config.ld -T${CMAKE_CURRENT_LIST_DIR}/kernel.ld")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -T${CMAKE_BINARY_DIR}/config/kernel_config.ld -T${CMAKE_CURRENT_LIST_DIR}/kernel${_toolchain_bits}.ld")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)

@ -66,12 +66,12 @@
#include <gay/cdefs.h>
#include <gay/types.h>
__always_inline void enable_intr(void)
static __always_inline void enable_intr(void)
{
__asm__ volatile("sti");
}
__always_inline void disable_intr(void)
static __always_inline void disable_intr(void)
{
__asm__ volatile("cli" ::: "memory");
}
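Not part of this commit, but a natural companion sketch: a save/restore pair for nested critical sections, in the same style (assumes i386 `pushfl` and that EFLAGS bit 9 is IF):

    static __always_inline u32 intr_save(void)
    {
        u32 eflags;
        __asm__ volatile("pushfl; popl %0; cli" : "=r"(eflags) :: "memory");
        return eflags;
    }

    static __always_inline void intr_restore(u32 eflags)
    {
        if (eflags & (1u << 9)) /* EFLAGS.IF was set before intr_save() */
            enable_intr();
    }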
@ -83,7 +83,7 @@ __always_inline void disable_intr(void)
struct x86_idt_entry {
u16 offset0; /**< @brief ptr to handler, bits 0:15 */
u16 selector; /**< @brief GDT selector, use `X86_KERN_CS` */
u8 _zero; /**< @brief always 0 */
u8 _rsvd0; /**< @brief always 0 */
u8 attr;
#define X86_IDT_GATE_TASK32 0x5u
#define X86_IDT_GATE_INTR16 0x6u
@ -94,9 +94,12 @@ struct x86_idt_entry {
#define X86_IDT_DPL(x) (((x) & 3u) << 5)
#define X86_IDT_PRESENT (1u << 7)
u16 offset1; /**< @brief ptr to handler, bits 16:31 */
#ifdef __x86_64__
u32 offset2;
u32 _rsvd1; /**< @brief always zero */
#endif
} __packed;
/* idt.S */
extern struct x86_idt_entry x86_idt[X86_INTR_COUNT];
/**

@ -1,21 +1,27 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#define _ARCH_PAGE_H_
/**
* @brief Data structures and constants for paging on x86 (please end my suffering).
*/
#define __HAS_HUGEPAGES
#include <arch/vmparam.h>
#define __HAVE_HUGEPAGES
/** @brief Binary logarithm of `PAGE_SIZE`. */
#define PAGE_SHIFT 12
/** @brief Page size in bytes. */
#define PAGE_SIZE (1 << PAGE_SHIFT)
/** @brief Binary logarithm of `HUGEPAGE_SIZE`. */
#define HUGEPAGE_SHIFT 22
/** @brief Huge page size in bytes. */
#ifdef __x86_64__
#include <amd64/page.h>
#else
#include <i386/page.h>
#endif
#define HUGEPAGE_SIZE (1 << HUGEPAGE_SHIFT)
#ifndef _ASM_SOURCE
@ -25,68 +31,9 @@
/** @brief Pointer bitmask to get the base address of their huge page. */
#define HUGEPAGE_MASK ( ~((unsigned long)HUGEPAGE_SIZE - 1) )
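The relationship between the page and huge page constants, as an illustrative compile-time check (one huge page spans exactly one page table's worth of memory):

    _Static_assert(HUGEPAGE_SIZE == 1024 * PAGE_SIZE, "huge pages on i386 are 4 MiB");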
#include <gay/cdefs.h>
#include <gay/types.h>
struct x86_page_table_entry {
unsigned present:1; /**< Page Fault on access if 0 */
unsigned rw:1; /**< Page Fault on write if 0 */
unsigned user:1; /**< Page Fault on user mode access if 0 */
unsigned write_through:1; /**< Enable write-through caching */
unsigned cache_disabled:1; /**< Disable caching in TLB */
unsigned accessed:1; /**< 1 if page has been accessed */
unsigned dirty:1; /**< 1 if page has been written to */
unsigned _reserved0:1;
unsigned global:1; /**< Don't update the TLB on table swap if 1 */
unsigned _reserved1:3;
uintptr_t shifted_address:20; /**< Aligned pointer to the physical page */
} __packed;
#define PAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & PAGE_MASK ))
#define HUGEPAGE_ALIGN(ptr) ((typeof(ptr))( (uintptr_t)(ptr) & HUGEPAGE_MASK ))
struct x86_page_table {
struct x86_page_table_entry entries[1024];
} __aligned(PAGE_SIZE);
/**
* @brief Currently active page table at position `index` in the page directory.
* The last entry in the page directory is mapped to itself, therefore being
* interpreted by the MMU as a page table. This has the effect that the last
* page table, i.e. the page directory again, maps the entire page directory
* structure so it can be manipulated while paging is active. See the comment
* at the beginning of `arch/x86/mm/page.c` for a more detailed explanation.
*
* @param index Table index in the page directory
*/
#define X86_CURRENT_PT(index) ( &((struct x86_page_table *)0xffc00000)[index] )
struct x86_page_directory_entry {
unsigned present:1; /**< Page Fault on access if 0 */
unsigned rw:1; /**< Page Fault on write if 0 */
unsigned user:1; /**< Page Fault on user mode access if 0 */
unsigned write_through:1; /**< Enable write-through caching */
unsigned cache_disabled:1; /**< Disable caching in TLB */
unsigned accessed:1; /**< 1 if page has been accessed */
unsigned _reserved0:1;
unsigned huge:1; /**< 0 = 4K, 1 = 4M */
unsigned _reserved1:1;
unsigned _ignored2:3;
uintptr_t shifted_address:20; /**< Aligned pointer to `struct x86_page_table` */
} __packed;
struct x86_page_directory {
struct x86_page_directory_entry entries[1024];
} __aligned(PAGE_SIZE);
/**
* @brief Currently active page directory.
* The last entry in the page directory is mapped to itself, therefore being
* interpreted by the MMU as a page table. See the comment at the start of
* `arch/x86/mm/page.c` for a more detailed explanation.
*/
#define X86_CURRENT_PD ((struct x86_page_directory *)X86_CURRENT_PT(1023))
/* page fault status code bits */
#define X86_PF_PRESENT (1u << 0)
#define X86_PF_WRITE (1u << 1)
@ -97,20 +44,13 @@ struct x86_page_directory {
#define X86_PF_SHADOW_STACK (1u << 6)
#define X86_PF_SGX (1u << 15)
/**
* @brief Arch dependent virtual memory information data structure (x86 version).
* Outside of `/arch/x86`, this is treated as a completely obfuscated type,
* and only pointers to it are stored and passed around.
*/
typedef struct x86_page_directory vm_info_t;
/**
* @brief Get the physical address a virtual one is currently mapped to.
*
* @param virt virtual address
* @returns The physical address, or `0` if there is no mapping
*/
uintptr_t virt_to_phys(void *virt);
uintptr_t vtophys(void *virt);
#endif /* not _ASM_SOURCE */

@ -0,0 +1,33 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#define _ARCH_VMPARAM_H_
#ifdef __x86_64__
#include <amd64/vmparam.h>
#else
#include <i386/vmparam.h>
#endif
#define USER_START ((void *)USER_OFFSET)
#define USER_END (USER_START + USER_LENGTH)
#define DMAP_START ((void *)DMAP_OFFSET)
#define DMAP_END (DMAP_START + DMAP_LENGTH)
#define KERN_START ((void *)KERN_OFFSET)
#define KERN_END (KERN_START + KERN_LENGTH)
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@ -0,0 +1,93 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#ifndef _ARCH_PAGE_H_
#error "This file is not meant to be included directly, use <arch/page.h>"
#endif
/** @brief Binary logarithm of `HUGEPAGE_SIZE`. */
#define HUGEPAGE_SHIFT 22
#ifndef _ASM_SOURCE
#include <gay/cdefs.h>
#include <gay/types.h>
struct x86_page_table_entry {
unsigned present:1; /**< Page Fault on access if 0 */
unsigned rw:1; /**< Page Fault on write if 0 */
unsigned user:1; /**< Page Fault on user mode access if 0 */
unsigned write_through:1; /**< Enable write-through caching */
unsigned cache_disabled:1; /**< Disable caching in TLB */
unsigned accessed:1; /**< 1 if page has been accessed */
unsigned dirty:1; /**< 1 if page has been written to */
unsigned _reserved0:1;
unsigned global:1; /**< Don't update the TLB on table swap if 1 */
unsigned _reserved1:3;
uintptr_t shifted_address:20; /**< Aligned pointer to the physical page */
} __packed;
struct x86_page_table {
struct x86_page_table_entry entries[1024];
} __aligned(PAGE_SIZE);
/**
* @brief Currently active page table at position `index` in the page directory.
* The last entry in the page directory is mapped to itself, therefore being
* interpreted by the MMU as a page table. This has the effect that the last
* page table, i.e. the page directory again, maps the entire page directory
* structure so it can be manipulated while paging is active. See the comment
* at the beginning of `arch/x86/mm/page.c` for a more detailed explanation.
*
* @param index Table index in the page directory
*/
#define X86_CURRENT_PT(index) ( &((struct x86_page_table *)X86_PD_OFFSET)[index] )
struct x86_page_directory_entry {
unsigned present:1; /**< Page Fault on access if 0 */
unsigned rw:1; /**< Page Fault on write if 0 */
unsigned user:1; /**< Page Fault on user mode access if 0 */
unsigned write_through:1; /**< Enable write-through caching */
unsigned cache_disabled:1; /**< Disable caching in TLB */
unsigned accessed:1; /**< 1 if page has been accessed */
unsigned _reserved0:1;
unsigned huge:1; /**< 0 = 4K, 1 = 4M */
unsigned _reserved1:1;
unsigned _ignored2:3;
uintptr_t shifted_address:20; /**< Aligned pointer to `struct x86_page_table` */
} __packed;
struct x86_page_directory {
struct x86_page_directory_entry entries[1024];
} __aligned(PAGE_SIZE);
/**
* @brief Currently active page directory.
* The last entry in the page directory is mapped to itself, therefore being
* interpreted by the MMU as a page table. See the comment at the start of
* `arch/x86/mm/page.c` for a more detailed explanation.
*/
#define X86_CURRENT_PD ((struct x86_page_directory *)X86_CURRENT_PT(1023))
/**
* @brief Arch dependent virtual memory information data structure (x86 version).
* Outside of `/arch/x86`, this is treated as a completely obfuscated type,
* and only pointers to it are stored and passed around.
*/
typedef struct x86_page_directory vm_info_t;
#endif /* not _ASM_SOURCE */
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

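A hedged sketch of how the bitfield view in the new i386/page.h corresponds to the raw bits the setup code builds with `or $0x003` (assumes the little-endian bitfield packing this header already relies on; `make_pte` is a hypothetical name):

    union pte_view {
        struct x86_page_table_entry entry;
        u32 raw;
    };

    static u32 make_pte(uintptr_t phys)
    {
        union pte_view v = { .raw = 0 };
        v.entry.present = 1;
        v.entry.rw = 1;
        v.entry.shifted_address = phys >> PAGE_SHIFT;
        return v.raw; /* equals (phys & PAGE_MASK) | 0x003 */
    }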
@ -0,0 +1,37 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#ifndef _ARCH_VMPARAM_H_
#error "This file is not meant to be included directly, use <arch/vmparam.h>"
#endif
/** @brief Userland memory region */
#define USER_OFFSET 0x00000000
#define USER_LENGTH 0x80000000
/** @brief Direct (contiguous) mapping of physical memory, also used as heap */
#define DMAP_OFFSET 0x80000000
/* this gives us ~ 1.7 GB of effectively usable RAM for *everything*, much wow */
#define DMAP_LENGTH 0x70000000
/** @brief Kernel image (code + data) */
#define KERN_OFFSET 0xf0000000
#define KERN_LENGTH 0x0f000000
/** @brief Recursive Page Directory map */
#define X86_PD_OFFSET 0xffc00000
#define X86_PD_LENGTH 0x00400000
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@ -39,7 +39,7 @@ static uintptr_t dynpage_end;
/**
* @brief First page table for low memory (0 - 4 M).
* This is initialized by the early boot routine in assembly so that paging
* can be enabled (the kernel itself is mapped to `0xc0100000` by default).
* can be enabled (the kernel itself is mapped to `0xf0100000` by default).
*/
__asmlink struct x86_page_table pt0;
/** @brief First page directory for low memory. */
@ -112,6 +112,7 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
pde->user = (flags & MM_PAGE_USER) != 0;
pde->accessed = (flags & MM_PAGE_ACCESSED) != 0;
pde->cache_disabled = (flags & MM_PAGE_NOCACHE) != 0;
pde->shifted_address = phys >> PAGE_SHIFT;
return 0;
}
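For orientation, a sketch of how `vtophys()` can walk the recursive mapping (the actual implementation in this file may differ; 4 MiB huge pages are ignored for brevity):

    uintptr_t vtophys_sketch(void *virt)
    {
        size_t pd_index = (uintptr_t)virt >> HUGEPAGE_SHIFT;
        size_t pt_index = ((uintptr_t)virt >> PAGE_SHIFT) & 0x3ff;

        struct x86_page_directory_entry *pde = &X86_CURRENT_PD->entries[pd_index];
        if (!pde->present)
            return 0;

        struct x86_page_table_entry *pte = &X86_CURRENT_PT(pd_index)->entries[pt_index];
        if (!pte->present)
            return 0;

        return ((uintptr_t)pte->shifted_address << PAGE_SHIFT)
             | ((uintptr_t)virt & ~PAGE_MASK);
    }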

@ -1,17 +1,13 @@
# See the end of this file for copyright and license terms.
target_sources(gay_arch PRIVATE
atom.S
idt.S
interrupt.c
irq.c
irq.S
port.S
switch.S
trap.c
trap.S
)
add_subdirectory("${X86_ARCH}")
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.
#

@ -0,0 +1,22 @@
# See the end of this file for copyright and license terms.
target_sources(gay_arch PRIVATE
atom.S
idt.S
irq.S
port.S
switch.S
trap.S
)
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.
#
# GayBSD is nonviolent software: you may only use, redistribute, and/or
# modify it under the terms of the Cooperative Nonviolent Public License
# (CNPL) as found in the LICENSE file in the source code root directory
# or at <https://git.pixie.town/thufie/npl-builder>; either version 7
# of the license, or (at your option) any later version.
#
# GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
# permitted by applicable law. See the CNPL for details.

@ -14,21 +14,15 @@ ASM_END(x86_load_idt)
.data
.extern x86_idt
.align 4
.word 0 /* padding */
x86_idt_desc:
.word x86_idt_end - x86_idt - 1 /* limit (size in bytes - 1) */
.word X86_INTR_COUNT * 8 - 1 /* limit (size in bytes - 1) */
.long x86_idt /* base */
.size x86_idt_desc, . - x86_idt_desc
.align 16
.global x86_idt
x86_idt:
.fill X86_INTR_COUNT, 8, 0 /* empty idt, initialized dynamically */
x86_idt_end:
.size x86_idt, . - x86_idt
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.

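For reference, a C mirror of the pseudo-descriptor loaded by `lidt` (sketch; the `.word 0` padding above keeps the 32-bit base field 4-byte aligned):

    struct x86_idt_desc {
        u16 limit; /* size of the IDT in bytes, minus one */
        u32 base;  /* linear address of x86_idt */
    } __packed;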
@ -8,6 +8,8 @@
#include <gay/kprintf.h>
#include <gay/types.h>
struct x86_idt_entry x86_idt[X86_INTR_COUNT] __aligned(16);
void x86_setup_interrupts(void)
{
/* make sure all IRQs are masked first */
@ -42,11 +44,16 @@ void x86_setup_interrupts(void)
void x86_set_gate(u8 vector, void (*handler)(void), u8 flags)
{
struct x86_idt_entry *entry = &x86_idt[vector];
entry->offset0 = (uintptr_t)handler & 0xffff;
entry->offset1 = ((uintptr_t)handler >> 16) & 0xffff;
entry->selector = X86_KERN_CS;
entry->_zero = 0;
entry->_rsvd0 = 0;
entry->attr = flags;
entry->offset0 = (uintptr_t)handler & 0xffff;
entry->offset1 = ((uintptr_t)handler >> 16) & 0xffff;
#ifdef __x86_64__
entry->offset2 = ((uintptr_t)handler >> 32) & 0xffffffff;
entry->_rsvd1 = 0;
#endif
}
void x86_set_trap_gate(u8 vector, void (*isr)(void))

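A sketch of the inverse operation, to illustrate the encoding `x86_set_gate()` performs (`gate_offset` is a hypothetical helper, not part of the commit):

    static uintptr_t gate_offset(const struct x86_idt_entry *entry)
    {
        uintptr_t offset = entry->offset0 | ((uintptr_t)entry->offset1 << 16);
    #ifdef __x86_64__
        offset |= (uintptr_t)entry->offset2 << 32;
    #endif
        return offset;
    }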
@ -5,6 +5,13 @@ set_property(CACHE BOOT_TYPE PROPERTY STRINGS
"bios"
)
option(CFG_X86_64 "64-bit kernel (32-bit support is fundamentally broken)" OFF)
if(CFG_X86_64)
set(X86_ARCH amd64)
else()
set(X86_ARCH i386)
endif()
# This file is part of GayBSD.
# Copyright (c) 2021 fef <owo@fef.moe>.
#

@ -10,8 +10,6 @@ include("${CMAKE_CURRENT_LIST_DIR}/config-${ARCH}.cmake")
set(CFG_KERN_ORIGIN "0x00100000" CACHE STRING "Physical address where the kernel is loaded (don't touch this)")
set(CFG_KERN_OFFSET "0xc0000000" CACHE STRING "Virtual address the kernel is mapped to (don't touch this)")
set(CFG_POISON_PAGES "Poison pages after allocate and free" ON)
set(CFG_POISON_HEAP "Poison heap memory after kmalloc() and kfree()" ON)

@ -0,0 +1,21 @@
# Virtual Memory Layout on i386
GayBSD's virtual memory map is loosely based on the one from FreeBSD.
The size specifiers here are powers of two (1 KB = 1024 B).
start address | offset | end address | size | description
:-------------:|-----------:|:-----------:|-----------:|:-----------------------
`00000000` | +0 | `7fffffff` | 2 GB | userland area
`80000000` | -2 GB | `efffffff` | ~ 1.8 GB | linear physical memory
`f0000000` | -256 MB | `ffbfffff` | 252 MB | kernel image
`ffc00000` | -4 MB | `ffffffff` | 4 MB | recursive page table
The linear physical memory is a direct mapping of physical RAM, which is
required because `kmalloc()` needs to be able to allocate *physically*
contiguous memory for DMA transfers.
It is also used as the heap area.
Unfortunately, the maximum supported memory size on i386 is around 1.8 GB.
This limit comes from the required direct mapping; lifting it would require
a fair bit of work that i don't have the resources for, and 32-bit support
is likely to be dropped in the near future anyway.
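A sketch of the phys/virt helpers this direct mapping implies (assuming the linear region maps physical address 0 upwards; the names are illustrative, not necessarily the kernel's):

    static inline void *dmap_phys_to_virt(uintptr_t phys)
    {
        return (void *)(phys + DMAP_OFFSET);
    }

    static inline uintptr_t dmap_virt_to_phys(void *virt)
    {
        return (uintptr_t)virt - DMAP_OFFSET;
    }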

@ -25,9 +25,6 @@
/** @brief Physical address where the kernel is loaded */
#define CFG_KERN_ORIGIN @CFG_KERN_ORIGIN@
/** @brief Virtual address the kernel is mapped to */
#define CFG_KERN_OFFSET @CFG_KERN_OFFSET@
/** @brief Poison dynamic pages when allocating and freeing them */
#cmakedefine01 CFG_POISON_PAGES

@ -3,6 +3,7 @@
#pragma once
#include <gay/cdefs.h>
#include <gay/types.h>
/**
* @brief Get the number of elements in a statically allocated array.
@ -62,6 +63,11 @@
*/
void *ptr_align(void *ptr, int log);
static __always_inline uintptr_t uintptr_align(uintptr_t ptr, int log)
{
return (uintptr_t)ptr_align((void *)ptr, log);
}
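A hedged usage sketch (assuming `ptr_align()` rounds down to a `1 << log` boundary; if it rounds up instead, the expected value changes accordingly):

    uintptr_t base = uintptr_align(0xf0105abcu, PAGE_SHIFT); /* 0xf0105000 if align-down */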
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.

@ -39,10 +39,12 @@
#if _GAY_SOURCE >= 202109L || _POSIX_C_SOURCE
#define SSIZE_MAX __SSIZE_MAX /* max value for an ssize_t */
#define ISIZE_MAX __SSIZE_MAX
#endif
#if _GAY_SOURCE >= 202109L || _POSIX_C_SOURCE >= 200112
#define SIZE_T_MAX __SIZE_T_MAX /* max value for a size_t */
#define USIZE_MAX __SIZE_T_MAX
#define OFF_MAX __OFF_MAX /* max value for an off_t */
#define OFF_MIN __OFF_MIN /* min value for an off_t */
