x86: better separation for i386 specific code

That's it, nothing major is gonna happen anymore
until i get amd64 support working and deprecate
the entire i386 branch.  32-bit just adds so many
extra complications to memory management that i
don't want to waste any more energy on this
platform, which is obsolete anyway.
main
anna 3 years ago
parent 36985f51e2
commit 03f31df67f
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -40,7 +40,7 @@ static inline long latom_cmp_xchg(latom_t *latom, long compare, long val)
long rax = compare;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" cmpxchgq %1, (%2) \n" /* if ((rax = latom->_value) == compare) latom->_value = val */
: "+a"(rax)
: "r"(val), "r"(&latom->_value)
@@ -60,7 +60,7 @@ static inline long latom_cmp_xchg(latom_t *latom, long compare, long val)
static inline long latom_add(latom_t *latom, long val)
{
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" xaddq %0, (%1) \n"
: "+r"(val)
: "r"(&latom->_value)
@@ -80,7 +80,7 @@ static inline bool latom_inc(latom_t *latom)
bool nonzero = false;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" incq (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initial value isn't optimized out */
@@ -96,7 +96,7 @@ static inline bool latom_dec(latom_t *latom)
bool nonzero = false;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" decq (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initializer isn't optimized out */
@@ -114,7 +114,7 @@ static inline long latom_and(latom_t *latom, long val)
__asm__ volatile(
" movq (%2), %0 \n" /* rax = latom->_value */
"1: andq %0, %1 \n" /* val &= rax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
@@ -133,7 +133,7 @@ static inline long latom_or(latom_t *latom, long val)
__asm__ volatile(
" movq (%2), %0 \n" /* rax = latom->_value */
"1: orq %0, %1 \n" /* val |= rax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
@@ -152,7 +152,7 @@ static inline long latom_xor(latom_t *latom, long val)
__asm__ volatile(
" movq (%2), %0 \n" /* rax = latom->_value */
"1: xorq %0, %1 \n" /* val ^= rax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgq %1, (%2) \n" /* if (latom->_value == rax) latom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (rax updated to new latom->_value) */
@@ -166,16 +166,34 @@ static inline long latom_xor(latom_t *latom, long val)
static inline bool latom_set_bit(latom_t *latom, int pos)
{
int mask = 1 << pos;
long oldval = latom_or(latom, mask);
return (oldval & mask) == 0;
bool ret = false;
__asm__ volatile(
X86_LOCK_PREFIX
" btsq %1, (%2) \n"
" setnc %b0 \n" /* true iff the bit was previously clear */
: "+r"(ret)
: "r"((long)pos), "r"(&latom->_value) /* btsq needs a 64-bit bit offset */
: "cc", "memory"
);
return ret;
}
static inline bool latom_clr_bit(latom_t *latom, int pos)
{
int mask = 1 << pos;
long oldval = latom_and(latom, ~mask);
return (oldval & mask) != 0;
bool ret = false;
__asm__ volatile(
X86_LOCK_PREFIX
" btrq %1, (%2) \n"
" setc %b0 \n" /* true iff the bit was previously set */
: "+r"(ret)
: "r"((long)pos), "r"(&latom->_value) /* btrq needs a 64-bit bit offset */
: "cc", "memory"
);
return ret;
}
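For reference, a short usage sketch (hypothetical, not part of this commit) of how the two bit primitives are meant to be consumed; the return value tells the caller whether the operation actually changed anything, which is what makes lock-free bitmap allocation work:

/* hypothetical helper built on the primitives above:
 * claim the first free slot in a 64-bit allocation bitmap */
static int claim_first_free(latom_t *bitmap)
{
	for (int pos = 0; pos < 64; pos++) {
		/* true iff the bit was clear before, i.e. the caller owns it now */
		if (latom_set_bit(bitmap, pos))
			return pos;
	}
	return -1; /* everything taken */
}

Releasing a slot is the mirror image: latom_clr_bit(bitmap, pos) returns true iff the bit was actually set, so a false return would indicate a double free.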
/*

@@ -4,8 +4,15 @@
#define _ARCH_ATOM_H_
#include <gay/cdefs.h>
#include <gay/config.h>
#include <gay/types.h>
#ifdef CFG_SMP
#define X86_LOCK_PREFIX "\tlock\n"
#else
#define X86_LOCK_PREFIX ""
#endif
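Since the prefix is spliced into the asm templates via ordinary string literal concatenation, uniprocessor builds lose the lock byte entirely rather than paying for a runtime check. A standalone sketch of the effect (the demo function is hypothetical, not from the tree):

/* with CFG_SMP the template reads "\tlock\n\txaddl %0, (%1)\n",
 * without it the lock byte is simply absent from the instruction stream */
static inline int demo_fetch_add(int *ptr, int val)
{
	__asm__ volatile(
		X86_LOCK_PREFIX
		"	xaddl	%0, (%1)	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory"
	);
	return val; /* xadd leaves the previous value in the source operand */
}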
/**
* @brief Initialize an atom non-atomically.
* This function is **only** for initializing an atom; you should never use it
@@ -24,8 +31,6 @@ static inline void atom_init(atom_t *atom, int val)
* You usually shouldn't need this function because all the other atomic
* primitives return the value before the operation, and we are only really
* interested in how values *compare* between operations in most cases.
* Don't use `atom_read()` followed by another atomic operation, it defeats the
* whole purpose of using atomics in the first place.
*
* @param atom Atom to read the value of
* @return The atom's "current" value (at the time of reading it)
@@ -77,7 +82,7 @@ static inline int atom_cmp_xchg(atom_t *atom, int compare, int val)
int eax = compare;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" cmpxchgl %1, (%2) \n" /* if (atom->_value == eax) atom->_value = val */
: "+a"(eax)
: "r"(val), "r"(&atom->_value)
@@ -97,7 +102,7 @@ static inline int atom_cmp_xchg(atom_t *atom, int compare, int val)
static inline int atom_add(atom_t *atom, int val)
{
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" xaddl %0, (%1) \n"
: "+r"(val)
: "r"(&atom->_value)
@@ -130,7 +135,7 @@ static inline bool atom_inc(atom_t *atom)
bool nonzero = false;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" incl (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initial value isn't optimized out */
@@ -152,7 +157,7 @@ static inline bool atom_dec(atom_t *atom)
bool nonzero = false;
__asm__ volatile(
" lock \n"
X86_LOCK_PREFIX
" decl (%1) \n"
" setne %0 \n"
: "+r"(nonzero) /* read+write to ensure the initializer isn't optimized out */
@@ -176,7 +181,7 @@ static inline int atom_and(atom_t *atom, int val)
__asm__ volatile(
"1: andl %0, %1 \n" /* val &= eax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgl %1, (%2) \n" /* if (atom->_value == eax) atom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (eax updated to new atom->_value) */
@@ -201,7 +206,7 @@ static inline int atom_or(atom_t *atom, int val)
__asm__ volatile(
"1: orl %0, %1 \n" /* val |= eax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgl %1, (%2) \n" /* if (atom->_value == eax) atom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (eax updated to new atom->_value) */
@@ -227,7 +232,7 @@ static inline int atom_xor(atom_t *atom, int val)
__asm__ volatile(
" movl (%2), %0 \n" /* eax = atom->_value */
"1: xorl %0, %1 \n" /* val ^= eax */
" lock \n"
X86_LOCK_PREFIX
" cmpxchgl %1, (%2) \n" /* if (atom->_value == eax) atom->_value = val */
" pause \n" /* intel says you're supposed to do this in spin loops */
" jne 1b \n" /* else goto 1 (eax updated to new atom->_value) */
@@ -248,9 +253,18 @@
*/
static inline bool atom_set_bit(atom_t *atom, int pos)
{
int mask = 1 << pos;
int oldval = atom_or(atom, mask);
return (oldval & mask) == 0;
bool ret = false;
__asm__ volatile(
X86_LOCK_PREFIX
" btsl %1, (%2) \n"
" setnc %b0 \n" /* true iff the bit was previously clear */
: "+r"(ret)
: "r"(pos), "r"(&atom->_value)
: "cc", "memory"
);
return ret;
}
/**
@@ -262,9 +276,18 @@ static inline bool atom_set_bit(atom_t *atom, int pos)
*/
static inline bool atom_clr_bit(atom_t *atom, int pos)
{
int mask = 1 << pos;
int oldval = atom_and(atom, ~mask);
return (oldval & mask) != 0;
bool ret = false;
__asm__ volatile(
X86_LOCK_PREFIX
" btrl %1, (%2) \n"
" setc %b0 \n" /* true iff the bit was previously set */
: "+r"(ret)
: "r"(pos), "r"(&atom->_value)
: "cc", "memory"
);
return ret;
}
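For context on how these primitives compose, a hedged sketch of the usual retry loop built on atom_cmp_xchg(); the helper name atom_max() is hypothetical and not part of the tree. It leans on the documented convention that the primitives return the value *before* the operation:

static inline int atom_max(atom_t *atom, int val)
{
	int old = atom_read(atom);
	while (old < val) {
		int prev = atom_cmp_xchg(atom, old, val);
		if (prev == old)
			break;	/* exchange went through */
		old = prev;	/* lost the race, retry against the fresh value */
	}
	return old;
}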
#ifdef __x86_64__

@@ -63,6 +63,12 @@ static inline void intr_restore(register_t flags)
enable_intr();
}
static inline bool intr_enabled(void)
{
register_t eflags = read_flags();
return (eflags & (1 << 9)) != 0;
}
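Bit 9 of EFLAGS is the interrupt flag (IF), which is exactly what the mask above tests. A short usage sketch (assuming the surrounding cpufunc.h API; nothing below is from the commit) of the save/disable/restore idiom these helpers support:

static void touch_isr_shared_state(void)
{
	register_t flags = read_flags();	/* snapshot EFLAGS, including IF */
	disable_intr();
	/* ... critical section an ISR might otherwise race with ... */
	intr_restore(flags);	/* re-enables only if IF was set in the snapshot */
}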
static __always_inline void halt(void)
{
__asm__ volatile("hlt");

@@ -1,6 +1,7 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#define _ARCH_INTERRUPT_H_
/** @brief Total number of interrupt lines on x86. */
#define X86_INTR_COUNT 256
@@ -66,6 +67,12 @@
#include <gay/cdefs.h>
#include <gay/types.h>
#ifdef __x86_64__
#include <amd64/interrupt.h>
#else
#include <i386/interrupt.h>
#endif
/**
* @brief A single entry in the Interrupt Descriptor Table as laid out in hardware.
* Luckily, this is not quite as deranged as the GDT layout.
@@ -92,26 +99,6 @@ struct x86_idt_entry {
extern struct x86_idt_entry x86_idt[X86_INTR_COUNT];
/**
* @brief A hardware stack frame.
* I have no idea if the x86 are calling it hardware stack frame tbh, i just
* used the (ARM) terminology i'm used to. This is what gets pushed to the
* stack automatically when entering an ISR.
*/
struct x86_hw_frame {
u32 eip;
u16 cs; u16 _pad0;
u32 eflags;
/*
* These two are only pushed when coming from another (i.e. lower)
* privilege level (wtf???). This effectively means they don't exist
* unless the CS above is equal to X86_USER_CS (arch/segment.h).
*/
u32 user_esp;
u16 user_ss; u16 _pad1;
} __packed;
/**
* @brief Set a gate handler in the Interrupt Descriptor Table.
*

@@ -1,6 +1,7 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#define _ARCH_TRAP_H_
/**
* @file include/arch/trap.h
@@ -20,84 +21,71 @@
#include <gay/cdefs.h>
#include <gay/types.h>
/**
* @brief x86 registers as pushed to the stack by the `pusha` instruction.
*/
struct x86_trap_frame {
/* extra pointer pushed manually, see the assembly routines */
struct x86_hw_frame *hw_frame;
u32 edi;
u32 esi;
u32 ebp;
u32 esp;
u32 ebx;
u32 edx;
u32 ecx;
u32 eax;
} __packed;
void x86_print_regs(const struct x86_trap_frame *frame);
#ifdef __x86_64__
#include <amd64/trap.h>
#else
#include <i386/trap.h>
#endif
extern void _x86_isr_divide_error(void);
__asmlink void x86_isr_divide_error(struct x86_trap_frame *frame);
__asmlink void x86_isr_divide_error(trap_frame_t *frame);
extern void _x86_isr_debug_exception(void);
__asmlink void x86_isr_debug_exception(struct x86_trap_frame *frame);
__asmlink void x86_isr_debug_exception(trap_frame_t *frame);
extern void _x86_isr_nmi(void);
__asmlink void x86_isr_nmi(struct x86_trap_frame *frame);
__asmlink void x86_isr_nmi(trap_frame_t *frame);
extern void _x86_isr_breakpoint(void);
__asmlink void x86_isr_breakpoint(struct x86_trap_frame *frame);
__asmlink void x86_isr_breakpoint(trap_frame_t *frame);
extern void _x86_isr_overflow(void);
__asmlink void x86_isr_overflow(struct x86_trap_frame *frame);
__asmlink void x86_isr_overflow(trap_frame_t *frame);
extern void _x86_isr_bound_range_exceeded(void);
__asmlink void x86_isr_bound_range_exceeded(struct x86_trap_frame *frame);
__asmlink void x86_isr_bound_range_exceeded(trap_frame_t *frame);
extern void _x86_isr_invalid_opcode(void);
__asmlink void x86_isr_invalid_opcode(struct x86_trap_frame *frame);
__asmlink void x86_isr_invalid_opcode(trap_frame_t *frame);
extern void _x86_isr_device_not_available(void);
__asmlink void x86_isr_device_not_available(struct x86_trap_frame *frame);
__asmlink void x86_isr_device_not_available(trap_frame_t *frame);
extern void _x86_isr_double_fault(void);
__asmlink void x86_isr_double_fault(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_double_fault(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_invalid_tss(void);
__asmlink void x86_isr_invalid_tss(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_invalid_tss(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_segment_not_present(void);
__asmlink void x86_isr_segment_not_present(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_segment_not_present(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_stack_segment_fault(void);
__asmlink void x86_isr_stack_segment_fault(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_stack_segment_fault(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_general_protection(void);
__asmlink void x86_isr_general_protection(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_general_protection(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_page_fault(void);
__asmlink void x86_isr_page_fault(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_page_fault(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_x87_fpu_error(void);
__asmlink void x86_isr_x87_fpu_error(struct x86_trap_frame *frame);
__asmlink void x86_isr_x87_fpu_error(trap_frame_t *frame);
extern void _x86_isr_alignment_check(void);
__asmlink void x86_isr_alignment_check(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_alignment_check(trap_frame_t *frame, u32 error_code);
extern void _x86_isr_machine_check(void);
__asmlink void x86_isr_machine_check(struct x86_trap_frame *frame);
__asmlink void x86_isr_machine_check(trap_frame_t *frame);
extern void _x86_isr_simd_floating_point_exception(void);
__asmlink void x86_isr_simd_floating_point_exception(struct x86_trap_frame *frame);
__asmlink void x86_isr_simd_floating_point_exception(trap_frame_t *frame);
extern void _x86_isr_virtualization_exception(void);
__asmlink void x86_isr_virtualization_exception(struct x86_trap_frame *frame);
__asmlink void x86_isr_virtualization_exception(trap_frame_t *frame);
extern void _x86_isr_control_protection_exception(void);
__asmlink void x86_isr_control_protection_exception(struct x86_trap_frame *frame, u32 error_code);
__asmlink void x86_isr_control_protection_exception(trap_frame_t *frame, u32 error_code);
/*
* This file is part of GayBSD.

@@ -0,0 +1,41 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#ifndef _ARCH_INTERRUPT_H_
#error "This file is not meant to be included directly, use <arch/interrupt.h>"
#endif
/**
* @brief A hardware stack frame on i386.
* I have no idea whether x86 people are actually calling it hardware stack
* frame tbh; this is just the (ARM) terminology i'm used to. Either way,
* this is what gets pushed to the stack automatically when entering an ISR.
*/
struct i386_hw_frame {
u32 eip;
u16 cs; u16 _pad0;
u32 eflags;
/*
* On i386, these two are only pushed when entering from another
* (i.e. lower) privilege level (???). This effectively means they don't
* exist unless the CS above is equal to X86_USER_CS (arch/segment.h).
*/
u32 user_esp;
u16 user_ss; u16 _pad1;
} __packed;
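The caveat in the comment is load-bearing: a usage sketch (hypothetical helper, not in the commit) of the only safe way to get at the user stack fields:

/* user_esp/user_ss only exist on the stack when we trapped from ring 3 */
static inline bool i386_hw_frame_user_esp(const struct i386_hw_frame *frame, u32 *esp)
{
	if (frame->cs != X86_USER_CS)
		return false;	/* the fields were never pushed, reading them yields garbage */
	*esp = frame->user_esp;
	return true;
}

This is the same check that print_regs() in sys/i386/systm.c performs further down.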
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -9,7 +9,7 @@
/**
* @brief In-kernel context save for the x86.
* This precise structure layout is hardcoded in assembly, so don't forget to
* update `arch/x86/sys/switch.S` if you need to change it for whatever reason.
* update `arch/x86/sys/i386/switch.S` if you need to change it.
*/
struct x86_context {
/**

@@ -0,0 +1,39 @@
/* See the end of this file for copyright and license terms. */
#pragma once
#ifndef _ARCH_TRAP_H_
#error "This file is not meant to be included directly, use <arch/trap.h>"
#endif
/**
* @brief Complete context save on the i386.
*/
struct i386_trap_frame {
/* extra pointer pushed manually, see the assembly routines */
struct i386_hw_frame *hw_frame;
u32 edi;
u32 esi;
u32 ebp;
u32 esp;
u32 ebx;
u32 edx;
u32 ecx;
u32 eax;
} __packed;
typedef struct i386_trap_frame trap_frame_t;
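Because the assembly stubs hardcode this exact layout (pusha pushes eax, ecx, edx, ebx, esp, ebp, esi, edi in that order, so they land at descending offsets with edi right above the manually pushed hw_frame pointer), a compile-time check is cheap insurance. A hypothetical sketch, not part of the commit:

#include <stddef.h>	/* offsetof */

_Static_assert(offsetof(struct i386_trap_frame, hw_frame) == 0, "trap frame layout");
_Static_assert(offsetof(struct i386_trap_frame, edi) == 4, "trap frame layout");
_Static_assert(offsetof(struct i386_trap_frame, eax) == 32, "trap frame layout");
_Static_assert(sizeof(struct i386_trap_frame) == 36, "trap frame layout");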
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -5,6 +5,7 @@ target_sources(gay_arch PRIVATE
irq.S
port.S
switch.S
systm.c
trap.S
)

@@ -0,0 +1,37 @@
/* See the end of this file for copyright and license terms. */
#include <arch/segment.h>
#include <arch/trap.h>
#include <gay/kprintf.h>
#include <gay/systm.h>
void print_regs(const struct i386_trap_frame *ctx)
{
u32 esp;
if (ctx->hw_frame->cs == X86_USER_CS)
esp = ctx->hw_frame->user_esp;
else
esp = ctx->esp - 3 * 4; /* eip, cs, eflags */
kprintf("EIP = %#x:%#08x\n", ctx->hw_frame->cs, ctx->hw_frame->eip);
kprintf("EFLAGS = %#08x\n", ctx->hw_frame->eflags);
kprintf("EAX = %#08x EDI = %#08x\n", ctx->eax, ctx->edi);
kprintf("EBX = %#08x ESI = %#08x\n", ctx->ebx, ctx->esi);
kprintf("ECX = %#08x ESP = %#08x\n", ctx->ecx, esp);
kprintf("EDX = %#08x EBP = %#08x\n", ctx->edx, ctx->ebp);
}
/*
* This file is part of GayBSD.
* Copyright (c) 2021 fef <owo@fef.moe>.
*
* GayBSD is nonviolent software: you may only use, redistribute, and/or
* modify it under the terms of the Cooperative Nonviolent Public License
* (CNPL) as found in the LICENSE file in the source code root directory
* or at <https://git.pixie.town/thufie/npl-builder>; either version 7
* of the license, or (at your option) any later version.
*
* GayBSD comes with ABSOLUTELY NO WARRANTY, to the extent
* permitted by applicable law. See the CNPL for details.
*/

@@ -1,158 +1,143 @@
/* See the end of this file for copyright and license terms. */
#include <arch/segment.h>
#include <arch/cpufunc.h>
#include <arch/trap.h>
#include <gay/kprintf.h>
#include <gay/systm.h>
void x86_print_regs(const struct x86_trap_frame *context)
{
u32 esp;
if (context->hw_frame->cs == X86_USER_CS)
esp = context->hw_frame->user_esp;
else
esp = context->esp - 3 * 4; /* eip, cs, eflags */
kprintf("EIP = 0x%08x EFLAGS = 0x%08x\n",
context->hw_frame->eip, context->hw_frame->eflags);
kprintf("EAX = 0x%08x EDI = 0x%08x\n", context->eax, context->edi);
kprintf("EBX = 0x%08x ESI = 0x%08x\n", context->ebx, context->esi);
kprintf("ECX = 0x%08x ESP = 0x%08x\n", context->ecx, esp);
kprintf("EDX = 0x%08x EBP = 0x%08x\n", context->edx, context->ebp);
}
void x86_isr_divide_error(struct x86_trap_frame *frame)
void x86_isr_divide_error(trap_frame_t *frame)
{
kprintf("Divide Error\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_debug_exception(struct x86_trap_frame *frame)
void x86_isr_debug_exception(trap_frame_t *frame)
{
kprintf("Debug Exception\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_nmi(struct x86_trap_frame *frame)
void x86_isr_nmi(trap_frame_t *frame)
{
kprintf("Nonmaskable Interrupt\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_breakpoint(struct x86_trap_frame *frame)
void x86_isr_breakpoint(trap_frame_t *frame)
{
kprintf("Breakpoint\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_overflow(struct x86_trap_frame *frame)
void x86_isr_overflow(trap_frame_t *frame)
{
kprintf("Overflow\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_bound_range_exceeded(struct x86_trap_frame *frame)
void x86_isr_bound_range_exceeded(trap_frame_t *frame)
{
kprintf("Bound Range Exceeded\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_invalid_opcode(struct x86_trap_frame *frame)
void x86_isr_invalid_opcode(trap_frame_t *frame)
{
kprintf("Invalid Opcode\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_device_not_available(struct x86_trap_frame *frame)
void x86_isr_device_not_available(trap_frame_t *frame)
{
kprintf("Device Not Available\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_double_fault(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_double_fault(trap_frame_t *frame, u32 error_code)
{
kprintf("Double Fault (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
disable_intr();
print_regs(frame);
panic("Double Fault (error_code = %#08x)", error_code);
}
void x86_isr_invalid_tss(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_invalid_tss(trap_frame_t *frame, u32 error_code)
{
kprintf("Invalid TSS (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_segment_not_present(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_segment_not_present(trap_frame_t *frame, u32 error_code)
{
kprintf("Segment Not Present (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_stack_segment_fault(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_stack_segment_fault(trap_frame_t *frame, u32 error_code)
{
kprintf("Stack Segment Fault (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_general_protection(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_general_protection(trap_frame_t *frame, u32 error_code)
{
kprintf("General Protection Fault (external = %d, table = %d, index = %d)\n",
error_code & 1, (error_code >> 1) & 3, (error_code >> 3));
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_x87_fpu_error(struct x86_trap_frame *frame)
void x86_isr_x87_fpu_error(trap_frame_t *frame)
{
kprintf("x87 FPU Error\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_alignment_check(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_alignment_check(trap_frame_t *frame, u32 error_code)
{
kprintf("Alignment Check (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
kprintf("Alignment Check (error_code = %#08x)\n", error_code);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_machine_check(struct x86_trap_frame *frame)
void x86_isr_machine_check(trap_frame_t *frame)
{
kprintf("Machine Check\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_simd_floating_point_exception(struct x86_trap_frame *frame)
void x86_isr_simd_floating_point_exception(trap_frame_t *frame)
{
kprintf("SIMD Floating Point Exception\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_virtualization_exception(struct x86_trap_frame *frame)
void x86_isr_virtualization_exception(trap_frame_t *frame)
{
kprintf("Virtualization Exception\n");
x86_print_regs(frame);
while (1);
print_regs(frame);
panic("Unexpected interrupt");
}
void x86_isr_control_protection_exception(struct x86_trap_frame *frame, u32 error_code)
void x86_isr_control_protection_exception(trap_frame_t *frame, u32 error_code)
{
kprintf("Control Protection Exception (error = %p)\n", (void *)error_code);
x86_print_regs(frame);
while (1);
kprintf("Control Protection Exception (error_code = %#08x)\n", error_code);
print_regs(frame);
panic("Unexpected interrupt");
}
/*

@@ -2,6 +2,8 @@
#pragma once
#include <arch/trap.h>
#include <gay/cdefs.h>
#include <gay/irq.h>
#include <gay/sched.h>
@@ -14,6 +16,8 @@
*/
void panic(const char *fmt, ...) __noreturn __printflike(1, 2);
void print_regs(const trap_frame_t *ctx);
#ifdef DEBUG
/**
* @brief Assert that statement `x` is true.

@@ -73,7 +73,7 @@ typedef __uintmax_t uintmax_t;
#ifndef _WCHAR_T_DECLARED
#define _WCHAR_T_DECLARED 1
typedef __WCHAR_TYPE__ wchar_t;
typedef ___wchar_t wchar_t;
#endif /* not _WCHAR_T_DECLARED */
#ifndef _WINT_T_DECLARED
