ktrace: don't unwind past ISR entry points

Branch: main
anna committed 3 years ago (parent 7f92690f84, commit 36d53093d4)
Signed by: fef (GPG Key ID: EC22E476DC2D3D84)

@@ -34,6 +34,11 @@ SECTIONS {
 	.text ALIGN(4K) : AT(ADDR(.text) - KERNBASE) {
 		_text_start = .;
+		/* put all ISRs into one contiguous region so the
+		 * stack unwinder knows when to stop unwinding */
+		_isr_start = .;
+		KEEP(*(.text.isr))
+		_isr_end = .;
 		*(.text .text.* .gnu.linkonce.t.*)
 		_text_end = .;
 	}

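These linker-script symbols are what the C side picks up later in this commit (image_start/image_end and isr_start/isr_end in ktrace.c). A sketch of how <gay/linker.h> presumably declares them; that header is not shown in this diff, so the exact names and types here are assumptions:

	/* linker-script symbols carry an address but no storage of their own,
	 * so they are conventionally declared as incomplete arrays */
	extern void *image_start[];	/* first byte of the kernel image */
	extern void *image_end[];	/* one past the last byte */
	extern void *isr_start[];	/* _isr_start from the linker script */
	extern void *isr_end[];		/* _isr_end */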
@@ -34,6 +34,11 @@ SECTIONS {
 	.text ALIGN(PAGE_SIZE) : AT(ADDR(.text) - KERNBASE) {
 		_text_start = .;
+		/* put all ISRs into one contiguous region so the
+		 * stack unwinder knows when to stop unwinding */
+		_isr_start = .;
+		KEEP(*(.text.isr))
+		_isr_end = .;
 		*(.text .text.* .gnu.linkonce.t.*)
 		_text_end = .;
 	}

@ -26,7 +26,8 @@ set(CMAKE_RANLIB ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}ranlib${CMAK
set(CMAKE_SZE ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}szr${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "") set(CMAKE_SZE ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}szr${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_STRIP ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}strip${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "") set(CMAKE_STRIP ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}strip${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_toolchain_common_flags} -mcmodel=kernel -mno-red-zone -mno-mmx -mno-sse -msoft-float") # XXX ktrace_print() currently relies on -fno-omit-frame-pointer
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_toolchain_common_flags} -mcmodel=kernel -mno-red-zone -mno-mmx -mno-sse -msoft-float -fno-omit-frame-pointer")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${_toolchain_common_flags}") set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${_toolchain_common_flags}")
set(CMAKE_C_COMPILER_TARGET ${_toolchain_triple}) set(CMAKE_C_COMPILER_TARGET ${_toolchain_triple})
set(CMAKE_ASM_COMPILER_TARGET ${_toolchain_triple}) set(CMAKE_ASM_COMPILER_TARGET ${_toolchain_triple})

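The XXX comment is the point of this change: the unwinder introduced below walks the chain of saved frame pointers, and that chain only exists if every compiled function keeps the conventional prologue. What -fno-omit-frame-pointer guarantees on amd64 (standard SysV codegen, not specific to this tree):

	/* every function prologue emits
	 *	pushq	%rbp		# save caller's frame pointer
	 *	movq	%rsp, %rbp	# anchor this function's frame
	 * so 0(%rbp) always holds the caller's %rbp (the next link) and
	 * 8(%rbp) the return address -- the two slots ktrace_print_from reads */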
@@ -18,13 +18,14 @@
 .extern irq_table
 
 /* there is probably a fancy CPU feature for this, but idk */
-irq_count:
+L_DATA(irq_count)
 	.long 0
+L_END(irq_count)
 
 .text
 
 /* bool in_irq(void); */
-ASM_ENTRY(in_irq)
+ENTRY(in_irq)
 	movabsq	$irq_count, %rdx
 	xor	%eax, %eax
 	mov	%eax, %ecx
@@ -32,10 +33,12 @@ ASM_ENTRY(in_irq)
 	testl	%ecx, (%rdx)
 	setne	%al
 	retq
-ASM_END(in_irq)
+END(in_irq)
+
+.section .text.isr
 
 .macro gen_irq num
-ASM_ENTRY(_x86_isr_irq\num )
+ENTRY(_x86_isr_irq\num )
 	push	%rax
 	push	%rcx
 	push	%rdx
@@ -53,7 +56,7 @@ ASM_ENTRY(_x86_isr_irq\num )
 	movabsq	$(irq_table + \num * 8), %rax
 	callq	*%rax
 	jmp	leave_irq
-ASM_END(_x86_isr_irq\num )
+END(_x86_isr_irq\num )
 .endm
 
 gen_irq	0
@@ -73,8 +76,7 @@ ASM_END(_x86_isr_irq\num )
 gen_irq	14
 gen_irq	15
 
-.align 4
-leave_irq:
+L_ENTRY(leave_irq)
 	movabsq	$irq_count, %rax
 	decl	(%rax)
 	pop	%r11
@@ -87,5 +89,4 @@ leave_irq:
 	pop	%rcx
 	pop	%rax
 	iretq
-
-.size leave_irq, . - leave_irq
+L_END(leave_irq)

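The ASM_ENTRY/ASM_END to ENTRY/END rename and the new L_DATA/L_ENTRY/L_END helpers for file-local symbols point at a macro family in asm/common.h. That header is not part of this diff; the sketch below is only inferred from the .align/.size boilerplate the old code wrote by hand:

	#define ENTRY(sym)	.global sym; .type sym, @function; .align 4; sym:
	#define L_ENTRY(sym)	.type sym, @function; .align 4; sym:	/* local */
	#define L_DATA(sym)	.type sym, @object; sym:
	#define END(sym)	.size sym, . - sym
	#define L_END(sym)	.size sym, . - sym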
@@ -1,18 +1,29 @@
 /* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
 
-#include <arch/vmparam.h>
-
 #include <gay/kprintf.h>
 #include <gay/ktrace.h>
+#include <gay/linker.h>
 
-void ktrace_print(void)
+__naked void ktrace_print(void)
 {
-	void **rbp;
-	__asm__ volatile("movq (%%rbp), %0" : "=r"(rbp));
+	__asm__ volatile(
+"	movq	%rbp, %rdi		\n"
+"	jmp	ktrace_print_from	\n"
+	);
+}
+
+void ktrace_print_from(void *frame)
+{
+	void **rbp = (void **)frame;
 
 	kprintf("Stack trace:\n");
 
-	while (rbp >= (void **)KERNBASE) {
+	/* XXX Rather than spitting out raw addresses, parse the kernel image's
+	 *     ELF sections to figure out what the address actually belongs to */
+	while (rbp >= (void **)image_start && rbp < (void **)image_end) {
+		/* caller return address is immediately above the stack frame */
 		kprintf(" %p\n", rbp[1]);
+		if (rbp[1] >= isr_start && rbp[1] < isr_end)
+			break;
 		rbp = *rbp;
 	}
 }

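Why the walk has to stop inside _isr_start.._isr_end: ISR entry points are reached through an interrupt gate rather than a call, so the words above their pushed registers are the hardware frame, not a saved %rbp/return-address pair. A sketch of the situation (standard x86 behavior, not specific to this commit):

	/* once rbp[1] (the return address) lands in .text.isr, the next
	 * "frame" up is really the interrupted context:
	 *	RIP, CS, RFLAGS, RSP, SS	<- pushed by the CPU, no rbp link
	 *	saved GPRs of the ISR stub	<- pushed by gen_irq/gen_isr_*
	 * following rbp[0] there would dereference whatever the interrupted
	 * code happened to keep in %rbp, hence the break */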
@@ -4,7 +4,7 @@
 
 #include <asm/common.h>
 
-.text
+.section .text.isr
 
 /*
  * push all registers except %rsi to the stack, and store a pointer to the
@@ -64,7 +64,7 @@
 
 .macro gen_isr_noerror name
 .extern x86_isr_\name
-ASM_ENTRY(_x86_isr_\name )
+ENTRY(_x86_isr_\name )
 	cld
 	pushq	%rsi
 	prepare_trap_entry
@@ -74,7 +74,7 @@ ASM_ENTRY(_x86_isr_\name )
 	prepare_trap_leave
 	popq	%rsi
 	iretq
-ASM_END(_x86_isr_\name )
+END(_x86_isr_\name )
 .endm
 
 /*
@@ -85,7 +85,7 @@ ASM_END(_x86_isr_\name )
  */
 .macro gen_isr_error name
 .extern x86_isr_\name
-ASM_ENTRY(_x86_isr_\name )
+ENTRY(_x86_isr_\name )
 	cld
 
 	/*
@@ -103,7 +103,7 @@ ASM_ENTRY(_x86_isr_\name )
 	prepare_trap_leave
 	popq	%rsi
 	iretq
-ASM_END(_x86_isr_\name )
+END(_x86_isr_\name )
 .endm
 
 gen_isr_noerror	divide_error

@@ -3,6 +3,7 @@
 target_sources(gay_arch PRIVATE
 	idt.S
 	irq.S
+	ktrace.c
 	switch.S
 	systm.c
 	trap.S

@@ -12,21 +12,24 @@
 .extern irq_table
 
 /* there is probably a fancy CPU feature for this, but idk */
-irq_count:
+L_DATA(irq_count)
 	.byte 0
+L_END(irq_count)
 
 .text
 
 /* bool in_irq(void); */
-ASM_ENTRY(in_irq)
+ENTRY(in_irq)
 	xor	%eax, %eax
 	cmpb	$0, irq_count
 	setne	%al
 	ret
-ASM_END(in_irq)
+END(in_irq)
+
+.section .text.isr
 
 .macro gen_irq num
-ASM_ENTRY(_x86_isr_irq\num )
+ENTRY(_x86_isr_irq\num )
 	push	%eax
 	push	%ecx
 	push	%edx
@@ -39,7 +42,7 @@ ASM_ENTRY(_x86_isr_irq\num )
 	add	$4, %esp
 #endif
 	jmp	leave_irq
-ASM_END(_x86_isr_irq\num )
+END(_x86_isr_irq\num )
 .endm
 
 gen_irq	0
@@ -59,12 +62,10 @@ ASM_END(_x86_isr_irq\num )
 gen_irq	14
 gen_irq	15
 
-.align 4
-leave_irq:
+L_ENTRY(leave_irq)
 	decb	irq_count
 	pop	%edx
 	pop	%ecx
 	pop	%eax
 	iret
-
-.size leave_irq, . - leave_irq
+L_END(leave_irq)

@@ -0,0 +1,30 @@
+/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
+
+#include <gay/kprintf.h>
+#include <gay/ktrace.h>
+#include <gay/linker.h>
+
+__naked void ktrace_print(void)
+{
+	__asm__ volatile(
+"	pushl	%ebp			\n" /* argument: current frame pointer */
+"	call	ktrace_print_from	\n"
+"	addl	$4, %esp		\n" /* caller pops the argument (cdecl) */
+"	ret				\n"
+	);
+}
+
+void ktrace_print_from(void *frame)
+{
+	void **ebp = (void **)frame;
+
+	kprintf("Stack trace:\n");
+
+	/* XXX Rather than spitting out raw addresses, parse the kernel image's
+	 *     ELF sections to figure out what the address actually belongs to */
+	while (ebp >= (void **)image_start && ebp < (void **)image_end) {
+		/* caller return address is immediately above the stack frame */
+		kprintf(" %p\n", ebp[1]);
+		if (ebp[1] >= isr_start && ebp[1] < isr_end)
+			break;
+		ebp = *ebp;
+	}
+}

@@ -4,7 +4,7 @@
 
 #include <asm/common.h>
 
-.text
+.section .text.isr
 
 /*
  * Low level trap entry points, this is what gets put into the IDT.
@@ -19,7 +19,7 @@
 
 .macro gen_isr_noerror name
 .extern x86_isr_\name
-ASM_ENTRY(_x86_isr_\name )
+ENTRY(_x86_isr_\name )
 	cld
 	pushal
 	mov	%esp, %eax
@@ -29,7 +29,7 @@ ASM_ENTRY(_x86_isr_\name )
 	add	$4, %esp
 	popal
 	iretl
-ASM_END(_x86_isr_\name )
+END(_x86_isr_\name )
 .endm
 
 /*
@@ -43,7 +43,7 @@ ASM_END(_x86_isr_\name )
 */
 .macro gen_isr_error name
 .extern x86_isr_\name
-ASM_ENTRY(_x86_isr_\name )
+ENTRY(_x86_isr_\name )
 	cld
 	pushal
 	mov	%esp, %eax
@@ -63,7 +63,7 @@ ASM_ENTRY(_x86_isr_\name )
 	popal
 	add	$4, %esp	/* "pop" the hardware error code from the stack */
 	iretl
-ASM_END(_x86_isr_\name )
+END(_x86_isr_\name )
 .endm
 
 gen_isr_noerror	divide_error

@@ -51,6 +51,8 @@
  */
 #define __alloc_size(argn)	__attribute__(( alloc_size(argn) ))
 
+#define __naked			__attribute__(( naked ))
+
 #define __noreturn		__attribute__(( noreturn ))
 
 /**
@@ -81,7 +83,7 @@
 #define __pure2			__attribute__(( const ))
 
 /** @brief Put the annotated symbol in a specific section. */
-#define __section(name)		__attribute__(( section(#name) ))
+#define __section(name)		__attribute__(( section(name) ))
 
 /** @brief Mark the symbol as used, even if it really isn't. */
 #define __used			__attribute__(( used ))

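Dropping the stringize operator changes the macro's contract: `__section(.text.isr)` used to turn the bare tokens into a string, whereas callers now pass an ordinary string literal, which also composes with adjacent-literal concatenation. A usage sketch (the symbol name is illustrative, not from this commit):

	__section(".text.isr") void example_stub(void);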
@@ -3,4 +3,9 @@
 
 #pragma once
 
 /** @brief Print a full stack trace to the kernel log, starting from the caller. */
+__asmlink
 void ktrace_print(void);
+
+/** @brief Print a full stack trace to the kernel log, starting from `frame`. */
+__asmlink
+void ktrace_print_from(void *frame);

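Splitting ktrace_print() from ktrace_print_from() lets a trap handler start the walk at the interrupted frame instead of its own. A hypothetical caller; the trap_frame layout and handler name below are assumptions, not part of this commit:

	#include <gay/ktrace.h>

	/* hypothetical register dump; the real one is defined elsewhere */
	struct trap_frame {
		unsigned long rbp;
		/* ... remaining saved registers ... */
	};

	void example_trap_handler(struct trap_frame *frame)
	{
		/* trace the code that was running when the trap fired */
		ktrace_print_from((void *)frame->rbp);
	}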