ktrace: don't unwind past ISR entry points

main
anna 2 years ago
parent 7f92690f84
commit 36d53093d4
Signed by: fef
GPG Key ID: EC22E476DC2D3D84

@@ -34,6 +34,11 @@ SECTIONS {
.text ALIGN(4K) : AT(ADDR(.text) - KERNBASE) {
_text_start = .;
/* put all ISRs into one contiguous region so the
* stack unwinder knows when to stop unwinding */
_isr_start = .;
KEEP(*(.text.isr))
_isr_end = .;
*(.text .text.* .gnu.linkonce.t.*)
_text_end = .;
}

@@ -34,6 +34,11 @@ SECTIONS {
.text ALIGN(PAGE_SIZE) : AT(ADDR(.text) - KERNBASE) {
_text_start = .;
/* put all ISRs into one contiguous region so the
* stack unwinder knows when to stop unwinding */
_isr_start = .;
KEEP(*(.text.isr))
_isr_end = .;
*(.text .text.* .gnu.linkonce.t.*)
_text_end = .;
}
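The `_isr_start` and `_isr_end` markers bracketing `.text.isr` are what the C unwinder compares return addresses against. One plausible shape for the <gay/linker.h> declarations that ktrace.c relies on below (the header itself is not part of this diff, so the exact definitions are an assumption):

	/* sketch only: assumed <gay/linker.h> wrappers around the linker symbols */
	extern char _image_start, _image_end;	/* kernel image bounds */
	extern char _isr_start, _isr_end;	/* brackets .text.isr */

	#define image_start ((void *)&_image_start)
	#define image_end   ((void *)&_image_end)
	#define isr_start   ((void *)&_isr_start)
	#define isr_end     ((void *)&_isr_end)

Wrapping the symbols like this lets them compare cleanly against the void pointers the unwinder walks.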

@@ -26,7 +26,8 @@ set(CMAKE_RANLIB ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}ranlib${CMAK
set(CMAKE_SIZE ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}size${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_STRIP ${TOOLCHAIN_PATH}/${CMAKE_EXECUTABLE_PREFIX}strip${CMAKE_EXECUTABLE_SUFFIX} CACHE INTERNAL "")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_toolchain_common_flags} -mcmodel=kernel -mno-red-zone -mno-mmx -mno-sse -msoft-float")
# XXX ktrace_print() currently relies on -fno-omit-frame-pointer
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_toolchain_common_flags} -mcmodel=kernel -mno-red-zone -mno-mmx -mno-sse -msoft-float -fno-omit-frame-pointer")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${_toolchain_common_flags}")
set(CMAKE_C_COMPILER_TARGET ${_toolchain_triple})
set(CMAKE_ASM_COMPILER_TARGET ${_toolchain_triple})
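Keeping the frame pointer is what makes the walk in ktrace.c possible at all: with -fno-omit-frame-pointer, every function's prologue saves the caller's %rbp (%ebp on i386), so the frames form a singly linked list on the stack. An illustrative sketch of the layout the unwinder depends on:

	/* illustrative only: layout of one frame when the frame pointer is kept */
	struct stack_frame {
		struct stack_frame *caller;	/* saved %rbp, i.e. rbp[0] */
		void *return_addr;		/* pushed by call, i.e. rbp[1] */
	};

This is exactly why the unwinder below reads rbp[1] for the return address and follows *rbp to reach the next frame.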

@@ -18,13 +18,14 @@
.extern irq_table
/* there is probably a fancy CPU feature for this, but idk */
irq_count:
L_DATA(irq_count)
.long 0
L_END(irq_count)
.text
/* bool in_irq(void); */
ASM_ENTRY(in_irq)
ENTRY(in_irq)
movabsq $irq_count, %rdx
xor %eax, %eax
mov %eax, %ecx
@@ -32,10 +33,12 @@ ASM_ENTRY(in_irq)
testl %ecx, (%rdx)
setne %al
retq
ASM_END(in_irq)
END(in_irq)
.section .text.isr
.macro gen_irq num
ASM_ENTRY(_x86_isr_irq\num )
ENTRY(_x86_isr_irq\num )
push %rax
push %rcx
push %rdx
@@ -53,7 +56,7 @@ ASM_ENTRY(_x86_isr_irq\num )
movabsq $(irq_table + \num * 8), %rax
callq *%rax
jmp leave_irq
ASM_END(_x86_isr_irq\num )
END(_x86_isr_irq\num )
.endm
gen_irq 0
@@ -73,8 +76,7 @@ ASM_END(_x86_isr_irq\num )
gen_irq 14
gen_irq 15
.align 4
leave_irq:
L_ENTRY(leave_irq)
movabsq $irq_count, %rax
decl (%rax)
pop %r11
@@ -87,5 +89,4 @@ leave_irq:
pop %rcx
pop %rax
iretq
.size leave_irq, . - leave_irq
L_END(leave_irq)
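The ENTRY/END and L_ENTRY/L_DATA/L_END pairs come from <asm/common.h>, which this commit doesn't touch; a plausible sketch of what they expand to, purely as an assumption (note how L_END would subsume the hand-written `.size leave_irq, . - leave_irq` removed above):

	/* assumed definitions, for illustration only */
	#define ENTRY(sym)	.global sym; .type sym, @function; sym:
	#define END(sym)	.size sym, . - sym
	#define L_ENTRY(sym)	.type sym, @function; sym:	/* no .global */
	#define L_DATA(sym)	.type sym, @object; sym:
	#define L_END(sym)	.size sym, . - sym

The L_ variants would keep the symbol file-local while still attaching type and size metadata.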

@@ -1,18 +1,29 @@
/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
#include <arch/vmparam.h>
#include <gay/kprintf.h>
#include <gay/ktrace.h>
#include <gay/linker.h>
void ktrace_print(void)
__naked void ktrace_print(void)
{
void **rbp;
__asm__ volatile("movq (%%rbp), %0" : "=r"(rbp));
__asm__ volatile(
" movq %rbp, %rdi \n"
" jmp ktrace_print_from \n"
);
}
void ktrace_print_from(void *frame)
{
void **rbp = (void **)frame;
kprintf("Stack trace:\n");
while (rbp >= (void **)KERNBASE) {
/* XXX Rather than spitting out raw addresses, parse the kernel image's
* ELF sections to figure out what the address actually belongs to */
while (rbp >= (void **)image_start && rbp < (void **)image_end) {
/* caller return address is immediately above the stack frame */
kprintf(" %p\n", rbp[1]);
if (rbp[1] >= isr_start && rbp[1] < isr_end)
break;
rbp = *rbp;
}
}
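Splitting the naked entry point from the actual walker also makes the unwinder reusable: anything that already holds a frame pointer can call ktrace_print_from() directly. A hypothetical call site using GCC's __builtin_frame_address() builtin (this helper is not part of the commit):

	#include <gay/ktrace.h>

	/* hypothetical: dump the current call path without going through
	 * the naked ktrace_print() trampoline */
	static void dump_here(void)
	{
		ktrace_print_from(__builtin_frame_address(0));
	}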

@@ -4,7 +4,7 @@
#include <asm/common.h>
.text
.section .text.isr
/*
* push all registers except %rsi to the stack, and store a pointer to the
@@ -64,7 +64,7 @@
.macro gen_isr_noerror name
.extern x86_isr_\name
ASM_ENTRY(_x86_isr_\name )
ENTRY(_x86_isr_\name )
cld
pushq %rsi
prepare_trap_entry
@@ -74,7 +74,7 @@ ASM_ENTRY(_x86_isr_\name )
prepare_trap_leave
popq %rsi
iretq
ASM_END(_x86_isr_\name )
END(_x86_isr_\name )
.endm
/*
@@ -85,7 +85,7 @@ ASM_END(_x86_isr_\name )
*/
.macro gen_isr_error name
.extern x86_isr_\name
ASM_ENTRY(_x86_isr_\name )
ENTRY(_x86_isr_\name )
cld
/*
@@ -103,7 +103,7 @@ ASM_ENTRY(_x86_isr_\name )
prepare_trap_leave
popq %rsi
iretq
ASM_END(_x86_isr_\name )
END(_x86_isr_\name )
.endm
gen_isr_noerror divide_error

@@ -3,6 +3,7 @@
target_sources(gay_arch PRIVATE
idt.S
irq.S
ktrace.c
switch.S
systm.c
trap.S

@@ -12,21 +12,24 @@
.extern irq_table
/* there is probably a fancy CPU feature for this, but idk */
irq_count:
L_DATA(irq_count)
.byte 0
L_END(irq_count)
.text
/* bool in_irq(void); */
ASM_ENTRY(in_irq)
ENTRY(in_irq)
xor %eax, %eax
cmpb $0, irq_count
setne %al
ret
ASM_END(in_irq)
END(in_irq)
.section .text.isr
.macro gen_irq num
ASM_ENTRY(_x86_isr_irq\num )
ENTRY(_x86_isr_irq\num )
push %eax
push %ecx
push %edx
@@ -39,7 +42,7 @@ ASM_ENTRY(_x86_isr_irq\num )
add $4, %esp
#endif
jmp leave_irq
ASM_END(_x86_isr_irq\num )
END(_x86_isr_irq\num )
.endm
gen_irq 0
@@ -59,12 +62,10 @@ ASM_END(_x86_isr_irq\num )
gen_irq 14
gen_irq 15
.align 4
leave_irq:
L_ENTRY(leave_irq)
decb irq_count
pop %edx
pop %ecx
pop %eax
iret
.size leave_irq, . - leave_irq
L_END(leave_irq)

@@ -0,0 +1,30 @@
/* Copyright (C) 2021 fef <owo@fef.moe>. All rights reserved. */
#include <gay/kprintf.h>
#include <gay/ktrace.h>
#include <gay/linker.h>
__naked void ktrace_print(void)
{
__asm__ volatile(
" pushl %ebp \n"
" call ktrace_print_from \n"
" ret \n"
);
}
void ktrace_print_from(void *frame)
{
void **ebp = (void **)frame;
kprintf("Stack trace:\n");
/* XXX Rather than spitting out raw addresses, parse the kernel image's
* ELF sections to figure out what the address actually belongs to */
while (ebp >= (void **)image_start && ebp < (void **)image_end) {
/* caller return address is immediately above the stack frame */
kprintf(" %p\n", ebp[1]);
if (ebp[1] >= isr_start && ebp[1] < isr_end)
break;
ebp = *ebp;
}
}

@@ -4,7 +4,7 @@
#include <asm/common.h>
.text
.section .text.isr
/*
* Low level trap entry points, this is what gets put into the IDT.
@@ -19,7 +19,7 @@
.macro gen_isr_noerror name
.extern x86_isr_\name
ASM_ENTRY(_x86_isr_\name )
ENTRY(_x86_isr_\name )
cld
pushal
mov %esp, %eax
@@ -29,7 +29,7 @@ ASM_ENTRY(_x86_isr_\name )
add $4, %esp
popal
iretl
ASM_END(_x86_isr_\name )
END(_x86_isr_\name )
.endm
/*
@@ -43,7 +43,7 @@ ASM_END(_x86_isr_\name )
*/
.macro gen_isr_error name
.extern x86_isr_\name
ASM_ENTRY(_x86_isr_\name )
ENTRY(_x86_isr_\name )
cld
pushal
mov %esp, %eax
@@ -63,7 +63,7 @@ ASM_ENTRY(_x86_isr_\name )
popal
add $4, %esp /* "pop" the hardware error code from the stack */
iretl
ASM_END(_x86_isr_\name )
END(_x86_isr_\name )
.endm
gen_isr_noerror divide_error

@@ -51,6 +51,8 @@
*/
#define __alloc_size(argn) __attribute__(( alloc_size(argn) ))
/** @brief Don't emit the usual function prologue/epilogue. */
#define __naked __attribute__(( naked ))
#define __noreturn __attribute__(( noreturn ))
/**
@@ -81,7 +83,7 @@
#define __pure2 __attribute__(( const ))
/** @brief Put the annotated symbol in a specific section. */
#define __section(name) __attribute__(( section(#name) ))
#define __section(name) __attribute__(( section(name) ))
/** @brief Mark the symbol as used, even if it really isn't. */
#define __used __attribute__(( used ))
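The __section() change means callers now pass the section name as a string literal rather than a bare token to be stringified. A hypothetical usage, showing why the old #name form had to go:

	/* new convention: the string literal is passed through unchanged */
	__section(".text.isr") void isr_stub(void);

	/* with the old #name stringification, the same call would have
	 * expanded to section("\".text.isr\"") and failed to match */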

@@ -3,4 +3,9 @@
#pragma once
/** @brief Print a full stack trace to the kernel log, starting from the caller. */
__asmlink
void ktrace_print(void);
/** @brief Print a full stack trace to the kernel log, starting from `frame`. */
__asmlink
void ktrace_print_from(void *frame);
