Kernel: Fetch the frame pointer using __builtin_frame_address()

This compiler builtin abstracts away the specifics of fetching the frame
pointer. This will allow KSyms.cpp to be built for the aarch64
target. While we're here, let's also change
PerformanceEventBuffer.cpp to not rely on x86_64 specifics.
This commit is contained in:
Timon Kruiper 2022-05-02 22:22:33 +02:00 committed by Andreas Kling
parent dcd76db319
commit 442800db3e
2 changed files with 3 additions and 16 deletions

View file

@@ -168,14 +168,8 @@ void dump_backtrace(PrintToScreen print_to_screen)
return;
TemporaryChange change(in_dump_backtrace, true);
TemporaryChange disable_kmalloc_stacks(g_dump_kmalloc_stacks, false);
FlatPtr base_pointer;
#if ARCH(I386)
asm volatile("movl %%ebp, %%eax"
: "=a"(base_pointer));
#else
asm volatile("movq %%rbp, %%rax"
: "=a"(base_pointer));
#endif
FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0);
dump_backtrace_impl(base_pointer, g_kernel_symbols_available, print_to_screen);
}

View file

@@ -24,14 +24,7 @@ PerformanceEventBuffer::PerformanceEventBuffer(NonnullOwnPtr<KBuffer> buffer)
NEVER_INLINE ErrorOr<void> PerformanceEventBuffer::append(int type, FlatPtr arg1, FlatPtr arg2, StringView arg3, Thread* current_thread, FlatPtr arg4, u64 arg5, ErrorOr<FlatPtr> arg6)
{
FlatPtr base_pointer;
#if ARCH(I386)
asm volatile("movl %%ebp, %%eax"
: "=a"(base_pointer));
#else
asm volatile("movq %%rbp, %%rax"
: "=a"(base_pointer));
#endif
FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0);
return append_with_ip_and_bp(current_thread->pid(), current_thread->tid(), 0, base_pointer, type, 0, arg1, arg2, arg3, arg4, arg5, arg6);
}