Kernel: Support loading the kernel at almost arbitrary virtual addresses

This enables further work on implementing KASLR by adding relocation
support to the pre-kernel and updating the kernel to be less dependent
on specific virtual memory layouts.
This commit is contained in:
Gunnar Beutner 2021-07-26 15:10:51 +02:00 committed by Andreas Kling
parent e3d2ca6bd2
commit 57417a3d6e
20 changed files with 123 additions and 87 deletions

View file

@@ -109,7 +109,7 @@ apic_ap_start32_2:
/* push the Processor pointer this CPU is going to use */
movl (ap_cpu_init_processor_info_array - apic_ap_start)(%ebp), %eax
addl kernel_base, %eax
addl kernel_load_base, %eax
movl 0(%eax, %esi, 4), %eax
push %eax

View file

@@ -141,7 +141,7 @@ apic_ap_start64:
/* push the Processor pointer this CPU is going to use */
movq (ap_cpu_init_processor_info_array - apic_ap_start)(%ebp), %rax
leaq kernel_base(%rip), %r8
leaq kernel_load_base(%rip), %r8
movq (%r8), %r8
addq %r8, %rax
movq 0(%rax, %rsi, 4), %rax

View file

@@ -14,7 +14,8 @@
extern "C" PhysicalAddress start_of_prekernel_image;
extern "C" PhysicalAddress end_of_prekernel_image;
extern "C" size_t physical_to_virtual_offset;
extern "C" FlatPtr kernel_base;
extern "C" FlatPtr kernel_mapping_base;
extern "C" FlatPtr kernel_load_base;
#if ARCH(X86_64)
extern "C" u32 gdt64ptr;
extern "C" u16 code64_sel;

View file

@@ -16,8 +16,6 @@ set(KERNEL_HEAP_SOURCES
Heap/kmalloc.cpp
)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_STATIC}")
set(KERNEL_SOURCES
ACPI/DynamicParser.cpp
ACPI/Initialize.cpp
@@ -350,7 +348,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mno-80387 -mno-mmx -mno-sse -mno-sse2")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-asynchronous-unwind-tables")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector-strong")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdlib -nostdinc -nostdinc++")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nodefaultlibs -nostdlib -nostdinc -nostdinc++")
# Apply any flags that are only available on >= GCC 11.1
if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.1)
@@ -365,7 +363,7 @@ else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -faligned-new=4")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pie -Wl,--no-dynamic-linker")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-pie")
# Kernel Coverage (KCOV) is an API to collect and expose program counters of
# kernel code that has been run to user space. It's rather slow and likely not

View file

@@ -720,7 +720,7 @@ private:
{
if (!Process::current()->is_superuser())
return false;
builder.append(String::number(kernel_base));
builder.append(String::number(kernel_load_base));
return true;
}
};

View file

@@ -83,7 +83,7 @@ UNMAP_AFTER_INIT static void load_kernel_symbols_from_data(ReadonlyBytes const&
}
}
auto& ksym = s_symbols[current_symbol_index];
ksym.address = address;
ksym.address = kernel_load_base + address;
char* name = static_cast<char*>(kmalloc_eternal((bufptr - start_of_name) + 1));
memcpy(name, start_of_name, bufptr - start_of_name);
name[bufptr - start_of_name] = '\0';
@@ -118,7 +118,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksym
if (use_ksyms) {
FlatPtr copied_stack_ptr[2];
for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer; stack_ptr && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)copied_stack_ptr[0]) {
if ((FlatPtr)stack_ptr < kernel_base)
if ((FlatPtr)stack_ptr < kernel_load_base)
break;
void* fault_at;

View file

@@ -4,6 +4,7 @@ set(SOURCES
init.cpp
UBSanitizer.cpp
../MiniStdLib.cpp
../../Userland/Libraries/LibELF/Relocation.cpp
)
if ("${SERENITY_ARCH}" STREQUAL "i686")
@@ -12,6 +13,8 @@ else()
set(PREKERNEL_TARGET Prekernel64)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static")
add_executable(${PREKERNEL_TARGET} ${SOURCES})
target_compile_options(${PREKERNEL_TARGET} PRIVATE -no-pie -fno-pic)

View file

@@ -12,7 +12,7 @@
# include <Kernel/VirtualAddress.h>
#endif
#define MAX_KERNEL_SIZE 0x3000000
#define MAX_KERNEL_SIZE 0x4000000
#ifdef __cplusplus
namespace Kernel {
@@ -21,7 +21,8 @@ struct [[gnu::packed]] BootInfo {
u32 start_of_prekernel_image;
u32 end_of_prekernel_image;
u64 physical_to_virtual_offset;
u64 kernel_base;
u64 kernel_mapping_base;
u64 kernel_load_base;
# if ARCH(X86_64)
u32 gdt64ptr;
u16 code64_sel;

View file

@@ -31,8 +31,11 @@ boot_pd0_pts:
.global boot_pd_kernel
boot_pd_kernel:
.skip 4096
.global boot_pd_kernel_pts
boot_pd_kernel_pts:
.global boot_pd_kernel_pt0
boot_pd_kernel_pt0:
.skip 4096
.global boot_pd_kernel_image_pts
boot_pd_kernel_image_pts:
.skip 4096 * (MAX_KERNEL_SIZE >> 21)
.global boot_pd_kernel_pt1023
boot_pd_kernel_pt1023:

View file

@@ -12,6 +12,7 @@
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/VirtualAddress.h>
#include <LibC/elf.h>
#include <LibELF/Relocation.h>
// Defined in the linker script
extern size_t __stack_chk_guard;
@@ -28,7 +29,8 @@ extern "C" u64 boot_pdpt[512];
extern "C" u64 boot_pd0[512];
extern "C" u64 boot_pd0_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel[512];
extern "C" u64 boot_pd_kernel_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt0[512];
extern "C" u64 boot_pd_kernel_image_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt1023[512];
extern "C" char const kernel_cmdline[4096];
@@ -38,10 +40,20 @@ extern "C" {
multiboot_info_t* multiboot_info_ptr;
}
[[noreturn]] static void halt()
{
asm volatile("hlt");
__builtin_unreachable();
}
void __stack_chk_fail()
{
asm("ud2");
__builtin_unreachable();
halt();
}
void __assertion_failed(char const*, char const*, unsigned int, char const*)
{
halt();
}
namespace Kernel {
@@ -50,11 +62,6 @@ namespace Kernel {
// We declare them here to ensure their signatures don't accidentally change.
extern "C" [[noreturn]] void init();
static void halt()
{
asm volatile("hlt");
}
// SerenityOS Pre-Kernel Environment C++ entry point :^)
//
// This is where C++ execution begins, after boot.S transfers control here.
@@ -75,50 +82,63 @@ extern "C" [[noreturn]] void init()
halt();
__builtin_memcpy(kernel_program_headers, kernel_image + kernel_elf_header.e_phoff, sizeof(ElfW(Phdr)) * kernel_elf_header.e_phnum);
FlatPtr kernel_load_base = 0;
FlatPtr kernel_physical_base = 0x200000;
#if ARCH(I386)
FlatPtr kernel_load_base = 0xc0200000;
#else
FlatPtr kernel_load_base = 0x2000200000;
#endif
FlatPtr kernel_load_end = 0;
for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
auto start = kernel_program_header.p_vaddr;
auto start = kernel_load_base + kernel_program_header.p_vaddr;
auto end = start + kernel_program_header.p_memsz;
if (start < (FlatPtr)end_of_prekernel_image)
halt();
if (kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
if (kernel_physical_base + kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
halt();
if (kernel_load_base == 0 || start < kernel_load_base)
kernel_load_base = start;
if (end > kernel_load_end)
kernel_load_end = end;
}
// align to 1GB
kernel_load_base &= ~(FlatPtr)0x3fffffff;
FlatPtr kernel_mapping_base = kernel_load_base & ~(FlatPtr)0x3fffffff;
VERIFY(kernel_load_base % 0x1000 == 0);
VERIFY(kernel_load_base >= kernel_mapping_base + 0x200000);
#if ARCH(I386)
int pdpt_flags = 0x1;
#else
int pdpt_flags = 0x3;
#endif
boot_pdpt[(kernel_load_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
boot_pdpt[(kernel_mapping_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
for (size_t i = 0; i <= (kernel_load_end - kernel_load_base) >> 21; i++)
boot_pd_kernel[i] = (FlatPtr)&boot_pd_kernel_pts[i * 512] | 0x3;
boot_pd_kernel[0] = (FlatPtr)boot_pd_kernel_pt0 | 0x3;
__builtin_memset(boot_pd_kernel_pts, 0, sizeof(boot_pd_kernel_pts));
for (FlatPtr vaddr = kernel_load_base; vaddr <= kernel_load_end; vaddr += PAGE_SIZE * 512)
boot_pd_kernel[(vaddr - kernel_mapping_base) >> 21] = (FlatPtr)(&boot_pd_kernel_image_pts[(vaddr - kernel_load_base) >> 12]) | 0x3;
__builtin_memset(boot_pd_kernel_pt0, 0, sizeof(boot_pd_kernel_pt0));
VERIFY((size_t)end_of_prekernel_image < array_size(boot_pd_kernel_pt0) * PAGE_SIZE);
/* pseudo-identity map 0M - end_of_prekernel_image */
for (size_t i = 0; i < (FlatPtr)end_of_prekernel_image / PAGE_SIZE; i++)
boot_pd_kernel_pts[i] = i * PAGE_SIZE | 0x3;
boot_pd_kernel_pt0[i] = i * PAGE_SIZE | 0x3;
__builtin_memset(boot_pd_kernel_image_pts, 0, sizeof(boot_pd_kernel_image_pts));
for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
for (FlatPtr offset = 0; offset < kernel_program_header.p_memsz; offset += PAGE_SIZE) {
auto pte_index = (kernel_program_header.p_vaddr + offset - kernel_load_base) >> 12;
boot_pd_kernel_pts[pte_index] = (kernel_program_header.p_paddr + offset) | 0x3;
auto pte_index = ((kernel_load_base & 0x1fffff) + kernel_program_header.p_vaddr + offset) >> 12;
boot_pd_kernel_image_pts[pte_index] = (kernel_physical_base + kernel_program_header.p_paddr + offset) | 0x3;
}
}
@@ -130,28 +150,29 @@ extern "C" [[noreturn]] void init()
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memmove((u8*)kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
__builtin_memmove((u8*)kernel_load_base + kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
}
for (ssize_t i = kernel_elf_header.e_phnum - 1; i >= 0; i--) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memset((u8*)kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
__builtin_memset((u8*)kernel_load_base + kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
}
multiboot_info_ptr->mods_count--;
multiboot_info_ptr->mods_addr += sizeof(multiboot_module_entry_t);
auto adjust_by_load_base = [kernel_load_base](auto ptr) {
return (decltype(ptr))((FlatPtr)ptr + kernel_load_base);
auto adjust_by_mapping_base = [kernel_mapping_base](auto ptr) {
return (decltype(ptr))((FlatPtr)ptr + kernel_mapping_base);
};
BootInfo info;
info.start_of_prekernel_image = (PhysicalPtr)start_of_prekernel_image;
info.end_of_prekernel_image = (PhysicalPtr)end_of_prekernel_image;
info.physical_to_virtual_offset = kernel_load_base;
info.kernel_base = kernel_load_base;
info.physical_to_virtual_offset = kernel_load_base - kernel_physical_base;
info.kernel_mapping_base = kernel_mapping_base;
info.kernel_load_base = kernel_load_base;
#if ARCH(X86_64)
info.gdt64ptr = (PhysicalPtr)gdt64ptr;
info.code64_sel = code64_sel;
@@ -160,12 +181,12 @@ extern "C" [[noreturn]] void init()
info.boot_pdpt = (PhysicalPtr)boot_pdpt;
info.boot_pd0 = (PhysicalPtr)boot_pd0;
info.boot_pd_kernel = (PhysicalPtr)boot_pd_kernel;
info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_load_base(boot_pd_kernel_pt1023);
info.kernel_cmdline = (FlatPtr)adjust_by_load_base(kernel_cmdline);
info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_mapping_base(boot_pd_kernel_pt1023);
info.kernel_cmdline = (FlatPtr)adjust_by_mapping_base(kernel_cmdline);
info.multiboot_flags = multiboot_info_ptr->flags;
info.multiboot_memory_map = adjust_by_load_base((FlatPtr)multiboot_info_ptr->mmap_addr);
info.multiboot_memory_map = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mmap_addr);
info.multiboot_memory_map_count = multiboot_info_ptr->mmap_length / sizeof(multiboot_memory_map_t);
info.multiboot_modules = adjust_by_load_base((FlatPtr)multiboot_info_ptr->mods_addr);
info.multiboot_modules = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mods_addr);
info.multiboot_modules_count = multiboot_info_ptr->mods_count;
info.multiboot_framebuffer_addr = multiboot_info_ptr->framebuffer_addr;
info.multiboot_framebuffer_pitch = multiboot_info_ptr->framebuffer_pitch;
@@ -178,9 +199,11 @@ extern "C" [[noreturn]] void init()
#if ARCH(I386)
"add %0, %%esp"
#else
"add %0, %%rsp"
"movabs %0, %%rax\n"
"add %%rax, %%rsp"
#endif
::"g"(kernel_load_base));
::"g"(kernel_mapping_base)
: "ax");
// unmap the 0-1MB region
for (size_t i = 0; i < 256; i++)
@@ -192,8 +215,10 @@ extern "C" [[noreturn]] void init()
reload_cr3();
void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))kernel_elf_header.e_entry;
entry(*adjust_by_load_base(&info));
ELF::perform_relative_relocations(kernel_load_base);
void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))(kernel_load_base + kernel_elf_header.e_entry);
entry(*adjust_by_mapping_base(&info));
__builtin_unreachable();
}

View file

@@ -375,7 +375,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
if (out_of_memory) {
dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this);
} else {
if (ip >= kernel_base && g_kernel_symbols_available) {
if (ip >= kernel_load_base && g_kernel_symbols_available) {
auto* symbol = symbolicate_kernel_address(ip);
dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? symbol->name : "(k?)"), (symbol ? ip - symbol->address : 0));
} else {

View file

@@ -15,10 +15,10 @@
#define READONLY_AFTER_INIT __attribute__((section(".ro_after_init")))
#define UNMAP_AFTER_INIT NEVER_INLINE __attribute__((section(".unmap_after_init")))
#define KERNEL_PD_END (kernel_base + 0x31000000)
#define KERNEL_PT1024_BASE (kernel_base + 0x3FE00000)
#define KERNEL_PD_END (kernel_mapping_base + 0x31000000)
#define KERNEL_PT1024_BASE (kernel_mapping_base + 0x3FE00000)
#define KERNEL_QUICKMAP_PT (KERNEL_PT1024_BASE + 0x6000)
#define KERNEL_QUICKMAP_PD (KERNEL_PT1024_BASE + 0x7000)
#define KERNEL_QUICKMAP_PER_CPU_BASE (KERNEL_PT1024_BASE + 0x8000)
#define USER_RANGE_CEILING (kernel_base - 0x2000000)
#define USER_RANGE_CEILING (kernel_mapping_base - 0x2000000)

View file

@@ -954,7 +954,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
return {};
}
fast_u32_fill((u32*)page->paddr().offset(kernel_base).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
fast_u32_fill((u32*)page->paddr().offset(physical_to_virtual_offset).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
++m_system_memory_info.super_physical_pages_used;
return page;
}

View file

@@ -51,7 +51,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
m_directory_pages[(kernel_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
@@ -77,13 +77,13 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
m_directory_table = MM.allocate_user_physical_page();
if (!m_directory_table)
return;
auto kernel_pd_index = (kernel_base >> 30) & 0x1ffu;
auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
for (size_t i = 0; i < kernel_pd_index; i++) {
m_directory_pages[i] = MM.allocate_user_physical_page();
if (!m_directory_pages[i])
return;
}
// Share the top 1 GiB of kernel-only mappings (>=kernel_base)
// Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
#if ARCH(X86_64)

View file

@@ -83,7 +83,7 @@ public:
void set_mmap(bool mmap) { m_mmap = mmap; }
bool is_user() const { return !is_kernel(); }
bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_base; }
bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_mapping_base; }
PageFaultResponse handle_fault(PageFault const&);

View file

@@ -110,8 +110,9 @@ static Processor s_bsp_processor; // global but let's keep it "private"
extern "C" {
READONLY_AFTER_INIT PhysicalAddress start_of_prekernel_image;
READONLY_AFTER_INIT PhysicalAddress end_of_prekernel_image;
READONLY_AFTER_INIT FlatPtr kernel_base;
READONLY_AFTER_INIT size_t physical_to_virtual_offset;
READONLY_AFTER_INIT FlatPtr kernel_mapping_base;
READONLY_AFTER_INIT FlatPtr kernel_load_base;
#if ARCH(X86_64)
READONLY_AFTER_INIT PhysicalAddress boot_pml4t;
#endif
@@ -140,7 +141,8 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
start_of_prekernel_image = PhysicalAddress { boot_info.start_of_prekernel_image };
end_of_prekernel_image = PhysicalAddress { boot_info.end_of_prekernel_image };
physical_to_virtual_offset = boot_info.physical_to_virtual_offset;
kernel_base = boot_info.kernel_base;
kernel_mapping_base = boot_info.kernel_mapping_base;
kernel_load_base = boot_info.kernel_load_base;
#if ARCH(X86_64)
gdt64ptr = boot_info.gdt64ptr;
code64_sel = boot_info.code64_sel;

View file

@@ -2,39 +2,37 @@
ENTRY(init)
#if ARCH(I386)
# define KERNEL_BASE 0xc0000000
#else
# define KERNEL_BASE 0x2000000000
#endif
#define PF_X 0x1
#define PF_W 0x2
#define PF_R 0x4
KERNEL_VIRTUAL_BASE = KERNEL_BASE;
PHDRS
{
elf_headers PT_LOAD FILEHDR PHDRS FLAGS(PF_R) ;
super_pages PT_LOAD FLAGS(PF_R | PF_W) ;
text PT_LOAD FLAGS(PF_R | PF_X) ;
data PT_LOAD FLAGS(PF_R | PF_W) ;
bss PT_LOAD FLAGS(PF_R | PF_W) ;
dynamic_segment PT_LOAD FLAGS(PF_R | PF_W) ;
dynamic PT_DYNAMIC FLAGS(PF_R | PF_W) ;
ksyms PT_LOAD FLAGS(PF_R) ;
}
SECTIONS
{
. = KERNEL_VIRTUAL_BASE + 0x00200000;
start_of_kernel_image = .;
.super_pages ALIGN(4K) (NOLOAD) : AT (ADDR(.super_pages) - KERNEL_VIRTUAL_BASE)
.elf_headers (SIZEOF_HEADERS) : AT (ADDR(.elf_headers) + SIZEOF_HEADERS)
{
start_of_elf_headers = .;
} :elf_headers
.super_pages ALIGN(4K) (NOLOAD) : AT (ADDR(.super_pages))
{
*(.super_pages)
} :super_pages
.text ALIGN(4K) : AT (ADDR(.text) - KERNEL_VIRTUAL_BASE)
.text ALIGN(4K) : AT (ADDR(.text))
{
start_of_kernel_text = .;
@@ -48,7 +46,7 @@ SECTIONS
*(.text*)
} :text
.unmap_after_init ALIGN(4K) : AT (ADDR(.unmap_after_init) - KERNEL_VIRTUAL_BASE)
.unmap_after_init ALIGN(4K) : AT (ADDR(.unmap_after_init))
{
start_of_unmap_after_init = .;
*(.unmap_after_init*);
@@ -57,7 +55,7 @@ SECTIONS
end_of_kernel_text = .;
} :text
.rodata ALIGN(4K) : AT (ADDR(.rodata) - KERNEL_VIRTUAL_BASE)
.rodata ALIGN(4K) : AT (ADDR(.rodata))
{
start_heap_ctors = .;
*libkernel_heap.a:*(.ctors)
@@ -70,21 +68,21 @@ SECTIONS
*(.rodata*)
} :data
.data ALIGN(4K) : AT (ADDR(.data) - KERNEL_VIRTUAL_BASE)
.data ALIGN(4K) : AT (ADDR(.data))
{
start_of_kernel_data = .;
*(.data*)
end_of_kernel_data = .;
} :data
.ro_after_init ALIGN(4K) (NOLOAD) : AT(ADDR(.ro_after_init) - KERNEL_VIRTUAL_BASE)
.ro_after_init ALIGN(4K) (NOLOAD) : AT(ADDR(.ro_after_init))
{
start_of_ro_after_init = .;
*(.ro_after_init);
end_of_ro_after_init = .;
} :data
.bss ALIGN(4K) (NOLOAD) : AT (ADDR(.bss) - KERNEL_VIRTUAL_BASE)
.bss ALIGN(4K) (NOLOAD) : AT (ADDR(.bss))
{
start_of_kernel_bss = .;
*(page_tables)
@@ -96,8 +94,12 @@ SECTIONS
*(.heap)
} :bss
.dynamic ALIGN(4K) : AT (ADDR(.dynamic))
{
*(.dynamic)
} :dynamic_segment :dynamic
.ksyms ALIGN(4K) : AT (ADDR(.ksyms) - KERNEL_VIRTUAL_BASE)
.ksyms ALIGN(4K) : AT (ADDR(.ksyms))
{
start_of_kernel_ksyms = .;
*(.kernel_symbols)

View file

@@ -10,12 +10,19 @@
#
if [ "$SERENITY_ARCH" = "x86_64" ]; then
gdb_arch=i386:x86-64
prekernel_image=Prekernel64
kernel_base=0x2000200000
else
gdb_arch=i386:intel
prekernel_image=Prekernel
kernel_base=0xc0200000
fi
exec $SERENITY_KERNEL_DEBUGGER \
-ex "file $(dirname "$0")/../Build/${SERENITY_ARCH:-i686}/Kernel/Kernel" \
-ex "file $(dirname "$0")/../Build/${SERENITY_ARCH:-i686}/Kernel/Prekernel/$prekernel_image" \
-ex "set confirm off" \
-ex "add-symbol-file $(dirname "$0")/../Build/${SERENITY_ARCH:-i686}/Kernel/Kernel -o $kernel_base" \
-ex "set confirm on" \
-ex "set arch $gdb_arch" \
-ex 'target remote localhost:1234' \
-ex "source $(dirname "$0")/serenity_gdb.py" \

View file

@@ -316,7 +316,7 @@ Result<NonnullOwnPtr<Profile>, String> Profile::load_from_perfcore_file(const St
if (maybe_kernel_base.has_value() && ptr >= maybe_kernel_base.value()) {
if (kernel_elf) {
symbol = kernel_elf->symbolicate(ptr, &offset);
symbol = kernel_elf->symbolicate(ptr - maybe_kernel_base.value(), &offset);
} else {
symbol = String::formatted("?? <{:p}>", ptr);
}

View file

@@ -112,7 +112,6 @@ Vector<Symbol> symbolicate_thread(pid_t pid, pid_t tid)
FlatPtr base { 0 };
size_t size { 0 };
String path;
bool is_relative { true };
};
Vector<FlatPtr> stack;
@@ -123,7 +122,6 @@ Vector<Symbol> symbolicate_thread(pid_t pid, pid_t tid)
.base = maybe_kernel_base.value(),
.size = 0x3fffffff,
.path = "/boot/Kernel.debug",
.is_relative = false,
});
}
@@ -209,11 +207,7 @@ Vector<Symbol> symbolicate_thread(pid_t pid, pid_t tid)
continue;
}
FlatPtr adjusted_address;
if (found_region->is_relative)
adjusted_address = address - found_region->base;
else
adjusted_address = address;
FlatPtr adjusted_address = address - found_region->base;
// We're subtracting 1 from the address because this is the return address,
// i.e. it is one instruction past the call instruction.