Kernel: Remove i686 support

This commit is contained in:
Liav A 2022-10-04 03:05:54 +03:00 committed by Andreas Kling
parent 32270dcd20
commit 5ff318cf3a
75 changed files with 142 additions and 895 deletions

View file

@@ -519,16 +519,11 @@ struct SC_faccessat_params {
void initialize();
int sync();
# if ARCH(I386) || ARCH(X86_64) || ARCH(AARCH64)
# if ARCH(X86_64) || ARCH(AARCH64)
inline uintptr_t invoke(Function function)
{
uintptr_t result;
# if ARCH(I386)
asm volatile("int $0x82"
: "=a"(result)
: "a"(function)
: "memory");
# elif ARCH(X86_64)
# if ARCH(X86_64)
asm volatile("syscall"
: "=a"(result)
: "a"(function)
@@ -549,17 +544,12 @@ template<typename T1>
inline uintptr_t invoke(Function function, T1 arg1)
{
uintptr_t result;
# if ARCH(I386)
asm volatile("int $0x82"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1)
: "memory");
# elif ARCH(X86_64)
# if ARCH(X86_64)
asm volatile("syscall"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1)
: "rcx", "r11", "memory");
# else
# elif ARCH(AARCH64)
register uintptr_t x0 asm("x0");
register uintptr_t x1 asm("x1") = arg1;
register uintptr_t x8 asm("x8") = function;
@@ -576,17 +566,12 @@ template<typename T1, typename T2>
inline uintptr_t invoke(Function function, T1 arg1, T2 arg2)
{
uintptr_t result;
# if ARCH(I386)
asm volatile("int $0x82"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "c"((uintptr_t)arg2)
: "memory");
# elif ARCH(X86_64)
# if ARCH(X86_64)
asm volatile("syscall"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "D"((uintptr_t)arg2)
: "rcx", "r11", "memory");
# else
# elif ARCH(AARCH64)
register uintptr_t x0 asm("x0");
register uintptr_t x1 asm("x1") = arg1;
register uintptr_t x2 asm("x2") = arg2;
@@ -604,17 +589,12 @@ template<typename T1, typename T2, typename T3>
inline uintptr_t invoke(Function function, T1 arg1, T2 arg2, T3 arg3)
{
uintptr_t result;
# if ARCH(I386)
asm volatile("int $0x82"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "c"((uintptr_t)arg2), "b"((uintptr_t)arg3)
: "memory");
# elif ARCH(X86_64)
# if ARCH(X86_64)
asm volatile("syscall"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "D"((uintptr_t)arg2), "b"((uintptr_t)arg3)
: "rcx", "r11", "memory");
# else
# elif ARCH(AARCH64)
register uintptr_t x0 asm("x0");
register uintptr_t x1 asm("x1") = arg1;
register uintptr_t x2 asm("x2") = arg2;
@@ -633,17 +613,12 @@ template<typename T1, typename T2, typename T3, typename T4>
inline uintptr_t invoke(Function function, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
uintptr_t result;
# if ARCH(I386)
asm volatile("int $0x82"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "c"((uintptr_t)arg2), "b"((uintptr_t)arg3), "S"((uintptr_t)arg4)
: "memory");
# elif ARCH(X86_64)
# if ARCH(X86_64)
asm volatile("syscall"
: "=a"(result)
: "a"(function), "d"((uintptr_t)arg1), "D"((uintptr_t)arg2), "b"((uintptr_t)arg3), "S"((uintptr_t)arg4)
: "memory");
# else
# elif ARCH(AARCH64)
register uintptr_t x0 asm("x0");
register uintptr_t x1 asm("x1") = arg1;
register uintptr_t x2 asm("x2") = arg2;

View file

@@ -16,7 +16,7 @@
#define LSB(x) ((x)&0xFF)
#define MSB(x) (((x) >> 8) & 0xFF)
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/CPU.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/CPU.h>

View file

@ -8,7 +8,7 @@
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/IRQController.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/IRQController.h>

View file

@ -8,7 +8,7 @@
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/InterruptManagement.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/InterruptManagement.h>

View file

@ -9,7 +9,7 @@
#include <AK/Platform.h>
#include <AK/Types.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Interrupts.h>
#endif

View file

@ -9,7 +9,7 @@
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/PageDirectory.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/PageDirectory.h>

View file

@@ -23,7 +23,7 @@ void restore_processor_interrupts_state(InterruptsState);
}
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Processor.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/Processor.h>

View file

@ -8,7 +8,7 @@
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/RegisterState.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/RegisterState.h>

View file

@ -8,7 +8,7 @@
#include <AK/Platform.h>
#if ARCH(X86_64) || ARCH(I386)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/TrapFrame.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/TrapFrame.h>

View file

@@ -33,38 +33,6 @@ ALWAYS_INLINE FlatPtr cpu_flags()
return flags;
}
#if ARCH(I386)
ALWAYS_INLINE void set_fs(u16 segment)
{
asm volatile(
"mov %%ax, %%fs" ::"a"(segment)
: "memory");
}
ALWAYS_INLINE void set_gs(u16 segment)
{
asm volatile(
"mov %%ax, %%gs" ::"a"(segment)
: "memory");
}
ALWAYS_INLINE u16 get_fs()
{
u16 fs;
asm("mov %%fs, %%eax"
: "=a"(fs));
return fs;
}
ALWAYS_INLINE u16 get_gs()
{
u16 gs;
asm("mov %%gs, %%eax"
: "=a"(gs));
return gs;
}
#endif
template<typename T>
ALWAYS_INLINE T read_gs_value(FlatPtr offset)
{

View file

@@ -14,29 +14,13 @@
#include <AK/Platform.h>
VALIDATE_IS_X86()
#if ARCH(I386)
# define GDT_SELECTOR_CODE0 0x08
# define GDT_SELECTOR_DATA0 0x10
# define GDT_SELECTOR_CODE3 0x18
# define GDT_SELECTOR_DATA3 0x20
# define GDT_SELECTOR_TLS 0x28
# define GDT_SELECTOR_PROC 0x30
# define GDT_SELECTOR_TSS 0x38
// SYSENTER makes certain assumptions on how the GDT is structured:
static_assert(GDT_SELECTOR_CODE0 + 8 == GDT_SELECTOR_DATA0); // SS0 = CS0 + 8
// SYSEXIT makes certain assumptions on how the GDT is structured:
static_assert(GDT_SELECTOR_CODE0 + 16 == GDT_SELECTOR_CODE3); // CS3 = CS0 + 16
static_assert(GDT_SELECTOR_CODE0 + 24 == GDT_SELECTOR_DATA3); // SS3 = CS0 + 32
#else
# define GDT_SELECTOR_CODE0 0x08
# define GDT_SELECTOR_DATA0 0x10
# define GDT_SELECTOR_DATA3 0x18
# define GDT_SELECTOR_CODE3 0x20
# define GDT_SELECTOR_TSS 0x28
# define GDT_SELECTOR_TSS_PART2 0x30
#endif
// Note: These values are x86-64.
#define GDT_SELECTOR_CODE0 0x08
#define GDT_SELECTOR_DATA0 0x10
#define GDT_SELECTOR_DATA3 0x18
#define GDT_SELECTOR_CODE3 0x20
#define GDT_SELECTOR_TSS 0x28
#define GDT_SELECTOR_TSS_PART2 0x30
namespace Kernel {
@ -122,14 +106,11 @@ struct [[gnu::packed]] IDTEntry
u16 offset_1; // offset bits 0..15
u16 selector; // a code segment selector in GDT or LDT
#if ARCH(I386)
u8 zero; // unused, set to 0
#else
struct {
u8 interrupt_stack_table : 3;
u8 zero : 5; // unused, set to 0
};
#endif
struct {
u8 gate_type : 4;
u8 storage_segment : 1;
@ -137,18 +118,14 @@ struct [[gnu::packed]] IDTEntry
u8 present : 1;
} type_attr; // type and attributes
u16 offset_2; // offset bits 16..31
#if !ARCH(I386)
u32 offset_3;
u32 zeros;
#endif
IDTEntry() = default;
IDTEntry(FlatPtr callback, u16 selector_, IDTEntryType type, u8 storage_segment, u8 privilege_level)
: offset_1 { (u16)((FlatPtr)callback & 0xFFFF) }
, selector { selector_ }
#if !ARCH(I386)
, interrupt_stack_table { 0 }
#endif
, zero { 0 }
, type_attr {
.gate_type = (u8)type,
@ -157,20 +134,14 @@ struct [[gnu::packed]] IDTEntry
.present = 1,
}
, offset_2 { (u16)((FlatPtr)callback >> 16) }
#if !ARCH(I386)
, offset_3 { (u32)(((FlatPtr)callback) >> 32) }
, zeros { 0 }
#endif
{
}
FlatPtr off() const
{
#if ARCH(I386)
return (u32)offset_2 << 16 & (u32)offset_1;
#else
return (u64)offset_3 << 32 & (u64)offset_2 << 16 & (u64)offset_1;
#endif
}
IDTEntryType type() const
{

View file

@@ -19,11 +19,7 @@ class GenericInterruptHandler;
extern "C" void interrupt_common_asm_entry();
#if ARCH(I386)
# define INTERRUPT_HANDLER_PUSH_PADDING
#else
# define INTERRUPT_HANDLER_PUSH_PADDING "pushw $0\npushw $0\n"
#endif
#define INTERRUPT_HANDLER_PUSH_PADDING "pushw $0\npushw $0\n"
// clang-format off
#define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \

View file

@ -31,14 +31,12 @@ class ProcessorInfo;
struct ProcessorMessage;
struct ProcessorMessageEntry;
#if ARCH(X86_64)
# define MSR_EFER 0xc0000080
# define MSR_STAR 0xc0000081
# define MSR_LSTAR 0xc0000082
# define MSR_SFMASK 0xc0000084
# define MSR_FS_BASE 0xc0000100
# define MSR_GS_BASE 0xc0000101
#endif
#define MSR_EFER 0xc0000080
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_SFMASK 0xc0000084
#define MSR_FS_BASE 0xc0000100
#define MSR_GS_BASE 0xc0000101
#define MSR_IA32_EFER 0xc0000080
#define MSR_IA32_PAT 0x277
@ -73,10 +71,8 @@ class Processor {
Processor* m_self;
#if ARCH(X86_64)
// Saved user stack for the syscall instruction.
void* m_user_stack;
#endif
DescriptorTablePointer m_gdtr;
alignas(Descriptor) Descriptor m_gdt[256];
@ -93,9 +89,7 @@ class Processor {
static Atomic<u32> g_total_processors;
u8 m_physical_address_bit_width;
u8 m_virtual_address_bit_width;
#if ARCH(X86_64)
bool m_has_qemu_hvf_quirk;
#endif
ProcessorInfo* m_info;
Thread* m_current_thread;
@ -240,7 +234,6 @@ public:
static bool is_smp_enabled();
#if ARCH(X86_64)
static constexpr u64 user_stack_offset()
{
return __builtin_offsetof(Processor, m_user_stack);
@ -249,7 +242,6 @@ public:
{
return __builtin_offsetof(Processor, m_tss) + __builtin_offsetof(TSS, rsp0l);
}
#endif
ALWAYS_INLINE static Processor& current()
{
@ -258,11 +250,7 @@ public:
ALWAYS_INLINE static bool is_initialized()
{
return
#if ARCH(I386)
get_gs() == GDT_SELECTOR_PROC &&
#endif
read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
return read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
}
template<typename T>

View file

@@ -18,21 +18,6 @@ VALIDATE_IS_X86()
namespace Kernel {
struct [[gnu::packed]] RegisterState {
#if ARCH(I386)
FlatPtr ss;
FlatPtr gs;
FlatPtr fs;
FlatPtr es;
FlatPtr ds;
FlatPtr edi;
FlatPtr esi;
FlatPtr ebp;
FlatPtr esp;
FlatPtr ebx;
FlatPtr edx;
FlatPtr ecx;
FlatPtr eax;
#else
FlatPtr rdi;
FlatPtr rsi;
FlatPtr rbp;
@ -49,43 +34,17 @@ struct [[gnu::packed]] RegisterState {
FlatPtr r13;
FlatPtr r14;
FlatPtr r15;
#endif
u16 exception_code;
u16 isr_number;
#if ARCH(X86_64)
u32 padding;
#endif
#if ARCH(I386)
FlatPtr eip;
#else
FlatPtr rip;
#endif
FlatPtr cs;
#if ARCH(I386)
FlatPtr eflags;
FlatPtr userspace_esp;
FlatPtr userspace_ss;
#else
FlatPtr rflags;
FlatPtr userspace_rsp;
FlatPtr userspace_ss;
#endif
#if ARCH(I386)
FlatPtr userspace_sp() const
{
return userspace_esp;
}
void set_userspace_sp(FlatPtr value) { userspace_esp = value; }
FlatPtr ip() const { return eip; }
void set_ip(FlatPtr value) { eip = value; }
void set_dx(FlatPtr value) { edx = value; }
FlatPtr bp() const { return ebp; }
void set_bp(FlatPtr value) { ebp = value; }
FlatPtr flags() const { return eflags; }
void set_flags(FlatPtr value) { eflags = value; }
void set_return_reg(FlatPtr value) { eax = value; }
#elif ARCH(X86_64)
FlatPtr userspace_sp() const
{
return userspace_rsp;
@ -99,49 +58,23 @@ struct [[gnu::packed]] RegisterState {
FlatPtr flags() const { return rflags; }
void set_flags(FlatPtr value) { rflags = value; }
void set_return_reg(FlatPtr value) { rax = value; }
#endif
void capture_syscall_params(FlatPtr& function, FlatPtr& arg1, FlatPtr& arg2, FlatPtr& arg3, FlatPtr& arg4) const
{
#if ARCH(I386)
function = eax;
arg1 = edx;
arg2 = ecx;
arg3 = ebx;
arg4 = esi;
#else
// The syscall instruction clobbers rcx, so we must use a different calling convention to 32-bit.
function = rax;
arg1 = rdx;
arg2 = rdi;
arg3 = rbx;
arg4 = rsi;
#endif
}
};
#if ARCH(I386)
# define REGISTER_STATE_SIZE (19 * 4)
#define REGISTER_STATE_SIZE (22 * 8)
static_assert(AssertSize<RegisterState, REGISTER_STATE_SIZE>());
#elif ARCH(X86_64)
# define REGISTER_STATE_SIZE (22 * 8)
static_assert(AssertSize<RegisterState, REGISTER_STATE_SIZE>());
#endif
inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_regs, RegisterState const& kernel_regs)
{
#if ARCH(I386)
ptrace_regs.eax = kernel_regs.eax;
ptrace_regs.ecx = kernel_regs.ecx;
ptrace_regs.edx = kernel_regs.edx;
ptrace_regs.ebx = kernel_regs.ebx;
ptrace_regs.esp = kernel_regs.userspace_esp;
ptrace_regs.ebp = kernel_regs.ebp;
ptrace_regs.esi = kernel_regs.esi;
ptrace_regs.edi = kernel_regs.edi;
ptrace_regs.eip = kernel_regs.eip;
ptrace_regs.eflags = kernel_regs.eflags;
#else
ptrace_regs.rax = kernel_regs.rax;
ptrace_regs.rcx = kernel_regs.rcx;
ptrace_regs.rdx = kernel_regs.rdx;
@ -160,7 +93,6 @@ inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_
ptrace_regs.r14 = kernel_regs.r14;
ptrace_regs.r15 = kernel_regs.r15;
ptrace_regs.rflags = kernel_regs.rflags,
#endif
ptrace_regs.cs = 0;
ptrace_regs.ss = 0;
ptrace_regs.ds = 0;
@ -171,18 +103,6 @@ inline void copy_kernel_registers_into_ptrace_registers(PtraceRegisters& ptrace_
inline void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_regs, PtraceRegisters const& ptrace_regs)
{
#if ARCH(I386)
kernel_regs.eax = ptrace_regs.eax;
kernel_regs.ecx = ptrace_regs.ecx;
kernel_regs.edx = ptrace_regs.edx;
kernel_regs.ebx = ptrace_regs.ebx;
kernel_regs.esp = ptrace_regs.esp;
kernel_regs.ebp = ptrace_regs.ebp;
kernel_regs.esi = ptrace_regs.esi;
kernel_regs.edi = ptrace_regs.edi;
kernel_regs.eip = ptrace_regs.eip;
kernel_regs.eflags = (kernel_regs.eflags & ~safe_eflags_mask) | (ptrace_regs.eflags & safe_eflags_mask);
#else
kernel_regs.rax = ptrace_regs.rax;
kernel_regs.rcx = ptrace_regs.rcx;
kernel_regs.rdx = ptrace_regs.rdx;
@ -202,7 +122,6 @@ inline void copy_ptrace_registers_into_kernel_registers(RegisterState& kernel_re
kernel_regs.r15 = ptrace_regs.r15;
// FIXME: do we need a separate safe_rflags_mask here?
kernel_regs.rflags = (kernel_regs.rflags & ~safe_eflags_mask) | (ptrace_regs.rflags & safe_eflags_mask);
#endif
}
struct [[gnu::packed]] DebugRegisterState {

View file

@ -43,19 +43,11 @@ struct [[gnu::packed]] LegacyRegion {
u8 FTW;
u8 : 8;
u16 FOP;
#if ARCH(I386)
// 32-bit version
u32 FIP_32;
u16 FCS;
u16 : 16;
u32 FPD_32;
u16 FDS;
u16 : 16;
#elif ARCH(X86_64)
// 64-bit version
u64 FIP_64;
u64 FDP_64;
#endif
AK::MXCSR MXCSR;
u32 MXCSR_mask;
u8 st_mmx[128];

View file

@ -62,10 +62,6 @@ struct [[gnu::packed]] TSS64 {
u16 iomapbase;
};
#if ARCH(I386)
using TSS = TSS32;
#elif ARCH(X86_64)
using TSS = TSS64;
#endif
}

View file

@ -91,23 +91,7 @@ static_assert(AssertSize<HPETRegistersBlock, 0x500>());
static u64 read_register_safe64(HPETRegister const& reg)
{
#if ARCH(X86_64)
return reg.full;
#elif ARCH(I386)
// As per 2.4.7 this reads the 64 bit value in a consistent manner
// using only 32 bit reads
u32 low, high = reg.high;
for (;;) {
low = reg.low;
u32 new_high = reg.high;
if (new_high == high)
break;
high = new_high;
}
return ((u64)high << 32) | (u64)low;
#else
# error Unknown architecture
#endif
}
static HPET* s_hpet;

View file

@@ -27,11 +27,7 @@ struct TrapFrame {
TrapFrame& operator=(TrapFrame&&) = delete;
};
#if ARCH(I386)
# define TRAP_FRAME_SIZE (3 * 4)
#else
# define TRAP_FRAME_SIZE (3 * 8)
#endif
#define TRAP_FRAME_SIZE (3 * 8)
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());

View file

@ -52,61 +52,6 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
// clang-format off
#if ARCH(I386)
#define EH_ENTRY(ec, title) \
extern "C" void title##_asm_entry(); \
extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
NAKED void title##_asm_entry() { \
asm( \
" pusha\n" \
" pushl %ds\n" \
" pushl %es\n" \
" pushl %fs\n" \
" pushl %gs\n" \
" pushl %ss\n" \
" mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
" mov %ax, %ds\n" \
" mov %ax, %es\n" \
" mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
" mov %ax, %gs\n" \
" pushl %esp \n" /* set TrapFrame::regs */ \
" subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
" pushl %esp \n" \
" cld\n" \
" call enter_trap_no_irq \n" \
" call " #title "_handler\n" \
" jmp common_trap_exit \n" \
); \
}
#define EH_ENTRY_NO_CODE(ec, title) \
extern "C" void title##_asm_entry(); \
extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
NAKED void title##_asm_entry() { \
asm( \
" pushl $0x0\n" \
" pusha\n" \
" pushl %ds\n" \
" pushl %es\n" \
" pushl %fs\n" \
" pushl %gs\n" \
" pushl %ss\n" \
" mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n" \
" mov %ax, %ds\n" \
" mov %ax, %es\n" \
" mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n" \
" mov %ax, %gs\n" \
" pushl %esp \n" /* set TrapFrame::regs */ \
" subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n" \
" pushl %esp \n" \
" cld\n" \
" call enter_trap_no_irq \n" \
" call " #title "_handler\n" \
" jmp common_trap_exit \n" \
); \
}
#elif ARCH(X86_64)
#define EH_ENTRY(ec, title) \
extern "C" void title##_asm_entry(); \
extern "C" void title##_handler(TrapFrame*) __attribute__((used)); \
@ -173,41 +118,19 @@ static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interr
" jmp common_trap_exit \n" \
); \
}
#endif
// clang-format on
void dump_registers(RegisterState const& regs)
{
#if ARCH(I386)
u16 ss;
u32 esp;
if (!(regs.cs & 3)) {
ss = regs.ss;
esp = regs.esp;
} else {
ss = regs.userspace_ss;
esp = regs.userspace_esp;
}
#else
u64 rsp;
if (!(regs.cs & 3))
rsp = regs.rsp;
else
rsp = regs.userspace_rsp;
#endif
dbgln("Exception code: {:04x} (isr: {:04x})", regs.exception_code, regs.isr_number);
#if ARCH(I386)
dbgln(" pc={:#04x}:{:p} eflags={:p}", (u16)regs.cs, regs.eip, regs.eflags);
dbgln(" stack={:#04x}:{:p}", ss, esp);
dbgln(" ds={:#04x} es={:#04x} fs={:#04x} gs={:#04x}", (u16)regs.ds, (u16)regs.es, (u16)regs.fs, (u16)regs.gs);
dbgln(" eax={:p} ebx={:p} ecx={:p} edx={:p}", regs.eax, regs.ebx, regs.ecx, regs.edx);
dbgln(" ebp={:p} esp={:p} esi={:p} edi={:p}", regs.ebp, regs.esp, regs.esi, regs.edi);
dbgln(" cr0={:p} cr2={:p} cr3={:p} cr4={:p}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
#else
dbgln(" pc={:#04x}:{:p} rflags={:p}", (u16)regs.cs, regs.rip, regs.rflags);
dbgln(" stack={:p}", rsp);
// FIXME: Add fs_base and gs_base here
@ -216,7 +139,6 @@ void dump_registers(RegisterState const& regs)
dbgln(" r8={:p} r9={:p} r10={:p} r11={:p}", regs.r8, regs.r9, regs.r10, regs.r11);
dbgln(" r12={:p} r13={:p} r14={:p} r15={:p}", regs.r12, regs.r13, regs.r14, regs.r15);
dbgln(" cr0={:p} cr2={:p} cr3={:p} cr4={:p}", read_cr0(), read_cr2(), read_cr3(), read_cr4());
#endif
}
EH_ENTRY_NO_CODE(6, illegal_instruction);

View file

@ -218,10 +218,8 @@ extern "C" FlatPtr ap_cpu_init_cr3;
extern "C" u32 ap_cpu_init_cr4;
extern "C" FlatPtr ap_cpu_gdtr;
extern "C" FlatPtr ap_cpu_idtr;
#if ARCH(X86_64)
extern "C" FlatPtr ap_cpu_kernel_map_base;
extern "C" FlatPtr ap_cpu_kernel_entry_function;
#endif
extern "C" [[noreturn]] void init_ap(FlatPtr, Processor*);
@ -377,11 +375,8 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
auto const& idtr = get_idtr();
*APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_idtr) = FlatPtr(&idtr);
#if ARCH(X86_64)
// TODO: Use these also in i686 builds
*APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_map_base) = FlatPtr(kernel_mapping_base);
*APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_kernel_entry_function) = FlatPtr(&init_ap);
#endif
// Store the BSP's CR0 and CR4 values for the APs to use
*APIC_INIT_VAR_PTR(FlatPtr, apic_startup_region_ptr, ap_cpu_init_cr0) = read_cr0();

View file

@ -464,9 +464,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
}
}
#if ARCH(X86_64)
m_has_qemu_hvf_quirk = false;
#endif
if (max_extended_leaf >= 0x80000008) {
// CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
@ -479,7 +477,6 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
// Processors that do not support CPUID function 80000008H, support a linear-address width of 32.
m_virtual_address_bit_width = 32;
#if ARCH(X86_64)
// Workaround QEMU hypervisor.framework bug
// https://gitlab.com/qemu-project/qemu/-/issues/664
//
@ -494,7 +491,6 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
m_virtual_address_bit_width = 48;
}
}
#endif
}
}
@ -565,7 +561,6 @@ UNMAP_AFTER_INIT void Processor::cpu_setup()
}
}
#if ARCH(X86_64)
// x86_64 processors must support the syscall feature.
VERIFY(has_feature(CPUFeature::SYSCALL));
MSR efer_msr(MSR_EFER);
@ -595,7 +590,6 @@ UNMAP_AFTER_INIT void Processor::cpu_setup()
// the RDGSBASE instruction until we implement proper GS swapping at the userspace/kernel boundaries
write_cr4(read_cr4() & ~0x10000);
}
#endif
// Query OS-enabled CPUID features again, and set the flags if needed.
CPUID processor_info(0x1);
@ -652,10 +646,8 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
#if ARCH(X86_64)
if (m_has_qemu_hvf_quirk)
dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());
#endif
if (cpu == 0)
initialize_interrupts();
@ -1459,42 +1451,10 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
m_gdtr.limit = 0;
write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
#if ARCH(I386)
write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3
#else
write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00af9a00); // code0
write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00af9200); // data0
write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x008ff200); // data3
write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00affa00); // code3
#endif
#if ARCH(I386)
Descriptor tls_descriptor {};
tls_descriptor.low = tls_descriptor.high = 0;
tls_descriptor.dpl = 3;
tls_descriptor.segment_present = 1;
tls_descriptor.granularity = 0;
tls_descriptor.operation_size64 = 0;
tls_descriptor.operation_size32 = 1;
tls_descriptor.descriptor_type = 1;
tls_descriptor.type = 2;
write_gdt_entry(GDT_SELECTOR_TLS, tls_descriptor); // tls3
Descriptor gs_descriptor {};
gs_descriptor.set_base(VirtualAddress { this });
gs_descriptor.set_limit(sizeof(Processor) - 1);
gs_descriptor.dpl = 0;
gs_descriptor.segment_present = 1;
gs_descriptor.granularity = 0;
gs_descriptor.operation_size64 = 0;
gs_descriptor.operation_size32 = 1;
gs_descriptor.descriptor_type = 1;
gs_descriptor.type = 2;
write_gdt_entry(GDT_SELECTOR_PROC, gs_descriptor); // gs0
#endif
Descriptor tss_descriptor {};
tss_descriptor.set_base(VirtualAddress { (size_t)&m_tss & 0xffffffff });
@ -1508,36 +1468,15 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
tss_descriptor.type = Descriptor::SystemType::AvailableTSS;
write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss
#if ARCH(X86_64)
Descriptor tss_descriptor_part2 {};
tss_descriptor_part2.low = (size_t)&m_tss >> 32;
write_gdt_entry(GDT_SELECTOR_TSS_PART2, tss_descriptor_part2);
#endif
flush_gdt();
load_task_register(GDT_SELECTOR_TSS);
#if ARCH(X86_64)
MSR gs_base(MSR_GS_BASE);
gs_base.set((u64)this);
#else
asm volatile(
"mov %%ax, %%ds\n"
"mov %%ax, %%es\n"
"mov %%ax, %%fs\n"
"mov %%ax, %%ss\n" ::"a"(GDT_SELECTOR_DATA0)
: "memory");
set_gs(GDT_SELECTOR_PROC);
#endif
#if ARCH(I386)
// Make sure CS points to the kernel code descriptor.
// clang-format off
asm volatile(
"ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
"sanity:\n");
// clang-format on
#endif
}
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
@ -1594,13 +1533,6 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
: "=m"(from_thread->fpu_state()));
}
#if ARCH(I386)
from_regs.fs = get_fs();
from_regs.gs = get_gs();
set_fs(to_regs.fs);
set_gs(to_regs.gs);
#endif
if (from_thread->process().is_traced())
read_debug_registers_into(from_thread->debug_register_state());
@ -1611,14 +1543,8 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
}
auto& processor = Processor::current();
#if ARCH(I386)
auto& tls_descriptor = processor.get_gdt_entry(GDT_SELECTOR_TLS);
tls_descriptor.set_base(to_thread->thread_specific_data());
tls_descriptor.set_limit(to_thread->thread_specific_region_size());
#else
MSR fs_base_msr(MSR_FS_BASE);
fs_base_msr.set(to_thread->thread_specific_data().get());
#endif
if (from_regs.cr3 != to_regs.cr3)
write_cr3(to_regs.cr3);

View file

@ -42,14 +42,9 @@ namespace Kernel {
ALWAYS_INLINE bool validate_canonical_address(size_t address)
{
#if ARCH(X86_64)
auto most_significant_bits = Processor::current().virtual_address_bit_width() - 1;
auto insignificant_bits = address >> most_significant_bits;
return insignificant_bits == 0 || insignificant_bits == (0xffffffffffffffffull >> most_significant_bits);
#else
(void)address;
return true;
#endif
}
CODE_SECTION(".text.safemem")
@ -73,11 +68,7 @@ NEVER_INLINE bool safe_memcpy(void* dest_ptr, void const* src_ptr, size_t n, voi
asm volatile(
".globl safe_memcpy_ins_1 \n"
"safe_memcpy_ins_1: \n"
#if ARCH(I386)
"rep movsl \n"
#else
"rep movsq \n"
#endif
".globl safe_memcpy_1_faulted \n"
"safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
: "=S"(src),
@ -168,11 +159,7 @@ NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
asm volatile(
".globl safe_memset_ins_1 \n"
"safe_memset_ins_1: \n"
#if ARCH(I386)
"rep stosl \n"
#else
"rep stosq \n"
#endif
".globl safe_memset_1_faulted \n"
"safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
: "=D"(dest),

View file

@@ -1,62 +0,0 @@
/*
* Copyright (c) 2022, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Types.h>
extern "C" {
#if defined(AK_COMPILER_GCC) // FIXME: Remove this file once GCC supports 8-byte atomics on i686
u64 kernel__atomic_compare_exchange_8(u64 volatile*, u64*, u64, int, int);
# pragma redefine_extname kernel__atomic_compare_exchange_8 __atomic_compare_exchange_8
u64 kernel__atomic_compare_exchange_8(u64 volatile* memory, u64* expected, u64 desired, int, int)
{
u64 previous;
asm volatile("lock; cmpxchg8b %1"
: "=A"(previous), "+m"(*memory)
: "b"((u32)desired), "c"((u32)(desired >> 32)), "0"(*expected));
return previous;
}
u64 kernel__atomic_load_8(u64 volatile*, int);
# pragma redefine_extname kernel__atomic_load_8 __atomic_load_8
u64 kernel__atomic_load_8(u64 volatile* memory, int)
{
u64 previous;
asm volatile("movl %%ebx, %%eax\n"
"movl %%ecx, %%edx\n"
"lock; cmpxchg8b %1"
: "=A"(previous), "+m"(*memory));
return previous;
}
void kernel__atomic_store_8(u64 volatile*, u64, int);
# pragma redefine_extname kernel__atomic_store_8 __atomic_store_8
void kernel__atomic_store_8(u64 volatile* memory, u64 value, int)
{
u64 expected = *memory;
asm volatile("1: lock; cmpxchg8b %0\n"
" jne 1b"
: "=m"(*memory)
: "b"((u32)value), "c"((u32)(value >> 32)), "A"(expected));
}
u64 kernel__atomic_fetch_add_8(u64 volatile*, u64, int);
# pragma redefine_extname kernel__atomic_fetch_add_8 __atomic_fetch_add_8
u64 kernel__atomic_fetch_add_8(u64 volatile* memory, u64 value, int memory_order)
{
u64 previous = *memory;
while (kernel__atomic_compare_exchange_8(memory, &previous, previous + value, memory_order, memory_order) != previous)
;
return previous;
}
#endif
}

View file

@ -122,9 +122,7 @@ READONLY_AFTER_INIT PhysicalAddress end_of_prekernel_image;
READONLY_AFTER_INIT size_t physical_to_virtual_offset;
READONLY_AFTER_INIT FlatPtr kernel_mapping_base;
READONLY_AFTER_INIT FlatPtr kernel_load_base;
#if ARCH(X86_64)
READONLY_AFTER_INIT PhysicalAddress boot_pml4t;
#endif
READONLY_AFTER_INIT PhysicalAddress boot_pdpt;
READONLY_AFTER_INIT PhysicalAddress boot_pd0;
READONLY_AFTER_INIT PhysicalAddress boot_pd_kernel;
@ -154,11 +152,9 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
physical_to_virtual_offset = boot_info.physical_to_virtual_offset;
kernel_mapping_base = boot_info.kernel_mapping_base;
kernel_load_base = boot_info.kernel_load_base;
#if ARCH(X86_64)
gdt64ptr = boot_info.gdt64ptr;
code64_sel = boot_info.code64_sel;
boot_pml4t = PhysicalAddress { boot_info.boot_pml4t };
#endif
boot_pdpt = PhysicalAddress { boot_info.boot_pdpt };
boot_pd0 = PhysicalAddress { boot_info.boot_pd0 };
boot_pd_kernel = PhysicalAddress { boot_info.boot_pd_kernel };

View file

@ -4,11 +4,7 @@
.global gdt64ptr
gdt64ptr:
#if ARCH(X86_64)
.quad 0
#else
.int 0
#endif
.global code64_sel
code64_sel:

View file

@ -7,7 +7,7 @@
#include <AK/ByteReader.h>
#include <AK/Error.h>
#include <AK/HashTable.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/PCI/Controller/HostBridge.h>
#endif
#include <Kernel/Bus/PCI/Access.h>
@ -110,7 +110,7 @@ UNMAP_AFTER_INIT bool Access::initialize_for_multiple_pci_domains(PhysicalAddres
return true;
}
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
UNMAP_AFTER_INIT bool Access::initialize_for_one_pci_domain()
{
VERIFY(!Access::is_initialized());

View file

@@ -21,7 +21,7 @@ class Access {
public:
static bool initialize_for_multiple_pci_domains(PhysicalAddress mcfg_table);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
static bool initialize_for_one_pci_domain();
#endif

View file

@ -7,8 +7,6 @@ endif()
if ("${SERENITY_ARCH}" STREQUAL "aarch64")
set(KERNEL_ARCH aarch64)
elseif ("${SERENITY_ARCH}" STREQUAL "i686")
set(KERNEL_ARCH i386)
elseif("${SERENITY_ARCH}" STREQUAL "x86_64")
set(KERNEL_ARCH x86_64)
endif()
@ -349,7 +347,7 @@ set(KERNEL_SOURCES
WorkQueue.cpp
)
if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
set(KERNEL_SOURCES
${KERNEL_SOURCES}
Arch/x86/init.cpp
@ -435,13 +433,6 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/SyscallEntry.cpp
)
endif()
if ("${SERENITY_ARCH}" STREQUAL "i686")
set(KERNEL_SOURCES
${KERNEL_SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/Atomics.cpp
)
endif()
elseif("${SERENITY_ARCH}" STREQUAL "aarch64")
set(RPI_SOURCES
Arch/aarch64/RPi/DebugOutput.cpp
@ -556,7 +547,7 @@ add_compile_options(-fsigned-char)
add_compile_options(-Wno-unknown-warning-option -Wvla -Wnull-dereference)
add_compile_options(-fno-rtti -ffreestanding -fbuiltin)
if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
add_compile_options(-mno-80387 -mno-mmx -mno-sse -mno-sse2)
elseif("${SERENITY_ARCH}" STREQUAL "aarch64")
add_compile_options(-mgeneral-regs-only)
@ -617,8 +608,6 @@ endmacro()
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
add_compile_options(-mcmodel=large -mno-red-zone)
set_new_alignment(8)
elseif ("${SERENITY_ARCH}" STREQUAL "i686")
set_new_alignment(4)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-pie")

View file

@ -149,7 +149,7 @@ UNMAP_AFTER_INIT PCIAccessLevel CommandLine::pci_access_level() const
auto value = lookup("pci"sv).value_or("ecam"sv);
if (value == "ecam"sv)
return PCIAccessLevel::MemoryAddressing;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (value == "io"sv)
return PCIAccessLevel::IOAddressing;
#endif

View file

@@ -32,7 +32,7 @@ enum class AcpiFeatureLevel {
enum class PCIAccessLevel {
None,
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
IOAddressing,
#endif
MemoryAddressing,

View file

@ -119,9 +119,7 @@ ErrorOr<void> Coredump::write_elf_header()
elf_file_header.e_ident[EI_MAG1] = 'E';
elf_file_header.e_ident[EI_MAG2] = 'L';
elf_file_header.e_ident[EI_MAG3] = 'F';
#if ARCH(I386)
elf_file_header.e_ident[EI_CLASS] = ELFCLASS32;
#elif ARCH(X86_64) || ARCH(AARCH64)
#if ARCH(X86_64) || ARCH(AARCH64)
elf_file_header.e_ident[EI_CLASS] = ELFCLASS64;
#else
# error Unknown architecture
@ -137,9 +135,7 @@ ErrorOr<void> Coredump::write_elf_header()
elf_file_header.e_ident[EI_PAD + 5] = 0;
elf_file_header.e_ident[EI_PAD + 6] = 0;
elf_file_header.e_type = ET_CORE;
#if ARCH(I386)
elf_file_header.e_machine = EM_386;
#elif ARCH(X86_64)
#if ARCH(X86_64)
elf_file_header.e_machine = EM_X86_64;
#elif ARCH(AARCH64)
elf_file_header.e_machine = EM_AARCH64;

View file

@ -5,7 +5,7 @@
*/
#include <AK/Platform.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/BochsDebugOutput.h>
#endif
#include <Kernel/Devices/ConsoleDevice.h>

View file

@ -6,7 +6,7 @@
#include <AK/Platform.h>
#include <AK/Singleton.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/ISABus/I8042Controller.h>
#endif
#include <Kernel/CommandLine.h>
@ -122,7 +122,7 @@ UNMAP_AFTER_INIT ErrorOr<void> HIDManagement::enumerate()
// set to emulate PS/2, we should not initialize the PS/2 controller.
if (kernel_command_line().disable_ps2_controller())
return {};
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
m_i8042_controller = I8042Controller::initialize();
// Note: If ACPI is disabled or doesn't indicate that we have an i8042, we

View file

@ -63,7 +63,7 @@ private:
size_t m_mouse_minor_number { 0 };
size_t m_keyboard_minor_number { 0 };
KeyboardClient* m_client { nullptr };
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
LockRefPtr<I8042Controller> m_i8042_controller;
#endif
NonnullLockRefPtrVector<HIDDevice> m_hid_devices;

View file

@ -5,7 +5,7 @@
*/
#include <AK/JsonObjectSerializer.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/ProcessorInfo.h>
#endif
#include <Kernel/FileSystem/SysFS/Subsystems/Kernel/CPUInfo.h>
@ -25,7 +25,7 @@ UNMAP_AFTER_INIT NonnullLockRefPtr<SysFSCPUInformation> SysFSCPUInformation::mus
ErrorOr<void> SysFSCPUInformation::try_generate(KBufferBuilder& builder)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
auto array = TRY(JsonArraySerializer<>::try_create(builder));
TRY(Processor::try_for_each(
[&](Processor& proc) -> ErrorOr<void> {

View file

@ -6,7 +6,7 @@
*/
#include <AK/Platform.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/I8042Reboot.h>
# include <Kernel/Arch/x86/common/Shutdown.h>
#endif
@ -87,7 +87,7 @@ void SysFSPowerStateSwitchNode::reboot()
dbgln("attempting reboot via ACPI");
if (ACPI::is_enabled())
ACPI::Parser::the()->try_acpi_reboot();
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
i8042_reboot();
#endif
dbgln("reboot attempts failed, applications will stop responding.");
@ -106,7 +106,7 @@ void SysFSPowerStateSwitchNode::poweroff()
dbgln("syncing mounted filesystems...");
FileSystem::sync();
dbgln("attempting system shutdown...");
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
qemu_shutdown();
virtualbox_shutdown();
#endif

View file

@ -11,7 +11,7 @@
#include <AK/StringView.h>
#include <AK/Try.h>
#include <Kernel/InterruptDisabler.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/IO.h>
#endif
#include <Kernel/Bus/PCI/API.h>
@ -234,7 +234,7 @@ void Parser::access_generic_address(Structures::GenericAddressStructure const& s
{
switch ((GenericAddressStructure::AddressSpace)structure.address_space) {
case GenericAddressStructure::AddressSpace::SystemIO: {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
IOAddress address(structure.address);
dbgln("ACPI: Sending value {:x} to {}", value, address);
switch (structure.access_size) {

View file

@ -7,7 +7,7 @@
#include <AK/Atomic.h>
#include <AK/Checked.h>
#include <AK/Try.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Hypervisor/BochsDisplayConnector.h>
#endif
#include <Kernel/Bus/PCI/API.h>
@ -46,7 +46,7 @@ UNMAP_AFTER_INIT ErrorOr<void> BochsGraphicsAdapter::initialize_adapter(PCI::Dev
// Note: In non x86-builds, we should never encounter VirtualBox hardware nor Pure Bochs VBE graphics,
// so just assume we can use the QEMU BochsVBE-compatible graphics adapter only.
auto bar0_space_size = PCI::get_BAR_space_size(pci_device_identifier.address(), PCI::HeaderType0BaseRegister::BAR0);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bool virtual_box_hardware = (pci_device_identifier.hardware_id().vendor_id == 0x80ee && pci_device_identifier.hardware_id().device_id == 0xbeef);
if (pci_device_identifier.revision_id().value() == 0x0 || virtual_box_hardware) {
m_display_connector = BochsDisplayConnector::must_create(PhysicalAddress(PCI::get_BAR0(pci_device_identifier.address()) & 0xfffffff0), bar0_space_size, virtual_box_hardware);

View file

@ -6,7 +6,7 @@
#include <AK/Singleton.h>
#include <Kernel/Arch/Delay.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Hypervisor/BochsDisplayConnector.h>
#endif
#include <Kernel/Bus/PCI/API.h>
@ -44,7 +44,7 @@ UNMAP_AFTER_INIT GraphicsManagement::GraphicsManagement()
void GraphicsManagement::disable_vga_emulation_access_permanently()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (!m_vga_arbiter)
return;
m_vga_arbiter->disable_vga_emulation_access_permanently({});
@ -53,7 +53,7 @@ void GraphicsManagement::disable_vga_emulation_access_permanently()
void GraphicsManagement::enable_vga_text_mode_console_cursor()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (!m_vga_arbiter)
return;
m_vga_arbiter->enable_vga_text_mode_console_cursor({});
@ -62,7 +62,7 @@ void GraphicsManagement::enable_vga_text_mode_console_cursor()
void GraphicsManagement::disable_vga_text_mode_console_cursor()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (!m_vga_arbiter)
return;
m_vga_arbiter->disable_vga_text_mode_console_cursor({});
@ -71,7 +71,7 @@ void GraphicsManagement::disable_vga_text_mode_console_cursor()
void GraphicsManagement::set_vga_text_mode_cursor([[maybe_unused]] size_t console_width, [[maybe_unused]] size_t x, [[maybe_unused]] size_t y)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (!m_vga_arbiter)
return;
m_vga_arbiter->set_vga_text_mode_cursor({}, console_width, x, y);
@ -195,7 +195,7 @@ UNMAP_AFTER_INIT bool GraphicsManagement::initialize()
}
}
});
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
m_vga_arbiter = VGAIOArbiter::must_create({});
#endif
@ -210,7 +210,7 @@ UNMAP_AFTER_INIT bool GraphicsManagement::initialize()
// Otherwise we risk using the Bochs VBE driver on a wrong physical address
// for the framebuffer.
if (PCI::Access::is_hardware_disabled() && !(graphics_subsystem_mode == CommandLine::GraphicsSubsystemMode::Limited && !multiboot_framebuffer_addr.is_null() && multiboot_framebuffer_type == MULTIBOOT_FRAMEBUFFER_TYPE_RGB)) {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
auto vga_isa_bochs_display_connector = BochsDisplayConnector::try_create_for_vga_isa_connector();
if (vga_isa_bochs_display_connector) {
dmesgln("Graphics: Using a Bochs ISA VGA compatible adapter");

View file

@ -9,7 +9,7 @@
#include <AK/NonnullOwnPtr.h>
#include <AK/Platform.h>
#include <AK/Types.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/VGA/IOArbiter.h>
#endif
#include <Kernel/Bus/PCI/Definitions.h>
@ -65,7 +65,7 @@ private:
unsigned m_current_minor_number { 0 };
SpinlockProtected<IntrusiveList<&DisplayConnector::m_list_node>> m_display_connector_nodes { LockRank::None };
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
OwnPtr<VGAIOArbiter> m_vga_arbiter;
#endif
};

View file

@ -18,9 +18,7 @@
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>
#if ARCH(I386)
static constexpr size_t CHUNK_SIZE = 32;
#elif ARCH(X86_64) || ARCH(AARCH64)
#if ARCH(X86_64) || ARCH(AARCH64)
static constexpr size_t CHUNK_SIZE = 64;
#else
# error Unknown architecture

View file

@ -10,7 +10,7 @@
namespace Kernel {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_io_space(IOAddress address, u64 space_length)
{
VERIFY(!Checked<u64>::addition_would_overflow(address.get(), space_length));
@ -27,7 +27,7 @@ IOWindow::IOWindow(NonnullOwnPtr<IOAddressData> io_range)
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64 offset, u64 space_length)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
if (Checked<u64>::addition_would_overflow(m_io_range->address(), space_length))
@ -39,19 +39,10 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64
VERIFY(space_type() == SpaceType::Memory);
VERIFY(m_memory_mapped_range);
// Note: x86-IA32 is the only 32 bit CPU architecture currently being supported and
// probably will be the only such in the foreseeable future.
#if ARCH(I386)
if (Checked<u32>::addition_would_overflow(m_memory_mapped_range->paddr.get(), offset))
return Error::from_errno(EOVERFLOW);
if (Checked<u32>::addition_would_overflow(m_memory_mapped_range->paddr.get() + offset, space_length))
return Error::from_errno(EOVERFLOW);
#else
if (Checked<u64>::addition_would_overflow(m_memory_mapped_range->paddr.get(), offset))
return Error::from_errno(EOVERFLOW);
if (Checked<u64>::addition_would_overflow(m_memory_mapped_range->paddr.get() + offset, space_length))
return Error::from_errno(EOVERFLOW);
#endif
auto memory_mapped_range = TRY(Memory::adopt_new_nonnull_own_typed_mapping<u8 volatile>(m_memory_mapped_range->paddr.offset(offset), space_length, Memory::Region::Access::ReadWrite));
return TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOWindow(move(memory_mapped_range))));
@ -60,7 +51,7 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64
ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_from_io_window_with_offset(u64 offset)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
VERIFY(m_io_range->space_length() >= offset);
@ -93,7 +84,7 @@ ErrorOr<NonnullOwnPtr<IOWindow>> IOWindow::create_for_pci_device_bar(PCI::Addres
return Error::from_errno(EIO);
if (pci_bar_space_type == PCI::BARSpaceType::IOSpace) {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (Checked<u64>::addition_would_overflow(pci_bar_value, space_length))
return Error::from_errno(EOVERFLOW);
auto io_address_range = TRY(adopt_nonnull_own_or_enomem(new (nothrow) IOAddressData((pci_bar_value & 0xfffffffc), space_length)));
@ -148,7 +139,7 @@ bool IOWindow::is_access_in_range(u64 offset, size_t byte_size_access) const
{
if (Checked<u64>::addition_would_overflow(offset, byte_size_access))
return false;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
VERIFY(!Checked<u64>::addition_would_overflow(m_io_range->address(), m_io_range->space_length()));
@ -273,7 +264,7 @@ u8 volatile* IOWindow::as_memory_address_pointer()
return m_memory_mapped_range->ptr();
}
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
IOAddress IOWindow::as_io_address() const
{
VERIFY(space_type() == SpaceType::IO);

View file

@ -9,7 +9,7 @@
#include <AK/ByteReader.h>
#include <AK/Platform.h>
#include <AK/Types.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/IO.h>
#endif
#include <Kernel/Bus/PCI/Definitions.h>
@ -21,7 +21,7 @@ namespace Kernel {
class IOWindow {
public:
enum class SpaceType {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
IO,
#endif
Memory,
@ -32,7 +32,7 @@ public:
template<typename V>
void write();
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
static ErrorOr<NonnullOwnPtr<IOWindow>> create_for_io_space(IOAddress, u64 space_length);
#endif
static ErrorOr<NonnullOwnPtr<IOWindow>> create_for_pci_device_bar(PCI::DeviceIdentifier const&, PCI::HeaderType0BaseRegister, u64 space_length);
@ -70,7 +70,7 @@ public:
~IOWindow();
PhysicalAddress as_physical_memory_address() const;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
IOAddress as_io_address() const;
#endif
@ -79,7 +79,7 @@ private:
u8 volatile* as_memory_address_pointer();
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
struct IOAddressData {
public:
IOAddressData(u64 address, u64 space_length)
@ -104,7 +104,7 @@ private:
template<typename T>
ALWAYS_INLINE void in(u64 start_offset, T& data)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
data = as_io_address().offset(start_offset).in<T>();
return;
@ -122,7 +122,7 @@ private:
template<typename T>
ALWAYS_INLINE void out(u64 start_offset, T value)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (m_space_type == SpaceType::IO) {
VERIFY(m_io_range);
as_io_address().offset(start_offset).out<T>(value);
@ -142,7 +142,7 @@ private:
OwnPtr<Memory::TypedMapping<u8 volatile>> m_memory_mapped_range;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
OwnPtr<IOAddressData> m_io_range;
#endif
};
@ -153,7 +153,7 @@ template<>
struct AK::Formatter<Kernel::IOWindow> : AK::Formatter<FormatString> {
ErrorOr<void> format(FormatBuilder& builder, Kernel::IOWindow const& value)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (value.space_type() == Kernel::IOWindow::SpaceType::IO)
return Formatter<FormatString>::format(builder, "{}"sv, value.as_io_address());
#endif

View file

@ -306,11 +306,7 @@ ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(Region c
void AddressSpace::dump_regions()
{
dbgln("Process regions:");
#if ARCH(I386)
char const* addr_padding = "";
#else
char const* addr_padding = " ";
#endif
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);

View file

@ -249,7 +249,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_global_data.with([&](auto& global_data) {
global_data.used_memory_ranges.ensure_capacity(4);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
#endif
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
@ -1152,11 +1152,7 @@ void MemoryManager::unregister_kernel_region(Region& region)
void MemoryManager::dump_kernel_regions()
{
dbgln("Kernel regions:");
#if ARCH(I386)
char const* addr_padding = "";
#else
char const* addr_padding = " ";
#endif
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
m_global_data.with([&](auto& global_data) {

View file

@ -57,11 +57,7 @@ ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspac
auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
if (directory->m_directory_pages[i]) {
#if ARCH(I386)
table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 1;
#else
table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 7;
#endif
}
}

View file

@ -121,26 +121,6 @@ ErrorOr<VirtualRange> RegionTree::allocate_range_randomized(size_t size, size_t
if (!m_total_range.contains(random_address, size))
continue;
#if ARCH(I386)
// Attempt to limit the amount of wasted address space on platforms with small address sizes (read: i686).
// This works by only allowing arbitrary random allocations until a certain threshold, to create more possibilities for placing mappings.
// After the threshold has been reached, new allocations can only be placed randomly within a certain range from the adjacent allocations.
VirtualAddress random_address_end { random_address.get() + size };
constexpr size_t max_allocations_until_limited = 200;
constexpr size_t max_space_between_allocations = 1 * MiB;
if (m_regions.size() >= max_allocations_until_limited) {
auto* lower_allocation = m_regions.find_largest_not_above(random_address.get());
auto* upper_allocation = m_regions.find_smallest_not_below(random_address_end.get());
bool lower_in_range = (!lower_allocation || random_address - lower_allocation->range().end() <= VirtualAddress(max_space_between_allocations));
bool upper_in_range = (!upper_allocation || upper_allocation->range().base() - random_address_end <= VirtualAddress(max_space_between_allocations));
if (!upper_in_range && !lower_in_range)
continue;
}
#endif
auto range_or_error = allocate_range_specific(random_address, size);
if (!range_or_error.is_error())
return range_or_error.release_value();

View file

@ -13,7 +13,7 @@ namespace Kernel {
ScopedAddressSpaceSwitcher::ScopedAddressSpaceSwitcher(Process& process)
{
VERIFY(Thread::current() != nullptr);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
m_previous_cr3 = read_cr3();
#elif ARCH(AARCH64)
TODO_AARCH64();
@ -24,7 +24,7 @@ ScopedAddressSpaceSwitcher::ScopedAddressSpaceSwitcher(Process& process)
ScopedAddressSpaceSwitcher::~ScopedAddressSpaceSwitcher()
{
InterruptDisabler disabler;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
Thread::current()->regs().cr3 = m_previous_cr3;
write_cr3(m_previous_cr3);
#elif ARCH(AARCH64)

View file

@ -10,25 +10,17 @@ extern "C" {
void* memcpy(void* dest_ptr, void const* src_ptr, size_t n)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
# if ARCH(I386)
asm volatile(
"rep movsl\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
# else
asm volatile(
"rep movsq\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
# endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
@ -59,25 +51,17 @@ void* memmove(void* dest, void const* src, size_t n)
void* memset(void* dest_ptr, int c, size_t n)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
size_t dest = (size_t)dest_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = explode_byte((u8)c);
# if ARCH(I386)
asm volatile(
"rep stosl\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
# else
asm volatile(
"rep stosq\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
# endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;

View file

@ -6,7 +6,7 @@
#include <AK/Format.h>
#include <Kernel/Arch/Processor.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/Shutdown.h>
#endif
#include <Kernel/CommandLine.h>
@ -18,7 +18,7 @@ namespace Kernel {
[[noreturn]] static void __shutdown()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
qemu_shutdown();
virtualbox_shutdown();
#endif

View file

@ -7,10 +7,7 @@ set(SOURCES
../../Userland/Libraries/LibELF/Relocation.cpp
)
if ("${SERENITY_ARCH}" STREQUAL "i686")
set(PREKERNEL_TARGET Prekernel32)
elseif ("${SERENITY_ARCH}" STREQUAL "x86_64")
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
set(PREKERNEL_TARGET Prekernel64)
elseif("${SERENITY_ARCH}" STREQUAL "aarch64")
message(SEND_ERROR "Prekernel is not needed on aarch64 and should not be compiled!")

View file

@ -15,7 +15,7 @@ extern "C" {
static void print_location(SourceLocation const&)
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
asm volatile("cli; hlt");
#else
for (;;) { }

View file

@ -78,7 +78,6 @@ the page tables each contain 512 PTEs that map individual 4KB pages
*/
#if ARCH(X86_64)
gdt64:
.quad 0
gdt64code:
@ -93,7 +92,6 @@ gdt64ptr:
.global code64_sel
code64_sel:
.short code64_sel_value
#endif
start:
jmp real_start
@ -370,8 +368,6 @@ kernel_not_too_large:
/* We should not return, but just in case, halt */
hlt
#if ARCH(X86_64)
pae_supported:
movl $0x80000001, %eax
cpuid
@ -393,15 +389,6 @@ long_mode_supported:
popl %ebx
popl %edx
popl %eax
#else
/* If PAE is supported, continue with booting the system */
pae_supported:
/* restore the pushed registers and continue with booting */
popl %ebx
popl %edx
popl %eax
#endif
/* We don't know where the bootloader might have put the command line.
* It might be at an inconvenient location that we're not about to map,
@ -416,7 +403,6 @@ pae_supported:
movl $kernel_cmdline, %edi
rep movsl
#if ARCH(X86_64)
/* clear pml4t */
movl $boot_pml4t, %edi
movl $1024, %ecx
@ -428,7 +414,6 @@ pae_supported:
movl $boot_pdpt, 0(%edi)
/* R/W + Present */
orl $0x3, 0(%edi)
#endif
/* clear pdpt */
movl $boot_pdpt, %edi
@ -438,11 +423,7 @@ pae_supported:
/* set up pdpt[0] and pdpt[3] */
movl $boot_pdpt, %edi
#if ARCH(X86_64)
movl $(boot_pd0 + 3), 0(%edi)
#else
movl $(boot_pd0 + 1), 0(%edi)
#endif
/* clear pd0 */
movl $boot_pd0, %edi
@ -482,13 +463,8 @@ pae_supported:
addl $4096, %eax
loop 1b
#if ARCH(X86_64)
/* point CR3 to PML4T */
movl $boot_pml4t, %eax
#else
/* point CR3 to PDPT */
movl $boot_pdpt, %eax
#endif
movl %eax, %cr3
@ -497,14 +473,12 @@ pae_supported:
orl $0x60, %eax
movl %eax, %cr4
#if ARCH(X86_64)
1:
/* Enter Long-mode! ref(https://wiki.osdev.org/Setting_Up_Long_Mode)*/
mov $0xC0000080, %ecx /* Set the C-register to 0xC0000080, which is the EFER MSR.*/
rdmsr /* Read from the model-specific register.*/
or $(1 << 8), %eax /* Set the LM-bit which is the 9th bit (bit 8).*/
wrmsr /* Write to the model-specific register.*/
#endif
/* enable PG */
movl %cr0, %eax
@ -515,7 +489,6 @@ pae_supported:
mov $stack_top, %esp
and $-16, %esp
#if ARCH(X86_64)
/* Now we are in 32-bit compatibility mode, We still need to load a 64-bit GDT */
mov $gdt64ptr, %eax
lgdt (%eax)
@ -532,9 +505,6 @@ pae_supported:
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
#else
movl %ebx, multiboot_info_ptr
#endif
call reload_cr3
call init
@ -545,17 +515,10 @@ loop:
jmp loop
reload_cr3:
#if ARCH(X86_64)
pushq %rax
mov %cr3, %rax
mov %rax, %cr3
popq %rax
#else
pushl %eax
movl %cr3, %eax
movl %eax, %cr3
popl %eax
#endif
ret

View file

@ -14,7 +14,7 @@
#include <LibC/elf.h>
#include <LibELF/Relocation.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/ASM_wrapper.h>
# include <Kernel/Arch/x86/CPUID.h>
#endif
@ -90,11 +90,7 @@ extern "C" [[noreturn]] void init()
__builtin_memcpy(kernel_program_headers, kernel_image + kernel_elf_header.e_phoff, sizeof(ElfW(Phdr)) * kernel_elf_header.e_phnum);
FlatPtr kernel_physical_base = 0x200000;
#if ARCH(I386)
FlatPtr default_kernel_load_base = 0xc0200000;
#else
FlatPtr default_kernel_load_base = 0x2000200000;
#endif
FlatPtr kernel_load_base = default_kernel_load_base;
@ -125,11 +121,8 @@ extern "C" [[noreturn]] void init()
VERIFY(kernel_load_base % 0x1000 == 0);
VERIFY(kernel_load_base >= kernel_mapping_base + 0x200000);
#if ARCH(I386)
int pdpt_flags = 0x1;
#else
int pdpt_flags = 0x3;
#endif
boot_pdpt[(kernel_mapping_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
boot_pd_kernel[0] = (FlatPtr)boot_pd_kernel_pt0 | 0x3;
@ -213,13 +206,8 @@ extern "C" [[noreturn]] void init()
}
asm(
#if ARCH(I386)
"add %0, %%esp"
#else
"mov %0, %%rax\n"
"add %%rax, %%rsp"
#endif
::"g"(kernel_mapping_base)
"add %%rax, %%rsp" ::"g"(kernel_mapping_base)
: "ax");
// unmap the 0-1MB region
@ -244,7 +232,7 @@ u64 generate_secure_seed()
{
u32 seed = 0xFEEBDAED;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
CPUID processor_info(0x1);
if (processor_info.edx() & (1 << 4)) // TSC
seed ^= read_tsc();

View file

@ -274,9 +274,7 @@ LockRefPtr<Process> Process::create_kernel_process(LockRefPtr<Thread>& first_thr
auto process = process_or_error.release_value();
first_thread->regs().set_ip((FlatPtr)entry);
#if ARCH(I386)
first_thread->regs().esp = FlatPtr(entry_data); // entry function argument is expected to be in regs.esp
#elif ARCH(X86_64)
#if ARCH(X86_64)
first_thread->regs().rdi = FlatPtr(entry_data); // entry function argument is expected to be in regs.rdi
#elif ARCH(AARCH64)
(void)entry_data;
@ -396,40 +394,7 @@ Process::~Process()
extern void signal_trampoline_dummy() __attribute__((used));
void signal_trampoline_dummy()
{
#if ARCH(I386)
// The trampoline preserves the current eax, pushes the signal code and
// then calls the signal handler. We do this because, when interrupting a
// blocking syscall, that syscall may return some special error code in eax;
// This error code would likely be overwritten by the signal handler, so it's
// necessary to preserve it here.
constexpr static auto offset_to_first_register_slot = sizeof(__ucontext) + sizeof(siginfo) + sizeof(FPUState) + 4 * sizeof(FlatPtr);
asm(
".intel_syntax noprefix\n"
".globl asm_signal_trampoline\n"
"asm_signal_trampoline:\n"
// stack state: 0, ucontext, signal_info, (alignment = 16), fpu_state (alignment = 16), 0, ucontext*, siginfo*, signal, (alignment = 16), handler
// Pop the handler into ecx
"pop ecx\n" // save handler
// we have to save eax 'cause it might be the return value from a syscall
"mov [esp+%P1], eax\n"
// Note that the stack is currently aligned to 16 bytes as we popped the extra entries above.
// and it's already setup to call the handler with the expected values on the stack.
// call the signal handler
"call ecx\n"
// drop the 4 arguments
"add esp, 16\n"
// Current stack state is just saved_eax, ucontext, signal_info, fpu_state?.
// syscall SC_sigreturn
"mov eax, %P0\n"
"int 0x82\n"
".globl asm_signal_trampoline_end\n"
"asm_signal_trampoline_end:\n"
".att_syntax"
:
: "i"(Syscall::SC_sigreturn),
"i"(offset_to_first_register_slot));
#elif ARCH(X86_64)
#if ARCH(X86_64)
// The trampoline preserves the current rax, pushes the signal code and
// then calls the signal handler. We do this because, when interrupting a
// blocking syscall, that syscall may return some special error code in eax;

View file

@ -7,7 +7,7 @@
#include <AK/Singleton.h>
#include <Kernel/Arch/Processor.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Time/HPET.h>
# include <Kernel/Arch/x86/Time/RTC.h>
#endif
@ -28,7 +28,7 @@ KernelRng& KernelRng::the()
UNMAP_AFTER_INIT KernelRng::KernelRng()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bool supports_rdseed = Processor::current().has_feature(CPUFeature::RDSEED);
bool supports_rdrand = Processor::current().has_feature(CPUFeature::RDRAND);
if (supports_rdseed || supports_rdrand) {

View file

@ -508,7 +508,7 @@ void dump_thread_list(bool with_stack_traces)
dbgln("Scheduler thread list for processor {}:", Processor::current_id());
auto get_cs = [](Thread& thread) -> u16 {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (!thread.current_trap())
return thread.regs().cs;
return thread.get_register_dump_from_stack().cs;

View file

@ -82,7 +82,7 @@ ErrorOr<void> IDEChannel::port_phy_reset()
return {};
}
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
ErrorOr<void> IDEChannel::allocate_resources_for_pci_ide_controller(Badge<PCIIDELegacyModeController>, bool force_pio)
{
return allocate_resources(force_pio);

View file

@ -36,7 +36,7 @@ namespace Kernel {
class AsyncBlockDeviceRequest;
class IDEController;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
class PCIIDELegacyModeController;
class ISAIDEController;
#endif
@ -95,7 +95,7 @@ public:
virtual StringView purpose() const override { return "PATA Channel"sv; }
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
ErrorOr<void> allocate_resources_for_pci_ide_controller(Badge<PCIIDELegacyModeController>, bool force_pio);
ErrorOr<void> allocate_resources_for_isa_ide_controller(Badge<ISAIDEController>);
#endif

View file

@ -10,7 +10,7 @@
#include <AK/Singleton.h>
#include <AK/StringView.h>
#include <AK/UUID.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/ISABus/IDEController.h>
# include <Kernel/Arch/x86/PCI/IDELegacyModeController.h>
#endif
@ -102,8 +102,8 @@ UNMAP_AFTER_INIT void StorageManagement::enumerate_pci_controllers(bool force_pi
}
}
#if ARCH(X86_64)
auto subclass_code = static_cast<SubclassID>(device_identifier.subclass_code().value());
#if ARCH(I386) || ARCH(X86_64)
if (subclass_code == SubclassID::IDEController && kernel_command_line().is_ide_enabled()) {
m_controllers.append(PCIIDELegacyModeController::initialize(device_identifier, force_pio));
}
@ -426,7 +426,7 @@ UNMAP_AFTER_INIT void StorageManagement::initialize(StringView root_device, bool
VERIFY(s_storage_device_minor_number == 0);
m_boot_argument = root_device;
if (PCI::Access::is_disabled()) {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
// Note: If PCI is disabled, we assume that at least we have an ISA IDE controller
// to probe and use
auto isa_ide_controller = MUST(ISAIDEController::initialize());

View file

@ -24,33 +24,7 @@ extern "C" void syscall_asm_entry();
NEVER_INLINE NAKED void syscall_asm_entry()
{
// clang-format off
#if ARCH(I386)
asm(
" pushl $0x0\n"
" pusha\n"
" pushl %ds\n"
" pushl %es\n"
" pushl %fs\n"
" pushl %gs\n"
" pushl %ss\n"
" mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n"
" mov %ax, %ds\n"
" mov %ax, %es\n"
" mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n"
" mov %ax, %gs\n"
" cld\n"
" xor %esi, %esi\n"
" xor %edi, %edi\n"
" pushl %esp \n" // set TrapFrame::regs
" subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n"
" movl %esp, %ebx \n"
" pushl %ebx \n" // push pointer to TrapFrame
" call enter_trap_no_irq \n"
" movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
" call syscall_handler \n"
" movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
" jmp common_trap_exit \n");
#elif ARCH(X86_64)
#if ARCH(X86_64)
asm(
" pushq $0x0\n"
" pushq %r15\n"

View file

@ -5,7 +5,7 @@
*/
#include <Kernel/CommandLine.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/PCSpeaker.h>
#endif
#include <Kernel/Process.h>
@ -17,7 +17,7 @@ ErrorOr<FlatPtr> Process::sys$beep()
VERIFY_NO_PROCESS_BIG_LOCK(this);
if (!kernel_command_line().is_pc_speaker_enabled())
return ENODEV;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
PCSpeaker::tone_on(440);
auto result = Thread::current()->sleep(Time::from_nanoseconds(200'000'000));
PCSpeaker::tone_off();

View file

@ -149,16 +149,7 @@ static ErrorOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]]
// NOTE: The stack needs to be 16-byte aligned.
new_sp -= new_sp % 16;
#if ARCH(I386)
// GCC assumes that the return address has been pushed to the stack when it enters the function,
// so we need to reserve an extra pointer's worth of bytes below this to make GCC's stack alignment
// calculations work
new_sp -= sizeof(void*);
push_on_new_stack(envp);
push_on_new_stack(argv);
push_on_new_stack(argv_entries.size());
#elif ARCH(X86_64)
#if ARCH(X86_64)
regs.rdi = argv_entries.size();
regs.rsi = argv;
regs.rdx = envp;
@ -686,18 +677,8 @@ ErrorOr<void> Process::do_exec(NonnullLockRefPtr<OpenFileDescription> main_progr
auto& regs = new_main_thread->m_regs;
regs.cs = GDT_SELECTOR_CODE3 | 3;
#if ARCH(I386)
regs.ds = GDT_SELECTOR_DATA3 | 3;
regs.es = GDT_SELECTOR_DATA3 | 3;
regs.ss = GDT_SELECTOR_DATA3 | 3;
regs.fs = GDT_SELECTOR_DATA3 | 3;
regs.gs = GDT_SELECTOR_TLS | 3;
regs.eip = load_result.entry_eip;
regs.esp = new_userspace_sp;
#else
regs.rip = load_result.entry_eip;
regs.rsp = new_userspace_sp;
#endif
regs.cr3 = address_space().with([](auto& space) { return space->page_directory().cr3(); });
{

View file

@ -101,28 +101,7 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
child_first_thread->m_alternative_signal_stack = Thread::current()->m_alternative_signal_stack;
child_first_thread->m_alternative_signal_stack_size = Thread::current()->m_alternative_signal_stack_size;
#if ARCH(I386)
auto& child_regs = child_first_thread->m_regs;
child_regs.eax = 0; // fork() returns 0 in the child :^)
child_regs.ebx = regs.ebx;
child_regs.ecx = regs.ecx;
child_regs.edx = regs.edx;
child_regs.ebp = regs.ebp;
child_regs.esp = regs.userspace_esp;
child_regs.esi = regs.esi;
child_regs.edi = regs.edi;
child_regs.eflags = regs.eflags;
child_regs.eip = regs.eip;
child_regs.cs = regs.cs;
child_regs.ds = regs.ds;
child_regs.es = regs.es;
child_regs.fs = regs.fs;
child_regs.gs = regs.gs;
child_regs.ss = regs.userspace_ss;
dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:#04x}:{:p} with stack {:#04x}:{:p}, kstack {:#04x}:{:p}",
child_regs.cs, child_regs.eip, child_regs.ss, child_regs.esp, child_regs.ss0, child_regs.esp0);
#elif ARCH(X86_64)
#if ARCH(X86_64)
auto& child_regs = child_first_thread->m_regs;
child_regs.rax = 0; // fork() returns 0 in the child :^)
child_regs.rbx = regs.rbx;

View file

@ -563,14 +563,8 @@ ErrorOr<FlatPtr> Process::sys$allocate_tls(Userspace<char const*> initial_data,
TRY(main_thread->make_thread_specific_region({}));
#if ARCH(I386)
auto& tls_descriptor = Processor::current().get_gdt_entry(GDT_SELECTOR_TLS);
tls_descriptor.set_base(main_thread->thread_specific_data());
tls_descriptor.set_limit(main_thread->thread_specific_region_size());
#else
MSR fs_base_msr(MSR_FS_BASE);
fs_base_msr.set(main_thread->thread_specific_data().get());
#endif
return m_master_tls_region.unsafe_ptr()->vaddr().get();
});

View file

@ -87,7 +87,7 @@ ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& register
// Stack state (created by the signal trampoline):
// saved_ax, ucontext, signal_info, fpu_state?.
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
// The FPU state is at the top here, pop it off and restore it.
// FIXME: The stack alignment is off by 8 bytes here, figure this out and remove this excessively aligned object.
alignas(alignof(FPUState) * 2) FPUState data {};
@ -107,8 +107,6 @@ ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& register
Thread::current()->m_currently_handled_signal = 0;
#if ARCH(X86_64)
auto sp = registers.rsp;
#elif ARCH(I386)
auto sp = registers.esp;
#endif
copy_ptrace_registers_into_kernel_registers(registers, static_cast<PtraceRegisters const&>(ucontext.uc_mcontext));
@ -116,9 +114,6 @@ ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& register
#if ARCH(X86_64)
registers.set_userspace_sp(registers.rsp);
registers.rsp = sp;
#elif ARCH(I386)
registers.set_userspace_sp(registers.esp);
registers.esp = sp;
#endif
return saved_ax;

View file

@ -22,9 +22,7 @@ ErrorOr<FlatPtr> Process::sys$uname(Userspace<utsname*> user_buf)
{}, // Hostname, filled in below.
{}, // "Release" (1.0-dev), filled in below.
{}, // "Revision" (git commit hash), filled in below.
#if ARCH(I386)
"i686",
#elif ARCH(X86_64)
#if ARCH(X86_64)
"x86_64",
#elif ARCH(AARCH64)
"AArch64",

View file

@ -8,7 +8,7 @@
#include <AK/StdLibExtras.h>
#include <Kernel/Arch/Delay.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/PCSpeaker.h>
#endif
#include <Kernel/CommandLine.h>
@ -329,7 +329,7 @@ void VirtualConsole::beep()
{
if (!kernel_command_line().is_pc_speaker_enabled())
return;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
PCSpeaker::tone_on(440);
microseconds_delay(10000);
PCSpeaker::tone_off();

View file

@ -80,23 +80,7 @@ Thread::Thread(NonnullLockRefPtr<Process> process, NonnullOwnPtr<Memory::Region>
// Only IF is set when a process boots.
m_regs.set_flags(0x0202);
#if ARCH(I386)
if (m_process->is_kernel_process()) {
m_regs.cs = GDT_SELECTOR_CODE0;
m_regs.ds = GDT_SELECTOR_DATA0;
m_regs.es = GDT_SELECTOR_DATA0;
m_regs.fs = 0;
m_regs.ss = GDT_SELECTOR_DATA0;
m_regs.gs = GDT_SELECTOR_PROC;
} else {
m_regs.cs = GDT_SELECTOR_CODE3 | 3;
m_regs.ds = GDT_SELECTOR_DATA3 | 3;
m_regs.es = GDT_SELECTOR_DATA3 | 3;
m_regs.fs = GDT_SELECTOR_DATA3 | 3;
m_regs.ss = GDT_SELECTOR_DATA3 | 3;
m_regs.gs = GDT_SELECTOR_TLS | 3;
}
#elif ARCH(X86_64)
#if ARCH(X86_64)
if (m_process->is_kernel_process())
m_regs.cs = GDT_SELECTOR_CODE0;
else
@ -118,9 +102,6 @@ Thread::Thread(NonnullLockRefPtr<Process> process, NonnullOwnPtr<Memory::Region>
} else {
// Ring 3 processes get a separate stack for ring 0.
// The ring 3 stack will be assigned by exec().
#if ARCH(I386)
m_regs.ss0 = GDT_SELECTOR_DATA0;
#endif
m_regs.set_sp0(m_kernel_stack_top);
}
@ -1151,9 +1132,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
if (action.flags & SA_SIGINFO)
fill_signal_info_for_signal(signal_info);
#if ARCH(I386)
constexpr static FlatPtr thread_red_zone_size = 0;
#elif ARCH(X86_64)
#if ARCH(X86_64)
constexpr static FlatPtr thread_red_zone_size = 128;
#elif ARCH(AARCH64)
constexpr static FlatPtr thread_red_zone_size = 0; // FIXME
@ -1188,23 +1167,15 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
VERIFY(stack % 16 == 0);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
// Save the FPU/SSE state
TRY(copy_value_on_user_stack(stack, fpu_state()));
#endif
#if ARCH(I386)
// Leave one empty slot to align the stack for a handler call.
TRY(push_value_on_user_stack(stack, 0));
#endif
TRY(push_value_on_user_stack(stack, pointer_to_ucontext));
TRY(push_value_on_user_stack(stack, pointer_to_signal_info));
TRY(push_value_on_user_stack(stack, signal));
#if ARCH(I386)
VERIFY(stack % 16 == 0);
#endif
TRY(push_value_on_user_stack(stack, handler_vaddr.get()));
// We write back the adjusted stack value into the register state.

View file

@ -50,24 +50,7 @@ struct ThreadSpecificData {
#define THREAD_AFFINITY_DEFAULT 0xffffffff
struct ThreadRegisters {
#if ARCH(I386)
FlatPtr ss;
FlatPtr gs;
FlatPtr fs;
FlatPtr es;
FlatPtr ds;
FlatPtr edi;
FlatPtr esi;
FlatPtr ebp;
FlatPtr esp;
FlatPtr ebx;
FlatPtr edx;
FlatPtr ecx;
FlatPtr eax;
FlatPtr eip;
FlatPtr esp0;
FlatPtr ss0;
#else
#if ARCH(X86_64)
FlatPtr rdi;
FlatPtr rsi;
FlatPtr rbp;
@ -89,14 +72,7 @@ struct ThreadRegisters {
#endif
FlatPtr cs;
#if ARCH(I386)
FlatPtr eflags;
FlatPtr flags() const { return eflags; }
void set_flags(FlatPtr value) { eflags = value; }
void set_sp(FlatPtr value) { esp = value; }
void set_sp0(FlatPtr value) { esp0 = value; }
void set_ip(FlatPtr value) { eip = value; }
#else
#if ARCH(X86_64)
FlatPtr rflags;
FlatPtr flags() const { return rflags; }
void set_flags(FlatPtr value) { rflags = value; }
@ -109,18 +85,14 @@ struct ThreadRegisters {
FlatPtr ip() const
{
#if ARCH(I386)
return eip;
#else
#if ARCH(X86_64)
return rip;
#endif
}
FlatPtr sp() const
{
#if ARCH(I386)
return esp;
#else
#if ARCH(X86_64)
return rsp;
#endif
}

View file

@ -8,8 +8,7 @@
#include <AK/Singleton.h>
#include <AK/StdLibExtras.h>
#include <AK/Time.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/Time/APICTimer.h>
# include <Kernel/Arch/x86/Time/HPET.h>
# include <Kernel/Arch/x86/Time/HPETComparator.h>
@ -125,7 +124,7 @@ Time TimeManagement::monotonic_time(TimePrecision precision) const
ticks = m_ticks_this_second;
if (do_query) {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
// We may have to do this over again if the timer interrupt fires
// while we're trying to query the information. In that case, our
// seconds and ticks became invalid, producing an incorrect time.
@ -176,7 +175,7 @@ UNMAP_AFTER_INIT void TimeManagement::initialize([[maybe_unused]] u32 cpu)
// the TimeManagement class is completely initialized.
InterruptDisabler disabler;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
if (cpu == 0) {
VERIFY(!s_the.is_initialized());
s_the.ensure_instance();
@ -229,7 +228,7 @@ time_t TimeManagement::ticks_per_second() const
Time TimeManagement::boot_time()
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
return RTC::boot_time();
#elif ARCH(AARCH64)
TODO_AARCH64();
@ -241,7 +240,7 @@ Time TimeManagement::boot_time()
UNMAP_AFTER_INIT TimeManagement::TimeManagement()
: m_time_page_region(MM.allocate_kernel_region(PAGE_SIZE, "Time page"sv, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value_but_fixme_should_propagate_errors())
{
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bool probe_non_legacy_hardware_timers = !(kernel_command_line().is_legacy_time_enabled());
if (ACPI::is_enabled()) {
if (!ACPI::Parser::the()->x86_specific_flags().cmos_rtc_not_present) {
@ -312,7 +311,7 @@ bool TimeManagement::is_hpet_periodic_mode_allowed()
}
}
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
UNMAP_AFTER_INIT bool TimeManagement::probe_and_set_x86_non_legacy_hardware_timers()
{
if (!ACPI::is_enabled())

View file

@ -81,7 +81,7 @@ private:
TimePage& time_page();
void update_time_page();
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bool probe_and_set_x86_legacy_hardware_timers();
bool probe_and_set_x86_non_legacy_hardware_timers();
void increment_time_since_boot_hpet();

View file

@ -8,7 +8,7 @@
#include <AK/StringView.h>
#include <AK/Types.h>
#include <Kernel/Arch/DebugOutput.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
# include <Kernel/Arch/x86/common/BochsDebugOutput.h>
#endif
#include <Kernel/Devices/ConsoleDevice.h>
@ -54,7 +54,7 @@ static void critical_console_out(char ch)
if (s_serial_debug_enabled)
serial_putch(ch);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
// No need to output things to the real ConsoleDevice as no one is likely
// to read it (because we are in a fatal situation, so only print things and halt)
bochs_debug_output(ch);
@ -79,7 +79,7 @@ static void console_out(char ch)
if (DeviceManagement::the().is_console_device_attached()) {
DeviceManagement::the().console_device().put_char(ch);
} else {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bochs_debug_output(ch);
#endif
}
@ -139,7 +139,7 @@ static inline void internal_dbgputch(char ch)
{
if (s_serial_debug_enabled)
serial_putch(ch);
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
bochs_debug_output(ch);
#endif
}

View file

@ -200,7 +200,7 @@ int main(int argc, char** argv)
return Crash::Failure::UnexpectedError;
u8* makeshift_esp = makeshift_stack + 2048;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
asm volatile("mov %%eax, %%esp" ::"a"(makeshift_esp));
#elif ARCH(AARCH64)
(void)makeshift_esp;
@ -216,7 +216,7 @@ int main(int argc, char** argv)
return Crash::Failure::UnexpectedError;
u8* bad_esp = bad_stack + 2048;
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
asm volatile("mov %%eax, %%esp" ::"a"(bad_esp));
#elif ARCH(AARCH64)
(void)bad_esp;
@ -281,7 +281,7 @@ int main(int argc, char** argv)
if (do_trigger_user_mode_instruction_prevention) {
any_failures |= !Crash("Trigger x86 User Mode Instruction Prevention", []() {
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
asm volatile("str %eax");
#elif ARCH(AARCH64)
TODO_AARCH64();

View file

@ -11,7 +11,7 @@
#include <string.h>
#include <unistd.h>
#if ARCH(I386) || ARCH(X86_64)
#if ARCH(X86_64)
asm("haxcode:\n"
"1: jmp 1b\n"
"haxcode_end:\n");