diff --git a/DevTools/UserspaceEmulator/Emulator.cpp b/DevTools/UserspaceEmulator/Emulator.cpp index 7da61f0dce..8e5e957f69 100644 --- a/DevTools/UserspaceEmulator/Emulator.cpp +++ b/DevTools/UserspaceEmulator/Emulator.cpp @@ -77,30 +77,30 @@ void Emulator::setup_stack(const Vector& arguments) auto stack_region = make(stack_location, stack_size); stack_region->set_stack(true); m_mmu.add_region(move(stack_region)); - m_cpu.set_esp(stack_location + stack_size); + m_cpu.set_esp(shadow_wrap_as_initialized(stack_location + stack_size)); Vector argv_entries; for (auto& argument : arguments) { m_cpu.push_string(argument.characters()); - argv_entries.append(m_cpu.esp()); + argv_entries.append(m_cpu.esp().value()); } - m_cpu.push32(0); // char** envp = { nullptr } - u32 envp = m_cpu.esp(); + m_cpu.push32(shadow_wrap_as_initialized(0)); // char** envp = { nullptr } + u32 envp = m_cpu.esp().value(); - m_cpu.push32(0); // char** argv = { argv_entries..., nullptr } + m_cpu.push32(shadow_wrap_as_initialized(0)); // char** argv = { argv_entries..., nullptr } for (ssize_t i = argv_entries.size() - 1; i >= 0; --i) - m_cpu.push32(argv_entries[i]); - u32 argv = m_cpu.esp(); + m_cpu.push32(shadow_wrap_as_initialized(argv_entries[i])); + u32 argv = m_cpu.esp().value(); - m_cpu.push32(0); // (alignment) + m_cpu.push32(shadow_wrap_as_initialized(0)); // (alignment) u32 argc = argv_entries.size(); - m_cpu.push32(envp); - m_cpu.push32(argv); - m_cpu.push32(argc); - m_cpu.push32(0); // (alignment) + m_cpu.push32(shadow_wrap_as_initialized(envp)); + m_cpu.push32(shadow_wrap_as_initialized(argv)); + m_cpu.push32(shadow_wrap_as_initialized(argc)); + m_cpu.push32(shadow_wrap_as_initialized(0)); // (alignment) } bool Emulator::load_elf() @@ -111,15 +111,18 @@ bool Emulator::load_elf() if (program_header.is_executable() && !program_header.is_writable()) region->set_text(true); memcpy(region->data(), program_header.raw_data(), program_header.size_in_image()); + memset(region->shadow_data(), 0x01, program_header.size_in_memory()); mmu().add_region(move(region)); return; } if (program_header.type() == PT_TLS) { auto tcb_region = make(0x20000000, program_header.size_in_memory()); memcpy(tcb_region->data(), program_header.raw_data(), program_header.size_in_image()); + memset(tcb_region->shadow_data(), 0x01, program_header.size_in_image()); auto tls_region = make(0, 4); - tls_region->write32(0, tcb_region->base() + 8); + tls_region->write32(0, shadow_wrap_as_initialized(tcb_region->base() + 8)); + memset(tls_region->shadow_data(), 0x01, 4); mmu().add_region(move(tcb_region)); mmu().set_tls_region(move(tls_region)); @@ -195,13 +198,15 @@ Vector Emulator::raw_backtrace() Vector backtrace; backtrace.append(m_cpu.eip()); - u32 frame_ptr = m_cpu.ebp(); + // FIXME: Maybe do something if the backtrace has uninitialized data in the frame chain. 
+ + u32 frame_ptr = m_cpu.ebp().value(); while (frame_ptr) { - u32 ret_ptr = m_mmu.read32({ 0x20, frame_ptr + 4 }); + u32 ret_ptr = m_mmu.read32({ 0x20, frame_ptr + 4 }).value(); if (!ret_ptr) break; backtrace.append(ret_ptr); - frame_ptr = m_mmu.read32({ 0x20, frame_ptr }); + frame_ptr = m_mmu.read32({ 0x20, frame_ptr }).value(); } return backtrace; } diff --git a/DevTools/UserspaceEmulator/MallocTracer.cpp b/DevTools/UserspaceEmulator/MallocTracer.cpp index 696d5be114..47c0e53ae1 100644 --- a/DevTools/UserspaceEmulator/MallocTracer.cpp +++ b/DevTools/UserspaceEmulator/MallocTracer.cpp @@ -28,6 +28,7 @@ #include "Emulator.h" #include "MmapRegion.h" #include +#include //#define REACHABLE_DEBUG @@ -41,6 +42,13 @@ MallocTracer::MallocTracer() void MallocTracer::target_did_malloc(Badge, FlatPtr address, size_t size) { + auto* region = Emulator::the().mmu().find_region({ 0x20, address }); + ASSERT(region); + ASSERT(region->is_mmap()); + auto& mmap_region = static_cast(*region); + auto* shadow_bits = mmap_region.shadow_data() + address - mmap_region.base(); + memset(shadow_bits, 0, size); + if (auto* existing_mallocation = find_mallocation(address)) { ASSERT(existing_mallocation->freed); existing_mallocation->size = size; @@ -151,7 +159,7 @@ bool MallocTracer::is_reachable(const Mallocation& mallocation) const size_t pointers_in_mallocation = other_mallocation.size / sizeof(u32); for (size_t i = 0; i < pointers_in_mallocation; ++i) { auto value = Emulator::the().mmu().read32({ 0x20, other_mallocation.address + i * sizeof(u32) }); - if (value == mallocation.address) { + if (value.value() == mallocation.address && !value.is_uninitialized()) { #ifdef REACHABLE_DEBUG dbgprintf("mallocation %p is reachable from other mallocation %p\n", mallocation.address, other_mallocation.address); #endif @@ -176,7 +184,7 @@ bool MallocTracer::is_reachable(const Mallocation& mallocation) const size_t pointers_in_region = region.size() / sizeof(u32); for (size_t i = 0; i < pointers_in_region; ++i) { auto value = region.read32(i * sizeof(u32)); - if (value == mallocation.address) { + if (value.value() == mallocation.address && !value.is_uninitialized()) { #ifdef REACHABLE_DEBUG dbgprintf("mallocation %p is reachable from region %p-%p\n", mallocation.address, region.base(), region.end() - 1); #endif diff --git a/DevTools/UserspaceEmulator/MmapRegion.cpp b/DevTools/UserspaceEmulator/MmapRegion.cpp index 84cf8c3ec4..278776ab3c 100644 --- a/DevTools/UserspaceEmulator/MmapRegion.cpp +++ b/DevTools/UserspaceEmulator/MmapRegion.cpp @@ -51,10 +51,12 @@ MmapRegion::MmapRegion(u32 base, u32 size, int prot) : Region(base, size) , m_prot(prot) { + m_shadow_data = (u8*)calloc(1, size); } MmapRegion::~MmapRegion() { + free(m_shadow_data); if (m_file_backed) munmap(m_data, size()); else @@ -68,7 +70,7 @@ bool MmapRegion::is_malloc_block() const return !m_file_backed; } -u8 MmapRegion::read8(FlatPtr offset) +ValueWithShadow MmapRegion::read8(FlatPtr offset) { if (!is_readable()) { warn() << "8-bit read from unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -82,10 +84,10 @@ u8 MmapRegion::read8(FlatPtr offset) } ASSERT(offset < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u16 MmapRegion::read16(u32 offset) +ValueWithShadow MmapRegion::read16(u32 offset) { if (!is_readable()) { warn() << "16-bit from unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -99,10 +101,10 @@ u16 MmapRegion::read16(u32 offset) } 
ASSERT(offset + 1 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u32 MmapRegion::read32(u32 offset) +ValueWithShadow MmapRegion::read32(u32 offset) { if (!is_readable()) { warn() << "32-bit read from unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -116,10 +118,10 @@ u32 MmapRegion::read32(u32 offset) } ASSERT(offset + 3 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -void MmapRegion::write8(u32 offset, u8 value) +void MmapRegion::write8(u32 offset, ValueWithShadow value) { if (!is_writable()) { warn() << "8-bit write to unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -133,10 +135,11 @@ void MmapRegion::write8(u32 offset, u8 value) } ASSERT(offset < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void MmapRegion::write16(u32 offset, u16 value) +void MmapRegion::write16(u32 offset, ValueWithShadow value) { if (!is_writable()) { warn() << "16-bit write to unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -150,10 +153,11 @@ void MmapRegion::write16(u32 offset, u16 value) } ASSERT(offset + 1 < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void MmapRegion::write32(u32 offset, u32 value) +void MmapRegion::write32(u32 offset, ValueWithShadow value) { if (!is_writable()) { warn() << "32-bit write to unreadable MmapRegion @ " << (const void*)(base() + offset); @@ -167,7 +171,9 @@ void MmapRegion::write32(u32 offset, u32 value) } ASSERT(offset + 3 < size()); - *reinterpret_cast(m_data + offset) = value; + ASSERT(m_data != m_shadow_data); + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } } diff --git a/DevTools/UserspaceEmulator/MmapRegion.h b/DevTools/UserspaceEmulator/MmapRegion.h index 57c02ed73d..a32c5271d9 100644 --- a/DevTools/UserspaceEmulator/MmapRegion.h +++ b/DevTools/UserspaceEmulator/MmapRegion.h @@ -37,15 +37,16 @@ public: static NonnullOwnPtr create_file_backed(u32 base, u32 size, u32 prot, int flags, int fd, off_t offset); virtual ~MmapRegion() override; - virtual u8 read8(u32 offset) override; - virtual u16 read16(u32 offset) override; - virtual u32 read32(u32 offset) override; + virtual ValueWithShadow read8(u32 offset) override; + virtual ValueWithShadow read16(u32 offset) override; + virtual ValueWithShadow read32(u32 offset) override; - virtual void write8(u32 offset, u8 value) override; - virtual void write16(u32 offset, u16 value) override; - virtual void write32(u32 offset, u32 value) override; + virtual void write8(u32 offset, ValueWithShadow) override; + virtual void write16(u32 offset, ValueWithShadow) override; + virtual void write32(u32 offset, ValueWithShadow) override; u8* data() { return m_data; } + u8* shadow_data() { return m_shadow_data; } bool is_readable() const { return m_prot & PROT_READ; } bool is_writable() const { return m_prot & PROT_WRITE; } @@ -58,6 +59,7 @@ private: virtual bool is_mmap() const override { return true; } u8* m_data { nullptr }; + u8* m_shadow_data { nullptr }; int m_prot { 0 }; bool m_file_backed { false }; }; diff --git a/DevTools/UserspaceEmulator/SharedBufferRegion.cpp 
b/DevTools/UserspaceEmulator/SharedBufferRegion.cpp index b21338fbff..d6e2e4b1ea 100644 --- a/DevTools/UserspaceEmulator/SharedBufferRegion.cpp +++ b/DevTools/UserspaceEmulator/SharedBufferRegion.cpp @@ -42,46 +42,51 @@ SharedBufferRegion::SharedBufferRegion(u32 base, u32 size, int shbuf_id, u8* hos , m_data(host_data) , m_shbuf_id(shbuf_id) { + m_shadow_data = (u8*)calloc(1, size); } SharedBufferRegion::~SharedBufferRegion() { + free(m_shadow_data); } -u8 SharedBufferRegion::read8(FlatPtr offset) +ValueWithShadow SharedBufferRegion::read8(FlatPtr offset) { ASSERT(offset < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u16 SharedBufferRegion::read16(u32 offset) +ValueWithShadow SharedBufferRegion::read16(u32 offset) { ASSERT(offset + 1 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u32 SharedBufferRegion::read32(u32 offset) +ValueWithShadow SharedBufferRegion::read32(u32 offset) { ASSERT(offset + 3 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -void SharedBufferRegion::write8(u32 offset, u8 value) +void SharedBufferRegion::write8(u32 offset, ValueWithShadow value) { ASSERT(offset < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void SharedBufferRegion::write16(u32 offset, u16 value) +void SharedBufferRegion::write16(u32 offset, ValueWithShadow value) { ASSERT(offset + 1 < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void SharedBufferRegion::write32(u32 offset, u32 value) +void SharedBufferRegion::write32(u32 offset, ValueWithShadow value) { ASSERT(offset + 3 < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } int SharedBufferRegion::allow_all() diff --git a/DevTools/UserspaceEmulator/SharedBufferRegion.h b/DevTools/UserspaceEmulator/SharedBufferRegion.h index 9d27c6d43d..590f83c1d8 100644 --- a/DevTools/UserspaceEmulator/SharedBufferRegion.h +++ b/DevTools/UserspaceEmulator/SharedBufferRegion.h @@ -36,13 +36,13 @@ public: static NonnullOwnPtr create_with_shbuf_id(u32 base, u32 size, int shbuf_id, u8* shbuf_data); virtual ~SharedBufferRegion() override; - virtual u8 read8(u32 offset) override; - virtual u16 read16(u32 offset) override; - virtual u32 read32(u32 offset) override; + virtual ValueWithShadow read8(u32 offset) override; + virtual ValueWithShadow read16(u32 offset) override; + virtual ValueWithShadow read32(u32 offset) override; - virtual void write8(u32 offset, u8 value) override; - virtual void write16(u32 offset, u16 value) override; - virtual void write32(u32 offset, u32 value) override; + virtual void write8(u32 offset, ValueWithShadow) override; + virtual void write16(u32 offset, ValueWithShadow) override; + virtual void write32(u32 offset, ValueWithShadow) override; u8* data() { return m_data; } @@ -60,6 +60,7 @@ private: SharedBufferRegion(u32 base, u32 size, int shbuf_id, u8* shbuf_data); u8* m_data { nullptr }; + u8* m_shadow_data { nullptr }; int m_shbuf_id { 0 }; }; diff --git 
a/DevTools/UserspaceEmulator/SimpleRegion.cpp b/DevTools/UserspaceEmulator/SimpleRegion.cpp index c77ad7eefa..5e74971f5c 100644 --- a/DevTools/UserspaceEmulator/SimpleRegion.cpp +++ b/DevTools/UserspaceEmulator/SimpleRegion.cpp @@ -32,47 +32,52 @@ SimpleRegion::SimpleRegion(u32 base, u32 size) : Region(base, size) { m_data = (u8*)calloc(1, size); + m_shadow_data = (u8*)calloc(1, size); } SimpleRegion::~SimpleRegion() { + free(m_shadow_data); free(m_data); } -u8 SimpleRegion::read8(FlatPtr offset) +ValueWithShadow SimpleRegion::read8(FlatPtr offset) { ASSERT(offset < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u16 SimpleRegion::read16(u32 offset) +ValueWithShadow SimpleRegion::read16(u32 offset) { ASSERT(offset + 1 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -u32 SimpleRegion::read32(u32 offset) +ValueWithShadow SimpleRegion::read32(u32 offset) { ASSERT(offset + 3 < size()); - return *reinterpret_cast(m_data + offset); + return { *reinterpret_cast(m_data + offset), *reinterpret_cast(m_shadow_data + offset) }; } -void SimpleRegion::write8(u32 offset, u8 value) +void SimpleRegion::write8(u32 offset, ValueWithShadow value) { ASSERT(offset < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void SimpleRegion::write16(u32 offset, u16 value) +void SimpleRegion::write16(u32 offset, ValueWithShadow value) { ASSERT(offset + 1 < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } -void SimpleRegion::write32(u32 offset, u32 value) +void SimpleRegion::write32(u32 offset, ValueWithShadow value) { ASSERT(offset + 3 < size()); - *reinterpret_cast(m_data + offset) = value; + *reinterpret_cast(m_data + offset) = value.value(); + *reinterpret_cast(m_shadow_data + offset) = value.shadow(); } u8* SimpleRegion::cacheable_ptr(u32 offset) diff --git a/DevTools/UserspaceEmulator/SimpleRegion.h b/DevTools/UserspaceEmulator/SimpleRegion.h index 6cbe707c04..85055cbe73 100644 --- a/DevTools/UserspaceEmulator/SimpleRegion.h +++ b/DevTools/UserspaceEmulator/SimpleRegion.h @@ -35,20 +35,22 @@ public: SimpleRegion(u32 base, u32 size); virtual ~SimpleRegion() override; - virtual u8 read8(u32 offset) override; - virtual u16 read16(u32 offset) override; - virtual u32 read32(u32 offset) override; + virtual ValueWithShadow read8(u32 offset) override; + virtual ValueWithShadow read16(u32 offset) override; + virtual ValueWithShadow read32(u32 offset) override; - virtual void write8(u32 offset, u8 value) override; - virtual void write16(u32 offset, u16 value) override; - virtual void write32(u32 offset, u32 value) override; + virtual void write8(u32 offset, ValueWithShadow) override; + virtual void write16(u32 offset, ValueWithShadow) override; + virtual void write32(u32 offset, ValueWithShadow) override; u8* data() { return m_data; } + u8* shadow_data() { return m_shadow_data; } virtual u8* cacheable_ptr(u32 offset) override; private: u8* m_data { nullptr }; + u8* m_shadow_data { nullptr }; }; } diff --git a/DevTools/UserspaceEmulator/SoftCPU.cpp b/DevTools/UserspaceEmulator/SoftCPU.cpp index 4135f3281b..0556a2aef6 100644 --- a/DevTools/UserspaceEmulator/SoftCPU.cpp +++ 
b/DevTools/UserspaceEmulator/SoftCPU.cpp @@ -36,19 +36,28 @@ //#define MEMORY_DEBUG -#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op) \ - void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op, insn); } \ - void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op, insn); } \ - void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8(op, insn); } \ - void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op, insn); } \ - void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op, insn); } \ - void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8(op, insn); } \ - void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op, insn); } \ - void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op, insn); } \ - void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8(op, insn); } +#define DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(mnemonic, op) \ + void SoftCPU::mnemonic##_RM8_1(const X86::Instruction& insn) { generic_RM8_1(op>, insn); } \ + void SoftCPU::mnemonic##_RM8_CL(const X86::Instruction& insn) { generic_RM8_CL(op>, insn); } \ + void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_RM16_1(const X86::Instruction& insn) { generic_RM16_1(op>, insn); } \ + void SoftCPU::mnemonic##_RM16_CL(const X86::Instruction& insn) { generic_RM16_CL(op>, insn); } \ + void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_unsigned_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_1(const X86::Instruction& insn) { generic_RM32_1(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_CL(const X86::Instruction& insn) { generic_RM32_CL(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_unsigned_imm8(op>, insn); } namespace UserspaceEmulator { +template +void warn_if_uninitialized(T value_with_shadow, const char* message) +{ + if (value_with_shadow.is_uninitialized()) { + dbgprintf("\033[31;1mWarning! 
Use of uninitialized value: %s\033[0m\n", message); + Emulator::the().dump_backtrace(); + } +} + template inline constexpr T sign_extended_to(U value) { @@ -61,6 +70,7 @@ SoftCPU::SoftCPU(Emulator& emulator) : m_emulator(emulator) { memset(m_gpr, 0, sizeof(m_gpr)); + memset(m_gpr_shadow, 0, sizeof(m_gpr_shadow)); m_segment[(int)X86::SegmentRegister::CS] = 0x18; m_segment[(int)X86::SegmentRegister::DS] = 0x20; @@ -71,9 +81,11 @@ SoftCPU::SoftCPU(Emulator& emulator) void SoftCPU::dump() const { - printf("eax=%08x ebx=%08x ecx=%08x edx=%08x ", eax(), ebx(), ecx(), edx()); - printf("ebp=%08x esp=%08x esi=%08x edi=%08x ", ebp(), esp(), esi(), edi()); + printf("eax=%08x ebx=%08x ecx=%08x edx=%08x ", eax().value(), ebx().value(), ecx().value(), edx().value()); + printf("ebp=%08x esp=%08x esi=%08x edi=%08x ", ebp().value(), esp().value(), esi().value(), edi().value()); printf("o=%u s=%u z=%u a=%u p=%u c=%u\n", of(), sf(), zf(), af(), pf(), cf()); + printf("#ax=%08x #bx=%08x #cx=%08x #dx=%08x ", eax().shadow(), ebx().shadow(), ecx().shadow(), edx().shadow()); + printf("#bp=%08x #sp=%08x #si=%08x #di=%08x\n", ebp().shadow(), esp().shadow(), esi().shadow(), edi().shadow()); } void SoftCPU::did_receive_secret_data() @@ -98,59 +110,59 @@ void SoftCPU::update_code_cache() m_cached_code_end = region->cacheable_ptr(region->size()); } -u8 SoftCPU::read_memory8(X86::LogicalAddress address) +ValueWithShadow SoftCPU::read_memory8(X86::LogicalAddress address) { ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28); auto value = m_emulator.mmu().read8(address); #ifdef MEMORY_DEBUG - printf("\033[36;1mread_memory8: @%08x:%08x -> %02x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[36;1mread_memory8: @%08x:%08x -> %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif return value; } -u16 SoftCPU::read_memory16(X86::LogicalAddress address) +ValueWithShadow SoftCPU::read_memory16(X86::LogicalAddress address) { ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28); auto value = m_emulator.mmu().read16(address); #ifdef MEMORY_DEBUG - printf("\033[36;1mread_memory16: @%04x:%08x -> %04x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[36;1mread_memory16: @%04x:%08x -> %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif return value; } -u32 SoftCPU::read_memory32(X86::LogicalAddress address) +ValueWithShadow SoftCPU::read_memory32(X86::LogicalAddress address) { ASSERT(address.selector() == 0x18 || address.selector() == 0x20 || address.selector() == 0x28); auto value = m_emulator.mmu().read32(address); #ifdef MEMORY_DEBUG - printf("\033[36;1mread_memory32: @%04x:%08x -> %08x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[36;1mread_memory32: @%04x:%08x -> %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif return value; } -void SoftCPU::write_memory8(X86::LogicalAddress address, u8 value) +void SoftCPU::write_memory8(X86::LogicalAddress address, ValueWithShadow value) { ASSERT(address.selector() == 0x20 || address.selector() == 0x28); #ifdef MEMORY_DEBUG - printf("\033[35;1mwrite_memory8: @%04x:%08x <- %02x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[35;1mwrite_memory8: @%04x:%08x <- %02x (%02x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif 
m_emulator.mmu().write8(address, value); } -void SoftCPU::write_memory16(X86::LogicalAddress address, u16 value) +void SoftCPU::write_memory16(X86::LogicalAddress address, ValueWithShadow value) { ASSERT(address.selector() == 0x20 || address.selector() == 0x28); #ifdef MEMORY_DEBUG - printf("\033[35;1mwrite_memory16: @%04x:%08x <- %04x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[35;1mwrite_memory16: @%04x:%08x <- %04x (%04x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif m_emulator.mmu().write16(address, value); } -void SoftCPU::write_memory32(X86::LogicalAddress address, u32 value) +void SoftCPU::write_memory32(X86::LogicalAddress address, ValueWithShadow value) { ASSERT(address.selector() == 0x20 || address.selector() == 0x28); #ifdef MEMORY_DEBUG - printf("\033[35;1mwrite_memory32: @%04x:%08x <- %08x\033[0m\n", address.selector(), address.offset(), value); + printf("\033[35;1mwrite_memory32: @%04x:%08x <- %08x (%08x)\033[0m\n", address.selector(), address.offset(), value.value(), value.shadow()); #endif m_emulator.mmu().write32(address, value); } @@ -158,34 +170,38 @@ void SoftCPU::write_memory32(X86::LogicalAddress address, u32 value) void SoftCPU::push_string(const StringView& string) { size_t space_to_allocate = round_up_to_power_of_two(string.length() + 1, 16); - set_esp(esp() - space_to_allocate); - m_emulator.mmu().copy_to_vm(esp(), string.characters_without_null_termination(), string.length()); - m_emulator.mmu().write8({ 0x20, esp() + string.length() }, '\0'); + set_esp({ esp().value() - space_to_allocate, esp().shadow() }); + m_emulator.mmu().copy_to_vm(esp().value(), string.characters_without_null_termination(), string.length()); + m_emulator.mmu().write8({ 0x20, esp().value() + string.length() }, shadow_wrap_as_initialized((u8)'\0')); } -void SoftCPU::push32(u32 value) +void SoftCPU::push32(ValueWithShadow value) { - set_esp(esp() - sizeof(value)); - write_memory32({ ss(), esp() }, value); + set_esp({ esp().value() - sizeof(u32), esp().shadow() }); + warn_if_uninitialized(esp(), "push32"); + write_memory32({ ss(), esp().value() }, value); } -u32 SoftCPU::pop32() +ValueWithShadow SoftCPU::pop32() { - auto value = read_memory32({ ss(), esp() }); - set_esp(esp() + sizeof(value)); + warn_if_uninitialized(esp(), "pop32"); + auto value = read_memory32({ ss(), esp().value() }); + set_esp({ esp().value() + sizeof(u32), esp().shadow() }); return value; } -void SoftCPU::push16(u16 value) +void SoftCPU::push16(ValueWithShadow value) { - set_esp(esp() - sizeof(value)); - write_memory16({ ss(), esp() }, value); + warn_if_uninitialized(esp(), "push16"); + set_esp({ esp().value() - sizeof(u16), esp().shadow() }); + write_memory16({ ss(), esp().value() }, value); } -u16 SoftCPU::pop16() +ValueWithShadow SoftCPU::pop16() { - auto value = read_memory16({ ss(), esp() }); - set_esp(esp() + sizeof(value)); + warn_if_uninitialized(esp(), "pop16"); + auto value = read_memory16({ ss(), esp().value() }); + set_esp({ esp().value() + sizeof(u16), esp().shadow() }); return value; } @@ -195,7 +211,7 @@ void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback) if (!insn.has_rep_prefix()) return callback(); - while (loop_index(insn.a32())) { + while (loop_index(insn.a32()).value()) { callback(); decrement_loop_index(insn.a32()); if constexpr (check_zf) { @@ -210,21 +226,21 @@ void SoftCPU::do_once_or_repeat(const X86::Instruction& insn, Callback callback) template ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data) 
{ - T result = 0; + typename T::ValueType result; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("incl %%eax\n" : "=a"(result) - : "a"(data)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("incw %%ax\n" : "=a"(result) - : "a"(data)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("incb %%al\n" : "=a"(result) - : "a"(data)); + : "a"(data.value())); } asm volatile( @@ -233,27 +249,27 @@ ALWAYS_INLINE static T op_inc(SoftCPU& cpu, T data) : "=b"(new_flags)); cpu.set_flags_oszap(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data); } template ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data) { - T result = 0; + typename T::ValueType result; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("decl %%eax\n" : "=a"(result) - : "a"(data)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("decw %%ax\n" : "=a"(result) - : "a"(data)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("decb %%al\n" : "=a"(result) - : "a"(data)); + : "a"(data.value())); } asm volatile( @@ -262,27 +278,27 @@ ALWAYS_INLINE static T op_dec(SoftCPU& cpu, T data) : "=b"(new_flags)); cpu.set_flags_oszap(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data); } template ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src) { - T result = 0; + typename T::ValueType result; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("xorl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("xor %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("xorb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -293,27 +309,27 @@ ALWAYS_INLINE static T op_xor(SoftCPU& cpu, const T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszpc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("orl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("or %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("orb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), 
"c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -324,27 +340,27 @@ ALWAYS_INLINE static T op_or(SoftCPU& cpu, const T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszpc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("subl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("subw %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("subb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -355,13 +371,13 @@ ALWAYS_INLINE static T op_sub(SoftCPU& cpu, const T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; if constexpr (cf) @@ -369,18 +385,18 @@ ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src) else asm volatile("clc"); - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("sbbl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("sbbw %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("sbbb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -391,7 +407,7 @@ ALWAYS_INLINE static T op_sbb_impl(SoftCPU& cpu, const T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template @@ -405,21 +421,21 @@ ALWAYS_INLINE static T op_sbb(SoftCPU& cpu, T& dest, const T& src) template ALWAYS_INLINE static T op_add(SoftCPU& cpu, T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("addl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("addw %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("addb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -430,13 +446,13 @@ ALWAYS_INLINE static T 
op_add(SoftCPU& cpu, T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; if constexpr (cf) @@ -444,18 +460,18 @@ ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src) else asm volatile("clc"); - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("adcl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("adcw %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("adcb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -466,7 +482,7 @@ ALWAYS_INLINE static T op_adc_impl(SoftCPU& cpu, T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template @@ -480,21 +496,21 @@ ALWAYS_INLINE static T op_adc(SoftCPU& cpu, T& dest, const T& src) template ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src) { - T result = 0; + typename T::ValueType result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("andl %%ecx, %%eax\n" : "=a"(result) - : "a"(dest), "c"((u32)src)); - } else if constexpr (sizeof(T) == 2) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("andw %%cx, %%ax\n" : "=a"(result) - : "a"(dest), "c"((u16)src)); - } else if constexpr (sizeof(T) == 1) { + : "a"(dest.value()), "c"(src.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("andb %%cl, %%al\n" : "=a"(result) - : "a"(dest), "c"((u8)src)); + : "a"(dest.value()), "c"(src.value())); } else { ASSERT_NOT_REACHED(); } @@ -505,7 +521,7 @@ ALWAYS_INLINE static T op_and(SoftCPU& cpu, const T& dest, const T& src) : "=b"(new_flags)); cpu.set_flags_oszpc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, dest, src); } template @@ -539,26 +555,26 @@ ALWAYS_INLINE static void op_imul(SoftCPU& cpu, const T& dest, const T& src, T& } template -ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("shrl %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("shrw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("shrb %%cl, %%al\n" : "=a"(result) - : 
"a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -567,30 +583,30 @@ ALWAYS_INLINE static T op_shr(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template -ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("shll %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("shlw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("shlb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -599,26 +615,26 @@ ALWAYS_INLINE static T op_shl(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template -ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, u8 steps) +ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("shrd %%cl, %%edx, %%eax\n" : "=a"(result) - : "a"(data), "d"(extra_bits), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("shrd %%cl, %%dx, %%ax\n" : "=a"(result) - : "a"(data), "d"(extra_bits), "c"(steps)); + : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value())); } asm volatile( @@ -627,26 +643,26 @@ ALWAYS_INLINE static T op_shrd(SoftCPU& cpu, T data, T extra_bits, u8 steps) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template -ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, u8 steps) +ALWAYS_INLINE static T op_shld(SoftCPU& cpu, T data, T extra_bits, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("shld %%cl, %%edx, %%eax\n" : "=a"(result) - : "a"(data), "d"(extra_bits), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("shld %%cl, %%dx, %%ax\n" : "=a"(result) - : "a"(data), "d"(extra_bits), "c"(steps)); + : "a"(data.value()), "d"(extra_bits.value()), "c"(steps.value())); } asm volatile( @@ -655,14 +671,14 @@ ALWAYS_INLINE static T op_shld(SoftCPU& cpu, 
T data, T extra_bits, u8 steps) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template ALWAYS_INLINE void SoftCPU::generic_AL_imm8(Op op, const X86::Instruction& insn) { auto dest = al(); - auto src = insn.imm8(); + auto src = shadow_wrap_as_initialized(insn.imm8()); auto result = op(*this, dest, src); if (update_dest) set_al(result); @@ -672,7 +688,7 @@ template ALWAYS_INLINE void SoftCPU::generic_AX_imm16(Op op, const X86::Instruction& insn) { auto dest = ax(); - auto src = insn.imm16(); + auto src = shadow_wrap_as_initialized(insn.imm16()); auto result = op(*this, dest, src); if (update_dest) set_ax(result); @@ -682,7 +698,7 @@ template ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& insn) { auto dest = eax(); - auto src = insn.imm32(); + auto src = shadow_wrap_as_initialized(insn.imm32()); auto result = op(*this, dest, src); if (update_dest) set_eax(result); @@ -691,8 +707,8 @@ ALWAYS_INLINE void SoftCPU::generic_EAX_imm32(Op op, const X86::Instruction& ins template ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read16(*this, insn); - auto src = insn.imm16(); + auto dest = insn.modrm().read16>(*this, insn); + auto src = shadow_wrap_as_initialized(insn.imm16()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write16(*this, insn, result); @@ -701,8 +717,18 @@ ALWAYS_INLINE void SoftCPU::generic_RM16_imm16(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read16(*this, insn); - auto src = sign_extended_to(insn.imm8()); + auto dest = insn.modrm().read16>(*this, insn); + auto src = shadow_wrap_as_initialized(sign_extended_to(insn.imm8())); + auto result = op(*this, dest, src); + if (update_dest) + insn.modrm().write16(*this, insn, result); +} + +template +ALWAYS_INLINE void SoftCPU::generic_RM16_unsigned_imm8(Op op, const X86::Instruction& insn) +{ + auto dest = insn.modrm().read16>(*this, insn); + auto src = shadow_wrap_as_initialized(insn.imm8()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write16(*this, insn, result); @@ -711,8 +737,8 @@ ALWAYS_INLINE void SoftCPU::generic_RM16_imm8(Op op, const X86::Instruction& ins template ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read16(*this, insn); - auto src = gpr16(insn.reg16()); + auto dest = insn.modrm().read16>(*this, insn); + auto src = const_gpr16(insn.reg16()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write16(*this, insn, result); @@ -721,9 +747,9 @@ ALWAYS_INLINE void SoftCPU::generic_RM16_reg16(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read32(*this, insn); + auto dest = insn.modrm().read32>(*this, insn); auto src = insn.imm32(); - auto result = op(*this, dest, src); + auto result = op(*this, dest, shadow_wrap_as_initialized(src)); if (update_dest) insn.modrm().write32(*this, insn, result); } @@ -731,8 +757,18 @@ ALWAYS_INLINE void SoftCPU::generic_RM32_imm32(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read32(*this, insn); + auto dest = insn.modrm().read32>(*this, insn); auto src = 
sign_extended_to(insn.imm8()); + auto result = op(*this, dest, shadow_wrap_as_initialized(src)); + if (update_dest) + insn.modrm().write32(*this, insn, result); +} + +template +ALWAYS_INLINE void SoftCPU::generic_RM32_unsigned_imm8(Op op, const X86::Instruction& insn) +{ + auto dest = insn.modrm().read32>(*this, insn); + auto src = shadow_wrap_as_initialized(insn.imm8()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write32(*this, insn, result); @@ -741,8 +777,8 @@ ALWAYS_INLINE void SoftCPU::generic_RM32_imm8(Op op, const X86::Instruction& ins template ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read32(*this, insn); - auto src = gpr32(insn.reg32()); + auto dest = insn.modrm().read32>(*this, insn); + auto src = const_gpr32(insn.reg32()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write32(*this, insn, result); @@ -751,9 +787,9 @@ ALWAYS_INLINE void SoftCPU::generic_RM32_reg32(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read8(*this, insn); + auto dest = insn.modrm().read8>(*this, insn); auto src = insn.imm8(); - auto result = op(*this, dest, src); + auto result = op(*this, dest, shadow_wrap_as_initialized(src)); if (update_dest) insn.modrm().write8(*this, insn, result); } @@ -761,8 +797,8 @@ ALWAYS_INLINE void SoftCPU::generic_RM8_imm8(Op op, const X86::Instruction& insn template ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn) { - auto dest = insn.modrm().read8(*this, insn); - auto src = gpr8(insn.reg8()); + auto dest = insn.modrm().read8>(*this, insn); + auto src = const_gpr8(insn.reg8()); auto result = op(*this, dest, src); if (update_dest) insn.modrm().write8(*this, insn, result); @@ -771,8 +807,8 @@ ALWAYS_INLINE void SoftCPU::generic_RM8_reg8(Op op, const X86::Instruction& insn template ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& insn) { - auto dest = gpr16(insn.reg16()); - auto src = insn.modrm().read16(*this, insn); + auto dest = const_gpr16(insn.reg16()); + auto src = insn.modrm().read16>(*this, insn); auto result = op(*this, dest, src); if (update_dest) gpr16(insn.reg16()) = result; @@ -781,8 +817,8 @@ ALWAYS_INLINE void SoftCPU::generic_reg16_RM16(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& insn) { - auto dest = gpr32(insn.reg32()); - auto src = insn.modrm().read32(*this, insn); + auto dest = const_gpr32(insn.reg32()); + auto src = insn.modrm().read32>(*this, insn); auto result = op(*this, dest, src); if (update_dest) gpr32(insn.reg32()) = result; @@ -791,8 +827,8 @@ ALWAYS_INLINE void SoftCPU::generic_reg32_RM32(Op op, const X86::Instruction& in template ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn) { - auto dest = gpr8(insn.reg8()); - auto src = insn.modrm().read8(*this, insn); + auto dest = const_gpr8(insn.reg8()); + auto src = insn.modrm().read8>(*this, insn); auto result = op(*this, dest, src); if (update_dest) gpr8(insn.reg8()) = result; @@ -801,42 +837,42 @@ ALWAYS_INLINE void SoftCPU::generic_reg8_RM8(Op op, const X86::Instruction& insn template ALWAYS_INLINE void SoftCPU::generic_RM8_1(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read8(*this, insn); - insn.modrm().write8(*this, insn, op(*this, data, 1)); + auto data = insn.modrm().read8>(*this, insn); + 
insn.modrm().write8(*this, insn, op(*this, data, shadow_wrap_as_initialized(1))); } template ALWAYS_INLINE void SoftCPU::generic_RM8_CL(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read8(*this, insn); + auto data = insn.modrm().read8>(*this, insn); insn.modrm().write8(*this, insn, op(*this, data, cl())); } template ALWAYS_INLINE void SoftCPU::generic_RM16_1(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read16(*this, insn); - insn.modrm().write16(*this, insn, op(*this, data, 1)); + auto data = insn.modrm().read16>(*this, insn); + insn.modrm().write16(*this, insn, op(*this, data, shadow_wrap_as_initialized(1))); } template ALWAYS_INLINE void SoftCPU::generic_RM16_CL(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read16(*this, insn); + auto data = insn.modrm().read16>(*this, insn); insn.modrm().write16(*this, insn, op(*this, data, cl())); } template ALWAYS_INLINE void SoftCPU::generic_RM32_1(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read32(*this, insn); - insn.modrm().write32(*this, insn, op(*this, data, 1)); + auto data = insn.modrm().read32>(*this, insn); + insn.modrm().write32(*this, insn, op(*this, data, shadow_wrap_as_initialized(1))); } template ALWAYS_INLINE void SoftCPU::generic_RM32_CL(Op op, const X86::Instruction& insn) { - auto data = insn.modrm().read32(*this, insn); + auto data = insn.modrm().read32>(*this, insn); insn.modrm().write32(*this, insn, op(*this, data, cl())); } @@ -848,63 +884,67 @@ void SoftCPU::ARPL(const X86::Instruction&) { TODO(); } void SoftCPU::BOUND(const X86::Instruction&) { TODO(); } template -ALWAYS_INLINE static unsigned op_bsf(SoftCPU&, T value) +ALWAYS_INLINE static T op_bsf(SoftCPU&, T value) { - return __builtin_ctz(value); + return { (typename T::ValueType)__builtin_ctz(value.value()), value.shadow() }; } template -ALWAYS_INLINE static unsigned op_bsr(SoftCPU&, T value) +ALWAYS_INLINE static T op_bsr(SoftCPU&, T value) { - T bit_index = 0; - if constexpr (sizeof(T) == 4) { + typename T::ValueType bit_index = 0; + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("bsrl %%eax, %%edx" : "=d"(bit_index) - : "a"(value)); + : "a"(value.value())); } - if constexpr (sizeof(T) == 2) { + if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("bsrw %%ax, %%dx" : "=d"(bit_index) - : "a"(value)); + : "a"(value.value())); } - return bit_index; + return shadow_wrap_with_taint_from(bit_index, value); } void SoftCPU::BSF_reg16_RM16(const X86::Instruction& insn) { - auto src = insn.modrm().read16(*this, insn); - set_zf(!src); - if (src) + auto src = insn.modrm().read16>(*this, insn); + // FIXME: Shadow flags. + set_zf(!src.value()); + if (src.value()) gpr16(insn.reg16()) = op_bsf(*this, src); } void SoftCPU::BSF_reg32_RM32(const X86::Instruction& insn) { - auto src = insn.modrm().read32(*this, insn); - set_zf(!src); - if (src) - gpr32(insn.reg32()) = op_bsf(*this, insn.modrm().read32(*this, insn)); + auto src = insn.modrm().read32>(*this, insn); + // FIXME: Shadow flags. + set_zf(!src.value()); + if (src.value()) + gpr32(insn.reg32()) = op_bsf(*this, insn.modrm().read32>(*this, insn)); } void SoftCPU::BSR_reg16_RM16(const X86::Instruction& insn) { - auto src = insn.modrm().read16(*this, insn); - set_zf(!src); - if (src) - gpr16(insn.reg16()) = op_bsr(*this, insn.modrm().read16(*this, insn)); + auto src = insn.modrm().read16>(*this, insn); + // FIXME: Shadow flags. 
+ set_zf(!src.value()); + if (src.value()) + gpr16(insn.reg16()) = op_bsr(*this, insn.modrm().read16>(*this, insn)); } void SoftCPU::BSR_reg32_RM32(const X86::Instruction& insn) { - auto src = insn.modrm().read32(*this, insn); - set_zf(!src); - if (src) - gpr32(insn.reg32()) = op_bsr(*this, insn.modrm().read32(*this, insn)); + auto src = insn.modrm().read32>(*this, insn); + // FIXME: Shadow flags. + set_zf(!src.value()); + if (src.value()) + gpr32(insn.reg32()) = op_bsr(*this, insn.modrm().read32>(*this, insn)); } void SoftCPU::BSWAP_reg32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = __builtin_bswap32(gpr32(insn.reg32())); + gpr32(insn.reg32()) = { __builtin_bswap32(gpr32(insn.reg32()).value()), __builtin_bswap32(gpr32(insn.reg32()).shadow()) }; } template @@ -935,52 +975,56 @@ template ALWAYS_INLINE void BTx_RM16_reg16(SoftCPU& cpu, const X86::Instruction& insn, Op op) { if (insn.modrm().is_register()) { - unsigned bit_index = cpu.gpr16(insn.reg16()) & (X86::TypeTrivia::bits - 1); - u16 original = insn.modrm().read16(cpu, insn); + unsigned bit_index = cpu.const_gpr16(insn.reg16()).value() & (X86::TypeTrivia::bits - 1); + auto original = insn.modrm().read16>(cpu, insn); u16 bit_mask = 1 << bit_index; - u16 result = op(original, bit_mask); - cpu.set_cf((original & bit_mask) != 0); + u16 result = op(original.value(), bit_mask); + // FIXME: Shadow flags. + cpu.set_cf((original.value() & bit_mask) != 0); if (should_update) - insn.modrm().write16(cpu, insn, result); + insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), original)); return; } // FIXME: Is this supposed to perform a full 16-bit read/modify/write? - unsigned bit_offset_in_array = cpu.gpr16(insn.reg16()) / 8; - unsigned bit_offset_in_byte = cpu.gpr16(insn.reg16()) & 7; + unsigned bit_offset_in_array = cpu.const_gpr16(insn.reg16()).value() / 8; + unsigned bit_offset_in_byte = cpu.const_gpr16(insn.reg16()).value() & 7; auto address = insn.modrm().resolve(cpu, insn); address.set_offset(address.offset() + bit_offset_in_array); - u8 dest = cpu.read_memory8(address); + auto dest = cpu.read_memory8(address); u8 bit_mask = 1 << bit_offset_in_byte; - u8 result = op(dest, bit_mask); - cpu.set_cf((dest & bit_mask) != 0); + u8 result = op(dest.value(), bit_mask); + // FIXME: Shadow flags. + cpu.set_cf((dest.value() & bit_mask) != 0); if (should_update) - cpu.write_memory8(address, result); + cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr16(insn.reg16()), dest)); } template ALWAYS_INLINE void BTx_RM32_reg32(SoftCPU& cpu, const X86::Instruction& insn, Op op) { if (insn.modrm().is_register()) { - unsigned bit_index = cpu.gpr32(insn.reg32()) & (X86::TypeTrivia::bits - 1); - u32 original = insn.modrm().read32(cpu, insn); + unsigned bit_index = cpu.const_gpr32(insn.reg32()).value() & (X86::TypeTrivia::bits - 1); + auto original = insn.modrm().read32>(cpu, insn); u32 bit_mask = 1 << bit_index; - u32 result = op(original, bit_mask); - cpu.set_cf((original & bit_mask) != 0); + u32 result = op(original.value(), bit_mask); + // FIXME: Shadow flags. + cpu.set_cf((original.value() & bit_mask) != 0); if (should_update) - insn.modrm().write32(cpu, insn, result); + insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), original)); return; } // FIXME: Is this supposed to perform a full 32-bit read/modify/write? 
- unsigned bit_offset_in_array = cpu.gpr32(insn.reg32()) / 8; - unsigned bit_offset_in_byte = cpu.gpr32(insn.reg32()) & 7; + unsigned bit_offset_in_array = cpu.const_gpr32(insn.reg32()).value() / 8; + unsigned bit_offset_in_byte = cpu.const_gpr32(insn.reg32()).value() & 7; auto address = insn.modrm().resolve(cpu, insn); address.set_offset(address.offset() + bit_offset_in_array); - u8 dest = cpu.read_memory8(address); + auto dest = cpu.read_memory8(address); u8 bit_mask = 1 << bit_offset_in_byte; - u8 result = op(dest, bit_mask); - cpu.set_cf((dest & bit_mask) != 0); + u8 result = op(dest.value(), bit_mask); + // FIXME: Shadow flags. + cpu.set_cf((dest.value() & bit_mask) != 0); if (should_update) - cpu.write_memory8(address, result); + cpu.write_memory8(address, shadow_wrap_with_taint_from(result, cpu.gpr32(insn.reg32()), dest)); } template @@ -991,12 +1035,12 @@ ALWAYS_INLINE void BTx_RM16_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op // FIXME: Support higher bit indices ASSERT(bit_index < 16); - u16 original = insn.modrm().read16(cpu, insn); + auto original = insn.modrm().read16>(cpu, insn); u16 bit_mask = 1 << bit_index; - u16 result = op(original, bit_mask); - cpu.set_cf((original & bit_mask) != 0); + auto result = op(original.value(), bit_mask); // FIXME: Shadow flags. + cpu.set_cf((original.value() & bit_mask) != 0); if (should_update) - insn.modrm().write16(cpu, insn, result); + insn.modrm().write16(cpu, insn, shadow_wrap_with_taint_from(result, original)); } template @@ -1007,12 +1051,13 @@ ALWAYS_INLINE void BTx_RM32_imm8(SoftCPU& cpu, const X86::Instruction& insn, Op // FIXME: Support higher bit indices ASSERT(bit_index < 32); - u32 original = insn.modrm().read32(cpu, insn); + auto original = insn.modrm().read32>(cpu, insn); u32 bit_mask = 1 << bit_index; - u32 result = op(original, bit_mask); - cpu.set_cf((original & bit_mask) != 0); + auto result = op(original.value(), bit_mask); + // FIXME: Shadow flags. + cpu.set_cf((original.value() & bit_mask) != 0); if (should_update) - insn.modrm().write32(cpu, insn, result); + insn.modrm().write32(cpu, insn, shadow_wrap_with_taint_from(result, original)); } #define DEFINE_GENERIC_BTx_INSN_HANDLERS(mnemonic, op, update_dest) \ @@ -1035,8 +1080,10 @@ void SoftCPU::CALL_RM16(const X86::Instruction&) { TODO(); } void SoftCPU::CALL_RM32(const X86::Instruction& insn) { - push32(eip()); - set_eip(insn.modrm().read32(*this, insn)); + push32(shadow_wrap_as_initialized(eip())); + auto address = insn.modrm().read32>(*this, insn); + warn_if_uninitialized(address, "call rm32"); + set_eip(address.value()); } void SoftCPU::CALL_imm16(const X86::Instruction&) { TODO(); } @@ -1045,21 +1092,21 @@ void SoftCPU::CALL_imm16_imm32(const X86::Instruction&) { TODO(); } void SoftCPU::CALL_imm32(const X86::Instruction& insn) { - push32(eip()); + push32(shadow_wrap_as_initialized(eip())); set_eip(eip() + (i32)insn.imm32()); } void SoftCPU::CBW(const X86::Instruction&) { - set_ah((al() & 0x80) ? 0xff : 0x00); + set_ah(shadow_wrap_with_taint_from((al().value() & 0x80) ? 
0xff : 0x00, al())); } void SoftCPU::CDQ(const X86::Instruction&) { - if (eax() & 0x80000000) - set_edx(0xffffffff); + if (eax().value() & 0x80000000) + set_edx(shadow_wrap_with_taint_from(0xffffffff, eax())); else - set_edx(0x00000000); + set_edx(shadow_wrap_with_taint_from(0, eax())); } void SoftCPU::CLC(const X86::Instruction&) @@ -1079,13 +1126,13 @@ void SoftCPU::CMC(const X86::Instruction&) { TODO(); } void SoftCPU::CMOVcc_reg16_RM16(const X86::Instruction& insn) { if (evaluate_condition(insn.cc())) - gpr16(insn.reg16()) = insn.modrm().read16(*this, insn); + gpr16(insn.reg16()) = insn.modrm().read16>(*this, insn); } void SoftCPU::CMOVcc_reg32_RM32(const X86::Instruction& insn) { if (evaluate_condition(insn.cc())) - gpr32(insn.reg32()) = insn.modrm().read32(*this, insn); + gpr32(insn.reg32()) = insn.modrm().read32>(*this, insn); } template @@ -1093,8 +1140,8 @@ ALWAYS_INLINE static void do_cmps(SoftCPU& cpu, const X86::Instruction& insn) { auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)); cpu.do_once_or_repeat(insn, [&] { - auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()) }); - auto dest = cpu.read_memory({ cpu.es(), cpu.destination_index(insn.a32()) }); + auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()).value() }); + auto dest = cpu.read_memory({ cpu.es(), cpu.destination_index(insn.a32()).value() }); op_sub(cpu, dest, src); cpu.step_source_index(insn.a32(), sizeof(T)); cpu.step_destination_index(insn.a32(), sizeof(T)); @@ -1118,22 +1165,24 @@ void SoftCPU::CMPSW(const X86::Instruction& insn) void SoftCPU::CMPXCHG_RM16_reg16(const X86::Instruction& insn) { - auto current = insn.modrm().read16(*this, insn); - if (current == ax()) { + auto current = insn.modrm().read16>(*this, insn); + // FIXME: Shadow flags. + if (current.value() == ax().value()) { set_zf(true); - insn.modrm().write16(*this, insn, gpr16(insn.reg16())); + insn.modrm().write16(*this, insn, const_gpr16(insn.reg16())); } else { set_zf(false); - set_eax(current); + set_ax(current); } } void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn) { - auto current = insn.modrm().read32(*this, insn); - if (current == eax()) { + auto current = insn.modrm().read32>(*this, insn); + // FIXME: Shadow flags. + if (current.value() == eax().value()) { set_zf(true); - insn.modrm().write32(*this, insn, gpr32(insn.reg32())); + insn.modrm().write32(*this, insn, const_gpr32(insn.reg32())); } else { set_zf(false); set_eax(current); @@ -1142,13 +1191,14 @@ void SoftCPU::CMPXCHG_RM32_reg32(const X86::Instruction& insn) void SoftCPU::CMPXCHG_RM8_reg8(const X86::Instruction& insn) { - auto current = insn.modrm().read8(*this, insn); - if (current == al()) { + auto current = insn.modrm().read8>(*this, insn); + // FIXME: Shadow flags. + if (current.value() == al().value()) { set_zf(true); - insn.modrm().write8(*this, insn, gpr8(insn.reg8())); + insn.modrm().write8(*this, insn, const_gpr8(insn.reg8())); } else { set_zf(false); - set_eax(current); + set_al(current); } } @@ -1156,12 +1206,12 @@ void SoftCPU::CPUID(const X86::Instruction&) { TODO(); } void SoftCPU::CWD(const X86::Instruction&) { - set_dx((ax() & 0x8000) ? 0xffff : 0x0000); + set_dx(shadow_wrap_with_taint_from((ax().value() & 0x8000) ? 
0xffff : 0x0000, ax())); } void SoftCPU::CWDE(const X86::Instruction&) { - set_eax(sign_extended_to(ax())); + set_eax(shadow_wrap_with_taint_from(sign_extended_to(ax().value()), ax())); } void SoftCPU::DAA(const X86::Instruction&) { TODO(); } @@ -1169,81 +1219,89 @@ void SoftCPU::DAS(const X86::Instruction&) { TODO(); } void SoftCPU::DEC_RM16(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16(*this, insn))); + insn.modrm().write16(*this, insn, op_dec(*this, insn.modrm().read16>(*this, insn))); } void SoftCPU::DEC_RM32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32(*this, insn))); + insn.modrm().write32(*this, insn, op_dec(*this, insn.modrm().read32>(*this, insn))); } void SoftCPU::DEC_RM8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8(*this, insn))); + insn.modrm().write8(*this, insn, op_dec(*this, insn.modrm().read8>(*this, insn))); } void SoftCPU::DEC_reg16(const X86::Instruction& insn) { - gpr16(insn.reg16()) = op_dec(*this, gpr16(insn.reg16())); + gpr16(insn.reg16()) = op_dec(*this, const_gpr16(insn.reg16())); } void SoftCPU::DEC_reg32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = op_dec(*this, gpr32(insn.reg32())); + gpr32(insn.reg32()) = op_dec(*this, const_gpr32(insn.reg32())); } void SoftCPU::DIV_RM16(const X86::Instruction& insn) { - auto divisor = insn.modrm().read16(*this, insn); - if (divisor == 0) { + auto divisor = insn.modrm().read16>(*this, insn); + if (divisor.value() == 0) { warn() << "Divide by zero"; TODO(); } - u32 dividend = ((u32)dx() << 16) | ax(); - auto result = dividend / divisor; - if (result > NumericLimits::max()) { + u32 dividend = ((u32)dx().value() << 16) | ax().value(); + auto quotient = dividend / divisor.value(); + if (quotient > NumericLimits::max()) { warn() << "Divide overflow"; TODO(); } - set_ax(result); - set_dx(dividend % divisor); + auto remainder = dividend % divisor.value(); + auto original_ax = ax(); + + set_ax(shadow_wrap_with_taint_from(quotient, original_ax, dx())); + set_dx(shadow_wrap_with_taint_from(remainder, original_ax, dx())); } void SoftCPU::DIV_RM32(const X86::Instruction& insn) { - auto divisor = insn.modrm().read32(*this, insn); - if (divisor == 0) { + auto divisor = insn.modrm().read32>(*this, insn); + if (divisor.value() == 0) { warn() << "Divide by zero"; TODO(); } - u64 dividend = ((u64)edx() << 32) | eax(); - auto result = dividend / divisor; - if (result > NumericLimits::max()) { + u64 dividend = ((u64)edx().value() << 32) | eax().value(); + auto quotient = dividend / divisor.value(); + if (quotient > NumericLimits::max()) { warn() << "Divide overflow"; TODO(); } - set_eax(result); - set_edx(dividend % divisor); + auto remainder = dividend % divisor.value(); + auto original_eax = eax(); + + set_eax(shadow_wrap_with_taint_from(quotient, original_eax, edx(), divisor)); + set_edx(shadow_wrap_with_taint_from(remainder, original_eax, edx(), divisor)); } void SoftCPU::DIV_RM8(const X86::Instruction& insn) { - auto divisor = insn.modrm().read8(*this, insn); - if (divisor == 0) { + auto divisor = insn.modrm().read8>(*this, insn); + if (divisor.value() == 0) { warn() << "Divide by zero"; TODO(); } - u16 dividend = ax(); - auto result = dividend / divisor; - if (result > NumericLimits::max()) { + u16 dividend = ax().value(); + auto quotient = dividend / divisor.value(); + if (quotient > NumericLimits::max()) { warn() << "Divide overflow"; TODO(); } - set_al(result); - 
set_ah(dividend % divisor); + auto remainder = dividend % divisor.value(); + auto original_ax = ax(); + set_al(shadow_wrap_with_taint_from(quotient, original_ax, divisor)); + set_ah(shadow_wrap_with_taint_from(remainder, original_ax, divisor)); } void SoftCPU::ENTER16(const X86::Instruction&) { TODO(); } @@ -1260,141 +1318,171 @@ void SoftCPU::HLT(const X86::Instruction&) { TODO(); } void SoftCPU::IDIV_RM16(const X86::Instruction& insn) { - auto divisor = (i16)insn.modrm().read16(*this, insn); + auto divisor_with_shadow = insn.modrm().read16>(*this, insn); + auto divisor = (i16)divisor_with_shadow.value(); if (divisor == 0) { warn() << "Divide by zero"; TODO(); } - i32 dividend = (i32)(((u32)dx() << 16) | (u32)ax()); + i32 dividend = (i32)(((u32)dx().value() << 16) | (u32)ax().value()); i32 result = dividend / divisor; if (result > NumericLimits::max() || result < NumericLimits::min()) { warn() << "Divide overflow"; TODO(); } - set_ax(result); - set_dx(dividend % divisor); + auto original_ax = ax(); + set_ax(shadow_wrap_with_taint_from(result, original_ax, dx(), divisor_with_shadow)); + set_dx(shadow_wrap_with_taint_from(dividend % divisor, original_ax, dx(), divisor_with_shadow)); } void SoftCPU::IDIV_RM32(const X86::Instruction& insn) { - auto divisor = (i32)insn.modrm().read32(*this, insn); + auto divisor_with_shadow = insn.modrm().read32>(*this, insn); + auto divisor = (i32)divisor_with_shadow.value(); if (divisor == 0) { warn() << "Divide by zero"; TODO(); } - i64 dividend = (i64)(((u64)edx() << 32) | (u64)eax()); + i64 dividend = (i64)(((u64)edx().value() << 32) | (u64)eax().value()); i64 result = dividend / divisor; if (result > NumericLimits::max() || result < NumericLimits::min()) { warn() << "Divide overflow"; TODO(); } - set_eax(result); - set_edx(dividend % divisor); + auto original_eax = eax(); + set_eax(shadow_wrap_with_taint_from(result, original_eax, edx(), divisor_with_shadow)); + set_edx(shadow_wrap_with_taint_from(dividend % divisor, original_eax, edx(), divisor_with_shadow)); } void SoftCPU::IDIV_RM8(const X86::Instruction& insn) { - auto divisor = (i8)insn.modrm().read8(*this, insn); + auto divisor_with_shadow = insn.modrm().read8>(*this, insn); + auto divisor = (i8)divisor_with_shadow.value(); if (divisor == 0) { warn() << "Divide by zero"; TODO(); } - i16 dividend = ax(); + i16 dividend = ax().value(); i16 result = dividend / divisor; if (result > NumericLimits::max() || result < NumericLimits::min()) { warn() << "Divide overflow"; TODO(); } - set_al(result); - set_ah(dividend % divisor); + auto original_ax = ax(); + set_al(shadow_wrap_with_taint_from(result, divisor_with_shadow, original_ax)); + set_ah(shadow_wrap_with_taint_from(dividend % divisor, divisor_with_shadow, original_ax)); } void SoftCPU::IMUL_RM16(const X86::Instruction& insn) { - op_imul(*this, insn.modrm().read16(*this, insn), ax(), (i16&)gpr16(X86::RegisterDX), (i16&)gpr16(X86::RegisterAX)); + i16 result_high; + i16 result_low; + auto src = insn.modrm().read16>(*this, insn); + op_imul(*this, src.value(), ax().value(), result_high, result_low); + gpr16(X86::RegisterDX) = shadow_wrap_with_taint_from(result_high, src, ax()); + gpr16(X86::RegisterAX) = shadow_wrap_with_taint_from(result_low, src, ax()); } void SoftCPU::IMUL_RM32(const X86::Instruction& insn) { - op_imul(*this, insn.modrm().read32(*this, insn), eax(), (i32&)gpr32(X86::RegisterEDX), (i32&)gpr32(X86::RegisterEAX)); + i32 result_high; + i32 result_low; + auto src = insn.modrm().read32>(*this, insn); + op_imul(*this, src.value(), 
eax().value(), result_high, result_low); + gpr32(X86::RegisterEDX) = shadow_wrap_with_taint_from(result_high, src, eax()); + gpr32(X86::RegisterEAX) = shadow_wrap_with_taint_from(result_low, src, eax()); } void SoftCPU::IMUL_RM8(const X86::Instruction& insn) { - op_imul(*this, insn.modrm().read8(*this, insn), al(), (i8&)gpr8(X86::RegisterAH), (i8&)gpr8(X86::RegisterAL)); + i8 result_high; + i8 result_low; + auto src = insn.modrm().read8>(*this, insn); + op_imul(*this, src.value(), al().value(), result_high, result_low); + gpr8(X86::RegisterAH) = shadow_wrap_with_taint_from(result_high, src, al()); + gpr8(X86::RegisterAL) = shadow_wrap_with_taint_from(result_low, src, al()); } void SoftCPU::IMUL_reg16_RM16(const X86::Instruction& insn) { - PartAddressableRegister result; - op_imul(*this, gpr16(insn.reg16()), insn.modrm().read16(*this, insn), (i16&)result.high_u16, (i16&)result.low_u16); - gpr16(insn.reg16()) = result.low_u16; + i16 result_high; + i16 result_low; + auto src = insn.modrm().read16>(*this, insn); + op_imul(*this, gpr16(insn.reg16()).value(), src.value(), result_high, result_low); + gpr16(insn.reg16()) = shadow_wrap_with_taint_from(result_low, src, gpr16(insn.reg16())); } void SoftCPU::IMUL_reg16_RM16_imm16(const X86::Instruction& insn) { - PartAddressableRegister result; - op_imul(*this, insn.modrm().read16(*this, insn), insn.imm16(), (i16&)result.high_u16, (i16&)result.low_u16); - gpr16(insn.reg16()) = result.low_u16; + i16 result_high; + i16 result_low; + auto src = insn.modrm().read16>(*this, insn); + op_imul(*this, src.value(), insn.imm16(), result_high, result_low); + gpr16(insn.reg16()) = shadow_wrap_with_taint_from(result_low, src); } void SoftCPU::IMUL_reg16_RM16_imm8(const X86::Instruction& insn) { - PartAddressableRegister result; - op_imul(*this, insn.modrm().read16(*this, insn), sign_extended_to(insn.imm8()), (i16&)result.high_u16, (i16&)result.low_u16); - gpr16(insn.reg16()) = result.low_u16; + i16 result_high; + i16 result_low; + auto src = insn.modrm().read16>(*this, insn); + op_imul(*this, src.value(), sign_extended_to(insn.imm8()), result_high, result_low); + gpr16(insn.reg16()) = shadow_wrap_with_taint_from(result_low, src); } void SoftCPU::IMUL_reg32_RM32(const X86::Instruction& insn) { i32 result_high; i32 result_low; - op_imul(*this, gpr32(insn.reg32()), insn.modrm().read32(*this, insn), result_high, result_low); - gpr32(insn.reg32()) = result_low; + auto src = insn.modrm().read32>(*this, insn); + op_imul(*this, gpr32(insn.reg32()).value(), src.value(), result_high, result_low); + gpr32(insn.reg32()) = shadow_wrap_with_taint_from(result_low, src, gpr32(insn.reg32())); } void SoftCPU::IMUL_reg32_RM32_imm32(const X86::Instruction& insn) { i32 result_high; i32 result_low; - op_imul(*this, insn.modrm().read32(*this, insn), insn.imm32(), result_high, result_low); - gpr32(insn.reg32()) = result_low; + auto src = insn.modrm().read32>(*this, insn); + op_imul(*this, src.value(), insn.imm32(), result_high, result_low); + gpr32(insn.reg32()) = shadow_wrap_with_taint_from(result_low, src); } void SoftCPU::IMUL_reg32_RM32_imm8(const X86::Instruction& insn) { i32 result_high; i32 result_low; - op_imul(*this, insn.modrm().read32(*this, insn), sign_extended_to(insn.imm8()), result_high, result_low); - gpr32(insn.reg32()) = result_low; + auto src = insn.modrm().read32>(*this, insn); + op_imul(*this, src.value(), sign_extended_to(insn.imm8()), result_high, result_low); + gpr32(insn.reg32()) = shadow_wrap_with_taint_from(result_low, src); } void SoftCPU::INC_RM16(const 
X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16(*this, insn))); + insn.modrm().write16(*this, insn, op_inc(*this, insn.modrm().read16>(*this, insn))); } void SoftCPU::INC_RM32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32(*this, insn))); + insn.modrm().write32(*this, insn, op_inc(*this, insn.modrm().read32>(*this, insn))); } void SoftCPU::INC_RM8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8(*this, insn))); + insn.modrm().write8(*this, insn, op_inc(*this, insn.modrm().read8>(*this, insn))); } void SoftCPU::INC_reg16(const X86::Instruction& insn) { - gpr16(insn.reg16()) = op_inc(*this, gpr16(insn.reg16())); + gpr16(insn.reg16()) = op_inc(*this, const_gpr16(insn.reg16())); } void SoftCPU::INC_reg32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = op_inc(*this, gpr32(insn.reg32())); + gpr32(insn.reg32()) = op_inc(*this, const_gpr32(insn.reg32())); } void SoftCPU::INSB(const X86::Instruction&) { TODO(); } @@ -1406,7 +1494,8 @@ void SoftCPU::INTO(const X86::Instruction&) { TODO(); } void SoftCPU::INT_imm8(const X86::Instruction& insn) { ASSERT(insn.imm8() == 0x82); - set_eax(m_emulator.virt_syscall(eax(), edx(), ecx(), ebx())); + // FIXME: virt_syscall should take ValueWithShadow and whine about uninitialized arguments + set_eax(shadow_wrap_as_initialized(m_emulator.virt_syscall(eax().value(), edx().value(), ecx().value(), ebx().value()))); } void SoftCPU::INVLPG(const X86::Instruction&) { TODO(); } @@ -1420,8 +1509,15 @@ void SoftCPU::IRET(const X86::Instruction&) { TODO(); } void SoftCPU::JCXZ_imm8(const X86::Instruction& insn) { - if ((insn.a32() && ecx() == 0) || (!insn.a32() && cx() == 0)) - set_eip(eip() + (i8)insn.imm8()); + if (insn.a32()) { + warn_if_uninitialized(ecx(), "jecxz imm8"); + if (ecx().value() == 0) + set_eip(eip() + (i8)insn.imm8()); + } else { + warn_if_uninitialized(cx(), "jcxz imm8"); + if (cx().value() == 0) + set_eip(eip() + (i8)insn.imm8()); + } } void SoftCPU::JMP_FAR_mem16(const X86::Instruction&) { TODO(); } @@ -1430,7 +1526,7 @@ void SoftCPU::JMP_RM16(const X86::Instruction&) { TODO(); } void SoftCPU::JMP_RM32(const X86::Instruction& insn) { - set_eip(insn.modrm().read32(*this, insn)); + set_eip(insn.modrm().read32>(*this, insn).value()); } void SoftCPU::JMP_imm16(const X86::Instruction& insn) @@ -1472,19 +1568,21 @@ void SoftCPU::LEAVE16(const X86::Instruction&) { TODO(); } void SoftCPU::LEAVE32(const X86::Instruction&) { - u32 new_ebp = read_memory32({ ss(), ebp() }); - set_esp(ebp() + 4); + auto new_ebp = read_memory32({ ss(), ebp().value() }); + set_esp({ ebp().value() + 4, ebp().shadow() }); set_ebp(new_ebp); } void SoftCPU::LEA_reg16_mem16(const X86::Instruction& insn) { - gpr16(insn.reg16()) = insn.modrm().resolve(*this, insn).offset(); + // FIXME: Respect shadow values + gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.modrm().resolve(*this, insn).offset()); } void SoftCPU::LEA_reg32_mem32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = insn.modrm().resolve(*this, insn).offset(); + // FIXME: Respect shadow values + gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.modrm().resolve(*this, insn).offset()); } void SoftCPU::LES_reg16_mem16(const X86::Instruction&) { TODO(); } @@ -1503,7 +1601,7 @@ ALWAYS_INLINE static void do_lods(SoftCPU& cpu, const X86::Instruction& insn) { auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)); 
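// Illustrative sketch, not part of this patch: control-flow handlers in this patch (such as
// JCXZ above, and CALL/RET elsewhere) call warn_if_uninitialized(value, "what") before acting
// on a value. Based on how it is used, that helper is assumed to amount to a shadow check plus
// a diagnostic, roughly like this (the sketch_* name and message wording are invented here):
template<typename T>
static void sketch_warn_if_uninitialized(const T& value_with_shadow, const char* what)
{
    if (value_with_shadow.is_uninitialized())
        warn() << "Use of uninitialized value in " << what;
}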
cpu.do_once_or_repeat(insn, [&] { - auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()) }); + auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()).value() }); cpu.gpr(X86::RegisterAL) = src; cpu.step_source_index(insn.a32(), sizeof(T)); }); @@ -1527,24 +1625,24 @@ void SoftCPU::LODSW(const X86::Instruction& insn) void SoftCPU::LOOPNZ_imm8(const X86::Instruction& insn) { if (insn.a32()) { - set_ecx(ecx() - 1); - if (ecx() != 0 && !zf()) + set_ecx({ ecx().value() - 1, ecx().shadow() }); + if (ecx().value() != 0 && !zf()) set_eip(eip() + (i8)insn.imm8()); } else { - set_cx(cx() - 1); - if (cx() != 0 && !zf()) + set_cx({ (u16)(cx().value() - 1), cx().shadow() }); + if (cx().value() != 0 && !zf()) set_eip(eip() + (i8)insn.imm8()); } } void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn) { if (insn.a32()) { - set_ecx(ecx() - 1); - if (ecx() != 0 && zf()) + set_ecx({ ecx().value() - 1, ecx().shadow() }); + if (ecx().value() != 0 && zf()) set_eip(eip() + (i8)insn.imm8()); } else { - set_cx(cx() - 1); - if (cx() != 0 && zf()) + set_cx({ (u16)(cx().value() - 1), cx().shadow() }); + if (cx().value() != 0 && zf()) set_eip(eip() + (i8)insn.imm8()); } } @@ -1552,12 +1650,12 @@ void SoftCPU::LOOPZ_imm8(const X86::Instruction& insn) void SoftCPU::LOOP_imm8(const X86::Instruction& insn) { if (insn.a32()) { - set_ecx(ecx() - 1); - if (ecx() != 0) + set_ecx({ ecx().value() - 1, ecx().shadow() }); + if (ecx().value() != 0) set_eip(eip() + (i8)insn.imm8()); } else { - set_cx(cx() - 1); - if (cx() != 0) + set_cx({ (u16)(cx().value() - 1), cx().shadow() }); + if (cx().value() != 0) set_eip(eip() + (i8)insn.imm8()); } } @@ -1573,8 +1671,8 @@ ALWAYS_INLINE static void do_movs(SoftCPU& cpu, const X86::Instruction& insn) { auto src_segment = cpu.segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)); cpu.do_once_or_repeat(insn, [&] { - auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()) }); - cpu.write_memory({ cpu.es(), cpu.destination_index(insn.a32()) }, src); + auto src = cpu.read_memory({ src_segment, cpu.source_index(insn.a32()).value() }); + cpu.write_memory({ cpu.es(), cpu.destination_index(insn.a32()).value() }, src); cpu.step_source_index(insn.a32(), sizeof(T)); cpu.step_destination_index(insn.a32(), sizeof(T)); }); @@ -1594,34 +1692,41 @@ void SoftCPU::MOVSW(const X86::Instruction& insn) { do_movs(*this, insn); } + void SoftCPU::MOVSX_reg16_RM8(const X86::Instruction& insn) { - gpr16(insn.reg16()) = sign_extended_to(insn.modrm().read8(*this, insn)); + auto src = insn.modrm().read8>(*this, insn); + gpr16(insn.reg16()) = ValueWithShadow(sign_extended_to(src.value()), 0x0100 | (src.shadow())); } void SoftCPU::MOVSX_reg32_RM16(const X86::Instruction& insn) { - gpr32(insn.reg32()) = sign_extended_to(insn.modrm().read16(*this, insn)); + auto src = insn.modrm().read16>(*this, insn); + gpr32(insn.reg32()) = ValueWithShadow(sign_extended_to(src.value()), 0x01010000 | (src.shadow())); } void SoftCPU::MOVSX_reg32_RM8(const X86::Instruction& insn) { - gpr32(insn.reg32()) = sign_extended_to(insn.modrm().read8(*this, insn)); + auto src = insn.modrm().read8>(*this, insn); + gpr32(insn.reg32()) = ValueWithShadow(sign_extended_to(src.value()), 0x01010100 | (src.shadow())); } void SoftCPU::MOVZX_reg16_RM8(const X86::Instruction& insn) { - gpr16(insn.reg16()) = insn.modrm().read8(*this, insn); + auto src = insn.modrm().read8>(*this, insn); + gpr16(insn.reg16()) = ValueWithShadow(src.value(), 0x0100 | (src.shadow() & 0xff)); } void 
SoftCPU::MOVZX_reg32_RM16(const X86::Instruction& insn) { - gpr32(insn.reg32()) = insn.modrm().read16(*this, insn); + auto src = insn.modrm().read16>(*this, insn); + gpr32(insn.reg32()) = ValueWithShadow(src.value(), 0x01010000 | (src.shadow() & 0xffff)); } void SoftCPU::MOVZX_reg32_RM8(const X86::Instruction& insn) { - gpr32(insn.reg32()) = insn.modrm().read8(*this, insn); + auto src = insn.modrm().read8>(*this, insn); + gpr32(insn.reg32()) = ValueWithShadow(src.value(), 0x01010100 | (src.shadow() & 0xff)); } void SoftCPU::MOV_AL_moff8(const X86::Instruction& insn) @@ -1644,34 +1749,34 @@ void SoftCPU::MOV_EAX_moff32(const X86::Instruction& insn) void SoftCPU::MOV_RM16_imm16(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, insn.imm16()); + insn.modrm().write16(*this, insn, shadow_wrap_as_initialized(insn.imm16())); } void SoftCPU::MOV_RM16_reg16(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, gpr16(insn.reg16())); + insn.modrm().write16(*this, insn, const_gpr16(insn.reg16())); } void SoftCPU::MOV_RM16_seg(const X86::Instruction&) { TODO(); } void SoftCPU::MOV_RM32_imm32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, insn.imm32()); + insn.modrm().write32(*this, insn, shadow_wrap_as_initialized(insn.imm32())); } void SoftCPU::MOV_RM32_reg32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, gpr32(insn.reg32())); + insn.modrm().write32(*this, insn, const_gpr32(insn.reg32())); } void SoftCPU::MOV_RM8_imm8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, insn.imm8()); + insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(insn.imm8())); } void SoftCPU::MOV_RM8_reg8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, gpr8(insn.reg8())); + insn.modrm().write8(*this, insn, const_gpr8(insn.reg8())); } void SoftCPU::MOV_moff16_AX(const X86::Instruction& insn) @@ -1691,12 +1796,12 @@ void SoftCPU::MOV_moff8_AL(const X86::Instruction& insn) void SoftCPU::MOV_reg16_RM16(const X86::Instruction& insn) { - gpr16(insn.reg16()) = insn.modrm().read16(*this, insn); + gpr16(insn.reg16()) = insn.modrm().read16>(*this, insn); } void SoftCPU::MOV_reg16_imm16(const X86::Instruction& insn) { - gpr16(insn.reg16()) = insn.imm16(); + gpr16(insn.reg16()) = shadow_wrap_as_initialized(insn.imm16()); } void SoftCPU::MOV_reg32_CR(const X86::Instruction&) { TODO(); } @@ -1704,22 +1809,22 @@ void SoftCPU::MOV_reg32_DR(const X86::Instruction&) { TODO(); } void SoftCPU::MOV_reg32_RM32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = insn.modrm().read32(*this, insn); + gpr32(insn.reg32()) = insn.modrm().read32>(*this, insn); } void SoftCPU::MOV_reg32_imm32(const X86::Instruction& insn) { - gpr32(insn.reg32()) = insn.imm32(); + gpr32(insn.reg32()) = shadow_wrap_as_initialized(insn.imm32()); } void SoftCPU::MOV_reg8_RM8(const X86::Instruction& insn) { - gpr8(insn.reg8()) = insn.modrm().read8(*this, insn); + gpr8(insn.reg8()) = insn.modrm().read8>(*this, insn); } void SoftCPU::MOV_reg8_imm8(const X86::Instruction& insn) { - gpr8(insn.reg8()) = insn.imm8(); + gpr8(insn.reg8()) = shadow_wrap_as_initialized(insn.imm8()); } void SoftCPU::MOV_seg_RM16(const X86::Instruction&) { TODO(); } @@ -1727,46 +1832,54 @@ void SoftCPU::MOV_seg_RM32(const X86::Instruction&) { TODO(); } void SoftCPU::MUL_RM16(const X86::Instruction& insn) { - u32 result = (u32)ax() * (u32)insn.modrm().read16(*this, insn); - set_ax(result & 0xffff); - set_dx(result >> 16); + auto src = insn.modrm().read16>(*this, insn); + u32 result = 
(u32)ax().value() * (u32)src.value(); + auto original_ax = ax(); + set_ax(shadow_wrap_with_taint_from(result & 0xffff, src, original_ax)); + set_dx(shadow_wrap_with_taint_from(result >> 16, src, original_ax)); - set_cf(dx() != 0); - set_of(dx() != 0); + // FIXME: Shadow flags. + set_cf(dx().value() != 0); + set_of(dx().value() != 0); } void SoftCPU::MUL_RM32(const X86::Instruction& insn) { - u64 result = (u64)eax() * (u64)insn.modrm().read32(*this, insn); - set_eax(result & 0xffffffff); - set_edx(result >> 32); + auto src = insn.modrm().read32>(*this, insn); + u64 result = (u64)eax().value() * (u64)src.value(); + auto original_eax = eax(); + set_eax(shadow_wrap_with_taint_from(result, src, original_eax)); + set_edx(shadow_wrap_with_taint_from(result >> 32, src, original_eax)); - set_cf(edx() != 0); - set_of(edx() != 0); + // FIXME: Shadow flags. + set_cf(edx().value() != 0); + set_of(edx().value() != 0); } void SoftCPU::MUL_RM8(const X86::Instruction& insn) { - u16 result = (u16)al() * insn.modrm().read8(*this, insn); - set_ax(result); + auto src = insn.modrm().read8>(*this, insn); + u16 result = (u16)al().value() * src.value(); + set_ax(shadow_wrap_with_taint_from(result, src, al())); + // FIXME: Shadow flags. set_cf((result & 0xff00) != 0); set_of((result & 0xff00) != 0); } void SoftCPU::NEG_RM16(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_sub(*this, 0, insn.modrm().read16(*this, insn))); + insn.modrm().write16(*this, insn, op_sub>(*this, shadow_wrap_as_initialized(0), insn.modrm().read16>(*this, insn))); } void SoftCPU::NEG_RM32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_sub(*this, 0, insn.modrm().read32(*this, insn))); + insn.modrm().write32(*this, insn, op_sub>(*this, shadow_wrap_as_initialized(0), insn.modrm().read32>(*this, insn))); } void SoftCPU::NEG_RM8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, op_sub(*this, 0, insn.modrm().read8(*this, insn))); + insn.modrm().write8(*this, insn, op_sub>(*this, shadow_wrap_as_initialized(0), insn.modrm().read8>(*this, insn))); } void SoftCPU::NOP(const X86::Instruction&) @@ -1775,17 +1888,20 @@ void SoftCPU::NOP(const X86::Instruction&) void SoftCPU::NOT_RM16(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, ~insn.modrm().read16(*this, insn)); + auto data = insn.modrm().read16>(*this, insn); + insn.modrm().write16(*this, insn, ValueWithShadow(~data.value(), data.shadow())); } void SoftCPU::NOT_RM32(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, ~insn.modrm().read32(*this, insn)); + auto data = insn.modrm().read32>(*this, insn); + insn.modrm().write32(*this, insn, ValueWithShadow(~data.value(), data.shadow())); } void SoftCPU::NOT_RM8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, ~insn.modrm().read8(*this, insn)); + auto data = insn.modrm().read8>(*this, insn); + insn.modrm().write8(*this, insn, ValueWithShadow(~data.value(), data.shadow())); } void SoftCPU::OUTSB(const X86::Instruction&) { TODO(); } @@ -1806,8 +1922,9 @@ void SoftCPU::POPF(const X86::Instruction&) { TODO(); } void SoftCPU::POPFD(const X86::Instruction&) { + // FIXME: Shadow flags. 
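// Illustrative sketch, not part of this patch: the recurring "FIXME: Shadow flags." notes
// mark spots where taint is dropped because EFLAGS has no shadow storage yet, so consumers
// of a flag computed from uninitialized inputs cannot be warned. One hypothetical way to
// carry that information would be a flag setter that also records taint; everything below
// is invented for illustration only.
template<typename... Sources>
static void sketch_set_cf_with_taint(SoftCPU& cpu, bool carry, const Sources&... sources)
{
    bool tainted = (sources.is_uninitialized() || ...);
    cpu.set_cf(carry);
    if (tainted) {
        // A real implementation would set a per-flag shadow bit here, so that later
        // readers of CF (Jcc, ADC, RCL, ...) could warn about uninitialized inputs too.
    }
}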
m_eflags &= ~0x00fcffff; - m_eflags |= pop32() & 0x00fcffff; + m_eflags |= pop32().value() & 0x00fcffff; } void SoftCPU::POP_DS(const X86::Instruction&) { TODO(); } @@ -1843,7 +1960,8 @@ void SoftCPU::PUSHF(const X86::Instruction&) { TODO(); } void SoftCPU::PUSHFD(const X86::Instruction&) { - push32(m_eflags & 0x00fcffff); + // FIXME: Respect shadow flags when they exist! + push32(shadow_wrap_as_initialized(m_eflags & 0x00fcffff)); } void SoftCPU::PUSH_CS(const X86::Instruction&) { TODO(); } @@ -1855,7 +1973,7 @@ void SoftCPU::PUSH_RM16(const X86::Instruction&) { TODO(); } void SoftCPU::PUSH_RM32(const X86::Instruction& insn) { - push32(insn.modrm().read32(*this, insn)); + push32(insn.modrm().read32>(*this, insn)); } void SoftCPU::PUSH_SP_8086_80186(const X86::Instruction&) { TODO(); } @@ -1863,18 +1981,18 @@ void SoftCPU::PUSH_SS(const X86::Instruction&) { TODO(); } void SoftCPU::PUSH_imm16(const X86::Instruction& insn) { - push16(insn.imm16()); + push16(shadow_wrap_as_initialized(insn.imm16())); } void SoftCPU::PUSH_imm32(const X86::Instruction& insn) { - push32(insn.imm32()); + push32(shadow_wrap_as_initialized(insn.imm32())); } void SoftCPU::PUSH_imm8(const X86::Instruction& insn) { ASSERT(!insn.has_operand_size_override_prefix()); - push32(sign_extended_to(insn.imm8())); + push32(shadow_wrap_as_initialized(sign_extended_to(insn.imm8()))); } void SoftCPU::PUSH_reg16(const X86::Instruction& insn) @@ -1887,23 +2005,23 @@ void SoftCPU::PUSH_reg32(const X86::Instruction& insn) push32(gpr32(insn.reg32())); if (m_secret_handshake_state == 2) { - m_secret_data[0] = gpr32(insn.reg32()); + m_secret_data[0] = gpr32(insn.reg32()).value(); ++m_secret_handshake_state; } else if (m_secret_handshake_state == 3) { - m_secret_data[1] = gpr32(insn.reg32()); + m_secret_data[1] = gpr32(insn.reg32()).value(); ++m_secret_handshake_state; } else if (m_secret_handshake_state == 4) { - m_secret_data[2] = gpr32(insn.reg32()); + m_secret_data[2] = gpr32(insn.reg32()).value(); m_secret_handshake_state = 0; did_receive_secret_data(); } } template -ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; @@ -1913,18 +2031,18 @@ ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, u8 steps) else asm volatile("clc"); - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("rcll %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("rclw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("rclb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -1933,11 +2051,11 @@ ALWAYS_INLINE static T op_rcl_impl(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template -ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, ValueWithShadow steps) { if (cpu.cf()) return 
op_rcl_impl(cpu, data, steps); @@ -1947,10 +2065,10 @@ ALWAYS_INLINE static T op_rcl(SoftCPU& cpu, T data, u8 steps) DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(RCL, op_rcl) template -ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; @@ -1960,18 +2078,18 @@ ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, u8 steps) else asm volatile("clc"); - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("rcrl %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("rcrw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("rcrb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -1980,11 +2098,11 @@ ALWAYS_INLINE static T op_rcr_impl(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } template -ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_rcr(SoftCPU& cpu, T data, ValueWithShadow steps) { if (cpu.cf()) return op_rcr_impl(cpu, data, steps); @@ -1998,7 +2116,9 @@ void SoftCPU::RDTSC(const X86::Instruction&) { TODO(); } void SoftCPU::RET(const X86::Instruction& insn) { ASSERT(!insn.has_operand_size_override_prefix()); - set_eip(pop32()); + auto ret_address = pop32(); + warn_if_uninitialized(ret_address, "ret"); + set_eip(ret_address.value()); } void SoftCPU::RETF(const X86::Instruction&) { TODO(); } @@ -2007,31 +2127,33 @@ void SoftCPU::RETF_imm16(const X86::Instruction&) { TODO(); } void SoftCPU::RET_imm16(const X86::Instruction& insn) { ASSERT(!insn.has_operand_size_override_prefix()); - set_eip(pop32()); - set_esp(esp() + insn.imm16()); + auto ret_address = pop32(); + warn_if_uninitialized(ret_address, "ret imm16"); + set_eip(ret_address.value()); + set_esp({ esp().value() + insn.imm16(), esp().shadow() }); } template -ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("roll %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("rolw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("rolb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -2040,32 +2162,32 @@ ALWAYS_INLINE static T op_rol(SoftCPU& cpu, T data, u8 steps) : 
"=b"(new_flags)); cpu.set_flags_oc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROL, op_rol) template -ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, u8 steps) +ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("rorl %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("rorw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("rorb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -2074,7 +2196,7 @@ ALWAYS_INLINE static T op_ror(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(ROR, op_ror) @@ -2083,7 +2205,8 @@ void SoftCPU::SAHF(const X86::Instruction&) { TODO(); } void SoftCPU::SALC(const X86::Instruction&) { - set_al(cf() ? 0xff : 0x00); + // FIXME: Respect shadow flags once they exists! + set_al(shadow_wrap_as_initialized(cf() ? 0xff : 0x00)); if (m_secret_handshake_state < 2) ++m_secret_handshake_state; @@ -2092,26 +2215,26 @@ void SoftCPU::SALC(const X86::Instruction&) } template -static T op_sar(SoftCPU& cpu, T data, u8 steps) +static T op_sar(SoftCPU& cpu, T data, ValueWithShadow steps) { - if (steps == 0) - return data; + if (steps.value() == 0) + return shadow_wrap_with_taint_from(data.value(), data, steps); u32 result = 0; u32 new_flags = 0; - if constexpr (sizeof(T) == 4) { + if constexpr (sizeof(typename T::ValueType) == 4) { asm volatile("sarl %%cl, %%eax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 2) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 2) { asm volatile("sarw %%cl, %%ax\n" : "=a"(result) - : "a"(data), "c"(steps)); - } else if constexpr (sizeof(T) == 1) { + : "a"(data.value()), "c"(steps.value())); + } else if constexpr (sizeof(typename T::ValueType) == 1) { asm volatile("sarb %%cl, %%al\n" : "=a"(result) - : "a"(data), "c"(steps)); + : "a"(data.value()), "c"(steps.value())); } asm volatile( @@ -2120,7 +2243,7 @@ static T op_sar(SoftCPU& cpu, T data, u8 steps) : "=b"(new_flags)); cpu.set_flags_oszapc(new_flags); - return result; + return shadow_wrap_with_taint_from(result, data, steps); } DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SAR, op_sar) @@ -2129,8 +2252,8 @@ template ALWAYS_INLINE static void do_scas(SoftCPU& cpu, const X86::Instruction& insn) { cpu.do_once_or_repeat(insn, [&] { - auto src = cpu.gpr(X86::RegisterAL); - auto dest = cpu.read_memory({ cpu.es(), cpu.destination_index(insn.a32()) }); + auto src = cpu.const_gpr(X86::RegisterAL); + auto dest = cpu.read_memory({ cpu.es(), cpu.destination_index(insn.a32()).value() }); op_sub(cpu, dest, src); cpu.step_destination_index(insn.a32(), sizeof(T)); }); @@ -2153,51 +2276,52 @@ void SoftCPU::SCASW(const 
X86::Instruction& insn) void SoftCPU::SETcc_RM8(const X86::Instruction& insn) { - insn.modrm().write8(*this, insn, evaluate_condition(insn.cc())); + // FIXME: Shadow flags. + insn.modrm().write8(*this, insn, shadow_wrap_as_initialized(evaluate_condition(insn.cc()))); } void SoftCPU::SGDT(const X86::Instruction&) { TODO(); } void SoftCPU::SHLD_RM16_reg16_CL(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), gpr16(insn.reg16()), cl())); + insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16>(*this, insn), const_gpr16(insn.reg16()), cl())); } void SoftCPU::SHLD_RM16_reg16_imm8(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16(*this, insn), gpr16(insn.reg16()), insn.imm8())); + insn.modrm().write16(*this, insn, op_shld(*this, insn.modrm().read16>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8()))); } void SoftCPU::SHLD_RM32_reg32_CL(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), gpr32(insn.reg32()), cl())); + insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32>(*this, insn), const_gpr32(insn.reg32()), cl())); } void SoftCPU::SHLD_RM32_reg32_imm8(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32(*this, insn), gpr32(insn.reg32()), insn.imm8())); + insn.modrm().write32(*this, insn, op_shld(*this, insn.modrm().read32>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8()))); } DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHL, op_shl) void SoftCPU::SHRD_RM16_reg16_CL(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), gpr16(insn.reg16()), cl())); + insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16>(*this, insn), const_gpr16(insn.reg16()), cl())); } void SoftCPU::SHRD_RM16_reg16_imm8(const X86::Instruction& insn) { - insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16(*this, insn), gpr16(insn.reg16()), insn.imm8())); + insn.modrm().write16(*this, insn, op_shrd(*this, insn.modrm().read16>(*this, insn), const_gpr16(insn.reg16()), shadow_wrap_as_initialized(insn.imm8()))); } void SoftCPU::SHRD_RM32_reg32_CL(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), gpr32(insn.reg32()), cl())); + insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32>(*this, insn), const_gpr32(insn.reg32()), cl())); } void SoftCPU::SHRD_RM32_reg32_imm8(const X86::Instruction& insn) { - insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32(*this, insn), gpr32(insn.reg32()), insn.imm8())); + insn.modrm().write32(*this, insn, op_shrd(*this, insn.modrm().read32>(*this, insn), const_gpr32(insn.reg32()), shadow_wrap_as_initialized(insn.imm8()))); } DEFINE_GENERIC_SHIFT_ROTATE_INSN_HANDLERS(SHR, op_shr) @@ -2221,7 +2345,7 @@ void SoftCPU::STI(const X86::Instruction&) { TODO(); } void SoftCPU::STOSB(const X86::Instruction& insn) { do_once_or_repeat(insn, [&] { - write_memory8({ es(), destination_index(insn.a32()) }, al()); + write_memory8({ es(), destination_index(insn.a32()).value() }, al()); step_destination_index(insn.a32(), 1); }); } @@ -2229,7 +2353,7 @@ void SoftCPU::STOSB(const X86::Instruction& insn) void SoftCPU::STOSD(const X86::Instruction& insn) { do_once_or_repeat(insn, [&] { - write_memory32({ es(), 
destination_index(insn.a32()) }, eax()); + write_memory32({ es(), destination_index(insn.a32()).value() }, eax()); step_destination_index(insn.a32(), 4); }); } @@ -2237,7 +2361,7 @@ void SoftCPU::STOSD(const X86::Instruction& insn) void SoftCPU::STOSW(const X86::Instruction& insn) { do_once_or_repeat(insn, [&] { - write_memory16({ es(), destination_index(insn.a32()) }, ax()); + write_memory16({ es(), destination_index(insn.a32()).value() }, ax()); step_destination_index(insn.a32(), 2); }); } @@ -2253,8 +2377,8 @@ void SoftCPU::WBINVD(const X86::Instruction&) { TODO(); } void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn) { - auto dest = insn.modrm().read16(*this, insn); - auto src = gpr16(insn.reg16()); + auto dest = insn.modrm().read16>(*this, insn); + auto src = const_gpr16(insn.reg16()); auto result = op_add(*this, dest, src); gpr16(insn.reg16()) = dest; insn.modrm().write16(*this, insn, result); @@ -2262,8 +2386,8 @@ void SoftCPU::XADD_RM16_reg16(const X86::Instruction& insn) void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn) { - auto dest = insn.modrm().read32(*this, insn); - auto src = gpr32(insn.reg32()); + auto dest = insn.modrm().read32>(*this, insn); + auto src = const_gpr32(insn.reg32()); auto result = op_add(*this, dest, src); gpr32(insn.reg32()) = dest; insn.modrm().write32(*this, insn, result); @@ -2271,8 +2395,8 @@ void SoftCPU::XADD_RM32_reg32(const X86::Instruction& insn) void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn) { - auto dest = insn.modrm().read8(*this, insn); - auto src = gpr8(insn.reg8()); + auto dest = insn.modrm().read8>(*this, insn); + auto src = const_gpr8(insn.reg8()); auto result = op_add(*this, dest, src); gpr8(insn.reg8()) = dest; insn.modrm().write8(*this, insn, result); @@ -2281,7 +2405,7 @@ void SoftCPU::XADD_RM8_reg8(const X86::Instruction& insn) void SoftCPU::XCHG_AX_reg16(const X86::Instruction& insn) { auto temp = gpr16(insn.reg16()); - gpr16(insn.reg16()) = eax(); + gpr16(insn.reg16()) = ax(); set_ax(temp); } @@ -2294,49 +2418,54 @@ void SoftCPU::XCHG_EAX_reg32(const X86::Instruction& insn) void SoftCPU::XCHG_reg16_RM16(const X86::Instruction& insn) { - auto temp = insn.modrm().read16(*this, insn); - insn.modrm().write16(*this, insn, gpr16(insn.reg16())); + auto temp = insn.modrm().read16>(*this, insn); + insn.modrm().write16(*this, insn, const_gpr16(insn.reg16())); gpr16(insn.reg16()) = temp; } void SoftCPU::XCHG_reg32_RM32(const X86::Instruction& insn) { - auto temp = insn.modrm().read32(*this, insn); - insn.modrm().write32(*this, insn, gpr32(insn.reg32())); + auto temp = insn.modrm().read32>(*this, insn); + insn.modrm().write32(*this, insn, const_gpr32(insn.reg32())); gpr32(insn.reg32()) = temp; } void SoftCPU::XCHG_reg8_RM8(const X86::Instruction& insn) { - auto temp = insn.modrm().read8(*this, insn); - insn.modrm().write8(*this, insn, gpr8(insn.reg8())); + auto temp = insn.modrm().read8>(*this, insn); + insn.modrm().write8(*this, insn, const_gpr8(insn.reg8())); gpr8(insn.reg8()) = temp; } void SoftCPU::XLAT(const X86::Instruction& insn) { - u32 offset = (insn.a32() ? ebx() : bx()) + al(); + if (insn.a32()) + warn_if_uninitialized(ebx(), "xlat ebx"); + else + warn_if_uninitialized(ebx(), "xlat bx"); + warn_if_uninitialized(al(), "xlat al"); + u32 offset = (insn.a32() ? 
ebx().value() : bx().value()) + al().value(); set_al(read_memory8({ segment(insn.segment_prefix().value_or(X86::SegmentRegister::DS)), offset })); } -#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest) \ - void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8(op, insn); } \ - void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16(op, insn); } \ - void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32(op, insn); } \ - void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16(op, insn); } \ - void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16(op, insn); } \ - void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32(op, insn); } \ - void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32(op, insn); } \ - void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8(op, insn); } \ - void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8(op, insn); } +#define DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest) \ + void SoftCPU::mnemonic##_AL_imm8(const X86::Instruction& insn) { generic_AL_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_AX_imm16(const X86::Instruction& insn) { generic_AX_imm16(op>, insn); } \ + void SoftCPU::mnemonic##_EAX_imm32(const X86::Instruction& insn) { generic_EAX_imm32(op>, insn); } \ + void SoftCPU::mnemonic##_RM16_imm16(const X86::Instruction& insn) { generic_RM16_imm16(op>, insn); } \ + void SoftCPU::mnemonic##_RM16_reg16(const X86::Instruction& insn) { generic_RM16_reg16(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_imm32(const X86::Instruction& insn) { generic_RM32_imm32(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_reg32(const X86::Instruction& insn) { generic_RM32_reg32(op>, insn); } \ + void SoftCPU::mnemonic##_RM8_imm8(const X86::Instruction& insn) { generic_RM8_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_RM8_reg8(const X86::Instruction& insn) { generic_RM8_reg8(op>, insn); } -#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest) \ - DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest) \ - void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8(op, insn); } \ - void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8(op, insn); } \ - void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16(op, insn); } \ - void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32(op, insn); } \ - void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8(op, insn); } +#define DEFINE_GENERIC_INSN_HANDLERS(mnemonic, op, update_dest) \ + DEFINE_GENERIC_INSN_HANDLERS_PARTIAL(mnemonic, op, update_dest) \ + void SoftCPU::mnemonic##_RM16_imm8(const X86::Instruction& insn) { generic_RM16_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_RM32_imm8(const X86::Instruction& insn) { generic_RM32_imm8(op>, insn); } \ + void SoftCPU::mnemonic##_reg16_RM16(const X86::Instruction& insn) { generic_reg16_RM16(op>, insn); } \ + void SoftCPU::mnemonic##_reg32_RM32(const X86::Instruction& insn) { generic_reg32_RM32(op>, insn); } \ + void SoftCPU::mnemonic##_reg8_RM8(const X86::Instruction& insn) { generic_reg8_RM8(op>, insn); } DEFINE_GENERIC_INSN_HANDLERS(XOR, op_xor, true) DEFINE_GENERIC_INSN_HANDLERS(OR, op_or, true) diff --git 
a/DevTools/UserspaceEmulator/SoftCPU.h b/DevTools/UserspaceEmulator/SoftCPU.h index 7d0e3d8e69..cd0c93e2c2 100644 --- a/DevTools/UserspaceEmulator/SoftCPU.h +++ b/DevTools/UserspaceEmulator/SoftCPU.h @@ -26,6 +26,7 @@ #pragma once +#include "ValueWithShadow.h" #include #include @@ -77,71 +78,96 @@ public: }; }; - void push32(u32); - u32 pop32(); + void push32(ValueWithShadow); + ValueWithShadow pop32(); - void push16(u16); - u16 pop16(); + void push16(ValueWithShadow); + ValueWithShadow pop16(); void push_string(const StringView&); u16 segment(X86::SegmentRegister seg) const { return m_segment[(int)seg]; } u16& segment(X86::SegmentRegister seg) { return m_segment[(int)seg]; } - u8& gpr8(X86::RegisterIndex8 reg) + ValueAndShadowReference gpr8(X86::RegisterIndex8 reg) { switch (reg) { case X86::RegisterAL: - return m_gpr[X86::RegisterEAX].low_u8; + return { m_gpr[X86::RegisterEAX].low_u8, m_gpr_shadow[X86::RegisterEAX].low_u8 }; case X86::RegisterAH: - return m_gpr[X86::RegisterEAX].high_u8; + return { m_gpr[X86::RegisterEAX].high_u8, m_gpr_shadow[X86::RegisterEAX].high_u8 }; case X86::RegisterBL: - return m_gpr[X86::RegisterEBX].low_u8; + return { m_gpr[X86::RegisterEBX].low_u8, m_gpr_shadow[X86::RegisterEBX].low_u8 }; case X86::RegisterBH: - return m_gpr[X86::RegisterEBX].high_u8; + return { m_gpr[X86::RegisterEBX].high_u8, m_gpr_shadow[X86::RegisterEBX].high_u8 }; case X86::RegisterCL: - return m_gpr[X86::RegisterECX].low_u8; + return { m_gpr[X86::RegisterECX].low_u8, m_gpr_shadow[X86::RegisterECX].low_u8 }; case X86::RegisterCH: - return m_gpr[X86::RegisterECX].high_u8; + return { m_gpr[X86::RegisterECX].high_u8, m_gpr_shadow[X86::RegisterECX].high_u8 }; case X86::RegisterDL: - return m_gpr[X86::RegisterEDX].low_u8; + return { m_gpr[X86::RegisterEDX].low_u8, m_gpr_shadow[X86::RegisterEDX].low_u8 }; case X86::RegisterDH: - return m_gpr[X86::RegisterEDX].high_u8; + return { m_gpr[X86::RegisterEDX].high_u8, m_gpr_shadow[X86::RegisterEDX].high_u8 }; } ASSERT_NOT_REACHED(); } - u8 gpr8(X86::RegisterIndex8 reg) const + ValueWithShadow const_gpr8(X86::RegisterIndex8 reg) const { switch (reg) { case X86::RegisterAL: - return m_gpr[X86::RegisterEAX].low_u8; + return { m_gpr[X86::RegisterEAX].low_u8, m_gpr_shadow[X86::RegisterEAX].low_u8 }; case X86::RegisterAH: - return m_gpr[X86::RegisterEAX].high_u8; + return { m_gpr[X86::RegisterEAX].high_u8, m_gpr_shadow[X86::RegisterEAX].high_u8 }; case X86::RegisterBL: - return m_gpr[X86::RegisterEBX].low_u8; + return { m_gpr[X86::RegisterEBX].low_u8, m_gpr_shadow[X86::RegisterEBX].low_u8 }; case X86::RegisterBH: - return m_gpr[X86::RegisterEBX].high_u8; + return { m_gpr[X86::RegisterEBX].high_u8, m_gpr_shadow[X86::RegisterEBX].high_u8 }; case X86::RegisterCL: - return m_gpr[X86::RegisterECX].low_u8; + return { m_gpr[X86::RegisterECX].low_u8, m_gpr_shadow[X86::RegisterECX].low_u8 }; case X86::RegisterCH: - return m_gpr[X86::RegisterECX].high_u8; + return { m_gpr[X86::RegisterECX].high_u8, m_gpr_shadow[X86::RegisterECX].high_u8 }; case X86::RegisterDL: - return m_gpr[X86::RegisterEDX].low_u8; + return { m_gpr[X86::RegisterEDX].low_u8, m_gpr_shadow[X86::RegisterEDX].low_u8 }; case X86::RegisterDH: - return m_gpr[X86::RegisterEDX].high_u8; + return { m_gpr[X86::RegisterEDX].high_u8, m_gpr_shadow[X86::RegisterEDX].high_u8 }; } ASSERT_NOT_REACHED(); } - u16 gpr16(X86::RegisterIndex16 reg) const { return m_gpr[reg].low_u16; } - u16& gpr16(X86::RegisterIndex16 reg) { return m_gpr[reg].low_u16; } + ValueWithShadow const_gpr16(X86::RegisterIndex16 reg) const + { + 
+        return { m_gpr[reg].low_u16, m_gpr_shadow[reg].low_u16 };
+    }
 
-    u32 gpr32(X86::RegisterIndex32 reg) const { return m_gpr[reg].full_u32; }
-    u32& gpr32(X86::RegisterIndex32 reg) { return m_gpr[reg].full_u32; }
+    ValueAndShadowReference<u16> gpr16(X86::RegisterIndex16 reg)
+    {
+        return { m_gpr[reg].low_u16, m_gpr_shadow[reg].low_u16 };
+    }
+
+    ValueWithShadow<u32> const_gpr32(X86::RegisterIndex32 reg) const
+    {
+        return { m_gpr[reg].full_u32, m_gpr_shadow[reg].full_u32 };
+    }
+
+    ValueAndShadowReference<u32> gpr32(X86::RegisterIndex32 reg)
+    {
+        return { m_gpr[reg].full_u32, m_gpr_shadow[reg].full_u32 };
+    }
 
     template<typename T>
-    T gpr(unsigned register_index) const
+    ValueWithShadow<T> const_gpr(unsigned register_index) const
+    {
+        if constexpr (sizeof(T) == 1)
+            return const_gpr8((X86::RegisterIndex8)register_index);
+        if constexpr (sizeof(T) == 2)
+            return const_gpr16((X86::RegisterIndex16)register_index);
+        if constexpr (sizeof(T) == 4)
+            return const_gpr32((X86::RegisterIndex32)register_index);
+    }
+
+    template<typename T>
+    ValueAndShadowReference<T> gpr(unsigned register_index)
     {
         if constexpr (sizeof(T) == 1)
             return gpr8((X86::RegisterIndex8)register_index);
@@ -151,60 +177,49 @@ public:
         return gpr32((X86::RegisterIndex32)register_index);
     }
 
-    template<typename T>
-    T& gpr(unsigned register_index)
-    {
-        if constexpr (sizeof(T) == 1)
-            return gpr8((X86::RegisterIndex8)register_index);
-        if constexpr (sizeof(T) == 2)
-            return gpr16((X86::RegisterIndex16)register_index);
-        if constexpr (sizeof(T) == 4)
-            return gpr32((X86::RegisterIndex32)register_index);
-    }
-
-    u32 source_index(bool a32) const
+    ValueWithShadow<u32> source_index(bool a32) const
     {
         if (a32)
             return esi();
-        return si();
+        return { si().value(), (u32)si().shadow() & 0xffff };
     }
 
-    u32 destination_index(bool a32) const
+    ValueWithShadow<u32> destination_index(bool a32) const
     {
         if (a32)
             return edi();
-        return di();
+        return { di().value(), (u32)di().shadow() & 0xffff };
     }
 
-    u32 loop_index(bool a32) const
+    ValueWithShadow<u32> loop_index(bool a32) const
     {
         if (a32)
             return ecx();
-        return cx();
+        return { cx().value(), (u32)cx().shadow() & 0xffff };
     }
 
     bool decrement_loop_index(bool a32)
     {
         if (a32) {
-            set_ecx(ecx() - 1);
-            return ecx() == 0;
+            set_ecx({ ecx().value() - 1, ecx().shadow() });
+            return ecx().value() == 0;
         }
-        set_cx(cx() - 1);
-        return cx() == 0;
+        set_cx(ValueWithShadow<u16>(cx().value() - 1, cx().shadow()));
+        return cx().value() == 0;
     }
 
     ALWAYS_INLINE void step_source_index(bool a32, u32 step)
     {
         if (a32) {
             if (df())
-                set_esi(esi() - step);
+                set_esi({ esi().value() - step, esi().shadow() });
             else
-                set_esi(esi() + step);
+                set_esi({ esi().value() + step, esi().shadow() });
         } else {
             if (df())
-                set_si(si() - step);
+                set_si(ValueWithShadow<u16>(si().value() - step, si().shadow()));
             else
-                set_si(si() + step);
+                set_si(ValueWithShadow<u16>(si().value() + step, si().shadow()));
         }
     }
 
@@ -212,71 +227,70 @@ public:
     {
         if (a32) {
             if (df())
-                set_edi(edi() - step);
+                set_edi({ edi().value() - step, edi().shadow() });
             else
-                set_edi(edi() + step);
+                set_edi({ edi().value() + step, edi().shadow() });
         } else {
             if (df())
-                set_di(di() - step);
+                set_di(ValueWithShadow<u16>(di().value() - step, di().shadow()));
            else
-                set_di(di() + step);
+                set_di(ValueWithShadow<u16>(di().value() + step, di().shadow()));
         }
     }
 
+    ValueWithShadow<u32> eax() const { return const_gpr32(X86::RegisterEAX); }
+    ValueWithShadow<u32> ebx() const { return const_gpr32(X86::RegisterEBX); }
+    ValueWithShadow<u32> ecx() const { return const_gpr32(X86::RegisterECX); }
+    ValueWithShadow<u32> edx() const { return const_gpr32(X86::RegisterEDX); }
+    ValueWithShadow<u32> esp() const { return const_gpr32(X86::RegisterESP); }
+    ValueWithShadow<u32> ebp() const { return const_gpr32(X86::RegisterEBP); }
+    ValueWithShadow<u32> esi() const { return const_gpr32(X86::RegisterESI); }
+    ValueWithShadow<u32> edi() const { return const_gpr32(X86::RegisterEDI); }
 
-    u32 eax() const { return gpr32(X86::RegisterEAX); }
-    u32 ebx() const { return gpr32(X86::RegisterEBX); }
-    u32 ecx() const { return gpr32(X86::RegisterECX); }
-    u32 edx() const { return gpr32(X86::RegisterEDX); }
-    u32 esp() const { return gpr32(X86::RegisterESP); }
-    u32 ebp() const { return gpr32(X86::RegisterEBP); }
-    u32 esi() const { return gpr32(X86::RegisterESI); }
-    u32 edi() const { return gpr32(X86::RegisterEDI); }
 
+    ValueWithShadow<u16> ax() const { return const_gpr16(X86::RegisterAX); }
+    ValueWithShadow<u16> bx() const { return const_gpr16(X86::RegisterBX); }
+    ValueWithShadow<u16> cx() const { return const_gpr16(X86::RegisterCX); }
+    ValueWithShadow<u16> dx() const { return const_gpr16(X86::RegisterDX); }
+    ValueWithShadow<u16> sp() const { return const_gpr16(X86::RegisterSP); }
+    ValueWithShadow<u16> bp() const { return const_gpr16(X86::RegisterBP); }
+    ValueWithShadow<u16> si() const { return const_gpr16(X86::RegisterSI); }
+    ValueWithShadow<u16> di() const { return const_gpr16(X86::RegisterDI); }
 
-    u16 ax() const { return gpr16(X86::RegisterAX); }
-    u16 bx() const { return gpr16(X86::RegisterBX); }
-    u16 cx() const { return gpr16(X86::RegisterCX); }
-    u16 dx() const { return gpr16(X86::RegisterDX); }
-    u16 sp() const { return gpr16(X86::RegisterSP); }
-    u16 bp() const { return gpr16(X86::RegisterBP); }
-    u16 si() const { return gpr16(X86::RegisterSI); }
-    u16 di() const { return gpr16(X86::RegisterDI); }
 
+    ValueWithShadow<u8> al() const { return const_gpr8(X86::RegisterAL); }
+    ValueWithShadow<u8> ah() const { return const_gpr8(X86::RegisterAH); }
+    ValueWithShadow<u8> bl() const { return const_gpr8(X86::RegisterBL); }
+    ValueWithShadow<u8> bh() const { return const_gpr8(X86::RegisterBH); }
+    ValueWithShadow<u8> cl() const { return const_gpr8(X86::RegisterCL); }
+    ValueWithShadow<u8> ch() const { return const_gpr8(X86::RegisterCH); }
+    ValueWithShadow<u8> dl() const { return const_gpr8(X86::RegisterDL); }
+    ValueWithShadow<u8> dh() const { return const_gpr8(X86::RegisterDH); }
 
-    u8 al() const { return gpr8(X86::RegisterAL); }
-    u8 ah() const { return gpr8(X86::RegisterAH); }
-    u8 bl() const { return gpr8(X86::RegisterBL); }
-    u8 bh() const { return gpr8(X86::RegisterBH); }
-    u8 cl() const { return gpr8(X86::RegisterCL); }
-    u8 ch() const { return gpr8(X86::RegisterCH); }
-    u8 dl() const { return gpr8(X86::RegisterDL); }
-    u8 dh() const { return gpr8(X86::RegisterDH); }
 
+    void set_eax(ValueWithShadow<u32> value) { gpr32(X86::RegisterEAX) = value; }
+    void set_ebx(ValueWithShadow<u32> value) { gpr32(X86::RegisterEBX) = value; }
+    void set_ecx(ValueWithShadow<u32> value) { gpr32(X86::RegisterECX) = value; }
+    void set_edx(ValueWithShadow<u32> value) { gpr32(X86::RegisterEDX) = value; }
+    void set_esp(ValueWithShadow<u32> value) { gpr32(X86::RegisterESP) = value; }
+    void set_ebp(ValueWithShadow<u32> value) { gpr32(X86::RegisterEBP) = value; }
+    void set_esi(ValueWithShadow<u32> value) { gpr32(X86::RegisterESI) = value; }
+    void set_edi(ValueWithShadow<u32> value) { gpr32(X86::RegisterEDI) = value; }
 
-    void set_eax(u32 value) { gpr32(X86::RegisterEAX) = value; }
-    void set_ebx(u32 value) { gpr32(X86::RegisterEBX) = value; }
-    void set_ecx(u32 value) { gpr32(X86::RegisterECX) = value; }
-    void set_edx(u32 value) { gpr32(X86::RegisterEDX) = value; }
-    void set_esp(u32 value) { gpr32(X86::RegisterESP) = value; }
-    void set_ebp(u32 value) { gpr32(X86::RegisterEBP) = value; }
-    void set_esi(u32 value) { gpr32(X86::RegisterESI) = value; }
-    void set_edi(u32 value) { gpr32(X86::RegisterEDI) = value; }
 
+    void set_ax(ValueWithShadow<u16> value) { gpr16(X86::RegisterAX) = value; }
+    void set_bx(ValueWithShadow<u16> value) { gpr16(X86::RegisterBX) = value; }
+    void set_cx(ValueWithShadow<u16> value) { gpr16(X86::RegisterCX) = value; }
+    void set_dx(ValueWithShadow<u16> value) { gpr16(X86::RegisterDX) = value; }
+    void set_sp(ValueWithShadow<u16> value) { gpr16(X86::RegisterSP) = value; }
+    void set_bp(ValueWithShadow<u16> value) { gpr16(X86::RegisterBP) = value; }
+    void set_si(ValueWithShadow<u16> value) { gpr16(X86::RegisterSI) = value; }
+    void set_di(ValueWithShadow<u16> value) { gpr16(X86::RegisterDI) = value; }
 
-    void set_ax(u16 value) { gpr16(X86::RegisterAX) = value; }
-    void set_bx(u16 value) { gpr16(X86::RegisterBX) = value; }
-    void set_cx(u16 value) { gpr16(X86::RegisterCX) = value; }
-    void set_dx(u16 value) { gpr16(X86::RegisterDX) = value; }
-    void set_sp(u16 value) { gpr16(X86::RegisterSP) = value; }
-    void set_bp(u16 value) { gpr16(X86::RegisterBP) = value; }
-    void set_si(u16 value) { gpr16(X86::RegisterSI) = value; }
-    void set_di(u16 value) { gpr16(X86::RegisterDI) = value; }
-
-    void set_al(u8 value) { gpr8(X86::RegisterAL) = value; }
-    void set_ah(u8 value) { gpr8(X86::RegisterAH) = value; }
-    void set_bl(u8 value) { gpr8(X86::RegisterBL) = value; }
-    void set_bh(u8 value) { gpr8(X86::RegisterBH) = value; }
-    void set_cl(u8 value) { gpr8(X86::RegisterCL) = value; }
-    void set_ch(u8 value) { gpr8(X86::RegisterCH) = value; }
-    void set_dl(u8 value) { gpr8(X86::RegisterDL) = value; }
-    void set_dh(u8 value) { gpr8(X86::RegisterDH) = value; }
+    void set_al(ValueWithShadow<u8> value) { gpr8(X86::RegisterAL) = value; }
+    void set_ah(ValueWithShadow<u8> value) { gpr8(X86::RegisterAH) = value; }
+    void set_bl(ValueWithShadow<u8> value) { gpr8(X86::RegisterBL) = value; }
+    void set_bh(ValueWithShadow<u8> value) { gpr8(X86::RegisterBH) = value; }
+    void set_cl(ValueWithShadow<u8> value) { gpr8(X86::RegisterCL) = value; }
+    void set_ch(ValueWithShadow<u8> value) { gpr8(X86::RegisterCH) = value; }
+    void set_dl(ValueWithShadow<u8> value) { gpr8(X86::RegisterDL) = value; }
+    void set_dh(ValueWithShadow<u8> value) { gpr8(X86::RegisterDH) = value; }
 
     bool of() const { return m_eflags & Flags::OF; }
     bool sf() const { return m_eflags & Flags::SF; }
@@ -333,12 +347,12 @@ public:
     u16 es() const { return m_segment[(int)X86::SegmentRegister::ES]; }
     u16 ss() const { return m_segment[(int)X86::SegmentRegister::SS]; }
 
-    u8 read_memory8(X86::LogicalAddress);
-    u16 read_memory16(X86::LogicalAddress);
-    u32 read_memory32(X86::LogicalAddress);
+    ValueWithShadow<u8> read_memory8(X86::LogicalAddress);
+    ValueWithShadow<u16> read_memory16(X86::LogicalAddress);
+    ValueWithShadow<u32> read_memory32(X86::LogicalAddress);
 
     template<typename T>
-    T read_memory(X86::LogicalAddress address)
+    ValueWithShadow<T> read_memory(X86::LogicalAddress address)
     {
         if constexpr (sizeof(T) == 1)
             return read_memory8(address);
@@ -348,12 +362,12 @@ public:
         return read_memory32(address);
     }
 
-    void write_memory8(X86::LogicalAddress, u8);
-    void write_memory16(X86::LogicalAddress, u16);
-    void write_memory32(X86::LogicalAddress, u32);
+    void write_memory8(X86::LogicalAddress, ValueWithShadow<u8>);
+    void write_memory16(X86::LogicalAddress, ValueWithShadow<u16>);
+    void write_memory32(X86::LogicalAddress, ValueWithShadow<u32>);
 
     template<typename T>
-    void write_memory(X86::LogicalAddress address, T data)
+    void write_memory(X86::LogicalAddress address, ValueWithShadow<T> data)
     {
         if constexpr (sizeof(T) == 1)
             return write_memory8(address, data);
@@ -896,12 +910,16 @@ private:
     template<typename Op>
     void generic_RM16_imm8(Op, const X86::Instruction&);
     template<typename Op>
+    void generic_RM16_unsigned_imm8(Op, const X86::Instruction&);
+    template<typename Op>
     void generic_RM16_reg16(Op, const X86::Instruction&);
     template<typename Op>
    void generic_RM32_imm32(Op, const X86::Instruction&);
     template<typename Op>
     void generic_RM32_imm8(Op, const X86::Instruction&);
     template<typename Op>
+    void generic_RM32_unsigned_imm8(Op, const X86::Instruction&);
+    template<typename Op>
     void generic_RM32_reg32(Op, const X86::Instruction&);
     template<typename Op>
     void generic_RM8_imm8(Op, const X86::Instruction&);
@@ -935,6 +953,8 @@ private:
     Emulator& m_emulator;
 
     PartAddressableRegister m_gpr[8];
+    PartAddressableRegister m_gpr_shadow[8];
+
     u16 m_segment[8] { 0 };
     u32 m_eflags { 0 };
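// Illustrative sketch, not part of the patch: the register-shadowing scheme SoftCPU adopts
// above, reduced to its core. Every general-purpose register gains a parallel shadow
// register; reads hand back both planes and writes store both, so taint travels with the
// data. The names Shadowed and ShadowedRegisterFile are invented for this sketch only.
#include <cstdint>

template<typename T>
struct Shadowed {
    T value;
    T shadow; // 0x01 in a shadow byte means "this byte has been initialized"

    bool is_uninitialized() const
    {
        for (unsigned i = 0; i < sizeof(T); ++i)
            if (((shadow >> (i * 8)) & 0xff) != 0x01)
                return true;
        return false;
    }
};

class ShadowedRegisterFile {
public:
    Shadowed<uint32_t> read(int index) const { return { m_values[index], m_shadows[index] }; }

    void write(int index, Shadowed<uint32_t> reg)
    {
        m_values[index] = reg.value;
        m_shadows[index] = reg.shadow; // the shadow plane is updated on every write
    }

private:
    uint32_t m_values[8] {};
    uint32_t m_shadows[8] {}; // all-zero shadows: nothing has been initialized yet
};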
diff --git a/DevTools/UserspaceEmulator/SoftMMU.cpp b/DevTools/UserspaceEmulator/SoftMMU.cpp
index 4015e7ee16..5c11b3ea70 100644
--- a/DevTools/UserspaceEmulator/SoftMMU.cpp
+++ b/DevTools/UserspaceEmulator/SoftMMU.cpp
@@ -64,7 +64,7 @@ void SoftMMU::set_tls_region(NonnullOwnPtr<Region> region)
     m_tls_region = move(region);
 }
 
-u8 SoftMMU::read8(X86::LogicalAddress address)
+ValueWithShadow<u8> SoftMMU::read8(X86::LogicalAddress address)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -75,7 +75,7 @@ u8 SoftMMU::read8(X86::LogicalAddress address)
     return region->read8(address.offset() - region->base());
 }
 
-u16 SoftMMU::read16(X86::LogicalAddress address)
+ValueWithShadow<u16> SoftMMU::read16(X86::LogicalAddress address)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -86,7 +86,7 @@ u16 SoftMMU::read16(X86::LogicalAddress address)
     return region->read16(address.offset() - region->base());
 }
 
-u32 SoftMMU::read32(X86::LogicalAddress address)
+ValueWithShadow<u32> SoftMMU::read32(X86::LogicalAddress address)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -97,7 +97,7 @@ u32 SoftMMU::read32(X86::LogicalAddress address)
     return region->read32(address.offset() - region->base());
 }
 
-void SoftMMU::write8(X86::LogicalAddress address, u8 value)
+void SoftMMU::write8(X86::LogicalAddress address, ValueWithShadow<u8> value)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -108,7 +108,7 @@ void SoftMMU::write8(X86::LogicalAddress address, u8 value)
     region->write8(address.offset() - region->base(), value);
 }
 
-void SoftMMU::write16(X86::LogicalAddress address, u16 value)
+void SoftMMU::write16(X86::LogicalAddress address, ValueWithShadow<u16> value)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -119,7 +119,7 @@ void SoftMMU::write16(X86::LogicalAddress address, u16 value)
     region->write16(address.offset() - region->base(), value);
 }
 
-void SoftMMU::write32(X86::LogicalAddress address, u32 value)
+void SoftMMU::write32(X86::LogicalAddress address, ValueWithShadow<u32> value)
 {
     auto* region = find_region(address);
     if (!region) {
@@ -132,14 +132,16 @@ void SoftMMU::write32(X86::LogicalAddress address, u32 value)
 
 void SoftMMU::copy_to_vm(FlatPtr destination, const void* source, size_t size)
 {
+    // FIXME: We should have a way to preserve the shadow data here as well.
     for (size_t i = 0; i < size; ++i)
-        write8({ 0x20, destination + i }, ((const u8*)source)[i]);
+        write8({ 0x20, destination + i }, shadow_wrap_as_initialized(((const u8*)source)[i]));
 }
 
 void SoftMMU::copy_from_vm(void* destination, const FlatPtr source, size_t size)
 {
+    // FIXME: We should have a way to preserve the shadow data here as well.
     for (size_t i = 0; i < size; ++i)
-        ((u8*)destination)[i] = read8({ 0x20, source + i });
+        ((u8*)destination)[i] = read8({ 0x20, source + i }).value();
 }
 
 ByteBuffer SoftMMU::copy_buffer_from_vm(const FlatPtr source, size_t size)
diff --git a/DevTools/UserspaceEmulator/SoftMMU.h b/DevTools/UserspaceEmulator/SoftMMU.h
index 7202b73ab1..854ab97de7 100644
--- a/DevTools/UserspaceEmulator/SoftMMU.h
+++ b/DevTools/UserspaceEmulator/SoftMMU.h
@@ -26,6 +26,7 @@
 
 #pragma once
 
+#include "ValueWithShadow.h"
 #include
 #include
 #include
@@ -48,13 +49,13 @@ public:
 
         bool contains(u32 address) const { return address >= base() && address < end(); }
 
-        virtual void write8(u32 offset, u8 value) = 0;
-        virtual void write16(u32 offset, u16 value) = 0;
-        virtual void write32(u32 offset, u32 value) = 0;
+        virtual void write8(u32 offset, ValueWithShadow<u8>) = 0;
+        virtual void write16(u32 offset, ValueWithShadow<u16>) = 0;
+        virtual void write32(u32 offset, ValueWithShadow<u32>) = 0;
 
-        virtual u8 read8(u32 offset) = 0;
-        virtual u16 read16(u32 offset) = 0;
-        virtual u32 read32(u32 offset) = 0;
+        virtual ValueWithShadow<u8> read8(u32 offset) = 0;
+        virtual ValueWithShadow<u16> read16(u32 offset) = 0;
+        virtual ValueWithShadow<u32> read32(u32 offset) = 0;
 
         virtual u8* cacheable_ptr([[maybe_unused]] u32 offset) { return nullptr; }
         virtual bool is_shared_buffer() const { return false; }
@@ -81,13 +82,13 @@ public:
         bool m_text { false };
     };
 
-    u8 read8(X86::LogicalAddress);
-    u16 read16(X86::LogicalAddress);
-    u32 read32(X86::LogicalAddress);
+    ValueWithShadow<u8> read8(X86::LogicalAddress);
+    ValueWithShadow<u16> read16(X86::LogicalAddress);
+    ValueWithShadow<u32> read32(X86::LogicalAddress);
 
-    void write8(X86::LogicalAddress, u8);
-    void write16(X86::LogicalAddress, u16);
-    void write32(X86::LogicalAddress, u32);
+    void write8(X86::LogicalAddress, ValueWithShadow<u8>);
+    void write16(X86::LogicalAddress, ValueWithShadow<u16>);
+    void write32(X86::LogicalAddress, ValueWithShadow<u32>);
 
     Region* find_region(X86::LogicalAddress);
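// Illustrative sketch, not part of the patch: what a caller of the reworked SoftMMU API
// deals with now that reads return ValueWithShadow instead of plain integers. The helper
// name read32_and_check is hypothetical; in the real emulator such checks live in SoftCPU
// rather than at the MMU boundary, and callers that only want the data call .value().
#include "SoftMMU.h"

namespace UserspaceEmulator {

inline u32 read32_and_check(SoftMMU& mmu, X86::LogicalAddress address, bool& out_was_uninitialized)
{
    auto value = mmu.read32(address);                 // ValueWithShadow<u32>, not a bare u32
    out_was_uninitialized = value.is_uninitialized(); // any shadow byte != 0x01 taints the whole read
    return value.value();                             // unwrap explicitly when only the data matters
}

}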
diff --git a/DevTools/UserspaceEmulator/ValueWithShadow.h b/DevTools/UserspaceEmulator/ValueWithShadow.h
new file mode 100644
index 0000000000..a30aa62bb1
--- /dev/null
+++ b/DevTools/UserspaceEmulator/ValueWithShadow.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2020, Andreas Kling
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+
+#pragma once
+
+namespace UserspaceEmulator {
+
+template<typename T>
+class ValueAndShadowReference;
+
+template<typename T>
+class ValueWithShadow {
+public:
+    using ValueType = T;
+
+    ValueWithShadow(T value, T shadow)
+        : m_value(value)
+        , m_shadow(shadow)
+    {
+    }
+
+    ValueWithShadow(const ValueAndShadowReference<T>&);
+
+    T value() const { return m_value; }
+    T shadow() const { return m_shadow; }
+
+    bool is_uninitialized() const
+    {
+        if constexpr (sizeof(T) == 4)
+            return (m_shadow & 0x01010101) != 0x01010101;
+        if constexpr (sizeof(T) == 2)
+            return (m_shadow & 0x0101) != 0x0101;
+        if constexpr (sizeof(T) == 1)
+            return (m_shadow & 0x01) != 0x01;
+    }
+
+private:
+    T m_value;
+    T m_shadow;
+};
+
+template<typename T>
+class ValueAndShadowReference {
+public:
+    using ValueType = T;
+
+    ValueAndShadowReference(T& value, T& shadow)
+        : m_value(value)
+        , m_shadow(shadow)
+    {
+    }
+
+    bool is_uninitialized() const
+    {
+        if constexpr (sizeof(T) == 4)
+            return (m_shadow & 0x01010101) != 0x01010101;
+        if constexpr (sizeof(T) == 2)
+            return (m_shadow & 0x0101) != 0x0101;
+        if constexpr (sizeof(T) == 1)
+            return (m_shadow & 0x01) != 0x01;
+    }
+
+    void operator=(const ValueWithShadow<T>&);
+
+    T& value() { return m_value; }
+    T& shadow() { return m_shadow; }
+
+    const T& value() const { return m_value; }
+    const T& shadow() const { return m_shadow; }
+
+private:
+    T& m_value;
+    T& m_shadow;
+};
+
+template<typename T>
+ALWAYS_INLINE ValueWithShadow<T> shadow_wrap_as_initialized(T value)
+{
+    if constexpr (sizeof(T) == 4)
+        return { value, 0x01010101 };
+    if constexpr (sizeof(T) == 2)
+        return { value, 0x0101 };
+    if constexpr (sizeof(T) == 1)
+        return { value, 0x01 };
+}
+
+template<typename T, typename U>
+ALWAYS_INLINE ValueWithShadow<T> shadow_wrap_with_taint_from(T value, const U& taint_a)
+{
+    if (taint_a.is_uninitialized())
+        return { value, 0 };
+    return shadow_wrap_as_initialized(value);
+}
+
+template<typename T, typename U, typename V>
+ALWAYS_INLINE ValueWithShadow<T> shadow_wrap_with_taint_from(T value, const U& taint_a, const V& taint_b)
+{
+    if (taint_a.is_uninitialized() || taint_b.is_uninitialized())
+        return { value, 0 };
+    return shadow_wrap_as_initialized(value);
+}
+
+template<typename T, typename U, typename V, typename X>
+ALWAYS_INLINE ValueWithShadow<T> shadow_wrap_with_taint_from(T value, const U& taint_a, const V& taint_b, const X& taint_c)
+{
+    if (taint_a.is_uninitialized() || taint_b.is_uninitialized() || taint_c.is_uninitialized())
+        return { value, 0 };
+    return shadow_wrap_as_initialized(value);
+}
+
+template<typename T>
+inline ValueWithShadow<T>::ValueWithShadow(const ValueAndShadowReference<T>& other)
+    : m_value(other.value())
+    , m_shadow(other.shadow())
+{
+}
+
+template<typename T>
+inline void ValueAndShadowReference<T>::operator=(const ValueWithShadow<T>& other)
+{
+    m_value = other.value();
+    m_shadow = other.shadow();
+}
+
+}
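// Usage sketch, not part of the patch: how the helpers in ValueWithShadow.h compose for an
// ALU-style operation. The function names add8 and example are invented here, and the
// snippet assumes the SerenityOS/AK build environment (the u8 typedef, ALWAYS_INLINE).
#include "ValueWithShadow.h"

namespace UserspaceEmulator {

// Model of an 8-bit ADD: the numeric result is computed from the raw values, and the
// result counts as initialized only if *both* inputs were initialized.
inline ValueWithShadow<u8> add8(ValueWithShadow<u8> dest, ValueWithShadow<u8> src)
{
    u8 result = dest.value() + src.value();
    return shadow_wrap_with_taint_from(result, dest, src);
}

inline void example()
{
    auto a = shadow_wrap_as_initialized<u8>(0x10); // fully initialized byte
    ValueWithShadow<u8> b { 0x2a, 0x00 };          // has a value, but was never initialized
    auto sum = add8(a, b);
    // sum.value() == 0x3a, yet sum.is_uninitialized() is true, so a later decision made
    // from it (a conditional jump, a syscall argument) can be flagged by the emulator.
    (void)sum;
}

}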
diff --git a/Libraries/LibX86/Instruction.h b/Libraries/LibX86/Instruction.h
index 50f88822b8..6d3fd98093 100644
--- a/Libraries/LibX86/Instruction.h
+++ b/Libraries/LibX86/Instruction.h
@@ -348,19 +348,19 @@ public:
     RegisterIndex16 reg16() const { return static_cast<RegisterIndex16>(register_index()); }
     RegisterIndex8 reg8() const { return static_cast<RegisterIndex8>(register_index()); }
 
-    template<typename CPU>
-    void write8(CPU&, const Instruction&, u8);
-    template<typename CPU>
-    void write16(CPU&, const Instruction&, u16);
-    template<typename CPU>
-    void write32(CPU&, const Instruction&, u32);
+    template<typename CPU, typename T>
+    void write8(CPU&, const Instruction&, T);
+    template<typename CPU, typename T>
+    void write16(CPU&, const Instruction&, T);
+    template<typename CPU, typename T>
+    void write32(CPU&, const Instruction&, T);
 
-    template<typename CPU>
-    u8 read8(CPU&, const Instruction&);
-    template<typename CPU>
-    u16 read16(CPU&, const Instruction&);
-    template<typename CPU>
-    u32 read32(CPU&, const Instruction&);
+    template<typename CPU, typename T>
+    T read8(CPU&, const Instruction&);
+    template<typename CPU, typename T>
+    T read16(CPU&, const Instruction&);
+    template<typename CPU, typename T>
+    T read32(CPU&, const Instruction&);
 
     template<typename CPU>
     LogicalAddress resolve(const CPU&, const Instruction&);
@@ -519,35 +519,35 @@ ALWAYS_INLINE LogicalAddress MemoryOrRegisterReference::resolve16(const CPU& cpu
 
     switch (m_rm & 7) {
     case 0:
-        offset = cpu.bx() + cpu.si() + m_displacement16;
+        offset = cpu.bx().value() + cpu.si().value() + m_displacement16;
         break;
     case 1:
-        offset = cpu.bx() + cpu.di() + m_displacement16;
+        offset = cpu.bx().value() + cpu.di().value() + m_displacement16;
         break;
     case 2:
         default_segment = SegmentRegister::SS;
-        offset = cpu.bp() + cpu.si() + m_displacement16;
+        offset = cpu.bp().value() + cpu.si().value() + m_displacement16;
         break;
     case 3:
         default_segment = SegmentRegister::SS;
-        offset = cpu.bp() + cpu.di() + m_displacement16;
+        offset = cpu.bp().value() + cpu.di().value() + m_displacement16;
         break;
     case 4:
-        offset = cpu.si() + m_displacement16;
+        offset = cpu.si().value() + m_displacement16;
         break;
     case 5:
-        offset = cpu.di() + m_displacement16;
+        offset = cpu.di().value() + m_displacement16;
         break;
     case 6:
         if ((m_rm & 0xc0) == 0)
             offset = m_displacement16;
         else {
             default_segment = SegmentRegister::SS;
-            offset = cpu.bp() + m_displacement16;
+            offset = cpu.bp().value() + m_displacement16;
         }
         break;
     default:
-        offset = cpu.bx() + m_displacement16;
+        offset = cpu.bx().value() + m_displacement16;
         break;
     }
@@ -563,25 +563,25 @@ ALWAYS_INLINE LogicalAddress MemoryOrRegisterReference::resolve32(const CPU& cpu
 
     switch (m_rm & 0x07) {
     case 0:
-        offset = cpu.eax() + m_displacement32;
+        offset = cpu.eax().value() + m_displacement32;
         break;
     case 1:
-        offset = cpu.ecx() + m_displacement32;
+        offset = cpu.ecx().value() + m_displacement32;
         break;
     case 2:
-        offset = cpu.edx() + m_displacement32;
+        offset = cpu.edx().value() + m_displacement32;
         break;
     case 3:
-        offset = cpu.ebx() + m_displacement32;
+        offset = cpu.ebx().value() + m_displacement32;
         break;
     case 4:
         offset = evaluate_sib(cpu, default_segment);
         break;
     case 6:
-        offset = cpu.esi() + m_displacement32;
+        offset = cpu.esi().value() + m_displacement32;
         break;
     case 7:
-        offset = cpu.edi() + m_displacement32;
+        offset = cpu.edi().value() + m_displacement32;
         break;
     default: // 5
         if ((m_rm & 0xc0) == 0x00) {
@@ -589,7 +589,7 @@ ALWAYS_INLINE LogicalAddress MemoryOrRegisterReference::resolve32(const CPU& cpu
             break;
         } else {
             default_segment = SegmentRegister::SS;
-            offset = cpu.ebp() + m_displacement32;
+            offset = cpu.ebp().value() + m_displacement32;
             break;
         }
         break;
@@ -619,54 +619,54 @@ ALWAYS_INLINE u32 MemoryOrRegisterReference::evaluate_sib(const CPU& cpu, Segmen
     u32 index = 0;
     switch ((m_sib >> 3) & 0x07) {
    case 0:
-        index = cpu.eax();
+        index = cpu.eax().value();
         break;
     case 1:
-        index = cpu.ecx();
+        index = cpu.ecx().value();
         break;
     case 2:
-        index = cpu.edx();
+        index = cpu.edx().value();
         break;
     case 3:
-        index = cpu.ebx();
+        index = cpu.ebx().value();
         break;
     case 4:
         index = 0;
         break;
     case 5:
-        index = cpu.ebp();
+        index = cpu.ebp().value();
         break;
     case 6:
-        index = cpu.esi();
+        index = cpu.esi().value();
         break;
     case 7:
-        index = cpu.edi();
+        index = cpu.edi().value();
         break;
     }
 
     u32 base = m_displacement32;
     switch (m_sib & 0x07) {
     case 0:
-        base += cpu.eax();
+        base += cpu.eax().value();
         break;
     case 1:
-        base += cpu.ecx();
+        base += cpu.ecx().value();
         break;
     case 2:
-        base += cpu.edx();
+        base += cpu.edx().value();
         break;
     case 3:
-        base += cpu.ebx();
+        base += cpu.ebx().value();
         break;
     case 4:
         default_segment = SegmentRegister::SS;
-        base += cpu.esp();
+        base += cpu.esp().value();
         break;
     case 6:
-        base += cpu.esi();
+        base += cpu.esi().value();
         break;
     case 7:
-        base += cpu.edi();
+        base += cpu.edi().value();
         break;
     default: // 5
         switch ((m_rm >> 6) & 3) {
@@ -675,7 +675,7 @@ ALWAYS_INLINE u32 MemoryOrRegisterReference::evaluate_sib(const CPU& cpu, Segmen
         case 1:
         case 2:
             default_segment = SegmentRegister::SS;
-            base += cpu.ebp();
+            base += cpu.ebp().value();
             break;
         default:
            ASSERT_NOT_REACHED();
@@ -687,8 +687,8 @@ ALWAYS_INLINE u32 MemoryOrRegisterReference::evaluate_sib(const CPU& cpu, Segmen
     return (scale * index) + base;
 }
 
-template<typename CPU>
-ALWAYS_INLINE void MemoryOrRegisterReference::write8(CPU& cpu, const Instruction& insn, u8 value)
+template<typename CPU, typename T>
+ALWAYS_INLINE void MemoryOrRegisterReference::write8(CPU& cpu, const Instruction& insn, T value)
 {
     if (is_register()) {
         cpu.gpr8(reg8()) = value;
@@ -699,8 +699,8 @@ ALWAYS_INLINE void MemoryOrRegisterReference::write8(CPU& cpu, const Instruction
     cpu.write_memory8(address, value);
 }
 
-template<typename CPU>
-ALWAYS_INLINE void MemoryOrRegisterReference::write16(CPU& cpu, const Instruction& insn, u16 value)
+template<typename CPU, typename T>
+ALWAYS_INLINE void MemoryOrRegisterReference::write16(CPU& cpu, const Instruction& insn, T value)
 {
     if (is_register()) {
         cpu.gpr16(reg16()) = value;
@@ -711,8 +711,8 @@ ALWAYS_INLINE void MemoryOrRegisterReference::write16(CPU& cpu, const Instructio
     cpu.write_memory16(address, value);
 }
 
-template<typename CPU>
-ALWAYS_INLINE void MemoryOrRegisterReference::write32(CPU& cpu, const Instruction& insn, u32 value)
+template<typename CPU, typename T>
+ALWAYS_INLINE void MemoryOrRegisterReference::write32(CPU& cpu, const Instruction& insn, T value)
 {
     if (is_register()) {
         cpu.gpr32(reg32()) = value;
@@ -723,31 +723,31 @@ ALWAYS_INLINE void MemoryOrRegisterReference::write32(CPU& cpu, const Instructio
     cpu.write_memory32(address, value);
 }
 
-template<typename CPU>
-ALWAYS_INLINE u8 MemoryOrRegisterReference::read8(CPU& cpu, const Instruction& insn)
+template<typename CPU, typename T>
+ALWAYS_INLINE T MemoryOrRegisterReference::read8(CPU& cpu, const Instruction& insn)
 {
     if (is_register())
-        return cpu.gpr8(reg8());
+        return cpu.const_gpr8(reg8());
 
     auto address = resolve(cpu, insn);
     return cpu.read_memory8(address);
 }
 
-template<typename CPU>
-ALWAYS_INLINE u16 MemoryOrRegisterReference::read16(CPU& cpu, const Instruction& insn)
+template<typename CPU, typename T>
+ALWAYS_INLINE T MemoryOrRegisterReference::read16(CPU& cpu, const Instruction& insn)
 {
     if (is_register())
-        return cpu.gpr16(reg16());
+        return cpu.const_gpr16(reg16());
 
     auto address = resolve(cpu, insn);
     return cpu.read_memory16(address);
 }
 
-template<typename CPU>
-ALWAYS_INLINE u32 MemoryOrRegisterReference::read32(CPU& cpu, const Instruction& insn)
+template<typename CPU, typename T>
+ALWAYS_INLINE T MemoryOrRegisterReference::read32(CPU& cpu, const Instruction& insn)
 {
     if (is_register())
-        return cpu.gpr32(reg32());
+        return cpu.const_gpr32(reg32());
 
     auto address = resolve(cpu, insn);
     return cpu.read_memory32(address);
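// Condensed sketch, not part of the patch: why read8/read16/read32 in LibX86 gained a
// second template parameter. The operand type is now chosen by the caller, so the same
// ModR/M plumbing can return plain integers for a simple host CPU or ValueWithShadow<...>
// for the UserspaceEmulator's SoftCPU. TinyCpu and modrm_read8 are hypothetical names.
#include <cstdint>

struct TinyCpu {
    uint8_t gpr8(int) const { return 0; }
    uint8_t read_memory8(uint32_t) const { return 0; }
};

template<typename CPU, typename T>
T modrm_read8(CPU& cpu, bool is_register, int reg, uint32_t resolved_address)
{
    if (is_register)
        return cpu.gpr8(reg);                       // SoftCPU returns const_gpr8() -> ValueWithShadow<u8>
    return cpu.read_memory8(resolved_address);      // SoftCPU returns ValueWithShadow<u8> here as well
}

// A plain host instantiates T as uint8_t:
//     TinyCpu cpu;
//     uint8_t v = modrm_read8<TinyCpu, uint8_t>(cpu, true, 0, 0);
// while the emulator instantiates T as ValueWithShadow<u8>, so the taint survives the
// trip through the shared instruction decoder. Note that effective-address computation
// above deliberately uses .value() only: the shadow is not consulted for addressing.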