[vm, compiler] Allow targeting TSAN or MSAN by passing a flag to gen_snapshot.

Add a check that the snapshot and the runtime agree on whether to use MSAN. We already have this check for TSAN.
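
For illustration, the intended workflow looks roughly like this (file and
directory names below are examples, not part of the change; the flags and
tools are the ones exercised by the new test):

  # A plain (non-sanitizer) gen_snapshot can now emit MSAN-compatible code:
  out/ReleaseX64/gen_snapshot --snapshot-kind=app-aot-elf \
      --target_memory_sanitizer --elf=app.elf app.dill

  # The resulting snapshot runs on an MSAN build of the AOT runtime:
  out/ReleaseMSANX64/dart_precompiled_runtime app.elf

  # A runtime built without MSAN rejects it with an error beginning
  # "Snapshot not compatible".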

TEST=vm/dart/sanitizer_compatibility_test
Bug: https://github.com/dart-lang/sdk/issues/55637
Bug: https://github.com/dart-lang/sdk/issues/55638
Change-Id: I320e6f55cd59209ce6e58a82ac205a87c8a60a84
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/365487
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Ivan Inozemtsev <iinozemtsev@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
Ryan Macnak 2024-05-08 17:03:59 +00:00 committed by Commit Queue
parent 49e495d48c
commit 8c1de038e0
27 changed files with 403 additions and 294 deletions


@@ -89,6 +89,8 @@ config("dart_precompiler_config") {
 # the same mode (TSAN or non-TSAN).
 if (is_tsan) {
 defines += [ "TARGET_USES_THREAD_SANITIZER" ]
+} else if (is_msan) {
+defines += [ "TARGET_USES_MEMORY_SANITIZER" ]
 }
 }


@@ -35,20 +35,4 @@ extern "C" void __tsan_release(void* addr);
 #define DO_IF_NOT_TSAN(CODE) CODE
 #endif
-// By default TSAN is enabled if this code is compiled under TSAN.
-//
-// Though in our AOT compiler we don't know whether the target AOT runtime will
-// use TSAN or not, so we'll rely on the build rules telling us that
-// information.
-#if defined(USING_THREAD_SANITIZER) && !defined(DART_PRECOMPILER) && \
-!defined(TARGET_USES_THREAD_SANITIZER)
-#define TARGET_USES_THREAD_SANITIZER
-#endif
-#if defined(TARGET_USES_THREAD_SANITIZER)
-constexpr bool kTargetUsesThreadSanitizer = true;
-#else
-constexpr bool kTargetUsesThreadSanitizer = false;
-#endif
 #endif // RUNTIME_PLATFORM_THREAD_SANITIZER_H_


@@ -0,0 +1,91 @@
// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// Check for a proper error when a snapshot and a runtime don't agree on which
// sanitizer they are using.
import "dart:io";
import "package:expect/expect.dart";
import "use_flag_test_helper.dart";
String find(String haystack, List<String> needles) {
for (String needle in needles) {
if (haystack.contains(needle)) {
return needle;
}
}
throw "None of ${needles.join(' ')}";
}
void checkExists(String path) {
if (!File(path).existsSync()) {
throw "$path does not exist";
}
}
main() async {
var sanitizer = find(Platform.executable, ["MSAN", "TSAN"]);
var mode = find(Platform.executable, ["Debug", "Release", "Product"]);
var arch = find(Platform.executable, ["X64", "ARM64", "RISCV64"]);
var out = find(Platform.executable, ["out", "xcodebuild"]);
var targetFlag = {
"MSAN": "--target_memory_sanitizer",
"TSAN": "--target_thread_sanitizer"
}[sanitizer]!;
var nonePlatform = "$out/$mode$arch/vm_platform_strong.dill";
var noneGenSnapshot = "$out/$mode$arch/gen_snapshot";
var noneJitRuntime = "$out/$mode$arch/dart";
var noneAotRuntime = "$out/$mode$arch/dart_precompiled_runtime";
var sanitizerGenSnapshot = "$out/$mode$sanitizer$arch/gen_snapshot";
var sanitizerAotRuntime =
"$out/$mode$sanitizer$arch/dart_precompiled_runtime";
checkExists(noneGenSnapshot);
checkExists(noneJitRuntime);
checkExists(noneAotRuntime);
checkExists(sanitizerGenSnapshot);
checkExists(sanitizerAotRuntime);
await withTempDir('sanitizer-compatibility-test', (String tempDir) async {
var aotDill = "$tempDir/aot.dill";
var noneElf = "$tempDir/none.elf";
var sanitizerElf = "$tempDir/$sanitizer.elf";
var sanitizerElf2 = "$tempDir/${sanitizer}2.elf";
await run(noneJitRuntime, [
"pkg/vm/bin/gen_kernel.dart",
"--platform",
nonePlatform,
"--aot",
"-o",
aotDill,
"tests/language/unsorted/first_test.dart"
]);
await run(noneGenSnapshot,
["--snapshot-kind=app-aot-elf", "--elf=$noneElf", aotDill]);
await run(sanitizerGenSnapshot,
["--snapshot-kind=app-aot-elf", "--elf=$sanitizerElf", aotDill]);
await run(noneGenSnapshot, [
"--snapshot-kind=app-aot-elf",
"--elf=$sanitizerElf2",
targetFlag,
aotDill
]);
await run(noneAotRuntime, [noneElf]);
await run(sanitizerAotRuntime, [sanitizerElf]);
await run(sanitizerAotRuntime, [sanitizerElf2]);
var errorLines = await runError(noneAotRuntime, [sanitizerElf]);
Expect.contains("Snapshot not compatible", errorLines[0]);
errorLines = await runError(noneAotRuntime, [sanitizerElf2]);
Expect.contains("Snapshot not compatible", errorLines[0]);
errorLines = await runError(sanitizerAotRuntime, [noneElf]);
Expect.contains("Snapshot not compatible", errorLines[0]);
});
}
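
For reference, the pairings the test checks can be reproduced by hand (paths
as in the commit message are examples, not fixed):

  # A plain gen_snapshot targeting TSAN produces a snapshot that the TSAN
  # build of the AOT runtime accepts:
  out/ReleaseX64/gen_snapshot --snapshot-kind=app-aot-elf \
      --target_thread_sanitizer --elf=tsan.elf aot.dill
  out/ReleaseTSANX64/dart_precompiled_runtime tsan.elf

  # Feeding the same snapshot to the plain runtime fails with an error
  # beginning "Snapshot not compatible".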


@@ -359,6 +359,9 @@ dart/finalizer/finalizer_isolate_groups_run_gc_test: SkipByDesign # uses spawnUr
 dart/isolates/send_object_to_spawn_uri_isolate_test: SkipByDesign # uses spawnUri
 dart/issue32950_test: SkipByDesign # uses spawnUri.
+[ $runtime != dart_precompiled || $sanitizer != msan && $sanitizer != tsan ]
+dart/sanitizer_compatibility_test: SkipByDesign
 [ $system != macos || $simulator ]
 dart/thread_priority_macos_test: SkipByDesign


@@ -286,7 +286,6 @@ void Assembler::Align(intptr_t alignment, intptr_t offset) {
 ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void Assembler::TsanLoadAcquire(Register addr) {
 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
 MoveRegister(R0, addr);

@@ -298,7 +297,6 @@ void Assembler::TsanStoreRelease(Register addr) {
 MoveRegister(R0, addr);
 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
 }
-#endif
 static int CountLeadingZeros(uint64_t value, int width) {
 if (width == 64) return Utils::CountLeadingZeros64(value);

@@ -1542,7 +1540,7 @@ void Assembler::EnterFullSafepoint(Register state) {
 ASSERT(addr != state);
 Label slow_path, done, retry;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 b(&slow_path);
 }

@@ -1557,7 +1555,7 @@ void Assembler::EnterFullSafepoint(Register state) {
 stxr(TMP, state, addr);
 cbz(&done, TMP); // 0 means stxr was successful.
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 b(&retry);
 }

@@ -1601,7 +1599,7 @@ void Assembler::ExitFullSafepoint(Register state,
 ASSERT(addr != state);
 Label slow_path, done, retry;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 b(&slow_path);
 }

@@ -1616,7 +1614,7 @@ void Assembler::ExitFullSafepoint(Register state,
 stxr(TMP, state, addr);
 cbz(&done, TMP); // 0 means stxr was successful.
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 b(&retry);
 }


@@ -522,10 +522,8 @@ class Assembler : public AssemblerBase {
 StoreToOffset(src, base, offset, kEightBytes);
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void TsanLoadAcquire(Register addr);
 void TsanStoreRelease(Register addr);
-#endif
 void LoadAcquire(Register dst,
 const Address& address,

@@ -538,9 +536,9 @@ class Assembler : public AssemblerBase {
 src = TMP2;
 }
 ldar(dst, src, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 TsanLoadAcquire(src);
-#endif
+}
 }
 #if defined(DART_COMPRESSED_POINTERS)

@@ -561,9 +559,9 @@ class Assembler : public AssemblerBase {
 dst = TMP2;
 }
 stlr(src, dst, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 TsanStoreRelease(dst);
-#endif
+}
 }
 void CompareWithMemoryValue(Register value,


@@ -7559,7 +7559,7 @@ ASSEMBLER_TEST_RUN(CompareImmediate32Negative, test) {
 }
 // can't call (tsan) runtime methods
-#if !defined(TARGET_USES_THREAD_SANITIZER)
+#if !defined(USING_THREAD_SANITIZER)
 ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
 __ SetupDartSP();

@@ -7636,7 +7636,7 @@ ASSEMBLER_TEST_RUN(StoreReleaseLoadAcquire1024, test) {
 "mov csp, sp\n"
 "ret\n");
 }
-#endif // !defined(TARGET_USES_THREAD_SANITIZER)
+#endif // !defined(USING_THREAD_SANITIZER)
 static void RangeCheck(Assembler* assembler, Register value, Register temp) {
 const Register return_reg = CallingConventions::kReturnReg;


@@ -2229,11 +2229,11 @@ void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
 // We don't run TSAN on 32 bit systems.
 // Don't call StoreRelease here because we would have to load the immediate
 // into a temp register which causes spilling.
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 if (memory_order == kRelease) {
 UNIMPLEMENTED();
+}
 }
-#endif
 if (target::CanEmbedAsRawPointerInGeneratedCode(value)) {
 Immediate imm_value(target::ToRawPointer(value));
 movl(dest, imm_value);


@@ -674,9 +674,9 @@ class Assembler : public AssemblerBase {
 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
 // with other loads).
 Load(dst, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
-#error No support for TSAN on IA32.
-#endif
+if (FLAG_target_thread_sanitizer) {
+FATAL("No support for TSAN on IA32.");
+}
 }
 void StoreRelease(Register src,
 const Address& address,

@@ -684,9 +684,9 @@ class Assembler : public AssemblerBase {
 // On intel stores have store-release behavior (i.e. stores are not
 // re-ordered with other stores).
 Store(src, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
-#error No support for TSAN on IA32.
-#endif
+if (FLAG_target_thread_sanitizer) {
+FATAL("No support for TSAN on IA32.");
+}
 }
 void CompareWithMemoryValue(Register value,


@@ -2567,7 +2567,6 @@ void Assembler::Jump(const Address& address) {
 jr(TMP2);
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void Assembler::TsanLoadAcquire(Register addr) {
 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
 MoveRegister(A0, addr);

@@ -2578,7 +2577,6 @@ void Assembler::TsanStoreRelease(Register addr) {
 MoveRegister(A0, addr);
 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
 }
-#endif
 void Assembler::LoadAcquire(Register dst,
 const Address& address,

@@ -2587,14 +2585,14 @@ void Assembler::LoadAcquire(Register dst,
 Load(dst, address, size);
 fence(HartEffects::kRead, HartEffects::kMemory);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 if (address.offset() == 0) {
 TsanLoadAcquire(address.base());
 } else {
 AddImmediate(TMP2, address.base(), address.offset());
 TsanLoadAcquire(TMP2);
 }
+}
 }
-#endif
 }
 void Assembler::StoreRelease(Register src,

@@ -3773,7 +3771,7 @@ void Assembler::EnterFullSafepoint(Register state) {
 ASSERT(addr != state);
 Label slow_path, done, retry;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 j(&slow_path, Assembler::kNearJump);
 }

@@ -3787,7 +3785,7 @@ void Assembler::EnterFullSafepoint(Register state) {
 sc(state, state, Address(addr, 0));
 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 j(&retry, Assembler::kNearJump);
 }

@@ -3809,7 +3807,7 @@ void Assembler::ExitFullSafepoint(Register state,
 ASSERT(addr != state);
 Label slow_path, done, retry;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 j(&slow_path, Assembler::kNearJump);
 }

@@ -3823,7 +3821,7 @@ void Assembler::ExitFullSafepoint(Register state,
 sc(state, state, Address(addr, 0));
 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 j(&retry, Assembler::kNearJump);
 }


@@ -873,10 +873,8 @@ class Assembler : public MicroAssembler {
 StoreToOffset(src, base, offset, kWordBytes);
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void TsanLoadAcquire(Register addr);
 void TsanStoreRelease(Register addr);
-#endif
 void LoadAcquire(Register dst,
 const Address& address,


@@ -148,7 +148,7 @@ void Assembler::EnterFullSafepoint() {
 // For TSAN, we always go to the runtime so TSAN is aware of the release
 // semantics of entering the safepoint.
 Label done, slow_path;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 jmp(&slow_path);
 }

@@ -162,7 +162,7 @@ void Assembler::EnterFullSafepoint() {
 popq(RAX);
 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 j(EQUAL, &done);
 }

@@ -205,7 +205,7 @@ void Assembler::ExitFullSafepoint(bool ignore_unwind_in_progress) {
 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
 // semantics of leaving the safepoint.
 Label done, slow_path;
-if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
+if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
 jmp(&slow_path);
 }

@@ -221,7 +221,7 @@ void Assembler::ExitFullSafepoint(bool ignore_unwind_in_progress) {
 popq(RAX);
 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
-if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
+if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
 j(EQUAL, &done);
 }

@@ -2023,7 +2023,6 @@ LeafRuntimeScope::~LeafRuntimeScope() {
 __ LeaveFrame();
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void Assembler::TsanLoadAcquire(Address addr) {
 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
 leaq(CallingConventions::kArg1Reg, addr);

@@ -2035,7 +2034,6 @@ void Assembler::TsanStoreRelease(Address addr) {
 leaq(CallingConventions::kArg1Reg, addr);
 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
 }
-#endif
 void Assembler::RestoreCodePointer() {
 movq(CODE_REG,


@@ -1102,10 +1102,8 @@ class Assembler : public AssemblerBase {
 }
 }
-#if defined(TARGET_USES_THREAD_SANITIZER)
 void TsanLoadAcquire(Address addr);
 void TsanStoreRelease(Address addr);
-#endif
 void LoadAcquire(Register dst,
 const Address& address,

@@ -1113,18 +1111,18 @@ class Assembler : public AssemblerBase {
 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
 // with other loads).
 Load(dst, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 TsanLoadAcquire(address);
-#endif
+}
 }
 #if defined(DART_COMPRESSED_POINTERS)
 void LoadAcquireCompressed(Register dst, const Address& address) override {
 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
 // with other loads).
 LoadCompressed(dst, address);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 TsanLoadAcquire(address);
-#endif
+}
 }
 #endif
 void StoreRelease(Register src,

@@ -1133,9 +1131,9 @@ class Assembler : public AssemblerBase {
 // On intel stores have store-release behavior (i.e. stores are not
 // re-ordered with other stores).
 Store(src, address, size);
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 TsanStoreRelease(address);
-#endif
+}
 }
 void CompareWithMemoryValue(Register value,


@@ -6227,11 +6227,11 @@ IMMEDIATE_TEST(AddrImmRAXByte,
 __ popq(RAX))
 ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 // On TSAN builds StoreRelease/LoadAcquire will do a runtime
 // call to tell TSAN about our action.
 __ MoveRegister(THR, CallingConventions::kArg2Reg);
-#endif
+}
 __ pushq(RCX);
 __ xorq(RCX, RCX);

@@ -6306,11 +6306,11 @@ ASSEMBLER_TEST_RUN(StoreReleaseLoadAcquire, test) {
 }
 ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire1024, assembler) {
-#if defined(TARGET_USES_THREAD_SANITIZER)
+if (FLAG_target_thread_sanitizer) {
 // On TSAN builds StoreRelease/LoadAcquire will do a runtime
 // call to tell TSAN about our action.
 __ MoveRegister(THR, CallingConventions::kArg2Reg);
-#endif
+}
 __ pushq(RCX);
 __ xorq(RCX, RCX);

@@ -6327,19 +6327,19 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire1024, assembler) {
 ASSEMBLER_TEST_RUN(StoreReleaseLoadAcquire1024, test) {
 const intptr_t res = test->InvokeWithCodeAndThread<intptr_t>(123);
 EXPECT_EQ(123, res);
-#if !defined(TARGET_USES_THREAD_SANITIZER)
+if (!FLAG_target_thread_sanitizer) {
 EXPECT_DISASSEMBLY_NOT_WINDOWS(
 "push rcx\n"
 "xorq rcx,rcx\n"
 "push rcx\n"
 "subq rsp,0x400\n"
 "movq [rsp+0x400],rdx\n"
 "movq rax,[rsp+0x400]\n"
 "addq rsp,0x400\n"
 "pop rcx\n"
 "pop rcx\n"
 "ret\n");
-#endif
+}
 }
 ASSEMBLER_TEST_GENERATE(MoveByteRunTest, assembler) {


@@ -7065,11 +7065,9 @@ void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // additionally verify here that there is an actual overlap. Instead, only
 // do that when we need to calculate the end address of the regions in
 // the loop case.
-#if defined(USING_MEMORY_SANITIZER)
-const auto jump_distance = compiler::Assembler::kFarJump;
-#else
-const auto jump_distance = compiler::Assembler::kNearJump;
-#endif
+const auto jump_distance = FLAG_target_memory_sanitizer
+? compiler::Assembler::kFarJump
+: compiler::Assembler::kNearJump;
 __ BranchIf(UNSIGNED_LESS_EQUAL, &copy_forwards, jump_distance);
 __ Comment("Copying backwards");
 if (constant_length) {

@@ -7169,13 +7167,15 @@ void MemoryCopyInstr::EmitUnrolledCopy(FlowGraphCompiler* compiler,
 }
 }
-#if defined(USING_MEMORY_SANITIZER) && defined(TARGET_ARCH_X64)
+if (FLAG_target_memory_sanitizer) {
+#if defined(TARGET_ARCH_X64)
 RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
 CallingConventions::kVolatileXmmRegisters);
 __ PushRegisters(kVolatileRegisterSet);
 __ MsanUnpoison(dest_reg, num_bytes);
 __ PopRegisters(kVolatileRegisterSet);
 #endif
+}
 }
 #endif


@@ -1788,9 +1788,9 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // Reserve space for the arguments that go on the stack (if any), then align.
 __ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 UNIMPLEMENTED();
-#endif
+}
 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, TMP);


@@ -252,9 +252,9 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
 const bool reversed = copy_forwards != nullptr;
 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
 (unboxed_inputs() ? 0 : kSmiTagShift);
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 __ PushPair(length_reg, dest_reg);
-#endif
+}
 if (reversed) {
 // Verify that the overlap actually exists by checking to see if
 // dest_start < src_end.

@@ -298,18 +298,18 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
 compiler::kObjectBytes);
 __ b(&loop, NOT_ZERO);
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 __ PopPair(length_reg, dest_reg);
 if (!unboxed_inputs()) {
 __ ExtendNonNegativeSmi(length_reg);
 }
 if (shift < 0) {
 __ AsrImmediate(length_reg, length_reg, -shift);
 } else {
 __ LslImmediate(length_reg, length_reg, shift);
 }
 __ MsanUnpoison(dest_reg, length_reg);
-#endif
+}
 }

@@ -1508,8 +1508,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // Reserve space for the arguments that go on the stack (if any), then align.
 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
 __ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
-{
+if (FLAG_target_memory_sanitizer) {
 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs & ~(1 << SP),
 kAbiVolatileFpuRegs);
 __ mov(temp1, SP);

@@ -1519,7 +1518,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // reserved for outgoing arguments and the spills which might have
 // been generated by the register allocator. Some of these spill slots
 // can be used as handles passed down to the runtime.
-__ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, temp1);
+__ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, compiler::Operand(temp1));
 __ MsanUnpoison(temp1, R1);
 // Incoming Dart arguments to this trampoline are potentially used as local

@@ -1534,7 +1533,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 RESTORES_LR_FROM_FRAME(__ PopRegisters(kVolatileRegisterSet));
 }
-#endif
 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);

@@ -2282,20 +2280,20 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 UNREACHABLE();
 }
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 if (index.IsRegister()) {
 __ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
 index_scale(), index_unboxed_, array,
 index.reg());
 } else {
 __ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
 index_scale(), array,
 Smi::Cast(index.constant()).Value());
 }
 const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
 RepresentationUtils::RepresentationOfArrayElement(class_id()));
 __ MsanUnpoison(TMP, length_in_bytes);
-#endif
+}
 }
 static void LoadValueCid(FlowGraphCompiler* compiler,


@@ -1241,9 +1241,9 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // Reserve space for the arguments that go on the stack (if any), then align.
 __ ReserveAlignedFrameSpace(stack_required);
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 UNIMPLEMENTED();
-#endif
+}
 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp,
 locs()->temp(1).reg());


@@ -1653,8 +1653,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // Reserve space for the arguments that go on the stack (if any), then align.
 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
 __ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
-{
+if (FLAG_target_memory_sanitizer) {
 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs, kAbiVolatileFpuRegs);
 __ mv(temp1, SP);
 __ PushRegisters(kVolatileRegisterSet);

@@ -1679,7 +1678,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 __ PopRegisters(kVolatileRegisterSet);
 }
-#endif
 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);

@@ -2544,9 +2542,9 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 UNREACHABLE();
 }
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 UNIMPLEMENTED();
-#endif
+}
 }
 static void LoadValueCid(FlowGraphCompiler* compiler,


@@ -236,11 +236,9 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
 const ScaleFactor scale = ToScaleFactor(mov_size, /*index_unboxed=*/true);
 __ leaq(TMP, compiler::Address(src_reg, length_reg, scale, -mov_size));
 __ CompareRegisters(dest_reg, TMP);
-#if defined(USING_MEMORY_SANITIZER)
-const auto jump_distance = compiler::Assembler::kFarJump;
-#else
-const auto jump_distance = compiler::Assembler::kNearJump;
-#endif
+const auto jump_distance = FLAG_target_memory_sanitizer
+? compiler::Assembler::kFarJump
+: compiler::Assembler::kNearJump;
 __ BranchIf(UNSIGNED_GREATER, copy_forwards, jump_distance);
 // The backwards move must be performed, so move TMP -> src_reg and do the
 // same adjustment for dest_reg.

@@ -249,18 +247,18 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
 compiler::Address(dest_reg, length_reg, scale, -mov_size));
 __ std();
 }
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 // For reversed, do the `rep` first. It sets `dest_reg` to the start again.
 // For forward, do the unpoisining first, before `dest_reg` is modified.
 __ movq(TMP, length_reg);
 if (mov_size != 1) {
 // Unpoison takes the length in bytes.
 __ MulImmediate(TMP, mov_size);
 }
 if (!reversed) {
 __ MsanUnpoison(dest_reg, TMP);
 }
-#endif
+}
 switch (mov_size) {
 case 1:
 __ rep_movsb();

@@ -281,11 +279,11 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
 __ cld();
 }
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 if (reversed) {
 __ MsanUnpoison(dest_reg, TMP);
 }
-#endif
+}
 }
 void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,

@@ -1430,8 +1428,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 // Reserve space for the arguments that go on the stack (if any), then align.
 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
 __ ReserveAlignedFrameSpace(stack_space);
-#if defined(USING_MEMORY_SANITIZER)
-{
+if (FLAG_target_memory_sanitizer) {
 RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
 CallingConventions::kVolatileXmmRegisters);
 __ movq(temp, RSP);

@@ -1457,7 +1454,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 __ PopRegisters(kVolatileRegisterSet);
 }
-#endif
 if (is_leaf_) {
 EmitParamMoves(compiler, FPREG, saved_fp, TMP);

@@ -2259,12 +2255,12 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
 UNREACHABLE();
 }
-#if defined(USING_MEMORY_SANITIZER)
+if (FLAG_target_memory_sanitizer) {
 __ leaq(TMP, element_address);
 const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
 RepresentationUtils::RepresentationOfArrayElement(class_id()));
 __ MsanUnpoison(TMP, length_in_bytes);
-#endif
+}
 }
 LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,


@@ -72,7 +72,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
 // [Thread::tsan_utils_->setjmp_buffer_]).
 static void WithExceptionCatchingTrampoline(Assembler* assembler,
 std::function<void()> fun) {
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
 const Register kTsanUtilsReg = R3;
 // Reserve space for arguments and align frame before entering C++ world.

@@ -87,69 +87,77 @@ static void WithExceptionCatchingTrampoline(Assembler* assembler,
 // We rely on THR being preserved across the setjmp() call.
 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
-Label do_native_call;
+if (FLAG_target_memory_sanitizer) {
+Label do_native_call;
 // Save old jmp_buf.
 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ ldr(TMP,
 Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
 __ Push(TMP);
 // Allocate jmp_buf struct on stack & remember pointer to it on the
 // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
 // to)
 __ AddImmediate(SP, -kJumpBufferSize);
-__ str(SP, Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
+__ str(SP,
+Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
 // Call setjmp() with a pointer to the allocated jmp_buf struct.
 __ MoveRegister(R0, SP);
 __ PushRegisters(volatile_registers);
 __ EnterCFrame(0);
 __ mov(R25, CSP);
 __ mov(CSP, SP);
 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ CallCFunction(
 Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()));
 __ mov(SP, CSP);
 __ mov(CSP, R25);
 __ LeaveCFrame();
 __ PopRegisters(volatile_registers);
 // We are the target of a longjmp() iff setjmp() returns non-0.
 __ cbz(&do_native_call, R0);
 // We are the target of a longjmp: Cleanup the stack and tail-call the
 // JumpToFrame stub which will take care of unwinding the stack and hand
 // execution to the catch entry.
 __ AddImmediate(SP, kJumpBufferSize);
 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ Pop(TMP);
 __ str(TMP,
 Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
-__ ldr(R0, Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
-__ ldr(R1, Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
-__ ldr(R2, Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
+__ ldr(R0,
+Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
+__ ldr(R1,
+Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
+__ ldr(R2,
+Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
 __ MoveRegister(R3, THR);
 __ Jump(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
 // We leave the created [jump_buf] structure on the stack as well as the
 // pushed old [Thread::tsan_utils_->setjmp_buffer_].
 __ Bind(&do_native_call);
 __ MoveRegister(kSavedRspReg, SP);
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+}
+#endif // !defined(USING_SIMULATOR)
 fun();
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
+if (FLAG_target_memory_sanitizer) {
 __ MoveRegister(SP, kSavedRspReg);
 __ AddImmediate(SP, kJumpBufferSize);
 const Register kTsanUtilsReg2 = kSavedRspReg;
 __ ldr(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
 __ Pop(TMP);
 __ str(TMP,
 Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+}
+#endif // !defined(USING_SIMULATOR)
 }
 // Input parameters:


@@ -74,7 +74,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered() {
 // [Thread::tsan_utils_->setjmp_buffer_]).
 static void WithExceptionCatchingTrampoline(Assembler* assembler,
 std::function<void()> fun) {
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
 const Register kTsanUtilsReg = RAX;
 // Reserve space for arguments and align frame before entering C++ world.

@@ -89,70 +89,74 @@ static void WithExceptionCatchingTrampoline(Assembler* assembler,
 // We rely on THR being preserved across the setjmp() call.
 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
-Label do_native_call;
+if (FLAG_target_thread_sanitizer) {
+Label do_native_call;
 // Save old jmp_buf.
 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ pushq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
 // Allocate jmp_buf struct on stack & remember pointer to it on the
 // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
 // to)
 __ AddImmediate(RSP, Immediate(-kJumpBufferSize));
 __ movq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()),
 RSP);
 // Call setjmp() with a pointer to the allocated jmp_buf struct.
 __ MoveRegister(CallingConventions::kArg1Reg, RSP);
 __ PushRegisters(volatile_registers);
 if (OS::ActivationFrameAlignment() > 1) {
 __ MoveRegister(kSavedRspReg, RSP);
 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
 }
 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ CallCFunction(
 Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()),
 /*restore_rsp=*/true);
 if (OS::ActivationFrameAlignment() > 1) {
 __ MoveRegister(RSP, kSavedRspReg);
 }
 __ PopRegisters(volatile_registers);
 // We are the target of a longjmp() iff setjmp() returns non-0.
 __ CompareImmediate(RAX, 0);
 __ BranchIf(EQUAL, &do_native_call);
 // We are the target of a longjmp: Cleanup the stack and tail-call the
 // JumpToFrame stub which will take care of unwinding the stack and hand
 // execution to the catch entry.
 __ AddImmediate(RSP, Immediate(kJumpBufferSize));
 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
 __ popq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
 __ movq(CallingConventions::kArg1Reg,
 Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
 __ movq(CallingConventions::kArg2Reg,
 Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
 __ movq(CallingConventions::kArg3Reg,
 Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
 __ MoveRegister(CallingConventions::kArg4Reg, THR);
 __ jmp(Address(THR, target::Thread::jump_to_frame_entry_point_offset()));
 // We leave the created [jump_buf] structure on the stack as well as the
 // pushed old [Thread::tsan_utils_->setjmp_buffer_].
 __ Bind(&do_native_call);
 __ MoveRegister(kSavedRspReg, RSP);
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+}
+#endif // !defined(USING_SIMULATOR)
 fun();
-#if defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+#if !defined(USING_SIMULATOR)
+if (FLAG_target_thread_sanitizer) {
 __ MoveRegister(RSP, kSavedRspReg);
 __ AddImmediate(RSP, Immediate(kJumpBufferSize));
 const Register kTsanUtilsReg2 = kSavedRspReg;
 __ movq(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
 __ popq(Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
-#endif // defined(TARGET_USES_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
+}
+#endif // !defined(USING_SIMULATOR)
 }
 // Input parameters:


@@ -288,7 +288,7 @@ ISOLATE_UNIT_TEST_CASE(EvalExpressionExhaustCIDs) {
 }
 // Too slow in debug mode.
-#if !defined(DEBUG) && !defined(TARGET_USES_THREAD_SANITIZER)
+#if !defined(DEBUG) && !defined(USING_THREAD_SANITIZER)
 TEST_CASE(ManyClasses) {
 // Limit is 20 bits. Check only more than 16 bits so test completes in
 // reasonable time.

@@ -312,6 +312,6 @@ TEST_CASE(ManyClasses) {
 EXPECT(IsolateGroup::Current()->class_table()->NumCids() >= kNumClasses);
 }
-#endif // !defined(DEBUG) && !defined(TARGET_USES_THREAD_SANITIZER)
+#endif // !defined(DEBUG) && !defined(USING_THREAD_SANITIZER)
 } // namespace dart


@@ -1028,7 +1028,8 @@ char* Dart::FeaturesString(IsolateGroup* isolate_group,
 if (Snapshot::IncludesCode(kind)) {
 VM_GLOBAL_FLAG_LIST(ADD_P, ADD_R, ADD_C, ADD_D);
-ADD_FLAG(tsan, kTargetUsesThreadSanitizer)
+ADD_FLAG(tsan, FLAG_target_thread_sanitizer)
+ADD_FLAG(msan, FLAG_target_memory_sanitizer)
 if (kind == Snapshot::kFullJIT) {
 // Enabling assertions affects deopt ids.


@@ -72,6 +72,25 @@ FLAG_LIST(PRODUCT_FLAG_MACRO,
 #undef PRECOMPILE_FLAG_MACRO
 #undef DEBUG_FLAG_MACRO
+#if defined(DART_PRECOMPILER)
+DEFINE_FLAG(bool,
+target_thread_sanitizer,
+#if defined(TARGET_USES_THREAD_SANITIZER)
+true,
+#else
+false,
+#endif
+"Generate Dart code compatible with Thread Sanitizer");
+DEFINE_FLAG(bool,
+target_memory_sanitizer,
+#if defined(TARGET_USES_MEMORY_SANITIZER)
+true,
+#else
+false,
+#endif
+"Generate Dart code compatible with Memory Sanitizer");
+#endif
 bool Flags::initialized_ = false;
 // List of registered flags.


@@ -159,6 +159,22 @@ FLAG_LIST(PRODUCT_FLAG_MACRO,
 #undef PRODUCT_FLAG_MACRO
 #undef PRECOMPILE_FLAG_MACRO
+#if defined(DART_PRECOMPILER)
+DECLARE_FLAG(bool, target_thread_sanitizer);
+DECLARE_FLAG(bool, target_memory_sanitizer);
+#else
+#if defined(USING_THREAD_SANITIZER)
+constexpr bool FLAG_target_thread_sanitizer = true;
+#else
+constexpr bool FLAG_target_thread_sanitizer = false;
+#endif
+#if defined(USING_MEMORY_SANITIZER)
+constexpr bool FLAG_target_memory_sanitizer = true;
+#else
+constexpr bool FLAG_target_memory_sanitizer = false;
+#endif
+#endif
 } // namespace dart
 #endif // RUNTIME_VM_FLAGS_H_


@@ -1289,6 +1289,7 @@
 "name": "build dart",
 "script": "tools/build.py",
 "arguments": [
+"--sanitizer=none,${sanitizer}",
 "runtime",
 "runtime_precompiled"
 ]