[vm/concurrency] Distinguish "gc safepoint operations" from "deopt safepoint operations"

This extends the existing safepoint operation mechanism by allowing two
different kinds of safepoint operations to be performed:

  * "gc safepoint operations": All mutators are stopped at places where
    it's safe to GC. It therefore requires stackmaps to be available for
    all optimized mutator frames.

  * "deopt safepoint operations": All mutators are stopped at places
    where it's safe to GC, but also safe to lazy-deopt mutator frames.
    It therefore requires deopt-id/deopt-info to be available for all
    optimized mutator frames.
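
A minimal usage sketch of the two corresponding scope classes this CL
introduces (class names as in the safepoint.h diff below; the scope
bodies are illustrative placeholders):

  {
    // Stops all mutators at GC-safe points; stackmaps are sufficient.
    GcSafepointOperationScope scope(Thread::Current());
    // ... e.g. scavenge or mark-compact work ...
  }
  {
    // Stops all mutators at points that are GC-safe and lazy-deopt-safe.
    DeoptSafepointOperationScope scope(Thread::Current());
    // ... e.g. invalidate optimized code, schedule lazy deopts ...
  }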

Mutators can be asked to block for either of these two safepoint
operations. If a mutator is at a place where it's safe to GC, it will
respond to "gc safepoint operation" requests. If it is additionally at
a place where it's also safe to lazy-deopt, it will respond to "deopt
safepoint operation" requests as well.

Depending on how the runtime was entered (which is tracked via the
[Thread::runtime_call_deopt_ability_] value), the mutator might
participate in both kinds of safepoint operations or only in "gc
safepoint operations".
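
For example, a runtime call that cannot tolerate lazy deoptimization of
its caller opts out of "deopt safepoint operations" via a scope; this
sketch mirrors how the CL's tests use it:

  {
    // While this scope is active the thread checks in only for "gc
    // safepoint operations"; "deopt safepoint operation" requests are
    // serviced once the scope (and runtime call) is left.
    RuntimeCallDeoptScope no_deopt(Thread::Current(),
                                   RuntimeCallDeoptAbility::kCannotLazyDeopt);
    GcSafepointOperationScope safepoint_operation(Thread::Current());
    // ... GC-only work, e.g. handling an allocation failure ...
  }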

During the start of a "deopt safepoint operation", the safepoint handler
will request all threads to stop at a "deopt safepoint". Some threads
might first want to initiate their own "gc safepoint operation"
(e.g. due to allocation failure) before they reach a "deopt safepoint".

We allow this by letting the initiating thread own the "deopt safepoint
operation" while still participating in other threads' "gc safepoint
operation" requests. Once all mutators have checked in at places where
it's safe to lazy-deopt, the "deopt safepoint operation" also takes
ownership of the "gc safepoint operation" level.

In order to facilitate this, the Thread's safepoint_state will be
extended to consist of the following bits:

  * AtSafepoint
  * SafepointRequested
  * AtDeoptSafepoint
  * DeoptSafepointRequested
  * BlockedForSafepoint
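
A rough sketch of how the extended state word can be queried (the bit
positions and helper below are hypothetical, for illustration only; the
authoritative BitField layout lives in this CL's thread.h changes,
which are not part of this excerpt):

  // Hypothetical bit assignment for illustration only.
  using AtSafepointField = BitField<uword, bool, 0, 1>;
  using SafepointRequestedField = BitField<uword, bool, 1, 1>;
  using AtDeoptSafepointField = BitField<uword, bool, 2, 1>;
  using DeoptSafepointRequestedField = BitField<uword, bool, 3, 1>;
  using BlockedForSafepointField = BitField<uword, bool, 4, 1>;

  // "At safepoint" at level kGC needs only AtSafepoint; at level
  // kGCAndDeopt it additionally needs AtDeoptSafepoint.
  static bool IsAtSafepoint(SafepointLevel level, uword state) {
    if (level == SafepointLevel::kGC) return AtSafepointField::decode(state);
    return AtSafepointField::decode(state) &&
           AtDeoptSafepointField::decode(state);
  }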

Issue https://github.com/dart-lang/sdk/issues/45213

TEST=vm/cc/SafepointOperation_*

Change-Id: Icdc2827718f6780818f99b829a5e806d6bb5b130
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/196927
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Martin Kustermann 2021-05-10 09:13:09 +00:00 committed by commit-bot@chromium.org
parent 7cc165bce5
commit 3c81d992ef
33 changed files with 1104 additions and 353 deletions

View file

@ -411,7 +411,7 @@ void SharedClassTable::Unregister(intptr_t index) {
}
void ClassTable::Remap(intptr_t* old_to_new_cid) {
ASSERT(Thread::Current()->IsAtSafepoint());
ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
const intptr_t num_cids = NumCids();
std::unique_ptr<ClassPtr[]> cls_by_old_cid(new ClassPtr[num_cids]);
auto* table = table_.load();
@ -422,7 +422,7 @@ void ClassTable::Remap(intptr_t* old_to_new_cid) {
}
void SharedClassTable::Remap(intptr_t* old_to_new_cid) {
ASSERT(Thread::Current()->IsAtSafepoint());
ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
const intptr_t num_cids = NumCids();
std::unique_ptr<intptr_t[]> size_by_old_cid(new intptr_t[num_cids]);
auto* table = table_.load();

View file

@ -529,7 +529,7 @@ void Assembler::dmb() {
Emit(kDataMemoryBarrier);
}
void Assembler::EnterSafepoint(Register addr, Register state) {
void Assembler::EnterFullSafepoint(Register addr, Register state) {
// We generate the same number of instructions whether or not the slow-path is
// forced. This simplifies GenerateJitCallbackTrampolines.
Label slow_path, done, retry;
@ -541,10 +541,10 @@ void Assembler::EnterSafepoint(Register addr, Register state) {
add(addr, THR, Operand(addr));
Bind(&retry);
ldrex(state, addr);
cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
b(&slow_path, NE);
mov(state, Operand(target::Thread::safepoint_state_acquired()));
mov(state, Operand(target::Thread::full_safepoint_state_acquired()));
strex(TMP, state, addr);
cmp(TMP, Operand(0)); // 0 means strex was successful.
b(&done, EQ);
@ -580,16 +580,16 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
if (enter_safepoint) {
EnterSafepoint(tmp1, tmp2);
EnterFullSafepoint(tmp1, tmp2);
}
}
void Assembler::ExitSafepoint(Register tmp1, Register tmp2) {
void Assembler::ExitFullSafepoint(Register tmp1, Register tmp2) {
Register addr = tmp1;
Register state = tmp2;
// We generate the same number of instructions whether or not the slow-path is
// forced, for consistency with EnterSafepoint.
// forced, for consistency with EnterFullSafepoint.
Label slow_path, done, retry;
if (FLAG_use_slow_path) {
b(&slow_path);
@ -599,10 +599,10 @@ void Assembler::ExitSafepoint(Register tmp1, Register tmp2) {
add(addr, THR, Operand(addr));
Bind(&retry);
ldrex(state, addr);
cmp(state, Operand(target::Thread::safepoint_state_acquired()));
cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
b(&slow_path, NE);
mov(state, Operand(target::Thread::safepoint_state_unacquired()));
mov(state, Operand(target::Thread::full_safepoint_state_unacquired()));
strex(TMP, state, addr);
cmp(TMP, Operand(0)); // 0 means strex was successful.
b(&done, EQ);
@ -623,13 +623,14 @@ void Assembler::TransitionNativeToGenerated(Register addr,
Register state,
bool exit_safepoint) {
if (exit_safepoint) {
ExitSafepoint(addr, state);
ExitFullSafepoint(addr, state);
} else {
#if defined(DEBUG)
// Ensure we've already left the safepoint.
LoadImmediate(state, 1 << target::Thread::safepoint_state_inside_bit());
ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
ands(TMP, TMP, Operand(state)); // Is-at-safepoint is the LSB.
ands(TMP, TMP, Operand(state));
Label ok;
b(&ok, ZERO);
Breakpoint();

View file

@ -584,8 +584,8 @@ class Assembler : public AssemblerBase {
void TransitionNativeToGenerated(Register scratch0,
Register scratch1,
bool exit_safepoint);
void EnterSafepoint(Register scratch0, Register scratch1);
void ExitSafepoint(Register scratch0, Register scratch1);
void EnterFullSafepoint(Register scratch0, Register scratch1);
void ExitFullSafepoint(Register scratch0, Register scratch1);
// Miscellaneous instructions.
void clrex();

View file

@ -1502,7 +1502,7 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
LeaveFrame();
}
void Assembler::EnterSafepoint(Register state) {
void Assembler::EnterFullSafepoint(Register state) {
// We generate the same number of instructions whether or not the slow-path is
// forced. This simplifies GenerateJitCallbackTrampolines.
@ -1518,10 +1518,10 @@ void Assembler::EnterSafepoint(Register state) {
add(addr, THR, Operand(addr));
Bind(&retry);
ldxr(state, addr);
cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
b(&slow_path, NE);
movz(state, Immediate(target::Thread::safepoint_state_acquired()), 0);
movz(state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
@ -1555,13 +1555,13 @@ void Assembler::TransitionGeneratedToNative(Register destination,
StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
if (enter_safepoint) {
EnterSafepoint(tmp);
EnterFullSafepoint(tmp);
}
}
void Assembler::ExitSafepoint(Register state) {
void Assembler::ExitFullSafepoint(Register state) {
// We generate the same number of instructions whether or not the slow-path is
// forced, for consistency with EnterSafepoint.
// forced, for consistency with EnterFullSafepoint.
Register addr = TMP2;
ASSERT(addr != state);
@ -1574,10 +1574,10 @@ void Assembler::ExitSafepoint(Register state) {
add(addr, THR, Operand(addr));
Bind(&retry);
ldxr(state, addr);
cmp(state, Operand(target::Thread::safepoint_state_acquired()));
cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
b(&slow_path, NE);
movz(state, Immediate(target::Thread::safepoint_state_unacquired()), 0);
movz(state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
@ -1596,13 +1596,16 @@ void Assembler::ExitSafepoint(Register state) {
void Assembler::TransitionNativeToGenerated(Register state,
bool exit_safepoint) {
if (exit_safepoint) {
ExitSafepoint(state);
ExitFullSafepoint(state);
} else {
#if defined(DEBUG)
// Ensure we've already left the safepoint.
ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
and_(TMP, TMP, Operand(state));
Label ok;
tbz(&ok, TMP, target::Thread::safepoint_state_inside_bit());
cbz(&ok, TMP);
Breakpoint();
Bind(&ok);
#endif

View file

@ -1909,8 +1909,8 @@ class Assembler : public AssemblerBase {
Register new_exit_through_ffi,
bool enter_safepoint);
void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
void EnterSafepoint(Register scratch);
void ExitSafepoint(Register scratch);
void EnterFullSafepoint(Register scratch);
void ExitFullSafepoint(Register scratch);
void CheckCodePointer();
void RestoreCodePointer();

View file

@ -2249,24 +2249,24 @@ void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
}
}
void Assembler::EnterSafepoint(Register scratch) {
void Assembler::EnterFullSafepoint(Register scratch) {
// We generate the same number of instructions whether or not the slow-path is
// forced. This simplifies GenerateJitCallbackTrampolines.
// Compare and swap the value at Thread::safepoint_state from unacquired to
// acquired. On success, jump to 'success'; otherwise, fallthrough.
// Compare and swap the value at Thread::safepoint_state from unacquired
// to acquired. On success, jump to 'success'; otherwise, fallthrough.
Label done, slow_path;
if (FLAG_use_slow_path) {
jmp(&slow_path);
}
pushl(EAX);
movl(EAX, Immediate(target::Thread::safepoint_state_unacquired()));
movl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
movl(EAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
movl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()), scratch);
movl(scratch, EAX);
popl(EAX);
cmpl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
cmpl(scratch, Immediate(target::Thread::full_safepoint_state_unacquired()));
if (!FLAG_use_slow_path) {
j(EQUAL, &done);
@ -2299,29 +2299,29 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
Immediate(target::Thread::native_execution_state()));
if (enter_safepoint) {
EnterSafepoint(scratch);
EnterFullSafepoint(scratch);
}
}
void Assembler::ExitSafepoint(Register scratch) {
void Assembler::ExitFullSafepoint(Register scratch) {
ASSERT(scratch != EAX);
// We generate the same number of instructions whether or not the slow-path is
// forced, for consistency with EnterSafepoint.
// forced, for consistency with EnterFullSafepoint.
// Compare and swap the value at Thread::safepoint_state from acquired to
// unacquired. On success, jump to 'success'; otherwise, fallthrough.
// Compare and swap the value at Thread::safepoint_state from acquired
// to unacquired. On success, jump to 'success'; otherwise, fallthrough.
Label done, slow_path;
if (FLAG_use_slow_path) {
jmp(&slow_path);
}
pushl(EAX);
movl(EAX, Immediate(target::Thread::safepoint_state_acquired()));
movl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
movl(EAX, Immediate(target::Thread::full_safepoint_state_acquired()));
movl(scratch, Immediate(target::Thread::full_safepoint_state_unacquired()));
LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()), scratch);
movl(scratch, EAX);
popl(EAX);
cmpl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
cmpl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
if (!FLAG_use_slow_path) {
j(EQUAL, &done);
@ -2338,12 +2338,12 @@ void Assembler::ExitSafepoint(Register scratch) {
void Assembler::TransitionNativeToGenerated(Register scratch,
bool exit_safepoint) {
if (exit_safepoint) {
ExitSafepoint(scratch);
ExitFullSafepoint(scratch);
} else {
#if defined(DEBUG)
// Ensure we've already left the safepoint.
movl(scratch, Address(THR, target::Thread::safepoint_state_offset()));
andl(scratch, Immediate(1 << target::Thread::safepoint_state_inside_bit()));
andl(scratch, Immediate(target::Thread::full_safepoint_state_acquired()));
Label ok;
j(ZERO, &ok);
Breakpoint();

View file

@ -800,8 +800,8 @@ class Assembler : public AssemblerBase {
Register new_exit_through_ffi,
bool enter_safepoint);
void TransitionNativeToGenerated(Register scratch, bool exit_safepoint);
void EnterSafepoint(Register scratch);
void ExitSafepoint(Register scratch);
void EnterFullSafepoint(Register scratch);
void ExitFullSafepoint(Register scratch);
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's RSP is guaranteed to be correctly aligned and

View file

@ -131,7 +131,7 @@ void Assembler::setcc(Condition condition, ByteRegister dst) {
EmitUint8(0xC0 + (dst & 0x07));
}
void Assembler::EnterSafepoint() {
void Assembler::EnterFullSafepoint() {
// We generate the same number of instructions whether or not the slow-path is
// forced, to simplify GenerateJitCallbackTrampolines.
Label done, slow_path;
@ -139,15 +139,15 @@ void Assembler::EnterSafepoint() {
jmp(&slow_path);
}
// Compare and swap the value at Thread::safepoint_state from unacquired to
// acquired. If the CAS fails, go to a slow-path stub.
// Compare and swap the value at Thread::safepoint_state from
// unacquired to acquired. If the CAS fails, go to a slow-path stub.
pushq(RAX);
movq(RAX, Immediate(target::Thread::safepoint_state_unacquired()));
movq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
movq(RAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
movq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
movq(TMP, RAX);
popq(RAX);
cmpq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
if (!FLAG_use_slow_path) {
j(EQUAL, &done);
@ -182,28 +182,29 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
Immediate(target::Thread::native_execution_state()));
if (enter_safepoint) {
EnterSafepoint();
EnterFullSafepoint();
}
}
void Assembler::LeaveSafepoint() {
void Assembler::ExitFullSafepoint() {
// We generate the same number of instructions whether or not the slow-path is
// forced, for consistency with EnterSafepoint.
// forced, for consistency with EnterFullSafepoint.
Label done, slow_path;
if (FLAG_use_slow_path) {
jmp(&slow_path);
}
// Compare and swap the value at Thread::safepoint_state from acquired to
// unacquired. On success, jump to 'success'; otherwise, fallthrough.
// Compare and swap the value at Thread::safepoint_state from
// acquired to unacquired. On success, jump to 'success'; otherwise,
// fallthrough.
pushq(RAX);
movq(RAX, Immediate(target::Thread::safepoint_state_acquired()));
movq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
movq(RAX, Immediate(target::Thread::full_safepoint_state_acquired()));
movq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
movq(TMP, RAX);
popq(RAX);
cmpq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
if (!FLAG_use_slow_path) {
j(EQUAL, &done);
@ -223,12 +224,12 @@ void Assembler::LeaveSafepoint() {
void Assembler::TransitionNativeToGenerated(bool leave_safepoint) {
if (leave_safepoint) {
LeaveSafepoint();
ExitFullSafepoint();
} else {
#if defined(DEBUG)
// Ensure we've already left the safepoint.
movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
andq(TMP, Immediate((1 << target::Thread::safepoint_state_inside_bit())));
andq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
Label ok;
j(ZERO, &ok);
Breakpoint();

View file

@ -318,8 +318,8 @@ class Assembler : public AssemblerBase {
void setcc(Condition condition, ByteRegister dst);
void EnterSafepoint();
void LeaveSafepoint();
void EnterFullSafepoint();
void ExitFullSafepoint();
void TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame,
Register new_exit_through_ffi,

View file

@ -386,7 +386,7 @@ CodePtr CompileParsedFunctionHelper::FinalizeCompilation(
function.SetWasCompiled(true);
} else if (optimized()) {
// We cannot execute generated code while installing code.
ASSERT(Thread::Current()->IsAtSafepoint() ||
ASSERT(Thread::Current()->IsAtSafepoint(SafepointLevel::kGCAndDeopt) ||
(Thread::Current()->IsMutatorThread() &&
IsolateGroup::Current()->ContainsOnlyOneIsolate()));
// We are validating our CHA / field guard / ... assumptions. To prevent
@ -1203,7 +1203,7 @@ void BackgroundCompiler::Run() {
bool BackgroundCompiler::EnqueueCompilation(const Function& function) {
Thread* thread = Thread::Current();
ASSERT(thread->IsMutatorThread());
ASSERT(!thread->IsAtSafepoint());
ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
SafepointMonitorLocker ml_done(&done_monitor_);
if (disabled_depth_ > 0) return false;
@ -1239,7 +1239,7 @@ void BackgroundCompiler::VisitPointers(ObjectPointerVisitor* visitor) {
void BackgroundCompiler::Stop() {
Thread* thread = Thread::Current();
ASSERT(thread->isolate() == nullptr || thread->IsMutatorThread());
ASSERT(!thread->IsAtSafepoint());
ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
SafepointMonitorLocker ml_done(&done_monitor_);
StopLocked(thread, &ml_done);
@ -1262,7 +1262,7 @@ void BackgroundCompiler::StopLocked(Thread* thread,
void BackgroundCompiler::Enable() {
Thread* thread = Thread::Current();
ASSERT(thread->IsMutatorThread());
ASSERT(!thread->IsAtSafepoint());
ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
SafepointMonitorLocker ml_done(&done_monitor_);
disabled_depth_--;
@ -1274,7 +1274,7 @@ void BackgroundCompiler::Enable() {
void BackgroundCompiler::Disable() {
Thread* thread = Thread::Current();
ASSERT(thread->IsMutatorThread());
ASSERT(!thread->IsAtSafepoint());
ASSERT(!thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt));
SafepointMonitorLocker ml_done(&done_monitor_);
disabled_depth_++;

View file

@ -40,7 +40,7 @@ struct RelocatorTestHelper {
explicit RelocatorTestHelper(Thread* thread)
: thread(thread),
locker(thread, thread->isolate_group()->program_lock()),
safepoint_and_growth_scope(thread) {
safepoint_and_growth_scope(thread, SafepointLevel::kGC) {
// So the relocator uses the correct instruction size layout.
FLAG_precompiled_mode = true;
FLAG_use_bare_instructions = true;

View file

@ -714,17 +714,12 @@ word Thread::stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
: stack_overflow_shared_without_fpu_regs_entry_point_offset();
}
uword Thread::safepoint_state_unacquired() {
return dart::Thread::safepoint_state_unacquired();
uword Thread::full_safepoint_state_unacquired() {
return dart::Thread::full_safepoint_state_unacquired();
}
uword Thread::safepoint_state_acquired() {
return dart::Thread::safepoint_state_acquired();
}
intptr_t Thread::safepoint_state_inside_bit() {
COMPILE_ASSERT(dart::Thread::AtSafepointField::bitsize() == 1);
return dart::Thread::AtSafepointField::shift();
uword Thread::full_safepoint_state_acquired() {
return dart::Thread::full_safepoint_state_acquired();
}
uword Thread::generated_execution_state() {

View file

@ -1062,9 +1062,8 @@ class Thread : public AllStatic {
static uword vm_tag_dart_id();
static word safepoint_state_offset();
static uword safepoint_state_unacquired();
static uword safepoint_state_acquired();
static intptr_t safepoint_state_inside_bit();
static uword full_safepoint_state_unacquired();
static uword full_safepoint_state_acquired();
static word execution_state_offset();
static uword vm_execution_state();

View file

@ -466,8 +466,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// them.
__ blx(R5);
// EnterSafepoint clobbers R4, R5 and TMP, all saved or volatile.
__ EnterSafepoint(R4, R5);
// Clobbers R4, R5 and TMP, all saved or volatile.
__ EnterFullSafepoint(R4, R5);
// Returns.
__ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));

View file

@ -440,9 +440,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// Resets CSP and SP, important for EnterSafepoint below.
__ blr(R10);
// EnterSafepoint clobbers TMP, TMP2 and R9 -- all volatile and not holding
// return values.
__ EnterSafepoint(/*scratch=*/R9);
// Clobbers TMP, TMP2 and R9 -- all volatile and not holding return values.
__ EnterFullSafepoint(/*scratch=*/R9);
// Pop LR and THR from the real stack (CSP).
RESTORES_LR_FROM_FRAME(__ ldp(

View file

@ -315,8 +315,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
__ Bind(&check_done);
#endif
// EnterSafepoint takes care to not clobber *any* registers (besides scratch).
__ EnterSafepoint(/*scratch=*/ECX);
// Takes care to not clobber *any* registers (besides scratch).
__ EnterFullSafepoint(/*scratch=*/ECX);
// Restore callee-saved registers.
__ movl(ECX, EBX);

View file

@ -383,8 +383,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// the saved THR and the return address. The target will know to skip them.
__ call(TMP);
// EnterSafepoint takes care to not clobber *any* registers (besides TMP).
__ EnterSafepoint();
// Takes care to not clobber *any* registers (besides TMP).
__ EnterFullSafepoint();
// Restore THR (callee-saved).
__ popq(THR);

View file

@ -6987,7 +6987,7 @@ static void KillNonMainIsolatesSlow(Thread* thread, Isolate* main_isolate) {
while (true) {
bool non_main_isolates_alive = false;
{
SafepointOperationScope safepoint(thread);
DeoptSafepointOperationScope safepoint(thread);
group->ForEachIsolate(
[&](Isolate* isolate) {
if (isolate != main_isolate) {

View file

@ -222,7 +222,8 @@ HeapIterationScope::HeapIterationScope(Thread* thread, bool writable)
heap_(isolate_group()->heap()),
old_space_(heap_->old_space()),
writable_(writable) {
isolate_group()->safepoint_handler()->SafepointThreads(thread);
isolate_group()->safepoint_handler()->SafepointThreads(thread,
SafepointLevel::kGC);
{
// It's not safe to iterate over old space when concurrent marking or
@ -273,7 +274,8 @@ HeapIterationScope::~HeapIterationScope() {
ml.NotifyAll();
}
isolate_group()->safepoint_handler()->ResumeThreads(thread());
isolate_group()->safepoint_handler()->ResumeThreads(thread(),
SafepointLevel::kGC);
}
void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const {
@ -353,7 +355,7 @@ void Heap::HintFreed(intptr_t size) {
void Heap::NotifyIdle(int64_t deadline) {
Thread* thread = Thread::Current();
SafepointOperationScope safepoint_operation(thread);
GcSafepointOperationScope safepoint_operation(thread);
// Check if we want to collect new-space first, because if we want to collect
// both new-space and old-space, the new-space collection should run first
@ -420,7 +422,7 @@ void Heap::EvacuateNewSpace(Thread* thread, GCReason reason) {
return;
}
{
SafepointOperationScope safepoint_operation(thread);
GcSafepointOperationScope safepoint_operation(thread);
RecordBeforeGC(kScavenge, reason);
VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId
: VMTag::kGCNewSpaceTagId);
@ -444,7 +446,7 @@ void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) {
return;
}
{
SafepointOperationScope safepoint_operation(thread);
GcSafepointOperationScope safepoint_operation(thread);
RecordBeforeGC(kScavenge, reason);
{
VMTagScope tagScope(thread, reason == kIdle ? VMTag::kGCIdleTagId
@ -484,7 +486,7 @@ void Heap::CollectOldSpaceGarbage(Thread* thread,
return;
}
{
SafepointOperationScope safepoint_operation(thread);
GcSafepointOperationScope safepoint_operation(thread);
thread->isolate_group()->ForEachIsolate(
[&](Isolate* isolate) {
// Discard regexp backtracking stacks to further reduce memory usage.

View file

@ -41,4 +41,5 @@ heap_sources_tests = [
"pages_test.cc",
"scavenger_test.cc",
"weak_table_test.cc",
"safepoint_test.cc",
]

View file

@ -1085,7 +1085,7 @@ void PageSpace::CollectGarbage(bool compact, bool finalize) {
Thread* thread = Thread::Current();
const int64_t pre_safe_point = OS::GetCurrentMonotonicMicros();
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
const int64_t pre_wait_for_sweepers = OS::GetCurrentMonotonicMicros();
// Wait for pending tasks to complete and then account for the driver task.

View file

@ -12,41 +12,33 @@ namespace dart {
DEFINE_FLAG(bool, trace_safepoint, false, "Trace Safepoint logic.");
SafepointOperationScope::SafepointOperationScope(Thread* T)
: ThreadStackResource(T) {
SafepointOperationScope::SafepointOperationScope(Thread* T,
SafepointLevel level)
: ThreadStackResource(T), level_(level) {
ASSERT(T != nullptr && T->isolate_group() != nullptr);
SafepointHandler* handler = T->isolate_group()->safepoint_handler();
ASSERT(handler != NULL);
// Signal all threads to get to a safepoint and wait for them to
// get to a safepoint.
handler->SafepointThreads(T);
auto handler = T->isolate_group()->safepoint_handler();
handler->SafepointThreads(T, level_);
}
SafepointOperationScope::~SafepointOperationScope() {
Thread* T = thread();
ASSERT(T != nullptr && T->isolate_group() != nullptr);
// Resume all threads which are blocked for the safepoint operation.
SafepointHandler* handler = T->isolate_group()->safepoint_handler();
ASSERT(handler != NULL);
handler->ResumeThreads(T);
auto handler = T->isolate_group()->safepoint_handler();
handler->ResumeThreads(T, level_);
}
ForceGrowthSafepointOperationScope::ForceGrowthSafepointOperationScope(
Thread* T)
: ThreadStackResource(T) {
Thread* T,
SafepointLevel level)
: ThreadStackResource(T), level_(level) {
ASSERT(T != NULL);
IsolateGroup* IG = T->isolate_group();
ASSERT(IG != NULL);
SafepointHandler* handler = IG->safepoint_handler();
ASSERT(handler != NULL);
// Signal all threads to get to a safepoint and wait for them to
// get to a safepoint.
handler->SafepointThreads(T);
auto handler = IG->safepoint_handler();
handler->SafepointThreads(T, level_);
// N.B.: Change growth policy inside the safepoint to prevent racy access.
Heap* heap = IG->heap();
@ -64,10 +56,8 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
Heap* heap = IG->heap();
heap->SetGrowthControlState(current_growth_controller_state_);
// Resume all threads which are blocked for the safepoint operation.
SafepointHandler* handler = IG->safepoint_handler();
ASSERT(handler != NULL);
handler->ResumeThreads(T);
auto handler = IG->safepoint_handler();
handler->ResumeThreads(T, level_);
if (current_growth_controller_state_) {
ASSERT(T->CanCollectGarbage());
@ -82,87 +72,131 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
SafepointHandler::SafepointHandler(IsolateGroup* isolate_group)
: isolate_group_(isolate_group),
safepoint_lock_(),
number_threads_not_at_safepoint_(0),
safepoint_operation_count_(0),
owner_(NULL) {}
handlers_{
{isolate_group, SafepointLevel::kGC},
{isolate_group, SafepointLevel::kGCAndDeopt},
} {}
SafepointHandler::~SafepointHandler() {
ASSERT(owner_ == NULL);
ASSERT(safepoint_operation_count_ == 0);
isolate_group_ = NULL;
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
ASSERT(handlers_[level].owner_ == nullptr);
}
}
void SafepointHandler::SafepointThreads(Thread* T) {
void SafepointHandler::SafepointThreads(Thread* T, SafepointLevel level) {
ASSERT(T->no_safepoint_scope_depth() == 0);
ASSERT(T->execution_state() == Thread::kThreadInVM);
ASSERT(T->current_safepoint_level() >= level);
{
// First grab the threads list lock for this isolate
// and check if a safepoint is already in progress. This
// ensures that two threads do not start a safepoint operation
// at the same time.
MonitorLocker sl(threads_lock());
MonitorLocker tl(threads_lock());
// Now check to see if a safepoint operation is already in progress
// for this isolate, block if an operation is in progress.
while (SafepointInProgress()) {
// If we are recursively invoking a Safepoint operation then we
// just increment the count and return, otherwise we wait for the
// safepoint operation to be done.
if (owner_ == T) {
increment_safepoint_operation_count();
return;
}
sl.WaitWithSafepointCheck(T);
// Allow recursive deopt safepoint operation.
if (handlers_[level].owner_ == T) {
handlers_[level].operation_count_++;
// If we own this safepoint level already we have to own the lower levels
// as well.
AssertWeOwnLowerLevelSafepoints(T, level);
return;
}
// Set safepoint in progress state by this thread.
SetSafepointInProgress(T);
// This level of nesting is not allowed (this thread cannot own lower levels
// and then later try to acquire higher levels).
AssertWeDoNotOwnLowerLevelSafepoints(T, level);
// Go over the active thread list and ensure that all threads active
// in the isolate reach a safepoint.
Thread* current = isolate_group()->thread_registry()->active_list();
while (current != NULL) {
MonitorLocker tl(current->thread_lock());
if (!current->BypassSafepoints()) {
if (current == T) {
current->SetAtSafepoint(true);
} else {
uint32_t state = current->SetSafepointRequested(true);
if (!Thread::IsAtSafepoint(state)) {
// Thread is not already at a safepoint so try to
// get it to a safepoint and wait for it to check in.
if (current->IsMutatorThread()) {
current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
}
MonitorLocker sl(&safepoint_lock_);
++number_threads_not_at_safepoint_;
}
// Mark this thread at safepoint and possibly notify waiting threads.
{
MonitorLocker tl(T->thread_lock());
EnterSafepointLocked(T, &tl);
}
// Wait until other safepoint operations are done & mark us as owning
// the safepoint - so no other thread can.
while (handlers_[level].SafepointInProgress()) {
tl.Wait();
}
handlers_[level].SetSafepointInProgress(T);
// Ensure a thread is at a safepoint or notify it to get to one.
handlers_[level].NotifyThreadsToGetToSafepointLevel(T);
}
// Now wait for all threads that are not already at a safepoint to check-in.
handlers_[level].WaitUntilThreadsReachedSafepointLevel();
AcquireLowerLevelSafepoints(T, level);
}
void SafepointHandler::AssertWeOwnLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
RELEASE_ASSERT(handlers_[lower_level].owner_ == T);
}
}
void SafepointHandler::AssertWeDoNotOwnLowerLevelSafepoints(
Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
RELEASE_ASSERT(handlers_[lower_level].owner_ != T);
}
}
void SafepointHandler::LevelHandler::NotifyThreadsToGetToSafepointLevel(
Thread* T) {
ASSERT(num_threads_not_parked_ == 0);
for (auto current = isolate_group()->thread_registry()->active_list();
current != nullptr; current = current->next()) {
MonitorLocker tl(current->thread_lock());
if (!current->BypassSafepoints() && current != T) {
const uint32_t state = current->SetSafepointRequested(level_, true);
if (!Thread::IsAtSafepoint(level_, state)) {
// Send OOB message to get it to safepoint.
if (current->IsMutatorThread()) {
current->ScheduleInterruptsLocked(Thread::kVMInterrupt);
}
MonitorLocker sl(&parked_lock_);
num_threads_not_parked_++;
}
current = current->next();
}
}
// Now wait for all threads that are not already at a safepoint to check-in.
}
void SafepointHandler::ResumeThreads(Thread* T, SafepointLevel level) {
{
MonitorLocker sl(&safepoint_lock_);
intptr_t num_attempts = 0;
while (number_threads_not_at_safepoint_ > 0) {
Monitor::WaitResult retval = sl.Wait(1000);
if (retval == Monitor::kTimedOut) {
num_attempts += 1;
if (FLAG_trace_safepoint && num_attempts > 10) {
// We have been waiting too long, start logging this as we might
// have an issue where a thread is not checking in for a safepoint.
for (Thread* current =
isolate_group()->thread_registry()->active_list();
current != NULL; current = current->next()) {
if (!current->IsAtSafepoint()) {
OS::PrintErr("Attempt:%" Pd
" waiting for thread %s to check in\n",
num_attempts, current->os_thread()->name());
}
MonitorLocker sl(threads_lock());
ASSERT(handlers_[level].SafepointInProgress());
ASSERT(handlers_[level].owner_ == T);
AssertWeOwnLowerLevelSafepoints(T, level);
// We allow recursive safepoints.
if (handlers_[level].operation_count_ > 1) {
handlers_[level].operation_count_--;
return;
}
ReleaseLowerLevelSafepoints(T, level);
handlers_[level].NotifyThreadsToContinue(T);
handlers_[level].ResetSafepointInProgress(T);
sl.NotifyAll();
}
ExitSafepointUsingLock(T);
}
void SafepointHandler::LevelHandler::WaitUntilThreadsReachedSafepointLevel() {
MonitorLocker sl(&parked_lock_);
intptr_t num_attempts = 0;
while (num_threads_not_parked_ > 0) {
Monitor::WaitResult retval = sl.Wait(1000);
if (retval == Monitor::kTimedOut) {
num_attempts += 1;
if (FLAG_trace_safepoint && num_attempts > 10) {
for (auto current = isolate_group()->thread_registry()->active_list();
current != nullptr; current = current->next()) {
if (!current->IsAtSafepoint(level_)) {
OS::PrintErr("Attempt:%" Pd " waiting for thread %s to check in\n",
num_attempts, current->os_thread()->name());
}
}
}
@ -170,80 +204,96 @@ void SafepointHandler::SafepointThreads(Thread* T) {
}
}
void SafepointHandler::ResumeThreads(Thread* T) {
// First resume all the threads which are blocked for the safepoint
// operation.
MonitorLocker sl(threads_lock());
// First check if we are in a recursive safepoint operation, in that case
// we just decrement safepoint_operation_count and return.
ASSERT(SafepointInProgress());
if (safepoint_operation_count() > 1) {
decrement_safepoint_operation_count();
return;
void SafepointHandler::AcquireLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
MonitorLocker tl(threads_lock());
ASSERT(handlers_[level].owner_ == T);
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
while (handlers_[lower_level].SafepointInProgress()) {
tl.Wait();
}
handlers_[lower_level].SetSafepointInProgress(T);
ASSERT(handlers_[lower_level].owner_ == T);
}
Thread* current = isolate_group()->thread_registry()->active_list();
while (current != NULL) {
}
void SafepointHandler::ReleaseLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = 0; lower_level < level; ++lower_level) {
handlers_[lower_level].ResetSafepointInProgress(T);
}
}
void SafepointHandler::LevelHandler::NotifyThreadsToContinue(Thread* T) {
for (auto current = isolate_group()->thread_registry()->active_list();
current != nullptr; current = current->next()) {
MonitorLocker tl(current->thread_lock());
if (!current->BypassSafepoints()) {
if (current == T) {
current->SetAtSafepoint(false);
} else {
uint32_t state = current->SetSafepointRequested(false);
if (Thread::IsBlockedForSafepoint(state)) {
tl.Notify();
if (!current->BypassSafepoints() && current != T) {
bool resume = false;
for (intptr_t lower_level = level_; lower_level >= 0; --lower_level) {
if (Thread::IsBlockedForSafepoint(current->SetSafepointRequested(
static_cast<SafepointLevel>(lower_level), false))) {
resume = true;
}
}
if (resume) {
tl.Notify();
}
}
current = current->next();
}
// Now reset the safepoint_in_progress_ state and notify all threads
// that are waiting to enter the isolate or waiting to start another
// safepoint operation.
ResetSafepointInProgress(T);
sl.NotifyAll();
}
void SafepointHandler::EnterSafepointUsingLock(Thread* T) {
MonitorLocker tl(T->thread_lock());
T->SetAtSafepoint(true);
if (T->IsSafepointRequested()) {
MonitorLocker sl(&safepoint_lock_);
ASSERT(number_threads_not_at_safepoint_ > 0);
number_threads_not_at_safepoint_ -= 1;
sl.Notify();
}
EnterSafepointLocked(T, &tl);
}
void SafepointHandler::ExitSafepointUsingLock(Thread* T) {
MonitorLocker tl(T->thread_lock());
ASSERT(T->IsAtSafepoint());
while (T->IsSafepointRequested()) {
T->SetBlockedForSafepoint(true);
tl.Wait();
T->SetBlockedForSafepoint(false);
}
T->SetAtSafepoint(false);
ExitSafepointLocked(T, &tl);
ASSERT(!T->IsSafepointRequestedLocked());
}
void SafepointHandler::BlockForSafepoint(Thread* T) {
ASSERT(!T->BypassSafepoints());
MonitorLocker tl(T->thread_lock());
if (T->IsSafepointRequested()) {
T->SetAtSafepoint(true);
{
MonitorLocker sl(&safepoint_lock_);
ASSERT(number_threads_not_at_safepoint_ > 0);
number_threads_not_at_safepoint_ -= 1;
sl.Notify();
}
while (T->IsSafepointRequested()) {
T->SetBlockedForSafepoint(true);
tl.Wait();
T->SetBlockedForSafepoint(false);
}
T->SetAtSafepoint(false);
// This takes into account the safepoint level the thread can participate in.
if (T->IsSafepointRequestedLocked()) {
EnterSafepointLocked(T, &tl);
ExitSafepointLocked(T, &tl);
ASSERT(!T->IsSafepointRequestedLocked());
}
}
void SafepointHandler::EnterSafepointLocked(Thread* T, MonitorLocker* tl) {
T->SetAtSafepoint(true);
for (intptr_t level = T->current_safepoint_level(); level >= 0; --level) {
if (T->IsSafepointLevelRequestedLocked(
static_cast<SafepointLevel>(level))) {
handlers_[level].NotifyWeAreParked(T);
}
}
}
void SafepointHandler::LevelHandler::NotifyWeAreParked(Thread* T) {
ASSERT(owner_ != nullptr);
MonitorLocker sl(&parked_lock_);
ASSERT(num_threads_not_parked_ > 0);
num_threads_not_parked_ -= 1;
if (num_threads_not_parked_ == 0) {
sl.Notify();
}
}
void SafepointHandler::ExitSafepointLocked(Thread* T, MonitorLocker* tl) {
while (T->IsSafepointRequestedLocked()) {
T->SetBlockedForSafepoint(true);
tl->Wait();
T->SetBlockedForSafepoint(false);
}
T->SetAtSafepoint(false);
}
} // namespace dart

View file

@ -17,23 +17,48 @@ namespace dart {
// all threads to a safepoint. At the end of the operation all the threads are
// resumed.
class SafepointOperationScope : public ThreadStackResource {
public:
explicit SafepointOperationScope(Thread* T);
protected:
SafepointOperationScope(Thread* T, SafepointLevel level);
~SafepointOperationScope();
private:
SafepointLevel level_;
DISALLOW_COPY_AND_ASSIGN(SafepointOperationScope);
};
// Gets all mutators to a safepoint where GC is allowed.
class GcSafepointOperationScope : public SafepointOperationScope {
public:
explicit GcSafepointOperationScope(Thread* T)
: SafepointOperationScope(T, SafepointLevel::kGC) {}
~GcSafepointOperationScope() {}
private:
DISALLOW_COPY_AND_ASSIGN(GcSafepointOperationScope);
};
// Gets all mutators to a safepoint where GC and deopt are allowed.
class DeoptSafepointOperationScope : public SafepointOperationScope {
public:
explicit DeoptSafepointOperationScope(Thread* T)
: SafepointOperationScope(T, SafepointLevel::kGCAndDeopt) {}
~DeoptSafepointOperationScope() {}
private:
DISALLOW_COPY_AND_ASSIGN(DeoptSafepointOperationScope);
};
// A stack based scope that can be used to perform an operation after getting
// all threads to a safepoint. At the end of the operation all the threads are
// resumed. Allocations in the scope will force heap growth.
class ForceGrowthSafepointOperationScope : public ThreadStackResource {
public:
explicit ForceGrowthSafepointOperationScope(Thread* T);
ForceGrowthSafepointOperationScope(Thread* T, SafepointLevel level);
~ForceGrowthSafepointOperationScope();
private:
SafepointLevel level_;
bool current_growth_controller_state_;
DISALLOW_COPY_AND_ASSIGN(ForceGrowthSafepointOperationScope);
@ -48,65 +73,105 @@ class SafepointHandler {
void EnterSafepointUsingLock(Thread* T);
void ExitSafepointUsingLock(Thread* T);
void BlockForSafepoint(Thread* T);
bool IsOwnedByTheThread(Thread* thread) { return owner_ == thread; }
bool IsOwnedByTheThread(Thread* thread) {
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
if (handlers_[level].owner_ == thread) {
return true;
}
}
return false;
}
bool AnySafepointInProgress() {
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
if (handlers_[level].SafepointInProgress()) {
return true;
}
}
return false;
}
private:
void SafepointThreads(Thread* T);
void ResumeThreads(Thread* T);
class LevelHandler {
public:
LevelHandler(IsolateGroup* isolate_group, SafepointLevel level)
: isolate_group_(isolate_group), level_(level) {}
bool SafepointInProgress() const {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT((operation_count_ > 0) == (owner_ != nullptr));
return ((operation_count_ > 0) && (owner_ != NULL));
}
void SetSafepointInProgress(Thread* T) {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(owner_ == NULL);
ASSERT(operation_count_ == 0);
operation_count_ = 1;
owner_ = T;
}
void ResetSafepointInProgress(Thread* T) {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(owner_ == T);
ASSERT(operation_count_ == 1);
operation_count_ = 0;
owner_ = NULL;
}
void NotifyWeAreParked(Thread* T);
IsolateGroup* isolate_group() const { return isolate_group_; }
Monitor* threads_lock() const { return isolate_group_->threads_lock(); }
private:
friend class SafepointHandler;
// Helper methods for [SafepointThreads]
void NotifyThreadsToGetToSafepointLevel(Thread* T);
void WaitUntilThreadsReachedSafepointLevel();
// Helper methods for [ResumeThreads]
void NotifyThreadsToContinue(Thread* T);
IsolateGroup* isolate_group_;
SafepointLevel level_;
// Monitor used by thread initiating a safepoint operation to track threads
// not at a safepoint and wait for these threads to reach a safepoint.
Monitor parked_lock_;
// If a safepoint operation is currently in progress, this field contains
// the thread that initiated the safepoint operation, otherwise it is NULL.
Thread* owner_ = nullptr;
// The number of nested safepoint operations currently held.
int32_t operation_count_ = 0;
// Counts the threads that the in-progress safepoint operation is still
// waiting on to check in.
int32_t num_threads_not_parked_ = 0;
};
void SafepointThreads(Thread* T, SafepointLevel level);
void ResumeThreads(Thread* T, SafepointLevel level);
// Helper methods for [SafepointThreads]
void AssertWeOwnLowerLevelSafepoints(Thread* T, SafepointLevel level);
void AssertWeDoNotOwnLowerLevelSafepoints(Thread* T, SafepointLevel level);
void AcquireLowerLevelSafepoints(Thread* T, SafepointLevel level);
// Helper methods for [ResumeThreads]
void ReleaseLowerLevelSafepoints(Thread* T, SafepointLevel level);
void EnterSafepointLocked(Thread* T, MonitorLocker* tl);
void ExitSafepointLocked(Thread* T, MonitorLocker* tl);
IsolateGroup* isolate_group() const { return isolate_group_; }
Monitor* threads_lock() const { return isolate_group_->threads_lock(); }
bool SafepointInProgress() const {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
return ((safepoint_operation_count_ > 0) && (owner_ != NULL));
}
void SetSafepointInProgress(Thread* T) {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(owner_ == NULL);
ASSERT(safepoint_operation_count_ == 0);
safepoint_operation_count_ = 1;
owner_ = T;
}
void ResetSafepointInProgress(Thread* T) {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(owner_ == T);
ASSERT(safepoint_operation_count_ == 1);
safepoint_operation_count_ = 0;
owner_ = NULL;
}
int32_t safepoint_operation_count() const {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
return safepoint_operation_count_;
}
void increment_safepoint_operation_count() {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(safepoint_operation_count_ < kMaxInt32);
safepoint_operation_count_ += 1;
}
void decrement_safepoint_operation_count() {
ASSERT(threads_lock()->IsOwnedByCurrentThread());
ASSERT(safepoint_operation_count_ > 0);
safepoint_operation_count_ -= 1;
}
IsolateGroup* isolate_group_;
// Monitor used by thread initiating a safepoint operation to track threads
// not at a safepoint and wait for these threads to reach a safepoint.
Monitor safepoint_lock_;
int32_t number_threads_not_at_safepoint_;
// Count that indicates if a safepoint operation is currently in progress
// and also tracks the number of recursive safepoint operations on the
// same thread.
int32_t safepoint_operation_count_;
// If a safepoint operation is currently in progress, this field contains
// the thread that initiated the safepoint operation, otherwise it is NULL.
Thread* owner_;
LevelHandler handlers_[SafepointLevel::kNumLevels];
friend class Isolate;
friend class IsolateGroup;
@ -186,7 +251,7 @@ class TransitionGeneratedToVM : public TransitionSafepointState {
// We do the more expensive operation of blocking the thread
// only if a safepoint is requested.
if (T->IsSafepointRequested()) {
handler()->BlockForSafepoint(T);
T->BlockForSafepoint();
}
}
@ -291,10 +356,8 @@ class TransitionVMToGenerated : public TransitionSafepointState {
ASSERT(thread()->execution_state() == Thread::kThreadInGenerated);
thread()->set_execution_state(Thread::kThreadInVM);
// Fast check to see if a safepoint is requested or not.
// We do the more expensive operation of blocking the thread
// only if a safepoint is requested.
if (thread()->IsSafepointRequested()) {
handler()->BlockForSafepoint(thread());
thread()->BlockForSafepoint();
}
}

View file

@ -0,0 +1,537 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include <memory>
#include <utility>
#include <vector>
#include "platform/assert.h"
#include "vm/heap/safepoint.h"
#include "vm/isolate.h"
#include "vm/lockers.h"
#include "vm/random.h"
#include "vm/thread_pool.h"
#include "vm/unit_test.h"
namespace dart {
class StateMachineTask : public ThreadPool::Task {
public:
enum State {
kInitialized = 0,
kEntered,
kPleaseExit,
kExited,
kNext,
};
struct Data {
explicit Data(IsolateGroup* isolate_group)
: isolate_group_(isolate_group) {}
void WaitUntil(intptr_t target_state) {
MonitorLocker ml(&monitor_);
while (state != target_state) {
ml.Wait();
}
}
void MarkAndNotify(intptr_t target_state) {
MonitorLocker ml(&monitor_);
state = target_state;
ml.Notify();
}
void AssertIsIn(intptr_t expected_state) {
MonitorLocker ml(&monitor_);
EXPECT_EQ(expected_state, state);
}
void AssertIsNotIn(intptr_t expected_state) {
MonitorLocker ml(&monitor_);
EXPECT_NE(expected_state, state);
}
bool IsIn(intptr_t expected_state) {
MonitorLocker ml(&monitor_);
return expected_state == state;
}
intptr_t state = kInitialized;
IsolateGroup* isolate_group_;
private:
Monitor monitor_;
};
explicit StateMachineTask(std::shared_ptr<Data> data)
: data_(std::move(data)) {}
virtual void Run() {
const bool kBypassSafepoint = false;
Thread::EnterIsolateGroupAsHelper(data_->isolate_group_,
Thread::kUnknownTask, kBypassSafepoint);
thread_ = Thread::Current();
data_->MarkAndNotify(kEntered);
RunInternal();
data_->WaitUntil(kPleaseExit);
Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
thread_ = nullptr;
data_->MarkAndNotify(kExited);
}
protected:
virtual void RunInternal() = 0;
std::shared_ptr<Data> data_;
Thread* thread_ = nullptr;
};
class DeoptTask : public StateMachineTask {
public:
enum State {
kStartDeoptOperation = StateMachineTask::kNext,
kFinishedDeoptOperation,
};
explicit DeoptTask(std::shared_ptr<Data> data)
: StateMachineTask(std::move(data)) {}
protected:
virtual void RunInternal() {
data_->WaitUntil(kStartDeoptOperation);
{ DeoptSafepointOperationScope safepoint_operation(thread_); }
data_->MarkAndNotify(kFinishedDeoptOperation);
}
};
class GcWithoutDeoptTask : public StateMachineTask {
public:
enum State {
kStartSafepointOperation = StateMachineTask::kNext,
kEndSafepointOperation,
kJoinDeoptOperation,
kDeoptOperationDone,
};
explicit GcWithoutDeoptTask(std::shared_ptr<Data> data)
: StateMachineTask(std::move(data)) {}
protected:
virtual void RunInternal() {
data_->WaitUntil(kStartSafepointOperation);
{
RuntimeCallDeoptScope no_deopt(thread_,
RuntimeCallDeoptAbility::kCannotLazyDeopt);
GcSafepointOperationScope safepoint_operation(thread_);
}
data_->MarkAndNotify(kEndSafepointOperation);
data_->WaitUntil(kJoinDeoptOperation);
EXPECT(thread_->IsSafepointRequested());
thread_->BlockForSafepoint();
data_->MarkAndNotify(kDeoptOperationDone);
}
};
// This test ensures that while a "deopt safepoint operation" is about to
// start but is still waiting for some threads to hit a "deopt safepoint",
// another safepoint operation can successfully start and finish.
ISOLATE_UNIT_TEST_CASE(
SafepointOperation_SafepointOpWhileDeoptSafepointOpBlocked) {
auto isolate_group = thread->isolate_group();
std::shared_ptr<DeoptTask::Data> deopt(new DeoptTask::Data(isolate_group));
std::shared_ptr<GcWithoutDeoptTask::Data> gc(
new GcWithoutDeoptTask::Data(isolate_group));
thread->EnterSafepoint();
{
// Will join outstanding threads on destruction.
ThreadPool pool;
pool.Run<DeoptTask>(deopt);
pool.Run<GcWithoutDeoptTask>(gc);
// Wait until both threads entered the isolate group.
deopt->WaitUntil(DeoptTask::kEntered);
gc->WaitUntil(GcWithoutDeoptTask::kEntered);
// Let deopt task start deopt operation scope (it will block in
// [SafepointOperationScope] constructor until all threads have checked-in).
deopt->MarkAndNotify(DeoptTask::kStartDeoptOperation);
OS::Sleep(200); // Give it time to actually start the deopt operation
// Now let the other thread do a full safepoint operation and wait until
// it's done: We want to ensure that we can do normal safepoint operations
// while a deopt operation is being started and is waiting for all mutators
// to reach an appropriate place where they can be deopted.
gc->MarkAndNotify(GcWithoutDeoptTask::kStartSafepointOperation);
gc->WaitUntil(GcWithoutDeoptTask::kEndSafepointOperation);
// We successfully performed a safepoint operation; now ensure the first
// thread is still stuck starting its deopt operation.
deopt->AssertIsIn(DeoptTask::kStartDeoptOperation);
// Now we'll let the other thread check-in and ensure the deopt operation
// proceeded and finished.
gc->MarkAndNotify(GcWithoutDeoptTask::kJoinDeoptOperation);
gc->WaitUntil(GcWithoutDeoptTask::kDeoptOperationDone);
deopt->WaitUntil(DeoptTask::kFinishedDeoptOperation);
// Make both threads exit the isolate group.
deopt->MarkAndNotify(DeoptTask::kPleaseExit);
gc->MarkAndNotify(GcWithoutDeoptTask::kPleaseExit);
deopt->WaitUntil(DeoptTask::kExited);
gc->WaitUntil(GcWithoutDeoptTask::kExited);
}
thread->ExitSafepoint();
}
class LongDeoptTask : public StateMachineTask {
public:
enum State {
kStartDeoptOperation = StateMachineTask::kNext,
kInsideDeoptOperation,
kFinishDeoptOperation,
kFinishedDeoptOperation,
};
explicit LongDeoptTask(std::shared_ptr<Data> data)
: StateMachineTask(std::move(data)) {}
protected:
virtual void RunInternal() {
data_->WaitUntil(kStartDeoptOperation);
{
DeoptSafepointOperationScope safepoint_operation(thread_);
data_->MarkAndNotify(kInsideDeoptOperation);
data_->WaitUntil(kFinishDeoptOperation);
}
data_->MarkAndNotify(kFinishedDeoptOperation);
}
};
class WaiterTask : public StateMachineTask {
public:
enum State {
kEnterSafepoint = StateMachineTask::kNext,
kInsideSafepoint,
kPleaseExitSafepoint,
kExitedSafepoint,
};
explicit WaiterTask(std::shared_ptr<Data> data)
: StateMachineTask(std::move(data)) {}
protected:
virtual void RunInternal() {
data_->WaitUntil(kEnterSafepoint);
thread_->EnterSafepoint();
data_->MarkAndNotify(kInsideSafepoint);
data_->WaitUntil(kPleaseExitSafepoint);
thread_->ExitSafepoint();
data_->MarkAndNotify(kExitedSafepoint);
}
};
// This test ensures that while a "deopt safepoint operation" is in-progress
// other threads cannot perform a normal "safepoint operation".
ISOLATE_UNIT_TEST_CASE(
SafepointOperation_SafepointOpBlockedWhileDeoptSafepointOp) {
auto isolate_group = thread->isolate_group();
std::shared_ptr<LongDeoptTask::Data> deopt(
new LongDeoptTask::Data(isolate_group));
std::shared_ptr<WaiterTask::Data> gc(new WaiterTask::Data(isolate_group));
thread->EnterSafepoint();
{
// Will join outstanding threads on destruction.
ThreadPool pool;
pool.Run<LongDeoptTask>(deopt);
pool.Run<WaiterTask>(gc);
// Wait until both threads entered the isolate group.
deopt->WaitUntil(LongDeoptTask::kEntered);
gc->WaitUntil(WaiterTask::kEntered);
// Let gc task enter safepoint.
gc->MarkAndNotify(WaiterTask::kEnterSafepoint);
gc->WaitUntil(WaiterTask::kInsideSafepoint);
// Now let the "deopt operation" run and block.
deopt->MarkAndNotify(LongDeoptTask::kStartDeoptOperation);
deopt->WaitUntil(LongDeoptTask::kInsideDeoptOperation);
// Now let the gc task try to exit the safepoint and run its own safepoint
// operation: we expect it to block on exiting the safepoint, since the
// deopt operation is still ongoing.
gc->MarkAndNotify(WaiterTask::kPleaseExitSafepoint);
OS::Sleep(200);
gc->AssertIsNotIn(WaiterTask::kExitedSafepoint);
// Now let's finish the deopt operation & ensure the waiter thread made
// progress.
deopt->MarkAndNotify(LongDeoptTask::kFinishDeoptOperation);
gc->WaitUntil(WaiterTask::kExitedSafepoint);
// Make both threads exit the isolate group.
deopt->MarkAndNotify(LongDeoptTask::kPleaseExit);
gc->MarkAndNotify(WaiterTask::kPleaseExit);
deopt->WaitUntil(LongDeoptTask::kExited);
gc->WaitUntil(WaiterTask::kExited);
}
thread->ExitSafepoint();
}
class CheckinTask : public StateMachineTask {
public:
enum State {
kStartLoop = StateMachineTask::kNext,
};
struct Data : public StateMachineTask::Data {
Data(IsolateGroup* isolate_group,
SafepointLevel level,
std::atomic<intptr_t>* gc_only_checkins,
std::atomic<intptr_t>* deopt_checkin)
: StateMachineTask::Data(isolate_group),
level(level),
gc_only_checkins(gc_only_checkins),
deopt_checkin(deopt_checkin) {}
SafepointLevel level;
std::atomic<intptr_t>* gc_only_checkins;
std::atomic<intptr_t>* deopt_checkin;
};
explicit CheckinTask(std::shared_ptr<Data> data) : StateMachineTask(data) {}
protected:
Data* data() { return reinterpret_cast<Data*>(data_.get()); }
virtual void RunInternal() {
data_->WaitUntil(kStartLoop);
uword last_sync = OS::GetCurrentTimeMillis();
while (!data()->IsIn(kPleaseExit)) {
switch (data()->level) {
case SafepointLevel::kGC: {
// This thread should join only GC safepoint operations.
RuntimeCallDeoptScope no_deopt(
Thread::Current(), RuntimeCallDeoptAbility::kCannotLazyDeopt);
SafepointIfRequested(thread_, data()->gc_only_checkins);
break;
}
case SafepointLevel::kGCAndDeopt: {
// This thread should join any safepoint operations.
SafepointIfRequested(thread_, data()->deopt_checkin);
break;
}
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
// If we are asked to join a deopt safepoint operation we will comply,
// but only once every 200 ms.
const auto now = OS::GetCurrentTimeMillis();
if ((now - last_sync) > 200) {
thread_->EnterSafepoint();
thread_->ExitSafepoint();
last_sync = now;
}
}
}
void SafepointIfRequested(Thread* thread, std::atomic<intptr_t>* checkins) {
OS::SleepMicros(10);
if (thread->IsSafepointRequested()) {
// Cooperate by checking in at the safepoint.
thread->BlockForSafepoint();
(*checkins)++;
}
}
};
// Test that mutators will not check in to "deopt safepoint operations" at
// places where the mutator cannot deopt (which is indicated by the
// Thread::runtime_call_deopt_ability_ value).
ISOLATE_UNIT_TEST_CASE(SafepointOperation_SafepointPointTest) {
auto isolate_group = thread->isolate_group();
const intptr_t kTaskCount = 5;
std::atomic<intptr_t> gc_only_checkins[kTaskCount];
std::atomic<intptr_t> deopt_checkin[kTaskCount];
for (intptr_t i = 0; i < kTaskCount; ++i) {
gc_only_checkins[i] = 0;
deopt_checkin[i] = 0;
}
std::vector<std::shared_ptr<CheckinTask::Data>> threads;
for (intptr_t i = 0; i < kTaskCount; ++i) {
const auto level =
(i % 2) == 0 ? SafepointLevel::kGC : SafepointLevel::kGCAndDeopt;
std::unique_ptr<CheckinTask::Data> data(new CheckinTask::Data(
isolate_group, level, &gc_only_checkins[i], &deopt_checkin[i]));
threads.push_back(std::move(data));
}
{
// Will join outstanding threads on destruction.
ThreadPool pool;
for (intptr_t i = 0; i < kTaskCount; i++) {
pool.Run<CheckinTask>(threads[i]);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->WaitUntil(CheckinTask::kEntered);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->MarkAndNotify(CheckinTask::kStartLoop);
}
{
{ GcSafepointOperationScope safepoint_operation(thread); }
OS::SleepMicros(500);
{ DeoptSafepointOperationScope safepoint_operation(thread); }
OS::SleepMicros(500);
{ GcSafepointOperationScope safepoint_operation(thread); }
OS::SleepMicros(500);
{ DeoptSafepointOperationScope safepoint_operation(thread); }
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->MarkAndNotify(CheckinTask::kPleaseExit);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->WaitUntil(CheckinTask::kExited);
}
for (intptr_t i = 0; i < kTaskCount; ++i) {
const auto level =
(i % 2) == 0 ? SafepointLevel::kGC : SafepointLevel::kGCAndDeopt;
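// The main thread ran two GC and two deopt safepoint operations. A
// kGC-level thread checks in to the two GC operations only (it joins the
// deopt operations via its periodic full safepoint, which is not counted),
// while a kGCAndDeopt-level thread checks in to all four.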
switch (level) {
case SafepointLevel::kGC:
EXPECT_EQ(0, deopt_checkin[i]);
EXPECT_EQ(2, gc_only_checkins[i]);
break;
case SafepointLevel::kGCAndDeopt:
EXPECT_EQ(4, deopt_checkin[i]);
EXPECT_EQ(0, gc_only_checkins[i]);
break;
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
}
}
}
class StressTask : public StateMachineTask {
public:
enum State {
kStart = StateMachineTask::kNext,
};
explicit StressTask(std::shared_ptr<Data> data) : StateMachineTask(data) {}
protected:
Data* data() { return reinterpret_cast<Data*>(data_.get()); }
virtual void RunInternal() {
data_->WaitUntil(kStart);
Random random(thread_->isolate_group()->random()->NextUInt64());
while (!data()->IsIn(kPleaseExit)) {
const auto us = random.NextUInt32() % 3;
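// Randomly pick one of five actions: run a deopt safepoint operation, run
// a GC safepoint operation, temporarily leave the isolate group, pass
// through a safepoint, or check in to a requested safepoint operation.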
switch (random.NextUInt32() % 5) {
case 0: {
DeoptSafepointOperationScope safepoint_op(thread_);
OS::SleepMicros(us);
break;
}
case 1: {
GcSafepointOperationScope safepoint_op(thread_);
OS::SleepMicros(us);
break;
}
case 2: {
const bool kBypassSafepoint = false;
Thread::ExitIsolateGroupAsHelper(kBypassSafepoint);
OS::SleepMicros(us);
Thread::EnterIsolateGroupAsHelper(
data_->isolate_group_, Thread::kUnknownTask, kBypassSafepoint);
thread_ = Thread::Current();
break;
}
case 3: {
thread_->EnterSafepoint();
OS::SleepMicros(us);
thread_->ExitSafepoint();
break;
}
case 4: {
if (thread_->IsSafepointRequested()) {
thread_->BlockForSafepoint();
}
break;
}
}
}
}
};
ISOLATE_UNIT_TEST_CASE(SafepointOperation_StressTest) {
auto isolate_group = thread->isolate_group();
const intptr_t kTaskCount = 5;
std::vector<std::shared_ptr<StressTask::Data>> threads;
for (intptr_t i = 0; i < kTaskCount; ++i) {
std::unique_ptr<StressTask::Data> data(new StressTask::Data(isolate_group));
threads.push_back(std::move(data));
}
thread->EnterSafepoint();
{
// Will join outstanding threads on destruction.
ThreadPool pool;
for (intptr_t i = 0; i < kTaskCount; i++) {
pool.Run<StressTask>(threads[i]);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->WaitUntil(StressTask::kEntered);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->MarkAndNotify(StressTask::kStart);
}
OS::Sleep(3 * 1000);
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->MarkAndNotify(StressTask::kPleaseExit);
}
for (intptr_t i = 0; i < kTaskCount; i++) {
threads[i]->WaitUntil(StressTask::kExited);
}
}
thread->ExitSafepoint();
}
ISOLATE_UNIT_TEST_CASE(SafepointOperation_DeoptAndNonDeoptNesting) {
{
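// Nesting in this order is allowed: a deopt safepoint operation subsumes a
// GC safepoint operation, so GC scopes may be opened inside deopt scopes.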
DeoptSafepointOperationScope safepoint_scope(thread);
DeoptSafepointOperationScope safepoint_scope2(thread);
GcSafepointOperationScope safepoint_scope3(thread);
GcSafepointOperationScope safepoint_scope4(thread);
}
{
DeoptSafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope2(thread);
}
}
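// The reverse nesting must crash: a GC safepoint operation may have parked
// mutators at places that are safe for GC but not for lazy-deopt, so it
// cannot be escalated into a deopt safepoint operation.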
ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(
SafepointOperation_NonDeoptAndDeoptNesting,
"Crash") {
GcSafepointOperationScope safepoint_scope(thread);
DeoptSafepointOperationScope safepoint_scope2(thread);
}
} // namespace dart

View file

@ -1546,7 +1546,7 @@ void Scavenger::Scavenge() {
// TODO(koda): Consider moving SafepointThreads into allocation failure/retry
// logic to avoid needless collections.
Thread* thread = Thread::Current();
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
int64_t safe_point = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kSafePoint, safe_point - start);
@ -1784,7 +1784,7 @@ void Scavenger::Evacuate() {
// The latter means even if the scavenge promotes every object in the new
// space, the new allocation means the space is not empty,
// causing the assertion below to fail.
SafepointOperationScope scope(Thread::Current());
GcSafepointOperationScope scope(Thread::Current());
// Forces the next scavenge to promote all the objects in the new space.
early_tenure_ = true;

View file

@ -564,9 +564,9 @@ Thread* IsolateGroup::ScheduleThreadLocked(MonitorLocker* ml,
Thread* thread = nullptr;
OSThread* os_thread = OSThread::Current();
if (os_thread != nullptr) {
// If a safepoint operation is in progress wait for it
// to finish before scheduling this thread in.
while (!bypass_safepoint && safepoint_handler()->SafepointInProgress()) {
// If a safepoint operation is in progress wait for it to finish before
// scheduling this thread.
while (!bypass_safepoint && safepoint_handler()->AnySafepointInProgress()) {
ml->Wait();
}
@ -653,7 +653,8 @@ void IsolateGroup::UnscheduleThreadLocked(MonitorLocker* ml,
thread->heap_ = nullptr;
thread->set_os_thread(nullptr);
thread->set_execution_state(Thread::kThreadInNative);
thread->set_safepoint_state(Thread::SetAtSafepoint(true, 0));
thread->set_safepoint_state(Thread::AtSafepointField::encode(true) |
Thread::AtDeoptSafepointField::encode(true));
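// An unscheduled thread is considered to be at a full safepoint: both
// AtSafepoint and AtDeoptSafepoint are set.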
thread->clear_pending_functions();
ASSERT(thread->no_safepoint_scope_depth() == 0);
if (is_mutator) {
@ -923,7 +924,7 @@ void IsolateGroup::RegisterStaticField(const Field& field,
if (need_to_grow_backing_store) {
// We have to stop other isolates from accessing their field state, since
// we'll have to grow the backing store.
SafepointOperationScope ops(Thread::Current());
GcSafepointOperationScope scope(Thread::Current());
for (auto isolate : isolates_) {
auto field_table = isolate->field_table();
if (field_table->IsReadyToUse()) {
@ -2106,7 +2107,7 @@ bool IsolateGroup::ReloadKernel(JSONStream* js,
void IsolateGroup::DeleteReloadContext() {
// Another thread may be in the middle of GetClassForHeapWalkAt.
SafepointOperationScope safepoint_scope(Thread::Current());
GcSafepointOperationScope safepoint_scope(Thread::Current());
group_reload_context_.reset();
delete program_reload_context_;
@ -2795,10 +2796,11 @@ void IsolateGroup::RunWithStoppedMutatorsCallable(
// all other threads, including auxiliary threads are at a safepoint), even
// though we only need to ensure that the mutator threads are stopped.
if (use_force_growth_in_otherwise) {
ForceGrowthSafepointOperationScope safepoint_scope(thread);
ForceGrowthSafepointOperationScope safepoint_scope(thread,
SafepointLevel::kGC);
otherwise->Call();
} else {
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
otherwise->Call();
}
}

View file

@ -27,8 +27,7 @@ Monitor::WaitResult MonitorLocker::WaitWithSafepointCheck(Thread* thread,
// Fast update failed which means we could potentially be in the middle
// of a safepoint operation and need to block for it.
monitor_->Exit();
SafepointHandler* handler = thread->isolate_group()->safepoint_handler();
handler->ExitSafepointUsingLock(thread);
thread->ExitSafepointUsingLock();
monitor_->Enter();
}
thread->set_execution_state(Thread::kThreadInVM);

View file

@ -263,7 +263,7 @@ DART_EXPORT void* Dart_ExecuteInternalCommand(const char* command, void* arg) {
Thread::EnterIsolateAsHelper(args->isolate, Thread::TaskKind::kUnknownTask);
Thread* const thread = Thread::Current();
{
SafepointOperationScope scope(thread);
GcSafepointOperationScope scope(thread);
args->isolate->group()->heap()->WriteProtectCode(/*read_only=*/false);
(*args->callback)();
args->isolate->group()->heap()->WriteProtectCode(/*read_only=*/true);

View file

@ -3931,9 +3931,10 @@ void Class::Finalize() const {
}
#if defined(DEBUG)
static bool IsMutatorOrAtSafepoint() {
static bool IsMutatorOrAtDeoptSafepoint() {
Thread* thread = Thread::Current();
return thread->IsMutatorThread() || thread->IsAtSafepoint();
return thread->IsMutatorThread() ||
thread->IsAtSafepoint(SafepointLevel::kGCAndDeopt);
}
#endif
@ -3978,7 +3979,7 @@ void Class::RegisterCHACode(const Code& code) {
Function::Handle(code.function()).ToQualifiedCString(),
ToCString());
}
DEBUG_ASSERT(IsMutatorOrAtSafepoint());
DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
ASSERT(code.is_optimized());
CHACodeArray a(*this);
a.Register(code);
@ -7060,7 +7061,7 @@ void Function::InstallOptimizedCode(const Code& code) const {
void Function::SetInstructions(const Code& value) const {
// Ensure that nobody is executing this function when we install it.
if (untag()->code() != Code::null() && HasCode()) {
SafepointOperationScope safepoint(Thread::Current());
GcSafepointOperationScope safepoint(Thread::Current());
SetInstructionsSafe(value);
} else {
ASSERT(IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
@ -7189,7 +7190,7 @@ void Function::set_unoptimized_code(const Code& value) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
DEBUG_ASSERT(IsMutatorOrAtSafepoint());
DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
ASSERT(value.IsNull() || !value.is_optimized());
untag()->set_unoptimized_code(value.ptr());
#endif
@ -10813,7 +10814,7 @@ class FieldDependentArray : public WeakCodeReferences {
void Field::RegisterDependentCode(const Code& code) const {
ASSERT(IsOriginal());
DEBUG_ASSERT(IsMutatorOrAtSafepoint());
DEBUG_ASSERT(IsMutatorOrAtDeoptSafepoint());
ASSERT(code.is_optimized());
FieldDependentArray a(*this);
a.Register(code);
@ -17270,7 +17271,7 @@ bool Code::IsUnknownDartCode(CodePtr code) {
}
void Code::DisableDartCode() const {
SafepointOperationScope safepoint(Thread::Current());
GcSafepointOperationScope safepoint(Thread::Current());
ASSERT(IsFunctionCode());
ASSERT(instructions() == active_instructions());
const Code& new_code = StubCode::FixCallersTarget();
@ -17279,7 +17280,7 @@ void Code::DisableDartCode() const {
}
void Code::DisableStubCode() const {
SafepointOperationScope safepoint(Thread::Current());
GcSafepointOperationScope safepoint(Thread::Current());
ASSERT(IsAllocationStubCode());
ASSERT(instructions() == active_instructions());
const Code& new_code = StubCode::FixAllocationStubTarget();

View file

@ -61,7 +61,7 @@ ISOLATE_UNIT_TEST_CASE(ObjectGraph) {
intptr_t d_size = d.ptr()->untag()->HeapSize();
{
// No more allocation; raw pointers ahead.
SafepointOperationScope safepoint(thread);
GcSafepointOperationScope safepoint(thread);
ObjectPtr b_raw = b.ptr();
// Clear handles to cut unintended retained paths.
b = Array::null();

View file

@ -1116,7 +1116,7 @@ DEFINE_RUNTIME_ENTRY(PatchStaticCall, 0) {
caller_frame->pc(), caller_code));
if (target_code.ptr() !=
CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) {
SafepointOperationScope safepoint(thread);
GcSafepointOperationScope safepoint(thread);
if (target_code.ptr() !=
CodePatcher::GetStaticCallTargetAt(caller_frame->pc(), caller_code)) {
CodePatcher::PatchStaticCallAt(caller_frame->pc(), caller_code,

View file

@ -242,6 +242,20 @@ enum class RuntimeCallDeoptAbility {
kCannotLazyDeopt,
};
// The safepoint level a thread is on, or that a safepoint operation is
// requested for.
//
// The higher the number, the stronger the guarantees:
// * the time-to-safepoint latency increases with the level
// * the frequency of hitting possible safe points decreases with the level
enum SafepointLevel {
// Safe to GC
kGC,
// Safe to GC as well as Deopt.
kGCAndDeopt,
// Number of levels.
kNumLevels,
};
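To make the levels concrete, here is a minimal sketch of a runtime entry that
cannot lazy-deopt; it is an illustration under the assumptions of this change
(HypotheticalRuntimeHelper is a made-up name), using only the
RuntimeCallDeoptScope, RuntimeCallDeoptAbility and CheckForSafepoint APIs that
appear elsewhere in this change:

// Illustration: while the scope below is active the thread participates in
// SafepointLevel::kGC operations only.
void HypotheticalRuntimeHelper(Thread* thread) {
  RuntimeCallDeoptScope no_deopt(thread,
                                 RuntimeCallDeoptAbility::kCannotLazyDeopt);
  // current_safepoint_level() is now SafepointLevel::kGC, so this check-in
  // responds to "gc safepoint operations"; a pending "deopt safepoint
  // operation" cannot complete until the thread reaches a deopt-safe point,
  // e.g. after this scope is left.
  thread->CheckForSafepoint();
}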
// A VM thread; may be executing Dart code or performing helper tasks like
// garbage collection or compilation. The Thread structure associated with
// a thread is allocated by EnsureInit before entering an isolate, and destroyed
@ -736,9 +750,14 @@ class Thread : public ThreadState {
* - Bit 0 of the safepoint_state_ field is used to indicate if the thread is
* already at a safepoint,
* - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint
* operation is requested for this thread.
* - Bit 2 of the safepoint_state_ field is used to indicate that the thread
* is blocked for the safepoint operation to complete.
* is requested for this thread.
* - Bit 2 of the safepoint_state_ field is used to indicate if the thread is
* already at a deopt safepoint,
* - Bit 3 of the safepoint_state_ field is used to indicate if a deopt
* safepoint is requested for this thread.
* - Bit 4 of the safepoint_state_ field is used to indicate that the thread
* is blocked at a (deopt)safepoint and has to be woken up once the
* (deopt)safepoint operation is complete.
*
* The safepoint execution state (described above) for a thread is stored in
* the execution_state_ field.
@ -748,35 +767,68 @@ class Thread : public ThreadState {
* kThreadInNative - The thread is running native code.
* kThreadInBlockedState - The thread is blocked waiting for a resource.
*/
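// Summary of the safepoint_state_ bits (see the BitField declarations
// further down):
//   bit 0: AtSafepoint             - stopped at a place safe for GC
//   bit 1: SafepointRequested      - a gc safepoint operation is pending
//   bit 2: AtDeoptSafepoint        - also safe for lazy-deopt
//   bit 3: DeoptSafepointRequested - a deopt safepoint operation is pending
//   bit 4: BlockedForSafepoint     - parked until the operation completes
// A thread is at SafepointLevel::kGC if bit 0 is set, and at
// SafepointLevel::kGCAndDeopt only if bits 0 and 2 are both set (see
// AtSafepointBits).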
static bool IsAtSafepoint(uword state) {
return AtSafepointField::decode(state);
static bool IsAtSafepoint(SafepointLevel level, uword state) {
const uword mask = AtSafepointBits(level);
return (state & mask) == mask;
}
bool IsAtSafepoint() const {
return AtSafepointField::decode(safepoint_state_);
return IsAtSafepoint(current_safepoint_level());
}
static uword SetAtSafepoint(bool value, uword state) {
return AtSafepointField::update(value, state);
bool IsAtSafepoint(SafepointLevel level) const {
return IsAtSafepoint(level, safepoint_state_.load());
}
void SetAtSafepoint(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
safepoint_state_ = AtSafepointField::update(value, safepoint_state_);
if (value) {
safepoint_state_ |= AtSafepointBits(current_safepoint_level());
} else {
safepoint_state_ &= ~AtSafepointBits(current_safepoint_level());
}
}
bool IsSafepointRequestedLocked() const {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
return IsSafepointRequested();
}
bool IsSafepointRequested() const {
return SafepointRequestedField::decode(safepoint_state_);
const uword state = safepoint_state_.load();
for (intptr_t level = current_safepoint_level(); level >= 0; --level) {
if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(level)))
return true;
}
return false;
}
static uword SetSafepointRequested(bool value, uword state) {
return SafepointRequestedField::update(value, state);
}
uword SetSafepointRequested(bool value) {
bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
if (level > current_safepoint_level()) return false;
const uword state = safepoint_state_.load();
return IsSafepointLevelRequested(state, level);
}
static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
switch (level) {
case SafepointLevel::kGC:
return (state & SafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kGCAndDeopt:
return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
}
void BlockForSafepoint();
uword SetSafepointRequested(SafepointLevel level, bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
const uword mask = level == SafepointLevel::kGC
? SafepointRequestedField::mask_in_place()
: DeoptSafepointRequestedField::mask_in_place();
if (value) {
// acquire pulls from the release in TryEnterSafepoint.
return safepoint_state_.fetch_or(SafepointRequestedField::encode(true),
std::memory_order_acquire);
return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
} else {
// release pushes to the acquire in TryExitSafepoint.
return safepoint_state_.fetch_and(~SafepointRequestedField::encode(true),
std::memory_order_release);
return safepoint_state_.fetch_and(~mask, std::memory_order_release);
}
}
static bool IsBlockedForSafepoint(uword state) {
@ -824,12 +876,21 @@ class Thread : public ThreadState {
(execution_state() == kThreadInGenerated);
}
static uword safepoint_state_unacquired() { return SetAtSafepoint(false, 0); }
static uword safepoint_state_acquired() { return SetAtSafepoint(true, 0); }
static uword full_safepoint_state_unacquired() {
return (0 << AtSafepointField::shift()) |
(0 << AtDeoptSafepointField::shift());
}
static uword full_safepoint_state_acquired() {
return (1 << AtSafepointField::shift()) |
(1 << AtDeoptSafepointField::shift());
}
bool TryEnterSafepoint() {
uword old_state = 0;
uword new_state = SetAtSafepoint(true, 0);
uword new_state = AtSafepointField::encode(true);
if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
new_state |= AtDeoptSafepointField::encode(true);
}
return safepoint_state_.compare_exchange_strong(old_state, new_state,
std::memory_order_release);
}
@ -846,7 +907,10 @@ class Thread : public ThreadState {
}
bool TryExitSafepoint() {
uword old_state = SetAtSafepoint(true, 0);
uword old_state = AtSafepointField::encode(true);
if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
old_state |= AtDeoptSafepointField::encode(true);
}
uword new_state = 0;
return safepoint_state_.compare_exchange_strong(old_state, new_state,
std::memory_order_acquire);
@ -863,6 +927,8 @@ class Thread : public ThreadState {
}
void CheckForSafepoint() {
// If we are in a runtime call that doesn't support lazy deopt, we will only
// respond to gc safepoint requests.
ASSERT(no_safepoint_scope_depth() == 0);
if (IsSafepointRequested()) {
BlockForSafepoint();
@ -927,6 +993,13 @@ class Thread : public ThreadState {
PendingDeopts& pending_deopts() { return pending_deopts_; }
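// The strongest safepoint level this thread can currently participate in;
// while in a runtime call that cannot lazy-deopt this is only
// SafepointLevel::kGC.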
SafepointLevel current_safepoint_level() const {
return runtime_call_deopt_ability_ ==
RuntimeCallDeoptAbility::kCannotLazyDeopt
? SafepointLevel::kGC
: SafepointLevel::kGCAndDeopt;
}
private:
template <class T>
T* AllocateReusableHandle();
@ -1065,11 +1138,32 @@ class Thread : public ThreadState {
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
#endif // defined(DEBUG)
// Generated code assumes that AtSafepointField is the LSB.
class AtSafepointField : public BitField<uword, bool, 0, 1> {};
class SafepointRequestedField : public BitField<uword, bool, 1, 1> {};
class BlockedForSafepointField : public BitField<uword, bool, 2, 1> {};
class BypassSafepointsField : public BitField<uword, bool, 3, 1> {};
class SafepointRequestedField
: public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
class AtDeoptSafepointField
: public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
class DeoptSafepointRequestedField
: public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
class BlockedForSafepointField
: public BitField<uword,
bool,
DeoptSafepointRequestedField::kNextBit,
1> {};
class BypassSafepointsField
: public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
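// Returns the bits that must all be set in safepoint_state_ for a thread to
// count as being at a safepoint of the given level.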
static uword AtSafepointBits(SafepointLevel level) {
switch (level) {
case SafepointLevel::kGC:
return AtSafepointField::mask_in_place();
case SafepointLevel::kGCAndDeopt:
return AtSafepointField::mask_in_place() |
AtDeoptSafepointField::mask_in_place();
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
}
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit_;
@ -1096,7 +1190,6 @@ class Thread : public ThreadState {
void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
void EnterSafepointUsingLock();
void ExitSafepointUsingLock();
void BlockForSafepoint();
void FinishEntering(TaskKind kind);
void PrepareLeaving();
@ -1128,6 +1221,11 @@ class Thread : public ThreadState {
friend class compiler::target::Thread;
friend class FieldTable;
friend class RuntimeCallDeoptScope;
friend class
TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
friend class
TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
friend class MonitorLocker; // ExitSafepointUsingLock
friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
const char*,
char**);

View file

@ -675,13 +675,13 @@ ISOLATE_UNIT_TEST_CASE(SafepointTestVM) {
ISOLATE_UNIT_TEST_CASE(RecursiveSafepointTest1) {
intptr_t count = 0;
{
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
count += 1;
{
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
count += 1;
{
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
count += 1;
}
}
@ -785,7 +785,7 @@ ISOLATE_UNIT_TEST_CASE(SafepointTestVM2) {
}
bool all_helpers = false;
do {
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
{
MonitorLocker ml(&monitor);
if (expected_count == SafepointTestTask::kTaskCount) {
@ -816,9 +816,9 @@ ISOLATE_UNIT_TEST_CASE(RecursiveSafepointTest2) {
}
bool all_helpers = false;
do {
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
{
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
MonitorLocker ml(&monitor);
if (expected_count == SafepointTestTask::kTaskCount) {
all_helpers = true;
@ -830,9 +830,9 @@ ISOLATE_UNIT_TEST_CASE(RecursiveSafepointTest2) {
isolate->set_current_tag(tag);
bool all_exited = false;
do {
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
{
SafepointOperationScope safepoint_scope(thread);
GcSafepointOperationScope safepoint_scope(thread);
MonitorLocker ml(&monitor);
if (exited == SafepointTestTask::kTaskCount) {
all_exited = true;