Re-land "[vm] Aggressive write-barrier elimination."
Three bugs were fixed:
1. BitVector::Equals was not fully fixed by the original CL.
2. We need to add old objects to the deferred marking queue during
RememberLiveTemporaries().
3. The thread being scanned in RestoreWriteBarrierInvariant may not
be scheduled, so we cannot use its store buffer block.
In addition, this change uncovered another bug, fixed in:
https://dart-review.googlesource.com/c/sdk/+/138960.
Original CL is in patchset 3.
This reverts commit 30a12a349e.
Change-Id: I36169b09563998ed5b3c3eac70ee0ebe78853e62
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/138920
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Samir Jindel <sjindel@google.com>
parent ba68d04d3f
commit 4c6a2ab47c
28 changed files with 924 additions and 91 deletions
@@ -3,7 +3,7 @@
// BSD-style license that can be found in the LICENSE file.

#include "vm/bit_vector.h"

#include "vm/log.h"
#include "vm/os.h"

namespace dart {

@@ -39,8 +39,10 @@ bool BitVector::Equals(const BitVector& other) const {
    if (data_[i] != other.data_[i]) return false;
  }
  if (i < data_length_) {
    if (length_ % kBitsPerWord == 0) return data_[i] == other.data_[i];

    // Don't compare bits beyond length_.
    const intptr_t shift_size = (kBitsPerWord - length_) & (kBitsPerWord - 1);
    const intptr_t shift_size = kBitsPerWord - (length_ % kBitsPerWord);
    const uword mask = static_cast<uword>(-1) >> shift_size;
    if ((data_[i] & mask) != (other.data_[i] & mask)) return false;
  }

@@ -105,11 +107,11 @@ bool BitVector::IsEmpty() const {
}

void BitVector::Print() const {
  OS::PrintErr("[");
  THR_Print("[");
  for (intptr_t i = 0; i < length_; i++) {
    OS::PrintErr(Contains(i) ? "1" : "0");
    THR_Print(Contains(i) ? "1" : "0");
  }
  OS::PrintErr("]");
  THR_Print("]");
}

}  // namespace dart
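For context on fix (1): the Equals hunk above only compares the bits below length_ in the final, partially used word. A minimal standalone sketch of that last-word masking idea follows, with illustrative names and a simpler mask construction than the VM code (it selects the same low bits):

#include <cstddef>
#include <cstdint>

// Compare two bit sets stored in 64-bit words where only the low `length`
// bits are meaningful. Full words are compared directly; the final partial
// word is compared under a mask so stale bits past `length` cannot cause a
// spurious mismatch.
bool BitsEqual(const uint64_t* a, const uint64_t* b, size_t length) {
  const size_t kBitsPerWord = 64;
  const size_t full_words = length / kBitsPerWord;
  for (size_t i = 0; i < full_words; ++i) {
    if (a[i] != b[i]) return false;
  }
  const size_t rem = length % kBitsPerWord;
  if (rem == 0) return true;  // No partial word left to check.
  const uint64_t mask = (uint64_t{1} << rem) - 1;  // Low `rem` bits set.
  return (a[full_words] & mask) == (b[full_words] & mask);
}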
@@ -70,6 +70,8 @@ class BitVector : public ZoneAllocated {
    data_[i / kBitsPerWord] &= ~(static_cast<uword>(1) << (i % kBitsPerWord));
  }

  void Set(intptr_t i, bool value) { value ? Add(i) : Remove(i); }

  bool Equals(const BitVector& other) const;

  // Add all elements that are in the bitvector from.

@@ -92,6 +94,14 @@ class BitVector : public ZoneAllocated {
    return (block & (static_cast<uword>(1) << (i % kBitsPerWord))) != 0;
  }

  bool SubsetOf(const BitVector& other) {
    ASSERT(length_ == other.length_);
    for (intptr_t i = 0; i < data_length_; ++i) {
      if ((data_[i] & other.data_[i]) != data_[i]) return false;
    }
    return true;
  }

  void Clear() {
    for (intptr_t i = 0; i < data_length_; i++) {
      data_[i] = 0;
@@ -615,7 +615,9 @@ void FlowGraphCompiler::VisitBlocks() {
      EmitInstructionPrologue(instr);
      ASSERT(pending_deoptimization_env_ == NULL);
      pending_deoptimization_env_ = instr->env();
      DEBUG_ONLY(current_instruction_ = instr);
      instr->EmitNativeCode(this);
      DEBUG_ONLY(current_instruction_ = nullptr);
      pending_deoptimization_env_ = NULL;
      if (IsPeephole(instr)) {
        ASSERT(top_of_stack_ == nullptr);

@@ -708,7 +710,9 @@ void FlowGraphCompiler::GenerateDeferredCode() {
    set_current_instruction(slow_path->instruction());
    SpecialStatsBegin(stats_tag);
    BeginCodeSourceRange();
    DEBUG_ONLY(current_instruction_ = slow_path->instruction());
    slow_path->GenerateCode(this);
    DEBUG_ONLY(current_instruction_ = nullptr);
    EndCodeSourceRange(slow_path->instruction()->token_pos());
    SpecialStatsEnd(stats_tag);
    set_current_instruction(nullptr);
@@ -1121,6 +1121,15 @@ class FlowGraphCompiler : public ValueObject {
  // is amenable to a peephole optimization.
  bool IsPeephole(Instruction* instr) const;

#if defined(DEBUG)
  bool CanCallDart() const {
    return current_instruction_ == nullptr ||
           current_instruction_->CanCallDart();
  }
#else
  bool CanCallDart() const { return true; }
#endif

  // This struct contains either function or code, the other one being NULL.
  class StaticCallsStruct : public ZoneAllocated {
   public:
@ -995,6 +995,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
|
|||
RawPcDescriptors::Kind kind,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
__ BranchLinkPatchable(stub, entry_kind);
|
||||
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
|
||||
}
|
||||
|
@ -1005,6 +1006,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
|
|||
LocationSummary* locs,
|
||||
const Function& target,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
|
||||
__ GenerateUnRelocatedPcRelativeCall();
|
||||
AddPcRelativeCallTarget(target, entry_kind);
|
||||
|
@ -1061,6 +1063,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
// Each ICData propagated from unoptimized to optimized code contains the
|
||||
// function that corresponds to the Dart function of that IC call. Due
|
||||
|
@ -1084,6 +1087,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
|
@ -1109,6 +1113,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
|
|||
LocationSummary* locs,
|
||||
intptr_t try_index,
|
||||
intptr_t slow_path_argument_count) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
|
||||
const ArgumentsDescriptor args_desc(arguments_descriptor);
|
||||
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
|
||||
|
@ -1156,6 +1161,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
|
|||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind,
|
||||
bool receiver_can_be_smi) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(ic_data.NumArgsTested() == 1);
|
||||
|
@ -1200,6 +1206,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
|
|||
LocationSummary* locs,
|
||||
const ICData& ic_data,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
const Code& stub =
|
||||
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
|
||||
__ LoadObject(R9, ic_data);
|
||||
|
@ -1216,6 +1223,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!function.IsClosureFunction());
|
||||
if (function.HasOptionalParameters() || function.IsGeneric()) {
|
||||
__ LoadObject(R4, arguments_descriptor);
|
||||
|
@ -1235,6 +1243,7 @@ void FlowGraphCompiler::EmitDispatchTableCall(
|
|||
Register cid_reg,
|
||||
int32_t selector_offset,
|
||||
const Array& arguments_descriptor) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(cid_reg != ARGS_DESC_REG);
|
||||
if (!arguments_descriptor.IsNull()) {
|
||||
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
|
||||
|
|
|
@ -966,6 +966,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
|
|||
RawPcDescriptors::Kind kind,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
__ BranchLinkPatchable(stub, entry_kind);
|
||||
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
|
||||
}
|
||||
|
@ -976,6 +977,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
|
|||
LocationSummary* locs,
|
||||
const Function& target,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
|
||||
__ GenerateUnRelocatedPcRelativeCall();
|
||||
AddPcRelativeCallTarget(target, entry_kind);
|
||||
|
@ -1023,6 +1025,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
// Each ICData propagated from unoptimized to optimized code contains the
|
||||
// function that corresponds to the Dart function of that IC call. Due
|
||||
|
@ -1045,6 +1048,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
|
@ -1076,6 +1080,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
|
|||
LocationSummary* locs,
|
||||
intptr_t try_index,
|
||||
intptr_t slow_path_argument_count) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
|
||||
const ArgumentsDescriptor args_desc(arguments_descriptor);
|
||||
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
|
||||
|
@ -1120,6 +1125,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
|
|||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind,
|
||||
bool receiver_can_be_smi) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(ic_data.NumArgsTested() == 1);
|
||||
const Code& initial_stub = StubCode::UnlinkedCall();
|
||||
const char* switchable_call_mode = "smiable";
|
||||
|
@ -1170,6 +1176,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
|
|||
LocationSummary* locs,
|
||||
const ICData& ic_data,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
const Code& stub =
|
||||
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
|
||||
__ LoadObject(R5, ic_data);
|
||||
|
@ -1186,6 +1193,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!function.IsClosureFunction());
|
||||
if (function.HasOptionalParameters() || function.IsGeneric()) {
|
||||
__ LoadObject(R4, arguments_descriptor);
|
||||
|
@ -1205,6 +1213,7 @@ void FlowGraphCompiler::EmitDispatchTableCall(
|
|||
Register cid_reg,
|
||||
int32_t selector_offset,
|
||||
const Array& arguments_descriptor) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(cid_reg != ARGS_DESC_REG);
|
||||
if (!arguments_descriptor.IsNull()) {
|
||||
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
|
||||
|
|
|
@ -861,6 +861,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
|
|||
RawPcDescriptors::Kind kind,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
__ Call(stub, /*moveable_target=*/false, entry_kind);
|
||||
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
|
||||
}
|
||||
|
@ -871,6 +872,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
|
|||
LocationSummary* locs,
|
||||
const Function& target,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
const auto& stub = StubCode::CallStaticFunction();
|
||||
__ Call(stub, /*movable_target=*/true, entry_kind);
|
||||
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
|
||||
|
@ -892,6 +894,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
|
|||
LocationSummary* locs,
|
||||
const ICData& ic_data,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
const Code& stub =
|
||||
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
|
||||
__ LoadObject(ECX, ic_data);
|
||||
|
@ -919,6 +922,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
|
||||
// Each ICData propagated from unoptimized to optimized code contains the
|
||||
// function that corresponds to the Dart function of that IC call. Due
|
||||
|
@ -942,6 +946,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
|
||||
|
@ -967,6 +972,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
|
|||
LocationSummary* locs,
|
||||
intptr_t try_index,
|
||||
intptr_t slow_path_argument_count) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
|
||||
const ArgumentsDescriptor args_desc(arguments_descriptor);
|
||||
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
|
||||
|
@ -1014,6 +1020,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
if (function.HasOptionalParameters() || function.IsGeneric()) {
|
||||
__ LoadObject(EDX, arguments_descriptor);
|
||||
} else {
|
||||
|
|
|
@ -977,6 +977,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
|
|||
RawPcDescriptors::Kind kind,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
__ CallPatchable(stub, entry_kind);
|
||||
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
|
||||
}
|
||||
|
@ -987,6 +988,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
|
|||
LocationSummary* locs,
|
||||
const Function& target,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(is_optimizing());
|
||||
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
|
||||
__ GenerateUnRelocatedPcRelativeCall();
|
||||
|
@ -1019,6 +1021,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
|
|||
LocationSummary* locs,
|
||||
const ICData& ic_data,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
const Code& stub =
|
||||
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
|
||||
__ LoadObject(RBX, ic_data);
|
||||
|
@ -1047,6 +1050,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
// Each ICData propagated from unoptimized to optimized code contains the
|
||||
// function that corresponds to the Dart function of that IC call. Due
|
||||
|
@ -1070,6 +1074,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
|
||||
|
@ -1095,6 +1100,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
|
|||
LocationSummary* locs,
|
||||
intptr_t try_index,
|
||||
intptr_t slow_path_argument_count) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
|
||||
const ArgumentsDescriptor args_desc(arguments_descriptor);
|
||||
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
|
||||
|
@ -1136,6 +1142,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
|
|||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind,
|
||||
bool receiver_can_be_smi) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(entry_kind == Code::EntryKind::kNormal ||
|
||||
entry_kind == Code::EntryKind::kUnchecked);
|
||||
ASSERT(ic_data.NumArgsTested() == 1);
|
||||
|
@ -1178,6 +1185,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
|
|||
TokenPosition token_pos,
|
||||
LocationSummary* locs,
|
||||
Code::EntryKind entry_kind) {
|
||||
ASSERT(CanCallDart());
|
||||
ASSERT(!function.IsClosureFunction());
|
||||
if (function.HasOptionalParameters() || function.IsGeneric()) {
|
||||
__ LoadObject(R10, arguments_descriptor);
|
||||
|
@ -1197,6 +1205,7 @@ void FlowGraphCompiler::EmitDispatchTableCall(
|
|||
Register cid_reg,
|
||||
int32_t selector_offset,
|
||||
const Array& arguments_descriptor) {
|
||||
ASSERT(CanCallDart());
|
||||
const Register table_reg = RAX;
|
||||
ASSERT(cid_reg != table_reg);
|
||||
ASSERT(cid_reg != ARGS_DESC_REG);
|
||||
|
|
|
@@ -1024,6 +1024,15 @@ class Instruction : public ZoneAllocated {
  // See StoreInstanceFieldInstr::HasUnknownSideEffects() for rationale.
  virtual bool HasUnknownSideEffects() const = 0;

  // Whether this instruction can call Dart code without going through
  // the runtime.
  //
  // Must be true for any instruction which can call Dart code without
  // first creating an exit frame to transition into the runtime.
  //
  // See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
  virtual bool CanCallDart() const { return false; }

  virtual bool CanTriggerGC() const;

  // Get the block entry for this instruction.

@@ -3181,6 +3190,8 @@ class BranchInstr : public Instruction {
    return comparison()->HasUnknownSideEffects();
  }

  virtual bool CanCallDart() const { return comparison()->CanCallDart(); }

  ComparisonInstr* comparison() const { return comparison_; }
  void SetComparison(ComparisonInstr* comp);

@@ -3661,6 +3672,7 @@ class TemplateDartCall : public Definition {
  }

  virtual bool MayThrow() const { return true; }
  virtual bool CanCallDart() const { return true; }

  virtual intptr_t InputCount() const { return inputs_->length(); }
  virtual Value* InputAt(intptr_t i) const { return inputs_->At(i); }

@@ -4411,6 +4423,7 @@ class IfThenElseInstr : public Definition {
  virtual bool HasUnknownSideEffects() const {
    return comparison()->HasUnknownSideEffects();
  }
  virtual bool CanCallDart() const { return comparison()->CanCallDart(); }

  virtual bool AttributesEqual(Instruction* other) const {
    IfThenElseInstr* other_if_then_else = other->AsIfThenElse();

@@ -4539,6 +4552,7 @@ class StaticCallInstr : public TemplateDartCall<0> {
  }

  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }

  // Initialize result type of this call instruction if target is a recognized
  // method or has pragma annotation.

@@ -4816,6 +4830,9 @@ class NativeCallInstr : public TemplateDartCall<0> {

  virtual bool HasUnknownSideEffects() const { return true; }

  // Always creates an exit frame before more Dart code can be called.
  virtual bool CanCallDart() const { return false; }

  void SetupNative();

  PRINT_OPERANDS_TO_SUPPORT

@@ -4875,6 +4892,9 @@ class FfiCallInstr : public Definition {

  virtual bool HasUnknownSideEffects() const { return true; }

  // Always creates an exit frame before more Dart code can be called.
  virtual bool CanCallDart() const { return false; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const;
  virtual Representation representation() const;

@@ -5435,6 +5455,7 @@ class StringInterpolateInstr : public TemplateDefinition<1, Throws> {
  virtual CompileType ComputeType() const;
  // Issues a static call to Dart code which calls toString on objects.
  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }
  virtual bool ComputeCanDeoptimize() const { return !FLAG_precompiled_mode; }

  const Function& CallFunction() const;

@@ -5489,6 +5510,10 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
           (emit_store_barrier_ == kEmitStoreBarrier);
  }

  void set_emit_store_barrier(StoreBarrierType value) {
    emit_store_barrier_ = value;
  }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return speculative_mode_;
  }

@@ -5509,6 +5534,8 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {

  virtual bool HasUnknownSideEffects() const { return false; }

  void PrintOperandsTo(BufferFormatter* f) const;

  ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

 private:

@@ -5516,7 +5543,7 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
    return compiler::Assembler::kValueCanBeSmi;
  }

  const StoreBarrierType emit_store_barrier_;
  StoreBarrierType emit_store_barrier_;
  const bool index_unboxed_;
  const intptr_t index_scale_;
  const intptr_t class_id_;

@@ -7102,6 +7129,7 @@ class CheckedSmiOpInstr : public TemplateDefinition<2, Throws> {
  virtual bool RecomputeType();

  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }

  virtual Definition* Canonicalize(FlowGraph* flow_graph);

@@ -7150,6 +7178,7 @@ class CheckedSmiComparisonInstr : public TemplateComparison<2, Throws> {
  bool is_negated() const { return is_negated_; }

  virtual bool HasUnknownSideEffects() const { return true; }
  virtual bool CanCallDart() const { return true; }

  PRINT_OPERANDS_TO_SUPPORT

@@ -7785,6 +7814,8 @@ class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {

  virtual bool HasUnknownSideEffects() const { return false; }

  virtual bool CanCallDart() const { return true; }

 private:
  InstanceCallInstr* instance_call_;
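The CanCallDart()/CanTriggerGC() pair declared above is what the new inter-block pass (runtime/vm/compiler/write_barrier_elimination.cc, added later in this CL) consumes while walking a block. A condensed, standalone sketch of that per-instruction update follows; the struct and set types are illustrative stand-ins for the VM's Instruction classes and BitVectors, not the pass's actual code:

#include <unordered_set>
#include <vector>

// Stand-in instruction record for illustration only; the real pass queries
// Instruction::CanCallDart(), Instruction::CanTriggerGC(), and
// AllocationInstr::WillAllocateNewOrRemembered().
struct Instr {
  bool can_call_dart = false;
  bool can_trigger_gc = false;
  bool is_allocation = false;
  bool allocates_new_or_remembered = false;
  bool is_array_allocation = false;
  int id = -1;
};

// Walk one block and keep the set of allocations whose later stores may skip
// the write barrier: a Dart call invalidates everything, any other GC point
// invalidates only array allocations (the runtime re-remembers the rest),
// and a new-or-remembered allocation becomes usable.
void UpdateUsableAllocations(const std::vector<Instr>& block,
                             std::unordered_set<int>* usable,
                             std::unordered_set<int>* array_ids) {
  for (const Instr& current : block) {
    if (current.can_call_dart) {
      usable->clear();
    } else if (current.can_trigger_gc) {
      for (int id : *array_ids) usable->erase(id);
    }
    if (current.is_allocation && current.allocates_new_or_remembered) {
      usable->insert(current.id);
      if (current.is_array_allocation) array_ids->insert(current.id);
    }
  }
}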
@@ -1125,6 +1125,13 @@ void StoreIndexedUnsafeInstr::PrintOperandsTo(BufferFormatter* f) const {
  value()->PrintTo(f);
}

void StoreIndexedInstr::PrintOperandsTo(BufferFormatter* f) const {
  Instruction::PrintOperandsTo(f);
  if (!ShouldEmitStoreBarrier()) {
    f->Print(", NoStoreBarrier");
  }
}

void TailCallInstr::PrintOperandsTo(BufferFormatter* f) const {
  const char* name = "<unknown code>";
  if (code_.IsStubCode()) {
@@ -44,6 +44,14 @@ RawFunction* GetFunction(const Library& lib, const char* name) {
  return func.raw();
}

RawClass* GetClass(const Library& lib, const char* name) {
  Thread* thread = Thread::Current();
  const auto& cls = Class::Handle(
      lib.LookupClassAllowPrivate(String::Handle(Symbols::New(thread, name))));
  EXPECT(!cls.IsNull());
  return cls.raw();
}

void Invoke(const Library& lib, const char* name) {
  // These tests rely on running unoptimized code to collect type feedback. The
  // interpreter does not collect type feedback for interface calls, so set
@@ -58,6 +58,7 @@ RawLibrary* LoadTestScript(const char* script,
                           const char* lib_uri = RESOLVED_USER_TEST_URI);

RawFunction* GetFunction(const Library& lib, const char* name);
RawClass* GetClass(const Library& lib, const char* name);

void Invoke(const Library& lib, const char* name);
@@ -51,7 +51,7 @@ class SlotCache : public ZoneAllocated {

const char* Slot::KindToCString(Kind k) {
  switch (k) {
#define NATIVE_CASE(C, F, id, M) \
#define NATIVE_CASE(C, U, F, id, M) \
  case NATIVE_SLOT_NAME(C, F, id, M): \
    return NATIVE_TO_STR(C, F, id, M);
    NATIVE_SLOTS_LIST(NATIVE_CASE)

@@ -70,7 +70,7 @@ const char* Slot::KindToCString(Kind k) {

bool Slot::ParseKind(const char* str, Kind* out) {
  ASSERT(str != nullptr && out != nullptr);
#define NATIVE_CASE(C, F, id, M) \
#define NATIVE_CASE(C, U, F, id, M) \
  if (strcmp(str, NATIVE_TO_STR(C, F, id, M)) == 0) { \
    *out = NATIVE_SLOT_NAME(C, F, id, M); \
    return true; \

@@ -101,7 +101,8 @@ const Slot& Slot::GetNativeSlot(Kind kind) {
    static const Slot fields[] = {
#define FIELD_FINAL (IsImmutableBit::encode(true))
#define FIELD_VAR (0)
#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability) \
#define DEFINE_NATIVE_FIELD(ClassName, UnderlyingType, FieldName, cid, \
                            mutability) \
  Slot(Kind::k##ClassName##_##FieldName, FIELD_##mutability, k##cid##Cid, \
       compiler::target::ClassName::FieldName##_offset(), \
       #ClassName "." #FieldName, nullptr),
@ -36,10 +36,11 @@ class ParsedFunction;
|
|||
// List of slots that correspond to fields of native objects in the following
|
||||
// format:
|
||||
//
|
||||
// V(class_name, field_name, exact_type, FINAL|VAR)
|
||||
// V(class_name, underlying_type, field_name, exact_type, FINAL|VAR)
|
||||
//
|
||||
// - class_name and field_name specify the name of the host class and the name
|
||||
// of the field respectively;
|
||||
// - underlying_type: the Raw class which holds the field;
|
||||
// - exact_type specifies exact type of the field (any load from this field
|
||||
// would only yield instances of this type);
|
||||
// - the last component specifies whether field behaves like a final field
|
||||
|
@ -48,31 +49,31 @@ class ParsedFunction;
|
|||
//
|
||||
// Note: native slots are expected to be non-nullable.
|
||||
#define NATIVE_SLOTS_LIST(V) \
|
||||
V(Array, length, Smi, FINAL) \
|
||||
V(Context, parent, Context, FINAL) \
|
||||
V(Closure, instantiator_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, delayed_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, function_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, function, Function, FINAL) \
|
||||
V(Closure, context, Context, FINAL) \
|
||||
V(Closure, hash, Context, VAR) \
|
||||
V(GrowableObjectArray, length, Smi, VAR) \
|
||||
V(GrowableObjectArray, data, Array, VAR) \
|
||||
V(TypedDataBase, length, Smi, FINAL) \
|
||||
V(TypedDataView, offset_in_bytes, Smi, FINAL) \
|
||||
V(TypedDataView, data, Dynamic, FINAL) \
|
||||
V(String, length, Smi, FINAL) \
|
||||
V(LinkedHashMap, index, TypedDataUint32Array, VAR) \
|
||||
V(LinkedHashMap, data, Array, VAR) \
|
||||
V(LinkedHashMap, hash_mask, Smi, VAR) \
|
||||
V(LinkedHashMap, used_data, Smi, VAR) \
|
||||
V(LinkedHashMap, deleted_keys, Smi, VAR) \
|
||||
V(ArgumentsDescriptor, type_args_len, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, positional_count, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, count, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, size, Smi, FINAL) \
|
||||
V(PointerBase, data_field, Dynamic, FINAL) \
|
||||
V(Type, arguments, TypeArguments, FINAL)
|
||||
V(Array, RawArray, length, Smi, FINAL) \
|
||||
V(Context, RawContext, parent, Context, FINAL) \
|
||||
V(Closure, RawClosure, instantiator_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, RawClosure, delayed_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, RawClosure, function_type_arguments, TypeArguments, FINAL) \
|
||||
V(Closure, RawClosure, function, Function, FINAL) \
|
||||
V(Closure, RawClosure, context, Context, FINAL) \
|
||||
V(Closure, RawClosure, hash, Context, VAR) \
|
||||
V(GrowableObjectArray, RawGrowableObjectArray, length, Smi, VAR) \
|
||||
V(GrowableObjectArray, RawGrowableObjectArray, data, Array, VAR) \
|
||||
V(TypedDataBase, RawTypedDataBase, length, Smi, FINAL) \
|
||||
V(TypedDataView, RawTypedDataView, offset_in_bytes, Smi, FINAL) \
|
||||
V(TypedDataView, RawTypedDataView, data, Dynamic, FINAL) \
|
||||
V(String, RawString, length, Smi, FINAL) \
|
||||
V(LinkedHashMap, RawLinkedHashMap, index, TypedDataUint32Array, VAR) \
|
||||
V(LinkedHashMap, RawLinkedHashMap, data, Array, VAR) \
|
||||
V(LinkedHashMap, RawLinkedHashMap, hash_mask, Smi, VAR) \
|
||||
V(LinkedHashMap, RawLinkedHashMap, used_data, Smi, VAR) \
|
||||
V(LinkedHashMap, RawLinkedHashMap, deleted_keys, Smi, VAR) \
|
||||
V(ArgumentsDescriptor, RawArray, type_args_len, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, RawArray, positional_count, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, RawArray, count, Smi, FINAL) \
|
||||
V(ArgumentsDescriptor, RawArray, size, Smi, FINAL) \
|
||||
V(PointerBase, RawPointerBase, data_field, Dynamic, FINAL) \
|
||||
V(Type, RawType, arguments, TypeArguments, FINAL)
|
||||
|
||||
// Slot is an abstraction that describes an readable (and possibly writeable)
|
||||
// location within an object.
|
||||
|
@ -86,7 +87,7 @@ class Slot : public ZoneAllocated {
|
|||
// clang-format off
|
||||
enum class Kind : uint8_t {
|
||||
// Native slots are identified by their kind - each native slot has its own.
|
||||
#define DECLARE_KIND(ClassName, FieldName, cid, mutability) \
|
||||
#define DECLARE_KIND(ClassName, UnderlyingType, FieldName, cid, mutability) \
|
||||
k##ClassName##_##FieldName,
|
||||
NATIVE_SLOTS_LIST(DECLARE_KIND)
|
||||
#undef DECLARE_KIND
|
||||
|
@ -134,7 +135,7 @@ class Slot : public ZoneAllocated {
|
|||
const ParsedFunction* parsed_function);
|
||||
|
||||
// Convenience getters for native slots.
|
||||
#define DEFINE_GETTER(ClassName, FieldName, cid, mutability) \
|
||||
#define DEFINE_GETTER(ClassName, UnderlyingType, FieldName, cid, mutability) \
|
||||
static const Slot& ClassName##_##FieldName() { \
|
||||
return GetNativeSlot(Kind::k##ClassName##_##FieldName); \
|
||||
}
|
||||
|
|
|
@@ -19,6 +19,7 @@
#include "vm/compiler/backend/redundancy_elimination.h"
#include "vm/compiler/backend/type_propagator.h"
#include "vm/compiler/call_specializer.h"
#include "vm/compiler/write_barrier_elimination.h"
#if defined(DART_PRECOMPILER)
#include "vm/compiler/aot/aot_call_specializer.h"
#include "vm/compiler/aot/precompiler.h"

@@ -266,7 +267,7 @@ FlowGraph* CompilerPass::RunForceOptimizedPipeline(
  // so it should not be lifted earlier than that pass.
  INVOKE_PASS(DCE);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(WriteBarrierElimination);
  INVOKE_PASS(EliminateWriteBarriers);
  INVOKE_PASS(FinalizeGraph);
#if defined(DART_PRECOMPILER)
  if (mode == kAOT) {

@@ -349,7 +350,7 @@ FlowGraph* CompilerPass::RunPipeline(PipelineMode mode,
  INVOKE_PASS(EliminateStackOverflowChecks);
  INVOKE_PASS(Canonicalize);
  INVOKE_PASS(AllocationSinking_DetachMaterializations);
  INVOKE_PASS(WriteBarrierElimination);
  INVOKE_PASS(EliminateWriteBarriers);
  INVOKE_PASS(FinalizeGraph);
#if defined(DART_PRECOMPILER)
  if (mode == kAOT) {

@@ -529,38 +530,7 @@ COMPILER_PASS(ReorderBlocks, {
  }
});

static void WriteBarrierElimination(FlowGraph* flow_graph) {
  for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    Definition* last_allocated = nullptr;
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Instruction* current = it.Current();
      if (!current->CanTriggerGC()) {
        if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
          if (instr->instance()->definition() == last_allocated) {
            instr->set_emit_store_barrier(kNoStoreBarrier);
          }
          continue;
        }
      }

      if (AllocationInstr* alloc = current->AsAllocation()) {
        if (alloc->WillAllocateNewOrRemembered()) {
          last_allocated = alloc;
          continue;
        }
      }

      if (current->CanTriggerGC()) {
        last_allocated = nullptr;
      }
    }
  }
}

COMPILER_PASS(WriteBarrierElimination,
              { WriteBarrierElimination(flow_graph); });
COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });

COMPILER_PASS(FinalizeGraph, {
  // At the end of the pipeline, force recomputing and caching graph
@@ -50,7 +50,7 @@ namespace dart {
  V(TypePropagation) \
  V(UseTableDispatch) \
  V(WidenSmiToInt32) \
  V(WriteBarrierElimination)
  V(EliminateWriteBarriers)

class AllocationSinking;
class BlockScheduler;
@@ -166,6 +166,8 @@ compiler_sources = [
  "stub_code_compiler_arm64.cc",
  "stub_code_compiler_ia32.cc",
  "stub_code_compiler_x64.cc",
  "write_barrier_elimination.cc",
  "write_barrier_elimination.h",
]

compiler_sources_tests = [

@@ -190,4 +192,5 @@ compiler_sources_tests = [
  "backend/typed_data_aot_test.cc",
  "backend/yield_position_test.cc",
  "cha_test.cc",
  "write_barrier_elimination_test.cc",
]
runtime/vm/compiler/write_barrier_elimination.cc (new file, 388 lines)
@@ -0,0 +1,388 @@
|
|||
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
|
||||
// for details. All rights reserved. Use of this source code is governed by a
|
||||
// BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include "vm/compiler/backend/flow_graph.h"
|
||||
#include "vm/compiler/compiler_pass.h"
|
||||
#include "vm/compiler/write_barrier_elimination.h"
|
||||
|
||||
namespace dart {
|
||||
|
||||
#if defined(DEBUG)
|
||||
DEFINE_FLAG(bool,
|
||||
trace_write_barrier_elimination,
|
||||
false,
|
||||
"Trace WriteBarrierElimination pass.");
|
||||
#endif
|
||||
|
||||
class DefinitionIndexPairTrait {
|
||||
public:
|
||||
typedef Definition* Key;
|
||||
typedef intptr_t Value;
|
||||
struct Pair {
|
||||
Definition* definition = nullptr;
|
||||
intptr_t index = -1;
|
||||
Pair() {}
|
||||
Pair(Definition* definition, intptr_t index)
|
||||
: definition(definition), index(index) {}
|
||||
};
|
||||
|
||||
static Key KeyOf(Pair kv) { return kv.definition; }
|
||||
static Value ValueOf(Pair kv) { return kv.index; }
|
||||
static inline intptr_t Hashcode(Key key) { return std::hash<Key>()(key); }
|
||||
static inline bool IsKeyEqual(Pair kv, Key key) {
|
||||
return kv.definition == key;
|
||||
}
|
||||
};
|
||||
|
||||
typedef DirectChainedHashMap<DefinitionIndexPairTrait> DefinitionIndexMap;
|
||||
|
||||
// Inter-block write-barrier elimination.
|
||||
//
|
||||
// This optimization removes write barriers from some store instructions under
|
||||
// certain assumptions which the runtime is responsible to sustain.
|
||||
//
|
||||
// We can skip a write barrier on a StoreInstanceField to a container object X
|
||||
// if we know that either:
|
||||
// - X is in new-space, or
|
||||
// - X is in old-space, and:
|
||||
// - X is in the store buffer, and
|
||||
// - X is in the deferred marking stack (if concurrent marking is enabled)
|
||||
//
|
||||
// The result of an Allocation instruction (Instruction::IsAllocation()) will
|
||||
// satisfy one of these requirements immediately after the instruction
|
||||
// if WillAllocateNewOrRemembered() is true.
|
||||
//
|
||||
// Without runtime support, we would have to assume that any instruction which
|
||||
// can trigger a new-space scavenge (Instruction::CanTriggerGC()) might promote
|
||||
// a new-space temporary into old-space, and we could not skip a store barrier
|
||||
// on a write into it afterward.
|
||||
//
|
||||
// However, many instructions can trigger GC in unlikely cases, like
|
||||
// CheckStackOverflow and Box. To avoid interrupting write barrier elimination
|
||||
// across these instructions, the runtime ensures that any live temporaries
|
||||
// (except arrays) promoted during a scavenge caused by a non-Dart-call
|
||||
// instruction (see Instruction::CanCallDart()) will be added to the store
|
||||
// buffer. Additionally, if concurrent marking was initiated, the runtime
|
||||
// ensures that all live temporaries are also in the deferred marking stack.
|
||||
//
|
||||
// See also Thread::RememberLiveTemporaries() and
|
||||
// Thread::DeferredMarkLiveTemporaries().
|
||||
class WriteBarrierElimination : public ValueObject {
|
||||
public:
|
||||
WriteBarrierElimination(Zone* zone, FlowGraph* flow_graph);
|
||||
|
||||
void Analyze();
|
||||
void SaveResults();
|
||||
|
||||
private:
|
||||
void IndexDefinitions(Zone* zone);
|
||||
|
||||
bool AnalyzeBlock(BlockEntryInstr* entry);
|
||||
void MergePredecessors(BlockEntryInstr* entry);
|
||||
|
||||
void UpdateVectorForBlock(BlockEntryInstr* entry, bool finalize);
|
||||
|
||||
static intptr_t Index(BlockEntryInstr* entry) {
|
||||
return entry->postorder_number();
|
||||
}
|
||||
|
||||
intptr_t Index(Definition* def) {
|
||||
ASSERT(IsUsable(def));
|
||||
return definition_indices_.LookupValue(def);
|
||||
}
|
||||
|
||||
bool IsUsable(Definition* def) {
|
||||
return def->IsPhi() || (def->IsAllocation() &&
|
||||
def->AsAllocation()->WillAllocateNewOrRemembered());
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
static bool SlotEligibleForWBE(const Slot& slot);
|
||||
#endif
|
||||
|
||||
FlowGraph* const flow_graph_;
|
||||
const GrowableArray<BlockEntryInstr*>* const block_order_;
|
||||
|
||||
// Number of usable definitions in the graph.
|
||||
intptr_t definition_count_ = 0;
|
||||
|
||||
// Maps each usable definition to its index in the bitvectors.
|
||||
DefinitionIndexMap definition_indices_;
|
||||
|
||||
// Bitvector with all non-Array-allocation instructions set. Used to
|
||||
// un-mark Array allocations as usable.
|
||||
BitVector* array_allocations_mask_;
|
||||
|
||||
// Bitvectors for each block of which allocations are new or remembered
|
||||
// at the start (after Phis).
|
||||
GrowableArray<BitVector*> usable_allocs_in_;
|
||||
|
||||
// Bitvectors for each block of which allocations are new or remembered
|
||||
// at the end of the block.
|
||||
GrowableArray<BitVector*> usable_allocs_out_;
|
||||
|
||||
// Remaining blocks to process.
|
||||
GrowableArray<BlockEntryInstr*> worklist_;
|
||||
|
||||
// Temporary used in many functions to avoid repeated zone allocation.
|
||||
BitVector* vector_;
|
||||
|
||||
// Bitvector of blocks which have been processed, to ensure each block
|
||||
// is processed at least once.
|
||||
BitVector* processed_blocks_;
|
||||
|
||||
#if defined(DEBUG)
|
||||
bool tracing_ = false;
|
||||
#else
|
||||
static constexpr bool tracing_ = false;
|
||||
#endif
|
||||
};
|
||||
|
||||
WriteBarrierElimination::WriteBarrierElimination(Zone* zone,
|
||||
FlowGraph* flow_graph)
|
||||
: flow_graph_(flow_graph), block_order_(&flow_graph->postorder()) {
|
||||
#if defined(DEBUG)
|
||||
if (flow_graph->should_print() && FLAG_trace_write_barrier_elimination) {
|
||||
tracing_ = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
IndexDefinitions(zone);
|
||||
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
usable_allocs_in_.Add(new (zone) BitVector(zone, definition_count_));
|
||||
usable_allocs_in_[i]->CopyFrom(vector_);
|
||||
|
||||
usable_allocs_out_.Add(new (zone) BitVector(zone, definition_count_));
|
||||
usable_allocs_out_[i]->CopyFrom(vector_);
|
||||
}
|
||||
|
||||
processed_blocks_ = new (zone) BitVector(zone, block_order_->length());
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::Analyze() {
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
worklist_.Add(block_order_->At(i));
|
||||
}
|
||||
|
||||
while (!worklist_.is_empty()) {
|
||||
auto* const entry = worklist_.RemoveLast();
|
||||
if (AnalyzeBlock(entry)) {
|
||||
for (intptr_t i = 0; i < entry->last_instruction()->SuccessorCount();
|
||||
++i) {
|
||||
if (tracing_) {
|
||||
THR_Print("Enqueueing block %" Pd "\n", entry->block_id());
|
||||
}
|
||||
worklist_.Add(entry->last_instruction()->SuccessorAt(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::SaveResults() {
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
vector_->CopyFrom(usable_allocs_in_[i]);
|
||||
UpdateVectorForBlock(block_order_->At(i), /*finalize=*/true);
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::IndexDefinitions(Zone* zone) {
|
||||
BitmapBuilder array_allocations;
|
||||
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
BlockEntryInstr* const block = block_order_->At(i);
|
||||
if (auto join_block = block->AsJoinEntry()) {
|
||||
for (PhiIterator it(join_block); !it.Done(); it.Advance()) {
|
||||
array_allocations.Set(definition_count_, false);
|
||||
definition_indices_.Insert({it.Current(), definition_count_++});
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
|
||||
it.Current()->ssa_temp_index(), definition_count_ - 1);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
|
||||
if (Definition* current = it.Current()->AsDefinition()) {
|
||||
if (IsUsable(current)) {
|
||||
array_allocations.Set(definition_count_, current->IsCreateArray());
|
||||
definition_indices_.Insert({current, definition_count_++});
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
|
||||
current->ssa_temp_index(), definition_count_ - 1);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vector_ = new (zone) BitVector(zone, definition_count_);
|
||||
vector_->SetAll();
|
||||
array_allocations_mask_ = new (zone) BitVector(zone, definition_count_);
|
||||
for (intptr_t i = 0; i < definition_count_; ++i) {
|
||||
if (!array_allocations.Get(i)) array_allocations_mask_->Add(i);
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::MergePredecessors(BlockEntryInstr* entry) {
|
||||
vector_->Clear();
|
||||
for (intptr_t i = 0; i < entry->PredecessorCount(); ++i) {
|
||||
BitVector* predecessor_set =
|
||||
usable_allocs_out_[Index(entry->PredecessorAt(i))];
|
||||
if (i == 0) {
|
||||
vector_->AddAll(predecessor_set);
|
||||
} else {
|
||||
vector_->Intersect(predecessor_set);
|
||||
}
|
||||
}
|
||||
|
||||
if (JoinEntryInstr* join = entry->AsJoinEntry()) {
|
||||
// A Phi is usable if and only if all its inputs are usable.
|
||||
for (PhiIterator it(join); !it.Done(); it.Advance()) {
|
||||
PhiInstr* phi = it.Current();
|
||||
ASSERT(phi->InputCount() == entry->PredecessorCount());
|
||||
bool is_usable = true;
|
||||
for (intptr_t i = 0; i < phi->InputCount(); ++i) {
|
||||
BitVector* const predecessor_set =
|
||||
usable_allocs_out_[Index(entry->PredecessorAt(i))];
|
||||
Definition* const origin = phi->InputAt(i)->definition();
|
||||
if (!IsUsable(origin) || !predecessor_set->Contains(Index(origin))) {
|
||||
is_usable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
vector_->Set(Index(phi), is_usable);
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Merge predecessors for %" Pd ".\n", entry->block_id());
|
||||
for (PhiIterator it(join); !it.Done(); it.Advance()) {
|
||||
PhiInstr* phi = it.Current();
|
||||
THR_Print("%" Pd ": %s\n", phi->ssa_temp_index(),
|
||||
vector_->Contains(Index(phi)) ? "true" : "false");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
bool WriteBarrierElimination::AnalyzeBlock(BlockEntryInstr* entry) {
|
||||
// Recompute the usable allocs in-set.
|
||||
MergePredecessors(entry);
|
||||
|
||||
// If the in-set has not changed, there's no work to do.
|
||||
BitVector* const in_set = usable_allocs_in_[Index(entry)];
|
||||
ASSERT(vector_->SubsetOf(*in_set)); // convergence
|
||||
if (vector_->Equals(*in_set) && processed_blocks_->Contains(Index(entry))) {
|
||||
if (tracing_) {
|
||||
THR_Print("Bailout of block %" Pd ": inputs didn't change.\n",
|
||||
entry->block_id());
|
||||
}
|
||||
return false;
|
||||
} else if (tracing_) {
|
||||
THR_Print("Inputs of block %" Pd " changed: ", entry->block_id());
|
||||
in_set->Print();
|
||||
THR_Print(" -> ");
|
||||
vector_->Print();
|
||||
THR_Print("\n");
|
||||
}
|
||||
|
||||
usable_allocs_in_[Index(entry)]->CopyFrom(vector_);
|
||||
UpdateVectorForBlock(entry, /*finalize=*/false);
|
||||
|
||||
processed_blocks_->Add(Index(entry));
|
||||
|
||||
// Successors only need to be updated if the out-set changes.
|
||||
if (vector_->Equals(*usable_allocs_out_[Index(entry)])) {
|
||||
if (tracing_) {
|
||||
THR_Print("Bailout of block %" Pd ": out-set didn't change.\n",
|
||||
entry->block_id());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
BitVector* const out_set = usable_allocs_out_[Index(entry)];
|
||||
ASSERT(vector_->SubsetOf(*out_set)); // convergence
|
||||
out_set->CopyFrom(vector_);
|
||||
if (tracing_) {
|
||||
THR_Print("Block %" Pd " changed.\n", entry->block_id());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
bool WriteBarrierElimination::SlotEligibleForWBE(const Slot& slot) {
|
||||
// We assume that Dart code only stores into Instances or Contexts.
|
||||
// This assumption is used in
|
||||
// RestoreWriteBarrierInvariantVisitor::VisitPointers.
|
||||
|
||||
switch (slot.kind()) {
|
||||
case Slot::Kind::kCapturedVariable: // Context
|
||||
return true;
|
||||
case Slot::Kind::kDartField: // Instance
|
||||
return true;
|
||||
|
||||
#define FOR_EACH_NATIVE_SLOT(class, underlying_type, field, type, modifiers) \
|
||||
case Slot::Kind::k##class##_##field: \
|
||||
return std::is_base_of<RawInstance, underlying_type>::value || \
|
||||
std::is_base_of<RawContext, underlying_type>::value;
|
||||
|
||||
NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
|
||||
#undef FOR_EACH_NATIVE_SLOT
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void WriteBarrierElimination::UpdateVectorForBlock(BlockEntryInstr* entry,
|
||||
bool finalize) {
|
||||
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
|
||||
Instruction* const current = it.Current();
|
||||
|
||||
if (finalize) {
|
||||
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
|
||||
Definition* const container = instr->instance()->definition();
|
||||
if (IsUsable(container) && vector_->Contains(Index(container))) {
|
||||
DEBUG_ASSERT(SlotEligibleForWBE(instr->slot()));
|
||||
instr->set_emit_store_barrier(kNoStoreBarrier);
|
||||
}
|
||||
} else if (StoreIndexedInstr* instr = current->AsStoreIndexed()) {
|
||||
Definition* const array = instr->array()->definition();
|
||||
if (IsUsable(array) && vector_->Contains(Index(array))) {
|
||||
instr->set_emit_store_barrier(StoreBarrierType::kNoStoreBarrier);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (current->CanCallDart()) {
|
||||
vector_->Clear();
|
||||
} else if (current->CanTriggerGC()) {
|
||||
// Clear array allocations. These are not added to the remembered set
|
||||
// by Thread::RememberLiveTemporaries() after a scavenge.
|
||||
vector_->Intersect(array_allocations_mask_);
|
||||
}
|
||||
|
||||
if (AllocationInstr* const alloc = current->AsAllocation()) {
|
||||
if (alloc->WillAllocateNewOrRemembered()) {
|
||||
vector_->Add(Index(alloc));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EliminateWriteBarriers(FlowGraph* flow_graph) {
|
||||
WriteBarrierElimination elimination(Thread::Current()->zone(), flow_graph);
|
||||
elimination.Analyze();
|
||||
elimination.SaveResults();
|
||||
}
|
||||
|
||||
} // namespace dart
|
runtime/vm/compiler/write_barrier_elimination.h (new file, 15 lines)
@@ -0,0 +1,15 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
#define RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_

namespace dart {

class FlowGraph;
void EliminateWriteBarriers(FlowGraph* flow_graph);

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
runtime/vm/compiler/write_barrier_elimination_test.cc (new file, 198 lines)
@@ -0,0 +1,198 @@
|
|||
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
|
||||
// for details. All rights reserved. Use of this source code is governed by a
|
||||
// BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
#include "platform/assert.h"
|
||||
#include "vm/compiler/backend/il_printer.h"
|
||||
#include "vm/compiler/backend/il_test_helper.h"
|
||||
#include "vm/unit_test.h"
|
||||
|
||||
namespace dart {
|
||||
|
||||
DEBUG_ONLY(DECLARE_FLAG(bool, trace_write_barrier_elimination);)
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_JoinSuccessors) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// This is a regression test for a bug where we were using
|
||||
// JoinEntry::SuccessorCount() to determine the number of outgoing blocks
|
||||
// from the join block. JoinEntry::SuccessorCount() is in fact always 0;
|
||||
// JoinEntry::last_instruction()->SuccessorCount() should be used instead.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
int value;
|
||||
C next;
|
||||
C prev;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C prev = C();
|
||||
C next;
|
||||
while (x --> 0) {
|
||||
next = C();
|
||||
next.prev = prev;
|
||||
prev?.next = next;
|
||||
prev = next;
|
||||
fn();
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
main() { foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store1 = nullptr;
|
||||
StoreInstanceFieldInstr* store2 = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchTrue,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store1},
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store2},
|
||||
}));
|
||||
|
||||
EXPECT(store1->ShouldEmitStoreBarrier() == false);
|
||||
EXPECT(store2->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_AtLeastOnce) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// Ensure that we process every block at least once during the analysis
|
||||
// phase so that the out-sets will be initialized. If we don't process
|
||||
// each block at least once, the store "c.next = n" will be marked
|
||||
// NoWriteBarrier.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
C next;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C c = C();
|
||||
C n = C();
|
||||
if (x > 5) {
|
||||
fn();
|
||||
}
|
||||
c.next = n;
|
||||
return c;
|
||||
}
|
||||
|
||||
main() { foo(0); foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchFalse,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store},
|
||||
}));
|
||||
|
||||
EXPECT(store->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_Arrays) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// Test that array allocations are not considered usable after a
|
||||
// may-trigger-GC instruction (in this case CheckStackOverflow), unlike
|
||||
// normal allocations, which are only interruped by a Dart call.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
C next;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C c = C();
|
||||
C n = C();
|
||||
List<C> array = List<C>(1);
|
||||
while (x --> 0) {
|
||||
c.next = n;
|
||||
n = c;
|
||||
c = C();
|
||||
}
|
||||
array[0] = c;
|
||||
return c;
|
||||
}
|
||||
|
||||
main() { foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store_into_c = nullptr;
|
||||
StoreIndexedInstr* store_into_array = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchTrue,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store_into_c},
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchFalse,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreIndexed, &store_into_array},
|
||||
}));
|
||||
|
||||
EXPECT(store_into_c->ShouldEmitStoreBarrier() == false);
|
||||
EXPECT(store_into_array->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
} // namespace dart
|
|
@@ -819,6 +819,8 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
    ASSERT(result);
  }

  isolate_group_->DeferredMarkLiveTemporaries();

  // Wait for roots to be marked before exiting safepoint.
  MonitorLocker ml(&root_slices_monitor_);
  while (root_slices_finished_ != kNumRootSlices) {
@@ -1187,6 +1187,9 @@ void Scavenger::Scavenge() {
  ProcessWeakReferences();
  page_space->ReleaseDataLock();

  // Restore write-barrier assumptions.
  isolate_group->RememberLiveTemporaries();

  // Scavenge finished. Run accounting.
  int64_t end = OS::GetCurrentMonotonicMicros();
  heap_->RecordTime(kProcessToSpace, process_to_space - iterate_roots);
@@ -2366,7 +2366,7 @@ void Isolate::LowLevelCleanup(Isolate* isolate) {
  // after a potential shutdown of the group, which would turn down any pending
  // GC tasks as well as the heap.
  Isolate::MarkIsolateDead(is_application_isolate);
}
}  // namespace dart

Dart_InitializeIsolateCallback Isolate::initialize_callback_ = nullptr;
Dart_IsolateGroupCreateCallback Isolate::create_group_callback_ = nullptr;

@@ -2461,6 +2461,18 @@ void IsolateGroup::ReleaseStoreBuffers() {
  thread_registry()->ReleaseStoreBuffers();
}

void Isolate::RememberLiveTemporaries() {
  if (mutator_thread_ != nullptr) {
    mutator_thread_->RememberLiveTemporaries();
  }
}

void Isolate::DeferredMarkLiveTemporaries() {
  if (mutator_thread_ != nullptr) {
    mutator_thread_->DeferredMarkLiveTemporaries();
  }
}

void IsolateGroup::EnableIncrementalBarrier(
    MarkingStack* marking_stack,
    MarkingStack* deferred_marking_stack) {

@@ -2578,6 +2590,17 @@ uword IsolateGroup::FindPendingDeoptAtSafepoint(uword fp) {
  return 0;
}

void IsolateGroup::DeferredMarkLiveTemporaries() {
  ForEachIsolate(
      [&](Isolate* isolate) { isolate->DeferredMarkLiveTemporaries(); },
      /*at_safepoint=*/true);
}

void IsolateGroup::RememberLiveTemporaries() {
  ForEachIsolate([&](Isolate* isolate) { isolate->RememberLiveTemporaries(); },
                 /*at_safepoint=*/true);
}

RawClass* Isolate::GetClassForHeapWalkAt(intptr_t cid) {
  RawClass* raw_class = nullptr;
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)

@@ -479,6 +479,9 @@ class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {

  uword FindPendingDeoptAtSafepoint(uword fp);

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

 private:
  friend class Heap;
  friend class StackFrame;  // For `[isolates_].First()`.

@@ -1149,6 +1152,9 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {

  static void NotifyLowMemory();

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

 private:
  friend class Dart;  // Init, InitOnce, Shutdown.
  friend class IsolateKillerVisitor;  // Kill().

@@ -160,7 +160,7 @@ bool StackFrame::IsBareInstructionsStubFrame() const {
  return false;
}

bool StackFrame::IsStubFrame() const {
bool StackFrame::IsStubFrame(bool needed_for_gc) const {
  if (is_interpreted()) {
    return false;
  }

@@ -176,7 +176,7 @@ bool StackFrame::IsStubFrame() const {
  NoSafepointScope no_safepoint;
#endif

  RawCode* code = GetCodeObject();
  RawCode* code = GetCodeObject(needed_for_gc);
  ASSERT(code != Object::null());
  const intptr_t cid = code->ptr()->owner_->GetClassId();
  ASSERT(cid == kNullCid || cid == kClassCid || cid == kFunctionCid);

@@ -418,9 +418,9 @@ RawCode* StackFrame::LookupDartCode() const {
  return Code::null();
}

RawCode* StackFrame::GetCodeObject() const {
RawCode* StackFrame::GetCodeObject(bool needed_for_gc) const {
  ASSERT(!is_interpreted());
  if (auto isolate = IsolateOfBareInstructionsFrame(/*needed_for_gc=*/false)) {
  if (auto isolate = IsolateOfBareInstructionsFrame(needed_for_gc)) {
    auto const rct = isolate->reverse_pc_lookup_cache();
    return rct->Lookup(pc(), /*is_return_address=*/true);
  } else {

@@ -546,8 +546,8 @@ TokenPosition StackFrame::GetTokenPos() const {
  return TokenPosition::kNoSource;
}

bool StackFrame::IsValid() const {
  if (IsEntryFrame() || IsExitFrame() || IsStubFrame()) {
bool StackFrame::IsValid(bool needed_for_gc) const {
  if (IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc)) {
    return true;
  }
  if (is_interpreted()) {

@@ -94,7 +94,7 @@ class StackFrame : public ValueObject {
  const char* ToCString() const;

  // Check validity of a frame, used for assertion purposes.
  virtual bool IsValid() const;
  virtual bool IsValid(bool needed_for_gc = false) const;

  // Returns the isolate containing the bare instructions of the current frame.
  //

@@ -112,11 +112,12 @@ class StackFrame : public ValueObject {
  bool IsBareInstructionsStubFrame() const;

  // Frame type.
  virtual bool IsDartFrame(bool validate = true) const {
    ASSERT(!validate || IsValid());
    return !(IsEntryFrame() || IsExitFrame() || IsStubFrame());
  virtual bool IsDartFrame(bool validate = true,
                           bool needed_for_gc = false) const {
    ASSERT(!validate || IsValid(needed_for_gc));
    return !(IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc));
  }
  virtual bool IsStubFrame() const;
  virtual bool IsStubFrame(bool needed_for_gc = false) const;
  virtual bool IsEntryFrame() const { return false; }
  virtual bool IsExitFrame() const { return false; }

@@ -158,7 +159,7 @@ class StackFrame : public ValueObject {
  Thread* thread() const { return thread_; }

 private:
  RawCode* GetCodeObject() const;
  RawCode* GetCodeObject(bool needed_for_gc = false) const;
  RawBytecode* GetBytecodeObject() const;

@@ -199,9 +200,11 @@ class StackFrame : public ValueObject {
// runtime code.
class ExitFrame : public StackFrame {
 public:
  bool IsValid() const { return sp() == 0; }
  bool IsDartFrame(bool validate = true) const { return false; }
  bool IsStubFrame() const { return false; }
  bool IsValid(bool needed_for_gc = false) const { return sp() == 0; }
  bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
    return false;
  }
  bool IsStubFrame(bool needed_for_gc = false) const { return false; }
  bool IsExitFrame() const { return true; }

  // Visit objects in the frame.

@@ -221,11 +224,13 @@ class ExitFrame : public StackFrame {
// dart code.
class EntryFrame : public StackFrame {
 public:
  bool IsValid() const {
  bool IsValid(bool needed_for_gc = false) const {
    return StubCode::InInvocationStub(pc(), is_interpreted());
  }
  bool IsDartFrame(bool validate = true) const { return false; }
  bool IsStubFrame() const { return false; }
  bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
    return false;
  }
  bool IsStubFrame(bool needed_for_gc = false) const { return false; }
  bool IsEntryFrame() const { return true; }

  // Visit objects in the frame.

@@ -558,6 +558,7 @@ void Thread::StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy) {
}

void Thread::StoreBufferAddObject(RawObject* obj) {
  ASSERT(this == Thread::Current());
  store_buffer_block_->Push(obj);
  if (store_buffer_block_->IsFull()) {
    StoreBufferBlockProcess(StoreBuffer::kCheckThreshold);

@@ -728,6 +729,108 @@ void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
  }
}

class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
 public:
  RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
                                      Thread* thread,
                                      Thread::RestoreWriteBarrierInvariantOp op)
      : ObjectPointerVisitor(group),
        thread_(thread),
        current_(Thread::Current()),
        op_(op) {}

  void VisitPointers(RawObject** first, RawObject** last) {
    for (; first != last + 1; first++) {
      RawObject* obj = *first;
      // Stores into new-space objects don't need a write barrier.
      if (obj->IsSmiOrNewObject()) continue;

      // To avoid adding too much work into the remembered set, skip
      // arrays. Write barrier elimination will not remove the barrier
      // if we can trigger GC between array allocation and store.
      if (obj->GetClassId() == kArrayCid) continue;

      // Dart code won't store into VM-internal objects except Contexts.
      // This assumption is checked by an assertion in
      // WriteBarrierElimination::UpdateVectorForBlock.
      if (!obj->IsDartInstance() && !obj->IsContext()) continue;

      // Dart code won't store into canonical instances.
      if (obj->IsCanonical()) continue;

      // Objects in the VM isolate heap are immutable and won't be
      // stored into. Check this condition last because there's no bit
      // in the header for it.
      if (obj->InVMIsolateHeap()) continue;

      switch (op_) {
        case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
          if (!obj->IsRemembered()) {
            obj->AddToRememberedSet(current_);
          }
          if (current_->is_marking()) {
            current_->DeferredMarkingStackAddObject(obj);
          }
          break;
        case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
          // Re-scan obj when finalizing marking.
          current_->DeferredMarkingStackAddObject(obj);
          break;
      }
    }
  }

 private:
  Thread* const thread_;
  Thread* const current_;
  Thread::RestoreWriteBarrierInvariantOp op_;
};
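
// As an illustration only (not part of this change), the skip conditions in
// VisitPointers above can be read as a single predicate. A minimal sketch,
// with an invented helper name, restating the same checks and assuming the
// same RawObject query methods used above:
//
//   static bool NeedsInvariantRestoration(RawObject* obj) {
//     if (obj->IsSmiOrNewObject()) return false;         // new-space stores need no barrier
//     if (obj->GetClassId() == kArrayCid) return false;  // arrays keep their barriers
//     if (!obj->IsDartInstance() && !obj->IsContext()) return false;  // VM-internal
//     if (obj->IsCanonical()) return false;              // never stored into
//     if (obj->InVMIsolateHeap()) return false;          // immutable
//     return true;  // old object whose barrier WBE may have eliminated
//   }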

// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
  ASSERT(IsAtSafepoint());
  ASSERT(IsMutatorThread());

  const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
      StackFrameIterator::kAllowCrossThreadIteration;
  StackFrameIterator frames_iterator(top_exit_frame_info(),
                                     ValidationPolicy::kDontValidateFrames,
                                     this, cross_thread_policy);
  RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
  bool scan_next_dart_frame = false;
  for (StackFrame* frame = frames_iterator.NextFrame(); frame != NULL;
       frame = frames_iterator.NextFrame()) {
    if (frame->IsExitFrame()) {
      scan_next_dart_frame = true;
    } else if (frame->IsDartFrame(/*validate=*/false, /*needed_for_gc=*/true)) {
      if (scan_next_dart_frame) {
        frame->VisitObjectPointers(&visitor);
      }
      scan_next_dart_frame = false;
    }
  }
}
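
// Illustrative note (not part of this change): only the Dart frame directly
// below each exit frame is visited. Deeper Dart frames are suspended at Dart
// calls, and the comment above only makes the remembered-set/deferred-marking
// assumption for non-Dart-call instructions. A sketch of a stack as the
// iterator walks it, topmost frame first:
//
//   [exit frame]    -> scan_next_dart_frame = true
//   [Dart frame A]  -> visited; its live temporaries are restored
//   [Dart frame B]  -> skipped; it is suspended at a Dart call into A
//   [entry frame]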

void Thread::DeferredMarkLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}

void Thread::RememberLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}
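
// For orientation, a sketch of the call paths (derived only from the hunks
// shown earlier in this diff, not additional code):
//
//   Scavenger::Scavenge()
//     -> IsolateGroup::RememberLiveTemporaries()
//       -> Isolate::RememberLiveTemporaries() -> Thread::RememberLiveTemporaries()
//         -> RestoreWriteBarrierInvariant(kAddToRememberedSet)
//
//   GCMarker::StartConcurrentMark()
//     -> IsolateGroup::DeferredMarkLiveTemporaries()
//       -> Isolate::DeferredMarkLiveTemporaries() -> Thread::DeferredMarkLiveTemporaries()
//         -> RestoreWriteBarrierInvariant(kAddToDeferredMarkingStack)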

bool Thread::CanLoadFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if the

@@ -826,6 +826,8 @@ class Thread : public ThreadState {
  // Visit all object pointers.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  bool IsValidHandle(Dart_Handle object) const;
  bool IsValidLocalHandle(Dart_Handle object) const;

@@ -856,6 +858,13 @@ class Thread : public ThreadState {
  template <class T>
  T* AllocateReusableHandle();

  enum class RestoreWriteBarrierInvariantOp {
    kAddToRememberedSet,
    kAddToDeferredMarkingStack
  };
  friend class RestoreWriteBarrierInvariantVisitor;
  void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);

  // Set the current compiler state and return the previous compiler state.
  CompilerState* SetCompilerState(CompilerState* state) {
    CompilerState* previous = compiler_state_;