Mirror of https://github.com/dart-lang/sdk
synced 2024-11-02 08:20:31 +00:00
Revert "Re-land "[vm] Aggressive write-barrier elimination.""
This reverts commit eff1a9ff97.

Reason for revert: Causes flaky hits of RELEASE_ASSERT in marker.cc, see b/151131634.

Original change's description:
> Re-land "[vm] Aggressive write-barrier elimination."
>
> The original revision is in Patchset 3.
>
> Four bugs were fixed:
>
> 1. JoinEntryInstr::SuccessorCount() is not the correct way to get the
>    number of successor blocks from the Join block;
>    JoinEntryInstr::last_instruction()->SuccessorCount() must be used
>    instead.
>
> 2. BitVector::Equals() was non-deterministically returning 'false'
>    for equal vectors.
>
> 3. All blocks need to be processed at least once during the Analysis
>    phase (not only in the SaveResults phase).
>
> 4. We were not removing write barriers from StoreIndexed instructions,
>    even though we had support for it.
>
> This reverts commit 7fd8ad5a2d.
>
> Fixes https://github.com/dart-lang/sdk/issues/40780
>
> Change-Id: I9650ec2c547ec49cf88ca0524e14f6c245621f6a
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/138086
> Commit-Queue: Samir Jindel <sjindel@google.com>
> Reviewed-by: Martin Kustermann <kustermann@google.com>
> Reviewed-by: Ryan Macnak <rmacnak@google.com>

TBR=kustermann@google.com,rmacnak@google.com,sjindel@google.com

Change-Id: If9afd84465175fad2431405a97e9293c8bd5e476
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/138808
Reviewed-by: Martin Kustermann <kustermann@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
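Of the four bugs listed above, bug 2 is easiest to see in isolation: when two bit vectors have the same logical length but their last storage word carries stale bits above that length, a naive word-by-word comparison can report inequality even for equal vectors. The following is a minimal, self-contained sketch and not the VM's BitVector code — the word size, names, and container are illustrative assumptions — showing the masking that makes the comparison ignore bits beyond the logical length (the VM's version of this logic appears in the bit_vector.cc hunk of the diff below).

#include <cstdint>
#include <vector>

// Illustrative only: compare two bit sets of the same logical `length`,
// masking off unused high bits of the trailing word so that stale bits
// beyond `length` cannot make equal vectors compare unequal.
bool BitsEqual(const std::vector<uint64_t>& a,
               const std::vector<uint64_t>& b,
               int length) {
  constexpr int kBitsPerWord = 64;
  const int full_words = length / kBitsPerWord;
  for (int i = 0; i < full_words; ++i) {
    if (a[i] != b[i]) return false;
  }
  const int rem = length % kBitsPerWord;
  if (rem != 0) {
    // Keep only the low `rem` bits of the final, partially used word.
    const uint64_t mask = (uint64_t{1} << rem) - 1;
    if ((a[full_words] & mask) != (b[full_words] & mask)) return false;
  }
  return true;
}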
This commit is contained in:
parent 83378633e1
commit 30a12a349e
28 changed files with 95 additions and 922 deletions
@@ -3,7 +3,7 @@
// BSD-style license that can be found in the LICENSE file.

#include "vm/bit_vector.h"
#include "vm/log.h"
#include "vm/os.h"

namespace dart {

@@ -40,7 +40,7 @@ bool BitVector::Equals(const BitVector& other) const {
}
if (i < data_length_) {
// Don't compare bits beyond length_.
const intptr_t shift_size = kBitsPerWord - (length_ % kBitsPerWord);
const intptr_t shift_size = (kBitsPerWord - length_) & (kBitsPerWord - 1);
const uword mask = static_cast<uword>(-1) >> shift_size;
if ((data_[i] & mask) != (other.data_[i] & mask)) return false;
}

@@ -105,11 +105,11 @@ bool BitVector::IsEmpty() const {
}

void BitVector::Print() const {
THR_Print("[");
OS::PrintErr("[");
for (intptr_t i = 0; i < length_; i++) {
THR_Print(Contains(i) ? "1" : "0");
OS::PrintErr(Contains(i) ? "1" : "0");
}
THR_Print("]");
OS::PrintErr("]");
}

} // namespace dart
@@ -70,8 +70,6 @@ class BitVector : public ZoneAllocated {
data_[i / kBitsPerWord] &= ~(static_cast<uword>(1) << (i % kBitsPerWord));
}

void Set(intptr_t i, bool value) { value ? Add(i) : Remove(i); }

bool Equals(const BitVector& other) const;

// Add all elements that are in the bitvector from.

@@ -94,14 +92,6 @@ class BitVector : public ZoneAllocated {
return (block & (static_cast<uword>(1) << (i % kBitsPerWord))) != 0;
}

bool SubsetOf(const BitVector& other) {
ASSERT(length_ == other.length_);
for (intptr_t i = 0; i < data_length_; ++i) {
if ((data_[i] & other.data_[i]) != data_[i]) return false;
}
return true;
}

void Clear() {
for (intptr_t i = 0; i < data_length_; i++) {
data_[i] = 0;
@@ -615,9 +615,7 @@ void FlowGraphCompiler::VisitBlocks() {
EmitInstructionPrologue(instr);
ASSERT(pending_deoptimization_env_ == NULL);
pending_deoptimization_env_ = instr->env();
DEBUG_ONLY(current_instruction_ = instr);
instr->EmitNativeCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
pending_deoptimization_env_ = NULL;
if (IsPeephole(instr)) {
ASSERT(top_of_stack_ == nullptr);

@@ -710,9 +708,7 @@ void FlowGraphCompiler::GenerateDeferredCode() {
set_current_instruction(slow_path->instruction());
SpecialStatsBegin(stats_tag);
BeginCodeSourceRange();
DEBUG_ONLY(current_instruction_ = slow_path->instruction());
slow_path->GenerateCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
EndCodeSourceRange(slow_path->instruction()->token_pos());
SpecialStatsEnd(stats_tag);
set_current_instruction(nullptr);
@@ -1121,15 +1121,6 @@ class FlowGraphCompiler : public ValueObject {
// is amenable to a peephole optimization.
bool IsPeephole(Instruction* instr) const;

#if defined(DEBUG)
bool CanCallDart() const {
return current_instruction_ == nullptr ||
current_instruction_->CanCallDart();
}
#else
bool CanCallDart() const { return true; }
#endif

// This struct contains either function or code, the other one being NULL.
class StaticCallsStruct : public ZoneAllocated {
public:
@@ -995,7 +995,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

@@ -1006,7 +1005,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);

@@ -1063,7 +1061,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due

@@ -1087,7 +1084,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);

@@ -1113,7 +1109,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(

@@ -1161,7 +1156,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);

@@ -1206,7 +1200,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R9, ic_data);

@@ -1223,7 +1216,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R4, arguments_descriptor);

@@ -1243,7 +1235,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
ASSERT(cid_reg != ARGS_DESC_REG);
if (!arguments_descriptor.IsNull()) {
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
@@ -966,7 +966,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

@@ -977,7 +976,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);

@@ -1025,7 +1023,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due

@@ -1048,7 +1045,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);

@@ -1080,7 +1076,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(

@@ -1125,7 +1120,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(ic_data.NumArgsTested() == 1);
const Code& initial_stub = StubCode::UnlinkedCall();
const char* switchable_call_mode = "smiable";

@@ -1176,7 +1170,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R5, ic_data);

@@ -1193,7 +1186,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R4, arguments_descriptor);

@@ -1213,7 +1205,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
ASSERT(cid_reg != ARGS_DESC_REG);
if (!arguments_descriptor.IsNull()) {
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
@@ -861,7 +861,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ Call(stub, /*moveable_target=*/false, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

@@ -872,7 +871,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const auto& stub = StubCode::CallStaticFunction();
__ Call(stub, /*movable_target=*/true, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);

@@ -894,7 +892,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(ECX, ic_data);

@@ -922,7 +919,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due

@@ -946,7 +942,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);

@@ -972,7 +967,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(

@@ -1020,7 +1014,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(EDX, arguments_descriptor);
} else {
@@ -977,7 +977,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ CallPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}

@@ -988,7 +987,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(is_optimizing());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();

@@ -1021,7 +1019,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(RBX, ic_data);

@@ -1050,7 +1047,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due

@@ -1074,7 +1070,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);

@@ -1100,7 +1095,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(

@@ -1142,7 +1136,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);

@@ -1185,7 +1178,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R10, arguments_descriptor);

@@ -1205,7 +1197,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
const Register table_reg = RAX;
ASSERT(cid_reg != table_reg);
ASSERT(cid_reg != ARGS_DESC_REG);
@@ -1024,15 +1024,6 @@ class Instruction : public ZoneAllocated {
// See StoreInstanceFieldInstr::HasUnknownSideEffects() for rationale.
virtual bool HasUnknownSideEffects() const = 0;

// Whether this instruction can call Dart code without going through
// the runtime.
//
// Must be true for any instruction which can call Dart code without
// first creating an exit frame to transition into the runtime.
//
// See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
virtual bool CanCallDart() const { return false; }

virtual bool CanTriggerGC() const;

// Get the block entry for this instruction.

@@ -3190,8 +3181,6 @@ class BranchInstr : public Instruction {
return comparison()->HasUnknownSideEffects();
}

virtual bool CanCallDart() const { return comparison()->CanCallDart(); }

ComparisonInstr* comparison() const { return comparison_; }
void SetComparison(ComparisonInstr* comp);

@@ -3672,7 +3661,6 @@ class TemplateDartCall : public Definition {
}

virtual bool MayThrow() const { return true; }
virtual bool CanCallDart() const { return true; }

virtual intptr_t InputCount() const { return inputs_->length(); }
virtual Value* InputAt(intptr_t i) const { return inputs_->At(i); }

@@ -4423,7 +4411,6 @@ class IfThenElseInstr : public Definition {
virtual bool HasUnknownSideEffects() const {
return comparison()->HasUnknownSideEffects();
}
virtual bool CanCallDart() const { return comparison()->CanCallDart(); }

virtual bool AttributesEqual(Instruction* other) const {
IfThenElseInstr* other_if_then_else = other->AsIfThenElse();

@@ -4552,7 +4539,6 @@ class StaticCallInstr : public TemplateDartCall<0> {
}

virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }

// Initialize result type of this call instruction if target is a recognized
// method or has pragma annotation.

@@ -4830,9 +4816,6 @@ class NativeCallInstr : public TemplateDartCall<0> {

virtual bool HasUnknownSideEffects() const { return true; }

// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }

void SetupNative();

PRINT_OPERANDS_TO_SUPPORT

@@ -4892,9 +4875,6 @@ class FfiCallInstr : public Definition {

virtual bool HasUnknownSideEffects() const { return true; }

// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }

virtual Representation RequiredInputRepresentation(intptr_t idx) const;
virtual Representation representation() const;

@@ -5455,7 +5435,6 @@ class StringInterpolateInstr : public TemplateDefinition<1, Throws> {
virtual CompileType ComputeType() const;
// Issues a static call to Dart code which calls toString on objects.
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return !FLAG_precompiled_mode; }

const Function& CallFunction() const;

@@ -5510,10 +5489,6 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
(emit_store_barrier_ == kEmitStoreBarrier);
}

void set_emit_store_barrier(StoreBarrierType value) {
emit_store_barrier_ = value;
}

virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
return speculative_mode_;
}

@@ -5534,8 +5509,6 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {

virtual bool HasUnknownSideEffects() const { return false; }

void PrintOperandsTo(BufferFormatter* f) const;

ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT

private:

@@ -5543,7 +5516,7 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
return compiler::Assembler::kValueCanBeSmi;
}

StoreBarrierType emit_store_barrier_;
const StoreBarrierType emit_store_barrier_;
const bool index_unboxed_;
const intptr_t index_scale_;
const intptr_t class_id_;

@@ -7129,7 +7102,6 @@ class CheckedSmiOpInstr : public TemplateDefinition<2, Throws> {
virtual bool RecomputeType();

virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }

virtual Definition* Canonicalize(FlowGraph* flow_graph);

@@ -7178,7 +7150,6 @@ class CheckedSmiComparisonInstr : public TemplateComparison<2, Throws> {
bool is_negated() const { return is_negated_; }

virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }

PRINT_OPERANDS_TO_SUPPORT

@@ -7814,8 +7785,6 @@ class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {

virtual bool HasUnknownSideEffects() const { return false; }

virtual bool CanCallDart() const { return true; }

private:
InstanceCallInstr* instance_call_;
@@ -1125,13 +1125,6 @@ void StoreIndexedUnsafeInstr::PrintOperandsTo(BufferFormatter* f) const {
value()->PrintTo(f);
}

void StoreIndexedInstr::PrintOperandsTo(BufferFormatter* f) const {
Instruction::PrintOperandsTo(f);
if (!ShouldEmitStoreBarrier()) {
f->Print(", NoStoreBarrier");
}
}

void TailCallInstr::PrintOperandsTo(BufferFormatter* f) const {
const char* name = "<unknown code>";
if (code_.IsStubCode()) {
@@ -44,14 +44,6 @@ RawFunction* GetFunction(const Library& lib, const char* name) {
return func.raw();
}

RawClass* GetClass(const Library& lib, const char* name) {
Thread* thread = Thread::Current();
const auto& cls = Class::Handle(
lib.LookupClassAllowPrivate(String::Handle(Symbols::New(thread, name))));
EXPECT(!cls.IsNull());
return cls.raw();
}

void Invoke(const Library& lib, const char* name) {
// These tests rely on running unoptimized code to collect type feedback. The
// interpreter does not collect type feedback for interface calls, so set

@@ -58,7 +58,6 @@ RawLibrary* LoadTestScript(const char* script,
const char* lib_uri = RESOLVED_USER_TEST_URI);

RawFunction* GetFunction(const Library& lib, const char* name);
RawClass* GetClass(const Library& lib, const char* name);

void Invoke(const Library& lib, const char* name);
@@ -51,7 +51,7 @@ class SlotCache : public ZoneAllocated {

const char* Slot::KindToCString(Kind k) {
switch (k) {
#define NATIVE_CASE(C, U, F, id, M) \
#define NATIVE_CASE(C, F, id, M) \
case NATIVE_SLOT_NAME(C, F, id, M): \
return NATIVE_TO_STR(C, F, id, M);
NATIVE_SLOTS_LIST(NATIVE_CASE)

@@ -70,7 +70,7 @@ const char* Slot::KindToCString(Kind k) {

bool Slot::ParseKind(const char* str, Kind* out) {
ASSERT(str != nullptr && out != nullptr);
#define NATIVE_CASE(C, U, F, id, M) \
#define NATIVE_CASE(C, F, id, M) \
if (strcmp(str, NATIVE_TO_STR(C, F, id, M)) == 0) { \
*out = NATIVE_SLOT_NAME(C, F, id, M); \
return true; \

@@ -101,8 +101,7 @@ const Slot& Slot::GetNativeSlot(Kind kind) {
static const Slot fields[] = {
#define FIELD_FINAL (IsImmutableBit::encode(true))
#define FIELD_VAR (0)
#define DEFINE_NATIVE_FIELD(ClassName, UnderlyingType, FieldName, cid, \
mutability) \
#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability) \
Slot(Kind::k##ClassName##_##FieldName, FIELD_##mutability, k##cid##Cid, \
compiler::target::ClassName::FieldName##_offset(), \
#ClassName "." #FieldName, nullptr),

@@ -36,11 +36,10 @@ class ParsedFunction;
// List of slots that correspond to fields of native objects in the following
// format:
//
// V(class_name, underlying_type, field_name, exact_type, FINAL|VAR)
// V(class_name, field_name, exact_type, FINAL|VAR)
//
// - class_name and field_name specify the name of the host class and the name
// of the field respectively;
// - underlying_type: the Raw class which holds the field;
// - exact_type specifies exact type of the field (any load from this field
// would only yield instances of this type);
// - the last component specifies whether field behaves like a final field

@@ -49,31 +48,31 @@ class ParsedFunction;
//
// Note: native slots are expected to be non-nullable.
#define NATIVE_SLOTS_LIST(V) \
V(Array, RawArray, length, Smi, FINAL) \
V(Context, RawContext, parent, Context, FINAL) \
V(Closure, RawClosure, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function, Function, FINAL) \
V(Closure, RawClosure, context, Context, FINAL) \
V(Closure, RawClosure, hash, Context, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, RawTypedDataBase, length, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, data, Dynamic, FINAL) \
V(String, RawString, length, Smi, FINAL) \
V(LinkedHashMap, RawLinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, RawArray, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, count, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, size, Smi, FINAL) \
V(PointerBase, RawPointerBase, data_field, Dynamic, FINAL) \
V(Type, RawType, arguments, TypeArguments, FINAL)
V(Array, length, Smi, FINAL) \
V(Context, parent, Context, FINAL) \
V(Closure, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, function_type_arguments, TypeArguments, FINAL) \
V(Closure, function, Function, FINAL) \
V(Closure, context, Context, FINAL) \
V(Closure, hash, Context, VAR) \
V(GrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, length, Smi, FINAL) \
V(TypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, data, Dynamic, FINAL) \
V(String, length, Smi, FINAL) \
V(LinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, count, Smi, FINAL) \
V(ArgumentsDescriptor, size, Smi, FINAL) \
V(PointerBase, data_field, Dynamic, FINAL) \
V(Type, arguments, TypeArguments, FINAL)

// Slot is an abstraction that describes an readable (and possibly writeable)
// location within an object.

@@ -87,7 +86,7 @@ class Slot : public ZoneAllocated {
// clang-format off
enum class Kind : uint8_t {
// Native slots are identified by their kind - each native slot has its own.
#define DECLARE_KIND(ClassName, UnderlyingType, FieldName, cid, mutability) \
#define DECLARE_KIND(ClassName, FieldName, cid, mutability) \
k##ClassName##_##FieldName,
NATIVE_SLOTS_LIST(DECLARE_KIND)
#undef DECLARE_KIND

@@ -135,7 +134,7 @@ class Slot : public ZoneAllocated {
const ParsedFunction* parsed_function);

// Convenience getters for native slots.
#define DEFINE_GETTER(ClassName, UnderlyingType, FieldName, cid, mutability) \
#define DEFINE_GETTER(ClassName, FieldName, cid, mutability) \
static const Slot& ClassName##_##FieldName() { \
return GetNativeSlot(Kind::k##ClassName##_##FieldName); \
}
|
|||
#include "vm/compiler/backend/redundancy_elimination.h"
|
||||
#include "vm/compiler/backend/type_propagator.h"
|
||||
#include "vm/compiler/call_specializer.h"
|
||||
#include "vm/compiler/write_barrier_elimination.h"
|
||||
#if defined(DART_PRECOMPILER)
|
||||
#include "vm/compiler/aot/aot_call_specializer.h"
|
||||
#include "vm/compiler/aot/precompiler.h"
|
||||
|
@ -267,7 +266,7 @@ FlowGraph* CompilerPass::RunForceOptimizedPipeline(
|
|||
// so it should not be lifted earlier than that pass.
|
||||
INVOKE_PASS(DCE);
|
||||
INVOKE_PASS(Canonicalize);
|
||||
INVOKE_PASS(EliminateWriteBarriers);
|
||||
INVOKE_PASS(WriteBarrierElimination);
|
||||
INVOKE_PASS(FinalizeGraph);
|
||||
#if defined(DART_PRECOMPILER)
|
||||
if (mode == kAOT) {
|
||||
|
@ -350,7 +349,7 @@ FlowGraph* CompilerPass::RunPipeline(PipelineMode mode,
|
|||
INVOKE_PASS(EliminateStackOverflowChecks);
|
||||
INVOKE_PASS(Canonicalize);
|
||||
INVOKE_PASS(AllocationSinking_DetachMaterializations);
|
||||
INVOKE_PASS(EliminateWriteBarriers);
|
||||
INVOKE_PASS(WriteBarrierElimination);
|
||||
INVOKE_PASS(FinalizeGraph);
|
||||
#if defined(DART_PRECOMPILER)
|
||||
if (mode == kAOT) {
|
||||
|
@ -530,7 +529,38 @@ COMPILER_PASS(ReorderBlocks, {
|
|||
}
|
||||
});
|
||||
|
||||
COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });
|
||||
static void WriteBarrierElimination(FlowGraph* flow_graph) {
|
||||
for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
|
||||
!block_it.Done(); block_it.Advance()) {
|
||||
BlockEntryInstr* block = block_it.Current();
|
||||
Definition* last_allocated = nullptr;
|
||||
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
|
||||
Instruction* current = it.Current();
|
||||
if (!current->CanTriggerGC()) {
|
||||
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
|
||||
if (instr->instance()->definition() == last_allocated) {
|
||||
instr->set_emit_store_barrier(kNoStoreBarrier);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (AllocationInstr* alloc = current->AsAllocation()) {
|
||||
if (alloc->WillAllocateNewOrRemembered()) {
|
||||
last_allocated = alloc;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (current->CanTriggerGC()) {
|
||||
last_allocated = nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
COMPILER_PASS(WriteBarrierElimination,
|
||||
{ WriteBarrierElimination(flow_graph); });
|
||||
|
||||
COMPILER_PASS(FinalizeGraph, {
|
||||
// At the end of the pipeline, force recomputing and caching graph
|
||||
|
|
|
@ -50,7 +50,7 @@ namespace dart {
|
|||
V(TypePropagation) \
|
||||
V(UseTableDispatch) \
|
||||
V(WidenSmiToInt32) \
|
||||
V(EliminateWriteBarriers)
|
||||
V(WriteBarrierElimination)
|
||||
|
||||
class AllocationSinking;
|
||||
class BlockScheduler;
|
||||
|
|
|
@@ -100,14 +100,14 @@ compiler_sources = [
"compiler_state.h",
"ffi/abi.cc",
"ffi/abi.h",
"ffi/call.cc",
"ffi/call.h",
"ffi/callback.cc",
"ffi/callback.h",
"ffi/frame_rebase.cc",
"ffi/frame_rebase.h",
"ffi/marshaller.cc",
"ffi/marshaller.h",
"ffi/call.cc",
"ffi/call.h",
"ffi/callback.cc",
"ffi/callback.h",
"ffi/native_calling_convention.cc",
"ffi/native_calling_convention.h",
"ffi/native_location.cc",

@@ -166,8 +166,6 @@ compiler_sources = [
"stub_code_compiler_arm64.cc",
"stub_code_compiler_ia32.cc",
"stub_code_compiler_x64.cc",
"write_barrier_elimination.cc",
"write_barrier_elimination.h",
]

compiler_sources_tests = [

@@ -192,5 +190,4 @@ compiler_sources_tests = [
"backend/typed_data_aot_test.cc",
"backend/yield_position_test.cc",
"cha_test.cc",
"write_barrier_elimination_test.cc",
]
@ -1,388 +0,0 @@
|
|||
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
|
||||
// for details. All rights reserved. Use of this source code is governed by a
|
||||
// BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include "vm/compiler/backend/flow_graph.h"
|
||||
#include "vm/compiler/compiler_pass.h"
|
||||
#include "vm/compiler/write_barrier_elimination.h"
|
||||
|
||||
namespace dart {
|
||||
|
||||
#if defined(DEBUG)
|
||||
DEFINE_FLAG(bool,
|
||||
trace_write_barrier_elimination,
|
||||
false,
|
||||
"Trace WriteBarrierElimination pass.");
|
||||
#endif
|
||||
|
||||
class DefinitionIndexPairTrait {
|
||||
public:
|
||||
typedef Definition* Key;
|
||||
typedef intptr_t Value;
|
||||
struct Pair {
|
||||
Definition* definition = nullptr;
|
||||
intptr_t index = -1;
|
||||
Pair() {}
|
||||
Pair(Definition* definition, intptr_t index)
|
||||
: definition(definition), index(index) {}
|
||||
};
|
||||
|
||||
static Key KeyOf(Pair kv) { return kv.definition; }
|
||||
static Value ValueOf(Pair kv) { return kv.index; }
|
||||
static inline intptr_t Hashcode(Key key) { return std::hash<Key>()(key); }
|
||||
static inline bool IsKeyEqual(Pair kv, Key key) {
|
||||
return kv.definition == key;
|
||||
}
|
||||
};
|
||||
|
||||
typedef DirectChainedHashMap<DefinitionIndexPairTrait> DefinitionIndexMap;
|
||||
|
||||
// Inter-block write-barrier elimination.
|
||||
//
|
||||
// This optimization removes write barriers from some store instructions under
|
||||
// certain assumptions which the runtime is responsible to sustain.
|
||||
//
|
||||
// We can skip a write barrier on a StoreInstanceField to a container object X
|
||||
// if we know that either:
|
||||
// - X is in new-space, or
|
||||
// - X is in old-space, and:
|
||||
// - X is in the store buffer, and
|
||||
// - X is in the deferred marking stack (if concurrent marking is enabled)
|
||||
//
|
||||
// The result of an Allocation instruction (Instruction::IsAllocation()) will
|
||||
// satisfy one of these requirements immediately after the instruction
|
||||
// if WillAllocateNewOrRemembered() is true.
|
||||
//
|
||||
// Without runtime support, we would have to assume that any instruction which
|
||||
// can trigger a new-space scavenge (Instruction::CanTriggerGC()) might promote
|
||||
// a new-space temporary into old-space, and we could not skip a store barrier
|
||||
// on a write into it afterward.
|
||||
//
|
||||
// However, many instructions can trigger GC in unlikely cases, like
|
||||
// CheckStackOverflow and Box. To avoid interrupting write barrier elimination
|
||||
// across these instructions, the runtime ensures that any live temporaries
|
||||
// (except arrays) promoted during a scavenge caused by a non-Dart-call
|
||||
// instruction (see Instruction::CanCallDart()) will be added to the store
|
||||
// buffer. Additionally, if concurrent marking was initiated, the runtime
|
||||
// ensures that all live temporaries are also in the deferred marking stack.
|
||||
//
|
||||
// See also Thread::RememberLiveTemporaries() and
|
||||
// Thread::DeferredMarkLiveTemporaries().
|
||||
class WriteBarrierElimination : public ValueObject {
|
||||
public:
|
||||
WriteBarrierElimination(Zone* zone, FlowGraph* flow_graph);
|
||||
|
||||
void Analyze();
|
||||
void SaveResults();
|
||||
|
||||
private:
|
||||
void IndexDefinitions(Zone* zone);
|
||||
|
||||
bool AnalyzeBlock(BlockEntryInstr* entry);
|
||||
void MergePredecessors(BlockEntryInstr* entry);
|
||||
|
||||
void UpdateVectorForBlock(BlockEntryInstr* entry, bool finalize);
|
||||
|
||||
static intptr_t Index(BlockEntryInstr* entry) {
|
||||
return entry->postorder_number();
|
||||
}
|
||||
|
||||
intptr_t Index(Definition* def) {
|
||||
ASSERT(IsUsable(def));
|
||||
return definition_indices_.LookupValue(def);
|
||||
}
|
||||
|
||||
bool IsUsable(Definition* def) {
|
||||
return def->IsPhi() || (def->IsAllocation() &&
|
||||
def->AsAllocation()->WillAllocateNewOrRemembered());
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
static bool SlotEligibleForWBE(const Slot& slot);
|
||||
#endif
|
||||
|
||||
FlowGraph* const flow_graph_;
|
||||
const GrowableArray<BlockEntryInstr*>* const block_order_;
|
||||
|
||||
// Number of usable definitions in the graph.
|
||||
intptr_t definition_count_ = 0;
|
||||
|
||||
// Maps each usable definition to its index in the bitvectors.
|
||||
DefinitionIndexMap definition_indices_;
|
||||
|
||||
// Bitvector with all non-Array-allocation instructions set. Used to
|
||||
// un-mark Array allocations as usable.
|
||||
BitVector* array_allocations_mask_;
|
||||
|
||||
// Bitvectors for each block of which allocations are new or remembered
|
||||
// at the start (after Phis).
|
||||
GrowableArray<BitVector*> usable_allocs_in_;
|
||||
|
||||
// Bitvectors for each block of which allocations are new or remembered
|
||||
// at the end of the block.
|
||||
GrowableArray<BitVector*> usable_allocs_out_;
|
||||
|
||||
// Remaining blocks to process.
|
||||
GrowableArray<BlockEntryInstr*> worklist_;
|
||||
|
||||
// Temporary used in many functions to avoid repeated zone allocation.
|
||||
BitVector* vector_;
|
||||
|
||||
// Bitvector of blocks which have been processed, to ensure each block
|
||||
// is processed at least once.
|
||||
BitVector* processed_blocks_;
|
||||
|
||||
#if defined(DEBUG)
|
||||
bool tracing_ = false;
|
||||
#else
|
||||
static constexpr bool tracing_ = false;
|
||||
#endif
|
||||
};
|
||||
|
||||
WriteBarrierElimination::WriteBarrierElimination(Zone* zone,
|
||||
FlowGraph* flow_graph)
|
||||
: flow_graph_(flow_graph), block_order_(&flow_graph->postorder()) {
|
||||
#if defined(DEBUG)
|
||||
if (flow_graph->should_print() && FLAG_trace_write_barrier_elimination) {
|
||||
tracing_ = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
IndexDefinitions(zone);
|
||||
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
usable_allocs_in_.Add(new (zone) BitVector(zone, definition_count_));
|
||||
usable_allocs_in_[i]->CopyFrom(vector_);
|
||||
|
||||
usable_allocs_out_.Add(new (zone) BitVector(zone, definition_count_));
|
||||
usable_allocs_out_[i]->CopyFrom(vector_);
|
||||
}
|
||||
|
||||
processed_blocks_ = new (zone) BitVector(zone, block_order_->length());
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::Analyze() {
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
worklist_.Add(block_order_->At(i));
|
||||
}
|
||||
|
||||
while (!worklist_.is_empty()) {
|
||||
auto* const entry = worklist_.RemoveLast();
|
||||
if (AnalyzeBlock(entry)) {
|
||||
for (intptr_t i = 0; i < entry->last_instruction()->SuccessorCount();
|
||||
++i) {
|
||||
if (tracing_) {
|
||||
THR_Print("Enqueueing block %" Pd "\n", entry->block_id());
|
||||
}
|
||||
worklist_.Add(entry->last_instruction()->SuccessorAt(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::SaveResults() {
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
vector_->CopyFrom(usable_allocs_in_[i]);
|
||||
UpdateVectorForBlock(block_order_->At(i), /*finalize=*/true);
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::IndexDefinitions(Zone* zone) {
|
||||
BitmapBuilder array_allocations;
|
||||
|
||||
for (intptr_t i = 0; i < block_order_->length(); ++i) {
|
||||
BlockEntryInstr* const block = block_order_->At(i);
|
||||
if (auto join_block = block->AsJoinEntry()) {
|
||||
for (PhiIterator it(join_block); !it.Done(); it.Advance()) {
|
||||
array_allocations.Set(definition_count_, false);
|
||||
definition_indices_.Insert({it.Current(), definition_count_++});
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
|
||||
it.Current()->ssa_temp_index(), definition_count_ - 1);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
|
||||
if (Definition* current = it.Current()->AsDefinition()) {
|
||||
if (IsUsable(current)) {
|
||||
array_allocations.Set(definition_count_, current->IsCreateArray());
|
||||
definition_indices_.Insert({current, definition_count_++});
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Definition (%" Pd ") has index %" Pd ".\n",
|
||||
current->ssa_temp_index(), definition_count_ - 1);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vector_ = new (zone) BitVector(zone, definition_count_);
|
||||
vector_->SetAll();
|
||||
array_allocations_mask_ = new (zone) BitVector(zone, definition_count_);
|
||||
for (intptr_t i = 0; i < definition_count_; ++i) {
|
||||
if (!array_allocations.Get(i)) array_allocations_mask_->Add(i);
|
||||
}
|
||||
}
|
||||
|
||||
void WriteBarrierElimination::MergePredecessors(BlockEntryInstr* entry) {
|
||||
vector_->Clear();
|
||||
for (intptr_t i = 0; i < entry->PredecessorCount(); ++i) {
|
||||
BitVector* predecessor_set =
|
||||
usable_allocs_out_[Index(entry->PredecessorAt(i))];
|
||||
if (i == 0) {
|
||||
vector_->AddAll(predecessor_set);
|
||||
} else {
|
||||
vector_->Intersect(predecessor_set);
|
||||
}
|
||||
}
|
||||
|
||||
if (JoinEntryInstr* join = entry->AsJoinEntry()) {
|
||||
// A Phi is usable if and only if all its inputs are usable.
|
||||
for (PhiIterator it(join); !it.Done(); it.Advance()) {
|
||||
PhiInstr* phi = it.Current();
|
||||
ASSERT(phi->InputCount() == entry->PredecessorCount());
|
||||
bool is_usable = true;
|
||||
for (intptr_t i = 0; i < phi->InputCount(); ++i) {
|
||||
BitVector* const predecessor_set =
|
||||
usable_allocs_out_[Index(entry->PredecessorAt(i))];
|
||||
Definition* const origin = phi->InputAt(i)->definition();
|
||||
if (!IsUsable(origin) || !predecessor_set->Contains(Index(origin))) {
|
||||
is_usable = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
vector_->Set(Index(phi), is_usable);
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
if (tracing_) {
|
||||
THR_Print("Merge predecessors for %" Pd ".\n", entry->block_id());
|
||||
for (PhiIterator it(join); !it.Done(); it.Advance()) {
|
||||
PhiInstr* phi = it.Current();
|
||||
THR_Print("%" Pd ": %s\n", phi->ssa_temp_index(),
|
||||
vector_->Contains(Index(phi)) ? "true" : "false");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
bool WriteBarrierElimination::AnalyzeBlock(BlockEntryInstr* entry) {
|
||||
// Recompute the usable allocs in-set.
|
||||
MergePredecessors(entry);
|
||||
|
||||
// If the in-set has not changed, there's no work to do.
|
||||
BitVector* const in_set = usable_allocs_in_[Index(entry)];
|
||||
ASSERT(vector_->SubsetOf(*in_set)); // convergence
|
||||
if (vector_->Equals(*in_set) && processed_blocks_->Contains(Index(entry))) {
|
||||
if (tracing_) {
|
||||
THR_Print("Bailout of block %" Pd ": inputs didn't change.\n",
|
||||
entry->block_id());
|
||||
}
|
||||
return false;
|
||||
} else if (tracing_) {
|
||||
THR_Print("Inputs of block %" Pd " changed: ", entry->block_id());
|
||||
in_set->Print();
|
||||
THR_Print(" -> ");
|
||||
vector_->Print();
|
||||
THR_Print("\n");
|
||||
}
|
||||
|
||||
usable_allocs_in_[Index(entry)]->CopyFrom(vector_);
|
||||
UpdateVectorForBlock(entry, /*finalize=*/false);
|
||||
|
||||
processed_blocks_->Add(Index(entry));
|
||||
|
||||
// Successors only need to be updated if the out-set changes.
|
||||
if (vector_->Equals(*usable_allocs_out_[Index(entry)])) {
|
||||
if (tracing_) {
|
||||
THR_Print("Bailout of block %" Pd ": out-set didn't change.\n",
|
||||
entry->block_id());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
BitVector* const out_set = usable_allocs_out_[Index(entry)];
|
||||
ASSERT(vector_->SubsetOf(*out_set)); // convergence
|
||||
out_set->CopyFrom(vector_);
|
||||
if (tracing_) {
|
||||
THR_Print("Block %" Pd " changed.\n", entry->block_id());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#if defined(DEBUG)
|
||||
bool WriteBarrierElimination::SlotEligibleForWBE(const Slot& slot) {
|
||||
// We assume that Dart code only stores into Instances or Contexts.
|
||||
// This assumption is used in
|
||||
// RestoreWriteBarrierInvariantVisitor::VisitPointers.
|
||||
|
||||
switch (slot.kind()) {
|
||||
case Slot::Kind::kCapturedVariable: // Context
|
||||
return true;
|
||||
case Slot::Kind::kDartField: // Instance
|
||||
return true;
|
||||
|
||||
#define FOR_EACH_NATIVE_SLOT(class, underlying_type, field, type, modifiers) \
|
||||
case Slot::Kind::k##class##_##field: \
|
||||
return std::is_base_of<RawInstance, underlying_type>::value || \
|
||||
std::is_base_of<RawContext, underlying_type>::value;
|
||||
|
||||
NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
|
||||
#undef FOR_EACH_NATIVE_SLOT
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void WriteBarrierElimination::UpdateVectorForBlock(BlockEntryInstr* entry,
|
||||
bool finalize) {
|
||||
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
|
||||
Instruction* const current = it.Current();
|
||||
|
||||
if (finalize) {
|
||||
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
|
||||
Definition* const container = instr->instance()->definition();
|
||||
if (IsUsable(container) && vector_->Contains(Index(container))) {
|
||||
DEBUG_ASSERT(SlotEligibleForWBE(instr->slot()));
|
||||
instr->set_emit_store_barrier(kNoStoreBarrier);
|
||||
}
|
||||
} else if (StoreIndexedInstr* instr = current->AsStoreIndexed()) {
|
||||
Definition* const array = instr->array()->definition();
|
||||
if (IsUsable(array) && vector_->Contains(Index(array))) {
|
||||
instr->set_emit_store_barrier(StoreBarrierType::kNoStoreBarrier);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (current->CanCallDart()) {
|
||||
vector_->Clear();
|
||||
} else if (current->CanTriggerGC()) {
|
||||
// Clear array allocations. These are not added to the remembered set
|
||||
// by Thread::RememberLiveTemporaries() after a scavenge.
|
||||
vector_->Intersect(array_allocations_mask_);
|
||||
}
|
||||
|
||||
if (AllocationInstr* const alloc = current->AsAllocation()) {
|
||||
if (alloc->WillAllocateNewOrRemembered()) {
|
||||
vector_->Add(Index(alloc));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void EliminateWriteBarriers(FlowGraph* flow_graph) {
|
||||
WriteBarrierElimination elimination(Thread::Current()->zone(), flow_graph);
|
||||
elimination.Analyze();
|
||||
elimination.SaveResults();
|
||||
}
|
||||
|
||||
} // namespace dart
|
|
@@ -1,15 +0,0 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
#define RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_

namespace dart {

class FlowGraph;
void EliminateWriteBarriers(FlowGraph* flow_graph);

} // namespace dart

#endif // RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
@ -1,198 +0,0 @@
|
|||
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
|
||||
// for details. All rights reserved. Use of this source code is governed by a
|
||||
// BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
#include "platform/assert.h"
|
||||
#include "vm/compiler/backend/il_printer.h"
|
||||
#include "vm/compiler/backend/il_test_helper.h"
|
||||
#include "vm/unit_test.h"
|
||||
|
||||
namespace dart {
|
||||
|
||||
DEBUG_ONLY(DECLARE_FLAG(bool, trace_write_barrier_elimination);)
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_JoinSuccessors) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// This is a regression test for a bug where we were using
|
||||
// JoinEntry::SuccessorCount() to determine the number of outgoing blocks
|
||||
// from the join block. JoinEntry::SuccessorCount() is in fact always 0;
|
||||
// JoinEntry::last_instruction()->SuccessorCount() should be used instead.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
int value;
|
||||
C next;
|
||||
C prev;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C prev = C();
|
||||
C next;
|
||||
while (x --> 0) {
|
||||
next = C();
|
||||
next.prev = prev;
|
||||
prev?.next = next;
|
||||
prev = next;
|
||||
fn();
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
main() { foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store1 = nullptr;
|
||||
StoreInstanceFieldInstr* store2 = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchTrue,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store1},
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store2},
|
||||
}));
|
||||
|
||||
EXPECT(store1->ShouldEmitStoreBarrier() == false);
|
||||
EXPECT(store2->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_AtLeastOnce) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// Ensure that we process every block at least once during the analysis
|
||||
// phase so that the out-sets will be initialized. If we don't process
|
||||
// each block at least once, the store "c.next = n" will be marked
|
||||
// NoWriteBarrier.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
C next;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C c = C();
|
||||
C n = C();
|
||||
if (x > 5) {
|
||||
fn();
|
||||
}
|
||||
c.next = n;
|
||||
return c;
|
||||
}
|
||||
|
||||
main() { foo(0); foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchFalse,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store},
|
||||
}));
|
||||
|
||||
EXPECT(store->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
ISOLATE_UNIT_TEST_CASE(IRTest_WriteBarrierElimination_Arrays) {
|
||||
DEBUG_ONLY(FLAG_trace_write_barrier_elimination = true);
|
||||
|
||||
// Test that array allocations are not considered usable after a
|
||||
// may-trigger-GC instruction (in this case CheckStackOverflow), unlike
|
||||
// normal allocations, which are only interruped by a Dart call.
|
||||
const char* kScript =
|
||||
R"(
|
||||
class C {
|
||||
C next;
|
||||
}
|
||||
|
||||
@pragma("vm:never-inline")
|
||||
fn() {}
|
||||
|
||||
foo(int x) {
|
||||
C c = C();
|
||||
C n = C();
|
||||
List<C> array = List<C>(1);
|
||||
while (x --> 0) {
|
||||
c.next = n;
|
||||
n = c;
|
||||
c = C();
|
||||
}
|
||||
array[0] = c;
|
||||
return c;
|
||||
}
|
||||
|
||||
main() { foo(10); }
|
||||
)";
|
||||
|
||||
const auto& root_library = Library::Handle(LoadTestScript(kScript));
|
||||
|
||||
Invoke(root_library, "main");
|
||||
|
||||
const auto& function = Function::Handle(GetFunction(root_library, "foo"));
|
||||
TestPipeline pipeline(function, CompilerPass::kJIT);
|
||||
FlowGraph* flow_graph = pipeline.RunPasses({});
|
||||
|
||||
auto entry = flow_graph->graph_entry()->normal_entry();
|
||||
EXPECT(entry != nullptr);
|
||||
|
||||
StoreInstanceFieldInstr* store_into_c = nullptr;
|
||||
StoreIndexedInstr* store_into_array = nullptr;
|
||||
|
||||
ILMatcher cursor(flow_graph, entry);
|
||||
RELEASE_ASSERT(cursor.TryMatch({
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchTrue,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreInstanceField, &store_into_c},
|
||||
kMoveGlob,
|
||||
kMatchAndMoveGoto,
|
||||
kMoveGlob,
|
||||
kMatchAndMoveBranchFalse,
|
||||
kMoveGlob,
|
||||
{kMatchAndMoveStoreIndexed, &store_into_array},
|
||||
}));
|
||||
|
||||
EXPECT(store_into_c->ShouldEmitStoreBarrier() == false);
|
||||
EXPECT(store_into_array->ShouldEmitStoreBarrier() == true);
|
||||
}
|
||||
|
||||
} // namespace dart
|
|
@@ -816,8 +816,6 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
ASSERT(result);
}

isolate_group_->DeferredMarkLiveTemporaries();

// Wait for roots to be marked before exiting safepoint.
MonitorLocker ml(&root_slices_monitor_);
while (root_slices_finished_ != kNumRootSlices) {
@ -1187,9 +1187,6 @@ void Scavenger::Scavenge() {
|
|||
ProcessWeakReferences();
|
||||
page_space->ReleaseDataLock();
|
||||
|
||||
// Restore write-barrier assumptions.
|
||||
isolate_group->RememberLiveTemporaries();
|
||||
|
||||
// Scavenge finished. Run accounting.
|
||||
int64_t end = OS::GetCurrentMonotonicMicros();
|
||||
heap_->RecordTime(kProcessToSpace, process_to_space - iterate_roots);
|
||||
|
|
|
@ -2366,7 +2366,7 @@ void Isolate::LowLevelCleanup(Isolate* isolate) {
|
|||
// after a potential shutdown of the group, which would turn down any pending
|
||||
// GC tasks as well as the heap.
|
||||
Isolate::MarkIsolateDead(is_application_isolate);
|
||||
} // namespace dart
|
||||
}
|
||||
|
||||
Dart_InitializeIsolateCallback Isolate::initialize_callback_ = nullptr;
|
||||
Dart_IsolateGroupCreateCallback Isolate::create_group_callback_ = nullptr;
|
||||
|
@ -2461,18 +2461,6 @@ void IsolateGroup::ReleaseStoreBuffers() {
|
|||
thread_registry()->ReleaseStoreBuffers();
|
||||
}
|
||||
|
||||
void Isolate::RememberLiveTemporaries() {
|
||||
if (mutator_thread_ != nullptr) {
|
||||
mutator_thread_->RememberLiveTemporaries();
|
||||
}
|
||||
}
|
||||
|
||||
void Isolate::DeferredMarkLiveTemporaries() {
|
||||
if (mutator_thread_ != nullptr) {
|
||||
mutator_thread_->DeferredMarkLiveTemporaries();
|
||||
}
|
||||
}
|
||||
|
||||
void IsolateGroup::EnableIncrementalBarrier(
|
||||
MarkingStack* marking_stack,
|
||||
MarkingStack* deferred_marking_stack) {
|
||||
|
@ -2590,17 +2578,6 @@ uword IsolateGroup::FindPendingDeoptAtSafepoint(uword fp) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void IsolateGroup::DeferredMarkLiveTemporaries() {
|
||||
ForEachIsolate(
|
||||
[&](Isolate* isolate) { isolate->DeferredMarkLiveTemporaries(); },
|
||||
/*at_safepoint=*/true);
|
||||
}
|
||||
|
||||
void IsolateGroup::RememberLiveTemporaries() {
|
||||
ForEachIsolate([&](Isolate* isolate) { isolate->RememberLiveTemporaries(); },
|
||||
/*at_safepoint=*/true);
|
||||
}
|
||||
|
||||
RawClass* Isolate::GetClassForHeapWalkAt(intptr_t cid) {
|
||||
RawClass* raw_class = nullptr;
|
||||
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
||||
|
|
|
@@ -479,9 +479,6 @@ class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {

  uword FindPendingDeoptAtSafepoint(uword fp);

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

 private:
  friend class Heap;
  friend class StackFrame;  // For `[isolates_].First()`.

@@ -1152,9 +1149,6 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {

  static void NotifyLowMemory();

  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

 private:
  friend class Dart;  // Init, InitOnce, Shutdown.
  friend class IsolateKillerVisitor;  // Kill().

@@ -160,7 +160,7 @@ bool StackFrame::IsBareInstructionsStubFrame() const {
  return false;
}

bool StackFrame::IsStubFrame(bool needed_for_gc) const {
bool StackFrame::IsStubFrame() const {
  if (is_interpreted()) {
    return false;
  }

@@ -176,7 +176,7 @@ bool StackFrame::IsStubFrame(bool needed_for_gc) const {
  NoSafepointScope no_safepoint;
#endif

  RawCode* code = GetCodeObject(needed_for_gc);
  RawCode* code = GetCodeObject();
  ASSERT(code != Object::null());
  const intptr_t cid = code->ptr()->owner_->GetClassId();
  ASSERT(cid == kNullCid || cid == kClassCid || cid == kFunctionCid);

@@ -418,9 +418,9 @@ RawCode* StackFrame::LookupDartCode() const {
  return Code::null();
}

RawCode* StackFrame::GetCodeObject(bool needed_for_gc) const {
RawCode* StackFrame::GetCodeObject() const {
  ASSERT(!is_interpreted());
  if (auto isolate = IsolateOfBareInstructionsFrame(needed_for_gc)) {
  if (auto isolate = IsolateOfBareInstructionsFrame(/*needed_for_gc=*/false)) {
    auto const rct = isolate->reverse_pc_lookup_cache();
    return rct->Lookup(pc(), /*is_return_address=*/true);
  } else {

@@ -546,8 +546,8 @@ TokenPosition StackFrame::GetTokenPos() const {
  return TokenPosition::kNoSource;
}

bool StackFrame::IsValid(bool needed_for_gc) const {
  if (IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc)) {
bool StackFrame::IsValid() const {
  if (IsEntryFrame() || IsExitFrame() || IsStubFrame()) {
    return true;
  }
  if (is_interpreted()) {

@@ -94,7 +94,7 @@ class StackFrame : public ValueObject {
  const char* ToCString() const;

  // Check validity of a frame, used for assertion purposes.
  virtual bool IsValid(bool needed_for_gc = false) const;
  virtual bool IsValid() const;

  // Returns the isolate containing the bare instructions of the current frame.
  //

@@ -112,12 +112,11 @@ class StackFrame : public ValueObject {
  bool IsBareInstructionsStubFrame() const;

  // Frame type.
  virtual bool IsDartFrame(bool validate = true,
                           bool needed_for_gc = false) const {
    ASSERT(!validate || IsValid(needed_for_gc));
    return !(IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc));
  virtual bool IsDartFrame(bool validate = true) const {
    ASSERT(!validate || IsValid());
    return !(IsEntryFrame() || IsExitFrame() || IsStubFrame());
  }
  virtual bool IsStubFrame(bool needed_for_gc = false) const;
  virtual bool IsStubFrame() const;
  virtual bool IsEntryFrame() const { return false; }
  virtual bool IsExitFrame() const { return false; }

@@ -159,7 +158,7 @@ class StackFrame : public ValueObject {
  Thread* thread() const { return thread_; }

 private:
  RawCode* GetCodeObject(bool needed_for_gc = false) const;
  RawCode* GetCodeObject() const;
  RawBytecode* GetBytecodeObject() const;

@@ -200,11 +199,9 @@ class StackFrame : public ValueObject {
// runtime code.
class ExitFrame : public StackFrame {
 public:
  bool IsValid(bool needed_for_gc = false) const { return sp() == 0; }
  bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
    return false;
  }
  bool IsStubFrame(bool needed_for_gc = false) const { return false; }
  bool IsValid() const { return sp() == 0; }
  bool IsDartFrame(bool validate = true) const { return false; }
  bool IsStubFrame() const { return false; }
  bool IsExitFrame() const { return true; }

  // Visit objects in the frame.

@@ -224,13 +221,11 @@ class ExitFrame : public StackFrame {
// dart code.
class EntryFrame : public StackFrame {
 public:
  bool IsValid(bool needed_for_gc = false) const {
  bool IsValid() const {
    return StubCode::InInvocationStub(pc(), is_interpreted());
  }
  bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
    return false;
  }
  bool IsStubFrame(bool needed_for_gc = false) const { return false; }
  bool IsDartFrame(bool validate = true) const { return false; }
  bool IsStubFrame() const { return false; }
  bool IsEntryFrame() const { return true; }

  // Visit objects in the frame.

@@ -728,105 +728,6 @@ void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
  }
}

class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
 public:
  RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
                                      Thread* thread,
                                      Thread::RestoreWriteBarrierInvariantOp op)
      : ObjectPointerVisitor(group), thread_(thread), op_(op) {}

  void VisitPointers(RawObject** first, RawObject** last) {
    for (; first != last + 1; first++) {
      RawObject* obj = *first;
      // Stores into new-space objects don't need a write barrier.
      if (obj->IsSmiOrNewObject()) continue;

      // To avoid adding too much work into the remembered set, skip
      // arrays. Write barrier elimination will not remove the barrier
      // if we can trigger GC between array allocation and store.
      if (obj->GetClassId() == kArrayCid) continue;

      // Dart code won't store into VM-internal objects except Contexts.
      // This assumption is checked by an assertion in
      // WriteBarrierElimination::UpdateVectorForBlock.
      if (!obj->IsDartInstance() && !obj->IsContext()) continue;

      // Dart code won't store into canonical instances.
      if (obj->IsCanonical()) continue;

      // Already remembered, nothing to do.
      if (op_ == Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet &&
          obj->IsRemembered()) {
        continue;
      }

      // Objects in the VM isolate heap are immutable and won't be
      // stored into. Check this condition last because there's no bit
      // in the header for it.
      if (obj->InVMIsolateHeap()) continue;

      switch (op_) {
        case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
          obj->AddToRememberedSet(thread_);
          break;
        case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
          // Re-scan obj when finalizing marking.
          thread_->DeferredMarkingStackAddObject(obj);
          break;
      }
    }
  }

 private:
  Thread* const thread_;
  Thread::RestoreWriteBarrierInvariantOp op_;
};

// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
  ASSERT(IsAtSafepoint());
  ASSERT(IsMutatorThread());

  const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
      StackFrameIterator::kAllowCrossThreadIteration;
  StackFrameIterator frames_iterator(top_exit_frame_info(),
                                     ValidationPolicy::kDontValidateFrames,
                                     this, cross_thread_policy);
  RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
  bool scan_next_dart_frame = false;
  for (StackFrame* frame = frames_iterator.NextFrame(); frame != NULL;
       frame = frames_iterator.NextFrame()) {
    if (frame->IsExitFrame()) {
      scan_next_dart_frame = true;
    } else if (frame->IsDartFrame(/*validate=*/false, /*needed_for_gc=*/true)) {
      if (scan_next_dart_frame) {
        frame->VisitObjectPointers(&visitor);
      }
      scan_next_dart_frame = false;
    }
  }
}

void Thread::DeferredMarkLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}

void Thread::RememberLiveTemporaries() {
  RestoreWriteBarrierInvariant(
      RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}

bool Thread::CanLoadFromThread(const Object& object) {
  // In order to allow us to use assembler helper routines with non-[Code]
  // objects *before* stubs are initialized, we only loop over the stubs if the

@@ -826,8 +826,6 @@ class Thread : public ThreadState {
  // Visit all object pointers.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  bool IsValidHandle(Dart_Handle object) const;
  bool IsValidLocalHandle(Dart_Handle object) const;

@@ -858,13 +856,6 @@ class Thread : public ThreadState {
  template <class T>
  T* AllocateReusableHandle();

  enum class RestoreWriteBarrierInvariantOp {
    kAddToRememberedSet,
    kAddToDeferredMarkingStack
  };
  friend class RestoreWriteBarrierInvariantVisitor;
  void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);

  // Set the current compiler state and return the previous compiler state.
  CompilerState* SetCompilerState(CompilerState* state) {
    CompilerState* previous = compiler_state_;