Revert "Re-land "[vm] Aggressive write-barrier elimination.""

Reason: Causing numerous DartFuzz crashes

This reverts commit 595038d19f.

Change-Id: I94e79cf1a3ddf9210a2929128cfc825bf47c92ac
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/136260
Reviewed-by: Ben Konyi <bkonyi@google.com>
This commit is contained in:
Ben Konyi 2020-02-18 17:12:21 +00:00
parent 21e944f1bc
commit edc8bd7ec6
18 changed files with 30 additions and 492 deletions

View file

@@ -70,8 +70,6 @@ class BitVector : public ZoneAllocated {
data_[i / kBitsPerWord] &= ~(static_cast<uword>(1) << (i % kBitsPerWord));
}
void Set(intptr_t i, bool value) { value ? Add(i) : Remove(i); }
bool Equals(const BitVector& other) const;
// Add all elements that are in the bitvector from.
@@ -94,14 +92,6 @@ class BitVector : public ZoneAllocated {
return (block & (static_cast<uword>(1) << (i % kBitsPerWord))) != 0;
}
bool SubsetOf(const BitVector& other) {
ASSERT(length_ == other.length_);
for (intptr_t i = 0; i < data_length_; ++i) {
if ((data_[i] & other.data_[i]) != data_[i]) return false;
}
return true;
}
void Clear() {
for (intptr_t i = 0; i < data_length_; i++) {
data_[i] = 0;

View file

@@ -612,9 +612,7 @@ void FlowGraphCompiler::VisitBlocks() {
EmitInstructionPrologue(instr);
ASSERT(pending_deoptimization_env_ == NULL);
pending_deoptimization_env_ = instr->env();
DEBUG_ONLY(current_instruction_ = instr);
instr->EmitNativeCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
pending_deoptimization_env_ = NULL;
if (IsPeephole(instr)) {
ASSERT(top_of_stack_ == nullptr);
@@ -705,9 +703,7 @@ void FlowGraphCompiler::GenerateDeferredCode() {
slow_path->instruction()->tag());
SpecialStatsBegin(stats_tag);
BeginCodeSourceRange();
DEBUG_ONLY(current_instruction_ = slow_path->instruction());
slow_path->GenerateCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
EndCodeSourceRange(slow_path->instruction()->token_pos());
SpecialStatsEnd(stats_tag);
}

View file

@@ -1117,15 +1117,6 @@ class FlowGraphCompiler : public ValueObject {
// is amenable to a peephole optimization.
bool IsPeephole(Instruction* instr) const;
#ifdef DEBUG
bool CanCallDart() const {
return current_instruction_ == nullptr ||
current_instruction_->CanCallDart();
}
#else
bool CanCallDart() const { return true; }
#endif
// This struct contains either function or code, the other one being NULL.
class StaticCallsStruct : public ZoneAllocated {
public:
@@ -1219,10 +1210,6 @@ class FlowGraphCompiler : public ValueObject {
ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
Array& edge_counters_array_;
// Instruction current running EmitNativeCode(). Useful for asserts.
// Does not include Phis and BlockEntrys.
DEBUG_ONLY(Instruction* current_instruction_ = nullptr);
DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
};

View file

@@ -1001,7 +1001,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@@ -1012,7 +1011,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);
@@ -1092,7 +1090,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@@ -1118,7 +1115,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@@ -1166,7 +1162,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);

View file

@@ -972,7 +972,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@@ -983,7 +982,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);
@@ -1053,7 +1051,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@@ -1085,7 +1082,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@@ -1130,7 +1126,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(ic_data.NumArgsTested() == 1);
const Code& initial_stub = StubCode::UnlinkedCall();
const char* switchable_call_mode = "smiable";

View file

@@ -867,7 +867,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ Call(stub, /*moveable_target=*/false, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@@ -878,7 +877,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const auto& stub = StubCode::CallStaticFunction();
__ Call(stub, /*movable_target=*/true, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
@@ -950,7 +948,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
@@ -976,7 +973,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(

View file

@@ -986,7 +986,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ CallPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@@ -997,7 +996,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(is_optimizing());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
@@ -1081,7 +1079,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@@ -1107,7 +1104,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@@ -1149,7 +1145,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);

View file

@@ -1031,15 +1031,6 @@ class Instruction : public ZoneAllocated {
// See StoreInstanceFieldInstr::HasUnknownSideEffects() for rationale.
virtual bool HasUnknownSideEffects() const = 0;
// Whether this instruction can call Dart code without going through
// the runtime.
//
// Must be true for any instruction which can call Dart code without
// first creating an exit frame to transition into the runtime.
//
// See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
virtual bool CanCallDart() const { return false; }
virtual bool CanTriggerGC() const;
// Get the block entry for this instruction.
@@ -3152,8 +3143,6 @@ class BranchInstr : public Instruction {
return comparison()->HasUnknownSideEffects();
}
virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
ComparisonInstr* comparison() const { return comparison_; }
void SetComparison(ComparisonInstr* comp);
@@ -3720,7 +3709,6 @@ class ClosureCallInstr : public TemplateDartCall<1> {
virtual bool ComputeCanDeoptimize() const { return !FLAG_precompiled_mode; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
Code::EntryKind entry_kind() const { return entry_kind_; }
@@ -3796,7 +3784,6 @@ class InstanceCallBaseInstr : public TemplateDartCall<0> {
}
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
void SetResultType(Zone* zone, CompileType new_type) {
result_type_ = new (zone) CompileType(new_type);
@@ -4348,7 +4335,6 @@ class IfThenElseInstr : public Definition {
virtual bool HasUnknownSideEffects() const {
return comparison()->HasUnknownSideEffects();
}
virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
virtual bool AttributesEqual(Instruction* other) const {
IfThenElseInstr* other_if_then_else = other->AsIfThenElse();
@@ -4477,7 +4463,6 @@ class StaticCallInstr : public TemplateDartCall<0> {
}
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
// Initialize result type of this call instruction if target is a recognized
// method or has pragma annotation.
@@ -4736,9 +4721,6 @@ class NativeCallInstr : public TemplateDartCall<0> {
virtual bool HasUnknownSideEffects() const { return true; }
// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }
void SetupNative();
PRINT_OPERANDS_TO_SUPPORT
@@ -4798,9 +4780,6 @@ class FfiCallInstr : public Definition {
virtual bool HasUnknownSideEffects() const { return true; }
// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const;
virtual Representation representation() const;
@@ -5350,7 +5329,6 @@ class StringInterpolateInstr : public TemplateDefinition<1, Throws> {
virtual CompileType ComputeType() const;
// Issues a static call to Dart code which calls toString on objects.
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return !FLAG_precompiled_mode; }
const Function& CallFunction() const;
@@ -7028,7 +7006,6 @@ class CheckedSmiOpInstr : public TemplateDefinition<2, Throws> {
virtual bool RecomputeType();
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual Definition* Canonicalize(FlowGraph* flow_graph);
@@ -7073,7 +7050,6 @@ class CheckedSmiComparisonInstr : public TemplateComparison<2, Throws> {
bool is_negated() const { return is_negated_; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
PRINT_OPERANDS_TO_SUPPORT
@@ -7709,8 +7685,6 @@ class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool CanCallDart() const { return true; }
private:
InstanceCallInstr* instance_call_;

View file

@@ -19,7 +19,6 @@
#include "vm/compiler/backend/redundancy_elimination.h"
#include "vm/compiler/backend/type_propagator.h"
#include "vm/compiler/call_specializer.h"
#include "vm/compiler/write_barrier_elimination.h"
#if defined(DART_PRECOMPILER)
#include "vm/compiler/aot/aot_call_specializer.h"
#include "vm/compiler/aot/precompiler.h"
@@ -530,6 +529,36 @@ COMPILER_PASS(ReorderBlocks, {
}
});
static void WriteBarrierElimination(FlowGraph* flow_graph) {
for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
!block_it.Done(); block_it.Advance()) {
BlockEntryInstr* block = block_it.Current();
Definition* last_allocated = nullptr;
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
Instruction* current = it.Current();
if (!current->CanTriggerGC()) {
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
if (instr->instance()->definition() == last_allocated) {
instr->set_emit_store_barrier(kNoStoreBarrier);
}
continue;
}
}
if (AllocationInstr* alloc = current->AsAllocation()) {
if (alloc->WillAllocateNewOrRemembered()) {
last_allocated = alloc;
continue;
}
}
if (current->CanTriggerGC()) {
last_allocated = nullptr;
}
}
}
}
COMPILER_PASS(WriteBarrierElimination,
{ WriteBarrierElimination(flow_graph); });

View file

@@ -166,8 +166,6 @@ compiler_sources = [
"stub_code_compiler_arm64.cc",
"stub_code_compiler_ia32.cc",
"stub_code_compiler_x64.cc",
"write_barrier_elimination.cc",
"write_barrier_elimination.h",
]
compiler_sources_tests = [

View file

@@ -1,280 +0,0 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include <functional>
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/write_barrier_elimination.h"
namespace dart {
class DefinitionIndexPairTrait {
public:
typedef Definition* Key;
typedef intptr_t Value;
typedef struct {
Definition* definition;
intptr_t index;
} Pair;
static Key KeyOf(Pair kv) { return kv.definition; }
static Value ValueOf(Pair kv) { return kv.index; }
static inline intptr_t Hashcode(Key key) { return std::hash<Key>()(key); }
static inline bool IsKeyEqual(Pair kv, Key key) {
return kv.definition == key;
}
};
typedef DirectChainedHashMap<DefinitionIndexPairTrait> DefinitionIndexMap;
// Inter-block write-barrier elimination.
//
// This optimization removes write barriers from some store instructions under
// certain assumptions which the runtime is responsible to sustain.
//
// We can skip a write barrier on a StoreInstanceField to a container object X
// if we know that either:
// - X is in new-space, or
// - X is in old-space, and:
// - X is in the store buffer, and
// - X is in the deferred marking stack (if concurrent marking is enabled)
//
// The result of an Allocation instruction (Instruction::IsAllocation()) will
// satisfy one of these requirements immediately after the instruction
// if WillAllocateNewOrRemembered() is true.
//
// Without runtime support, we would have to assume that any instruction which
// can trigger a new-space scavenge (Instruction::CanTriggerGC()) might promote
// a new-space temporary into old-space, and we could not skip a store barrier
// on a write into it afterward.
//
// However, many instructions can trigger GC in unlikely cases, like
// CheckStackOverflow and Box. To avoid interrupting write barrier elimination
// across these instructions, the runtime ensures that any live temporaries
// (except arrays) promoted during a scavenge caused by a non-Dart-call
// instruction (see Instruction::CanCallDart()) will be added to the store
// buffer. Additionally, if concurrent marking was initiated, the runtime
// ensures that all live temporaries are also in the deferred marking stack.
//
// See also Thread::RememberLiveTemporaries() and
// Thread::DeferredMarkLiveTemporaries().
class WriteBarrierElimination : public ValueObject {
public:
WriteBarrierElimination(Zone* zone, FlowGraph* flow_graph);
void Analyze();
void SaveResults();
private:
void IndexDefinitions(Zone* zone);
bool AnalyzeBlock(BlockEntryInstr* entry);
void MergePredecessors(BlockEntryInstr* entry);
void UpdateVectorForBlock(BlockEntryInstr* entry, bool finalize);
static intptr_t Index(BlockEntryInstr* entry) {
return entry->postorder_number();
}
intptr_t Index(Definition* def) {
ASSERT(IsUsable(def));
return definition_indices_.LookupValue(def) - 1;
}
bool IsUsable(Definition* def) {
return def->IsPhi() || (def->IsAllocation() &&
def->AsAllocation()->WillAllocateNewOrRemembered());
}
FlowGraph* const flow_graph_;
const GrowableArray<BlockEntryInstr*>* const block_order_;
// Number of usable definitions in the graph.
intptr_t definition_count_ = 0;
// Maps each usable definition to its index in the bitvectors.
DefinitionIndexMap definition_indices_;
// Bitvector with all non-Array-allocation instructions set. Used to
// un-mark Array allocations as usable.
BitVector* array_allocations_mask_;
// Bitvectors for each block of which allocations are new or remembered
// at the start (after Phis).
GrowableArray<BitVector*> usable_allocs_in_;
// Bitvectors for each block of which allocations are new or remembered
// at the end of the block.
GrowableArray<BitVector*> usable_allocs_out_;
// Remaining blocks to process.
GrowableArray<BlockEntryInstr*> worklist_;
// Temporary used in many functions to avoid repeated zone allocation.
BitVector* vector_;
};
WriteBarrierElimination::WriteBarrierElimination(Zone* zone,
FlowGraph* flow_graph)
: flow_graph_(flow_graph), block_order_(&flow_graph->postorder()) {
IndexDefinitions(zone);
for (intptr_t i = 0; i < block_order_->length(); ++i) {
usable_allocs_in_.Add(new (zone) BitVector(zone, definition_count_));
usable_allocs_in_[i]->CopyFrom(vector_);
usable_allocs_out_.Add(new (zone) BitVector(zone, definition_count_));
usable_allocs_out_[i]->CopyFrom(vector_);
}
}
void WriteBarrierElimination::Analyze() {
for (intptr_t i = 0; i < block_order_->length(); ++i) {
worklist_.Add(block_order_->At(i));
}
while (!worklist_.is_empty()) {
auto* const entry = worklist_.RemoveLast();
if (AnalyzeBlock(entry)) {
for (intptr_t i = 0; i < entry->SuccessorCount(); ++i) {
worklist_.Add(entry->SuccessorAt(i));
}
}
}
}
void WriteBarrierElimination::SaveResults() {
for (intptr_t i = 0; i < block_order_->length(); ++i) {
vector_->CopyFrom(usable_allocs_in_[i]);
UpdateVectorForBlock(block_order_->At(i), /*finalize=*/true);
}
}
void WriteBarrierElimination::IndexDefinitions(Zone* zone) {
GrowableArray<bool> array_allocations(10);
for (intptr_t i = 0; i < block_order_->length(); ++i) {
BlockEntryInstr* const block = block_order_->At(i);
if (block->IsJoinEntry()) {
for (PhiIterator it(block->AsJoinEntry()); !it.Done(); it.Advance()) {
definition_indices_.Insert({it.Current(), ++definition_count_});
array_allocations.Add(false);
}
}
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
if (Definition* current = it.Current()->AsDefinition()) {
if (IsUsable(current)) {
definition_indices_.Insert({current, ++definition_count_});
array_allocations.Add(current->IsCreateArray());
}
}
}
}
vector_ = new (zone) BitVector(zone, definition_count_);
vector_->SetAll();
array_allocations_mask_ = new (zone) BitVector(zone, definition_count_);
for (intptr_t i = 0; i < definition_count_; ++i) {
if (!array_allocations[i]) array_allocations_mask_->Add(i);
}
}
void WriteBarrierElimination::MergePredecessors(BlockEntryInstr* entry) {
vector_->Clear();
for (intptr_t i = 0; i < entry->PredecessorCount(); ++i) {
BitVector* predecessor_set =
usable_allocs_out_[Index(entry->PredecessorAt(i))];
if (i == 0) {
vector_->AddAll(predecessor_set);
} else {
vector_->Intersect(predecessor_set);
}
}
if (JoinEntryInstr* join = entry->AsJoinEntry()) {
// A Phi is usable if and only if all its inputs are usable.
for (PhiIterator it(join); !it.Done(); it.Advance()) {
PhiInstr* phi = it.Current();
ASSERT(phi->InputCount() == entry->PredecessorCount());
bool is_usable = true;
for (intptr_t i = 0; i < phi->InputCount(); ++i) {
BitVector* const predecessor_set =
usable_allocs_out_[Index(entry->PredecessorAt(i))];
Definition* const origin = phi->InputAt(i)->definition();
if (!IsUsable(origin) || !predecessor_set->Contains(Index(origin))) {
is_usable = false;
break;
}
}
vector_->Set(Index(phi), is_usable);
}
}
}
bool WriteBarrierElimination::AnalyzeBlock(BlockEntryInstr* entry) {
// Recompute the usable allocs in-set.
MergePredecessors(entry);
// If the in-set has not changed, there's no work to do.
BitVector* const in_set = usable_allocs_in_[Index(entry)];
ASSERT(vector_->SubsetOf(*in_set)); // convergence
if (vector_->Equals(*in_set)) {
return false;
} else {
usable_allocs_in_[Index(entry)]->CopyFrom(vector_);
}
UpdateVectorForBlock(entry, /*finalize=*/false);
// Successors only need to be updated if the out-set changes.
if (vector_->Equals(*usable_allocs_out_[Index(entry)])) {
return false;
} else {
BitVector* const out_set = usable_allocs_out_[Index(entry)];
ASSERT(vector_->SubsetOf(*out_set)); // convergence
out_set->CopyFrom(vector_);
return true;
}
}
void WriteBarrierElimination::UpdateVectorForBlock(BlockEntryInstr* entry,
bool finalize) {
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
Instruction* const current = it.Current();
if (finalize) {
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
Definition* const container = instr->instance()->definition();
if (IsUsable(container) && vector_->Contains(Index(container))) {
instr->set_emit_store_barrier(kNoStoreBarrier);
}
}
}
if (current->CanCallDart()) {
vector_->Clear();
} else if (current->CanTriggerGC()) {
// Clear array allocations. These are not added to the remembered set
// by Thread::RememberLiveTemporaries() after a scavenge.
vector_->Intersect(array_allocations_mask_);
}
if (AllocationInstr* const alloc = current->AsAllocation()) {
if (alloc->WillAllocateNewOrRemembered()) {
vector_->Add(Index(alloc));
}
}
}
}
void WriteBarrierElimination(FlowGraph* flow_graph) {
class WriteBarrierElimination elimination(Thread::Current()->zone(),
flow_graph);
elimination.Analyze();
elimination.SaveResults();
}
} // namespace dart

View file

@@ -1,15 +0,0 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
#define RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
namespace dart {
class FlowGraph;
void WriteBarrierElimination(FlowGraph* flow_graph);
} // namespace dart
#endif // RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_

View file

@@ -799,8 +799,6 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
ASSERT(result);
}
isolate_->DeferredMarkLiveTemporaries();
// Wait for roots to be marked before exiting safepoint.
MonitorLocker ml(&root_slices_monitor_);
while (root_slices_finished_ != kNumRootSlices) {

View file

@@ -1074,9 +1074,6 @@ void Scavenger::Scavenge() {
ProcessWeakReferences();
page_space->ReleaseDataLock();
// Restore write-barrier assumptions.
isolate->RememberLiveTemporaries();
// Scavenge finished. Run accounting.
int64_t end = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kProcessToSpace, process_to_space - iterate_roots);

View file

@@ -2397,18 +2397,6 @@ void Isolate::ReleaseStoreBuffers() {
thread_registry()->ReleaseStoreBuffers(this);
}
void Isolate::RememberLiveTemporaries() {
if (mutator_thread_ != nullptr) {
mutator_thread_->RememberLiveTemporaries();
}
}
void Isolate::DeferredMarkLiveTemporaries() {
if (mutator_thread_ != nullptr) {
mutator_thread_->DeferredMarkLiveTemporaries();
}
}
void Isolate::EnableIncrementalBarrier(MarkingStack* marking_stack,
MarkingStack* deferred_marking_stack) {
ASSERT(marking_stack_ == nullptr);

View file

@@ -1075,9 +1075,6 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
static void NotifyLowMemory();
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
private:
friend class Dart; // Init, InitOnce, Shutdown.
friend class IsolateKillerVisitor; // Kill().

View file

@@ -697,99 +697,6 @@ void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
}
}
class HandleLiveTemporariesVisitor : public ObjectPointerVisitor {
public:
explicit HandleLiveTemporariesVisitor(Isolate* isolate,
Thread* thread,
Thread::HandleLiveTemporariesOp op)
: ObjectPointerVisitor(isolate), thread_(thread), op_(op) {}
void VisitPointers(RawObject** first, RawObject** last) {
for (; first != last + 1; first++) {
RawObject* obj = *first;
// Stores into new-space objects don't need a write barrier.
if (obj->IsSmiOrNewObject()) continue;
// To avoid adding too much work into the remembered set, skip
// arrays. Write barrier elimination we'll do this.
if (obj->GetClassId() == kArrayCid) continue;
// Dart code won't store into VM-internal objects.
if (!obj->IsDartInstance()) continue;
// Dart code won't store into canonical instances.
if (obj->IsCanonical()) continue;
// Already remembered, nothing to do.
if (op_ == Thread::HandleLiveTemporariesOp::kAddToRememberedSet &&
obj->IsRemembered()) {
continue;
}
// Objects in the VM isolate heap are immutable and won't be
// stored into. Check this condition last because there's no bit
// in the header for it.
if (obj->InVMIsolateHeap()) continue;
switch (op_) {
case Thread::HandleLiveTemporariesOp::kAddToRememberedSet:
obj->AddToRememberedSet(thread_);
break;
case Thread::HandleLiveTemporariesOp::kAddToDeferredMarkingStack:
// Re-scan obj when finalizing marking.
thread_->DeferredMarkingStackAddObject(obj);
break;
}
}
}
private:
Thread* const thread_;
Thread::HandleLiveTemporariesOp op_;
};
// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceeding an exit frame to the store buffer or deferred
// marking stack.
void Thread::HandleLiveTemporaries(HandleLiveTemporariesOp op) {
ASSERT(IsMutatorThread());
const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
StackFrameIterator::kAllowCrossThreadIteration;
StackFrameIterator frames_iterator(top_exit_frame_info(),
ValidationPolicy::kDontValidateFrames,
this, cross_thread_policy);
HandleLiveTemporariesVisitor visitor(isolate(), this, op);
bool scan_next_dart_frame = false;
for (StackFrame* frame = frames_iterator.NextFrame(); frame != NULL;
frame = frames_iterator.NextFrame()) {
if (frame->IsExitFrame()) {
scan_next_dart_frame = true;
} else if (frame->IsDartFrame()) {
if (scan_next_dart_frame) {
frame->VisitObjectPointers(&visitor);
}
scan_next_dart_frame = false;
}
}
}
void Thread::DeferredMarkLiveTemporaries() {
HandleLiveTemporaries(HandleLiveTemporariesOp::kAddToDeferredMarkingStack);
}
void Thread::RememberLiveTemporaries() {
HandleLiveTemporaries(HandleLiveTemporariesOp::kAddToRememberedSet);
}
bool Thread::CanLoadFromThread(const Object& object) {
// In order to allow us to use assembler helper routines with non-[Code]
// objects *before* stubs are initialized, we only loop ver the stubs if the

View file

@@ -819,8 +819,6 @@ class Thread : public ThreadState {
// Visit all object pointers.
void VisitObjectPointers(ObjectPointerVisitor* visitor,
ValidationPolicy validate_frames);
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
bool IsValidHandle(Dart_Handle object) const;
bool IsValidLocalHandle(Dart_Handle object) const;
@@ -851,13 +849,6 @@ template <class T>
template <class T>
T* AllocateReusableHandle();
enum class HandleLiveTemporariesOp {
kAddToRememberedSet,
kAddToDeferredMarkingStack
};
friend class HandleLiveTemporariesVisitor;
void HandleLiveTemporaries(HandleLiveTemporariesOp op);
// Set the current compiler state and return the previous compiler state.
CompilerState* SetCompilerState(CompilerState* state) {
CompilerState* previous = compiler_state_;