Revert "[vm] Re-land aggressive write-barrier elimination."

This reverts commit c7d7552697.

Reason for revert: https://github.com/dart-lang/sdk/issues/40780

Original change's description:
> [vm] Re-land aggressive write-barrier elimination.
> 
> The original change incorrectly assumed that all stores in Dart code write to Instances.
> There is actually one exception: Contexts, which do not inherit from Instance.
> 
> I've added asserts to ensure this kind of bug cannot resurface.
> 
> The original change is in patchset 4.
> 
> Change-Id: Ic2d8d05e70a4de738eb9fb5980487b4f27111b8c
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/136221
> Commit-Queue: Samir Jindel <sjindel@google.com>
> Reviewed-by: Martin Kustermann <kustermann@google.com>
> Reviewed-by: Ryan Macnak <rmacnak@google.com>

TBR=kustermann@google.com,rmacnak@google.com,sjindel@google.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Change-Id: I1891677b21560c7fc5a54a8eb800ef5850654402
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/137290
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Ryan Macnak authored on 2020-02-26 02:18:25 +00:00; committed by commit-bot@chromium.org
parent fca91e4dfd
commit 7fd8ad5a2d
23 changed files with 85 additions and 615 deletions
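
For context on the bug that motivated both the re-land and this revert: in the VM, a local variable captured by a closure lives in a Context object, and Context does not inherit from Instance, so updating a captured variable is a store into a non-Instance object. A minimal, hypothetical Dart sketch of such a store (the names below are illustrative and not taken from this CL):

// `count` is captured, so the VM places it in a Context; the update inside the
// returned closure compiles to a store into that Context rather than into a
// Dart Instance, which is the case the first landing overlooked.
int Function() makeCounter() {
  var count = 0;
  return () => ++count;
}

void main() {
  final next = makeCounter();
  print(next()); // 1
  print(next()); // 2
}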


@ -70,8 +70,6 @@ class BitVector : public ZoneAllocated {
data_[i / kBitsPerWord] &= ~(static_cast<uword>(1) << (i % kBitsPerWord));
}
void Set(intptr_t i, bool value) { value ? Add(i) : Remove(i); }
bool Equals(const BitVector& other) const;
// Add all elements that are in the bitvector from.
@ -94,14 +92,6 @@ class BitVector : public ZoneAllocated {
return (block & (static_cast<uword>(1) << (i % kBitsPerWord))) != 0;
}
bool SubsetOf(const BitVector& other) {
ASSERT(length_ == other.length_);
for (intptr_t i = 0; i < data_length_; ++i) {
if ((data_[i] & other.data_[i]) != data_[i]) return false;
}
return true;
}
void Clear() {
for (intptr_t i = 0; i < data_length_; i++) {
data_[i] = 0;


@ -612,9 +612,7 @@ void FlowGraphCompiler::VisitBlocks() {
EmitInstructionPrologue(instr);
ASSERT(pending_deoptimization_env_ == NULL);
pending_deoptimization_env_ = instr->env();
DEBUG_ONLY(current_instruction_ = instr);
instr->EmitNativeCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
pending_deoptimization_env_ = NULL;
if (IsPeephole(instr)) {
ASSERT(top_of_stack_ == nullptr);
@ -705,9 +703,7 @@ void FlowGraphCompiler::GenerateDeferredCode() {
slow_path->instruction()->tag());
SpecialStatsBegin(stats_tag);
BeginCodeSourceRange();
DEBUG_ONLY(current_instruction_ = slow_path->instruction());
slow_path->GenerateCode(this);
DEBUG_ONLY(current_instruction_ = nullptr);
EndCodeSourceRange(slow_path->instruction()->token_pos());
SpecialStatsEnd(stats_tag);
}


@ -1117,15 +1117,6 @@ class FlowGraphCompiler : public ValueObject {
// is amenable to a peephole optimization.
bool IsPeephole(Instruction* instr) const;
#if defined(DEBUG)
bool CanCallDart() const {
return current_instruction_ == nullptr ||
current_instruction_->CanCallDart();
}
#else
bool CanCallDart() const { return true; }
#endif
// This struct contains either function or code, the other one being NULL.
class StaticCallsStruct : public ZoneAllocated {
public:
@ -1219,10 +1210,6 @@ class FlowGraphCompiler : public ValueObject {
ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
Array& edge_counters_array_;
// Instruction currently running EmitNativeCode(). Useful for asserts.
// Does not include Phis and BlockEntrys.
DEBUG_ONLY(Instruction* current_instruction_ = nullptr);
DISALLOW_COPY_AND_ASSIGN(FlowGraphCompiler);
};


@ -1000,7 +1000,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@ -1011,7 +1010,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);
@ -1068,7 +1066,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
@ -1092,7 +1089,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@ -1118,7 +1114,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@ -1166,7 +1161,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);
@ -1211,7 +1205,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t count_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R9, ic_data);
@ -1228,7 +1221,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R4, arguments_descriptor);
@ -1248,7 +1240,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
ASSERT(cid_reg != ARGS_DESC_REG);
if (!arguments_descriptor.IsNull()) {
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);


@ -971,7 +971,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ BranchLinkPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@ -982,7 +981,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
AddPcRelativeCallTarget(target, entry_kind);
@ -1030,7 +1028,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
@ -1053,7 +1050,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@ -1085,7 +1081,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@ -1130,7 +1125,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(ic_data.NumArgsTested() == 1);
const Code& initial_stub = StubCode::UnlinkedCall();
const char* switchable_call_mode = "smiable";
@ -1181,7 +1175,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t count_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R5, ic_data);
@ -1198,7 +1191,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R4, arguments_descriptor);
@ -1218,7 +1210,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
ASSERT(cid_reg != ARGS_DESC_REG);
if (!arguments_descriptor.IsNull()) {
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);


@ -866,7 +866,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ Call(stub, /*moveable_target=*/false, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@ -877,7 +876,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const auto& stub = StubCode::CallStaticFunction();
__ Call(stub, /*movable_target=*/true, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
@ -899,7 +897,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t count_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(ECX, ic_data);
@ -927,7 +924,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
@ -951,7 +947,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
@ -977,7 +972,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@ -1025,7 +1019,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(EDX, arguments_descriptor);
} else {


@ -985,7 +985,6 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
RawPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
__ CallPatchable(stub, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
}
@ -996,7 +995,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(is_optimizing());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ GenerateUnRelocatedPcRelativeCall();
@ -1029,7 +1027,6 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t count_with_type_args,
LocationSummary* locs,
const ICData& ic_data,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
const Code& stub =
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(RBX, ic_data);
@ -1058,7 +1055,6 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Each ICData propagated from unoptimized to optimized code contains the
// function that corresponds to the Dart function of that IC call. Due
@ -1082,7 +1078,6 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
@ -1108,7 +1103,6 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
LocationSummary* locs,
intptr_t try_index,
intptr_t slow_path_argument_count) {
ASSERT(CanCallDart());
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
@ -1150,7 +1144,6 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
LocationSummary* locs,
Code::EntryKind entry_kind,
bool receiver_can_be_smi) {
ASSERT(CanCallDart());
ASSERT(entry_kind == Code::EntryKind::kNormal ||
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(ic_data.NumArgsTested() == 1);
@ -1193,7 +1186,6 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
TokenPosition token_pos,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
__ LoadObject(R10, arguments_descriptor);
@ -1213,7 +1205,6 @@ void FlowGraphCompiler::EmitDispatchTableCall(
Register cid_reg,
int32_t selector_offset,
const Array& arguments_descriptor) {
ASSERT(CanCallDart());
const Register table_reg = RAX;
ASSERT(cid_reg != table_reg);
ASSERT(cid_reg != ARGS_DESC_REG);


@ -1031,15 +1031,6 @@ class Instruction : public ZoneAllocated {
// See StoreInstanceFieldInstr::HasUnknownSideEffects() for rationale.
virtual bool HasUnknownSideEffects() const = 0;
// Whether this instruction can call Dart code without going through
// the runtime.
//
// Must be true for any instruction which can call Dart code without
// first creating an exit frame to transition into the runtime.
//
// See also WriteBarrierElimination and Thread::RememberLiveTemporaries().
virtual bool CanCallDart() const { return false; }
virtual bool CanTriggerGC() const;
// Get the block entry for this instruction.
@ -3152,8 +3143,6 @@ class BranchInstr : public Instruction {
return comparison()->HasUnknownSideEffects();
}
virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
ComparisonInstr* comparison() const { return comparison_; }
void SetComparison(ComparisonInstr* comp);
@ -3636,7 +3625,6 @@ class TemplateDartCall : public Definition {
}
virtual bool MayThrow() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual intptr_t InputCount() const { return inputs_->length(); }
virtual Value* InputAt(intptr_t i) const { return inputs_->At(i); }
@ -4347,7 +4335,6 @@ class IfThenElseInstr : public Definition {
virtual bool HasUnknownSideEffects() const {
return comparison()->HasUnknownSideEffects();
}
virtual bool CanCallDart() const { return comparison()->CanCallDart(); }
virtual bool AttributesEqual(Instruction* other) const {
IfThenElseInstr* other_if_then_else = other->AsIfThenElse();
@ -4476,7 +4463,6 @@ class StaticCallInstr : public TemplateDartCall<0> {
}
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
// Initialize result type of this call instruction if target is a recognized
// method or has pragma annotation.
@ -4735,9 +4721,6 @@ class NativeCallInstr : public TemplateDartCall<0> {
virtual bool HasUnknownSideEffects() const { return true; }
// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }
void SetupNative();
PRINT_OPERANDS_TO_SUPPORT
@ -4797,9 +4780,6 @@ class FfiCallInstr : public Definition {
virtual bool HasUnknownSideEffects() const { return true; }
// Always creates an exit frame before more Dart code can be called.
virtual bool CanCallDart() const { return false; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const;
virtual Representation representation() const;
@ -5349,7 +5329,6 @@ class StringInterpolateInstr : public TemplateDefinition<1, Throws> {
virtual CompileType ComputeType() const;
// Issues a static call to Dart code which calls toString on objects.
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return !FLAG_precompiled_mode; }
const Function& CallFunction() const;
@ -7027,7 +7006,6 @@ class CheckedSmiOpInstr : public TemplateDefinition<2, Throws> {
virtual bool RecomputeType();
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
virtual Definition* Canonicalize(FlowGraph* flow_graph);
@ -7072,7 +7050,6 @@ class CheckedSmiComparisonInstr : public TemplateComparison<2, Throws> {
bool is_negated() const { return is_negated_; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool CanCallDart() const { return true; }
PRINT_OPERANDS_TO_SUPPORT
@ -7708,8 +7685,6 @@ class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool CanCallDart() const { return true; }
private:
InstanceCallInstr* instance_call_;


@ -51,7 +51,7 @@ class SlotCache : public ZoneAllocated {
const char* Slot::KindToCString(Kind k) {
switch (k) {
#define NATIVE_CASE(C, U, F, id, M) \
#define NATIVE_CASE(C, F, id, M) \
case NATIVE_SLOT_NAME(C, F, id, M): \
return NATIVE_TO_STR(C, F, id, M);
NATIVE_SLOTS_LIST(NATIVE_CASE)
@ -70,7 +70,7 @@ const char* Slot::KindToCString(Kind k) {
bool Slot::ParseKind(const char* str, Kind* out) {
ASSERT(str != nullptr && out != nullptr);
#define NATIVE_CASE(C, U, F, id, M) \
#define NATIVE_CASE(C, F, id, M) \
if (strcmp(str, NATIVE_TO_STR(C, F, id, M)) == 0) { \
*out = NATIVE_SLOT_NAME(C, F, id, M); \
return true; \
@ -101,8 +101,7 @@ const Slot& Slot::GetNativeSlot(Kind kind) {
static const Slot fields[] = {
#define FIELD_FINAL (IsImmutableBit::encode(true))
#define FIELD_VAR (0)
#define DEFINE_NATIVE_FIELD(ClassName, UnderlyingType, FieldName, cid, \
mutability) \
#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability) \
Slot(Kind::k##ClassName##_##FieldName, FIELD_##mutability, k##cid##Cid, \
compiler::target::ClassName::FieldName##_offset(), \
#ClassName "." #FieldName, nullptr),


@ -36,11 +36,10 @@ class ParsedFunction;
// List of slots that correspond to fields of native objects in the following
// format:
//
// V(class_name, underlying_type, field_name, exact_type, FINAL|VAR)
// V(class_name, field_name, exact_type, FINAL|VAR)
//
// - class_name and field_name specify the name of the host class and the name
// of the field respectively;
// - underlying_type: the Raw class which holds the field;
// - exact_type specifies exact type of the field (any load from this field
// would only yield instances of this type);
// - the last component specifies whether field behaves like a final field
@ -49,31 +48,31 @@ class ParsedFunction;
//
// Note: native slots are expected to be non-nullable.
#define NATIVE_SLOTS_LIST(V) \
V(Array, RawArray, length, Smi, FINAL) \
V(Context, RawContext, parent, Context, FINAL) \
V(Closure, RawClosure, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function, Function, FINAL) \
V(Closure, RawClosure, context, Context, FINAL) \
V(Closure, RawClosure, hash, Context, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, RawTypedDataBase, data_field, Dynamic, FINAL) \
V(TypedDataBase, RawTypedDataBase, length, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, data, Dynamic, FINAL) \
V(String, RawString, length, Smi, FINAL) \
V(LinkedHashMap, RawLinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, RawArray, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, count, Smi, FINAL) \
V(Pointer, RawPointer, c_memory_address, Dynamic, FINAL) \
V(Type, RawType, arguments, TypeArguments, FINAL)
V(Array, length, Smi, FINAL) \
V(Context, parent, Context, FINAL) \
V(Closure, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, function_type_arguments, TypeArguments, FINAL) \
V(Closure, function, Function, FINAL) \
V(Closure, context, Context, FINAL) \
V(Closure, hash, Context, VAR) \
V(GrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, data_field, Dynamic, FINAL) \
V(TypedDataBase, length, Smi, FINAL) \
V(TypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, data, Dynamic, FINAL) \
V(String, length, Smi, FINAL) \
V(LinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, count, Smi, FINAL) \
V(Pointer, c_memory_address, Dynamic, FINAL) \
V(Type, arguments, TypeArguments, FINAL)
// Slot is an abstraction that describes an readable (and possibly writeable)
// location within an object.
@ -87,7 +86,7 @@ class Slot : public ZoneAllocated {
// clang-format off
enum class Kind : uint8_t {
// Native slots are identified by their kind - each native slot has its own.
#define DECLARE_KIND(ClassName, UnderlyingType, FieldName, cid, mutability) \
#define DECLARE_KIND(ClassName, FieldName, cid, mutability) \
k##ClassName##_##FieldName,
NATIVE_SLOTS_LIST(DECLARE_KIND)
#undef DECLARE_KIND
@ -135,7 +134,7 @@ class Slot : public ZoneAllocated {
const ParsedFunction* parsed_function);
// Convenience getters for native slots.
#define DEFINE_GETTER(ClassName, UnderlyingType, FieldName, cid, mutability) \
#define DEFINE_GETTER(ClassName, FieldName, cid, mutability) \
static const Slot& ClassName##_##FieldName() { \
return GetNativeSlot(Kind::k##ClassName##_##FieldName); \
}


@ -19,7 +19,6 @@
#include "vm/compiler/backend/redundancy_elimination.h"
#include "vm/compiler/backend/type_propagator.h"
#include "vm/compiler/call_specializer.h"
#include "vm/compiler/write_barrier_elimination.h"
#if defined(DART_PRECOMPILER)
#include "vm/compiler/aot/aot_call_specializer.h"
#include "vm/compiler/aot/precompiler.h"
@ -267,7 +266,7 @@ FlowGraph* CompilerPass::RunForceOptimizedPipeline(
// so it should not be lifted earlier than that pass.
INVOKE_PASS(DCE);
INVOKE_PASS(Canonicalize);
INVOKE_PASS(EliminateWriteBarriers);
INVOKE_PASS(WriteBarrierElimination);
INVOKE_PASS(FinalizeGraph);
#if defined(DART_PRECOMPILER)
if (mode == kAOT) {
@ -350,7 +349,7 @@ FlowGraph* CompilerPass::RunPipeline(PipelineMode mode,
INVOKE_PASS(EliminateStackOverflowChecks);
INVOKE_PASS(Canonicalize);
INVOKE_PASS(AllocationSinking_DetachMaterializations);
INVOKE_PASS(EliminateWriteBarriers);
INVOKE_PASS(WriteBarrierElimination);
INVOKE_PASS(FinalizeGraph);
#if defined(DART_PRECOMPILER)
if (mode == kAOT) {
@ -530,7 +529,38 @@ COMPILER_PASS(ReorderBlocks, {
}
});
COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });
static void WriteBarrierElimination(FlowGraph* flow_graph) {
for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
!block_it.Done(); block_it.Advance()) {
BlockEntryInstr* block = block_it.Current();
Definition* last_allocated = nullptr;
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
Instruction* current = it.Current();
if (!current->CanTriggerGC()) {
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
if (instr->instance()->definition() == last_allocated) {
instr->set_emit_store_barrier(kNoStoreBarrier);
}
continue;
}
}
if (AllocationInstr* alloc = current->AsAllocation()) {
if (alloc->WillAllocateNewOrRemembered()) {
last_allocated = alloc;
continue;
}
}
if (current->CanTriggerGC()) {
last_allocated = nullptr;
}
}
}
}
COMPILER_PASS(WriteBarrierElimination,
{ WriteBarrierElimination(flow_graph); });
COMPILER_PASS(FinalizeGraph, {
// At the end of the pipeline, force recomputing and caching graph


@ -50,7 +50,7 @@ namespace dart {
V(TypePropagation) \
V(UseTableDispatch) \
V(WidenSmiToInt32) \
V(EliminateWriteBarriers)
V(WriteBarrierElimination)
class AllocationSinking;
class BlockScheduler;


@ -166,8 +166,6 @@ compiler_sources = [
"stub_code_compiler_arm64.cc",
"stub_code_compiler_ia32.cc",
"stub_code_compiler_x64.cc",
"write_barrier_elimination.cc",
"write_barrier_elimination.h",
]
compiler_sources_tests = [


@ -1,308 +0,0 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include <functional>
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/write_barrier_elimination.h"
namespace dart {
class DefinitionIndexPairTrait {
public:
typedef Definition* Key;
typedef intptr_t Value;
struct Pair {
Definition* definition = nullptr;
intptr_t index = -1;
Pair() {}
Pair(Definition* definition, intptr_t index)
: definition(definition), index(index) {}
};
static Key KeyOf(Pair kv) { return kv.definition; }
static Value ValueOf(Pair kv) { return kv.index; }
static inline intptr_t Hashcode(Key key) { return std::hash<Key>()(key); }
static inline bool IsKeyEqual(Pair kv, Key key) {
return kv.definition == key;
}
};
typedef DirectChainedHashMap<DefinitionIndexPairTrait> DefinitionIndexMap;
// Inter-block write-barrier elimination.
//
// This optimization removes write barriers from some store instructions under
// certain assumptions which the runtime is responsible to sustain.
//
// We can skip a write barrier on a StoreInstanceField to a container object X
// if we know that either:
// - X is in new-space, or
// - X is in old-space, and:
// - X is in the store buffer, and
// - X is in the deferred marking stack (if concurrent marking is enabled)
//
// The result of an Allocation instruction (Instruction::IsAllocation()) will
// satisfy one of these requirements immediately after the instruction
// if WillAllocateNewOrRemembered() is true.
//
// Without runtime support, we would have to assume that any instruction which
// can trigger a new-space scavenge (Instruction::CanTriggerGC()) might promote
// a new-space temporary into old-space, and we could not skip a store barrier
// on a write into it afterward.
//
// However, many instructions can trigger GC in unlikely cases, like
// CheckStackOverflow and Box. To avoid interrupting write barrier elimination
// across these instructions, the runtime ensures that any live temporaries
// (except arrays) promoted during a scavenge caused by a non-Dart-call
// instruction (see Instruction::CanCallDart()) will be added to the store
// buffer. Additionally, if concurrent marking was initiated, the runtime
// ensures that all live temporaries are also in the deferred marking stack.
//
// See also Thread::RememberLiveTemporaries() and
// Thread::DeferredMarkLiveTemporaries().
class WriteBarrierElimination : public ValueObject {
public:
WriteBarrierElimination(Zone* zone, FlowGraph* flow_graph);
void Analyze();
void SaveResults();
private:
void IndexDefinitions(Zone* zone);
bool AnalyzeBlock(BlockEntryInstr* entry);
void MergePredecessors(BlockEntryInstr* entry);
void UpdateVectorForBlock(BlockEntryInstr* entry, bool finalize);
static intptr_t Index(BlockEntryInstr* entry) {
return entry->postorder_number();
}
intptr_t Index(Definition* def) {
ASSERT(IsUsable(def));
return definition_indices_.LookupValue(def);
}
bool IsUsable(Definition* def) {
return def->IsPhi() || (def->IsAllocation() &&
def->AsAllocation()->WillAllocateNewOrRemembered());
}
#if defined(DEBUG)
static bool SlotEligibleForWBE(const Slot& slot);
#endif
FlowGraph* const flow_graph_;
const GrowableArray<BlockEntryInstr*>* const block_order_;
// Number of usable definitions in the graph.
intptr_t definition_count_ = 0;
// Maps each usable definition to its index in the bitvectors.
DefinitionIndexMap definition_indices_;
// Bitvector with all non-Array-allocation instructions set. Used to
// un-mark Array allocations as usable.
BitVector* array_allocations_mask_;
// Bitvectors for each block of which allocations are new or remembered
// at the start (after Phis).
GrowableArray<BitVector*> usable_allocs_in_;
// Bitvectors for each block of which allocations are new or remembered
// at the end of the block.
GrowableArray<BitVector*> usable_allocs_out_;
// Remaining blocks to process.
GrowableArray<BlockEntryInstr*> worklist_;
// Temporary used in many functions to avoid repeated zone allocation.
BitVector* vector_;
};
WriteBarrierElimination::WriteBarrierElimination(Zone* zone,
FlowGraph* flow_graph)
: flow_graph_(flow_graph), block_order_(&flow_graph->postorder()) {
IndexDefinitions(zone);
for (intptr_t i = 0; i < block_order_->length(); ++i) {
usable_allocs_in_.Add(new (zone) BitVector(zone, definition_count_));
usable_allocs_in_[i]->CopyFrom(vector_);
usable_allocs_out_.Add(new (zone) BitVector(zone, definition_count_));
usable_allocs_out_[i]->CopyFrom(vector_);
}
}
void WriteBarrierElimination::Analyze() {
for (intptr_t i = 0; i < block_order_->length(); ++i) {
worklist_.Add(block_order_->At(i));
}
while (!worklist_.is_empty()) {
auto* const entry = worklist_.RemoveLast();
if (AnalyzeBlock(entry)) {
for (intptr_t i = 0; i < entry->SuccessorCount(); ++i) {
worklist_.Add(entry->SuccessorAt(i));
}
}
}
}
void WriteBarrierElimination::SaveResults() {
for (intptr_t i = 0; i < block_order_->length(); ++i) {
vector_->CopyFrom(usable_allocs_in_[i]);
UpdateVectorForBlock(block_order_->At(i), /*finalize=*/true);
}
}
void WriteBarrierElimination::IndexDefinitions(Zone* zone) {
BitmapBuilder array_allocations;
for (intptr_t i = 0; i < block_order_->length(); ++i) {
BlockEntryInstr* const block = block_order_->At(i);
if (auto join_block = block->AsJoinEntry()) {
for (PhiIterator it(join_block); !it.Done(); it.Advance()) {
array_allocations.Set(definition_count_, false);
definition_indices_.Insert({it.Current(), definition_count_++});
}
}
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
if (Definition* current = it.Current()->AsDefinition()) {
if (IsUsable(current)) {
array_allocations.Set(definition_count_, current->IsCreateArray());
definition_indices_.Insert({current, definition_count_++});
}
}
}
}
vector_ = new (zone) BitVector(zone, definition_count_);
vector_->SetAll();
array_allocations_mask_ = new (zone) BitVector(zone, definition_count_);
for (intptr_t i = 0; i < definition_count_; ++i) {
if (!array_allocations.Get(i)) array_allocations_mask_->Add(i);
}
}
void WriteBarrierElimination::MergePredecessors(BlockEntryInstr* entry) {
vector_->Clear();
for (intptr_t i = 0; i < entry->PredecessorCount(); ++i) {
BitVector* predecessor_set =
usable_allocs_out_[Index(entry->PredecessorAt(i))];
if (i == 0) {
vector_->AddAll(predecessor_set);
} else {
vector_->Intersect(predecessor_set);
}
}
if (JoinEntryInstr* join = entry->AsJoinEntry()) {
// A Phi is usable if and only if all its inputs are usable.
for (PhiIterator it(join); !it.Done(); it.Advance()) {
PhiInstr* phi = it.Current();
ASSERT(phi->InputCount() == entry->PredecessorCount());
bool is_usable = true;
for (intptr_t i = 0; i < phi->InputCount(); ++i) {
BitVector* const predecessor_set =
usable_allocs_out_[Index(entry->PredecessorAt(i))];
Definition* const origin = phi->InputAt(i)->definition();
if (!IsUsable(origin) || !predecessor_set->Contains(Index(origin))) {
is_usable = false;
break;
}
}
vector_->Set(Index(phi), is_usable);
}
}
}
bool WriteBarrierElimination::AnalyzeBlock(BlockEntryInstr* entry) {
// Recompute the usable allocs in-set.
MergePredecessors(entry);
// If the in-set has not changed, there's no work to do.
BitVector* const in_set = usable_allocs_in_[Index(entry)];
ASSERT(vector_->SubsetOf(*in_set)); // convergence
if (vector_->Equals(*in_set)) return false;
usable_allocs_in_[Index(entry)]->CopyFrom(vector_);
UpdateVectorForBlock(entry, /*finalize=*/false);
// Successors only need to be updated if the out-set changes.
if (vector_->Equals(*usable_allocs_out_[Index(entry)])) return false;
BitVector* const out_set = usable_allocs_out_[Index(entry)];
ASSERT(vector_->SubsetOf(*out_set)); // convergence
out_set->CopyFrom(vector_);
return true;
}
#if defined(DEBUG)
bool WriteBarrierElimination::SlotEligibleForWBE(const Slot& slot) {
// We assume that Dart code only stores into Instances or Contexts.
// This assumption is used in
// RestoreWriteBarrierInvariantVisitor::VisitPointers.
switch (slot.kind()) {
case Slot::Kind::kCapturedVariable: // Context
return true;
case Slot::Kind::kDartField: // Instance
return true;
#define FOR_EACH_NATIVE_SLOT(class, underlying_type, field, type, modifiers) \
case Slot::Kind::k##class##_##field: \
return std::is_base_of<RawInstance, underlying_type>::value || \
std::is_base_of<RawContext, underlying_type>::value;
NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
#undef FOR_EACH_NATIVE_SLOT
default:
return false;
}
}
#endif
void WriteBarrierElimination::UpdateVectorForBlock(BlockEntryInstr* entry,
bool finalize) {
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
Instruction* const current = it.Current();
if (finalize) {
if (StoreInstanceFieldInstr* instr = current->AsStoreInstanceField()) {
Definition* const container = instr->instance()->definition();
if (IsUsable(container) && vector_->Contains(Index(container))) {
DEBUG_ASSERT(SlotEligibleForWBE(instr->slot()));
instr->set_emit_store_barrier(kNoStoreBarrier);
}
}
}
if (current->CanCallDart()) {
vector_->Clear();
} else if (current->CanTriggerGC()) {
// Clear array allocations. These are not added to the remembered set
// by Thread::RememberLiveTemporaries() after a scavenge.
vector_->Intersect(array_allocations_mask_);
}
if (AllocationInstr* const alloc = current->AsAllocation()) {
if (alloc->WillAllocateNewOrRemembered()) {
vector_->Add(Index(alloc));
}
}
}
}
void EliminateWriteBarriers(FlowGraph* flow_graph) {
WriteBarrierElimination elimination(Thread::Current()->zone(), flow_graph);
elimination.Analyze();
elimination.SaveResults();
}
} // namespace dart


@ -1,15 +0,0 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
#define RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_
namespace dart {
class FlowGraph;
void EliminateWriteBarriers(FlowGraph* flow_graph);
} // namespace dart
#endif // RUNTIME_VM_COMPILER_WRITE_BARRIER_ELIMINATION_H_


@ -816,8 +816,6 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
ASSERT(result);
}
isolate_group_->DeferredMarkLiveTemporaries();
// Wait for roots to be marked before exiting safepoint.
MonitorLocker ml(&root_slices_monitor_);
while (root_slices_finished_ != kNumRootSlices) {


@ -1088,9 +1088,6 @@ void Scavenger::Scavenge() {
ProcessWeakReferences();
page_space->ReleaseDataLock();
// Restore write-barrier assumptions.
isolate_group->RememberLiveTemporaries();
// Scavenge finished. Run accounting.
int64_t end = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kProcessToSpace, process_to_space - iterate_roots);


@ -2364,7 +2364,7 @@ void Isolate::LowLevelCleanup(Isolate* isolate) {
// after a potential shutdown of the group, which would turn down any pending
// GC tasks as well as the heap.
Isolate::MarkIsolateDead(is_application_isolate);
} // namespace dart
}
Dart_InitializeIsolateCallback Isolate::initialize_callback_ = nullptr;
Dart_IsolateGroupCreateCallback Isolate::create_group_callback_ = nullptr;
@ -2453,18 +2453,6 @@ void IsolateGroup::ReleaseStoreBuffers() {
thread_registry()->ReleaseStoreBuffers();
}
void Isolate::RememberLiveTemporaries() {
if (mutator_thread_ != nullptr) {
mutator_thread_->RememberLiveTemporaries();
}
}
void Isolate::DeferredMarkLiveTemporaries() {
if (mutator_thread_ != nullptr) {
mutator_thread_->DeferredMarkLiveTemporaries();
}
}
void IsolateGroup::EnableIncrementalBarrier(
MarkingStack* marking_stack,
MarkingStack* deferred_marking_stack) {
@ -2582,17 +2570,6 @@ uword IsolateGroup::FindPendingDeoptAtSafepoint(uword fp) {
return 0;
}
void IsolateGroup::DeferredMarkLiveTemporaries() {
ForEachIsolate(
[&](Isolate* isolate) { isolate->DeferredMarkLiveTemporaries(); },
/*at_safepoint=*/true);
}
void IsolateGroup::RememberLiveTemporaries() {
ForEachIsolate([&](Isolate* isolate) { isolate->RememberLiveTemporaries(); },
/*at_safepoint=*/true);
}
RawClass* Isolate::GetClassForHeapWalkAt(intptr_t cid) {
RawClass* raw_class = nullptr;
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)


@ -473,9 +473,6 @@ class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {
uword FindPendingDeoptAtSafepoint(uword fp);
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
private:
friend class Heap;
friend class StackFrame; // For `[isolates_].First()`.
@ -1146,9 +1143,6 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
static void NotifyLowMemory();
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
private:
friend class Dart; // Init, InitOnce, Shutdown.
friend class IsolateKillerVisitor; // Kill().


@ -160,7 +160,7 @@ bool StackFrame::IsBareInstructionsStubFrame() const {
return false;
}
bool StackFrame::IsStubFrame(bool needed_for_gc) const {
bool StackFrame::IsStubFrame() const {
if (is_interpreted()) {
return false;
}
@ -176,7 +176,7 @@ bool StackFrame::IsStubFrame(bool needed_for_gc) const {
NoSafepointScope no_safepoint;
#endif
RawCode* code = GetCodeObject(needed_for_gc);
RawCode* code = GetCodeObject();
ASSERT(code != Object::null());
const intptr_t cid = code->ptr()->owner_->GetClassId();
ASSERT(cid == kNullCid || cid == kClassCid || cid == kFunctionCid);
@ -418,9 +418,9 @@ RawCode* StackFrame::LookupDartCode() const {
return Code::null();
}
RawCode* StackFrame::GetCodeObject(bool needed_for_gc) const {
RawCode* StackFrame::GetCodeObject() const {
ASSERT(!is_interpreted());
if (auto isolate = IsolateOfBareInstructionsFrame(needed_for_gc)) {
if (auto isolate = IsolateOfBareInstructionsFrame(/*needed_for_gc=*/false)) {
auto const rct = isolate->reverse_pc_lookup_cache();
return rct->Lookup(pc(), /*is_return_address=*/true);
} else {
@ -546,8 +546,8 @@ TokenPosition StackFrame::GetTokenPos() const {
return TokenPosition::kNoSource;
}
bool StackFrame::IsValid(bool needed_for_gc) const {
if (IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc)) {
bool StackFrame::IsValid() const {
if (IsEntryFrame() || IsExitFrame() || IsStubFrame()) {
return true;
}
if (is_interpreted()) {


@ -94,7 +94,7 @@ class StackFrame : public ValueObject {
const char* ToCString() const;
// Check validity of a frame, used for assertion purposes.
virtual bool IsValid(bool needed_for_gc = false) const;
virtual bool IsValid() const;
// Returns the isolate containing the bare instructions of the current frame.
//
@ -112,12 +112,11 @@ class StackFrame : public ValueObject {
bool IsBareInstructionsStubFrame() const;
// Frame type.
virtual bool IsDartFrame(bool validate = true,
bool needed_for_gc = false) const {
ASSERT(!validate || IsValid(needed_for_gc));
return !(IsEntryFrame() || IsExitFrame() || IsStubFrame(needed_for_gc));
virtual bool IsDartFrame(bool validate = true) const {
ASSERT(!validate || IsValid());
return !(IsEntryFrame() || IsExitFrame() || IsStubFrame());
}
virtual bool IsStubFrame(bool neede_for_gc = false) const;
virtual bool IsStubFrame() const;
virtual bool IsEntryFrame() const { return false; }
virtual bool IsExitFrame() const { return false; }
@ -159,7 +158,7 @@ class StackFrame : public ValueObject {
Thread* thread() const { return thread_; }
private:
RawCode* GetCodeObject(bool needed_for_gc = false) const;
RawCode* GetCodeObject() const;
RawBytecode* GetBytecodeObject() const;
@ -200,11 +199,9 @@ class StackFrame : public ValueObject {
// runtime code.
class ExitFrame : public StackFrame {
public:
bool IsValid(bool needed_for_gc = false) const { return sp() == 0; }
bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
return false;
}
bool IsStubFrame(bool needed_for_gc = false) const { return false; }
bool IsValid() const { return sp() == 0; }
bool IsDartFrame(bool validate = true) const { return false; }
bool IsStubFrame() const { return false; }
bool IsExitFrame() const { return true; }
// Visit objects in the frame.
@ -224,13 +221,11 @@ class ExitFrame : public StackFrame {
// dart code.
class EntryFrame : public StackFrame {
public:
bool IsValid(bool needed_for_gc = false) const {
bool IsValid() const {
return StubCode::InInvocationStub(pc(), is_interpreted());
}
bool IsDartFrame(bool validate = true, bool needed_for_gc = false) const {
return false;
}
bool IsStubFrame(bool needed_for_gc = false) const { return false; }
bool IsDartFrame(bool validate = true) const { return false; }
bool IsStubFrame() const { return false; }
bool IsEntryFrame() const { return true; }
// Visit objects in the frame.


@ -727,105 +727,6 @@ void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
}
}
class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
public:
RestoreWriteBarrierInvariantVisitor(IsolateGroup* group,
Thread* thread,
Thread::RestoreWriteBarrierInvariantOp op)
: ObjectPointerVisitor(group), thread_(thread), op_(op) {}
void VisitPointers(RawObject** first, RawObject** last) {
for (; first != last + 1; first++) {
RawObject* obj = *first;
// Stores into new-space objects don't need a write barrier.
if (obj->IsSmiOrNewObject()) continue;
// To avoid adding too much work into the remembered set, skip
// arrays. Write barrier elimination will not remove the barrier
// if we can trigger GC between array allocation and store.
if (obj->GetClassId() == kArrayCid) continue;
// Dart code won't store into VM-internal objects except Contexts.
// This assumption is checked by an assertion in
// WriteBarrierElimination::UpdateVectorForBlock.
if (!obj->IsDartInstance() && !obj->IsContext()) continue;
// Dart code won't store into canonical instances.
if (obj->IsCanonical()) continue;
// Already remembered, nothing to do.
if (op_ == Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet &&
obj->IsRemembered()) {
continue;
}
// Objects in the VM isolate heap are immutable and won't be
// stored into. Check this condition last because there's no bit
// in the header for it.
if (obj->InVMIsolateHeap()) continue;
switch (op_) {
case Thread::RestoreWriteBarrierInvariantOp::kAddToRememberedSet:
obj->AddToRememberedSet(thread_);
break;
case Thread::RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack:
// Re-scan obj when finalizing marking.
thread_->DeferredMarkingStackAddObject(obj);
break;
}
}
}
private:
Thread* const thread_;
Thread::RestoreWriteBarrierInvariantOp op_;
};
// Write barrier elimination assumes that all live temporaries will be
// in the remembered set after a scavenge triggered by a non-Dart-call
// instruction (see Instruction::CanCallDart()), and additionally they will be
// in the deferred marking stack if concurrent marking started. Specifically,
// this includes any instruction which will always create an exit frame
// below the current frame before any other Dart frames.
//
// Therefore, to support this assumption, we scan the stack after a scavenge
// or when concurrent marking begins and add all live temporaries in
// Dart frames preceeding an exit frame to the store buffer or deferred
// marking stack.
void Thread::RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op) {
ASSERT(IsAtSafepoint());
ASSERT(IsMutatorThread());
const StackFrameIterator::CrossThreadPolicy cross_thread_policy =
StackFrameIterator::kAllowCrossThreadIteration;
StackFrameIterator frames_iterator(top_exit_frame_info(),
ValidationPolicy::kDontValidateFrames,
this, cross_thread_policy);
RestoreWriteBarrierInvariantVisitor visitor(isolate_group(), this, op);
bool scan_next_dart_frame = false;
for (StackFrame* frame = frames_iterator.NextFrame(); frame != NULL;
frame = frames_iterator.NextFrame()) {
if (frame->IsExitFrame()) {
scan_next_dart_frame = true;
} else if (frame->IsDartFrame(/*validate=*/false, /*needed_for_gc=*/true)) {
if (scan_next_dart_frame) {
frame->VisitObjectPointers(&visitor);
}
scan_next_dart_frame = false;
}
}
}
void Thread::DeferredMarkLiveTemporaries() {
RestoreWriteBarrierInvariant(
RestoreWriteBarrierInvariantOp::kAddToDeferredMarkingStack);
}
void Thread::RememberLiveTemporaries() {
RestoreWriteBarrierInvariant(
RestoreWriteBarrierInvariantOp::kAddToRememberedSet);
}
bool Thread::CanLoadFromThread(const Object& object) {
// In order to allow us to use assembler helper routines with non-[Code]
// objects *before* stubs are initialized, we only loop ver the stubs if the


@ -824,8 +824,6 @@ class Thread : public ThreadState {
// Visit all object pointers.
void VisitObjectPointers(ObjectPointerVisitor* visitor,
ValidationPolicy validate_frames);
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
bool IsValidHandle(Dart_Handle object) const;
bool IsValidLocalHandle(Dart_Handle object) const;
@ -856,13 +854,6 @@ class Thread : public ThreadState {
template <class T>
T* AllocateReusableHandle();
enum class RestoreWriteBarrierInvariantOp {
kAddToRememberedSet,
kAddToDeferredMarkingStack
};
friend class RestoreWriteBarrierInvariantVisitor;
void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
// Set the current compiler state and return the previous compiler state.
CompilerState* SetCompilerState(CompilerState* state) {
CompilerState* previous = compiler_state_;