[vm, compiler] Consistently use CallTargets instead of ICData or MegamorphicCache.

Bug: https://github.com/dart-lang/sdk/issues/37575
Change-Id: I15f3862af380b04498cb58c8658aa6de76212733
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/114445
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Ryan Macnak, 2019-09-04 18:49:40 +00:00, committed by commit-bot@chromium.org
parent efe256cf6c
commit 67bb2b7819
18 changed files with 457 additions and 665 deletions
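
In broad strokes, the change makes the optimization passes consume an off-heap CallTargets summary (sorted cid ranges with their resolved target functions) instead of reading ICData checks or MegamorphicCache entries directly at each use site. A minimal standalone sketch of that summary's shape, using simplified stand-in types rather than the VM's real classes:

    // Sketch only: stand-ins for the VM's TargetInfo/CallTargets.
    #include <cstdint>
    #include <string>
    #include <vector>

    struct TargetInfo {
      intptr_t cid_start, cid_end;  // receiver class-id range
      std::string target;           // resolved target function
      intptr_t count;               // observed call count
    };

    class CallTargets {
     public:
      bool is_empty() const { return targets_.empty(); }
      bool IsMonomorphic() const {
        return targets_.size() == 1 &&
               targets_[0].cid_start == targets_[0].cid_end;
      }
      bool HasSingleTarget() const {
        if (targets_.empty()) return false;
        for (const TargetInfo& t : targets_)
          if (t.target != targets_[0].target) return false;
        return true;
      }
      std::vector<TargetInfo> targets_;
    };

    int main() {
      CallTargets t;
      t.targets_.push_back({10, 10, "A.foo", 7});
      return t.IsMonomorphic() && t.HasSingleTarget() ? 0 : 1;
    }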

@ -109,10 +109,9 @@ bool AotCallSpecializer::TryCreateICDataForUniqueTarget(
return false;
}
const ICData& ic_data =
ICData::ZoneHandle(Z, ICData::NewFrom(*call->ic_data(), 1));
ic_data.AddReceiverCheck(cls.id(), target_function);
call->set_ic_data(&ic_data);
call->SetTargets(
CallTargets::CreateMonomorphic(Z, cls.id(), target_function));
ASSERT(call->Targets().IsMonomorphic());
// If we know that the only noSuchMethod is Object.noSuchMethod then
// this call is guaranteed to either succeed or throw.
@ -242,14 +241,9 @@ bool AotCallSpecializer::TryInlineFieldAccess(InstanceCallInstr* call) {
if ((op_kind == Token::kGET) && TryInlineInstanceGetter(call)) {
return true;
}
const ICData& unary_checks =
ICData::Handle(Z, call->ic_data()->AsUnaryClassChecks());
if (!unary_checks.NumberOfChecksIs(0) && (op_kind == Token::kSET) &&
TryInlineInstanceSetter(call, unary_checks)) {
if ((op_kind == Token::kSET) && TryInlineInstanceSetter(call)) {
return true;
}
return false;
}
@ -787,22 +781,18 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
return;
}
const CallTargets& targets = instr->Targets();
const intptr_t receiver_idx = instr->FirstArgIndex();
const ICData& unary_checks =
ICData::ZoneHandle(Z, instr->ic_data()->AsUnaryClassChecks());
const intptr_t number_of_checks = unary_checks.NumberOfChecks();
if (I->can_use_strong_mode_types()) {
// In AOT strong mode, we avoid deopting speculation.
// TODO(ajcbik): replace this with actual analysis phase
// that determines if checks are removed later.
} else if (speculative_policy_->IsAllowedForInlining(instr->deopt_id()) &&
number_of_checks > 0) {
if ((op_kind == Token::kINDEX) &&
TryReplaceWithIndexedOp(instr, &unary_checks)) {
!targets.is_empty()) {
if ((op_kind == Token::kINDEX) && TryReplaceWithIndexedOp(instr)) {
return;
}
if ((op_kind == Token::kASSIGN_INDEX) &&
TryReplaceWithIndexedOp(instr, &unary_checks)) {
if ((op_kind == Token::kASSIGN_INDEX) && TryReplaceWithIndexedOp(instr)) {
return;
}
if ((op_kind == Token::kEQ) && TryReplaceWithEqualityOp(instr, op_kind)) {
@ -833,25 +823,22 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
return;
}
bool has_one_target = number_of_checks > 0 && unary_checks.HasOneTarget();
bool has_one_target = targets.HasSingleTarget();
if (has_one_target) {
// Check if the single target is a polymorphic target, if it is,
// we don't have one target.
const Function& target = Function::Handle(Z, unary_checks.GetTargetAt(0));
const Function& target = targets.FirstTarget();
const bool polymorphic_target = MethodRecognizer::PolymorphicTarget(target);
has_one_target = !polymorphic_target;
}
if (has_one_target) {
RawFunction::Kind function_kind =
Function::Handle(Z, unary_checks.GetTargetAt(0)).kind();
const Function& target = targets.FirstTarget();
RawFunction::Kind function_kind = target.kind();
if (flow_graph()->CheckForInstanceCall(instr, function_kind) ==
FlowGraph::ToCheck::kNoCheck) {
CallTargets* targets = CallTargets::Create(Z, unary_checks);
ASSERT(targets->HasSingleTarget());
const Function& target = targets->FirstTarget();
StaticCallInstr* call = StaticCallInstr::FromCall(
Z, instr, target, targets->AggregateCallCount());
Z, instr, target, targets.AggregateCallCount());
instr->ReplaceWith(call, current_iterator());
return;
}
@ -864,7 +851,7 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
case Token::kLTE:
case Token::kGT:
case Token::kGTE: {
if (HasOnlyTwoOf(*instr->ic_data(), kSmiCid) ||
if (instr->BinaryFeedback().OperandsAre(kSmiCid) ||
HasLikelySmiOperand(instr)) {
ASSERT(receiver_idx == 0);
Definition* left = instr->ArgumentAt(0);
@ -885,7 +872,7 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
case Token::kADD:
case Token::kSUB:
case Token::kMUL: {
if (HasOnlyTwoOf(*instr->ic_data(), kSmiCid) ||
if (instr->BinaryFeedback().OperandsAre(kSmiCid) ||
HasLikelySmiOperand(instr)) {
ASSERT(receiver_idx == 0);
Definition* left = instr->ArgumentAt(0);
@ -1033,7 +1020,8 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
return;
} else if ((ic_data.raw() != ICData::null()) &&
!ic_data.NumberOfChecksIs(0)) {
CallTargets* targets = CallTargets::Create(Z, ic_data);
const CallTargets* targets = CallTargets::Create(Z, ic_data);
ASSERT(!targets->is_empty());
PolymorphicInstanceCallInstr* call =
new (Z) PolymorphicInstanceCallInstr(instr, *targets,
/* complete = */ true);
@ -1051,13 +1039,12 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
// More than one target. Generate generic polymorphic call without
// deoptimization.
if (instr->ic_data()->NumberOfUsedChecks() > 0) {
if (targets.length() > 0) {
ASSERT(!FLAG_polymorphic_with_deopt);
// OK to use checks with PolymorphicInstanceCallInstr since no
// deoptimization is allowed.
CallTargets* targets = CallTargets::Create(Z, *instr->ic_data());
PolymorphicInstanceCallInstr* call =
new (Z) PolymorphicInstanceCallInstr(instr, *targets,
new (Z) PolymorphicInstanceCallInstr(instr, targets,
/* complete = */ false);
instr->ReplaceWith(call, current_iterator());
return;
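
The first hunk above replaces a hand-built one-entry ICData with CallTargets::CreateMonomorphic. A standalone sketch of that seeding pattern, with invented, much-simplified types (the real factory also records a call count and exactness state):

    #include <string>
    #include <utility>

    struct CallTargets {
      long cid = -1;
      std::string target;
      bool IsMonomorphic() const { return cid >= 0; }
      static CallTargets CreateMonomorphic(long cid, std::string target) {
        return CallTargets{cid, std::move(target)};
      }
    };

    struct InstanceCall {  // stand-in for InstanceCallInstr
      CallTargets targets;
      void SetTargets(CallTargets t) { targets = std::move(t); }
    };

    int main() {
      InstanceCall call;
      call.SetTargets(CallTargets::CreateMonomorphic(42, "Foo.bar"));
      return call.targets.IsMonomorphic() ? 0 : 1;
    }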

@ -1108,8 +1108,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
zone(), MegamorphicCacheTable::LookupOriginal(thread(), name,
arguments_descriptor));
zone(),
MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
__ Comment("MegamorphicCall");
// Load receiver into R0.

@ -1074,8 +1074,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
zone(), MegamorphicCacheTable::LookupOriginal(thread(), name,
arguments_descriptor));
zone(),
MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
__ Comment("MegamorphicCall");
// Load receiver into R0.

@ -969,8 +969,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
zone(), MegamorphicCacheTable::LookupOriginal(thread(), name,
arguments_descriptor));
zone(),
MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
__ Comment("MegamorphicCall");
// Load receiver into EBX.

@ -1094,8 +1094,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
const ArgumentsDescriptor args_desc(arguments_descriptor);
const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
zone(), MegamorphicCacheTable::LookupOriginal(thread(), name,
arguments_descriptor));
zone(),
MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
__ Comment("MegamorphicCall");
// Load receiver into RDX.
__ movq(RDX, compiler::Address(RSP, (args_desc.Count() - 1) * kWordSize));
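
The EmitMegamorphicInstanceCall copies above (one per target architecture, judging by the receiver registers) switch from MegamorphicCacheTable::LookupOriginal to a single Lookup; together with the removal of cloned_megamorphic_caches from CompilerState further down, this suggests the compiler no longer works on private clones of the caches. Assuming Lookup is a get-or-create on a table keyed by selector and arguments descriptor, the pattern is roughly:

    // Assumption-laden sketch, not the VM's code.
    #include <map>
    #include <string>

    struct Cache { std::string key; };

    Cache& Lookup(std::map<std::string, Cache>& table, const std::string& key) {
      auto it = table.find(key);
      if (it == table.end())  // create on first use; shared thereafter
        it = table.emplace(key, Cache{key}).first;
      return it->second;
    }

    int main() {
      std::map<std::string, Cache> table;
      Cache& a = Lookup(table, "toString:1");
      Cache& b = Lookup(table, "toString:1");
      return &a == &b ? 0 : 1;  // repeat lookups share one cache
    }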

@ -650,16 +650,22 @@ Cids* Cids::CreateMonomorphic(Zone* zone, intptr_t cid) {
return cids;
}
Cids* Cids::CreateAndExpand(Zone* zone,
const ICData& ic_data,
int argument_number) {
Cids* Cids::CreateForArgument(Zone* zone,
const BinaryFeedback& binary_feedback,
int argument_number) {
Cids* cids = new (zone) Cids(zone);
cids->CreateHelper(zone, ic_data, argument_number,
/* include_targets = */ false);
cids->Sort(OrderById);
for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) {
ASSERT((argument_number == 0) || (argument_number == 1));
const intptr_t cid = argument_number == 0
? binary_feedback.feedback_[i].first
: binary_feedback.feedback_[i].second;
cids->Add(new (zone) CidRange(cid, cid));
}
// Merge adjacent class id ranges.
{
if (cids->length() != 0) {
cids->Sort(OrderById);
// Merge adjacent class id ranges.
int dest = 0;
for (int src = 1; src < cids->length(); src++) {
if (cids->cid_ranges_[dest]->cid_end + 1 >=
@ -673,53 +679,6 @@ Cids* Cids::CreateAndExpand(Zone* zone,
cids->SetLength(dest + 1);
}
// Merging/extending cid ranges is also done in CallTargets::CreateAndExpand.
// If changing this code, consider also adjusting CallTargets code.
if (cids->length() > 1 && argument_number == 0 && ic_data.HasOneTarget()) {
// Try harder to merge ranges if method lookups in the gaps result in the
// same target method.
const Function& target = Function::Handle(zone, ic_data.GetTargetAt(0));
if (!MethodRecognizer::PolymorphicTarget(target)) {
const auto& args_desc_array =
Array::Handle(zone, ic_data.arguments_descriptor());
ArgumentsDescriptor args_desc(args_desc_array);
const auto& name = String::Handle(zone, ic_data.target_name());
auto& fn = Function::Handle(zone);
intptr_t dest = 0;
for (intptr_t src = 1; src < cids->length(); src++) {
// Inspect all cids in the gap and see if they all resolve to the same
// target.
bool can_merge = true;
for (intptr_t cid = cids->cid_ranges_[dest]->cid_end + 1,
end = cids->cid_ranges_[src]->cid_start;
cid < end; ++cid) {
bool class_is_abstract = false;
if (FlowGraphCompiler::LookupMethodFor(cid, name, args_desc, &fn,
&class_is_abstract)) {
if (fn.raw() == target.raw()) {
continue;
}
if (class_is_abstract) {
continue;
}
}
can_merge = false;
break;
}
if (can_merge) {
cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end;
} else {
dest++;
if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src];
}
}
cids->SetLength(dest + 1);
}
}
return cids;
}
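
The sort-and-merge step kept in Cids::CreateForArgument (the deleted block after it, which probed method lookups in the gaps between ranges, appears to survive only on the CallTargets::CreateAndExpand side) is the classic adjacent-interval merge. As a runnable sketch over plain std:: types:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using CidRange = std::pair<long, long>;  // [cid_start, cid_end]

    void MergeAdjacent(std::vector<CidRange>* ranges) {
      if (ranges->empty()) return;
      std::sort(ranges->begin(), ranges->end());
      size_t dest = 0;
      for (size_t src = 1; src < ranges->size(); src++) {
        if ((*ranges)[dest].second + 1 >= (*ranges)[src].first) {
          // Adjacent or overlapping: extend the current range.
          (*ranges)[dest].second =
              std::max((*ranges)[dest].second, (*ranges)[src].second);
        } else {
          (*ranges)[++dest] = (*ranges)[src];
        }
      }
      ranges->resize(dest + 1);
    }

    int main() {
      std::vector<CidRange> r = {{5, 5}, {3, 3}, {4, 4}, {9, 9}};
      MergeAdjacent(&r);
      for (const CidRange& p : r) std::printf("[%ld,%ld] ", p.first, p.second);
      // prints [3,5] [9,9]
    }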
@ -740,64 +699,55 @@ static intptr_t Usage(const Function& function) {
return count;
}
void Cids::CreateHelper(Zone* zone,
const ICData& ic_data,
int argument_number,
bool include_targets) {
ASSERT(argument_number < ic_data.NumArgsTested());
if (ic_data.NumberOfChecks() == 0) return;
void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) {
Function& dummy = Function::Handle(zone);
bool check_one_arg = ic_data.NumArgsTested() == 1;
const intptr_t num_args_tested = ic_data.NumArgsTested();
int checks = ic_data.NumberOfChecks();
for (int i = 0; i < checks; i++) {
if (ic_data.GetCountAt(i) == 0) continue;
intptr_t id = 0;
if (check_one_arg) {
for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
if (ic_data.GetCountAt(i) == 0) {
continue;
}
intptr_t id = kDynamicCid;
if (num_args_tested == 0) {
} else if (num_args_tested == 1) {
ic_data.GetOneClassCheckAt(i, &id, &dummy);
} else {
ASSERT(num_args_tested == 2);
GrowableArray<intptr_t> arg_ids;
ic_data.GetCheckAt(i, &arg_ids, &dummy);
id = arg_ids[argument_number];
}
if (include_targets) {
Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i));
intptr_t count = ic_data.GetCountAt(i);
cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count,
ic_data.GetExactnessAt(i)));
} else {
cid_ranges_.Add(new (zone) CidRange(id, id));
id = arg_ids[0];
}
Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i));
intptr_t count = ic_data.GetCountAt(i);
cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count,
ic_data.GetExactnessAt(i)));
}
if (ic_data.is_megamorphic()) {
ASSERT(num_args_tested == 1); // Only 1-arg ICData will turn megamorphic.
const String& name = String::Handle(zone, ic_data.target_name());
const Array& descriptor =
Array::Handle(zone, ic_data.arguments_descriptor());
Thread* thread = Thread::Current();
const MegamorphicCache& cache = MegamorphicCache::Handle(
zone, MegamorphicCacheTable::LookupClone(Thread::Current(), name,
descriptor));
zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
SafepointMutexLocker ml(thread->isolate()->megamorphic_mutex());
MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets()));
for (intptr_t i = 0; i < entries.Length(); i++) {
for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
const intptr_t id =
Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
if (id == kIllegalCid) {
continue;
}
if (include_targets) {
Function& function = Function::ZoneHandle(zone);
function ^= entries[i].Get<MegamorphicCache::kTargetFunctionIndex>();
const intptr_t filled_entry_count = cache.filled_entry_count();
ASSERT(filled_entry_count > 0);
cid_ranges_.Add(new (zone) TargetInfo(
id, id, &function, Usage(function) / filled_entry_count,
StaticTypeExactnessState::NotTracking()));
} else {
cid_ranges_.Add(new (zone) CidRange(id, id));
}
Function& function = Function::ZoneHandle(zone);
function ^= entries[i].Get<MegamorphicCache::kTargetFunctionIndex>();
const intptr_t filled_entry_count = cache.filled_entry_count();
ASSERT(filled_entry_count > 0);
cid_ranges_.Add(new (zone) TargetInfo(
id, id, &function, Usage(function) / filled_entry_count,
StaticTypeExactnessState::NotTracking()));
}
}
}
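
When the ICData has gone megamorphic, CreateHelper above also walks the megamorphic cache (under the isolate's megamorphic mutex). The cache stores no per-entry counts, so the code estimates each entry's weight by splitting the target function's usage counter evenly across filled entries. That estimate in isolation:

    #include <cstdint>

    int64_t EstimatedEntryCount(int64_t function_usage,
                                int64_t filled_entry_count) {
      // The real code asserts filled_entry_count > 0.
      return function_usage / filled_entry_count;
    }

    int main() { return EstimatedEntryCount(1000, 4) == 250 ? 0 : 1; }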
@ -812,6 +762,11 @@ intptr_t Cids::MonomorphicReceiverCid() const {
return cid_ranges_[0]->cid_start;
}
StaticTypeExactnessState CallTargets::MonomorphicExactness() const {
ASSERT(IsMonomorphic());
return TargetAt(0)->exactness;
}
CheckClassInstr::CheckClassInstr(Value* value,
intptr_t deopt_id,
const Cids& cids,
@ -3789,19 +3744,58 @@ bool UnboxInstr::CanConvertSmi() const {
}
}
CallTargets* CallTargets::Create(Zone* zone, const ICData& ic_data) {
const BinaryFeedback* BinaryFeedback::Create(Zone* zone,
const ICData& ic_data) {
BinaryFeedback* result = new (zone) BinaryFeedback(zone);
if (ic_data.NumArgsTested() == 2) {
for (intptr_t i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
if (ic_data.GetCountAt(i) == 0) {
continue;
}
GrowableArray<intptr_t> arg_ids;
ic_data.GetClassIdsAt(i, &arg_ids);
result->feedback_.Add({arg_ids[0], arg_ids[1]});
}
}
return result;
}
const BinaryFeedback* BinaryFeedback::CreateMonomorphic(Zone* zone,
intptr_t receiver_cid,
intptr_t argument_cid) {
BinaryFeedback* result = new (zone) BinaryFeedback(zone);
result->feedback_.Add({receiver_cid, argument_cid});
return result;
}
const CallTargets* CallTargets::CreateMonomorphic(Zone* zone,
intptr_t receiver_cid,
const Function& target) {
CallTargets* targets = new (zone) CallTargets(zone);
targets->CreateHelper(zone, ic_data, /* argument_number = */ 0,
/* include_targets = */ true);
const intptr_t count = 1;
targets->cid_ranges_.Add(new (zone) TargetInfo(
receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.raw()),
count, StaticTypeExactnessState::NotTracking()));
return targets;
}
const CallTargets* CallTargets::Create(Zone* zone, const ICData& ic_data) {
CallTargets* targets = new (zone) CallTargets(zone);
targets->CreateHelper(zone, ic_data);
targets->Sort(OrderById);
targets->MergeIntoRanges();
return targets;
}
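
Both factories now return const CallTargets*, so a freshly built summary is read-only to downstream passes. A toy version of that shape (invented types; the VM zone-allocates these rather than using new/delete):

    #include <utility>
    #include <vector>

    class CallTargets {
     public:
      static const CallTargets* Create(std::vector<long> cids) {
        CallTargets* t = new CallTargets();
        t->cids_ = std::move(cids);
        return t;  // callers get a read-only view
      }
      bool is_empty() const { return cids_.empty(); }
     private:
      CallTargets() = default;
      std::vector<long> cids_;
    };

    int main() {
      const CallTargets* t = CallTargets::Create({10, 11});
      bool empty = t->is_empty();
      delete t;  // sketch only; not how zone memory works
      return empty ? 1 : 0;
    }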
CallTargets* CallTargets::CreateAndExpand(Zone* zone, const ICData& ic_data) {
const CallTargets* CallTargets::CreateAndExpand(Zone* zone,
const ICData& ic_data) {
CallTargets& targets = *new (zone) CallTargets(zone);
targets.CreateHelper(zone, ic_data, /* argument_number = */ 0,
/* include_targets = */ true);
targets.CreateHelper(zone, ic_data);
if (targets.is_empty() || targets.IsMonomorphic()) {
return &targets;
}
targets.Sort(OrderById);
Array& args_desc_array = Array::Handle(zone, ic_data.arguments_descriptor());
@ -3885,6 +3879,10 @@ CallTargets* CallTargets::CreateAndExpand(Zone* zone, const ICData& ic_data) {
}
void CallTargets::MergeIntoRanges() {
if (length() == 0) {
return; // For correctness not performance: must not update length to 1.
}
// Merge adjacent class id ranges.
int dest = 0;
// We merge entries that dispatch to the same target, but polymorphic targets
@ -4533,6 +4531,56 @@ RawFunction* InstanceCallInstr::ResolveForReceiverClass(
args_desc, allow_add);
}
const CallTargets& InstanceCallInstr::Targets() {
if (targets_ == nullptr) {
Zone* zone = Thread::Current()->zone();
if (HasICData()) {
targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
} else {
targets_ = new (zone) CallTargets(zone);
ASSERT(targets_->is_empty());
}
}
return *targets_;
}
const BinaryFeedback& InstanceCallInstr::BinaryFeedback() {
if (binary_ == nullptr) {
Zone* zone = Thread::Current()->zone();
if (HasICData()) {
binary_ = BinaryFeedback::Create(zone, *ic_data());
} else {
binary_ = new (zone) class BinaryFeedback(zone);
}
}
return *binary_;
}
const CallTargets& StaticCallInstr::Targets() {
if (targets_ == nullptr) {
Zone* zone = Thread::Current()->zone();
if (HasICData()) {
targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
} else {
targets_ = new (zone) CallTargets(zone);
ASSERT(targets_->is_empty());
}
}
return *targets_;
}
const BinaryFeedback& StaticCallInstr::BinaryFeedback() {
if (binary_ == nullptr) {
Zone* zone = Thread::Current()->zone();
if (HasICData()) {
binary_ = BinaryFeedback::Create(zone, *ic_data());
} else {
binary_ = new (zone) class BinaryFeedback(zone);
}
}
return *binary_;
}
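
All four accessors above share one lazy-initialization pattern: build the summary from ic_data() on first use, or cache an empty one when no feedback exists, so later queries need no null checks. Reduced to a standalone sketch (the leak is deliberate shorthand; the VM allocates these in a zone):

    #include <vector>

    struct Summary {
      std::vector<long> cids;
      bool empty() const { return cids.empty(); }
    };

    class Call {
     public:
      const Summary& Targets() {
        if (targets_ == nullptr) {
          targets_ = has_feedback_ ? BuildFromFeedback() : new Summary();
        }
        return *targets_;
      }

     private:
      Summary* BuildFromFeedback() { return new Summary{{10}}; }  // placeholder
      Summary* targets_ = nullptr;
      bool has_feedback_ = false;
    };

    int main() {
      Call c;
      return c.Targets().empty() ? 0 : 1;  // no feedback: cached empty summary
    }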
bool CallTargets::HasSingleRecognizedTarget() const {
if (!HasSingleTarget()) return false;
return MethodRecognizer::RecognizeKind(FirstTarget()) !=
@ -4540,7 +4588,7 @@ bool CallTargets::HasSingleRecognizedTarget() const {
}
bool CallTargets::HasSingleTarget() const {
ASSERT(length() != 0);
if (length() == 0) return false;
for (int i = 0; i < length(); i++) {
if (TargetAt(i)->target->raw() != TargetAt(0)->target->raw()) return false;
}
@ -4645,7 +4693,7 @@ Definition* InstanceCallInstr::Canonicalize(FlowGraph* flow_graph) {
// TODO(dartbug.com/37291): Allow this optimization, but accumulate affected
// InstanceCallInstrs and the corresponding receiver cids during compilation.
// After compilation, add receiver checks to the ICData for those call sites.
if (ic_data()->NumberOfUsedChecks() == 0) return this;
if (Targets().is_empty()) return this;
const CallTargets* new_target =
FlowGraphCompiler::ResolveCallTargetsForReceiverCid(

@ -5,6 +5,8 @@
#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_
#define RUNTIME_VM_COMPILER_BACKEND_IL_H_
#include <utility>
#include "vm/allocation.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/backend/compile_type.h"
@ -24,6 +26,7 @@
namespace dart {
class BinaryFeedback;
class BitVector;
class BlockEntryInstr;
class BlockEntryWithInitialDefs;
@ -46,10 +49,10 @@ class ParsedFunction;
class Range;
class RangeAnalysis;
class RangeBoundary;
class SExpression;
class SExpList;
class UnboxIntegerInstr;
class SExpression;
class TypeUsageInfo;
class UnboxIntegerInstr;
namespace compiler {
class BlockBuilder;
@ -573,14 +576,14 @@ struct TargetInfo : public CidRange {
// and PolymorphicInstanceCall instructions.
class Cids : public ZoneAllocated {
public:
explicit Cids(Zone* zone) : zone_(zone) {}
explicit Cids(Zone* zone) : cid_ranges_(zone, 6) {}
// Creates the off-heap Cids object that reflects the contents
// of the on-VM-heap IC data.
// Ranges of Cids are merged if there is only one target function and
// it is used for all cids in the gaps between ranges.
static Cids* CreateAndExpand(Zone* zone,
const ICData& ic_data,
int argument_number);
static Cids* CreateForArgument(Zone* zone,
const BinaryFeedback& binary_feedback,
int argument_number);
static Cids* CreateMonomorphic(Zone* zone, intptr_t cid);
bool Equals(const Cids& other) const;
@ -609,12 +612,7 @@ class Cids : public ZoneAllocated {
intptr_t ComputeHighestCid() const;
protected:
void CreateHelper(Zone* zone,
const ICData& ic_data,
int argument_number,
bool include_targets);
GrowableArray<CidRange*> cid_ranges_;
Zone* zone_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Cids);
@ -623,18 +621,24 @@ class Cids : public ZoneAllocated {
class CallTargets : public Cids {
public:
explicit CallTargets(Zone* zone) : Cids(zone) {}
static const CallTargets* CreateMonomorphic(Zone* zone,
intptr_t receiver_cid,
const Function& target);
// Creates the off-heap CallTargets object that reflects the contents
// of the on-VM-heap IC data.
static CallTargets* Create(Zone* zone, const ICData& ic_data);
static const CallTargets* Create(Zone* zone, const ICData& ic_data);
// This variant also expands the class-ids to neighbouring classes that
// inherit the same method.
static CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data);
static const CallTargets* CreateAndExpand(Zone* zone, const ICData& ic_data);
TargetInfo* TargetAt(int i) const { return static_cast<TargetInfo*>(At(i)); }
intptr_t AggregateCallCount() const;
StaticTypeExactnessState MonomorphicExactness() const;
bool HasSingleTarget() const;
bool HasSingleRecognizedTarget() const;
const Function& FirstTarget() const;
@ -642,10 +646,96 @@ class CallTargets : public Cids {
void Print() const;
bool ReceiverIs(intptr_t cid) const {
return IsMonomorphic() && MonomorphicReceiverCid() == cid;
}
bool ReceiverIsSmiOrMint() const {
if (cid_ranges_.is_empty()) {
return false;
}
for (intptr_t i = 0, n = cid_ranges_.length(); i < n; i++) {
for (intptr_t j = cid_ranges_[i]->cid_start; j <= cid_ranges_[i]->cid_end;
j++) {
if (j != kSmiCid && j != kMintCid) {
return false;
}
}
}
return true;
}
private:
void CreateHelper(Zone* zone, const ICData& ic_data);
void MergeIntoRanges();
};
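
ReceiverIsSmiOrMint above enumerates every cid inside every range, which stays cheap only because ranges containing nothing but kSmiCid and kMintCid are tiny. The same walk over simplified types:

    #include <utility>
    #include <vector>

    bool RangesContainOnly(const std::vector<std::pair<long, long>>& ranges,
                           long cid_a, long cid_b) {
      if (ranges.empty()) return false;  // no feedback: claim nothing
      for (const auto& r : ranges)
        for (long cid = r.first; cid <= r.second; cid++)
          if (cid != cid_a && cid != cid_b) return false;
      return true;
    }

    int main() {
      const long kSmi = 1, kMint = 2;  // made-up cid values
      return RangesContainOnly({{1, 2}}, kSmi, kMint) ? 0 : 1;
    }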
// Represents type feedback for the binary operators, and a few recognized
// static functions (see MethodRecognizer::NumArgsCheckedForStaticCall).
class BinaryFeedback : public ZoneAllocated {
public:
explicit BinaryFeedback(Zone* zone) : feedback_(zone, 2) {}
static const BinaryFeedback* Create(Zone* zone, const ICData& ic_data);
static const BinaryFeedback* CreateMonomorphic(Zone* zone,
intptr_t receiver_cid,
intptr_t argument_cid);
bool ArgumentIs(intptr_t cid) const {
if (feedback_.is_empty()) {
return false;
}
for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
if (feedback_[i].second != cid) {
return false;
}
}
return true;
}
bool OperandsAreEither(intptr_t cid_a, intptr_t cid_b) const {
if (feedback_.is_empty()) {
return false;
}
for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
if ((feedback_[i].first != cid_a) && (feedback_[i].first != cid_b)) {
return false;
}
if ((feedback_[i].second != cid_a) && (feedback_[i].second != cid_b)) {
return false;
}
}
return true;
}
bool OperandsAreSmiOrNull() const {
return OperandsAreEither(kSmiCid, kNullCid);
}
bool OperandsAreSmiOrMint() const {
return OperandsAreEither(kSmiCid, kMintCid);
}
bool OperandsAreSmiOrDouble() const {
return OperandsAreEither(kSmiCid, kDoubleCid);
}
bool OperandsAre(intptr_t cid) const {
if (feedback_.length() != 1) return false;
return (feedback_[0].first == cid) && (feedback_[0].second == cid);
}
bool IncludesOperands(intptr_t cid) const {
for (intptr_t i = 0, n = feedback_.length(); i < n; i++) {
if ((feedback_[i].first == cid) && (feedback_[i].second == cid)) {
return true;
}
}
return false;
}
private:
GrowableArray<std::pair<intptr_t, intptr_t>> feedback_;
friend class Cids;
};
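
The BinaryFeedback predicates replace the ad-hoc ICData helpers deleted from call_specializer.cc further down (HasOnlyTwoOf, HasTwoMintOrSmi, and friends). Two of them restated over a plain pair list, with made-up cid constants:

    #include <utility>
    #include <vector>

    using Pair = std::pair<long, long>;  // (receiver cid, argument cid)

    bool OperandsAre(const std::vector<Pair>& feedback, long cid) {
      return feedback.size() == 1 &&
             feedback[0].first == cid && feedback[0].second == cid;
    }

    bool OperandsAreEither(const std::vector<Pair>& feedback, long a, long b) {
      if (feedback.empty()) return false;
      for (const Pair& p : feedback) {
        if (p.first != a && p.first != b) return false;
        if (p.second != a && p.second != b) return false;
      }
      return true;
    }

    int main() {
      const long kSmi = 1, kDouble = 2;
      std::vector<Pair> seen = {{kSmi, kDouble}, {kDouble, kDouble}};
      bool smi_only = OperandsAre(seen, kSmi);                      // false
      bool smi_or_double = OperandsAreEither(seen, kSmi, kDouble);  // true
      return !smi_only && smi_or_double ? 0 : 1;
    }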
class Instruction : public ZoneAllocated {
public:
#define DECLARE_TAG(type, attrs) k##type,
@ -3583,12 +3673,22 @@ class InstanceCallInstr : public TemplateDartCall<0> {
void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
const CallTargets& Targets();
void SetTargets(const CallTargets* targets) { targets_ = targets; }
const BinaryFeedback& BinaryFeedback();
void SetBinaryFeedback(const class BinaryFeedback* binary) {
binary_ = binary;
}
protected:
friend class CallSpecializer;
void set_ic_data(ICData* value) { ic_data_ = value; }
private:
const ICData* ic_data_;
const CallTargets* targets_ = nullptr;
const class BinaryFeedback* binary_ = nullptr;
const String& function_name_;
const Token::Kind token_kind_; // Binary op, unary op, kGET or kILLEGAL.
const intptr_t checked_argument_count_;
@ -4106,12 +4206,17 @@ class StaticCallInstr : public TemplateDartCall<0> {
virtual AliasIdentity Identity() const { return identity_; }
virtual void SetIdentity(AliasIdentity identity) { identity_ = identity; }
const CallTargets& Targets();
const BinaryFeedback& BinaryFeedback();
PRINT_OPERANDS_TO_SUPPORT
ADD_OPERANDS_TO_S_EXPRESSION_SUPPORT
ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
private:
const ICData* ic_data_;
const CallTargets* targets_ = nullptr;
const class BinaryFeedback* binary_ = nullptr;
const intptr_t call_count_;
const Function& function_;
const ICData::RebindRule rebind_rule_;

@ -3307,15 +3307,16 @@ bool FlowGraphInliner::TryReplaceInstanceCallWithInline(
ForwardInstructionIterator* iterator,
InstanceCallInstr* call,
SpeculativeInliningPolicy* policy) {
Function& target = Function::Handle(Z);
GrowableArray<intptr_t> class_ids;
call->ic_data()->GetCheckAt(0, &class_ids, &target);
const intptr_t receiver_cid = class_ids[0];
const CallTargets& targets = call->Targets();
ASSERT(targets.IsMonomorphic());
const intptr_t receiver_cid = targets.MonomorphicReceiverCid();
const Function& target = targets.FirstTarget();
const auto exactness = targets.MonomorphicExactness();
ExactnessInfo exactness_info{exactness.IsExact(), false};
FunctionEntryInstr* entry = nullptr;
Instruction* last = nullptr;
Definition* result = nullptr;
auto exactness = call->ic_data()->GetExactnessAt(0);
ExactnessInfo exactness_info{exactness.IsExact(), false};
if (FlowGraphInliner::TryInlineRecognizedMethod(
flow_graph, receiver_cid, target, call,
call->Receiver()->definition(), call->token_pos(), call->ic_data(),
@ -3338,8 +3339,7 @@ bool FlowGraphInliner::TryReplaceInstanceCallWithInline(
switch (check) {
case FlowGraph::ToCheck::kCheckCid: {
Instruction* check_class = flow_graph->CreateCheckClass(
call->Receiver()->definition(),
*Cids::CreateAndExpand(Z, *call->ic_data(), 0), call->deopt_id(),
call->Receiver()->definition(), targets, call->deopt_id(),
call->token_pos());
flow_graph->InsertBefore(call, check_class, call->env(),
FlowGraph::kEffect);
@ -3982,16 +3982,12 @@ bool FlowGraphInliner::TryInlineRecognizedMethod(
case MethodRecognizer::kGrowableArraySetData:
ASSERT((receiver_cid == kGrowableObjectArrayCid) ||
((receiver_cid == kDynamicCid) && call->IsStaticCall()));
ASSERT(call->IsStaticCall() ||
(ic_data == NULL || ic_data->NumberOfChecksIs(1)));
return InlineGrowableArraySetter(
flow_graph, Slot::GrowableObjectArray_data(), kEmitStoreBarrier, call,
receiver, graph_entry, entry, last, result);
case MethodRecognizer::kGrowableArraySetLength:
ASSERT((receiver_cid == kGrowableObjectArrayCid) ||
((receiver_cid == kDynamicCid) && call->IsStaticCall()));
ASSERT(call->IsStaticCall() ||
(ic_data == NULL || ic_data->NumberOfChecksIs(1)));
return InlineGrowableArraySetter(
flow_graph, Slot::GrowableObjectArray_length(), kNoStoreBarrier, call,
receiver, graph_entry, entry, last, result);

@ -40,131 +40,19 @@ static bool IsNumberCid(intptr_t cid) {
return (cid == kSmiCid) || (cid == kDoubleCid);
}
static bool ClassIdIsOneOf(intptr_t class_id,
const GrowableArray<intptr_t>& class_ids) {
for (intptr_t i = 0; i < class_ids.length(); i++) {
ASSERT(class_ids[i] != kIllegalCid);
if (class_ids[i] == class_id) {
return true;
}
}
return false;
}
// Returns true if ICData tests two arguments and all ICData cids are in the
// required sets 'receiver_class_ids' or 'argument_class_ids', respectively.
static bool ICDataHasOnlyReceiverArgumentClassIds(
const ICData& ic_data,
const GrowableArray<intptr_t>& receiver_class_ids,
const GrowableArray<intptr_t>& argument_class_ids) {
if (ic_data.NumArgsTested() != 2) {
return false;
}
const intptr_t len = ic_data.NumberOfChecks();
GrowableArray<intptr_t> class_ids;
for (intptr_t i = 0; i < len; i++) {
if (ic_data.IsUsedAt(i)) {
ic_data.GetClassIdsAt(i, &class_ids);
ASSERT(class_ids.length() == 2);
if (!ClassIdIsOneOf(class_ids[0], receiver_class_ids) ||
!ClassIdIsOneOf(class_ids[1], argument_class_ids)) {
return false;
}
}
}
return true;
}
static bool ICDataHasReceiverArgumentClassIds(const ICData& ic_data,
intptr_t receiver_class_id,
intptr_t argument_class_id) {
if (ic_data.NumArgsTested() != 2) {
return false;
}
const intptr_t len = ic_data.NumberOfChecks();
for (intptr_t i = 0; i < len; i++) {
if (ic_data.IsUsedAt(i)) {
GrowableArray<intptr_t> class_ids;
ic_data.GetClassIdsAt(i, &class_ids);
ASSERT(class_ids.length() == 2);
if ((class_ids[0] == receiver_class_id) &&
(class_ids[1] == argument_class_id)) {
return true;
}
}
}
return false;
}
static bool HasOnlyOneSmi(const ICData& ic_data) {
return (ic_data.NumberOfUsedChecks() == 1) &&
ic_data.HasReceiverClassId(kSmiCid);
}
static bool HasOnlySmiOrMint(const ICData& ic_data) {
if (ic_data.NumberOfUsedChecks() == 1) {
return ic_data.HasReceiverClassId(kSmiCid) ||
ic_data.HasReceiverClassId(kMintCid);
}
return (ic_data.NumberOfUsedChecks() == 2) &&
ic_data.HasReceiverClassId(kSmiCid) &&
ic_data.HasReceiverClassId(kMintCid);
}
bool CallSpecializer::HasOnlyTwoOf(const ICData& ic_data, intptr_t cid) {
if (ic_data.NumberOfUsedChecks() != 1) {
return false;
}
GrowableArray<intptr_t> first;
GrowableArray<intptr_t> second;
ic_data.GetUsedCidsForTwoArgs(&first, &second);
return (first[0] == cid) && (second[0] == cid);
}
// Returns false if the ICData contains anything other than the 4 combinations
// of Mint and Smi for the receiver and argument classes.
static bool HasTwoMintOrSmi(const ICData& ic_data) {
GrowableArray<intptr_t> first;
GrowableArray<intptr_t> second;
ic_data.GetUsedCidsForTwoArgs(&first, &second);
for (intptr_t i = 0; i < first.length(); i++) {
if ((first[i] != kSmiCid) && (first[i] != kMintCid)) {
return false;
}
if ((second[i] != kSmiCid) && (second[i] != kMintCid)) {
return false;
}
}
return true;
}
// Returns false if the ICData contains anything other than the 4 combinations
// of Double and Smi for the receiver and argument classes.
static bool HasTwoDoubleOrSmi(const ICData& ic_data) {
GrowableArray<intptr_t> class_ids(2);
class_ids.Add(kSmiCid);
class_ids.Add(kDoubleCid);
return ICDataHasOnlyReceiverArgumentClassIds(ic_data, class_ids, class_ids);
}
static bool HasOnlyOneDouble(const ICData& ic_data) {
return (ic_data.NumberOfUsedChecks() == 1) &&
ic_data.HasReceiverClassId(kDoubleCid);
}
static bool ShouldSpecializeForDouble(const ICData& ic_data) {
static bool ShouldSpecializeForDouble(const BinaryFeedback& binary_feedback) {
// Don't specialize for double if we can't unbox them.
if (!CanUnboxDouble()) {
return false;
}
// Unboxed double operation can't handle case of two smis.
if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) {
if (binary_feedback.IncludesOperands(kSmiCid)) {
return false;
}
// Check that it have seen only smis and doubles.
return HasTwoDoubleOrSmi(ic_data);
// Check that the call site has seen only smis and doubles.
return binary_feedback.OperandsAreSmiOrDouble();
}
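
ShouldSpecializeForDouble now reads straight off BinaryFeedback: refuse if a (Smi, Smi) pair was observed, since the unboxed double operation cannot handle two smis, then require every observed pair to be some Smi/Double combination. As one standalone predicate with invented cid constants:

    #include <utility>
    #include <vector>

    bool ShouldSpecializeForDouble(
        const std::vector<std::pair<int, int>>& feedback,
        bool can_unbox_double) {
      const int kSmi = 1, kDouble = 2;  // made-up cid values
      if (!can_unbox_double) return false;
      if (feedback.empty()) return false;
      for (const auto& p : feedback) {
        // IncludesOperands(kSmiCid): a two-smi case was actually seen.
        if (p.first == kSmi && p.second == kSmi) return false;
        // !OperandsAreSmiOrDouble(): something other than smi/double seen.
        if ((p.first != kSmi && p.first != kDouble) ||
            (p.second != kSmi && p.second != kDouble))
          return false;
      }
      return true;
    }

    int main() {
      // A (Smi, Smi) pair blocks specialization even among smis/doubles.
      return ShouldSpecializeForDouble({{1, 1}, {2, 2}}, true) ? 1 : 0;
    }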
// Optimize instance calls using ICData.
@ -208,7 +96,8 @@ void CallSpecializer::ApplyClassIds() {
bool CallSpecializer::TryCreateICData(InstanceCallInstr* call) {
ASSERT(call->HasICData());
if (call->ic_data()->NumberOfUsedChecks() > 0) {
if (call->Targets().length() > 0) {
// This occurs when an instance call has too many checks and will be
// converted to a megamorphic call.
return false;
@ -272,17 +161,14 @@ bool CallSpecializer::TryCreateICData(InstanceCallInstr* call) {
return false;
}
// Create new ICData, do not modify the one attached to the instruction
// since it is attached to the assembly instruction itself.
const ICData& ic_data = ICData::ZoneHandle(
Z, ICData::NewFrom(*call->ic_data(), class_ids.length()));
if (class_ids.length() > 1) {
ic_data.AddCheck(class_ids, function);
} else {
ASSERT(class_ids.length() == 1);
ic_data.AddReceiverCheck(class_ids[0], function);
// Update the CallTargets attached to the instruction with our speculative
// target. The next round of CallSpecializer::VisitInstanceCall will make
// use of this.
call->SetTargets(CallTargets::CreateMonomorphic(Z, class_ids[0], function));
if (class_ids.length() == 2) {
call->SetBinaryFeedback(
BinaryFeedback::CreateMonomorphic(Z, class_ids[0], class_ids[1]));
}
call->set_ic_data(&ic_data);
return true;
}
@ -365,11 +251,11 @@ void CallSpecializer::AddCheckClass(Definition* to_check,
}
void CallSpecializer::AddChecksForArgNr(InstanceCallInstr* call,
Definition* instr,
Definition* argument,
int argument_number) {
const Cids* cids =
Cids::CreateAndExpand(Z, *call->ic_data(), argument_number);
AddCheckClass(instr, *cids, call->deopt_id(), call->env(), call);
Cids::CreateForArgument(zone(), call->BinaryFeedback(), argument_number);
AddCheckClass(argument, *cids, call->deopt_id(), call->env(), call);
}
void CallSpecializer::AddCheckNull(Value* to_check,
@ -390,30 +276,12 @@ void CallSpecializer::AddCheckNull(Value* to_check,
}
}
static bool ArgIsAlways(intptr_t cid,
const ICData& ic_data,
intptr_t arg_number) {
ASSERT(ic_data.NumArgsTested() > arg_number);
if (ic_data.NumberOfUsedChecks() == 0) {
return false;
bool CallSpecializer::TryReplaceWithIndexedOp(InstanceCallInstr* call) {
if (call->Targets().IsMonomorphic()) {
return FlowGraphInliner::TryReplaceInstanceCallWithInline(
flow_graph_, current_iterator(), call, speculative_policy_);
}
const intptr_t num_checks = ic_data.NumberOfChecks();
for (intptr_t i = 0; i < num_checks; i++) {
if (ic_data.IsUsedAt(i) && ic_data.GetClassIdAt(i, arg_number) != cid) {
return false;
}
}
return true;
}
bool CallSpecializer::TryReplaceWithIndexedOp(InstanceCallInstr* call,
const ICData* unary_checks) {
// Check for monomorphic IC data.
if (!unary_checks->NumberOfChecksIs(1)) {
return false;
}
return FlowGraphInliner::TryReplaceInstanceCallWithInline(
flow_graph_, current_iterator(), call, speculative_policy_);
return false;
}
// Return true if d is a string of length one (a constant or result from
@ -436,7 +304,7 @@ static bool IsLengthOneString(Definition* d) {
// E.g., detect str[x] == "x"; and use an integer comparison of char-codes.
bool CallSpecializer::TryStringLengthOneEquality(InstanceCallInstr* call,
Token::Kind op_kind) {
ASSERT(HasOnlyTwoOf(*call->ic_data(), kOneByteStringCid));
ASSERT(call->BinaryFeedback().OperandsAre(kOneByteStringCid));
// Check that left and right are length one strings (either string constants
// or results of string-from-char-code).
Definition* left = call->ArgumentAt(0);
@ -516,8 +384,7 @@ static bool SmiFitsInDouble() {
bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
Token::Kind op_kind) {
const ICData& ic_data = *call->ic_data();
ASSERT(ic_data.NumArgsTested() == 2);
const BinaryFeedback& binary_feedback = call->BinaryFeedback();
ASSERT(call->type_args_len() == 0);
ASSERT(call->ArgumentCount() == 2);
@ -525,9 +392,9 @@ bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
Definition* const right = call->ArgumentAt(1);
intptr_t cid = kIllegalCid;
if (HasOnlyTwoOf(ic_data, kOneByteStringCid)) {
if (binary_feedback.OperandsAre(kOneByteStringCid)) {
return TryStringLengthOneEquality(call, op_kind);
} else if (HasOnlyTwoOf(ic_data, kSmiCid)) {
} else if (binary_feedback.OperandsAre(kSmiCid)) {
InsertBefore(call,
new (Z) CheckSmiInstr(new (Z) Value(left), call->deopt_id(),
call->token_pos()),
@ -537,15 +404,15 @@ bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
call->token_pos()),
call->env(), FlowGraph::kEffect);
cid = kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
} else if (binary_feedback.OperandsAreSmiOrMint() &&
FlowGraphCompiler::SupportsUnboxedInt64()) {
cid = kMintCid;
} else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) {
} else if (binary_feedback.OperandsAreSmiOrDouble() && CanUnboxDouble()) {
// Use double comparison.
if (SmiFitsInDouble()) {
cid = kDoubleCid;
} else {
if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) {
if (binary_feedback.IncludesOperands(kSmiCid)) {
// We cannot use double comparison on two smis. Need polymorphic
// call.
return false;
@ -562,11 +429,7 @@ bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
// Check if ICData contains checks with Smi/Null combinations. In that case
// we can still emit the optimized Smi equality operation but need to add
// checks for null or Smi.
GrowableArray<intptr_t> smi_or_null(2);
smi_or_null.Add(kSmiCid);
smi_or_null.Add(kNullCid);
if (ICDataHasOnlyReceiverArgumentClassIds(ic_data, smi_or_null,
smi_or_null)) {
if (binary_feedback.OperandsAreSmiOrNull()) {
AddChecksForArgNr(call, left, /* arg_number = */ 0);
AddChecksForArgNr(call, right, /* arg_number = */ 1);
@ -599,16 +462,15 @@ bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
bool CallSpecializer::TryReplaceWithRelationalOp(InstanceCallInstr* call,
Token::Kind op_kind) {
const ICData& ic_data = *call->ic_data();
ASSERT(ic_data.NumArgsTested() == 2);
ASSERT(call->type_args_len() == 0);
ASSERT(call->ArgumentCount() == 2);
const BinaryFeedback& binary_feedback = call->BinaryFeedback();
Definition* left = call->ArgumentAt(0);
Definition* right = call->ArgumentAt(1);
intptr_t cid = kIllegalCid;
if (HasOnlyTwoOf(ic_data, kSmiCid)) {
if (binary_feedback.OperandsAre(kSmiCid)) {
InsertBefore(call,
new (Z) CheckSmiInstr(new (Z) Value(left), call->deopt_id(),
call->token_pos()),
@ -618,15 +480,15 @@ bool CallSpecializer::TryReplaceWithRelationalOp(InstanceCallInstr* call,
call->token_pos()),
call->env(), FlowGraph::kEffect);
cid = kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
} else if (binary_feedback.OperandsAreSmiOrMint() &&
FlowGraphCompiler::SupportsUnboxedInt64()) {
cid = kMintCid;
} else if (HasTwoDoubleOrSmi(ic_data) && CanUnboxDouble()) {
} else if (binary_feedback.OperandsAreSmiOrDouble() && CanUnboxDouble()) {
// Use double comparison.
if (SmiFitsInDouble()) {
cid = kDoubleCid;
} else {
if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid, kSmiCid)) {
if (binary_feedback.IncludesOperands(kSmiCid)) {
// We cannot use double comparison on two smis. Need polymorphic
// call.
return false;
@ -654,31 +516,33 @@ bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
Token::Kind op_kind) {
intptr_t operands_type = kIllegalCid;
ASSERT(call->HasICData());
const ICData& ic_data = *call->ic_data();
const BinaryFeedback& binary_feedback = call->BinaryFeedback();
switch (op_kind) {
case Token::kADD:
case Token::kSUB:
case Token::kMUL:
if (HasOnlyTwoOf(ic_data, kSmiCid)) {
if (binary_feedback.OperandsAre(kSmiCid)) {
// Don't generate smi code if the IC data is marked because
// of an overflow.
operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)
? kMintCid
: kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
operands_type =
call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)
? kMintCid
: kSmiCid;
} else if (binary_feedback.OperandsAreSmiOrMint() &&
FlowGraphCompiler::SupportsUnboxedInt64()) {
// Don't generate mint code if the IC data is marked because of an
// overflow.
if (ic_data.HasDeoptReason(ICData::kDeoptBinaryInt64Op)) return false;
if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op))
return false;
operands_type = kMintCid;
} else if (ShouldSpecializeForDouble(ic_data)) {
} else if (ShouldSpecializeForDouble(binary_feedback)) {
operands_type = kDoubleCid;
} else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) {
} else if (binary_feedback.OperandsAre(kFloat32x4Cid)) {
operands_type = kFloat32x4Cid;
} else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) {
} else if (binary_feedback.OperandsAre(kInt32x4Cid)) {
ASSERT(op_kind != Token::kMUL); // Int32x4 doesn't have a multiply op.
operands_type = kInt32x4Cid;
} else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) {
} else if (binary_feedback.OperandsAre(kFloat64x2Cid)) {
operands_type = kFloat64x2Cid;
} else {
return false;
@ -686,12 +550,12 @@ bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
break;
case Token::kDIV:
if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
if (ShouldSpecializeForDouble(ic_data) ||
HasOnlyTwoOf(ic_data, kSmiCid)) {
if (ShouldSpecializeForDouble(binary_feedback) ||
binary_feedback.OperandsAre(kSmiCid)) {
operands_type = kDoubleCid;
} else if (HasOnlyTwoOf(ic_data, kFloat32x4Cid)) {
} else if (binary_feedback.OperandsAre(kFloat32x4Cid)) {
operands_type = kFloat32x4Cid;
} else if (HasOnlyTwoOf(ic_data, kFloat64x2Cid)) {
} else if (binary_feedback.OperandsAre(kFloat64x2Cid)) {
operands_type = kFloat64x2Cid;
} else {
return false;
@ -700,11 +564,11 @@ bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
case Token::kBIT_AND:
case Token::kBIT_OR:
case Token::kBIT_XOR:
if (HasOnlyTwoOf(ic_data, kSmiCid)) {
if (binary_feedback.OperandsAre(kSmiCid)) {
operands_type = kSmiCid;
} else if (HasTwoMintOrSmi(ic_data)) {
} else if (binary_feedback.OperandsAreSmiOrMint()) {
operands_type = kMintCid;
} else if (HasOnlyTwoOf(ic_data, kInt32x4Cid)) {
} else if (binary_feedback.OperandsAre(kInt32x4Cid)) {
operands_type = kInt32x4Cid;
} else {
return false;
@ -712,22 +576,22 @@ bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
break;
case Token::kSHR:
case Token::kSHL:
if (HasOnlyTwoOf(ic_data, kSmiCid)) {
if (binary_feedback.OperandsAre(kSmiCid)) {
// Left shift may overflow from smi into mint or big ints.
// Don't generate smi code if the IC data is marked because
// of an overflow.
if (ic_data.HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
return false;
}
operands_type = ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)
? kMintCid
: kSmiCid;
} else if (HasTwoMintOrSmi(ic_data) &&
HasOnlyOneSmi(ICData::Handle(
Z, ic_data.AsUnaryClassChecksForArgNr(1)))) {
operands_type =
call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)
? kMintCid
: kSmiCid;
} else if (binary_feedback.OperandsAreSmiOrMint() &&
binary_feedback.ArgumentIs(kSmiCid)) {
// Don't generate mint code if the IC data is marked because of an
// overflow.
if (ic_data.HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinaryInt64Op)) {
return false;
}
// Check for smi/mint << smi or smi/mint >> smi.
@ -739,8 +603,8 @@ bool CallSpecializer::TryReplaceWithBinaryOp(InstanceCallInstr* call,
case Token::kMOD:
case Token::kTRUNCDIV:
if (!FlowGraphCompiler::SupportsHardwareDivision()) return false;
if (HasOnlyTwoOf(ic_data, kSmiCid)) {
if (ic_data.HasDeoptReason(ICData::kDeoptBinarySmiOp)) {
if (binary_feedback.OperandsAre(kSmiCid)) {
if (call->ic_data()->HasDeoptReason(ICData::kDeoptBinarySmiOp)) {
return false;
}
operands_type = kSmiCid;
@ -844,7 +708,7 @@ bool CallSpecializer::TryReplaceWithUnaryOp(InstanceCallInstr* call,
ASSERT(call->ArgumentCount() == 1);
Definition* input = call->ArgumentAt(0);
Definition* unary_op = NULL;
if (HasOnlyOneSmi(*call->ic_data())) {
if (call->Targets().ReceiverIs(kSmiCid)) {
InsertBefore(call,
new (Z) CheckSmiInstr(new (Z) Value(input), call->deopt_id(),
call->token_pos()),
@ -852,11 +716,11 @@ bool CallSpecializer::TryReplaceWithUnaryOp(InstanceCallInstr* call,
unary_op = new (Z)
UnarySmiOpInstr(op_kind, new (Z) Value(input), call->deopt_id());
} else if ((op_kind == Token::kBIT_NOT) &&
HasOnlySmiOrMint(*call->ic_data()) &&
call->Targets().ReceiverIsSmiOrMint() &&
FlowGraphCompiler::SupportsUnboxedInt64()) {
unary_op = new (Z)
UnaryInt64OpInstr(op_kind, new (Z) Value(input), call->deopt_id());
} else if (HasOnlyOneDouble(*call->ic_data()) &&
} else if (call->Targets().ReceiverIs(kDoubleCid) &&
(op_kind == Token::kNEGATE) && CanUnboxDouble()) {
AddReceiverCheck(call);
unary_op = new (Z) UnaryDoubleOpInstr(Token::kNEGATE, new (Z) Value(input),
@ -869,33 +733,16 @@ bool CallSpecializer::TryReplaceWithUnaryOp(InstanceCallInstr* call,
return true;
}
// Lookup field with the given name in the given class.
RawField* CallSpecializer::GetField(intptr_t class_id,
const String& field_name) {
Class& cls = Class::Handle(Z, isolate()->class_table()->At(class_id));
Field& field = Field::Handle(Z);
while (!cls.IsNull()) {
field = cls.LookupInstanceField(field_name);
if (!field.IsNull()) {
return should_clone_fields_ ? field.CloneFromOriginal() : field.raw();
}
cls = cls.SuperClass();
}
return Field::null();
}
bool CallSpecializer::TryInlineImplicitInstanceGetter(InstanceCallInstr* call) {
ASSERT(call->HasICData());
const ICData& ic_data = *call->ic_data();
ASSERT(ic_data.HasOneTarget());
GrowableArray<intptr_t> class_ids;
ic_data.GetClassIdsAt(0, &class_ids);
ASSERT(class_ids.length() == 1);
const CallTargets& targets = call->Targets();
ASSERT(targets.HasSingleTarget());
// Inline implicit instance getter.
const String& field_name =
String::Handle(Z, Field::NameFromGetter(call->function_name()));
const Field& field = Field::ZoneHandle(Z, GetField(class_ids[0], field_name));
Field& field = Field::ZoneHandle(Z, targets.FirstTarget().accessor_field());
ASSERT(!field.IsNull());
if (should_clone_fields_) {
field = field.CloneFromOriginal();
}
switch (
flow_graph()->CheckForInstanceCall(call, RawFunction::kImplicitGetter)) {
@ -935,35 +782,23 @@ void CallSpecializer::InlineImplicitInstanceGetter(Definition* call,
}
}
bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr,
const ICData& unary_ic_data) {
ASSERT(!unary_ic_data.NumberOfChecksIs(0) &&
(unary_ic_data.NumArgsTested() == 1));
ASSERT(instr->HasICData());
if (unary_ic_data.NumberOfChecksIs(0)) {
// No type feedback collected.
return false;
}
if (!unary_ic_data.HasOneTarget()) {
bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr) {
const CallTargets& targets = instr->Targets();
if (!targets.HasSingleTarget()) {
// Polymorphic sites are inlined like normal method calls by conventional
// inlining.
return false;
}
Function& target = Function::Handle(Z);
intptr_t class_id;
unary_ic_data.GetOneClassCheckAt(0, &class_id, &target);
const Function& target = targets.FirstTarget();
if (target.kind() != RawFunction::kImplicitSetter) {
// Non-implicit setter are inlined like normal method calls.
return false;
}
// Inline implicit instance setter.
String& field_name = String::Handle(Z, instr->function_name().raw());
if (Function::IsDynamicInvocationForwarderName(field_name)) {
field_name = Function::DemangleDynamicInvocationForwarderName(field_name);
}
field_name = Field::NameFromSetter(field_name);
const Field& field = Field::ZoneHandle(Z, GetField(class_id, field_name));
Field& field = Field::ZoneHandle(Z, target.accessor_field());
ASSERT(!field.IsNull());
if (should_clone_fields_) {
field = field.CloneFromOriginal();
}
switch (
flow_graph()->CheckForInstanceCall(instr, RawFunction::kImplicitSetter)) {
@ -984,10 +819,10 @@ bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr,
// True if we can use unchecked entry into the setter.
bool is_unchecked_call = false;
if (!FLAG_precompiled_mode) {
if (unary_ic_data.NumberOfChecks() == 1 &&
unary_ic_data.GetExactnessAt(0).IsExact()) {
if (unary_ic_data.GetExactnessAt(0).IsTriviallyExact()) {
flow_graph()->AddExactnessGuard(instr, unary_ic_data.GetCidAt(0));
if (targets.IsMonomorphic() && targets.MonomorphicExactness().IsExact()) {
if (targets.MonomorphicExactness().IsTriviallyExact()) {
flow_graph()->AddExactnessGuard(instr,
targets.MonomorphicReceiverCid());
}
is_unchecked_call = true;
}
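
TryInlineInstanceSetter above (and the getter path before it) now obtains the backing field from the resolved target via Function::accessor_field(), where the deleted GetField helper demangled the selector name and walked superclasses looking the field up by string. The contrast, with invented minimal types:

    #include <string>

    struct Field { std::string name; };

    struct Function {
      Field* accessor_field;  // backing field of an implicit getter/setter
    };

    Field* FieldForSetter(const Function& target) {
      // Old: NameFromSetter(selector) + superclass walk via name lookup.
      // New: one pointer chase off the unique resolved target.
      return target.accessor_field;
    }

    int main() {
      Field f{"x"};
      Function setter{&f};
      return FieldForSetter(setter)->name == "x" ? 0 : 1;
    }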
@ -1109,20 +944,13 @@ bool CallSpecializer::InlineSimdBinaryOp(InstanceCallInstr* call,
// Only unique implicit instance getters can be currently handled.
bool CallSpecializer::TryInlineInstanceGetter(InstanceCallInstr* call) {
ASSERT(call->HasICData());
const ICData& ic_data = *call->ic_data();
if (ic_data.NumberOfUsedChecks() == 0) {
// No type feedback collected.
return false;
}
if (!ic_data.HasOneTarget()) {
const CallTargets& targets = call->Targets();
if (!targets.HasSingleTarget()) {
// Polymorphic sites are inlined like normal methods by conventional
// inlining in FlowGraphInliner.
return false;
}
const Function& target = Function::Handle(Z, ic_data.GetTargetAt(0));
const Function& target = targets.FirstTarget();
if (target.kind() != RawFunction::kImplicitGetter) {
// Non-implicit getters are inlined like normal methods by conventional
// inlining in FlowGraphInliner.
@ -1148,28 +976,26 @@ void CallSpecializer::ReplaceWithMathCFunction(
// Inline only simple, frequently called core library methods.
bool CallSpecializer::TryInlineInstanceMethod(InstanceCallInstr* call) {
ASSERT(call->HasICData());
const ICData& ic_data = *call->ic_data();
if (ic_data.NumberOfUsedChecks() != 1) {
const CallTargets& targets = call->Targets();
if (!targets.IsMonomorphic()) {
// No type feedback collected or multiple receivers/targets found.
return false;
}
Function& target = Function::Handle(Z);
GrowableArray<intptr_t> class_ids;
ic_data.GetCheckAt(0, &class_ids, &target);
const Function& target = targets.FirstTarget();
intptr_t receiver_cid = targets.MonomorphicReceiverCid();
MethodRecognizer::Kind recognized_kind =
MethodRecognizer::RecognizeKind(target);
if (CanUnboxDouble() &&
(recognized_kind == MethodRecognizer::kIntegerToDouble)) {
if (class_ids[0] == kSmiCid) {
if (receiver_cid == kSmiCid) {
AddReceiverCheck(call);
ReplaceCall(call,
new (Z) SmiToDoubleInstr(new (Z) Value(call->ArgumentAt(0)),
call->token_pos()));
return true;
} else if ((class_ids[0] == kMintCid) && CanConvertInt64ToDouble()) {
} else if ((receiver_cid == kMintCid) && CanConvertInt64ToDouble()) {
AddReceiverCheck(call);
ReplaceCall(call,
new (Z) Int64ToDoubleInstr(new (Z) Value(call->ArgumentAt(0)),
@ -1178,7 +1004,7 @@ bool CallSpecializer::TryInlineInstanceMethod(InstanceCallInstr* call) {
}
}
if (class_ids[0] == kDoubleCid) {
if (receiver_cid == kDoubleCid) {
if (!CanUnboxDouble()) {
return false;
}
@ -1480,22 +1306,20 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
MethodRecognizer::Kind recognized_kind =
MethodRecognizer::RecognizeKind(call->function());
const CallTargets& targets = call->Targets();
const BinaryFeedback& binary_feedback = call->BinaryFeedback();
switch (recognized_kind) {
case MethodRecognizer::kMathMin:
case MethodRecognizer::kMathMax: {
// We can handle only monomorphic min/max call sites with both arguments
// being either doubles or smis.
if (CanUnboxDouble() && call->HasICData() &&
call->ic_data()->NumberOfChecksIs(1) &&
if (CanUnboxDouble() && targets.IsMonomorphic() &&
(call->FirstArgIndex() == 0)) {
const ICData& ic_data = *call->ic_data();
intptr_t result_cid = kIllegalCid;
if (ICDataHasReceiverArgumentClassIds(ic_data, kDoubleCid,
kDoubleCid)) {
if (binary_feedback.IncludesOperands(kDoubleCid)) {
result_cid = kDoubleCid;
} else if (ICDataHasReceiverArgumentClassIds(ic_data, kSmiCid,
kSmiCid)) {
} else if (binary_feedback.IncludesOperands(kSmiCid)) {
result_cid = kSmiCid;
}
if (result_cid != kIllegalCid) {
@ -1503,8 +1327,7 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
recognized_kind, new (Z) Value(call->ArgumentAt(0)),
new (Z) Value(call->ArgumentAt(1)), call->deopt_id(),
result_cid);
const Cids* cids =
Cids::CreateAndExpand(Z, ic_data, /* argument_number =*/0);
const Cids* cids = Cids::CreateMonomorphic(Z, result_cid);
AddCheckClass(min_max->left()->definition(), *cids,
call->deopt_id(), call->env(), call);
AddCheckClass(min_max->right()->definition(), *cids,
@ -1516,17 +1339,16 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
break;
}
case MethodRecognizer::kDoubleFromInteger: {
if (call->HasICData() && call->ic_data()->NumberOfChecksIs(1) &&
if (call->HasICData() && targets.IsMonomorphic() &&
(call->FirstArgIndex() == 0)) {
const ICData& ic_data = *call->ic_data();
if (CanUnboxDouble()) {
if (ArgIsAlways(kSmiCid, ic_data, 1)) {
if (binary_feedback.ArgumentIs(kSmiCid)) {
Definition* arg = call->ArgumentAt(1);
AddCheckSmi(arg, call->deopt_id(), call->env(), call);
ReplaceCall(call, new (Z) SmiToDoubleInstr(new (Z) Value(arg),
call->token_pos()));
return;
} else if (ArgIsAlways(kMintCid, ic_data, 1) &&
} else if (binary_feedback.ArgumentIs(kMintCid) &&
CanConvertInt64ToDouble()) {
Definition* arg = call->ArgumentAt(1);
ReplaceCall(call, new (Z) Int64ToDoubleInstr(new (Z) Value(arg),

@ -67,8 +67,7 @@ class CallSpecializer : public FlowGraphVisitor {
Zone* zone() const { return flow_graph_->zone(); }
const Function& function() const { return flow_graph_->function(); }
bool TryReplaceWithIndexedOp(InstanceCallInstr* call,
const ICData* unary_checks);
bool TryReplaceWithIndexedOp(InstanceCallInstr* call);
bool TryReplaceWithBinaryOp(InstanceCallInstr* call, Token::Kind op_kind);
bool TryReplaceWithUnaryOp(InstanceCallInstr* call, Token::Kind op_kind);
@ -77,8 +76,7 @@ class CallSpecializer : public FlowGraphVisitor {
bool TryReplaceWithRelationalOp(InstanceCallInstr* call, Token::Kind op_kind);
bool TryInlineInstanceGetter(InstanceCallInstr* call);
bool TryInlineInstanceSetter(InstanceCallInstr* call,
const ICData& unary_ic_data);
bool TryInlineInstanceSetter(InstanceCallInstr* call);
bool TryInlineInstanceMethod(InstanceCallInstr* call);
void ReplaceWithInstanceOf(InstanceCallInstr* instr);
@ -93,8 +91,8 @@ class CallSpecializer : public FlowGraphVisitor {
// Add a class check for the call's first argument (receiver).
void AddReceiverCheck(InstanceCallInstr* call) {
AddChecksForArgNr(call, call->Receiver()->definition(),
/* argument_number = */ 0);
AddCheckClass(call->Receiver()->definition(), call->Targets(),
call->deopt_id(), call->env(), call);
}
// Insert a null check if needed.
@ -107,8 +105,6 @@ class CallSpecializer : public FlowGraphVisitor {
// Attempt to build ICData for call using propagated class-ids.
virtual bool TryCreateICData(InstanceCallInstr* call);
static bool HasOnlyTwoOf(const ICData& ic_data, intptr_t cid);
virtual bool TryReplaceInstanceOfWithRangeCheck(InstanceCallInstr* call,
const AbstractType& type);
@ -143,7 +139,7 @@ class CallSpecializer : public FlowGraphVisitor {
// call, using the call's IC data to determine the check, and the call's
// deopt ID and deoptimization environment if the check fails.
void AddChecksForArgNr(InstanceCallInstr* call,
Definition* instr,
Definition* argument,
int argument_number);
bool InlineSimdBinaryOp(InstanceCallInstr* call,
@ -164,8 +160,6 @@ class CallSpecializer : public FlowGraphVisitor {
bool TryStringLengthOneEquality(InstanceCallInstr* call, Token::Kind op_kind);
RawField* GetField(intptr_t class_id, const String& field_name);
void SpecializePolymorphicInstanceCall(PolymorphicInstanceCallInstr* call);
// Tries to add cid tests to 'results' so that no deoptimization is

@ -115,15 +115,6 @@ class CompilerState : public ThreadStackResource {
// TODO(vegorov): disambiguate slots for different context IDs.
LocalVariable* GetDummyCapturedVariable(intptr_t context_id, intptr_t index);
ZoneGrowableArray<const MegamorphicCache*>& cloned_megamorphic_caches() {
if (cloned_megamorphic_caches_ == nullptr) {
Zone* Z = Thread::Current()->zone();
cloned_megamorphic_caches_ =
new (Z) ZoneGrowableArray<const MegamorphicCache*>(Z, 12);
}
return *cloned_megamorphic_caches_;
}
private:
CHA cha_;
intptr_t deopt_id_ = 0;
@ -136,9 +127,6 @@ class CompilerState : public ThreadStackResource {
ZoneGrowableArray<ZoneGrowableArray<const Slot*>*>* dummy_slots_ = nullptr;
ZoneGrowableArray<LocalVariable*>* dummy_captured_vars_ = nullptr;
ZoneGrowableArray<const MegamorphicCache*>* cloned_megamorphic_caches_ =
nullptr;
CompilerState* previous_;
};

@ -51,15 +51,14 @@ bool JitCallSpecializer::TryOptimizeStaticCallUsingStaticTypes(
}
void JitCallSpecializer::ReplaceWithStaticCall(InstanceCallInstr* instr,
const ICData& unary_checks,
const Function& target,
intptr_t call_count) {
StaticCallInstr* call =
StaticCallInstr::FromCall(Z, instr, target, call_count);
if (unary_checks.NumberOfChecks() == 1 &&
unary_checks.GetExactnessAt(0).IsExact()) {
if (unary_checks.GetExactnessAt(0).IsTriviallyExact()) {
flow_graph()->AddExactnessGuard(instr, unary_checks.GetCidAt(0));
const CallTargets& targets = instr->Targets();
if (targets.IsMonomorphic() && targets.MonomorphicExactness().IsExact()) {
if (targets.MonomorphicExactness().IsTriviallyExact()) {
flow_graph()->AddExactnessGuard(instr, targets.MonomorphicReceiverCid());
}
call->set_entry_kind(Code::EntryKind::kUnchecked);
}
@ -71,9 +70,11 @@ void JitCallSpecializer::ReplaceWithStaticCall(InstanceCallInstr* instr,
// TODO(dartbug.com/30635) Evaluate how much this can be shared with
// AotCallSpecializer.
void JitCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
if (!instr->HasICData() || (instr->ic_data()->NumberOfUsedChecks() == 0)) {
return;
const CallTargets& targets = instr->Targets();
if (targets.is_empty()) {
return; // No feedback.
}
const Token::Kind op_kind = instr->token_kind();
// Type test is special as it always gets converted into inlined code.
@ -82,15 +83,10 @@ void JitCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
return;
}
const ICData& unary_checks =
ICData::ZoneHandle(Z, instr->ic_data()->AsUnaryClassChecks());
if ((op_kind == Token::kASSIGN_INDEX) &&
TryReplaceWithIndexedOp(instr, &unary_checks)) {
if ((op_kind == Token::kASSIGN_INDEX) && TryReplaceWithIndexedOp(instr)) {
return;
}
if ((op_kind == Token::kINDEX) &&
TryReplaceWithIndexedOp(instr, &unary_checks)) {
if ((op_kind == Token::kINDEX) && TryReplaceWithIndexedOp(instr)) {
return;
}
@ -114,22 +110,18 @@ void JitCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
if ((op_kind == Token::kGET) && TryInlineInstanceGetter(instr)) {
return;
}
if ((op_kind == Token::kSET) &&
TryInlineInstanceSetter(instr, unary_checks)) {
if ((op_kind == Token::kSET) && TryInlineInstanceSetter(instr)) {
return;
}
if (TryInlineInstanceMethod(instr)) {
return;
}
const CallTargets& targets = *CallTargets::CreateAndExpand(Z, unary_checks);
bool has_one_target = targets.HasSingleTarget();
if (has_one_target) {
// Check if the single target is a polymorphic target; if it is,
// we don't have one target.
const Function& target = Function::Handle(Z, unary_checks.GetTargetAt(0));
const Function& target = targets.FirstTarget();
if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
has_one_target = PolymorphicInstanceCallInstr::ComputeRuntimeType(
targets) != Type::null();
@ -141,12 +133,10 @@ void JitCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
}
if (has_one_target) {
const Function& target =
Function::ZoneHandle(Z, unary_checks.GetTargetAt(0));
const Function& target = targets.FirstTarget();
if (flow_graph()->CheckForInstanceCall(instr, target.kind()) ==
FlowGraph::ToCheck::kNoCheck) {
ReplaceWithStaticCall(instr, unary_checks, target,
targets.AggregateCallCount());
ReplaceWithStaticCall(instr, target, targets.AggregateCallCount());
return;
}
}
@ -163,30 +153,13 @@ void JitCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
// non-deopting megamorphic call stub when it sees new receiver classes.
if (has_one_target && FLAG_polymorphic_with_deopt &&
(!instr->ic_data()->HasDeoptReason(ICData::kDeoptCheckClass) ||
unary_checks.NumberOfChecks() <= FLAG_max_polymorphic_checks)) {
targets.length() <= FLAG_max_polymorphic_checks)) {
// Type propagation has not run yet, so we cannot eliminate the check.
// TODO(erikcorry): The receiver check should use the off-heap targets
// array, not the IC array.
// After we determined `targets.HasSingleTarget()` the mutator might have
// updated the megamorphic cache by adding more entries with *different*
// targets.
//
// We therefore have to ensure the class check we insert is only valid for
// precisely the [targets] classes we have based our decision upon.
//
// (i.e. we cannot use [AddReceiverCheck]/[AddCheckClass], since it
// internally consults the megamorphic cache again, which can return a superset
// of the classes - possibly with different targets)
//
AddCheckClass(instr->Receiver()->definition(), targets, instr->deopt_id(),
instr->env(), instr);
AddReceiverCheck(instr);
// Call can still deoptimize, do not detach environment from instr.
const Function& target =
Function::ZoneHandle(Z, unary_checks.GetTargetAt(0));
ReplaceWithStaticCall(instr, unary_checks, target,
targets.AggregateCallCount());
const Function& target = targets.FirstTarget();
ReplaceWithStaticCall(instr, target, targets.AggregateCallCount());
} else {
PolymorphicInstanceCallInstr* call =
new (Z) PolymorphicInstanceCallInstr(instr, targets,

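The comment block above is the crux of this file's change: the single-target decision and the inserted class check must both come from one immutable snapshot of the feedback, never from a second read of the live cache. A self-contained sketch of that discipline; SharedCache, TakeSnapshot, and GuardCids are hypothetical stand-ins for the VM's structures:

#include <cstdint>
#include <map>
#include <mutex>
#include <utility>
#include <vector>

// Toy mutable feedback cache; the mutator may add entries between any two
// reads performed by a compiler thread.
struct SharedCache {
  std::mutex mutex;
  std::map<std::intptr_t, const void*> cid_to_target;
};

using Snapshot = std::vector<std::pair<std::intptr_t, const void*>>;

// Read the cache exactly once, under the lock.
Snapshot TakeSnapshot(SharedCache* cache) {
  std::lock_guard<std::mutex> lock(cache->mutex);
  return Snapshot(cache->cid_to_target.begin(), cache->cid_to_target.end());
}

// Guard precisely the cids in the snapshot; consulting the live cache again
// here could return a superset of classes with different targets.
std::vector<std::intptr_t> GuardCids(const Snapshot& targets) {
  std::vector<std::intptr_t> cids;
  for (const auto& entry : targets) {
    cids.push_back(entry.first);
  }
  return cids;
}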

@ -36,7 +36,6 @@ class JitCallSpecializer : public CallSpecializer {
Value* context_value);
void ReplaceWithStaticCall(InstanceCallInstr* instr,
const ICData& unary_checks,
const Function& target,
intptr_t call_count);


@ -13,10 +13,9 @@
namespace dart {
RawMegamorphicCache* MegamorphicCacheTable::LookupOriginal(
Thread* thread,
const String& name,
const Array& descriptor) {
RawMegamorphicCache* MegamorphicCacheTable::Lookup(Thread* thread,
const String& name,
const Array& descriptor) {
Isolate* isolate = thread->isolate();
// Multiple compilation threads could access this lookup.
SafepointMutexLocker ml(isolate->megamorphic_mutex());
@ -45,31 +44,6 @@ RawMegamorphicCache* MegamorphicCacheTable::LookupOriginal(
return cache.raw();
}
RawMegamorphicCache* MegamorphicCacheTable::LookupClone(
Thread* thread,
const String& name,
const Array& descriptor) {
if (!Compiler::IsBackgroundCompilation()) {
return LookupOriginal(thread, name, descriptor);
}
auto& cloned_caches = thread->compiler_state().cloned_megamorphic_caches();
for (intptr_t i = 0; i < cloned_caches.length(); i++) {
const MegamorphicCache& cache = *cloned_caches[i];
if ((cache.target_name() == name.raw()) &&
(cache.arguments_descriptor() == descriptor.raw())) {
return cache.raw();
}
}
const auto& original =
MegamorphicCache::Handle(LookupOriginal(thread, name, descriptor));
const auto& clone =
MegamorphicCache::ZoneHandle(MegamorphicCache::Clone(original));
cloned_caches.Add(&clone);
return clone.raw();
}
RawFunction* MegamorphicCacheTable::miss_handler(Isolate* isolate) {
ASSERT(isolate->object_store()->megamorphic_miss_function() !=
Function::null());


@ -39,16 +39,9 @@ class MegamorphicCacheTable : public AllStatic {
static void ReInitMissHandlerCode(Isolate* isolate,
compiler::ObjectPoolBuilder* wrapper));
// Lookup a cache for querying type feedback. The result may not be mutated by
// another thread.
static RawMegamorphicCache* LookupClone(Thread* thread,
const String& name,
const Array& descriptor);
// Lookup a cache for insertion into compiled code. The result may be mutated
// by another thread.
static RawMegamorphicCache* LookupOriginal(Thread* thread,
const String& name,
const Array& descriptor);
static RawMegamorphicCache* Lookup(Thread* thread,
const String& name,
const Array& descriptor);
static void PrintSizes(Isolate* isolate);
};

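With LookupClone gone, the surviving Lookup is a plain lookup-or-insert under a mutex, so every compilation thread resolves a (name, descriptor) pair to the same cache object. A hedged sketch of the pattern, where std::map, std::mutex, and the Cache type stand in for the VM's table, SafepointMutexLocker, and MegamorphicCache:

#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

struct Cache {};  // placeholder for the real cache payload

class CacheTable {
 public:
  // Returns the unique cache for (name, descriptor), creating it on first use.
  Cache* Lookup(const std::string& name, const std::string& descriptor) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto key = std::make_pair(name, descriptor);
    auto it = caches_.find(key);
    if (it == caches_.end()) {
      it = caches_.emplace(key, std::make_unique<Cache>()).first;
    }
    return it->second.get();
  }

 private:
  std::mutex mutex_;
  std::map<std::pair<std::string, std::string>, std::unique_ptr<Cache>>
      caches_;
};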

@ -14388,59 +14388,6 @@ bool ICData::HasReceiverClassId(intptr_t class_id) const {
}
return false;
}
// Returns true if all targets are the same.
// TODO(srdjan): if targets are native use their C_function to compare.
// TODO(rmacnak): this question should only be asked against a CallTargets,
// not an ICData.
bool ICData::HasOneTarget() const {
ASSERT(!NumberOfChecksIs(0));
const Function& first_target = Function::Handle(GetTargetAt(0));
const intptr_t len = NumberOfChecks();
for (intptr_t i = 1; i < len; i++) {
if (IsUsedAt(i) && (GetTargetAt(i) != first_target.raw())) {
return false;
}
}
if (is_megamorphic()) {
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
const String& name = String::Handle(zone, target_name());
const Array& descriptor = Array::Handle(zone, arguments_descriptor());
const MegamorphicCache& cache = MegamorphicCache::Handle(
zone, MegamorphicCacheTable::LookupClone(thread, name, descriptor));
MegamorphicCacheEntries entries(Array::Handle(cache.buckets()));
for (intptr_t i = 0; i < entries.Length(); i++) {
const intptr_t id =
Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
if (id == kIllegalCid) {
continue;
}
if (entries[i].Get<MegamorphicCache::kTargetFunctionIndex>() !=
first_target.raw()) {
return false;
}
}
}
return true;
}
void ICData::GetUsedCidsForTwoArgs(GrowableArray<intptr_t>* first,
GrowableArray<intptr_t>* second) const {
ASSERT(NumArgsTested() == 2);
first->Clear();
second->Clear();
GrowableArray<intptr_t> class_ids;
const intptr_t len = NumberOfChecks();
for (intptr_t i = 0; i < len; i++) {
if (GetCountAt(i) > 0) {
GetClassIdsAt(i, &class_ids);
ASSERT(class_ids.length() == 2);
first->Add(class_ids[0]);
second->Add(class_ids[1]);
}
}
}
#endif
bool ICData::IsUsedAt(intptr_t i) const {
@ -16044,37 +15991,6 @@ void MegamorphicCache::SwitchToBareInstructions() {
}
}
RawMegamorphicCache* MegamorphicCache::Clone(const MegamorphicCache& from) {
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
MegamorphicCache& result = MegamorphicCache::Handle(zone);
{
RawObject* raw =
Object::Allocate(MegamorphicCache::kClassId,
MegamorphicCache::InstanceSize(), Heap::kNew);
NoSafepointScope no_safepoint;
result ^= raw;
}
SafepointMutexLocker ml(thread->isolate()->megamorphic_mutex());
const Array& from_buckets = Array::Handle(zone, from.buckets());
const intptr_t len = from_buckets.Length();
const Array& cloned_buckets =
Array::Handle(zone, Array::New(len, Heap::kNew));
Object& obj = Object::Handle(zone);
for (intptr_t i = 0; i < len; i++) {
obj = from_buckets.At(i);
cloned_buckets.SetAt(i, obj);
}
result.set_buckets(cloned_buckets);
result.set_mask(from.mask());
result.set_target_name(String::Handle(zone, from.target_name()));
result.set_arguments_descriptor(
Array::Handle(zone, from.arguments_descriptor()));
result.set_filled_entry_count(from.filled_entry_count());
return result.raw();
}
void SubtypeTestCache::Init() {
cached_array_ = Array::New(kTestEntryLength, Heap::kOld);
}

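The deleted ICData::HasOneTarget shows why the megamorphic state made this question awkward: besides the IC entries, it had to scan every filled bucket of the megamorphic cache, skipping kIllegalCid sentinels. A toy rendering of that scan, with Bucket and the sentinel value as illustrative stand-ins:

#include <cstdint>
#include <vector>

constexpr std::intptr_t kIllegalCid = -1;  // stand-in for the empty-slot marker

struct Bucket {
  std::intptr_t cid;   // kIllegalCid when the slot is unfilled
  const void* target;  // stand-in for the target function
};

// Every *filled* bucket must agree with the first target seen in the
// ICData; otherwise the call site does not have one target.
bool AllFilledBucketsAgree(const std::vector<Bucket>& buckets,
                           const void* first_target) {
  for (const Bucket& bucket : buckets) {
    if (bucket.cid == kIllegalCid) continue;
    if (bucket.target != first_target) return false;
  }
  return true;
}

Hoisting the question onto CallTargets removes both the second data source and the races that came with it.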

@ -1802,18 +1802,6 @@ class ICData : public Object {
RebindRule rebind_rule() const;
void set_rebind_rule(uint32_t rebind_rule) const;
// This bit is set when a call site becomes megamorphic and starts using a
// MegamorphicCache instead of ICData. It means that the entries in the
// ICData are incomplete and the MegamorphicCache needs to also be consulted
// to list the call site's observed receiver classes and targets.
bool is_megamorphic() const {
// Ensure any following load instructions do not get performed before this
// one.
const uint32_t bits = LoadNonPointer<uint32_t, MemoryOrder::kAcquire>(
&raw_ptr()->state_bits_);
return MegamorphicBit::decode(bits);
}
void set_is_megamorphic(bool value) const {
// We don't have concurrent RW access to [state_bits_].
const uint32_t updated_bits =
@ -1956,8 +1944,6 @@ class ICData : public Object {
RawUnlinkedCall* AsUnlinkedCall() const;
// Consider only used entries.
bool HasOneTarget() const;
bool HasReceiverClassId(intptr_t class_id) const;
// Note: passing non-null receiver_type enables exactness tracking for
@ -1990,9 +1976,6 @@ class ICData : public Object {
bool IsUsedAt(intptr_t i) const;
void GetUsedCidsForTwoArgs(GrowableArray<intptr_t>* first,
GrowableArray<intptr_t>* second) const;
void PrintToJSONArray(const JSONArray& jsarray,
TokenPosition token_pos) const;
@ -2036,6 +2019,20 @@ class ICData : public Object {
void set_entries(const Array& value) const;
void set_state_bits(uint32_t bits) const;
// This bit is set when a call site becomes megamorphic and starts using a
// MegamorphicCache instead of ICData. It means that the entries in the
// ICData are incomplete and the MegamorphicCache needs to also be consulted
// to list the call site's observed receiver classes and targets.
// In the compiler, this should only be read once by CallTargets to avoid the
// compiler seeing an unstable set of feedback.
bool is_megamorphic() const {
// Ensure any following load instructions do not get performed before this
// one.
const uint32_t bits = LoadNonPointer<uint32_t, MemoryOrder::kAcquire>(
&raw_ptr()->state_bits_);
return MegamorphicBit::decode(bits);
}
bool ValidateInterceptor(const Function& target) const;
enum {
@ -2097,13 +2094,14 @@ class ICData : public Object {
static RawArray* cached_icdata_arrays_[kCachedICDataArrayCount];
FINAL_HEAP_OBJECT_IMPLEMENTATION(ICData, Object);
friend class CallSiteResetter;
friend class CallTargets;
friend class Class;
friend class Deserializer;
friend class ICDataTestTask;
friend class Interpreter;
friend class SnapshotWriter;
friend class Serializer;
friend class Deserializer;
friend class CallSiteResetter;
friend class SnapshotWriter;
};
// Often used constants for number of free function type parameters.

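The acquire load in is_megamorphic pairs with a release store by the thread that flips the bit: once a reader observes the flag, it also observes the cache entries written before it. A self-contained sketch of the idiom with std::atomic; the type and field names are illustrative only:

#include <atomic>
#include <utility>
#include <vector>

struct CallSiteFeedback {
  std::vector<int> entries;                  // written before the flag is set
  std::atomic<bool> is_megamorphic{false};

  void SwitchToMegamorphic(std::vector<int> new_entries) {
    entries = std::move(new_entries);
    // Release: the entries above become visible before the flag does.
    is_megamorphic.store(true, std::memory_order_release);
  }

  bool ReadIfMegamorphic(std::vector<int>* out) const {
    // Acquire: later loads cannot be reordered before this one.
    if (!is_megamorphic.load(std::memory_order_acquire)) return false;
    *out = entries;
    return true;
  }
};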

@ -1168,7 +1168,7 @@ static void TrySwitchInstanceCall(const ICData& ic_data,
const Array& descriptor =
Array::Handle(zone, ic_data.arguments_descriptor());
const MegamorphicCache& cache = MegamorphicCache::Handle(
zone, MegamorphicCacheTable::LookupOriginal(thread, name, descriptor));
zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
ic_data.set_is_megamorphic(true);
CodePatcher::PatchInstanceCallAt(caller_frame->pc(), caller_code, cache,
StubCode::MegamorphicCall());
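
Both this hunk and the next implement the same transition: once a call site's feedback outgrows FLAG_max_polymorphic_checks, the code is patched over to the shared, non-deopting megamorphic stub and the ICData is marked so later compiles consult the cache as well. A toy version of the policy; the threshold constant is a stand-in for the VM flag:

#include <cstddef>

enum class CallSiteState { kMonomorphic, kPolymorphic, kMegamorphic };

constexpr std::size_t kMaxPolymorphicChecks = 4;  // stand-in for the flag

CallSiteState StateFor(std::size_t observed_receiver_classes) {
  if (observed_receiver_classes <= 1) return CallSiteState::kMonomorphic;
  if (observed_receiver_classes <= kMaxPolymorphicChecks) {
    return CallSiteState::kPolymorphic;
  }
  // Past the threshold: switch to the megamorphic call stub.
  return CallSiteState::kMegamorphic;
}
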
@ -1795,8 +1795,7 @@ DEFINE_RUNTIME_ENTRY(MegamorphicCacheMissHandler, 3) {
if (number_of_checks > FLAG_max_polymorphic_checks) {
// Switch to megamorphic call.
const MegamorphicCache& cache = MegamorphicCache::Handle(
zone,
MegamorphicCacheTable::LookupOriginal(thread, name, descriptor));
zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
DartFrameIterator iterator(thread,
StackFrameIterator::kNoCrossThreadIteration);
StackFrame* miss_function_frame = iterator.NextFrame();