dart-sdk/runtime/vm/instructions_arm.cc

// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)
#include "vm/instructions.h"
#include "vm/instructions_arm.h"
#include "vm/constants.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/reverse_pc_lookup_cache.h"
namespace dart {
CallPattern::CallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
target_code_pool_index_(-1) {
ASSERT(code.ContainsInstructionAt(pc));
// Last instruction: blx lr.
ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);
Register reg;
InstructionPattern::DecodeLoadWordFromPool(pc - 2 * Instr::kInstrSize, &reg,
&target_code_pool_index_);
ASSERT(reg == CODE_REG);
}
ICCallPattern::ICCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
target_pool_index_(-1),
data_pool_index_(-1) {
ASSERT(code.ContainsInstructionAt(pc));
// Last instruction: blx lr.
ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);
Register reg;
uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
pc - 2 * Instr::kInstrSize, &reg, &target_pool_index_);
ASSERT(reg == CODE_REG);
InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
&data_pool_index_);
ASSERT(reg == R9);
}
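// An illustrative sketch of the JIT IC call tail this pattern matches; the
// instruction between the target load and `blx lr` is assumed, since the
// decoder skips over it:
//   ldr R9, [PP, #data_offset]          ; ICData / MegamorphicCache
//   ldr CODE_REG, [PP, #target_offset]  ; target stub's Code object
//   ldr lr, [CODE_REG, #entry_offset]   ; assumed entry-point load
//   blx lr                              ; 0xe12fff3e, asserted above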
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
end_(pc),
native_function_pool_index_(-1),
target_code_pool_index_(-1) {
ASSERT(code.ContainsInstructionAt(pc));
// Last instruction: blx lr.
ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 1) == 0xe12fff3e);
Register reg;
uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
end_ - 2 * Instr::kInstrSize, &reg, &target_code_pool_index_);
ASSERT(reg == CODE_REG);
InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
&native_function_pool_index_);
ASSERT(reg == R9);
}
CodePtr NativeCallPattern::target() const {
return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}
void NativeCallPattern::set_target(const Code& new_target) const {
object_pool_.SetObjectAt(target_code_pool_index_, new_target);
// No need to flush the instruction cache, since the code is not modified.
}
NativeFunction NativeCallPattern::native_function() const {
return reinterpret_cast<NativeFunction>(
object_pool_.RawValueAt(native_function_pool_index_));
}
void NativeCallPattern::set_native_function(NativeFunction func) const {
object_pool_.SetRawValueAt(native_function_pool_index_,
reinterpret_cast<uword>(func));
}
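// Illustrative usage (a sketch; `return_pc`, `new_trampoline_code`, and
// `MyNative` are hypothetical names): repatch a native call site in place.
//   NativeCallPattern call(return_pc, code);
//   call.set_target(new_trampoline_code);  // pool write; no I-cache flush
//   call.set_native_function(&MyNative);   // raw C function pointer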
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
Register* reg,
intptr_t* value) {
uword start = end - Instr::kInstrSize;
int32_t instr = Instr::At(start)->InstructionBits();
intptr_t imm = 0;
if ((instr & 0xfff00000) == 0xe3400000) { // movt reg, #imm_hi
imm |= (instr & 0xf0000) << 12;
imm |= (instr & 0xfff) << 16;
start -= Instr::kInstrSize;
instr = Instr::At(start)->InstructionBits();
}
ASSERT((instr & 0xfff00000) == 0xe3000000); // movw reg, #imm_lo
imm |= (instr & 0xf0000) >> 4;
imm |= instr & 0xfff;
*reg = static_cast<Register>((instr & 0xf000) >> 12);
*value = imm;
return start;
}
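// Worked example (illustrative): the pair `movw r0, #0x5678` (0xe3050678)
// followed by `movt r0, #0x1234` (0xe3410234) decodes as follows:
//   movt: (0xe3410234 & 0xf0000) << 12 = 0x10000000
//         (0xe3410234 & 0xfff)   << 16 = 0x02340000
//   movw: (0xe3050678 & 0xf0000) >> 4  = 0x00005000
//         (0xe3050678 & 0xfff)         = 0x00000678
// yielding *reg = R0 and *value = 0x12345678.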
void InstructionPattern::EncodeLoadWordImmediate(uword end,
Register reg,
intptr_t value) {
uint16_t low16 = value & 0xffff;
uint16_t high16 = (value >> 16) & 0xffff;
// movw reg, #imm_lo
uint32_t movw_instr = 0xe3000000;
movw_instr |= (low16 >> 12) << 16;
movw_instr |= (reg << 12);
movw_instr |= (low16 & 0xfff);
// movt reg, #imm_hi
uint32_t movt_instr = 0xe3400000;
movt_instr |= (high16 >> 12) << 16;
movt_instr |= (reg << 12);
movt_instr |= (high16 & 0xfff);
uint32_t* cursor = reinterpret_cast<uint32_t*>(end);
*(--cursor) = movt_instr;
*(--cursor) = movw_instr;
#if defined(DEBUG)
Register decoded_reg;
intptr_t decoded_value;
DecodeLoadWordImmediate(end, &decoded_reg, &decoded_value);
ASSERT(reg == decoded_reg);
ASSERT(value == decoded_value);
#endif
}
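// For example (illustrative), EncodeLoadWordImmediate(end, R0, 0x12345678)
// stores 0xe3050678 (movw r0, #0x5678) at end - 8 and 0xe3410234
// (movt r0, #0x1234) at end - 4, which the DEBUG round-trip above verifies.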
static bool IsLoadWithOffset(int32_t instr,
Register base,
intptr_t* offset,
Register* dst) {
if ((instr & 0xffff0000) == (0xe5900000 | (base << 16))) {
// ldr reg, [base, #+offset]
*offset = instr & 0xfff;
*dst = static_cast<Register>((instr & 0xf000) >> 12);
return true;
}
return false;
}
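// E.g. (illustrative; PP is R5 on ARM): the word 0xe5959008 is
// `ldr r9, [pp, #8]`, so IsLoadWithOffset(0xe5959008, PP, &offset, &dst)
// matches with *offset = 8 and *dst = R9.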
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the index in the pool being read from in the output parameters 'reg'
// and 'index' respectively.
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
Register* reg,
intptr_t* index) {
uword start = end - Instr::kInstrSize;
int32_t instr = Instr::At(start)->InstructionBits();
intptr_t offset = 0;
if (IsLoadWithOffset(instr, PP, &offset, reg)) {
// ldr reg, [PP, #+offset]
} else {
ASSERT((instr & 0xfff00000) == 0xe5900000); // ldr reg, [reg, #+offset]
offset = instr & 0xfff;
start -= Instr::kInstrSize;
instr = Instr::At(start)->InstructionBits();
if ((instr & 0xffff0000) == (0xe2850000 | (PP << 16))) {
// add reg, pp, operand
const intptr_t rot = (instr & 0xf00) >> 7;
const intptr_t imm8 = instr & 0xff;
offset += (imm8 >> rot) | (imm8 << (32 - rot));
*reg = static_cast<Register>((instr & 0xf000) >> 12);
} else {
ASSERT((instr & 0xffff0000) == (0xe0800000 | (PP << 16)));
// add reg, pp, reg
intptr_t value = 0;
start = DecodeLoadWordImmediate(start, reg, &value);
offset += value;
}
}
*index = ObjectPool::IndexFromOffset(offset);
return start;
}
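// The three pool-load shapes accepted above, smallest offset range first
// (a summary sketch of the decoding logic):
//   ldr reg, [PP, #off12]
//   add reg, PP, #rot_imm8 ; ldr reg, [reg, #off12]
//   movw/movt tmp, #imm32 ; add reg, PP, tmp ; ldr reg, [reg, #off12]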
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
ASSERT(code.ContainsInstructionAt(pc));
int32_t instr = Instr::At(pc)->InstructionBits();
intptr_t offset;
Register dst;
if (IsLoadWithOffset(instr, PP, &offset, &dst)) {
intptr_t index = ObjectPool::IndexFromOffset(offset);
return ObjectAtPoolIndex(code, index, obj);
} else if (IsLoadWithOffset(instr, THR, &offset, &dst)) {
return Thread::ObjectAtOffset(offset, obj);
}
// TODO(rmacnak): Sequence for loads beyond 12 bits.
return false;
}
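// Illustrative usage (a sketch): recover the object an instruction at `pc`
// loads, whether it comes from the object pool or from a Thread field:
//   Object& obj = Object::Handle();
//   if (DecodeLoadObjectFromPoolOrThread(pc, code, &obj)) { /* use obj */ }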
CodePtr CallPattern::TargetCode() const {
return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}
void CallPattern::SetTargetCode(const Code& target_code) const {
object_pool_.SetObjectAt(target_code_pool_index_, target_code);
}
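// Illustrative usage (a sketch; `return_pc` is the return address of the
// call): retarget a static call site via its pool entry.
//   CallPattern call(return_pc, code);
//   call.SetTargetCode(new_target);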
ObjectPtr ICCallPattern::Data() const {
return object_pool_.ObjectAt(data_pool_index_);
}
void ICCallPattern::SetData(const Object& data) const {
ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
object_pool_.SetObjectAt(data_pool_index_, data);
}
CodePtr ICCallPattern::TargetCode() const {
return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
}
void ICCallPattern::SetTargetCode(const Code& target_code) const {
object_pool_.SetObjectAt(target_pool_index_, target_code);
}
Reland "[vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces" This is a reland of b6dc4dad4dae73fe5f8500653de61a76fa5d6ce6 TEST=ci Original change's description: > [vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces > > The following changes are done in preparation for the removal of Code > objects in AOT with --dwarf-stack-traces: > > * Stack trace objects are extended to hold uword PCs (which may not > fit into Smi range). > > * Scanning stack frames in GC (StackFrame::VisitObjectPointers) > now avoids using Code objects. > In order to find CompressedStackMaps it now calls > ReversePc::FindCompressedStackMaps. > > * Singleton Code object (StubCode::UnknownDartCode()) is prepared as > a replacement for Code objects in stack traces. It has > PayloadStart() == 0 and Size() == kUwordMax so it includes > arbitrary PCs. > > * In --dwarf-stack-traces mode, most Code objects obtained from stack > frames are replaced with StubCode::UnknownDartCode(). > This simulates future behavior of ReversePc::Lookup when Code objects > will be removed. > > Issue: https://github.com/dart-lang/sdk/issues/44852 > Change-Id: I7cec7b8b9396c9cfeca3c256a412ba4e82a7e0c4 > TEST=ci > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/182720 > Commit-Queue: Alexander Markov <alexmarkov@google.com> > Reviewed-by: Tess Strickland <sstrickl@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> Change-Id: Ia2fc4672a085cd963b7fc103369851df3603590c Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/186202 Commit-Queue: Vyacheslav Egorov <vegorov@google.com> Reviewed-by: Tess Strickland <sstrickl@google.com> Reviewed-by: Martin Kustermann <kustermann@google.com> Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
2021-02-22 10:46:28 +00:00
SwitchableCallPatternBase::SwitchableCallPatternBase(
const ObjectPool& object_pool)
: object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}
ObjectPtr SwitchableCallPatternBase::data() const {
return object_pool_.ObjectAt(data_pool_index_);
}
void SwitchableCallPatternBase::SetData(const Object& data) const {
ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
object_pool_.SetObjectAt(data_pool_index_, data);
}
SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
Reland "[vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces" This is a reland of b6dc4dad4dae73fe5f8500653de61a76fa5d6ce6 TEST=ci Original change's description: > [vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces > > The following changes are done in preparation for the removal of Code > objects in AOT with --dwarf-stack-traces: > > * Stack trace objects are extended to hold uword PCs (which may not > fit into Smi range). > > * Scanning stack frames in GC (StackFrame::VisitObjectPointers) > now avoids using Code objects. > In order to find CompressedStackMaps it now calls > ReversePc::FindCompressedStackMaps. > > * Singleton Code object (StubCode::UnknownDartCode()) is prepared as > a replacement for Code objects in stack traces. It has > PayloadStart() == 0 and Size() == kUwordMax so it includes > arbitrary PCs. > > * In --dwarf-stack-traces mode, most Code objects obtained from stack > frames are replaced with StubCode::UnknownDartCode(). > This simulates future behavior of ReversePc::Lookup when Code objects > will be removed. > > Issue: https://github.com/dart-lang/sdk/issues/44852 > Change-Id: I7cec7b8b9396c9cfeca3c256a412ba4e82a7e0c4 > TEST=ci > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/182720 > Commit-Queue: Alexander Markov <alexmarkov@google.com> > Reviewed-by: Tess Strickland <sstrickl@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> Change-Id: Ia2fc4672a085cd963b7fc103369851df3603590c Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/186202 Commit-Queue: Vyacheslav Egorov <vegorov@google.com> Reviewed-by: Tess Strickland <sstrickl@google.com> Reviewed-by: Martin Kustermann <kustermann@google.com> Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
2021-02-22 10:46:28 +00:00
: SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
ASSERT(code.ContainsInstructionAt(pc));
// Last instruction: blx lr.
ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);
Register reg;
uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
pc - Instr::kInstrSize, &reg, &data_pool_index_);
ASSERT(reg == R9);
InstructionPattern::DecodeLoadWordFromPool(data_load_end - Instr::kInstrSize,
&reg, &target_pool_index_);
ASSERT(reg == CODE_REG);
}
uword SwitchableCallPattern::target_entry() const {
return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
.MonomorphicEntryPoint();
}
void SwitchableCallPattern::SetTarget(const Code& target) const {
ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
object_pool_.SetObjectAt(target_pool_index_, target);
}
Reland "[vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces" This is a reland of b6dc4dad4dae73fe5f8500653de61a76fa5d6ce6 TEST=ci Original change's description: > [vm/aot] Avoid using most Code objects in stack traces with --dwarf-stack-traces > > The following changes are done in preparation for the removal of Code > objects in AOT with --dwarf-stack-traces: > > * Stack trace objects are extended to hold uword PCs (which may not > fit into Smi range). > > * Scanning stack frames in GC (StackFrame::VisitObjectPointers) > now avoids using Code objects. > In order to find CompressedStackMaps it now calls > ReversePc::FindCompressedStackMaps. > > * Singleton Code object (StubCode::UnknownDartCode()) is prepared as > a replacement for Code objects in stack traces. It has > PayloadStart() == 0 and Size() == kUwordMax so it includes > arbitrary PCs. > > * In --dwarf-stack-traces mode, most Code objects obtained from stack > frames are replaced with StubCode::UnknownDartCode(). > This simulates future behavior of ReversePc::Lookup when Code objects > will be removed. > > Issue: https://github.com/dart-lang/sdk/issues/44852 > Change-Id: I7cec7b8b9396c9cfeca3c256a412ba4e82a7e0c4 > TEST=ci > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/182720 > Commit-Queue: Alexander Markov <alexmarkov@google.com> > Reviewed-by: Tess Strickland <sstrickl@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> Change-Id: Ia2fc4672a085cd963b7fc103369851df3603590c Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/186202 Commit-Queue: Vyacheslav Egorov <vegorov@google.com> Reviewed-by: Tess Strickland <sstrickl@google.com> Reviewed-by: Martin Kustermann <kustermann@google.com> Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
2021-02-22 10:46:28 +00:00
BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
: SwitchableCallPatternBase(ObjectPool::Handle(
IsolateGroup::Current()->object_store()->global_object_pool())) {
// Last instruction: blx lr.
ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);
Register reg;
uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
pc - Instr::kInstrSize, &reg, &data_pool_index_);
ASSERT(reg == R9);
InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
&target_pool_index_);
ASSERT(reg == LINK_REGISTER);
}
uword BareSwitchableCallPattern::target_entry() const {
return object_pool_.RawValueAt(target_pool_index_);
}
void BareSwitchableCallPattern::SetTarget(const Code& target) const {
ASSERT(object_pool_.TypeAt(target_pool_index_) ==
ObjectPool::EntryType::kImmediate);
object_pool_.SetRawValueAt(target_pool_index_,
target.MonomorphicEntryPoint());
}
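// Note the contrast with SwitchableCallPattern above: the JIT pattern stores a
// Code object in the pool and derives the entry point on use, whereas this
// bare-instructions (AOT) pattern stores the raw MonomorphicEntryPoint()
// immediate in the global pool and loads it directly into LR.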
ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}
bool ReturnPattern::IsValid() const {
Instr* bx_lr = Instr::At(pc_);
const int32_t B4 = 1 << 4;
const int32_t B21 = 1 << 21;
const int32_t B24 = 1 << 24;
int32_t instruction = (static_cast<int32_t>(AL) << kConditionShift) | B24 |
B21 | (0xfff << 8) | B4 |
(LINK_REGISTER.code << kRmShift);
return bx_lr->InstructionBits() == instruction;
}
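// The bits above assemble to 0xe12fff1e, i.e. `bx lr`:
//   (AL << 28) | B24 | B21 | (0xfff << 8) | B4 | LR
//   = 0xe0000000 | 0x01000000 | 0x00200000 | 0x000fff00 | 0x10 | 0xe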
bool PcRelativeCallPattern::IsValid() const {
// bl.<cond> <offset>
const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
const uint32_t branch = 0x05;
const uword type = ((word >> kTypeShift) & ((1 << kTypeBits) - 1));
const uword link = ((word >> kLinkShift) & ((1 << kLinkBits) - 1));
return type == branch && link == 1;
}
bool PcRelativeTailCallPattern::IsValid() const {
// b.<cond> <offset>
const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
const uint32_t branch = 0x05;
const uword type = ((word >> kTypeShift) & ((1 << kTypeBits) - 1));
const uword link = ((word >> kLinkShift) & ((1 << kLinkBits) - 1));
return type == branch && link == 0;
}
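// The two patterns above differ only in the link bit. E.g. (illustrative):
// 0xeb000000 is `bl` (type 0b101, link 1) and 0xea000000 is `b`
// (type 0b101, link 0), both with a zero signed 24-bit offset.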
void PcRelativeTrampolineJumpPattern::Initialize() {
#if !defined(DART_PRECOMPILED_RUNTIME)
uint32_t* add_pc =
reinterpret_cast<uint32_t*>(pattern_start_ + 2 * Instr::kInstrSize);
*add_pc = kAddPcEncoding;
set_distance(0);
#else
UNREACHABLE();
#endif
}
int32_t PcRelativeTrampolineJumpPattern::distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
const uword end = pattern_start_ + 2 * Instr::kInstrSize;
Register reg;
intptr_t value;
InstructionPattern::DecodeLoadWordImmediate(end, &reg, &value);
value -= kDistanceOffset;
ASSERT(reg == TMP);
return value;
#else
UNREACHABLE();
return 0;
#endif
}
void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
const uword end = pattern_start_ + 2 * Instr::kInstrSize;
InstructionPattern::EncodeLoadWordImmediate(end, TMP,
distance + kDistanceOffset);
#else
UNREACHABLE();
#endif
}
bool PcRelativeTrampolineJumpPattern::IsValid() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
const uword end = pattern_start_ + 2 * Instr::kInstrSize;
Register reg;
intptr_t value;
InstructionPattern::DecodeLoadWordImmediate(end, &reg, &value);
uint32_t* add_pc =
reinterpret_cast<uint32_t*>(pattern_start_ + 2 * Instr::kInstrSize);
return reg == TMP && *add_pc == kAddPcEncoding;
#else
UNREACHABLE();
return false;
#endif
}
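// Taken together, the trampoline occupies three instructions (a sketch;
// kDistanceOffset presumably absorbs the ARM convention that `pc` reads as
// the current instruction's address plus 8):
//   movw TMP, #lo16(distance + kDistanceOffset)
//   movt TMP, #hi16(distance + kDistanceOffset)
//   add  pc, pc, TMP    ; kAddPcEncoding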
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
// Calls to the type testing stubs look like:
// ldr R9, ...
// ldr Rn, [PP+idx]
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
// blx R9
// or
// ldr Rn, [PP+idx]
// blx pc+<offset>
// where Rn = TypeTestABI::kSubtypeTestCacheReg.
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
// Ensure the caller of the type testing stub (whose return address is [pc_])
// branched via `blx R9` or a pc-relative call.
uword pc = pc_ - Instr::kInstrSize;
const uint32_t blx_r9 = 0xe12fff39;
if (*reinterpret_cast<uint32_t*>(pc) != blx_r9) {
PcRelativeCallPattern pattern(pc);
RELEASE_ASSERT(pattern.IsValid());
}
const uword load_instr_end = pc;
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
Register reg;
intptr_t pool_index = -1;
InstructionPattern::DecodeLoadWordFromPool(load_instr_end, &reg, &pool_index);
ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
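
// The decoded index locates this call site's SubtypeTestCache entry in the
// caller's object pool; by design, the destination name used for error
// reporting is stored at the consecutive pool index.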
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
return pool_index;
}
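
// Hypothetical usage sketch (the real caller lives in the runtime's
// type-check slow path; the local names below are illustrative):
//   TypeTestingStubCallPattern tts_call(caller_return_pc);
//   const intptr_t stc_index = tts_call.GetSubtypeTestCachePoolIndex();
//   const ObjectPool& pool = ObjectPool::Handle(caller_code.GetObjectPool());
//   cache ^= pool.ObjectAt(stc_index);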
}  // namespace dart

#endif  // defined TARGET_ARCH_ARM