2013-01-18 00:34:20 +00:00
|
|
|
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
|
|
|
|
// for details. All rights reserved. Use of this source code is governed by a
|
|
|
|
// BSD-style license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
|
|
|
|
#if defined(TARGET_ARCH_ARM)
|
|
|
|
|
2016-10-27 12:02:20 +00:00
|
|
|
#include "vm/instructions.h"
|
|
|
|
#include "vm/instructions_arm.h"
|
|
|
|
|
2019-04-08 09:45:27 +00:00
|
|
|
#include "vm/constants.h"
|
2013-03-08 21:16:46 +00:00
|
|
|
#include "vm/cpu.h"
|
2013-01-18 00:34:20 +00:00
|
|
|
#include "vm/object.h"
|
2021-01-14 23:06:56 +00:00
|
|
|
#include "vm/object_store.h"
|
2018-12-14 16:11:53 +00:00
|
|
|
#include "vm/reverse_pc_lookup_cache.h"
|
2013-01-18 00:34:20 +00:00
|
|
|
|
|
|
|
namespace dart {
|
|
|
|
|
2013-02-20 17:28:35 +00:00
|
|
|
// Decodes a static call site ending at 'pc' inside 'code'.
//
// The pattern is a load of the target Code object from the object pool into
// CODE_REG (ending at pc - 2 instructions), followed by `blx lr`. On return,
// target_code_pool_index_ holds the pool index of the target Code object.
CallPattern::CallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blx lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);

  // Decode backwards from the instruction before the call to find the pool
  // load that populated CODE_REG.
  Register reg;
  InstructionPattern::DecodeLoadWordFromPool(pc - 2 * Instr::kInstrSize, &reg,
                                             &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
}
|
|
|
|
|
|
|
|
// Decodes an instance call site ending at 'pc' inside 'code'.
//
// The pattern, decoded backwards from `blx lr`, is:
//   1. a pool load of the IC data into R9 (the earlier load), and
//   2. a pool load of the target Code object into CODE_REG (the later load,
//      ending at pc - 2 instructions).
// On return, target_pool_index_ and data_pool_index_ hold the respective
// object-pool indices.
ICCallPattern::ICCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_pool_index_(-1),
      data_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blx lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);

  // The later load (just before the call) targets CODE_REG; its decoded start
  // is the end of the earlier data load.
  Register reg;
  uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - 2 * Instr::kInstrSize, &reg, &target_pool_index_);
  ASSERT(reg == CODE_REG);

  InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
                                             &data_pool_index_);
  ASSERT(reg == R9);
}
|
2013-02-20 17:28:35 +00:00
|
|
|
|
2015-08-21 09:37:50 +00:00
|
|
|
// Decodes a native call site ending at 'pc' inside 'code'.
//
// The pattern, decoded backwards from `blx lr`, is:
//   1. a pool load of the native function address into R9 (earlier load), and
//   2. a pool load of the trampoline Code object into CODE_REG (later load,
//      ending at pc - 2 instructions).
// On return, target_code_pool_index_ and native_function_pool_index_ hold the
// respective object-pool indices.
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      end_(pc),
      native_function_pool_index_(-1),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blx lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 1) == 0xe12fff3e);

  // The later load (just before the call) targets CODE_REG; its decoded start
  // is the end of the earlier native-function load.
  Register reg;
  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
      end_ - 2 * Instr::kInstrSize, &reg, &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
                                             &native_function_pool_index_);
  ASSERT(reg == R9);
}
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
// Returns the Code object currently installed as the call's target.
CodePtr NativeCallPattern::target() const {
  const auto entry = object_pool_.ObjectAt(target_code_pool_index_);
  return static_cast<CodePtr>(entry);
}
|
|
|
|
|
2015-09-19 11:21:09 +00:00
|
|
|
// Redirects this call site to 'new_target' by patching the pool entry.
void NativeCallPattern::set_target(const Code& new_target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, new_target);
  // No need to flush the instruction cache, since the code is not modified.
}
|
|
|
|
|
|
|
|
// Returns the native function pointer stored in the pool for this call site.
NativeFunction NativeCallPattern::native_function() const {
  const uword raw_entry = object_pool_.RawValueAt(native_function_pool_index_);
  return reinterpret_cast<NativeFunction>(raw_entry);
}
|
|
|
|
|
|
|
|
// Replaces the native function pointer in the pool entry for this call site.
void NativeCallPattern::set_native_function(NativeFunction func) const {
  object_pool_.SetRawValueAt(native_function_pool_index_,
                             reinterpret_cast<uword>(func));
}
|
|
|
|
|
2013-10-01 10:22:47 +00:00
|
|
|
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
//
// The recognized sequence is an optional `movt reg, #imm_hi` preceded by a
// mandatory `movw reg, #imm_lo` (ARMv7 16-bit immediate moves; each encodes
// its 16-bit immediate as imm4:imm12 in bits 19..16 and 11..0).
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
                                                  Register* reg,
                                                  intptr_t* value) {
  uword start = end - Instr::kInstrSize;
  int32_t instr = Instr::At(start)->InstructionBits();
  intptr_t imm = 0;
  if ((instr & 0xfff00000) == 0xe3400000) {  // movt reg, #imm_hi
    // Reassemble the high 16 bits: imm4 (bits 19..16) and imm12 (bits 11..0).
    imm |= (instr & 0xf0000) << 12;
    imm |= (instr & 0xfff) << 16;
    start -= Instr::kInstrSize;
    instr = Instr::At(start)->InstructionBits();
  }
  ASSERT((instr & 0xfff00000) == 0xe3000000);  // movw reg, #imm_lo
  // Reassemble the low 16 bits the same way.
  imm |= (instr & 0xf0000) >> 4;
  imm |= instr & 0xfff;
  // The destination register lives in bits 15..12 of both encodings.
  *reg = static_cast<Register>((instr & 0xf000) >> 12);
  *value = imm;
  return start;
}
|
|
|
|
|
2019-01-17 12:32:38 +00:00
|
|
|
// Writes a `movw reg, #imm_lo; movt reg, #imm_hi` pair ending at 'end' that
// loads the 32-bit 'value' into 'reg'. This is the exact inverse of
// DecodeLoadWordImmediate, which is used to verify the encoding in DEBUG mode.
void InstructionPattern::EncodeLoadWordImmediate(uword end,
                                                 Register reg,
                                                 intptr_t value) {
  uint16_t low16 = value & 0xffff;
  uint16_t high16 = (value >> 16) & 0xffff;

  // movw reg, #imm_lo
  uint32_t movw_instr = 0xe3000000;
  movw_instr |= (low16 >> 12) << 16;  // imm4 into bits 19..16.
  movw_instr |= (reg << 12);          // Destination register, bits 15..12.
  movw_instr |= (low16 & 0xfff);      // imm12 into bits 11..0.

  // movt reg, #imm_hi
  uint32_t movt_instr = 0xe3400000;
  movt_instr |= (high16 >> 12) << 16;
  movt_instr |= (reg << 12);
  movt_instr |= (high16 & 0xfff);

  // The sequence ends at 'end': movt is the last instruction, movw the one
  // before it.
  uint32_t* cursor = reinterpret_cast<uint32_t*>(end);
  *(--cursor) = movt_instr;
  *(--cursor) = movw_instr;

#if defined(DEBUG)
  // Round-trip through the decoder to validate the encoding.
  Register decoded_reg;
  intptr_t decoded_value;
  DecodeLoadWordImmediate(end, &decoded_reg, &decoded_value);
  ASSERT(reg == decoded_reg);
  ASSERT(value == decoded_value);
#endif
}
|
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
static bool IsLoadWithOffset(int32_t instr,
|
|
|
|
Register base,
|
|
|
|
intptr_t* offset,
|
|
|
|
Register* dst) {
|
2015-11-12 23:18:31 +00:00
|
|
|
if ((instr & 0xffff0000) == (0xe5900000 | (base << 16))) {
|
|
|
|
// ldr reg, [base, #+offset]
|
|
|
|
*offset = instr & 0xfff;
|
|
|
|
*dst = static_cast<Register>((instr & 0xf000) >> 12);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-10-01 10:22:47 +00:00
|
|
|
// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the index in the pool being read from in the output parameters 'reg'
// and 'index' respectively.
//
// Three pool-load shapes are recognized, from shortest to longest:
//   1. ldr reg, [PP, #+offset]                      (offset fits in 12 bits)
//   2. add reg, PP, #rotated_imm; ldr reg, [reg, #+offset]
//   3. movw/movt reg, #imm; add reg, PP, reg; ldr reg, [reg, #+offset]
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
                                                 Register* reg,
                                                 intptr_t* index) {
  uword start = end - Instr::kInstrSize;
  int32_t instr = Instr::At(start)->InstructionBits();
  intptr_t offset = 0;
  if (IsLoadWithOffset(instr, PP, &offset, reg)) {
    // ldr reg, [PP, #+offset]
  } else {
    // The final instruction must still be a load; the base was materialized
    // by the preceding instruction(s).
    ASSERT((instr & 0xfff00000) == 0xe5900000);  // ldr reg, [reg, #+offset]
    offset = instr & 0xfff;
    start -= Instr::kInstrSize;
    instr = Instr::At(start)->InstructionBits();
    if ((instr & 0xffff0000) == (0xe2850000 | (PP << 16))) {
      // add reg, pp, operand
      // Decode the ARM "rotated immediate": imm8 rotated right by 2*rot4.
      const intptr_t rot = (instr & 0xf00) >> 7;
      const intptr_t imm8 = instr & 0xff;
      offset += (imm8 >> rot) | (imm8 << (32 - rot));
      *reg = static_cast<Register>((instr & 0xf000) >> 12);
    } else {
      ASSERT((instr & 0xffff0000) == (0xe0800000 | (PP << 16)));
      // add reg, pp, reg
      // The register addend itself comes from a movw/movt immediate load.
      intptr_t value = 0;
      start = DecodeLoadWordImmediate(start, reg, &value);
      offset += value;
    }
  }
  *index = ObjectPool::IndexFromOffset(offset);
  return start;
}
|
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
// If the instruction at 'pc' is a single-instruction load of an object from
// the object pool (via PP) or from the thread object (via THR), stores the
// loaded object in *obj and returns true. Returns false for any other
// instruction, including multi-instruction pool loads.
bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
  ASSERT(code.ContainsInstructionAt(pc));

  int32_t instr = Instr::At(pc)->InstructionBits();
  intptr_t offset;
  Register dst;
  if (IsLoadWithOffset(instr, PP, &offset, &dst)) {
    intptr_t index = ObjectPool::IndexFromOffset(offset);
    return ObjectAtPoolIndex(code, index, obj);
  } else if (IsLoadWithOffset(instr, THR, &offset, &dst)) {
    return Thread::ObjectAtOffset(offset, obj);
  }
  // TODO(rmacnak): Sequence for loads beyond 12 bits.

  return false;
}
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
// Returns the Code object currently installed as this call's target.
CodePtr CallPattern::TargetCode() const {
  const auto entry = object_pool_.ObjectAt(target_code_pool_index_);
  return static_cast<CodePtr>(entry);
}
|
|
|
|
|
2015-09-19 11:21:09 +00:00
|
|
|
// Redirects this call site to 'target_code' by patching its pool entry; the
// instruction stream itself is unchanged.
void CallPattern::SetTargetCode(const Code& target_code) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target_code);
}
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
// Returns the call's data object (e.g. IC data) from the pool.
ObjectPtr ICCallPattern::Data() const {
  const auto entry = object_pool_.ObjectAt(data_pool_index_);
  return entry;
}
|
|
|
|
|
|
|
|
// Replaces the call's data pool entry. Only the expected data kinds are
// accepted.
void ICCallPattern::SetData(const Object& data) const {
  ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
  object_pool_.SetObjectAt(data_pool_index_, data);
}
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
// Returns the Code object currently installed as this call's target.
CodePtr ICCallPattern::TargetCode() const {
  const auto entry = object_pool_.ObjectAt(target_pool_index_);
  return static_cast<CodePtr>(entry);
}
|
|
|
|
|
|
|
|
// Redirects this call site to 'target_code' by patching its pool entry.
void ICCallPattern::SetTargetCode(const Code& target_code) const {
  object_pool_.SetObjectAt(target_pool_index_, target_code);
}
|
|
|
|
|
2021-02-22 10:46:28 +00:00
|
|
|
// Base state shared by SwitchableCallPattern and BareSwitchableCallPattern:
// the pool to patch and the (not-yet-decoded) data and target pool indices.
SwitchableCallPatternBase::SwitchableCallPatternBase(
    const ObjectPool& object_pool)
    : object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}
|
2018-12-14 16:11:53 +00:00
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
// Returns the call's data object from the pool.
ObjectPtr SwitchableCallPatternBase::data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}
|
|
|
|
|
|
|
|
// Replaces the call's data pool entry. The slot must not currently hold a
// Code object (that would indicate we are patching the wrong slot).
void SwitchableCallPatternBase::SetData(const Object& data) const {
  ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
  object_pool_.SetObjectAt(data_pool_index_, data);
}
|
|
|
|
|
|
|
|
// Decodes a switchable call site ending at 'pc' inside 'code' (JIT mode, with
// a per-Code object pool).
//
// The pattern, decoded backwards from `blx lr`, is a pool load of the data
// into R9 (ending at pc - 1 instruction) preceded by a pool load of the
// target Code object into CODE_REG.
SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
    : SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blx lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);

  Register reg;
  uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - Instr::kInstrSize, &reg, &data_pool_index_);
  ASSERT(reg == R9);
  // Skip one instruction between the two loads before decoding the target.
  InstructionPattern::DecodeLoadWordFromPool(data_load_end - Instr::kInstrSize,
                                             &reg, &target_pool_index_);
  ASSERT(reg == CODE_REG);
}
|
|
|
|
|
2021-02-04 17:36:47 +00:00
|
|
|
// Returns the monomorphic entry point of the Code object currently installed
// as this call's target.
uword SwitchableCallPattern::target_entry() const {
  return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
      .MonomorphicEntryPoint();
}
|
2021-02-04 17:36:47 +00:00
|
|
|
|
2016-08-12 18:18:35 +00:00
|
|
|
// Redirects this call site to 'target'. The slot must already hold a Code
// object (sanity check that we are patching the target, not the data, slot).
void SwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
  object_pool_.SetObjectAt(target_pool_index_, target);
}
|
|
|
|
|
2021-02-22 10:46:28 +00:00
|
|
|
// Decodes a switchable call site at 'pc' in bare-instructions (AOT) mode,
// where all code shares the isolate group's global object pool.
//
// The pattern, decoded backwards from `blx lr`, is a pool load of the data
// into R9 (ending at pc - 1 instruction) preceded by a pool load of the raw
// target entry point into LR.
BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
    : SwitchableCallPatternBase(ObjectPool::Handle(
          IsolateGroup::Current()->object_store()->global_object_pool())) {
  // Last instruction: blx lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xe12fff3e);

  Register reg;
  uword data_load_end = InstructionPattern::DecodeLoadWordFromPool(
      pc - Instr::kInstrSize, &reg, &data_pool_index_);
  ASSERT(reg == R9);

  InstructionPattern::DecodeLoadWordFromPool(data_load_end, &reg,
                                             &target_pool_index_);
  ASSERT(reg == LINK_REGISTER);
}
|
|
|
|
|
2021-02-04 17:36:47 +00:00
|
|
|
// Returns the raw entry-point address stored in the pool (in bare mode the
// pool holds the entry point directly, not a Code object).
uword BareSwitchableCallPattern::target_entry() const {
  return object_pool_.RawValueAt(target_pool_index_);
}
|
|
|
|
|
|
|
|
// Redirects this call site to 'target' by storing its monomorphic entry
// point as a raw immediate in the pool slot.
void BareSwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
         ObjectPool::EntryType::kImmediate);
  object_pool_.SetRawValueAt(target_pool_index_,
                             target.MonomorphicEntryPoint());
}
|
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
// Records the address of the instruction to be checked by IsValid().
ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}
|
2015-02-26 18:48:55 +00:00
|
|
|
|
|
|
|
// Returns true if the instruction at pc_ is exactly `bx lr` with the AL
// (always) condition. The expected word is assembled bit-by-bit below and
// compared against the actual instruction.
bool ReturnPattern::IsValid() const {
  Instr* bx_lr = Instr::At(pc_);
  const int32_t B4 = 1 << 4;
  const int32_t B21 = 1 << 21;
  const int32_t B24 = 1 << 24;
  // cond=AL | B24 | B21 | (0xfff << 8) | B4 | Rm=LR  ==  bx lr
  int32_t instruction = (static_cast<int32_t>(AL) << kConditionShift) | B24 |
                        B21 | (0xfff << 8) | B4 |
                        (LINK_REGISTER.code << kRmShift);
  return bx_lr->InstructionBits() == instruction;
}
|
|
|
|
|
2018-11-01 12:34:42 +00:00
|
|
|
bool PcRelativeCallPattern::IsValid() const {
|
2019-01-17 12:32:38 +00:00
|
|
|
// bl.<cond> <offset>
|
2018-11-01 12:34:42 +00:00
|
|
|
const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
|
2020-04-15 14:10:53 +00:00
|
|
|
const uint32_t branch = 0x05;
|
|
|
|
const uword type = ((word >> kTypeShift) & ((1 << kTypeBits) - 1));
|
|
|
|
const uword link = ((word >> kLinkShift) & ((1 << kLinkBits) - 1));
|
|
|
|
return type == branch && link == 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool PcRelativeTailCallPattern::IsValid() const {
|
|
|
|
// b.<cond> <offset>
|
|
|
|
const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
|
|
|
|
const uint32_t branch = 0x05;
|
|
|
|
const uword type = ((word >> kTypeShift) & ((1 << kTypeBits) - 1));
|
|
|
|
const uword link = ((word >> kLinkShift) & ((1 << kLinkBits) - 1));
|
|
|
|
return type == branch && link == 0;
|
2018-11-01 12:34:42 +00:00
|
|
|
}
|
|
|
|
|
2019-01-17 12:32:38 +00:00
|
|
|
// Writes the trampoline jump pattern at pattern_start_: the third instruction
// is the fixed add-to-pc encoding, and the preceding movw/movt pair (written
// by set_distance) is initialized to distance 0. JIT-only.
void PcRelativeTrampolineJumpPattern::Initialize() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* add_pc =
      reinterpret_cast<uint32_t*>(pattern_start_ + 2 * Instr::kInstrSize);
  *add_pc = kAddPcEncoding;
  set_distance(0);
#else
  UNREACHABLE();
#endif
}
|
|
|
|
|
|
|
|
// Reads back the jump distance encoded in the trampoline's movw/movt pair,
// undoing the kDistanceOffset bias applied by set_distance. JIT-only.
int32_t PcRelativeTrampolineJumpPattern::distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  const uword end = pattern_start_ + 2 * Instr::kInstrSize;
  Register reg;
  intptr_t value;
  InstructionPattern::DecodeLoadWordImmediate(end, &reg, &value);
  value -= kDistanceOffset;
  ASSERT(reg == TMP);
  return value;
#else
  UNREACHABLE();
  return 0;
#endif
}
|
|
|
|
|
|
|
|
// Encodes 'distance' (biased by kDistanceOffset) into the trampoline's
// movw/movt pair, which loads TMP for the subsequent add-to-pc. JIT-only.
void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
  const uword end = pattern_start_ + 2 * Instr::kInstrSize;
  InstructionPattern::EncodeLoadWordImmediate(end, TMP,
                                              distance + kDistanceOffset);
#else
  UNREACHABLE();
#endif
}
|
|
|
|
|
|
|
|
// Returns true if pattern_start_ holds a well-formed trampoline jump: a
// movw/movt pair loading TMP followed by the fixed add-to-pc instruction.
// JIT-only.
bool PcRelativeTrampolineJumpPattern::IsValid() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
  const uword end = pattern_start_ + 2 * Instr::kInstrSize;
  Register reg;
  intptr_t value;
  InstructionPattern::DecodeLoadWordImmediate(end, &reg, &value);

  uint32_t* add_pc =
      reinterpret_cast<uint32_t*>(pattern_start_ + 2 * Instr::kInstrSize);

  return reg == TMP && *add_pc == kAddPcEncoding;
#else
  UNREACHABLE();
  return false;
#endif
}
|
|
|
|
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
|
|
|
|
// Calls to the type testing stubs look like:
|
2020-04-18 20:27:38 +00:00
|
|
|
// ldr R9, ...
|
[vm/compiler] Unify GenerateAssertAssignable on non-IA32 archs.
The differences between the different implementations are very
minor, so abstract out the few that remain into Assembler:
* Change Load<X>FromPoolOffset to Load<X>FromPoolIndex and do pool
offset calculations internally, since all caller sites have the pool
index. (Especially important because the pool register is tagged on
X64 and ARM and untagged on ARM64, so this abstracts that away.)
* Add default for pool register argument on ARM, since ARM64 already had
one and X64 doesn't take a pool register argument.
Other changes:
* Use specific TestTypeABI registers within the helper method that adds
caller-side checks instead of passing registers as arguments and
document which registers are used for input or output (and when, if
they are used conditionally).
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-nnbd-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try
Change-Id: Ifc7a0eaa6aacf7f629aa9647b028500648af653d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/167803
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-10-26 19:41:46 +00:00
|
|
|
// ldr Rn, [PP+idx]
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
// blx R9
|
2020-04-18 20:27:38 +00:00
|
|
|
// or
|
[vm/compiler] Unify GenerateAssertAssignable on non-IA32 archs.
The differences between the different implementations are very
minor, so abstract out the few that remain into Assembler:
* Change Load<X>FromPoolOffset to Load<X>FromPoolIndex and do pool
offset calculations internally, since all caller sites have the pool
index. (Especially important because the pool register is tagged on
X64 and ARM and untagged on ARM64, so this abstracts that away.)
* Add default for pool register argument on ARM, since ARM64 already had
one and X64 doesn't take a pool register argument.
Other changes:
* Use specific TestTypeABI registers within the helper method that adds
caller-side checks instead of passing registers as arguments and
document which registers are used for input or output (and when, if
they are used conditionally).
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-nnbd-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try
Change-Id: Ifc7a0eaa6aacf7f629aa9647b028500648af653d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/167803
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-10-26 19:41:46 +00:00
|
|
|
// ldr Rn, [PP+idx]
|
2020-04-18 20:27:38 +00:00
|
|
|
// blx pc+<offset>
|
[vm/compiler] Unify GenerateAssertAssignable on non-IA32 archs.
The differences between the different implementations are very
minor, so abstract out the few that remain into Assembler:
* Change Load<X>FromPoolOffset to Load<X>FromPoolIndex and do pool
offset calculations internally, since all caller sites have the pool
index. (Especially important because the pool register is tagged on
X64 and ARM and untagged on ARM64, so this abstracts that away.)
* Add default for pool register argument on ARM, since ARM64 already had
one and X64 doesn't take a pool register argument.
Other changes:
* Use specific TestTypeABI registers within the helper method that adds
caller-side checks instead of passing registers as arguments and
document which registers are used for input or output (and when, if
they are used conditionally).
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-nnbd-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try
Change-Id: Ifc7a0eaa6aacf7f629aa9647b028500648af653d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/167803
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-10-26 19:41:46 +00:00
|
|
|
// where Rn = TypeTestABI::kSubtypeTestCacheReg.
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
|
|
|
|
// Ensure the caller of the type testing stub (whose return address is [pc_])
|
2020-04-18 20:27:38 +00:00
|
|
|
// branched via `blx R9` or a pc-relative call.
|
|
|
|
uword pc = pc_ - Instr::kInstrSize;
|
2020-07-15 21:00:56 +00:00
|
|
|
const uint32_t blx_r9 = 0xe12fff39;
|
2020-04-18 20:27:38 +00:00
|
|
|
if (*reinterpret_cast<uint32_t*>(pc) != blx_r9) {
|
|
|
|
PcRelativeCallPattern pattern(pc);
|
|
|
|
RELEASE_ASSERT(pattern.IsValid());
|
|
|
|
}
|
|
|
|
|
|
|
|
const uword load_instr_end = pc;
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
|
|
|
|
Register reg;
|
|
|
|
intptr_t pool_index = -1;
|
|
|
|
InstructionPattern::DecodeLoadWordFromPool(load_instr_end, ®, &pool_index);
|
[vm/compiler] Unify GenerateAssertAssignable on non-IA32 archs.
The differences between the different implementations are very
minor, so abstract out the few that remain into Assembler:
* Change Load<X>FromPoolOffset to Load<X>FromPoolIndex and do pool
offset calculations internally, since all caller sites have the pool
index. (Especially important because the pool register is tagged on
X64 and ARM and untagged on ARM64, so this abstracts that away.)
* Add default for pool register argument on ARM, since ARM64 already had
one and X64 doesn't take a pool register argument.
Other changes:
* Use specific TestTypeABI registers within the helper method that adds
caller-side checks instead of passing registers as arguments and
document which registers are used for input or output (and when, if
they are used conditionally).
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-nnbd-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try
Change-Id: Ifc7a0eaa6aacf7f629aa9647b028500648af653d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/167803
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-10-26 19:41:46 +00:00
|
|
|
ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
return pool_index;
|
|
|
|
}
|
|
|
|
|
2013-01-18 00:34:20 +00:00
|
|
|
} // namespace dart
|
|
|
|
|
|
|
|
#endif // defined TARGET_ARCH_ARM
|