// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64)

#include "vm/instructions.h"
#include "vm/instructions_arm64.h"

#include "vm/constants.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/reverse_pc_lookup_cache.h"

namespace dart {

CallPattern::CallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
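  // (0xd63f03c0 is the fixed encoding of `blr x30`, i.e. `blr lr`.)
  //
  // A sketch of the call sequence this pattern expects (the middle
  // instruction is an assumption based on the surrounding asserts; the
  // exact form of the pool load varies with the pool offset, see
  // DecodeLoadWordFromPool below):
  //   ldr CODE_REG, [PP, ...]   ; load target Code from the object pool
  //   ldr lr, [CODE_REG, ...]   ; load its entry point
  //   blr lr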

  Register reg;
  InstructionPattern::DecodeLoadWordFromPool(pc - 2 * Instr::kInstrSize, &reg,
                                             &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
}

ICCallPattern::ICCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      target_pool_index_(-1),
      data_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);
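  // A sketch of the expected sequence (the ldp fills both registers from
  // adjacent pool entries; the entry-point load is an assumption based on
  // the surrounding asserts):
  //   ldp R5, CODE_REG, [PP, ...]   ; data and target Code
  //   ldr lr, [CODE_REG, ...]
  //   blr lr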

  Register data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - 2 * Instr::kInstrSize, &data_reg, &code_reg, &pool_index);
  ASSERT(data_reg == R5);
  ASSERT(code_reg == CODE_REG);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}

NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
    : object_pool_(ObjectPool::Handle(code.GetObjectPool())),
      end_(pc),
      native_function_pool_index_(-1),
      target_code_pool_index_(-1) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(end_) - 1) == 0xd63f03c0);
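  // The two pool loads are decoded back to front below: the load closest to
  // the call fills CODE_REG with the target Code, and the one before it
  // fills R5 with the native function. In program order, roughly:
  //   ldr R5, [PP, ...]         ; native function
  //   ldr CODE_REG, [PP, ...]   ; target Code
  //   ldr lr, [CODE_REG, ...]   ; assumed entry-point load
  //   blr lr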

  Register reg;
  uword native_function_load_end = InstructionPattern::DecodeLoadWordFromPool(
      end_ - 2 * Instr::kInstrSize, &reg, &target_code_pool_index_);
  ASSERT(reg == CODE_REG);
  InstructionPattern::DecodeLoadWordFromPool(native_function_load_end, &reg,
                                             &native_function_pool_index_);
  ASSERT(reg == R5);
}

CodePtr NativeCallPattern::target() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void NativeCallPattern::set_target(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

NativeFunction NativeCallPattern::native_function() const {
  return reinterpret_cast<NativeFunction>(
      object_pool_.RawValueAt(native_function_pool_index_));
}

void NativeCallPattern::set_native_function(NativeFunction func) const {
  object_pool_.SetRawValueAt(native_function_pool_index_,
                             reinterpret_cast<uword>(func));
}

// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded object in the output parameters 'reg' and 'obj'
// respectively.
uword InstructionPattern::DecodeLoadObject(uword end,
                                           const ObjectPool& object_pool,
                                           Register* reg,
                                           Object* obj) {
  // 1. LoadWordFromPool
  //    or
  // 2. LoadDecodableImmediate
  uword start = 0;
  Instr* instr = Instr::At(end - Instr::kInstrSize);
  if (instr->IsLoadStoreRegOp()) {
    // Case 1.
    intptr_t index = 0;
    start = DecodeLoadWordFromPool(end, reg, &index);
    *obj = object_pool.ObjectAt(index);
  } else {
    // Case 2.
    intptr_t value = 0;
    start = DecodeLoadWordImmediate(end, reg, &value);
    *obj = static_cast<ObjectPtr>(value);
  }
  return start;
}

// Decodes a load sequence ending at 'end' (the last instruction of the load
// sequence is the instruction before the one at end). Returns a pointer to
// the first instruction in the sequence. Returns the register being loaded
// and the loaded immediate value in the output parameters 'reg' and 'value'
// respectively.
uword InstructionPattern::DecodeLoadWordImmediate(uword end,
                                                  Register* reg,
                                                  intptr_t* value) {
  // 1. LoadWordFromPool
  //    or
  // 2. LoadWordFromPool
  //    orri
  //    or
  // 3. LoadPatchableImmediate
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  bool odd = false;

  // Case 2.
  if (instr->IsLogicalImmOp()) {
    ASSERT(instr->Bit(29) == 1);
    odd = true;
    // end points at orri so that we can pass it to DecodeLoadWordFromPool.
    end = start;
    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    // Case 2 falls through to case 1.
  }

  // Case 1.
  if (instr->IsLoadStoreRegOp()) {
    start = DecodeLoadWordFromPool(end, reg, value);
    if (odd) {
      *value |= 1;
    }
    return start;
  }

  // Case 3.
  // movk dst, imm3, 3; movk dst, imm2, 2; movk dst, imm1, 1; movz dst, imm0, 0
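  // (Listed in decode order; the emitter produces the movz first. For
  // example, materializing 0x1122334455667788 would be emitted as
  //   movz dst, #0x7788, lsl #0
  //   movk dst, #0x5566, lsl #16
  //   movk dst, #0x3344, lsl #32
  //   movk dst, #0x1122, lsl #48
  // and is decoded here back to front.)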
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 3);  // movk dst, imm3, 3
  *reg = instr->RdField();
  *value = static_cast<int64_t>(instr->Imm16Field()) << 48;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 2);  // movk dst, imm2, 2
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 32;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 3);
  ASSERT(instr->HWField() == 1);  // movk dst, imm1, 1
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field()) << 16;

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  ASSERT(instr->IsMoveWideOp());
  ASSERT(instr->Bits(29, 2) == 2);
  ASSERT(instr->HWField() == 0);  // movz dst, imm0, 0
  ASSERT(instr->RdField() == *reg);
  *value |= static_cast<int64_t>(instr->Imm16Field());

  return start;
}

// See comment in instructions_arm64.h
uword InstructionPattern::DecodeLoadWordFromPool(uword end,
                                                 Register* reg,
                                                 intptr_t* index) {
  // 1. ldr dst, [pp, offset]
  //    or
  // 2. add dst, pp, #offset_hi12
  //    ldr dst [dst, #offset_lo12]
  //    or
  // 3. movz dst, low_offset, 0
  //    movk dst, hi_offset, 1 (optional)
  //    ldr dst, [pp, dst]
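  // (In case 1 the unsigned 12-bit immediate is scaled by 8 for the 64-bit
  // load, reaching pool offsets below 32 KB; cases 2 and 3 extend the reach
  // for larger pools.)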
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  intptr_t offset = 0;

  // Last instruction is always an ldr into a 64-bit X register.
  ASSERT(instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
         (instr->Bits(30, 2) == 3));

  // Grab the destination register from the ldr instruction.
  *reg = instr->RtField();

  if (instr->Bit(24) == 1) {
    // base + scaled unsigned 12-bit immediate offset.
    // Case 1.
    offset |= (instr->Imm12Field() << 3);
    if (instr->RnField() == *reg) {
      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsAddSubImmOp());
      ASSERT(instr->RnField() == PP);
      ASSERT(instr->RdField() == *reg);
      offset |= (instr->Imm12Field() << 12);
    }
  } else {
    ASSERT(instr->Bits(10, 2) == 2);
    // We have to look at the preceding one or two instructions to find the
    // offset.

    start -= Instr::kInstrSize;
    instr = Instr::At(start);
    ASSERT(instr->IsMoveWideOp());
    ASSERT(instr->RdField() == *reg);
    if (instr->Bits(29, 2) == 2) {  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset = instr->Imm16Field();
      // no high offset.
    } else {
      ASSERT(instr->Bits(29, 2) == 3);  // movk dst, high_offset, 1
      ASSERT(instr->HWField() == 1);
      offset = instr->Imm16Field() << 16;

      start -= Instr::kInstrSize;
      instr = Instr::At(start);
      ASSERT(instr->IsMoveWideOp());
      ASSERT(instr->RdField() == *reg);
      ASSERT(instr->Bits(29, 2) == 2);  // movz dst, low_offset, 0
      ASSERT(instr->HWField() == 0);
      offset |= instr->Imm16Field();
    }
  }
  // PP is untagged on ARM64.
  ASSERT(Utils::IsAligned(offset, 8));
  *index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
  return start;
}

// See comment in instructions_arm64.h
uword InstructionPattern::DecodeLoadDoubleWordFromPool(uword end,
                                                       Register* reg1,
                                                       Register* reg2,
                                                       intptr_t* index) {
  // Cases:
  //
  // 1. ldp reg1, reg2, [pp, offset]
  //
  // 2. add tmp, pp, #upper12
  //    ldp reg1, reg2, [tmp, #lower12]
  //
  // 3. add tmp, pp, #upper12
  //    add tmp, tmp, #lower12
  //    ldp reg1, reg2, [tmp, 0]
  //
  // Note that the pp register is untagged!
  //
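  // (The ldp's signed 7-bit immediate is scaled by 8, so case 1 only covers
  // offsets within about +/-512 bytes of the base; the add forms extend the
  // reach.)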
  uword start = end - Instr::kInstrSize;
  Instr* ldr_instr = Instr::At(start);

  // Last instruction is always an ldp into two 64-bit X registers.
  ASSERT(ldr_instr->IsLoadStoreRegPairOp() && (ldr_instr->Bit(22) == 1));

  // Grab the destination register from the ldp instruction.
  *reg1 = ldr_instr->RtField();
  *reg2 = ldr_instr->Rt2Field();

  Register base_reg = ldr_instr->RnField();
  const int base_offset = 8 * ldr_instr->Imm7Field();

  intptr_t pool_offset = 0;
  if (base_reg == PP) {
    // Case 1.
    pool_offset = base_offset;
  } else {
    // Case 2 & 3.
    ASSERT(base_reg == TMP);

    pool_offset = base_offset;

    start -= Instr::kInstrSize;
    Instr* add_instr = Instr::At(start);
    ASSERT(add_instr->IsAddSubImmOp());
    ASSERT(add_instr->RdField() == TMP);

    const auto shift = add_instr->Imm12ShiftField();
    ASSERT(shift == 0 || shift == 1);
    pool_offset += (add_instr->Imm12Field() << (shift == 1 ? 12 : 0));

    if (add_instr->RnField() == TMP) {
      start -= Instr::kInstrSize;
      Instr* prev_add_instr = Instr::At(start);
      ASSERT(prev_add_instr->IsAddSubImmOp());
      ASSERT(prev_add_instr->RnField() == PP);

      const auto shift = prev_add_instr->Imm12ShiftField();
      ASSERT(shift == 0 || shift == 1);
      pool_offset += (prev_add_instr->Imm12Field() << (shift == 1 ? 12 : 0));
    } else {
      ASSERT(add_instr->RnField() == PP);
    }
  }
  *index = ObjectPool::IndexFromOffset(pool_offset - kHeapObjectTag);
  return start;
}

bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
  ASSERT(code.ContainsInstructionAt(pc));

  Instr* instr = Instr::At(pc);
  if (instr->IsLoadStoreRegOp() && (instr->Bit(22) == 1) &&
      (instr->Bits(30, 2) == 3) && instr->Bit(24) == 1) {
    intptr_t offset = (instr->Imm12Field() << 3);
    if (instr->RnField() == PP) {
      // PP is untagged on ARM64.
      ASSERT(Utils::IsAligned(offset, 8));
      intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
      const ObjectPool& pool = ObjectPool::Handle(code.GetObjectPool());
      if (!pool.IsNull() && (index < pool.Length()) &&
          (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject)) {
        *obj = pool.ObjectAt(index);
        return true;
      }
    } else if (instr->RnField() == THR) {
      return Thread::ObjectAtOffset(offset, obj);
    }
  }
  // TODO(rmacnak): Loads with offsets beyond 12 bits.

  return false;
}

// Encodes a load sequence ending at 'end'. Encodes a fixed-length,
// two-instruction load from the pool pointer in PP, using the destination
// register reg as a temporary for the base address.
// Assumes that the location has already been validated for patching.
void InstructionPattern::EncodeLoadWordFromPoolFixed(uword end,
                                                     int32_t offset) {
  uword start = end - Instr::kInstrSize;
  Instr* instr = Instr::At(start);
  const int32_t upper12 = offset & 0x00fff000;
  const int32_t lower12 = offset & 0x00000fff;
  ASSERT((offset & 0xff000000) == 0);        // Can't encode > 24 bits.
  ASSERT(((lower12 >> 3) << 3) == lower12);  // 8-byte aligned.
  instr->SetImm12Bits(instr->InstructionBits(), lower12 >> 3);

  start -= Instr::kInstrSize;
  instr = Instr::At(start);
  instr->SetImm12Bits(instr->InstructionBits(), upper12 >> 12);
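  // Setting B22 (the shift bit of the add-immediate form) marks the add's
  // 12-bit immediate as shifted left by 12, matching how the decoders above
  // read the upper half of the offset.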
  instr->SetInstructionBits(instr->InstructionBits() | B22);
}

CodePtr CallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_code_pool_index_));
}

void CallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_code_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

ObjectPtr ICCallPattern::Data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void ICCallPattern::SetData(const Object& data) const {
  ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

CodePtr ICCallPattern::TargetCode() const {
  return static_cast<CodePtr>(object_pool_.ObjectAt(target_pool_index_));
}

void ICCallPattern::SetTargetCode(const Code& target) const {
  object_pool_.SetObjectAt(target_pool_index_, target);
  // No need to flush the instruction cache, since the code is not modified.
}

SwitchableCallPatternBase::SwitchableCallPatternBase(
    const ObjectPool& object_pool)
    : object_pool_(object_pool), data_pool_index_(-1), target_pool_index_(-1) {}

ObjectPtr SwitchableCallPatternBase::data() const {
  return object_pool_.ObjectAt(data_pool_index_);
}

void SwitchableCallPatternBase::SetData(const Object& data) const {
  ASSERT(!Object::Handle(object_pool_.ObjectAt(data_pool_index_)).IsCode());
  object_pool_.SetObjectAt(data_pool_index_, data);
}

SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
    : SwitchableCallPatternBase(ObjectPool::Handle(code.GetObjectPool())) {
  ASSERT(code.ContainsInstructionAt(pc));
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);

  Register ic_data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - 2 * Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
  ASSERT(ic_data_reg == R5);
  ASSERT(code_reg == CODE_REG);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}

uword SwitchableCallPattern::target_entry() const {
  return Code::Handle(Code::RawCast(object_pool_.ObjectAt(target_pool_index_)))
      .MonomorphicEntryPoint();
}

void SwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(Object::Handle(object_pool_.ObjectAt(target_pool_index_)).IsCode());
  object_pool_.SetObjectAt(target_pool_index_, target);
}

BareSwitchableCallPattern::BareSwitchableCallPattern(uword pc)
    : SwitchableCallPatternBase(ObjectPool::Handle(
          IsolateGroup::Current()->object_store()->global_object_pool())) {
  // Last instruction: blr lr.
  ASSERT(*(reinterpret_cast<uint32_t*>(pc) - 1) == 0xd63f03c0);

  Register ic_data_reg, code_reg;
  intptr_t pool_index;
  InstructionPattern::DecodeLoadDoubleWordFromPool(
      pc - Instr::kInstrSize, &ic_data_reg, &code_reg, &pool_index);
  ASSERT(ic_data_reg == R5);
  ASSERT(code_reg == LINK_REGISTER);

  data_pool_index_ = pool_index;
  target_pool_index_ = pool_index + 1;
}

uword BareSwitchableCallPattern::target_entry() const {
  return object_pool_.RawValueAt(target_pool_index_);
}

void BareSwitchableCallPattern::SetTarget(const Code& target) const {
  ASSERT(object_pool_.TypeAt(target_pool_index_) ==
         ObjectPool::EntryType::kImmediate);
  object_pool_.SetRawValueAt(target_pool_index_,
                             target.MonomorphicEntryPoint());
}

ReturnPattern::ReturnPattern(uword pc) : pc_(pc) {}

bool ReturnPattern::IsValid() const {
  Instr* bx_lr = Instr::At(pc_);
  const Register crn = ConcreteRegister(LINK_REGISTER);
  const int32_t instruction = RET | (static_cast<int32_t>(crn) << kRnShift);
  return bx_lr->InstructionBits() == instruction;
}

bool PcRelativeCallPattern::IsValid() const {
  // bl <offset>
  const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
  const uint32_t branch_link = 0x25;
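  // 0x25 == 0b100101, the 6-bit opcode of `bl`; the remaining 26 bits hold a
  // signed word offset, giving a branch range of +/-128 MB.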
  return (word >> 26) == branch_link;
}

bool PcRelativeTailCallPattern::IsValid() const {
  // b <offset>
  const uint32_t word = *reinterpret_cast<uint32_t*>(pc_);
  const uint32_t branch = 0x5;
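  // 0x5 == 0b000101, the 6-bit opcode of the plain `b` instruction.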
  return (word >> 26) == branch;
}

void PcRelativeTrampolineJumpPattern::Initialize() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
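  // A rough sketch of the trampoline laid out below (the exact register
  // choices and encodings are defined by the kAdrEncoding, kMovzEncoding,
  // kAddTmpTmp2 and kJumpEncoding constants in instructions_arm64.h):
  //   adr  tmp, #<low16 of distance>
  //   movz tmp2, #<high16 of distance>, lsl #16
  //   add  tmp, tmp, tmp2
  //   br   tmp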
  pattern[0] = kAdrEncoding;
  pattern[1] = kMovzEncoding;
  pattern[2] = kAddTmpTmp2;
  pattern[3] = kJumpEncoding;
  set_distance(0);
#else
  UNREACHABLE();
#endif
}

int32_t PcRelativeTrampolineJumpPattern::distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  const uint32_t adr = pattern[0];
  const uint32_t movz = pattern[1];
  const uint32_t lower16 =
      (((adr >> 5) & ((1 << 19) - 1)) << 2) | ((adr >> 29) & 0x3);
  const uint32_t higher16 = (movz >> kImm16Shift) & 0xffff;
  return (higher16 << 16) | lower16;
#else
  UNREACHABLE();
  return 0;
#endif
}

void PcRelativeTrampolineJumpPattern::set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  uint32_t low16 = distance & 0xffff;
  uint32_t high16 = (distance >> 16) & 0xffff;
  pattern[0] = kAdrEncoding | ((low16 & 0x3) << 29) | ((low16 >> 2) << 5);
  pattern[1] = kMovzEncoding | (high16 << kImm16Shift);
  ASSERT(IsValid());
#else
  UNREACHABLE();
#endif
}

bool PcRelativeTrampolineJumpPattern::IsValid() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
  const uint32_t adr_mask = (3 << 29) | (((1 << 19) - 1) << 5);
  const uint32_t movz_mask = 0xffff << 5;
  uint32_t* pattern = reinterpret_cast<uint32_t*>(pattern_start_);
  return ((pattern[0] & ~adr_mask) == kAdrEncoding) &&
         ((pattern[1] & ~movz_mask) == kMovzEncoding) &&
         (pattern[2] == kAddTmpTmp2) && (pattern[3] == kJumpEncoding);
#else
  UNREACHABLE();
  return false;
#endif
}
intptr_t TypeTestingStubCallPattern::GetSubtypeTestCachePoolIndex() {
  // Calls to the type testing stubs look like:
  //   ldr R9, ...
  //   ldr Rn, [PP+idx]
  //   blr R9
  // or
  //   ldr Rn, [PP+idx]
  //   blr pc+<offset>
  // where Rn = TypeTestABI::kSubtypeTestCacheReg.

  // Ensure the caller of the type testing stub (whose return address is [pc_])
  // branched via `blr R9` or a pc-relative call.
  uword pc = pc_ - Instr::kInstrSize;
  const uword blr_r9 = 0xd63f0120;
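  // (0xd63f0120 is the encoding of `blr x9`, i.e. the `blr R9` form above.)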
|
|
|
|
if (*reinterpret_cast<uint32_t*>(pc) != blr_r9) {
|
|
|
|
PcRelativeCallPattern pattern(pc);
|
|
|
|
RELEASE_ASSERT(pattern.IsValid());
|
|
|
|
}
|
|
|
|
|
|
|
|
const uword load_instr_end = pc;
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
|
|
|
|
Register reg;
|
|
|
|
intptr_t pool_index = -1;
|
|
|
|
InstructionPattern::DecodeLoadWordFromPool(load_instr_end, ®, &pool_index);
|
[vm/compiler] Unify GenerateAssertAssignable on non-IA32 archs.
The differences between the different implementations are very
minor, so abstract out the few that remain into Assembler:
* Change Load<X>FromPoolOffset to Load<X>FromPoolIndex and do pool
offset calculations internally, since all caller sites have the pool
index. (Especially important because the pool register is tagged on
X64 and ARM and untagged on ARM64, so this abstracts that away.)
* Add default for pool register argument on ARM, since ARM64 already had
one and X64 doesn't take a pool register argument.
Other changes:
* Use specific TestTypeABI registers within the helper method that adds
caller-side checks instead of passing registers as arguments and
document which registers are used for input or output (and when, if
they are used conditionally).
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-nnbd-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try
Change-Id: Ifc7a0eaa6aacf7f629aa9647b028500648af653d
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/167803
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-10-26 19:41:46 +00:00
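To make the tagged-vs-untagged difference concrete, here is a hedged
sketch of the index-to-offset computation such a Load<X>FromPoolIndex
helper could perform internally; the constants are invented for
illustration and do not match the VM's actual object layout:

#include <cstdint>

// Illustrative constants: size of a pool slot, offset of the first slot
// inside the pool object, and the heap-object tag bias.
constexpr intptr_t kWordSize = 8;
constexpr intptr_t kPoolPayloadOffset = 16;  // invented value
constexpr intptr_t kHeapObjectTag = 1;

// Offset relative to an untagged pool pointer (as on ARM64).
constexpr intptr_t UntaggedPoolOffset(intptr_t index) {
  return kPoolPayloadOffset + index * kWordSize;
}

// Offset relative to a tagged pool pointer (as on X64 and ARM): the tag
// bias is subtracted once inside the helper, which is what lets call
// sites pass plain pool indices on every architecture.
constexpr intptr_t TaggedPoolOffset(intptr_t index) {
  return UntaggedPoolOffset(index) - kHeapObjectTag;
}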
|
|
|
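// The load must target the register reserved for the SubtypeTestCache by
// the type-testing ABI (TypeTestABI::kSubtypeTestCacheReg).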
ASSERT_EQUAL(reg, TypeTestABI::kSubtypeTestCacheReg);
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object; only the [Instructions] object is necessary.
The type testing stubs do not set up a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs
(a rough sketch of the call shape this part enables follows below).
Reviewed-on: https://dart-review.googlesource.com/44787
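A loose sketch of that call shape, in plain C++ with invented structs
(the real code emits machine instructions and stores the entrypoint in
[RawAbstractType]):

#include <cstdint>

// Invented layout: the type object carries the stub entrypoint directly,
// so an assert-assignable check can branch there without first loading a
// [Code] object or setting up a pool.
struct AbstractTypeSketch {
  uintptr_t type_test_stub_entry_point;  // default stub until specialized
};

using TypeTestStub = bool (*)(void* instance, AbstractTypeSketch* type);

inline bool CheckAssignable(void* instance, AbstractTypeSketch* type) {
  // One indirect branch; the stub sets up no frame, has no safepoint, and
  // tail-calls a general-purpose stub when it cannot answer positively.
  return reinterpret_cast<TypeTestStub>(
      type->type_test_stub_entry_point)(instance, type);
}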
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialized for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to the stub for the negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter at the call site and invoking its type testing stub.
* Specialized type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks (see the range-check sketch below).
==> e.g. String/List<dynamic>
* Specialized type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialized type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid-based comparisons for type parameters. (Note that this fast case might
result in some false negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as keys/values (and not subclasses of them). False negatives
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based implementation.
Reviewed-on: https://dart-review.googlesource.com/44788
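The [CidRange]-based fast path boils down to a few range comparisons on
the instance's class id; roughly (a sketch, not the emitted stub code):

#include <cstdint>

struct CidRange {
  int32_t start;
  int32_t end;  // inclusive
};

// Returns true if the class id falls into any range known to satisfy the
// check (e.g. all classes extending Widget). A miss does not mean "not a
// subtype": the stub then falls back to the general,
// [SubtypeTestCache]-based path.
inline bool MatchesCidRanges(int32_t cid, const CidRange* ranges,
                             int range_count) {
  for (int i = 0; i < range_count; i++) {
    if (cid >= ranges[i].start && cid <= ranges[i].end) return true;
  }
  return false;
}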
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries (see the sketch after this message).
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instruction stream. The destination name will be
available in the pool, at the index directly after the subtype cache.
* Remove the fall-through to the N=1 case for probing subtyping tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
fell through to the TTS). We can add an optimized version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing stubs: Avoid an
extra jump at the last case of cid-class-range checks.
There are still a number of optimization opportunities we can pursue in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
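A minimal sketch of the lazy-creation idea from the first bullet, with
hypothetical types; the real implementation allocates the cache in a
runtime entry and patches the calling code's object pool:

#include <memory>

// Hypothetical stand-in for a SubtypeTestCache.
struct SubtypeTestCacheSketch {
  // Entries mapping cid/type-argument keys to test results would go here.
};

// A pool slot that starts out empty. Every cache miss already enters the
// runtime to record an entry, so the cache itself can be allocated on the
// first such call and written back into the pool slot.
struct PoolSlotSketch {
  std::unique_ptr<SubtypeTestCacheSketch> cache;
};

SubtypeTestCacheSketch* EnsureCache(PoolSlotSketch* slot) {
  if (slot->cache == nullptr) {
    slot->cache = std::make_unique<SubtypeTestCacheSketch>();  // first miss
  }
  return slot->cache.get();
}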
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially causing an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
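Sketched with invented names (TypeKey as a stand-in for a canonical
type), the propagation and the final filter might look like this; the
real pass operates on VM type objects inside the precompiler:

#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

using TypeKey = std::string;
using ClassId = int32_t;

// For each destination type that is a type parameter (of class cid at
// position k), add every type observed at that position in the recorded
// instantiations of cid. The result is the augmented "filter" set.
std::set<TypeKey> AugmentDestinationTypes(
    const std::set<TypeKey>& destination_types,
    const std::map<ClassId, std::set<std::vector<TypeKey>>>& used_type_args,
    const std::vector<std::pair<ClassId, int>>& type_param_destinations) {
  std::set<TypeKey> filter = destination_types;
  for (const auto& dest : type_param_destinations) {
    auto it = used_type_args.find(dest.first);
    if (it == used_type_args.end()) continue;
    for (const auto& vec : it->second) {
      if (dest.second < static_cast<int>(vec.size())) {
        filter.insert(vec[dest.second]);
      }
    }
  }
  return filter;
}

// Only types in the filter set get an optimized type testing stub.
bool ShouldSpecializeStub(const TypeKey& type,
                          const std::set<TypeKey>& filter) {
  return filter.count(type) > 0;
}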
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
return pool_index;
|
|
|
|
}
|
|
|
|
|
2014-04-02 17:39:32 +00:00
|
|
|
} // namespace dart
|
|
|
|
|
|
|
|
#endif  // defined(TARGET_ARCH_ARM64)
|