mirror of
https://github.com/dart-lang/sdk
synced 2024-10-14 09:58:32 +00:00
[vm/compiler] Initial implementation of IL binary serialization
This change adds binary serialization/deserialization of flow graphs. It supports all IL instructions and certain objects which can be referenced from IL instructions. IL binary serialization is a useful mechanism which would allow us to split compilation into multiple parts in order to parallelize AOT compilation.

The program structure (libraries/classes/functions/fields) is not serialized. It is assumed that the reader and the writer use the same program structure.

Caveats:
* FFI callbacks are not supported yet.
* Closure functions are not re-created when reading a flow graph.
* The flow graph should be in SSA form (unoptimized flow graphs are not supported).
* JIT mode is not supported (the serializer currently assumes lazy linking of native methods and empty ICData).

In order to test IL serialization, the --test_il_serialization VM option is added to serialize and deserialize the flow graph before generating code.

TEST=vm/dart/splay_test now runs with --test_il_serialization.
TEST=Manual run of vm-kernel-precomp-linux-debug-x64-try with --test_il_serialization enabled (only ffi tests failed).

Issue: https://github.com/dart-lang/sdk/issues/43299
Change-Id: I7bbfd9e3a301e00c9cfbffa06b8f1f6c78a78470
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/254941
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
This commit is contained in:
parent f4ea96ff2c
commit 9700458975
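For orientation, the round trip exercised by --test_il_serialization boils down to the following sketch, condensed from the ReorderBlocks compiler-pass change shown further down in this diff (illustrative only; the result variable name is made up here, everything else is taken from the change):

    Zone* zone = flow_graph->zone();
    auto* detached_defs = new (zone) ZoneGrowableArray<Definition*>(zone, 0);
    flow_graph->CompactSSA(detached_defs);  // make SSA/block numbering dense first

    ZoneWriteStream write_stream(zone, 1024);
    FlowGraphSerializer serializer(&write_stream);
    serializer.WriteFlowGraph(*flow_graph, *detached_defs);

    ReadStream read_stream(write_stream.buffer(), write_stream.bytes_written());
    FlowGraphDeserializer deserializer(flow_graph->parsed_function(), &read_stream);
    FlowGraph* round_tripped = deserializer.ReadFlowGraph();  // code is then generated from this copy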
@@ -31,6 +31,7 @@
// VMOptions=--no_load_cse
// VMOptions=--no_dead_store_elimination
// VMOptions=--no_load_cse --no_dead_store_elimination
// VMOptions=--test_il_serialization

import "dart:math";
import 'package:benchmark_harness/benchmark_harness.dart';
@@ -35,6 +35,7 @@
// VMOptions=--no_load_cse
// VMOptions=--no_dead_store_elimination
// VMOptions=--no_load_cse --no_dead_store_elimination
// VMOptions=--test_il_serialization

import "dart:math";
import 'package:benchmark_harness/benchmark_harness.dart';
@@ -149,4 +149,26 @@ void BitmapBuilder::SetBit(intptr_t bit_offset, bool value) {
  }
}

void BitmapBuilder::Write(BaseWriteStream* stream) const {
  const intptr_t payload_size =
      Utils::Minimum(Utils::RoundUp(Length(), kBitsPerByte) / kBitsPerByte,
                     data_size_in_bytes_);
  stream->Write<intptr_t>(Length());
  stream->Write<intptr_t>(payload_size);
  stream->WriteBytes(BackingStore(), payload_size);
}

void BitmapBuilder::Read(ReadStream* stream) {
  length_ = stream->Read<intptr_t>();
  const intptr_t payload_size = stream->Read<intptr_t>();
  if (payload_size > data_size_in_bytes_) {
    data_size_in_bytes_ = payload_size;
    data_.ptr_ = AllocBackingStore(data_size_in_bytes_);
  } else {
    memset(BackingStore() + payload_size, 0,
           data_size_in_bytes_ - payload_size);
  }
  stream->ReadBytes(BackingStore(), payload_size);
}

}  // namespace dart
@@ -54,6 +54,9 @@ class BitmapBuilder : public ZoneAllocated {
  void Print() const;
  void AppendAsBytesTo(BaseWriteStream* stream) const;

  void Write(BaseWriteStream* stream) const;
  void Read(ReadStream* stream);

 private:
  static constexpr intptr_t kIncrementSizeInBytes = 16;
  static constexpr intptr_t kInlineCapacityInBytes = 16;
@@ -107,7 +107,7 @@ class ExceptionHandlerList : public ZoneAllocated {
    ASSERT(list_[try_index].pc_offset == ExceptionHandlers::kInvalidPcOffset);
    list_[try_index].pc_offset = pc_offset;
    list_[try_index].is_generated = is_generated;
    ASSERT(handler_types.IsZoneHandle());
    ASSERT(handler_types.IsNotTemporaryScopedHandle());
    list_[try_index].handler_types = &handler_types;
    list_[try_index].needs_stacktrace |= needs_stacktrace;
  }
@@ -375,6 +375,10 @@ int32_t SelectorMap::SelectorId(const Function& interface_target) const {
const TableSelector* SelectorMap::GetSelector(
    const Function& interface_target) const {
  const int32_t sid = SelectorId(interface_target);
  return GetSelector(sid);
}

const TableSelector* SelectorMap::GetSelector(int32_t sid) const {
  if (sid == kInvalidSelectorId) return nullptr;
  const TableSelector* selector = &selectors_[sid];
  if (!selector->IsUsed()) return nullptr;
@@ -61,6 +61,8 @@ class SelectorMap {
  // not have a selector assigned.
  const TableSelector* GetSelector(const Function& interface_target) const;

  const TableSelector* GetSelector(int32_t sid) const;

 private:
  static const int32_t kInvalidSelectorId =
      kernel::ProcedureAttributesMetadata::kInvalidSelectorId;
@@ -18,6 +18,8 @@ namespace dart {
class AbstractType;
class BaseTextBuffer;
class Definition;
class FlowGraphDeserializer;
class FlowGraphSerializer;

template <typename T>
class GrowableArray;
@@ -281,6 +283,9 @@ class CompileType : public ZoneAllocated {
  void set_owner(Definition* owner) { owner_ = owner; }
  Definition* owner() const { return owner_; }

  void Write(FlowGraphSerializer* s) const;
  explicit CompileType(FlowGraphDeserializer* d);

 private:
  bool can_be_null_;
  bool can_be_sentinel_;
@@ -2896,4 +2896,120 @@ void FlowGraph::Print(const char* phase) {
  FlowGraphPrinter::PrintGraph(phase, this);
}

class SSACompactor : public ValueObject {
 public:
  SSACompactor(intptr_t num_blocks,
               intptr_t num_ssa_vars,
               ZoneGrowableArray<Definition*>* detached_defs)
      : block_num_(num_blocks),
        ssa_num_(num_ssa_vars),
        detached_defs_(detached_defs) {
    block_num_.EnsureLength(num_blocks, -1);
    ssa_num_.EnsureLength(num_ssa_vars, -1);
  }

  void RenumberGraph(FlowGraph* graph) {
    for (auto block : graph->reverse_postorder()) {
      block_num_[block->block_id()] = 1;
      CollectDetachedMaterializations(block->env());

      if (auto* block_with_idefs = block->AsBlockEntryWithInitialDefs()) {
        for (Definition* def : *block_with_idefs->initial_definitions()) {
          RenumberDefinition(def);
          CollectDetachedMaterializations(def->env());
        }
      }
      if (auto* join = block->AsJoinEntry()) {
        for (PhiIterator it(join); !it.Done(); it.Advance()) {
          RenumberDefinition(it.Current());
        }
      }
      for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
        Instruction* instr = it.Current();
        if (Definition* def = instr->AsDefinition()) {
          RenumberDefinition(def);
        }
        CollectDetachedMaterializations(instr->env());
      }
    }
    for (auto* def : (*detached_defs_)) {
      RenumberDefinition(def);
    }
    graph->set_current_ssa_temp_index(current_ssa_index_);

    // Preserve order between block ids to as predecessors are sorted
    // by block ids.
    intptr_t current_block_index = 0;
    for (intptr_t i = 0, n = block_num_.length(); i < n; ++i) {
      if (block_num_[i] >= 0) {
        block_num_[i] = current_block_index++;
      }
    }
    for (auto block : graph->reverse_postorder()) {
      block->set_block_id(block_num_[block->block_id()]);
    }
    graph->set_max_block_id(current_block_index - 1);
  }

 private:
  void RenumberDefinition(Definition* def) {
    if (def->HasSSATemp()) {
      const intptr_t old_index = def->ssa_temp_index();
      intptr_t new_index = ssa_num_[old_index];
      if (new_index < 0) {
        ssa_num_[old_index] = new_index = current_ssa_index_++;
      }
      def->set_ssa_temp_index(new_index);
    }
  }

  bool IsDetachedDefinition(Definition* def) {
    return def->IsMaterializeObject() && (def->next() == nullptr);
  }

  void AddDetachedDefinition(Definition* def) {
    for (intptr_t i = 0, n = detached_defs_->length(); i < n; ++i) {
      if ((*detached_defs_)[i] == def) {
        return;
      }
    }
    detached_defs_->Add(def);
    // Follow inputs as detached definitions can reference other
    // detached definitions.
    for (intptr_t i = 0, n = def->InputCount(); i < n; ++i) {
      Definition* input = def->InputAt(i)->definition();
      if (IsDetachedDefinition(input)) {
        AddDetachedDefinition(input);
      }
    }
    ASSERT(def->env() == nullptr);
  }

  void CollectDetachedMaterializations(Environment* env) {
    if (env == nullptr) {
      return;
    }
    for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
      Definition* def = it.CurrentValue()->definition();
      if (IsDetachedDefinition(def)) {
        AddDetachedDefinition(def);
      }
    }
  }

  GrowableArray<intptr_t> block_num_;
  GrowableArray<intptr_t> ssa_num_;
  intptr_t current_ssa_index_ = 0;
  ZoneGrowableArray<Definition*>* detached_defs_;
};

void FlowGraph::CompactSSA(ZoneGrowableArray<Definition*>* detached_defs) {
  if (detached_defs == nullptr) {
    detached_defs = new (Z) ZoneGrowableArray<Definition*>(Z, 0);
  }
  SSACompactor compactor(max_block_id() + 1, current_ssa_temp_index(),
                         detached_defs);
  compactor.RenumberGraph(this);
}

}  // namespace dart
@@ -208,6 +208,9 @@ class FlowGraph : public ZoneAllocated {
  const GrowableArray<BlockEntryInstr*>& reverse_postorder() const {
    return reverse_postorder_;
  }
  const GrowableArray<BlockEntryInstr*>& optimized_block_order() const {
    return optimized_block_order_;
  }
  static bool ShouldReorderBlocks(const Function& function, bool is_optimized);
  GrowableArray<BlockEntryInstr*>* CodegenBlockOrder(bool is_optimized);
@@ -533,6 +536,14 @@ class FlowGraph : public ZoneAllocated {
  const Array& coverage_array() const { return *coverage_array_; }
  void set_coverage_array(const Array& array) { coverage_array_ = &array; }

  // Renumbers SSA values and basic blocks to make numbering dense.
  // Preserves order among block ids.
  //
  // Also collects definitions which are detached from the flow graph
  // but still referenced (currently only MaterializeObject instructions
  // can be detached).
  void CompactSSA(ZoneGrowableArray<Definition*>* detached_defs = nullptr);

 private:
  friend class FlowGraphCompiler;  // TODO(ajcbik): restructure
  friend class FlowGraphChecker;
@@ -403,6 +403,13 @@ void FlowGraphChecker::VisitUseDef(Instruction* instruction,
    ASSERT1(def->previous() != nullptr, def);
    // Skip checks below for common constants as checking them could be slow.
    if (IsCommonConstant(def)) return;
  } else if (def->IsMaterializeObject()) {
    // Materializations can be both linked into graph and detached.
    if (def->next() != nullptr) {
      ASSERT1(def->previous() != nullptr, def);
    } else {
      ASSERT1(def->previous() == nullptr, def);
    }
  } else {
    // Others are fully linked into graph.
    ASSERT1(def->next() != nullptr, def);
@@ -453,6 +460,14 @@ void FlowGraphChecker::VisitDefUse(Definition* def,
    ASSERT1(instruction->IsGraphEntry() || instruction->next() != nullptr,
            instruction);
    ASSERT2(DefDominatesUse(def, instruction), def, instruction);
  } else if (instruction->IsMaterializeObject()) {
    // Materializations can be both linked into graph and detached.
    if (instruction->next() != nullptr) {
      ASSERT1(instruction->previous() != nullptr, instruction);
      ASSERT2(DefDominatesUse(def, instruction), def, instruction);
    } else {
      ASSERT1(instruction->previous() == nullptr, instruction);
    }
  } else {
    // Others are fully linked into graph.
    ASSERT1(IsControlFlow(instruction) || instruction->next() != nullptr,
@@ -1057,6 +1057,11 @@ bool StrictCompareInstr::AttributesEqual(const Instruction& other) const {
         (needs_number_check() == other_op->needs_number_check());
}

const RuntimeEntry& CaseInsensitiveCompareInstr::TargetFunction() const {
  return handle_surrogates_ ? kCaseInsensitiveCompareUTF16RuntimeEntry
                            : kCaseInsensitiveCompareUCS2RuntimeEntry;
}

bool MathMinMaxInstr::AttributesEqual(const Instruction& other) const {
  auto const other_op = other.AsMathMinMax();
  ASSERT(other_op != NULL);
@@ -3143,10 +3148,10 @@ Definition* UnboxIntegerInstr::Canonicalize(FlowGraph* flow_graph) {
    }
  }

  if ((speculative_mode_ == kGuardInputs) && !ComputeCanDeoptimize()) {
  if ((SpeculativeModeOfInput(0) == kGuardInputs) && !ComputeCanDeoptimize()) {
    // Remember if we ever learn out input doesn't require checking, as
    // the input Value might be later changed that would make us forget.
    speculative_mode_ = kNotSpeculative;
    set_speculative_mode(kNotSpeculative);
  }

  return this;
runtime/vm/compiler/backend/il_serializer.cc (new file, 2305 lines): file diff suppressed because it is too large.

runtime/vm/compiler/backend/il_serializer.h (new file, 473 lines):
@@ -0,0 +1,473 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_
#define RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#include <utility>  // For std::move.

#include "platform/globals.h"
#include "vm/allocation.h"
#include "vm/compiler/backend/locations.h"

namespace dart {

class AliasIdentity;
class BlockEntryInstr;
class CallTargets;
class CatchBlockEntryInstr;
struct CidRangeValue;
class Cids;
class Code;
class ComparisonInstr;
class CompileType;
class Definition;
class Environment;
class FunctionEntryInstr;
class Instruction;
class FlowGraph;
class GraphEntryInstr;
class Heap;
class IndirectEntryInstr;
class JoinEntryInstr;
class LocalVariable;
class LocationSummary;
class MoveOperands;
class NonStreamingWriteStream;
class OsrEntryInstr;
class ParsedFunction;
class ParallelMoveInstr;
class PhiInstr;
class Range;
class ReadStream;
class TargetEntryInstr;
class TokenPosition;

namespace compiler {
struct TableSelector;

namespace ffi {
class CallbackMarshaller;
class CallMarshaller;
class NativeCallingConvention;
}  // namespace ffi
}  // namespace compiler

// The list of types which are handled by flow graph serializer/deserializer.
// For each type there is a corresponding Write<T>(T) and Read<T>() methods.
//
// This list includes all types of fields of IL instructions
// which are serialized via DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS macro,
// except enum types which are unwrapped with serializable_type_t.
//
// The list is sorted alphabetically by type name.
#define IL_SERIALIZABLE_TYPE_LIST(V) \
  V(AliasIdentity) \
  V(const AbstractType&) \
  V(const AbstractType*) \
  V(const Array&) \
  V(bool) \
  V(const compiler::ffi::CallbackMarshaller&) \
  V(const compiler::ffi::CallMarshaller&) \
  V(const CallTargets&) \
  V(const char*) \
  V(CidRangeValue) \
  V(const Cids&) \
  V(const Class&) \
  V(const Code&) \
  V(ComparisonInstr*) \
  V(CompileType*) \
  V(ConstantInstr*) \
  V(Definition*) \
  V(double) \
  V(Environment*) \
  V(const Field&) \
  V(const ICData*) \
  V(int8_t) \
  V(int16_t) \
  V(int32_t) \
  V(int64_t) \
  V(const Function&) \
  V(const FunctionType&) \
  V(Instruction*) \
  V(const LocalVariable&) \
  V(LocationSummary*) \
  V(MoveOperands*) \
  V(const compiler::ffi::NativeCallingConvention&) \
  V(const Object&) \
  V(ParallelMoveInstr*) \
  V(PhiInstr*) \
  V(Range*) \
  V(Representation) \
  V(const Slot&) \
  V(const Slot*) \
  V(const String&) \
  V(const compiler::TableSelector*) \
  V(TokenPosition) \
  V(const TypeArguments&) \
  V(const TypeParameters&) \
  V(uint8_t) \
  V(uint16_t) \
  V(uint32_t) \
  V(uint64_t) \
  V(Value*)

// List of types serializable as references.
#define IL_SERIALIZABLE_REF_TYPE_LIST(V) \
  V(BlockEntryInstr*) \
  V(CatchBlockEntryInstr*) \
  V(Definition*) \
  V(FunctionEntryInstr*) \
  V(IndirectEntryInstr*) \
  V(JoinEntryInstr*) \
  V(OsrEntryInstr*) \
  V(TargetEntryInstr*)

// Serializes flow graph, including constants and references
// to objects of program structure.
//
// Each IL instruction is serialized in 2 step:
// - the main step (T::WriteTo / T::T()) serializes
//   instruction fields, basically everything required to
//   re-create instruction object.
// - the extra step (T::WriteExtra / T::ReadExtra) serializes
//   references to other instructions, including inputs,
//   environments, locations (may reference constants) and successors.
//
class FlowGraphSerializer : public ValueObject {
 public:
  explicit FlowGraphSerializer(NonStreamingWriteStream* stream);
  ~FlowGraphSerializer();

  // Writes [flow_graph] into the stream.
  // The graph should be compacted via CompactSSA().
  // [detached_defs] should contain all definitions which are
  // detached from the graph but can still be referenced from
  // environments.
  void WriteFlowGraph(const FlowGraph& flow_graph,
                      const ZoneGrowableArray<Definition*>& detached_defs);

  // Default implementation of 'Write' method, when
  // specialization for a particular type is not provided.
  // This struct is used for the partial template instantiations below.
  template <typename T>
  struct WriteTrait {
    using ArgType = T;
  };

  template <typename T>
  struct WriteTrait<GrowableArray<T>> {
    using ArgType = const GrowableArray<T>&;
    static void Write(FlowGraphSerializer* s, ArgType x) {
      const intptr_t len = x.length();
      s->Write<intptr_t>(len);
      for (intptr_t i = 0; i < len; ++i) {
        s->Write<T>(x[i]);
      }
    }
  };

  template <typename T>
  struct WriteTrait<const GrowableArray<T>&> {
    using ArgType = const GrowableArray<T>&;
    static void Write(FlowGraphSerializer* s, ArgType x) {
      WriteTrait<GrowableArray<T>>::Write(s, x);
    }
  };

  template <typename T>
  struct WriteTrait<ZoneGrowableArray<T>*> {
    using ArgType = const ZoneGrowableArray<T>*;
    static void Write(FlowGraphSerializer* s, ArgType x) {
      if (x == nullptr) {
        s->Write<intptr_t>(-1);
        return;
      }
      const intptr_t len = x->length();
      s->Write<intptr_t>(len);
      for (intptr_t i = 0; i < len; ++i) {
        s->Write<T>((*x)[i]);
      }
    }
  };

  template <typename T>
  struct WriteTrait<const ZoneGrowableArray<T>&> {
    using ArgType = const ZoneGrowableArray<T>&;
    static void Write(FlowGraphSerializer* s, ArgType x) {
      WriteTrait<ZoneGrowableArray<T>*>::Write(s, &x);
    }
  };

  // Specialization in case intptr_t is not mapped to intN_t.
  template <>
  struct WriteTrait<intptr_t> {
    using ArgType = intptr_t;
    static void Write(FlowGraphSerializer* s, intptr_t x) {
#ifdef ARCH_IS_64_BIT
      s->Write<int64_t>(x);
#else
      s->Write<int32_t>(x);
#endif
    }
  };

  // Specialization in case uintptr_t is not mapped to uintN_t.
  template <>
  struct WriteTrait<uintptr_t> {
    using ArgType = uintptr_t;
    static void Write(FlowGraphSerializer* s, uintptr_t x) {
#ifdef ARCH_IS_64_BIT
      s->Write<uint64_t>(x);
#else
      s->Write<uint32_t>(x);
#endif
    }
  };

  template <typename T>
  void Write(typename WriteTrait<T>::ArgType x) {
    WriteTrait<T>::Write(this, x);
  }

#define DECLARE_WRITE_METHOD(type) \
  template <> \
  void Write<type>(type x);
  IL_SERIALIZABLE_TYPE_LIST(DECLARE_WRITE_METHOD)
#undef DECLARE_WRITE_METHOD

  template <typename T>
  void WriteRef(T x);

#define DECLARE_WRITE_REF_METHOD(type) \
  template <> \
  void WriteRef<type>(type x);
  IL_SERIALIZABLE_REF_TYPE_LIST(DECLARE_WRITE_REF_METHOD)
#undef DECLARE_WRITE_REF_METHOD

  template <typename T>
  void WriteGrowableArrayOfRefs(const GrowableArray<T>& array) {
    const intptr_t len = array.length();
    Write<intptr_t>(len);
    for (intptr_t i = 0; i < len; ++i) {
      WriteRef<T>(array[i]);
    }
  }

  BaseWriteStream* stream() const { return stream_; }
  IsolateGroup* isolate_group() const { return isolate_group_; }
  bool can_write_refs() const { return can_write_refs_; }

 private:
  void WriteObjectImpl(const Object& x, intptr_t cid, intptr_t object_index);

  // Used to track scopes of recursive types during serialization.
  struct TypeScope {
    TypeScope(FlowGraphSerializer* serializer, bool is_recursive)
        : serializer_(serializer),
          is_recursive_(is_recursive),
          was_writing_recursive_type_(serializer->writing_recursive_type_) {
      serializer->writing_recursive_type_ = is_recursive;
    }

    ~TypeScope() {
      serializer_->writing_recursive_type_ = was_writing_recursive_type_;
    }

    // Returns true if type of the current scope can be canonicalized
    // during deserialization. Recursive types which were not
    // fully deserialized should not be canonicalized.
    bool CanBeCanonicalized() const {
      return !is_recursive_ || !was_writing_recursive_type_;
    }

    FlowGraphSerializer* const serializer_;
    const bool is_recursive_;
    const bool was_writing_recursive_type_;
  };

  NonStreamingWriteStream* stream_;
  Zone* zone_;
  IsolateGroup* isolate_group_;
  Heap* heap_;
  intptr_t object_counter_ = 0;
  bool can_write_refs_ = false;
  bool writing_recursive_type_ = false;
};

// Deserializes flow graph.
// All constants and types are canonicalized during deserialization.
class FlowGraphDeserializer : public ValueObject {
 public:
  FlowGraphDeserializer(const ParsedFunction& parsed_function,
                        ReadStream* stream);

  const ParsedFunction& parsed_function() const { return parsed_function_; }

  Zone* zone() const { return zone_; }
  ReadStream* stream() const { return stream_; }
  Thread* thread() const { return thread_; }
  IsolateGroup* isolate_group() const { return isolate_group_; }

  GraphEntryInstr* graph_entry() const { return graph_entry_; }
  void set_graph_entry(GraphEntryInstr* entry) { graph_entry_ = entry; }

  BlockEntryInstr* current_block() const { return current_block_; }
  void set_current_block(BlockEntryInstr* block) { current_block_ = block; }

  BlockEntryInstr* block(intptr_t block_id) const {
    BlockEntryInstr* b = blocks_[block_id];
    ASSERT(b != nullptr);
    return b;
  }
  void set_block(intptr_t block_id, BlockEntryInstr* block) {
    ASSERT(blocks_[block_id] == nullptr);
    blocks_[block_id] = block;
  }

  Definition* definition(intptr_t ssa_temp_index) const {
    Definition* def = definitions_[ssa_temp_index];
    ASSERT(def != nullptr);
    return def;
  }
  void set_definition(intptr_t ssa_temp_index, Definition* def) {
    ASSERT(definitions_[ssa_temp_index] == nullptr);
    definitions_[ssa_temp_index] = def;
  }

  FlowGraph* ReadFlowGraph();

  // Default implementation of 'Read' method, when
  // specialization for a particular type is not provided.
  // This struct is used for the partial template instantiations below.
  template <typename T>
  struct ReadTrait {};

  template <typename T>
  struct ReadTrait<GrowableArray<T>> {
    static GrowableArray<T> Read(FlowGraphDeserializer* d) {
      const intptr_t len = d->Read<intptr_t>();
      GrowableArray<T> array(len);
      for (int i = 0; i < len; ++i) {
        array.Add(d->Read<T>());
      }
      return array;
    }
  };

  template <typename T>
  struct ReadTrait<const GrowableArray<T>&> {
    static const GrowableArray<T>& Read(FlowGraphDeserializer* d) {
      return ReadTrait<GrowableArray<T>>::Read(d);
    }
  };

  template <typename T>
  struct ReadTrait<ZoneGrowableArray<T>*> {
    static ZoneGrowableArray<T>* Read(FlowGraphDeserializer* d) {
      const intptr_t len = d->Read<intptr_t>();
      if (len < 0) {
        return nullptr;
      }
      auto* array = new (d->zone()) ZoneGrowableArray<T>(d->zone(), len);
      for (int i = 0; i < len; ++i) {
        array->Add(d->Read<T>());
      }
      return array;
    }
  };

  template <typename T>
  struct ReadTrait<const ZoneGrowableArray<T>&> {
    static const ZoneGrowableArray<T>& Read(FlowGraphDeserializer* d) {
      return *ReadTrait<ZoneGrowableArray<T>*>::Read(d);
    }
  };

  // Specialization in case intptr_t is not mapped to intN_t.
  template <>
  struct ReadTrait<intptr_t> {
    static intptr_t Read(FlowGraphDeserializer* d) {
#ifdef ARCH_IS_64_BIT
      return d->Read<int64_t>();
#else
      return d->Read<int32_t>();
#endif
    }
  };

  // Specialization in case uintptr_t is not mapped to uintN_t.
  template <>
  struct ReadTrait<uintptr_t> {
    static uintptr_t Read(FlowGraphDeserializer* d) {
#ifdef ARCH_IS_64_BIT
      return d->Read<uint64_t>();
#else
      return d->Read<uint32_t>();
#endif
    }
  };

  template <typename T>
  T Read() {
    return ReadTrait<T>::Read(this);
  }

#define DECLARE_READ_METHOD(type) \
  template <> \
  type Read<type>();
  IL_SERIALIZABLE_TYPE_LIST(DECLARE_READ_METHOD)
#undef DECLARE_READ_METHOD

  template <typename T>
  T ReadRef();

#define DECLARE_READ_REF_METHOD(type) \
  template <> \
  type ReadRef<type>();
  IL_SERIALIZABLE_REF_TYPE_LIST(DECLARE_READ_REF_METHOD)
#undef DECLARE_READ_REF_METHOD

  template <typename T>
  GrowableArray<T> ReadGrowableArrayOfRefs() {
    const intptr_t len = Read<intptr_t>();
    GrowableArray<T> array(len);
    for (int i = 0; i < len; ++i) {
      array.Add(ReadRef<T>());
    }
    return std::move(array);
  }

 private:
  ClassPtr GetClassById(classid_t id) const;
  const Object& ReadObjectImpl(intptr_t cid, intptr_t object_index);
  void SetObjectAt(intptr_t object_index, const Object& object);

  InstancePtr MaybeCanonicalize(const Instance& obj,
                                intptr_t object_index,
                                bool can_be_canonicalized);

  const ParsedFunction& parsed_function_;
  ReadStream* stream_;
  Zone* zone_;
  Thread* thread_;
  IsolateGroup* isolate_group_;

  // Deserialized objects.
  GraphEntryInstr* graph_entry_ = nullptr;
  BlockEntryInstr* current_block_ = nullptr;
  GrowableArray<BlockEntryInstr*> blocks_;
  GrowableArray<Definition*> definitions_;
  GrowableArray<const Object*> objects_;
  intptr_t object_counter_ = 0;
  GrowableArray<intptr_t> pending_canonicalization_;
};

}  // namespace dart

#endif  // RUNTIME_VM_COMPILER_BACKEND_IL_SERIALIZER_H_
@@ -21,6 +21,8 @@ namespace dart {
class BaseTextBuffer;
class ConstantInstr;
class Definition;
class FlowGraphDeserializer;
class FlowGraphSerializer;
class PairLocation;
class Value;
@@ -430,8 +432,8 @@ class Location : public ValueObject {

  Location Copy() const;

  static Location read(uword value) { return Location(value); }
  uword write() const { return value_; }
  void Write(FlowGraphSerializer* s) const;
  static Location Read(FlowGraphDeserializer* d);

 private:
  explicit Location(uword value) : value_(value) {}
@@ -572,8 +574,7 @@ class RegisterSet : public ValueObject {
    ASSERT(kNumberOfFpuRegisters <= (kWordSize * kBitsPerByte));
  }

  explicit RegisterSet(uintptr_t cpu_register_mask,
                       uintptr_t fpu_register_mask = 0)
  explicit RegisterSet(uintptr_t cpu_register_mask, uintptr_t fpu_register_mask)
      : RegisterSet() {
    AddTaggedRegisters(cpu_register_mask, fpu_register_mask);
  }
@@ -717,6 +718,9 @@ class RegisterSet : public ValueObject {
    untagged_cpu_registers_.Clear();
  }

  void Write(FlowGraphSerializer* s) const;
  explicit RegisterSet(FlowGraphDeserializer* d);

 private:
  SmallSet<Register> cpu_registers_;
  SmallSet<Register> untagged_cpu_registers_;
@@ -835,6 +839,9 @@ class LocationSummary : public ZoneAllocated {
  void CheckWritableInputs();
#endif

  void Write(FlowGraphSerializer* s) const;
  explicit LocationSummary(FlowGraphDeserializer* d);

 private:
  BitmapBuilder& EnsureStackBitmap() {
    if (stack_bitmap_ == NULL) {
@@ -1815,7 +1815,7 @@ RangeBoundary RangeBoundary::Shl(const RangeBoundary& value_boundary,
  int64_t value = value_boundary.ConstantValue();

  if (value == 0) {
    return RangeBoundary(0);
    return RangeBoundary::FromConstant(0);
  } else if (shift_count == 0 ||
             (limit > 0 && Utils::IsInt(static_cast<int>(limit), value))) {
    // Result stays in 64 bit range.
@@ -301,6 +301,9 @@ class RangeBoundary : public ValueObject {

  int64_t SmiLowerBound() const { return LowerBound(kRangeBoundarySmi); }

  void Write(FlowGraphSerializer* s) const;
  explicit RangeBoundary(FlowGraphDeserializer* d);

 private:
  RangeBoundary(Kind kind, int64_t value, int64_t offset)
      : kind_(kind), value_(value), offset_(offset) {}
@@ -536,6 +539,9 @@ class Range : public ZoneAllocated {
                              Definition* left_defn,
                              Range* result);

  void Write(FlowGraphSerializer* s) const;
  explicit Range(FlowGraphDeserializer* d);

 private:
  RangeBoundary min_;
  RangeBoundary max_;
@@ -82,13 +82,14 @@ TEST_CASE(RangeTests) {
                RangeBoundary(compiler::target::kSmiMin),
                RangeBoundary(compiler::target::kSmiMax));
  }
  TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
  TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary::FromConstant(0),
                RangeBoundary::PositiveInfinity());
  TEST_RANGE_OP(Range::Shl, -100, 0, 0, 64, RangeBoundary::NegativeInfinity(),
                RangeBoundary(0));
                RangeBoundary::FromConstant(0));

  TEST_RANGE_OP(Range::Shr, -8, 8, 1, 2, RangeBoundary(-4), RangeBoundary(4));
  TEST_RANGE_OP(Range::Shr, 1, 8, 1, 2, RangeBoundary(0), RangeBoundary(4));
  TEST_RANGE_OP(Range::Shr, 1, 8, 1, 2, RangeBoundary::FromConstant(0),
                RangeBoundary(4));
  TEST_RANGE_OP(Range::Shr, -16, -8, 1, 2, RangeBoundary(-8),
                RangeBoundary(-2));
  TEST_RANGE_OP(Range::Shr, 2, 4, -1, 1, RangeBoundary(1), RangeBoundary(4));
@@ -466,24 +467,26 @@ TEST_CASE(RangeAnd) {
  // [0xff, 0xfff] & [0xf, 0xf] = [0x0, 0xf].
  TEST_RANGE_AND(static_cast<int64_t>(0xff), static_cast<int64_t>(0xfff),
                 static_cast<int64_t>(0xf), static_cast<int64_t>(0xf),
                 RangeBoundary(0), RangeBoundary(0xf));
                 RangeBoundary::FromConstant(0), RangeBoundary(0xf));

  // [0xffffffff, 0xffffffff] & [0xfffffffff, 0xfffffffff] = [0x0, 0xfffffffff].
  TEST_RANGE_AND(
      static_cast<int64_t>(0xffffffff), static_cast<int64_t>(0xffffffff),
      static_cast<int64_t>(0xfffffffff), static_cast<int64_t>(0xfffffffff),
      RangeBoundary(0), RangeBoundary(static_cast<int64_t>(0xfffffffff)));
      RangeBoundary::FromConstant(0),
      RangeBoundary(static_cast<int64_t>(0xfffffffff)));

  // [0xffffffff, 0xffffffff] & [-20, 20] = [0x0, 0xffffffff].
  TEST_RANGE_AND(static_cast<int64_t>(0xffffffff),
                 static_cast<int64_t>(0xffffffff), static_cast<int64_t>(-20),
                 static_cast<int64_t>(20), RangeBoundary(0),
                 static_cast<int64_t>(20), RangeBoundary::FromConstant(0),
                 RangeBoundary(static_cast<int64_t>(0xffffffff)));

  // [-20, 20] & [0xffffffff, 0xffffffff] = [0x0, 0xffffffff].
  TEST_RANGE_AND(static_cast<int64_t>(-20), static_cast<int64_t>(20),
                 static_cast<int64_t>(0xffffffff),
                 static_cast<int64_t>(0xffffffff), RangeBoundary(0),
                 static_cast<int64_t>(0xffffffff),
                 RangeBoundary::FromConstant(0),
                 RangeBoundary(static_cast<int64_t>(0xffffffff)));

  // Test that [-20, 20] & [-20, 20] = [-32, 31].
@@ -3676,8 +3676,10 @@ void AllocationSinking::Optimize() {
// Remove materializations from the graph. Register allocator will treat them
// as part of the environment not as a real instruction.
void AllocationSinking::DetachMaterializations() {
  for (intptr_t i = 0; i < materializations_.length(); i++) {
    materializations_[i]->previous()->LinkTo(materializations_[i]->next());
  for (MaterializeObjectInstr* mat : materializations_) {
    mat->previous()->LinkTo(mat->next());
    mat->set_next(nullptr);
    mat->set_previous(nullptr);
  }
}
@@ -302,48 +302,60 @@ const Slot& Slot::GetTypeArgumentsSlotFor(Thread* thread, const Class& cls) {
  const intptr_t offset =
      compiler::target::Class::TypeArgumentsFieldOffset(cls);
  ASSERT(offset != Class::kNoTypeArguments);
  return SlotCache::Instance(thread).Canonicalize(
      Slot(Kind::kTypeArguments,
           IsImmutableBit::encode(true) |
               IsCompressedBit::encode(
                   compiler::target::Class::HasCompressedPointers(cls)),
           kTypeArgumentsCid, offset, ":type_arguments",
           /*static_type=*/nullptr, kTagged));
  return GetCanonicalSlot(
      thread, Kind::kTypeArguments,
      IsImmutableBit::encode(true) |
          IsCompressedBit::encode(
              compiler::target::Class::HasCompressedPointers(cls)),
      kTypeArgumentsCid, offset, ":type_arguments",
      /*static_type=*/nullptr, kTagged);
}

const Slot& Slot::GetContextVariableSlotFor(Thread* thread,
                                            const LocalVariable& variable) {
  ASSERT(variable.is_captured());
  return SlotCache::Instance(thread).Canonicalize(
      Slot(Kind::kCapturedVariable,
           IsImmutableBit::encode(variable.is_final() && !variable.is_late()) |
               IsNullableBit::encode(true) |
               IsCompressedBit::encode(Context::ContainsCompressedPointers()) |
               IsSentinelVisibleBit::encode(variable.is_late()),
           kDynamicCid,
           compiler::target::Context::variable_offset(variable.index().value()),
           &variable.name(), &variable.type(), kTagged));
  return GetCanonicalSlot(
      thread, Kind::kCapturedVariable,
      IsImmutableBit::encode(variable.is_final() && !variable.is_late()) |
          IsNullableBit::encode(true) |
          IsCompressedBit::encode(Context::ContainsCompressedPointers()) |
          IsSentinelVisibleBit::encode(variable.is_late()),
      kDynamicCid,
      compiler::target::Context::variable_offset(variable.index().value()),
      &variable.name(), &variable.type(), kTagged);
}

const Slot& Slot::GetTypeArgumentsIndexSlot(Thread* thread, intptr_t index) {
  const intptr_t offset =
      compiler::target::TypeArguments::type_at_offset(index);
  const Slot& slot = Slot(
      Kind::kTypeArgumentsIndex,
  return GetCanonicalSlot(
      thread, Kind::kTypeArgumentsIndex,
      IsImmutableBit::encode(true) |
          IsCompressedBit::encode(TypeArguments::ContainsCompressedPointers()),
      kDynamicCid, offset, ":argument", /*static_type=*/nullptr, kTagged);
  return SlotCache::Instance(thread).Canonicalize(slot);
}

const Slot& Slot::GetArrayElementSlot(Thread* thread,
                                      intptr_t offset_in_bytes) {
  const Slot& slot =
      Slot(Kind::kArrayElement,
           IsNullableBit::encode(true) |
               IsCompressedBit::encode(Array::ContainsCompressedPointers()),
           kDynamicCid, offset_in_bytes, ":array_element",
           /*static_type=*/nullptr, kTagged);
  return GetCanonicalSlot(
      thread, Kind::kArrayElement,
      IsNullableBit::encode(true) |
          IsCompressedBit::encode(Array::ContainsCompressedPointers()),
      kDynamicCid, offset_in_bytes, ":array_element",
      /*static_type=*/nullptr, kTagged);
}

const Slot& Slot::GetCanonicalSlot(Thread* thread,
                                   Slot::Kind kind,
                                   int8_t flags,
                                   ClassIdTagType cid,
                                   intptr_t offset_in_bytes,
                                   const void* data,
                                   const AbstractType* static_type,
                                   Representation representation,
                                   const FieldGuardState& field_guard_state) {
  const Slot& slot = Slot(kind, flags, cid, offset_in_bytes, data, static_type,
                          representation, field_guard_state);
  return SlotCache::Instance(thread).Canonicalize(slot);
}
@@ -456,8 +468,8 @@ const Slot& Slot::Get(const Field& field,
  }

  Class& owner = Class::Handle(zone, field.Owner());
  const Slot& slot = SlotCache::Instance(thread).Canonicalize(Slot(
      Kind::kDartField,
  const Slot& slot = GetCanonicalSlot(
      thread, Kind::kDartField,
      IsImmutableBit::encode((field.is_final() && !field.is_late()) ||
                             field.is_const()) |
          IsNullableBit::encode(is_nullable) |
@@ -467,7 +479,7 @@ const Slot& Slot::Get(const Field& field,
          IsSentinelVisibleBit::encode(field.is_late() && field.is_final() &&
                                       !field.has_initializer()),
      nullable_cid, compiler::target::Field::OffsetOf(field), &field, &type,
      rep, field_guard_state));
      rep, field_guard_state);

  // If properties of this slot were based on the guarded state make sure
  // to add the field to the list of guarded fields. Note that during background
@@ -357,9 +357,12 @@ class Slot : public ZoneAllocated {
  bool IsPotentialUnboxed() const;
  Representation UnboxedRepresentation() const;

  void Write(FlowGraphSerializer* s) const;
  static const Slot& Read(FlowGraphDeserializer* d);

 private:
  Slot(Kind kind,
       int8_t bits,
       int8_t flags,
       ClassIdTagType cid,
       intptr_t offset_in_bytes,
       const void* data,
@@ -367,7 +370,7 @@ class Slot : public ZoneAllocated {
       Representation representation,
       const FieldGuardState& field_guard_state = FieldGuardState())
      : kind_(kind),
        flags_(bits),
        flags_(flags),
        cid_(cid),
        offset_in_bytes_(offset_in_bytes),
        representation_(representation),
@@ -397,6 +400,17 @@ class Slot : public ZoneAllocated {
    return static_cast<const T*>(data_);
  }

  static const Slot& GetCanonicalSlot(
      Thread* thread,
      Kind kind,
      int8_t flags,
      ClassIdTagType cid,
      intptr_t offset_in_bytes,
      const void* data,
      const AbstractType* static_type,
      Representation representation,
      const FieldGuardState& field_guard_state = FieldGuardState());

  // There is a fixed statically known number of native slots so we cache
  // them statically.
  static AcqRelAtomic<Slot*> native_fields_;
@@ -84,6 +84,7 @@ DEFINE_OPTION_HANDLER(CompilerPass::ParseFiltersFromFlag,
                      "Do --compiler-passes=help for more information.");
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
DEFINE_FLAG(bool, test_il_serialization, false, "Test IL serialization.");

void CompilerPassState::set_flow_graph(FlowGraph* flow_graph) {
  flow_graph_ = flow_graph;
@@ -565,6 +566,22 @@ COMPILER_PASS(ReorderBlocks, {
  if (state->reorder_blocks) {
    BlockScheduler::ReorderBlocks(flow_graph);
  }

  // This is the last compiler pass.
  // Test that round-trip IL serialization works before generating code.
  if (FLAG_test_il_serialization && CompilerState::Current().is_aot()) {
    Zone* zone = flow_graph->zone();
    auto* detached_defs = new (zone) ZoneGrowableArray<Definition*>(zone, 0);
    flow_graph->CompactSSA(detached_defs);

    ZoneWriteStream write_stream(flow_graph->zone(), 1024);
    FlowGraphSerializer serializer(&write_stream);
    serializer.WriteFlowGraph(*flow_graph, *detached_defs);
    ReadStream read_stream(write_stream.buffer(), write_stream.bytes_written());
    FlowGraphDeserializer deserializer(flow_graph->parsed_function(),
                                       &read_stream);
    state->set_flow_graph(deserializer.ReadFlowGraph());
  }
});

COMPILER_PASS(EliminateWriteBarriers, { EliminateWriteBarriers(flow_graph); });
@@ -65,6 +65,8 @@ compiler_sources = [
  "backend/il_printer.cc",
  "backend/il_printer.h",
  "backend/il_riscv.cc",
  "backend/il_serializer.cc",
  "backend/il_serializer.h",
  "backend/il_x64.cc",
  "backend/inliner.cc",
  "backend/inliner.h",
@@ -14,18 +14,15 @@ namespace compiler {
namespace ffi {

// TODO(dartbug.com/36607): Cache the trampolines.
FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
FunctionPtr TrampolineFunction(const String& name,
                               const FunctionType& signature,
                               const FunctionType& c_signature,
                               bool is_leaf,
                               const String& function_name) {
                               bool is_leaf) {
  ASSERT(signature.num_implicit_parameters() == 1);
  Thread* thread = Thread::Current();
  Zone* zone = thread->zone();
  String& name =
      String::Handle(zone, Symbols::NewFormatted(thread, "FfiTrampoline_%s",
                                                 function_name.ToCString()));
  const Library& lib = Library::Handle(zone, Library::FfiLibrary());
  const Class& owner_class = Class::Handle(zone, lib.toplevel_class());
  FunctionType& signature = FunctionType::Handle(zone, FunctionType::New());
  Function& function = Function::Handle(
      zone, Function::New(signature, name, UntaggedFunction::kFfiTrampoline,
                          /*is_static=*/true,
@@ -35,31 +32,47 @@ FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
                          /*is_native=*/false, owner_class,
                          TokenPosition::kMinSource));
  function.set_is_debuggable(false);

  // Create unique names for the parameters, as they are used in scope building
  // and error messages.
  if (signature.num_fixed_parameters() > 0) {
    function.CreateNameArray();
    function.SetParameterNameAt(0, Symbols::ClosureParameter());
    auto& param_name = String::Handle(zone);
    for (intptr_t i = 1, n = signature.num_fixed_parameters(); i < n; ++i) {
      param_name = Symbols::NewFormatted(thread, ":ffi_param%" Pd, i);
      function.SetParameterNameAt(i, param_name);
    }
  }

  function.SetFfiCSignature(c_signature);
  function.SetFfiIsLeaf(is_leaf);

  return function.ptr();
}

FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
                               const FunctionType& c_signature,
                               bool is_leaf,
                               const String& function_name) {
  Thread* thread = Thread::Current();
  Zone* zone = thread->zone();
  String& name =
      String::Handle(zone, Symbols::NewFormatted(thread, "FfiTrampoline_%s",
                                                 function_name.ToCString()));

  // Trampolines have no optional arguments.
  FunctionType& signature = FunctionType::Handle(zone, FunctionType::New());
  const intptr_t num_fixed = dart_signature.num_fixed_parameters();
  signature.set_num_implicit_parameters(1);
  signature.set_num_fixed_parameters(num_fixed);
  signature.set_result_type(
      AbstractType::Handle(zone, dart_signature.result_type()));
  signature.set_parameter_types(
      Array::Handle(zone, dart_signature.parameter_types()));

  // Create unique names for the parameters, as they are used in scope building
  // and error messages.
  if (num_fixed > 0) {
    function.CreateNameArray();
    function.SetParameterNameAt(0, Symbols::ClosureParameter());
    for (intptr_t i = 1; i < num_fixed; i++) {
      name = Symbols::NewFormatted(thread, ":ffi_param%" Pd, i);
      function.SetParameterNameAt(i, name);
    }
  }
  function.SetFfiCSignature(c_signature);
  signature ^= ClassFinalizer::FinalizeType(signature);
  function.SetSignature(signature);

  function.SetFfiIsLeaf(is_leaf);

  return function.ptr();
  return TrampolineFunction(name, signature, c_signature, is_leaf);
}

}  // namespace ffi
@@ -19,6 +19,11 @@ namespace compiler {

namespace ffi {

FunctionPtr TrampolineFunction(const String& name,
                               const FunctionType& signature,
                               const FunctionType& c_signature,
                               bool is_leaf);

FunctionPtr TrampolineFunction(const FunctionType& dart_signature,
                               const FunctionType& c_signature,
                               bool is_leaf,
@@ -131,6 +131,7 @@ class BaseMarshaller : public ZoneAllocated {

  bool ContainsHandles() const;

  const Function& dart_signature() const { return dart_signature_; }
  StringPtr function_name() const { return dart_signature_.name(); }

 protected:
@@ -889,7 +889,8 @@ void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
  // Fall through to &is_simple_case

  const RegisterSet caller_saved_registers(
      TypeTestABI::kSubtypeTestCacheStubCallerSavedRegisters);
      TypeTestABI::kSubtypeTestCacheStubCallerSavedRegisters,
      /*fpu_registers=*/0);

  __ Bind(&is_simple_case);
  {
@@ -3222,6 +3222,28 @@ void Class::AddFunction(const Function& function) const {
  }
}

intptr_t Class::FindFunctionIndex(const Function& needle) const {
  Thread* thread = Thread::Current();
  if (EnsureIsFinalized(thread) != Error::null()) {
    return -1;
  }
  REUSABLE_ARRAY_HANDLESCOPE(thread);
  REUSABLE_FUNCTION_HANDLESCOPE(thread);
  Array& funcs = thread->ArrayHandle();
  Function& function = thread->FunctionHandle();
  funcs = current_functions();
  ASSERT(!funcs.IsNull());
  const intptr_t len = funcs.Length();
  for (intptr_t i = 0; i < len; i++) {
    function ^= funcs.At(i);
    if (needle.ptr() == function.ptr()) {
      return i;
    }
  }
  // No function found.
  return -1;
}

FunctionPtr Class::FunctionFromIndex(intptr_t idx) const {
  const Array& funcs = Array::Handle(current_functions());
  if ((idx < 0) || (idx >= funcs.Length())) {
@@ -3234,20 +3256,13 @@ FunctionPtr Class::FunctionFromIndex(intptr_t idx) const {
}

FunctionPtr Class::ImplicitClosureFunctionFromIndex(intptr_t idx) const {
  const Array& funcs = Array::Handle(current_functions());
  if ((idx < 0) || (idx >= funcs.Length())) {
  Function& func = Function::Handle(FunctionFromIndex(idx));
  if (func.IsNull() || !func.HasImplicitClosureFunction()) {
    return Function::null();
  }
  Function& func = Function::Handle();
  func ^= funcs.At(idx);
  func = func.ImplicitClosureFunction();
  ASSERT(!func.IsNull());
  if (!func.HasImplicitClosureFunction()) {
    return Function::null();
  }
  const Function& closure_func =
      Function::Handle(func.ImplicitClosureFunction());
  ASSERT(!closure_func.IsNull());
  return closure_func.ptr();
  return func.ptr();
}

intptr_t Class::FindImplicitClosureFunctionIndex(const Function& needle) const {
@@ -4682,6 +4697,35 @@ void Class::AddFields(const GrowableArray<const Field*>& new_fields) const {
  SetFields(new_arr);
}

intptr_t Class::FindFieldIndex(const Field& needle) const {
  Thread* thread = Thread::Current();
  if (EnsureIsFinalized(thread) != Error::null()) {
    return -1;
  }
  REUSABLE_ARRAY_HANDLESCOPE(thread);
  REUSABLE_FIELD_HANDLESCOPE(thread);
  Array& fields = thread->ArrayHandle();
  Field& field = thread->FieldHandle();
  fields = this->fields();
  ASSERT(!fields.IsNull());
  for (intptr_t i = 0, n = fields.Length(); i < n; ++i) {
    field ^= fields.At(i);
    if (needle.ptr() == field.ptr()) {
      return i;
    }
  }
  // Not found.
  return -1;
}

FieldPtr Class::FieldFromIndex(intptr_t idx) const {
  Array& fields = Array::Handle(this->fields());
  if ((idx < 0) || (idx >= fields.Length())) {
    return Field::null();
  }
  return Field::RawCast(fields.At(idx));
}

bool Class::InjectCIDFields() const {
  if (library() != Library::InternalLibrary() ||
      Name() != Symbols::ClassID().ptr()) {
@@ -1478,6 +1478,9 @@ class Class : public Object {
  void AddField(const Field& field) const;
  void AddFields(const GrowableArray<const Field*>& fields) const;

  intptr_t FindFieldIndex(const Field& needle) const;
  FieldPtr FieldFromIndex(intptr_t idx) const;

  // If this is a dart:internal.ClassID class, then inject our own const
  // fields. Returns true if synthetic fields are injected and regular
  // field declarations should be ignored.
@@ -1505,6 +1508,7 @@ class Class : public Object {
  }
  void SetFunctions(const Array& value) const;
  void AddFunction(const Function& function) const;
  intptr_t FindFunctionIndex(const Function& needle) const;
  FunctionPtr FunctionFromIndex(intptr_t idx) const;
  intptr_t FindImplicitClosureFunctionIndex(const Function& needle) const;
  FunctionPtr ImplicitClosureFunctionFromIndex(intptr_t idx) const;
@@ -7772,6 +7776,8 @@ class Instance : public Object {
  friend class Closure;
  friend class Pointer;
  friend class DeferredObject;
  friend class FlowGraphSerializer;
  friend class FlowGraphDeserializer;
  friend class RegExp;
  friend class StubCode;
  friend class TypedDataView;
@@ -7898,6 +7904,8 @@ class TypeParameters : public Object {
  FINAL_HEAP_OBJECT_IMPLEMENTATION(TypeParameters, Object);
  friend class Class;
  friend class ClassFinalizer;
  friend class FlowGraphSerializer;
  friend class FlowGraphDeserializer;
  friend class Function;
  friend class FunctionType;
  friend class Object;
@@ -9911,6 +9919,7 @@ class OneByteString : public AllStatic {

  friend class Class;
  friend class ExternalOneByteString;
  friend class FlowGraphSerializer;
  friend class ImageWriter;
  friend class String;
  friend class StringHasher;
@@ -10031,6 +10040,7 @@ class TwoByteString : public AllStatic {
  }

  friend class Class;
  friend class FlowGraphSerializer;
  friend class ImageWriter;
  friend class String;
  friend class StringHasher;
@@ -928,15 +928,9 @@ void IRRegExpMacroAssembler::CheckNotBackReferenceIgnoreCase(

  Definition* is_match_def;

  if (unicode) {
    is_match_def = new (Z) CaseInsensitiveCompareInstr(
        string_value, lhs_index_value, rhs_index_value, length_value,
        kCaseInsensitiveCompareUTF16RuntimeEntry, specialization_cid_);
  } else {
    is_match_def = new (Z) CaseInsensitiveCompareInstr(
        string_value, lhs_index_value, rhs_index_value, length_value,
        kCaseInsensitiveCompareUCS2RuntimeEntry, specialization_cid_);
  }
  is_match_def = new (Z) CaseInsensitiveCompareInstr(
      string_value, lhs_index_value, rhs_index_value, length_value,
      /*handle_surrogates=*/unicode, specialization_cid_);

  BranchOrBacktrack(Comparison(kNE, is_match_def, BoolConstant(true)),
                    on_no_match);
@@ -626,7 +626,7 @@ void TypeTestingStubGenerator::
  // c) Then we'll check each value of the type argument.
  compiler::Label pop_saved_registers_on_failure;
  const RegisterSet saved_registers(
      TTSInternalRegs::kSavedTypeArgumentRegisters);
      TTSInternalRegs::kSavedTypeArgumentRegisters, /*fpu_registers=*/0);
  __ PushRegisters(saved_registers);

  AbstractType& type_arg = AbstractType::Handle();