// dart-sdk/runtime/vm/thread.h
// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_THREAD_H_
#define RUNTIME_VM_THREAD_H_
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/safe_stack.h"
#include "vm/bitfield.h"
#include "vm/constants.h"
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/os_thread.h"
#include "vm/runtime_entry_list.h"
namespace dart {
class AbstractType;
class ApiLocalScope;
class Array;
class CompilerState;
class Class;
class Code;
class CompilerStats;
class Error;
class ExceptionHandlers;
class Field;
class Function;
class GrowableObjectArray;
class HandleScope;
class Heap;
class HierarchyInfo;
class Instance;
class Isolate;
class Library;
class LongJumpScope;
class Object;
class OSThread;
class JSONObject;
class PcDescriptors;
class RawBool;
class RawObject;
class RawCode;
class RawError;
class RawGrowableObjectArray;
class RawStackTrace;
class RawString;
class RuntimeEntry;
class Smi;
class StackResource;
class StackTrace;
class String;
class TimelineStream;
class TypeArguments;
class TypeParameter;
class TypeUsageInfo;
class Zone;
#define REUSABLE_HANDLE_LIST(V) \
V(AbstractType) \
V(Array) \
V(Class) \
V(Code) \
V(Error) \
V(ExceptionHandlers) \
V(Field) \
V(Function) \
V(GrowableObjectArray) \
V(Instance) \
V(Library) \
V(Object) \
V(PcDescriptors) \
V(Smi) \
V(String) \
V(TypeArguments) \
V(TypeParameter)
#if defined(TARGET_ARCH_DBC)
#define CACHED_VM_STUBS_LIST(V)
#else
#define CACHED_VM_STUBS_LIST(V) \
V(RawCode*, write_barrier_code_, StubCode::WriteBarrier_entry()->code(), \
NULL) \
V(RawCode*, fix_callers_target_code_, \
StubCode::FixCallersTarget_entry()->code(), NULL) \
V(RawCode*, fix_allocation_stub_code_, \
StubCode::FixAllocationStubTarget_entry()->code(), NULL) \
V(RawCode*, invoke_dart_code_stub_, \
StubCode::InvokeDartCode_entry()->code(), NULL) \
V(RawCode*, invoke_dart_code_from_bytecode_stub_, \
StubCode::InvokeDartCodeFromBytecode_entry()->code(), NULL) \
V(RawCode*, call_to_runtime_stub_, StubCode::CallToRuntime_entry()->code(), \
NULL) \
V(RawCode*, null_error_shared_without_fpu_regs_stub_, \
StubCode::NullErrorSharedWithoutFPURegs_entry()->code(), NULL) \
V(RawCode*, null_error_shared_with_fpu_regs_stub_, \
StubCode::NullErrorSharedWithFPURegs_entry()->code(), NULL) \
V(RawCode*, stack_overflow_shared_without_fpu_regs_stub_, \
StubCode::StackOverflowSharedWithoutFPURegs_entry()->code(), NULL) \
V(RawCode*, stack_overflow_shared_with_fpu_regs_stub_, \
StubCode::StackOverflowSharedWithFPURegs_entry()->code(), NULL) \
V(RawCode*, monomorphic_miss_stub_, \
StubCode::MonomorphicMiss_entry()->code(), NULL) \
V(RawCode*, ic_lookup_through_code_stub_, \
StubCode::ICCallThroughCode_entry()->code(), NULL) \
V(RawCode*, lazy_deopt_from_return_stub_, \
StubCode::DeoptimizeLazyFromReturn_entry()->code(), NULL) \
V(RawCode*, lazy_deopt_from_throw_stub_, \
StubCode::DeoptimizeLazyFromThrow_entry()->code(), NULL) \
V(RawCode*, slow_type_test_stub_, StubCode::SlowTypeTest_entry()->code(), \
NULL) \
V(RawCode*, lazy_specialize_type_test_stub_, \
StubCode::LazySpecializeTypeTest_entry()->code(), NULL)
#endif
#define CACHED_NON_VM_STUB_LIST(V) \
V(RawObject*, object_null_, Object::null(), NULL) \
V(RawBool*, bool_true_, Object::bool_true().raw(), NULL) \
V(RawBool*, bool_false_, Object::bool_false().raw(), NULL)
// List of VM-global objects/addresses cached in each Thread object.
// Important: constant false must immediately follow constant true.
#define CACHED_VM_OBJECTS_LIST(V) \
CACHED_NON_VM_STUB_LIST(V) \
CACHED_VM_STUBS_LIST(V)
// This assertion marks places which assume that boolean false immediately
// follows bool true in the CACHED_VM_OBJECTS_LIST.
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE() \
ASSERT((Thread::bool_true_offset() + kWordSize) == \
Thread::bool_false_offset());
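// A hedged sketch of why the adjacency matters: a code generator can turn a
// 0/1 condition into a Bool object with a single indexed load instead of a
// branch (illustrative only, not the actual backend code):
//
//   // `cond` is 0 (false) or 1 (true); bool_true_ sits one word before
//   // bool_false_ in the Thread object.
//   uword offset = Thread::bool_false_offset() - cond * kWordSize;
//   RawBool* result = *reinterpret_cast<RawBool**>(
//       reinterpret_cast<uword>(thread) + offset);
//
// The ASSERT above guards exactly this layout assumption.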
#if defined(TARGET_ARCH_DBC)
#define CACHED_VM_STUBS_ADDRESSES_LIST(V)
#else
#define CACHED_VM_STUBS_ADDRESSES_LIST(V) \
V(uword, write_barrier_entry_point_, \
StubCode::WriteBarrier_entry()->EntryPoint(), 0) \
V(uword, call_to_runtime_entry_point_, \
StubCode::CallToRuntime_entry()->EntryPoint(), 0) \
V(uword, null_error_shared_without_fpu_regs_entry_point_, \
StubCode::NullErrorSharedWithoutFPURegs_entry()->EntryPoint(), 0) \
V(uword, null_error_shared_with_fpu_regs_entry_point_, \
StubCode::NullErrorSharedWithFPURegs_entry()->EntryPoint(), 0) \
V(uword, stack_overflow_shared_without_fpu_regs_entry_point_, \
StubCode::StackOverflowSharedWithoutFPURegs_entry()->EntryPoint(), 0) \
V(uword, stack_overflow_shared_with_fpu_regs_entry_point_, \
StubCode::StackOverflowSharedWithFPURegs_entry()->EntryPoint(), 0) \
V(uword, megamorphic_call_checked_entry_, \
StubCode::MegamorphicCall_entry()->EntryPoint(), 0) \
V(uword, monomorphic_miss_entry_, \
StubCode::MonomorphicMiss_entry()->EntryPoint(), 0)
#endif
#define CACHED_ADDRESSES_LIST(V) \
CACHED_VM_STUBS_ADDRESSES_LIST(V) \
V(uword, no_scope_native_wrapper_entry_point_, \
NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
V(uword, auto_scope_native_wrapper_entry_point_, \
NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
V(uword, interpret_call_entry_point_, RuntimeEntry::InterpretCallEntry(), 0) \
V(RawString**, predefined_symbols_address_, Symbols::PredefinedAddress(), \
NULL) \
V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
0) \
V(uword, double_negate_address_, \
reinterpret_cast<uword>(&double_negate_constant), 0) \
V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
0) \
V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
0) \
V(uword, float_negate_address_, \
reinterpret_cast<uword>(&float_negate_constant), 0) \
V(uword, float_absolute_address_, \
reinterpret_cast<uword>(&float_absolute_constant), 0) \
V(uword, float_zerow_address_, \
reinterpret_cast<uword>(&float_zerow_constant), 0)
#define CACHED_CONSTANTS_LIST(V) \
CACHED_VM_OBJECTS_LIST(V) \
CACHED_ADDRESSES_LIST(V)
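// These are X-macro lists: each client supplies its own V(...) definition, so
// the same list expands into field declarations, offset accessors and
// initializers. A sketch of one expansion, mirroring the field declarations
// further down in this header:
//
//   #define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
//     type_name member_name;
//   CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)  // => RawObject* object_null_; ...
//   #undef DECLARE_MEMBERS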
enum class ValidationPolicy {
kValidateFrames = 0,
kDontValidateFrames = 1,
};
// A VM thread; may be executing Dart code or performing helper tasks like
// garbage collection or compilation. The Thread structure associated with
// a thread is allocated by EnsureInit before entering an isolate, and destroyed
// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
// must currently be called manually (issue 23474).
class Thread : public BaseThread {
public:
// The kind of task this thread is performing. Sampled by the profiler.
enum TaskKind {
kUnknownTask = 0x0,
kMutatorTask = 0x1,
kCompilerTask = 0x2,
kMarkerTask = 0x4,
kSweeperTask = 0x8,
kCompactorTask = 0x10,
};
// Converts a TaskKind to its corresponding C-String name.
static const char* TaskKindToCString(TaskKind kind);
~Thread();
// The currently executing thread, or NULL if not yet initialized.
static Thread* Current() {
#if defined(HAS_C11_THREAD_LOCAL)
return OSThread::CurrentVMThread();
#else
BaseThread* thread = OSThread::GetCurrentTLS();
if (thread == NULL || thread->is_os_thread()) {
return NULL;
}
return reinterpret_cast<Thread*>(thread);
#endif
}
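// A minimal usage sketch (hypothetical caller, not part of this header):
//
//   Thread* thread = Thread::Current();
//   if (thread != NULL && thread->isolate() != NULL) {
//     Zone* zone = thread->zone();  // thread-local allocation zone
//   }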
// Makes the current thread enter 'isolate'.
static bool EnterIsolate(Isolate* isolate);
// Makes the current thread exit its isolate.
static void ExitIsolate();
// A VM thread other than the main mutator thread can enter an isolate as a
// "helper" to gain limited concurrent access to the isolate. One example is
// SweeperTask (which uses the class table, which is copy-on-write).
// TODO(koda): Properly synchronize heap access to expand allowed operations.
static bool EnterIsolateAsHelper(Isolate* isolate,
TaskKind kind,
bool bypass_safepoint = false);
static void ExitIsolateAsHelper(bool bypass_safepoint = false);
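// Hedged sketch of how a background task might use the helper entry points
// (the task body shown is illustrative only):
//
//   if (Thread::EnterIsolateAsHelper(isolate, Thread::kSweeperTask)) {
//     // ... do the work via Thread::Current() ...
//     Thread::ExitIsolateAsHelper();
//   }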
// Empties the store buffer block into the isolate.
void PrepareForGC();
void SetStackLimit(uword value);
void ClearStackLimit();
// Access to the current stack limit for generated code. This may be
// overwritten with a special value to trigger interrupts.
uword stack_limit_address() const {
return reinterpret_cast<uword>(&stack_limit_);
}
static intptr_t stack_limit_offset() {
return OFFSET_OF(Thread, stack_limit_);
}
// The true stack limit for this isolate.
uword saved_stack_limit() const { return saved_stack_limit_; }
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit() const { return saved_safestack_limit_; }
void set_saved_safestack_limit(uword limit) {
saved_safestack_limit_ = limit;
}
#endif
#if defined(TARGET_ARCH_DBC)
// Access to the current stack limit for DBC interpreter.
uword stack_limit() const { return stack_limit_; }
#endif
// Stack overflow flags
enum {
kOsrRequest = 0x1, // Current stack overflow caused by OSR request.
};
uword write_barrier_mask() const { return write_barrier_mask_; }
static intptr_t write_barrier_mask_offset() {
return OFFSET_OF(Thread, write_barrier_mask_);
}
static intptr_t stack_overflow_flags_offset() {
return OFFSET_OF(Thread, stack_overflow_flags_);
}
int32_t IncrementAndGetStackOverflowCount() {
return ++stack_overflow_count_;
}
TaskKind task_kind() const { return task_kind_; }
// Retrieves and clears the stack overflow flags. These are set by
// the generated code before the slow path runtime routine for a
// stack overflow is called.
uword GetAndClearStackOverflowFlags();
// Interrupt bits.
enum {
kVMInterrupt = 0x1, // Internal VM checks: safepoints, store buffers, etc.
kMessageInterrupt = 0x2, // An interrupt to process an out of band message.
kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
};
void ScheduleInterrupts(uword interrupt_bits);
void ScheduleInterruptsLocked(uword interrupt_bits);
RawError* HandleInterrupts();
uword GetAndClearInterrupts();
bool HasScheduledInterrupts() const {
return (stack_limit_ & kInterruptsMask) != 0;
}
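// An interrupt is delivered by overwriting stack_limit_ with a value whose
// low bits carry the interrupt kind, so the generated stack check fails and
// control reaches the slow path. A hedged sketch of slow-path handling
// (illustrative only):
//
//   uword pending = thread->GetAndClearInterrupts();
//   if ((pending & Thread::kMessageInterrupt) != 0) {
//     // process an out-of-band message
//   }
//   if ((pending & Thread::kVMInterrupt) != 0) {
//     // run VM-internal work: safepoints, store buffer overflow, ...
//   }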
// OSThread corresponding to this thread.
OSThread* os_thread() const { return os_thread_; }
void set_os_thread(OSThread* os_thread) { os_thread_ = os_thread; }
// Monitor corresponding to this thread.
Monitor* thread_lock() const { return thread_lock_; }
// The topmost zone used for allocation in this thread.
Zone* zone() const { return zone_; }
bool ZoneIsOwnedByThread(Zone* zone) const;
void IncrementMemoryCapacity(uintptr_t value) {
current_zone_capacity_ += value;
if (current_zone_capacity_ > zone_high_watermark_) {
zone_high_watermark_ = current_zone_capacity_;
}
}
void DecrementMemoryCapacity(uintptr_t value) {
ASSERT(current_zone_capacity_ >= value);
current_zone_capacity_ -= value;
}
uintptr_t current_zone_capacity() { return current_zone_capacity_; }
uintptr_t zone_high_watermark() const { return zone_high_watermark_; }
void ResetHighWatermark() { zone_high_watermark_ = current_zone_capacity_; }
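// Hedged sketch of the expected pairing (the caller shown is illustrative):
//
//   thread->IncrementMemoryCapacity(segment_size);  // a zone segment grows
//   ...
//   thread->DecrementMemoryCapacity(segment_size);  // the segment is freed
//
// zone_high_watermark_ thus records the peak zone capacity seen since the
// last ResetHighWatermark() call.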
// The reusable api local scope for this thread.
ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
void set_api_reusable_scope(ApiLocalScope* value) {
ASSERT(value == NULL || api_reusable_scope_ == NULL);
api_reusable_scope_ = value;
}
// The api local scope for this thread; this is where all local handles
// are allocated.
ApiLocalScope* api_top_scope() const { return api_top_scope_; }
void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
// The isolate that this thread is operating on, or NULL if none.
Isolate* isolate() const { return isolate_; }
static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
bool IsMutatorThread() const;
bool CanCollectGarbage() const;
// Offset of Dart TimelineStream object.
static intptr_t dart_stream_offset() {
return OFFSET_OF(Thread, dart_stream_);
}
// Is |this| executing Dart code?
bool IsExecutingDartCode() const;
// Has |this| exited Dart code?
bool HasExitedDartCode() const;
CompilerState& compiler_state() {
ASSERT(compiler_state_ != nullptr);
return *compiler_state_;
}
HierarchyInfo* hierarchy_info() const {
ASSERT(isolate_ != NULL);
return hierarchy_info_;
}
void set_hierarchy_info(HierarchyInfo* value) {
ASSERT(isolate_ != NULL);
ASSERT((hierarchy_info_ == NULL && value != NULL) ||
(hierarchy_info_ != NULL && value == NULL));
hierarchy_info_ = value;
}
TypeUsageInfo* type_usage_info() const {
ASSERT(isolate_ != NULL);
return type_usage_info_;
}
void set_type_usage_info(TypeUsageInfo* value) {
ASSERT(isolate_ != NULL);
ASSERT((type_usage_info_ == NULL && value != NULL) ||
(type_usage_info_ != NULL && value == NULL));
type_usage_info_ = value;
}
int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
void IncrementNoCallbackScopeDepth() {
ASSERT(no_callback_scope_depth_ < INT_MAX);
no_callback_scope_depth_ += 1;
}
void DecrementNoCallbackScopeDepth() {
ASSERT(no_callback_scope_depth_ > 0);
no_callback_scope_depth_ -= 1;
}
void StoreBufferAddObject(RawObject* obj);
void StoreBufferAddObjectGC(RawObject* obj);
#if defined(TESTING)
bool StoreBufferContains(RawObject* obj) const {
return store_buffer_block_->Contains(obj);
}
#endif
void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
static intptr_t store_buffer_block_offset() {
return OFFSET_OF(Thread, store_buffer_block_);
}
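// Hedged sketch of where StoreBufferAddObject fits (the write barrier itself
// is emitted by the compiler; this is illustrative only): when an old-space
// object is first observed pointing into new space, it is recorded in the
// thread's current block:
//
//   thread->StoreBufferAddObject(old_obj);  // may hand a full block back to
//                                           // the isolate's store buffer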
uword top_exit_frame_info() const { return top_exit_frame_info_; }
void set_top_exit_frame_info(uword top_exit_frame_info) {
top_exit_frame_info_ = top_exit_frame_info;
}
static intptr_t top_exit_frame_info_offset() {
return OFFSET_OF(Thread, top_exit_frame_info_);
}
StackResource* top_resource() const { return top_resource_; }
void set_top_resource(StackResource* value) { top_resource_ = value; }
static intptr_t top_resource_offset() {
return OFFSET_OF(Thread, top_resource_);
}
// Heap of the isolate that this thread is operating on.
Heap* heap() const { return heap_; }
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
void set_top(uword value) {
ASSERT(heap_ != NULL);
top_ = value;
}
void set_end(uword value) {
ASSERT(heap_ != NULL);
end_ = value;
}
uword top() { return top_; }
uword end() { return end_; }
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
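// top_/end_ bound the thread's bump-allocation region. A hedged sketch of the
// fast path that generated code effectively follows (illustrative only):
//
//   uword result = thread->top();
//   if (result + size <= thread->end()) {
//     thread->set_top(result + size);
//     // object header is written at `result`
//   } else {
//     // fall back to the runtime allocation entry point
//   }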
int32_t no_handle_scope_depth() const {
#if defined(DEBUG)
return no_handle_scope_depth_;
#else
return 0;
#endif
}
void IncrementNoHandleScopeDepth() {
#if defined(DEBUG)
ASSERT(no_handle_scope_depth_ < INT_MAX);
no_handle_scope_depth_ += 1;
#endif
}
void DecrementNoHandleScopeDepth() {
#if defined(DEBUG)
ASSERT(no_handle_scope_depth_ > 0);
no_handle_scope_depth_ -= 1;
#endif
}
HandleScope* top_handle_scope() const {
#if defined(DEBUG)
return top_handle_scope_;
#else
return 0;
#endif
}
void set_top_handle_scope(HandleScope* handle_scope) {
#if defined(DEBUG)
top_handle_scope_ = handle_scope;
#endif
}
int32_t no_safepoint_scope_depth() const {
#if defined(DEBUG)
return no_safepoint_scope_depth_;
#else
return 0;
#endif
}
void IncrementNoSafepointScopeDepth() {
#if defined(DEBUG)
ASSERT(no_safepoint_scope_depth_ < INT_MAX);
no_safepoint_scope_depth_ += 1;
#endif
}
void DecrementNoSafepointScopeDepth() {
#if defined(DEBUG)
ASSERT(no_safepoint_scope_depth_ > 0);
no_safepoint_scope_depth_ -= 1;
#endif
}
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
static intptr_t member_name##offset() { \
return OFFSET_OF(Thread, member_name); \
}
CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
static intptr_t write_barrier_wrappers_offset(Register reg) {
ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
intptr_t index = 0;
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
if (i == reg) break;
++index;
}
return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
index * sizeof(uword);
}
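// Hedged worked example of the index computation above: if the available
// register mask contained {R0, R1, R2, R4} and `reg` were R4, three available
// registers precede it, so its wrapper entry point lives at
// write_barrier_wrappers_entry_points_ + 3 * sizeof(uword). (Register names
// are illustrative; the real mask is target-specific.)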
#endif
#define DEFINE_OFFSET_METHOD(name) \
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}
RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}
LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
static bool CanLoadFromThread(const Object& object);
static intptr_t OffsetFromThread(const Object& object);
static bool ObjectAtOffset(intptr_t offset, Object* object);
static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
LongJumpScope* long_jump_base() const { return long_jump_base_; }
void set_long_jump_base(LongJumpScope* value) { long_jump_base_ = value; }
uword vm_tag() const { return vm_tag_; }
void set_vm_tag(uword tag) { vm_tag_ = tag; }
static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
int64_t unboxed_int64_runtime_arg() const {
return unboxed_int64_runtime_arg_;
}
void set_unboxed_int64_runtime_arg(int64_t value) {
unboxed_int64_runtime_arg_ = value;
}
static intptr_t unboxed_int64_runtime_arg_offset() {
return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
}
RawGrowableObjectArray* pending_functions();
void clear_pending_functions();
RawObject* active_exception() const { return active_exception_; }
void set_active_exception(const Object& value);
static intptr_t active_exception_offset() {
return OFFSET_OF(Thread, active_exception_);
}
RawObject* active_stacktrace() const { return active_stacktrace_; }
void set_active_stacktrace(const Object& value);
static intptr_t active_stacktrace_offset() {
return OFFSET_OF(Thread, active_stacktrace_);
}
uword resume_pc() const { return resume_pc_; }
void set_resume_pc(uword value) { resume_pc_ = value; }
static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }
RawError* sticky_error() const;
void set_sticky_error(const Error& value);
void clear_sticky_error();
RawError* get_and_clear_sticky_error();
RawStackTrace* async_stack_trace() const;
void set_async_stack_trace(const StackTrace& stack_trace);
void set_raw_async_stack_trace(RawStackTrace* raw_stack_trace);
void clear_async_stack_trace();
static intptr_t async_stack_trace_offset() {
return OFFSET_OF(Thread, async_stack_trace_);
}
CompilerStats* compiler_stats() { return compiler_stats_; }
#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
void set_reusable_##object##_handle_scope_active(bool value) { \
reusable_##object##_handle_scope_active_ = value; \
} \
bool reusable_##object##_handle_scope_active() const { \
return reusable_##object##_handle_scope_active_; \
}
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
bool IsAnyReusableHandleScopeActive() const {
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
if (reusable_##object##_handle_scope_active_) { \
return true; \
}
REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
return false;
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
}
#endif // defined(DEBUG)
void ClearReusableHandles();
#define REUSABLE_HANDLE(object) \
object& object##Handle() const { return *object##_handle_; }
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
#undef REUSABLE_HANDLE
/*
* Fields used to support safepointing a thread.
*
* - Bit 0 of the safepoint_state_ field is used to indicate if the thread is
* already at a safepoint,
* - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint
* operation is requested for this thread.
* - Bit 2 of the safepoint_state_ field is used to indicate that the thread
* is blocked for the safepoint operation to complete.
*
* The safepoint execution state (described below) for a thread is stored
* in the execution_state_ field.
* Potential execution states a thread could be in:
* kThreadInGenerated - The thread is running jitted dart/stub code.
* kThreadInVM - The thread is running VM code.
* kThreadInNative - The thread is running native code.
* kThreadInBlockedState - The thread is blocked waiting for a resource.
*/
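// Hedged sketch of the handshake these bits support (illustrative only):
//
//   // Requesting thread, holding the target's thread_lock():
//   target->SetSafepointRequested(true);   // sets bit 1
//   // ... wait until target->IsAtSafepoint() or IsBlockedForSafepoint() ...
//
//   // Target thread, at its next check:
//   thread->CheckForSafepoint();           // blocks and sets bits 0/2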
static bool IsAtSafepoint(uint32_t state) {
return AtSafepointField::decode(state);
}
bool IsAtSafepoint() const {
return AtSafepointField::decode(safepoint_state_);
}
static uint32_t SetAtSafepoint(bool value, uint32_t state) {
return AtSafepointField::update(value, state);
}
void SetAtSafepoint(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
safepoint_state_ = AtSafepointField::update(value, safepoint_state_);
}
bool IsSafepointRequested() const {
return SafepointRequestedField::decode(safepoint_state_);
}
static uint32_t SetSafepointRequested(bool value, uint32_t state) {
return SafepointRequestedField::update(value, state);
}
uint32_t SetSafepointRequested(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
uint32_t old_state;
uint32_t new_state;
do {
old_state = safepoint_state_;
new_state = SafepointRequestedField::update(value, old_state);
} while (AtomicOperations::CompareAndSwapUint32(
&safepoint_state_, old_state, new_state) != old_state);
return old_state;
}
static bool IsBlockedForSafepoint(uint32_t state) {
return BlockedForSafepointField::decode(state);
}
bool IsBlockedForSafepoint() const {
return BlockedForSafepointField::decode(safepoint_state_);
}
void SetBlockedForSafepoint(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
safepoint_state_ =
BlockedForSafepointField::update(value, safepoint_state_);
}
bool BypassSafepoints() const {
return BypassSafepointsField::decode(safepoint_state_);
}
static uint32_t SetBypassSafepoints(bool value, uint32_t state) {
return BypassSafepointsField::update(value, state);
}
enum ExecutionState {
kThreadInVM = 0,
kThreadInGenerated,
kThreadInNative,
kThreadInBlockedState
};
ExecutionState execution_state() const {
return static_cast<ExecutionState>(execution_state_);
}
void set_execution_state(ExecutionState state) {
execution_state_ = static_cast<uint32_t>(state);
}
bool TryEnterSafepoint() {
uint32_t new_state = SetAtSafepoint(true, 0);
if (AtomicOperations::CompareAndSwapUint32(&safepoint_state_, 0,
new_state) != 0) {
return false;
}
return true;
}
void EnterSafepoint() {
// First try a fast update of the thread state to indicate it is at a
// safepoint.
if (!TryEnterSafepoint()) {
// Fast update failed which means we could potentially be in the middle
// of a safepoint operation.
EnterSafepointUsingLock();
}
}
bool TryExitSafepoint() {
uint32_t old_state = SetAtSafepoint(true, 0);
if (AtomicOperations::CompareAndSwapUint32(&safepoint_state_, old_state,
0) != old_state) {
return false;
}
return true;
}
void ExitSafepoint() {
// First try a fast update of the thread state to indicate it is not at a
// safepoint anymore.
if (!TryExitSafepoint()) {
// Fast update failed which means we could potentially be in the middle
// of a safepoint operation.
ExitSafepointUsingLock();
}
}
void CheckForSafepoint() {
if (IsSafepointRequested()) {
BlockForSafepoint();
}
}
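// Hedged sketch of how long-running VM code cooperates with safepoint
// requests (the loop shown is illustrative only):
//
//   while (HasMoreWork()) {
//     DoABoundedChunkOfWork();
//     thread->CheckForSafepoint();  // parks here if a safepoint is pending
//   }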
Thread* next() const { return next_; }
// Visit all object pointers.
void VisitObjectPointers(ObjectPointerVisitor* visitor,
ValidationPolicy validate_frames);
bool IsValidHandle(Dart_Handle object) const;
bool IsValidLocalHandle(Dart_Handle object) const;
intptr_t CountLocalHandles() const;
bool IsValidZoneHandle(Dart_Handle object) const;
intptr_t CountZoneHandles() const;
bool IsValidScopedHandle(Dart_Handle object) const;
intptr_t CountScopedHandles() const;
int ZoneSizeInBytes() const;
void UnwindScopes(uword stack_marker);
void InitVMConstants();
#ifndef PRODUCT
void PrintJSON(JSONStream* stream) const;
#endif
private:
template <class T>
T* AllocateReusableHandle();
// Set the current compiler state and return the previous compiler state.
CompilerState* SetCompilerState(CompilerState* state) {
CompilerState* previous = compiler_state_;
compiler_state_ = state;
return previous;
}
// Accessed from generated code.
// ** This block of fields must come first! **
// For AOT cross-compilation, we rely on these members having the same offsets
// in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
// We use only word-sized fields to avoid differences in struct packing on the
// different architectures. See also CheckOffsets in dart.cc.
uword stack_limit_;
uword stack_overflow_flags_;
uword write_barrier_mask_;
Isolate* isolate_;
Heap* heap_;
uword top_;
uword end_;
uword top_exit_frame_info_;
StoreBufferBlock* store_buffer_block_;
uword vm_tag_;
TaskKind task_kind_;
RawStackTrace* async_stack_trace_;
// Memory location dedicated for passing unboxed int64 values from
// generated code to runtime.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// can be passed as arguments.
ALIGN8 int64_t unboxed_int64_runtime_arg_;
// State that is cached in the TLS for fast access in generated code.
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
type_name member_name;
CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#define DECLARE_MEMBERS(name) uword name##_entry_point_;
RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
#endif
TimelineStream* dart_stream_;
OSThread* os_thread_;
Monitor* thread_lock_;
Zone* zone_;
uintptr_t current_zone_capacity_;
uintptr_t zone_high_watermark_;
ApiLocalScope* api_reusable_scope_;
ApiLocalScope* api_top_scope_;
StackResource* top_resource_;
LongJumpScope* long_jump_base_;
int32_t no_callback_scope_depth_;
#if defined(DEBUG)
HandleScope* top_handle_scope_;
int32_t no_handle_scope_depth_;
int32_t no_safepoint_scope_depth_;
#endif
VMHandles reusable_handles_;
uword saved_stack_limit_;
intptr_t defer_oob_messages_count_;
uint16_t deferred_interrupts_mask_;
uint16_t deferred_interrupts_;
int32_t stack_overflow_count_;
// Compiler state:
CompilerState* compiler_state_ = nullptr;
HierarchyInfo* hierarchy_info_;
TypeUsageInfo* type_usage_info_;
RawGrowableObjectArray* pending_functions_;
// JumpToExceptionHandler state:
RawObject* active_exception_;
RawObject* active_stacktrace_;
uword resume_pc_;
RawError* sticky_error_;
CompilerStats* compiler_stats_;
// Reusable handles support.
#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
#undef REUSABLE_HANDLE_FIELDS
#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
bool reusable_##object##_handle_scope_active_;
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
#endif // defined(DEBUG)
class AtSafepointField : public BitField<uint32_t, bool, 0, 1> {};
class SafepointRequestedField : public BitField<uint32_t, bool, 1, 1> {};
class BlockedForSafepointField : public BitField<uint32_t, bool, 2, 1> {};
class BypassSafepointsField : public BitField<uint32_t, bool, 3, 1> {};
uint32_t safepoint_state_;
uint32_t execution_state_;
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit_;
#endif
Thread* next_; // Used to chain the thread structures in an isolate.
explicit Thread(Isolate* isolate);
void StoreBufferRelease(
StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
void StoreBufferAcquire();
void set_zone(Zone* zone) { zone_ = zone; }
void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
void EnterSafepointUsingLock();
void ExitSafepointUsingLock();
void BlockForSafepoint();
static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
void DeferOOBMessageInterrupts();
void RestoreOOBMessageInterrupts();
#define REUSABLE_FRIEND_DECLARATION(name) \
friend class Reusable##name##HandleScope;
REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
#undef REUSABLE_FRIEND_DECLARATION
friend class ApiZone;
friend class Interpreter;
friend class InterruptChecker;
friend class Isolate;
friend class IsolateTestHelper;
friend class NoOOBMessageScope;
friend class Simulator;
friend class StackZone;
friend class ThreadRegistry;
friend class CompilerState;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
#if defined(HOST_OS_WINDOWS)
// Clears the state of the current thread and frees the allocation.
void WindowsThreadCleanUp();
#endif
// Disable thread interrupts.
class DisableThreadInterruptsScope : public StackResource {
public:
explicit DisableThreadInterruptsScope(Thread* thread);
~DisableThreadInterruptsScope();
};
} // namespace dart
#endif // RUNTIME_VM_THREAD_H_