dart-sdk/runtime/vm/thread.h

// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_THREAD_H_
#define RUNTIME_VM_THREAD_H_
#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/safe_stack.h"
#include "vm/bitfield.h"
#include "vm/compiler/runtime_api.h"
#include "vm/constants.h"
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/os_thread.h"
#include "vm/pending_deopts.h"
#include "vm/random.h"
#include "vm/runtime_entry_list.h"
#include "vm/thread_stack_resource.h"
#include "vm/thread_state.h"
namespace dart {
class AbstractType;
class ApiLocalScope;
class Array;
class CompilerState;
class CompilerTimings;
class Class;
class Code;
class Error;
class ExceptionHandlers;
class Field;
class FieldTable;
class Function;
class GrowableObjectArray;
class HandleScope;
class Heap;
class HierarchyInfo;
class Instance;
class Isolate;
class IsolateGroup;
class Library;
class Object;
class OSThread;
class JSONObject;
class PcDescriptors;
class RuntimeEntry;
class Smi;
class StackResource;
class StackTrace;
class String;
class TimelineStream;
class TypeArguments;
class TypeParameter;
class TypeParameters;
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
class TypeUsageInfo;
class Zone;
namespace compiler {
namespace target {
class Thread;
} // namespace target
} // namespace compiler
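// Classes for which each Thread keeps a reusable handle (see the
// REUSABLE_*_HANDLE machinery further down in this header).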
#define REUSABLE_HANDLE_LIST(V) \
V(AbstractType) \
V(Array) \
V(Class) \
V(Code) \
V(Error) \
V(ExceptionHandlers) \
V(Field) \
V(Function) \
V(GrowableObjectArray) \
V(Instance) \
V(Library) \
V(Object) \
V(PcDescriptors) \
V(Smi) \
V(String) \
V(TypeParameters) \
V(TypeArguments) \
V(TypeParameter)
#define CACHED_VM_STUBS_LIST(V) \
V(CodePtr, write_barrier_code_, StubCode::WriteBarrier().ptr(), nullptr) \
V(CodePtr, array_write_barrier_code_, StubCode::ArrayWriteBarrier().ptr(), \
nullptr) \
V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().ptr(), \
nullptr) \
V(CodePtr, fix_allocation_stub_code_, \
StubCode::FixAllocationStubTarget().ptr(), nullptr) \
V(CodePtr, invoke_dart_code_stub_, StubCode::InvokeDartCode().ptr(), \
nullptr) \
V(CodePtr, call_to_runtime_stub_, StubCode::CallToRuntime().ptr(), nullptr) \
V(CodePtr, late_initialization_error_shared_without_fpu_regs_stub_, \
StubCode::LateInitializationErrorSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, late_initialization_error_shared_with_fpu_regs_stub_, \
StubCode::LateInitializationErrorSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, null_error_shared_without_fpu_regs_stub_, \
StubCode::NullErrorSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, null_error_shared_with_fpu_regs_stub_, \
StubCode::NullErrorSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, null_arg_error_shared_without_fpu_regs_stub_, \
StubCode::NullArgErrorSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, null_arg_error_shared_with_fpu_regs_stub_, \
StubCode::NullArgErrorSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, null_cast_error_shared_without_fpu_regs_stub_, \
StubCode::NullCastErrorSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, null_cast_error_shared_with_fpu_regs_stub_, \
StubCode::NullCastErrorSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, range_error_shared_without_fpu_regs_stub_, \
StubCode::RangeErrorSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, range_error_shared_with_fpu_regs_stub_, \
StubCode::RangeErrorSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, allocate_mint_with_fpu_regs_stub_, \
StubCode::AllocateMintSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, allocate_mint_without_fpu_regs_stub_, \
StubCode::AllocateMintSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, allocate_object_stub_, StubCode::AllocateObject().ptr(), nullptr) \
V(CodePtr, allocate_object_parameterized_stub_, \
StubCode::AllocateObjectParameterized().ptr(), nullptr) \
V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().ptr(), \
nullptr) \
V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_, \
StubCode::StackOverflowSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_, \
StubCode::StackOverflowSharedWithFPURegs().ptr(), nullptr) \
V(CodePtr, switchable_call_miss_stub_, StubCode::SwitchableCallMiss().ptr(), \
nullptr) \
V(CodePtr, throw_stub_, StubCode::Throw().ptr(), nullptr) \
V(CodePtr, re_throw_stub_, StubCode::Throw().ptr(), nullptr) \
V(CodePtr, assert_boolean_stub_, StubCode::AssertBoolean().ptr(), nullptr) \
V(CodePtr, optimize_stub_, StubCode::OptimizeFunction().ptr(), nullptr) \
V(CodePtr, deoptimize_stub_, StubCode::Deoptimize().ptr(), nullptr) \
V(CodePtr, lazy_deopt_from_return_stub_, \
StubCode::DeoptimizeLazyFromReturn().ptr(), nullptr) \
V(CodePtr, lazy_deopt_from_throw_stub_, \
StubCode::DeoptimizeLazyFromThrow().ptr(), nullptr) \
V(CodePtr, slow_type_test_stub_, StubCode::SlowTypeTest().ptr(), nullptr) \
V(CodePtr, lazy_specialize_type_test_stub_, \
StubCode::LazySpecializeTypeTest().ptr(), nullptr) \
V(CodePtr, enter_safepoint_stub_, StubCode::EnterSafepoint().ptr(), nullptr) \
V(CodePtr, exit_safepoint_stub_, StubCode::ExitSafepoint().ptr(), nullptr) \
V(CodePtr, call_native_through_safepoint_stub_, \
StubCode::CallNativeThroughSafepoint().ptr(), nullptr)
#define CACHED_NON_VM_STUB_LIST(V) \
V(ObjectPtr, object_null_, Object::null(), nullptr) \
V(BoolPtr, bool_true_, Object::bool_true().ptr(), nullptr) \
V(BoolPtr, bool_false_, Object::bool_false().ptr(), nullptr)
// List of VM-global objects/addresses cached in each Thread object.
// Important: constant false must immediately follow constant true.
#define CACHED_VM_OBJECTS_LIST(V) \
CACHED_NON_VM_STUB_LIST(V) \
CACHED_VM_STUBS_LIST(V)
// This assertion marks places which assume that boolean false immediately
// follows bool true in the CACHED_VM_OBJECTS_LIST.
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE() \
ASSERT((Thread::bool_true_offset() + kWordSize) == \
Thread::bool_false_offset());
#define CACHED_VM_STUBS_ADDRESSES_LIST(V) \
V(uword, write_barrier_entry_point_, StubCode::WriteBarrier().EntryPoint(), \
0) \
V(uword, array_write_barrier_entry_point_, \
StubCode::ArrayWriteBarrier().EntryPoint(), 0) \
V(uword, call_to_runtime_entry_point_, \
StubCode::CallToRuntime().EntryPoint(), 0) \
V(uword, allocate_mint_with_fpu_regs_entry_point_, \
StubCode::AllocateMintSharedWithFPURegs().EntryPoint(), 0) \
V(uword, allocate_mint_without_fpu_regs_entry_point_, \
StubCode::AllocateMintSharedWithoutFPURegs().EntryPoint(), 0) \
V(uword, allocate_object_entry_point_, \
StubCode::AllocateObject().EntryPoint(), 0) \
V(uword, allocate_object_parameterized_entry_point_, \
StubCode::AllocateObjectParameterized().EntryPoint(), 0) \
V(uword, allocate_object_slow_entry_point_, \
StubCode::AllocateObjectSlow().EntryPoint(), 0) \
V(uword, stack_overflow_shared_without_fpu_regs_entry_point_, \
StubCode::StackOverflowSharedWithoutFPURegs().EntryPoint(), 0) \
V(uword, stack_overflow_shared_with_fpu_regs_entry_point_, \
StubCode::StackOverflowSharedWithFPURegs().EntryPoint(), 0) \
V(uword, megamorphic_call_checked_entry_, \
StubCode::MegamorphicCall().EntryPoint(), 0) \
V(uword, switchable_call_miss_entry_, \
StubCode::SwitchableCallMiss().EntryPoint(), 0) \
V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0) \
V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0) \
V(uword, call_native_through_safepoint_entry_point_, \
StubCode::CallNativeThroughSafepoint().EntryPoint(), 0) \
V(uword, slow_type_test_entry_point_, StubCode::SlowTypeTest().EntryPoint(), \
0)
#define CACHED_ADDRESSES_LIST(V) \
CACHED_VM_STUBS_ADDRESSES_LIST(V) \
V(uword, bootstrap_native_wrapper_entry_point_, \
NativeEntry::BootstrapNativeCallWrapperEntry(), 0) \
V(uword, no_scope_native_wrapper_entry_point_, \
NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
V(uword, auto_scope_native_wrapper_entry_point_, \
NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(), \
NULL) \
V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
0) \
V(uword, double_negate_address_, \
reinterpret_cast<uword>(&double_negate_constant), 0) \
V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
0) \
V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
0) \
V(uword, float_negate_address_, \
reinterpret_cast<uword>(&float_negate_constant), 0) \
V(uword, float_absolute_address_, \
reinterpret_cast<uword>(&float_absolute_constant), 0) \
V(uword, float_zerow_address_, \
reinterpret_cast<uword>(&float_zerow_constant), 0)
#define CACHED_CONSTANTS_LIST(V) \
CACHED_VM_OBJECTS_LIST(V) \
CACHED_ADDRESSES_LIST(V)
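// For illustration: each V(type, name, init, default_init) entry above
// describes one field cached in Thread. The field 'name' of type 'type'
// holds 'default_init' until the VM constants are available and is then set
// to 'init'. Under DEFINE_OFFSET_METHOD below, an entry such as
//   V(CodePtr, write_barrier_code_, StubCode::WriteBarrier().ptr(), nullptr)
// yields a static write_barrier_code_offset() accessor returning
// OFFSET_OF(Thread, write_barrier_code_).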
enum class ValidationPolicy {
kValidateFrames = 0,
kDontValidateFrames = 1,
};
enum class RuntimeCallDeoptAbility {
// There was no leaf call, or there was a leaf call that can cause
// deoptimization after the call.
kCanLazyDeopt,
// There was a leaf call and the VM cannot deoptimize after the call.
kCannotLazyDeopt,
};
// The safepoint level a thread is on, or that a safepoint operation is
// requested for.
//
// The higher the number, the stronger the guarantees:
// * the time-to-safepoint latency increases with level
// * the frequency of hitting possible safe points decreases with level
enum SafepointLevel {
// Safe to GC
kGC,
// Safe to GC as well as Deopt.
kGCAndDeopt,
// Number of levels.
kNumLevels,
};
// A VM thread; may be executing Dart code or performing helper tasks like
// garbage collection or compilation. The Thread structure associated with
// a thread is allocated by EnsureInit before entering an isolate, and destroyed
// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
// must currently be called manually (issue 23474).
class Thread : public ThreadState {
public:
// The kind of task this thread is performing. Sampled by the profiler.
enum TaskKind {
kUnknownTask = 0x0,
kMutatorTask = 0x1,
kCompilerTask = 0x2,
kMarkerTask = 0x4,
kSweeperTask = 0x8,
kCompactorTask = 0x10,
kScavengerTask = 0x20,
};
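// The values are bit flags so that several task kinds can be combined into a
// single mask.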
// Converts a TaskKind to its corresponding C-String name.
static const char* TaskKindToCString(TaskKind kind);
~Thread();
// The currently executing thread, or NULL if not yet initialized.
static Thread* Current() {
#if defined(HAS_C11_THREAD_LOCAL)
return static_cast<Thread*>(OSThread::CurrentVMThread());
#else
BaseThread* thread = OSThread::GetCurrentTLS();
if (thread == NULL || thread->is_os_thread()) {
return NULL;
}
return static_cast<Thread*>(thread);
#endif
}
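// Example (illustrative): code running on a VM thread can fetch its Thread
// to reach per-thread state such as the current zone:
//   Thread* thread = Thread::Current();
//   Zone* zone = thread->zone();  // zone() is inherited from ThreadState.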
// Makes the current thread enter 'isolate'.
static bool EnterIsolate(Isolate* isolate, bool is_nested_reenter = false);
// Makes the current thread exit its isolate.
static void ExitIsolate(bool is_nested_exit = false);
// A VM thread other than the main mutator thread can enter an isolate as a
// "helper" to gain limited concurrent access to the isolate. One example is
// SweeperTask (which uses the class table, which is copy-on-write).
// TODO(koda): Properly synchronize heap access to expand allowed operations.
static bool EnterIsolateAsHelper(Isolate* isolate,
TaskKind kind,
bool bypass_safepoint = false);
static void ExitIsolateAsHelper(bool bypass_safepoint = false);
static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
TaskKind kind,
bool bypass_safepoint);
static void ExitIsolateGroupAsHelper(bool bypass_safepoint);
// Empties the store buffer block into the isolate.
void ReleaseStoreBuffer();
void AcquireMarkingStack();
void ReleaseMarkingStack();
void SetStackLimit(uword value);
void ClearStackLimit();
// Access to the current stack limit for generated code. Either the true OS
// thread's stack limit minus some headroom, or a special value to trigger
// interrupts.
uword stack_limit_address() const {
return reinterpret_cast<uword>(&stack_limit_);
}
static intptr_t stack_limit_offset() {
return OFFSET_OF(Thread, stack_limit_);
}
// The true stack limit for this OS thread.
static intptr_t saved_stack_limit_offset() {
return OFFSET_OF(Thread, saved_stack_limit_);
}
uword saved_stack_limit() const { return saved_stack_limit_; }
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit() const { return saved_safestack_limit_; }
void set_saved_safestack_limit(uword limit) {
saved_safestack_limit_ = limit;
}
#endif
static uword saved_shadow_call_stack_offset() {
return OFFSET_OF(Thread, saved_shadow_call_stack_);
}
// Stack overflow flags
enum {
kOsrRequest = 0x1, // Current stack overflow caused by OSR request.
};
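// Example (illustrative): the stack-overflow slow path can tell a real
// overflow apart from an OSR request via these flags:
//   uword flags = thread->GetAndClearStackOverflowFlags();
//   bool is_osr_request = (flags & Thread::kOsrRequest) != 0;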
uword write_barrier_mask() const { return write_barrier_mask_; }
uword heap_base() const { return heap_base_; }
static intptr_t write_barrier_mask_offset() {
return OFFSET_OF(Thread, write_barrier_mask_);
}
static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
static intptr_t stack_overflow_flags_offset() {
return OFFSET_OF(Thread, stack_overflow_flags_);
}
int32_t IncrementAndGetStackOverflowCount() {
return ++stack_overflow_count_;
}
uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }
static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
return fpu_regs
? stack_overflow_shared_with_fpu_regs_entry_point_offset()
: stack_overflow_shared_without_fpu_regs_entry_point_offset();
}
static intptr_t safepoint_state_offset() {
return OFFSET_OF(Thread, safepoint_state_);
}
static intptr_t callback_code_offset() {
return OFFSET_OF(Thread, ffi_callback_code_);
}
static intptr_t callback_stack_return_offset() {
return OFFSET_OF(Thread, ffi_callback_stack_return_);
}
// Tag state is maintained on transitions.
enum {
// Always true in generated state.
kDidNotExit = 0,
// The VM exited the generated state through FFI.
// This can be true in both native and VM state.
kExitThroughFfi = 1,
// The VM exited the generated state through a runtime call.
// This can be true in both native and VM state.
kExitThroughRuntimeCall = 2,
};
static intptr_t exit_through_ffi_offset() {
return OFFSET_OF(Thread, exit_through_ffi_);
}
TaskKind task_kind() const { return task_kind_; }
// Retrieves and clears the stack overflow flags. These are set by
// the generated code before the slow path runtime routine for a
// stack overflow is called.
uword GetAndClearStackOverflowFlags();
// Interrupt bits.
enum {
kVMInterrupt = 0x1, // Internal VM checks: safepoints, store buffers, etc.
kMessageInterrupt = 0x2, // An interrupt to process an out of band message.
kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
};
void ScheduleInterrupts(uword interrupt_bits);
void ScheduleInterruptsLocked(uword interrupt_bits);
ErrorPtr HandleInterrupts();
uword GetAndClearInterrupts();
bool HasScheduledInterrupts() const {
return (stack_limit_ & kInterruptsMask) != 0;
}
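// Example (illustrative): another thread can request an out-of-band check
// with
//   thread->ScheduleInterrupts(Thread::kVMInterrupt);
// which folds the bit into stack_limit_, so the next stack check in
// generated code takes the slow path and eventually calls HandleInterrupts().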
// Monitor corresponding to this thread.
Monitor* thread_lock() const { return &thread_lock_; }
// The reusable api local scope for this thread.
ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
void set_api_reusable_scope(ApiLocalScope* value) {
ASSERT(value == NULL || api_reusable_scope_ == NULL);
api_reusable_scope_ = value;
}
// The api local scope for this thread; this is where all local handles
// are allocated.
ApiLocalScope* api_top_scope() const { return api_top_scope_; }
void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
static intptr_t api_top_scope_offset() {
return OFFSET_OF(Thread, api_top_scope_);
}
void EnterApiScope();
void ExitApiScope();
// The isolate that this thread is operating on, or nullptr if none.
Isolate* isolate() const { return isolate_; }
static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
static intptr_t isolate_group_offset() {
return OFFSET_OF(Thread, isolate_group_);
}
// The isolate group that this thread is operating on, or nullptr if none.
IsolateGroup* isolate_group() const { return isolate_group_; }
static intptr_t field_table_values_offset() {
return OFFSET_OF(Thread, field_table_values_);
}
bool IsMutatorThread() const { return is_mutator_thread_; }
#if defined(DEBUG)
bool IsInsideCompiler() const { return inside_compiler_; }
#endif
bool CanCollectGarbage() const;
// Offset of Dart TimelineStream object.
static intptr_t dart_stream_offset() {
return OFFSET_OF(Thread, dart_stream_);
}
// Is |this| executing Dart code?
bool IsExecutingDartCode() const;
// Has |this| exited Dart code?
bool HasExitedDartCode() const;
CompilerState& compiler_state() {
ASSERT(compiler_state_ != nullptr);
return *compiler_state_;
}
HierarchyInfo* hierarchy_info() const {
ASSERT(isolate_group_ != nullptr);
return hierarchy_info_;
}
void set_hierarchy_info(HierarchyInfo* value) {
ASSERT(isolate_group_ != nullptr);
ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
(hierarchy_info_ != nullptr && value == nullptr));
hierarchy_info_ = value;
}
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
TypeUsageInfo* type_usage_info() const {
ASSERT(isolate_group_ != nullptr);
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
return type_usage_info_;
}
void set_type_usage_info(TypeUsageInfo* value) {
ASSERT(isolate_group_ != nullptr);
ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
(type_usage_info_ != nullptr && value == nullptr));
Reland "[VM] Introduction of type testing stubs - Part 1-4" Relands 165c583d57af613836cf7d08242ce969521db00b [VM] Introduction of type testing stubs - Part 1 This CL: * Adds a field to [RawAbstractType] which will always hold a pointer to the entrypoint of a type testing stub * Makes this new field be initialized to a default stub whenever a instances are created (e.g. via Type::New(), snapshot reader, ...) * Makes the clustered snapshotter write a reference to the corresponding [RawInstructions] object when writing the field and do the reverse when reading it. * Makes us call the type testing stub for performing assert-assignable checks. To reduce unnecessary loads on callsites, we store the entrypoint of the type testing stubs directly in the type objects. This means that the caller of type testing stubs can simply branch there without populating a code object first. This also means that the type testing stubs themselves have no access to a pool and we therefore also don't hold on to the [Code] object, only the [Instruction] object is necessary. The type testing stubs do not setup a frame themselves and also have no safepoint. In the case when the type testing stubs could not determine a positive answer they will tail-call a general-purpose stub. The general-purpose stub sets up a stub frame, tries to consult a [SubtypeTestCache] and bails out to runtime if this was unsuccessful. This CL is just the the first, for ease of reviewing. The actual type-specialized type testing stubs will be generated in later CLs. Reviewed-on: https://dart-review.googlesource.com/44787 Relands f226c22424c483d65499545e560efc059f9dde1c [VM] Introduction of type testing stubs - Part 2 This CL starts building type testing stubs specialzed for [Type] objects we test against. More specifically, it adds support for: * Handling obvious fast cases on the call sites (while still having a call to stub for negative case) * Handling type tests against type parameters, by loading the value of the type parameter on the call sites and invoking it's type testing stub. * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subtype-checks. ==> e.g. String/List<dynamic> * Specialzed type testing stubs for instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the type arguments. ==> e.g. Widget<State>, where we know [Widget] is only extended and not implemented. * Specialzed type testing stubs for certain non-instantiated types where we can do [CidRange]-based subclass-checks for the class and [CidRange]-based subtype-checks for the instantiated type arguments and cid based comparisons for type parameters. (Note that this fast-case migth result in some false-negatives!) ==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only extended and not implemented. This optimizes cases where the caller uses `new HashMap<A, B>()` and only uses `A` and `B` as key/values (and not subclasses of it). The false-negative can occur when subtypes of A or B are used. In such cases we fall back to the [SubtypeTestCache]-based imlementation. 
Reviewed-on: https://dart-review.googlesource.com/44788 Relands 25f98bcc7561006d70a487ba3de55551658ac683 [VM] Introduction of type testing stubs - Part 3 The changes include: * Make AssertAssignableInstr no longer have a call-summary, which helps methods with several parameter checks by not having to re-load/re-initialize type arguments registers * Lazily create SubtypeTestCaches: We already go to runtime to warm up the caches, so we now also create the caches on the first runtime call and patch the pool entries. * No longer load the destination name into a register: We only need the name when we throw an exception, so it is not on the hot path. Instead we let the runtime look at the call site, decoding a pool index from the instructions stream. The destination name will be available in the pool, at a consecutive index to the subtype cache. * Remove the fall-through to N=1 case for probing subtypeing tests, since those will always be handled by the optimized stubs. * Do not generate optimized stubs for FutureOr<T> (so far it just falled-through to TTS). We can make optimzed version of that later, but it requires special subtyping rules. * Local code quality improvement in the type-testing-stubs: Avoid extra jump at last case of cid-class-range checks. There are still a number of optimization opportunities we can do in future changes. Reviewed-on: https://dart-review.googlesource.com/46984 Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b [VM] Introduction of type testing stubs - Part 4 In order to avoid generating type testing stubs for too many types in the system - and thereby potentially cause an increase in code size - this change introduces a smarter way to decide for which types we should generate optimized type testing stubs. The precompiler creates a [TypeUsageInfo] which we use to collect information. More specifically: a) We collect the destination types for all type checks we emit (we do this inside AssertAssignableInstr::EmitNativeCode). -> These are types we might want to generate optimized type testing stubs for. b) We collect type argument vectors used in instance creations (we do this inside AllocateObjectInstr::EmitNativeCode) and keep a set of of used type argument vectors for each class. After the precompiler has finished compiling normal code we scan the set of destination types collected in a) for uninstantiated types (or more specifically, type parameter types). We then propagate the type argument vectors used on object allocation sites, which were collected in b), in order to find out what kind of types are flowing into those type parameters. This allows us to extend the set of types which we test against, by adding the types that flow into type parameters. We use this final augmented set of destination types as a "filter" when making the decision whether to generate an optimized type testing stub for a given type. Reviewed-on: https://dart-review.googlesource.com/48640 Issue https://github.com/dart-lang/sdk/issues/32603 Closes https://github.com/dart-lang/sdk/issues/32852 Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44 Reviewed-on: https://dart-review.googlesource.com/50944 Commit-Queue: Martin Kustermann <kustermann@google.com> Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
type_usage_info_ = value;
}
CompilerTimings* compiler_timings() const { return compiler_timings_; }
void set_compiler_timings(CompilerTimings* stats) {
compiler_timings_ = stats;
}
int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
void IncrementNoCallbackScopeDepth() {
ASSERT(no_callback_scope_depth_ < INT_MAX);
no_callback_scope_depth_ += 1;
}
void DecrementNoCallbackScopeDepth() {
ASSERT(no_callback_scope_depth_ > 0);
no_callback_scope_depth_ -= 1;
}
#if defined(DEBUG)
void EnterCompiler() {
ASSERT(!IsInsideCompiler());
inside_compiler_ = true;
}
void LeaveCompiler() {
ASSERT(IsInsideCompiler());
inside_compiler_ = false;
}
#endif
void StoreBufferAddObject(ObjectPtr obj);
void StoreBufferAddObjectGC(ObjectPtr obj);
#if defined(TESTING)
bool StoreBufferContains(ObjectPtr obj) const {
return store_buffer_block_->Contains(obj);
}
#endif
void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
static intptr_t store_buffer_block_offset() {
return OFFSET_OF(Thread, store_buffer_block_);
}
bool is_marking() const { return marking_stack_block_ != NULL; }
void MarkingStackAddObject(ObjectPtr obj);
void DeferredMarkingStackAddObject(ObjectPtr obj);
void MarkingStackBlockProcess();
void DeferredMarkingStackBlockProcess();
static intptr_t marking_stack_block_offset() {
return OFFSET_OF(Thread, marking_stack_block_);
}
uword top_exit_frame_info() const { return top_exit_frame_info_; }
void set_top_exit_frame_info(uword top_exit_frame_info) {
top_exit_frame_info_ = top_exit_frame_info;
}
static intptr_t top_exit_frame_info_offset() {
return OFFSET_OF(Thread, top_exit_frame_info_);
}
// Heap of the isolate that this thread is operating on.
Heap* heap() const { return heap_; }
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
uword top() const { return top_; }
uword end() const { return end_; }
void set_top(uword top) { top_ = top; }
void set_end(uword end) { end_ = end; }
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
int32_t no_safepoint_scope_depth() const {
#if defined(DEBUG)
return no_safepoint_scope_depth_;
#else
return 0;
#endif
}
void IncrementNoSafepointScopeDepth() {
#if defined(DEBUG)
ASSERT(no_safepoint_scope_depth_ < INT_MAX);
no_safepoint_scope_depth_ += 1;
#endif
}
void DecrementNoSafepointScopeDepth() {
#if defined(DEBUG)
ASSERT(no_safepoint_scope_depth_ > 0);
no_safepoint_scope_depth_ -= 1;
#endif
}
bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }
bool IsInStoppedMutatorsScope() const {
return stopped_mutators_scope_depth_ > 0;
}
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
static intptr_t member_name##offset() { \
return OFFSET_OF(Thread, member_name); \
}
CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
intptr_t index = 0;
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
if (i == reg) break;
++index;
}
return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
index * sizeof(uword);
}
static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
intptr_t index = 0;
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
if (i == reg) {
return index * kStoreBufferWrapperSize;
}
++index;
}
UNREACHABLE();
return 0;
}
#endif
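// Worked example (illustrative only; the register set below is hypothetical,
// not the actual kDartAvailableCpuRegs of any target): if the available
// registers were {R0, R2, R5}, the wrapper entry points are stored densely in
// that order, so R0 -> index 0, R2 -> index 1, R5 -> index 2. For R5 the two
// functions above would therefore yield
//   write_barrier_wrappers_thread_offset(R5)
//       == OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
//          2 * sizeof(uword)
//   WriteBarrierWrappersOffsetForRegister(R5) == 2 * kStoreBufferWrapperSize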
#define DEFINE_OFFSET_METHOD(name) \
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}
RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
static intptr_t name##_entry_point_offset() { \
return OFFSET_OF(Thread, name##_entry_point_); \
}
LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
void set_global_object_pool(ObjectPoolPtr raw_value) {
global_object_pool_ = raw_value;
}
const uword* dispatch_table_array() const { return dispatch_table_array_; }
void set_dispatch_table_array(const uword* array) {
dispatch_table_array_ = array;
}
static bool CanLoadFromThread(const Object& object);
static intptr_t OffsetFromThread(const Object& object);
static bool ObjectAtOffset(intptr_t offset, Object* object);
static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
#if defined(DEBUG)
// For asserts only. Has false positives when running with a simulator or
// SafeStack.
bool TopErrorHandlerIsSetJump() const;
bool TopErrorHandlerIsExitFrame() const;
#endif
uword vm_tag() const { return vm_tag_; }
void set_vm_tag(uword tag) { vm_tag_ = tag; }
static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
int64_t unboxed_int64_runtime_arg() const {
return unboxed_int64_runtime_arg_;
}
void set_unboxed_int64_runtime_arg(int64_t value) {
unboxed_int64_runtime_arg_ = value;
}
static intptr_t unboxed_int64_runtime_arg_offset() {
return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
}
GrowableObjectArrayPtr pending_functions();
void clear_pending_functions();
static intptr_t global_object_pool_offset() {
return OFFSET_OF(Thread, global_object_pool_);
}
static intptr_t dispatch_table_array_offset() {
return OFFSET_OF(Thread, dispatch_table_array_);
}
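// Note (a sketch of the intended use, not the exact generated code): with AOT
// table dispatch, compiled code conceptually performs an instance call as
//   entry = dispatch_table_array_[selector_offset + receiver_cid];
//   call entry;
// where selector_offset is a constant baked into the call site and
// receiver_cid is loaded from the receiver. The offset above exists so that
// generated code can load dispatch_table_array_ directly off the Thread.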
ObjectPtr active_exception() const { return active_exception_; }
void set_active_exception(const Object& value);
static intptr_t active_exception_offset() {
return OFFSET_OF(Thread, active_exception_);
}
ObjectPtr active_stacktrace() const { return active_stacktrace_; }
void set_active_stacktrace(const Object& value);
static intptr_t active_stacktrace_offset() {
return OFFSET_OF(Thread, active_stacktrace_);
}
uword resume_pc() const { return resume_pc_; }
void set_resume_pc(uword value) { resume_pc_ = value; }
static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }
ErrorPtr sticky_error() const;
void set_sticky_error(const Error& value);
void ClearStickyError();
DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();
#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
void set_reusable_##object##_handle_scope_active(bool value) { \
reusable_##object##_handle_scope_active_ = value; \
} \
bool reusable_##object##_handle_scope_active() const { \
return reusable_##object##_handle_scope_active_; \
}
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
bool IsAnyReusableHandleScopeActive() const {
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
if (reusable_##object##_handle_scope_active_) { \
return true; \
}
REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
return false;
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
}
#endif // defined(DEBUG)
void ClearReusableHandles();
#define REUSABLE_HANDLE(object) \
object& object##Handle() const { return *object##_handle_; }
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
#undef REUSABLE_HANDLE
/*
* Fields used to support safepointing a thread.
*
* - Bit 0 of the safepoint_state_ field is used to indicate if the thread is
* already at a safepoint,
* - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint
* is requested for this thread.
* - Bit 2 of the safepoint_state_ field is used to indicate if the thread is
* already at a deopt safepoint,
* - Bit 3 of the safepoint_state_ field is used to indicate if a deopt
* safepoint is requested for this thread.
* - Bit 4 of the safepoint_state_ field is used to indicate that the thread
* is blocked at a (deopt)safepoint and has to be woken up once the
* (deopt)safepoint operation is complete.
*
* The safepoint execution state (described above) for a thread is stored in
* the execution_state_ field.
* Potential execution states a thread could be in:
* kThreadInGenerated - The thread is running jitted dart/stub code.
* kThreadInVM - The thread is running VM code.
* kThreadInNative - The thread is running native code.
* kThreadInBlockedState - The thread is blocked waiting for a resource.
*/
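/*
 * Illustrative summary (a sketch; the authoritative encoding is given by the
 * AtSafepointField, SafepointRequestedField, AtDeoptSafepointField,
 * DeoptSafepointRequestedField and BlockedForSafepointField bit fields used
 * below):
 *
 *   safepoint_state_ == 0    -> thread is running, nothing requested.
 *   bit 0 set                -> thread is parked at a GC safepoint
 *                               (full_safepoint_state_acquired() additionally
 *                               sets bit 2 for GC-and-deopt safepoints).
 *   bit 1 and/or bit 3 set   -> another thread has requested a (deopt)
 *                               safepoint; this thread should call
 *                               BlockForSafepoint() at its next poll.
 */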
static bool IsAtSafepoint(SafepointLevel level, uword state) {
const uword mask = AtSafepointBits(level);
return (state & mask) == mask;
}
bool IsAtSafepoint() const {
return IsAtSafepoint(current_safepoint_level());
}
bool IsAtSafepoint(SafepointLevel level) const {
return IsAtSafepoint(level, safepoint_state_.load());
}
void SetAtSafepoint(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
if (value) {
safepoint_state_ |= AtSafepointBits(current_safepoint_level());
} else {
safepoint_state_ &= ~AtSafepointBits(current_safepoint_level());
}
}
bool IsSafepointRequestedLocked() const {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
return IsSafepointRequested();
}
bool IsSafepointRequested() const {
const uword state = safepoint_state_.load();
for (intptr_t level = current_safepoint_level(); level >= 0; --level) {
if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(level)))
return true;
}
return false;
}
bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
if (level > current_safepoint_level()) return false;
const uword state = safepoint_state_.load();
return IsSafepointLevelRequested(state, level);
}
static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
switch (level) {
case SafepointLevel::kGC:
return (state & SafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kGCAndDeopt:
return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
}
void BlockForSafepoint();
uword SetSafepointRequested(SafepointLevel level, bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
const uword mask = level == SafepointLevel::kGC
? SafepointRequestedField::mask_in_place()
: DeoptSafepointRequestedField::mask_in_place();
if (value) {
// acquire pulls from the release in TryEnterSafepoint.
return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
} else {
// release pushes to the acquire in TryExitSafepoint.
return safepoint_state_.fetch_and(~mask, std::memory_order_release);
}
}
static bool IsBlockedForSafepoint(uword state) {
return BlockedForSafepointField::decode(state);
}
bool IsBlockedForSafepoint() const {
return BlockedForSafepointField::decode(safepoint_state_);
}
void SetBlockedForSafepoint(bool value) {
ASSERT(thread_lock()->IsOwnedByCurrentThread());
safepoint_state_ =
BlockedForSafepointField::update(value, safepoint_state_);
}
bool BypassSafepoints() const {
return BypassSafepointsField::decode(safepoint_state_);
}
static uword SetBypassSafepoints(bool value, uword state) {
return BypassSafepointsField::update(value, state);
}
enum ExecutionState {
kThreadInVM = 0,
kThreadInGenerated,
kThreadInNative,
kThreadInBlockedState
};
ExecutionState execution_state() const {
return static_cast<ExecutionState>(execution_state_);
}
// Normally execution state is only accessed for the current thread.
NO_SANITIZE_THREAD
ExecutionState execution_state_cross_thread_for_testing() const {
return static_cast<ExecutionState>(execution_state_);
}
void set_execution_state(ExecutionState state) {
execution_state_ = static_cast<uword>(state);
}
static intptr_t execution_state_offset() {
return OFFSET_OF(Thread, execution_state_);
}
virtual bool MayAllocateHandles() {
return (execution_state() == kThreadInVM) ||
(execution_state() == kThreadInGenerated);
}
static uword full_safepoint_state_unacquired() {
return (0 << AtSafepointField::shift()) |
(0 << AtDeoptSafepointField::shift());
}
static uword full_safepoint_state_acquired() {
return (1 << AtSafepointField::shift()) |
(1 << AtDeoptSafepointField::shift());
}
bool TryEnterSafepoint() {
uword old_state = 0;
uword new_state = AtSafepointField::encode(true);
if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
new_state |= AtDeoptSafepointField::encode(true);
}
return safepoint_state_.compare_exchange_strong(old_state, new_state,
std::memory_order_release);
}
void EnterSafepoint() {
ASSERT(no_safepoint_scope_depth() == 0);
// First try a fast update of the thread state to indicate it is at a
// safepoint.
if (!TryEnterSafepoint()) {
// Fast update failed which means we could potentially be in the middle
// of a safepoint operation.
EnterSafepointUsingLock();
}
}
bool TryExitSafepoint() {
uword old_state = AtSafepointField::encode(true);
if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
old_state |= AtDeoptSafepointField::encode(true);
}
uword new_state = 0;
return safepoint_state_.compare_exchange_strong(old_state, new_state,
std::memory_order_acquire);
}
void ExitSafepoint() {
// First try a fast update of the thread state to indicate it is not at a
// safepoint anymore.
if (!TryExitSafepoint()) {
      // Fast update failed, which means we could potentially be in the middle
      // of a safepoint operation.
ExitSafepointUsingLock();
}
}
void CheckForSafepoint() {
// If we are in a runtime call that doesn't support lazy deopt, we will only
// respond to gc safepointing requests.
ASSERT(no_safepoint_scope_depth() == 0);
if (IsSafepointRequested()) {
BlockForSafepoint();
}
}
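  // Illustrative sketch (not part of this header's API surface) of how the
  // safepoint entry points above are typically combined; the helper names
  // HasMoreWork, ProcessOneUnitOfWork and WaitForExternalEvent are
  // hypothetical placeholders.
  //
  //   void DoLongRunningWork(Thread* thread) {
  //     while (HasMoreWork()) {
  //       ProcessOneUnitOfWork();
  //       thread->CheckForSafepoint();  // cooperate with pending safepoints
  //     }
  //     thread->EnterSafepoint();       // e.g. before blocking on an OS call
  //     WaitForExternalEvent();
  //     thread->ExitSafepoint();
  //   }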
int32_t AllocateFfiCallbackId();
// Store 'code' for the native callback identified by 'callback_id'.
//
  // Expands the callback code array as necessary to accommodate the callback
// ID.
void SetFfiCallbackCode(int32_t callback_id, const Code& code);
// Store 'stack_return' for the native callback identified by 'callback_id'.
//
  // Expands the callback stack return array as necessary to accommodate the
// callback ID.
void SetFfiCallbackStackReturn(int32_t callback_id,
intptr_t stack_return_delta);
// Ensure that 'callback_id' refers to a valid callback in this isolate.
//
// If "entry != 0", additionally checks that entry is inside the instructions
// of this callback.
//
// Aborts if any of these conditions fails.
void VerifyCallbackIsolate(int32_t callback_id, uword entry);
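  // A minimal sketch, assuming 'code' is an already-compiled callback
  // trampoline and 'entry' its entry point, of how the callback-metadata
  // hooks above fit together; the surrounding trampoline setup is not shown.
  //
  //   int32_t id = thread->AllocateFfiCallbackId();
  //   thread->SetFfiCallbackCode(id, code);           // grows array if needed
  //   thread->SetFfiCallbackStackReturn(id, /*stack_return_delta=*/4);
  //   ...
  //   thread->VerifyCallbackIsolate(id, entry);       // aborts on mismatch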
Thread* next() const { return next_; }
// Visit all object pointers.
void VisitObjectPointers(ObjectPointerVisitor* visitor,
ValidationPolicy validate_frames);
void RememberLiveTemporaries();
void DeferredMarkLiveTemporaries();
bool IsValidHandle(Dart_Handle object) const;
bool IsValidLocalHandle(Dart_Handle object) const;
intptr_t CountLocalHandles() const;
int ZoneSizeInBytes() const;
void UnwindScopes(uword stack_marker);
void InitVMConstants();
Random* random() { return &thread_random_; }
uint64_t* GetFfiMarshalledArguments(intptr_t size) {
    if (ffi_marshalled_arguments_size_ < size) {
      if (ffi_marshalled_arguments_size_ > 0) {
        free(ffi_marshalled_arguments_);
      }
      // Remember the new capacity so the buffer is only reallocated when it
      // actually needs to grow.
      ffi_marshalled_arguments_size_ = size;
      ffi_marshalled_arguments_ =
          reinterpret_cast<uint64_t*>(malloc(size * sizeof(uint64_t)));
    }
return ffi_marshalled_arguments_;
}
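  // Usage sketch (caller code hypothetical): the returned scratch buffer holds
  // at least 'size' uint64_t slots and is reused across calls on this thread.
  //
  //   uint64_t* args = thread->GetFfiMarshalledArguments(num_args);
  //   for (intptr_t i = 0; i < num_args; i++) args[i] = marshalled[i];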
#ifndef PRODUCT
void PrintJSON(JSONStream* stream) const;
#endif
PendingDeopts& pending_deopts() { return pending_deopts_; }
SafepointLevel current_safepoint_level() const {
return runtime_call_deopt_ability_ ==
RuntimeCallDeoptAbility::kCannotLazyDeopt
? SafepointLevel::kGC
: SafepointLevel::kGCAndDeopt;
}
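  // For example (per the mapping above): a thread that entered the runtime
  // under RuntimeCallDeoptAbility::kCannotLazyDeopt reports
  // SafepointLevel::kGC, so it checks in for GC safepoint operations but not
  // for deopt safepoint operations.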
private:
template <class T>
T* AllocateReusableHandle();
enum class RestoreWriteBarrierInvariantOp {
kAddToRememberedSet,
kAddToDeferredMarkingStack
};
friend class RestoreWriteBarrierInvariantVisitor;
void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);
// Set the current compiler state and return the previous compiler state.
CompilerState* SetCompilerState(CompilerState* state) {
CompilerState* previous = compiler_state_;
compiler_state_ = state;
return previous;
}
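  // Sketch of the save/restore idiom this accessor enables (typically driven
  // by a scope object; the direct calls below are illustrative only):
  //
  //   CompilerState* previous = thread->SetCompilerState(&state);
  //   ...compile under 'state'...
  //   thread->SetCompilerState(previous);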
// Accessed from generated code.
// ** This block of fields must come first! **
// For AOT cross-compilation, we rely on these members having the same offsets
// in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64) and ARM64.
// We use only word-sized fields to avoid differences in struct packing on the
// different architectures. See also CheckOffsets in dart.cc.
RelaxedAtomic<uword> stack_limit_;
uword write_barrier_mask_;
uword heap_base_;
Isolate* isolate_;
const uword* dispatch_table_array_;
uword top_ = 0;
uword end_ = 0;
// Offsets up to this point can all fit in a byte on X64. All of the above
// fields are very abundantly accessed from code. Thus, keeping them first
// is important for code size (although code size on X64 is not a priority).
uword saved_stack_limit_;
uword stack_overflow_flags_;
InstancePtr* field_table_values_;
Heap* heap_;
uword volatile top_exit_frame_info_;
StoreBufferBlock* store_buffer_block_;
MarkingStackBlock* marking_stack_block_;
MarkingStackBlock* deferred_marking_stack_block_;
uword volatile vm_tag_;
// Memory location dedicated for passing unboxed int64 values from
// generated code to runtime.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
ALIGN8 int64_t unboxed_int64_runtime_arg_;
// State that is cached in the TLS for fast access in generated code.
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
type_name member_name;
CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#define DECLARE_MEMBERS(name) uword name##_entry_point_;
RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
#endif
// JumpToExceptionHandler state:
ObjectPtr active_exception_;
ObjectPtr active_stacktrace_;
ObjectPoolPtr global_object_pool_;
uword resume_pc_;
uword saved_shadow_call_stack_ = 0;
uword execution_state_;
std::atomic<uword> safepoint_state_;
GrowableObjectArrayPtr ffi_callback_code_;
TypedDataPtr ffi_callback_stack_return_;
uword exit_through_ffi_ = 0;
ApiLocalScope* api_top_scope_;
// ---- End accessed from generated code. ----
// The layout of Thread object up to this point should not depend
// on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
// The code is generated without DART_PRECOMPILED_RUNTIME, but used with
// DART_PRECOMPILED_RUNTIME.
TaskKind task_kind_;
TimelineStream* dart_stream_;
IsolateGroup* isolate_group_ = nullptr;
mutable Monitor thread_lock_;
ApiLocalScope* api_reusable_scope_;
int32_t no_callback_scope_depth_;
intptr_t no_reload_scope_depth_ = 0;
intptr_t stopped_mutators_scope_depth_ = 0;
#if defined(DEBUG)
int32_t no_safepoint_scope_depth_;
#endif
VMHandles reusable_handles_;
intptr_t defer_oob_messages_count_;
uint16_t deferred_interrupts_mask_;
uint16_t deferred_interrupts_;
int32_t stack_overflow_count_;
uint32_t runtime_call_count_ = 0;
// Deoptimization of stack frames.
RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
RuntimeCallDeoptAbility::kCanLazyDeopt;
PendingDeopts pending_deopts_;
// Compiler state:
CompilerState* compiler_state_ = nullptr;
HierarchyInfo* hierarchy_info_;
TypeUsageInfo* type_usage_info_;
GrowableObjectArrayPtr pending_functions_;
CompilerTimings* compiler_timings_ = nullptr;
ErrorPtr sticky_error_;
Random thread_random_;
intptr_t ffi_marshalled_arguments_size_ = 0;
uint64_t* ffi_marshalled_arguments_;
InstancePtr* field_table_values() const { return field_table_values_; }
// Reusable handles support.
#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
#undef REUSABLE_HANDLE_FIELDS
#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
bool reusable_##object##_handle_scope_active_;
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
#endif // defined(DEBUG)
class AtSafepointField : public BitField<uword, bool, 0, 1> {};
class SafepointRequestedField
: public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
class AtDeoptSafepointField
: public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
class DeoptSafepointRequestedField
: public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
class BlockedForSafepointField
: public BitField<uword,
bool,
DeoptSafepointRequestedField::kNextBit,
1> {};
class BypassSafepointsField
: public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
static uword AtSafepointBits(SafepointLevel level) {
switch (level) {
case SafepointLevel::kGC:
return AtSafepointField::mask_in_place();
case SafepointLevel::kGCAndDeopt:
return AtSafepointField::mask_in_place() |
AtDeoptSafepointField::mask_in_place();
case SafepointLevel::kNumLevels:
UNREACHABLE();
}
}
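  // Illustration of how the bit fields above compose into safepoint_state_
  // (assuming the declared bit order; the concrete values are not normative):
  //
  //   uword state = AtSafepointField::encode(true) |
  //                 AtDeoptSafepointField::encode(true);
  //   // 'state' now has all bits of AtSafepointBits(SafepointLevel::kGC) and
  //   // of AtSafepointBits(SafepointLevel::kGCAndDeopt) set.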
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit_;
#endif
Thread* next_; // Used to chain the thread structures in an isolate.
bool is_mutator_thread_ = false;
#if defined(DEBUG)
bool inside_compiler_ = false;
#endif
explicit Thread(bool is_vm_isolate);
void StoreBufferRelease(
StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
void StoreBufferAcquire();
void MarkingStackRelease();
void MarkingStackAcquire();
void DeferredMarkingStackRelease();
void DeferredMarkingStackAcquire();
void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
void EnterSafepointUsingLock();
void ExitSafepointUsingLock();
void FinishEntering(TaskKind kind);
void PrepareLeaving();
static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
void DeferOOBMessageInterrupts();
void RestoreOOBMessageInterrupts();
#define REUSABLE_FRIEND_DECLARATION(name) \
friend class Reusable##name##HandleScope;
REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
#undef REUSABLE_FRIEND_DECLARATION
friend class ApiZone;
friend class DisabledNoActiveIsolateScope;
friend class InterruptChecker;
friend class Isolate;
friend class IsolateGroup;
friend class IsolateTestHelper;
friend class NoActiveIsolateScope;
friend class NoOOBMessageScope;
friend class NoReloadScope;
friend class Simulator;
friend class StackZone;
friend class StoppedMutatorsScope;
friend class ThreadRegistry;
friend class CompilerState;
friend class compiler::target::Thread;
friend class FieldTable;
friend class RuntimeCallDeoptScope;
friend class
TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
friend class
TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
friend class MonitorLocker; // ExitSafepointUsingLock
friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
const char*,
char**);
DISALLOW_COPY_AND_ASSIGN(Thread);
};
class RuntimeCallDeoptScope : public StackResource {
public:
RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
: StackResource(thread) {
// We cannot have nested calls into the VM without deopt support.
ASSERT(thread->runtime_call_deopt_ability_ ==
RuntimeCallDeoptAbility::kCanLazyDeopt);
thread->runtime_call_deopt_ability_ = kind;
}
virtual ~RuntimeCallDeoptScope() {
thread()->runtime_call_deopt_ability_ =
RuntimeCallDeoptAbility::kCanLazyDeopt;
}
private:
Thread* thread() {
return reinterpret_cast<Thread*>(StackResource::thread());
}
};
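
// Illustrative usage sketch (not part of this header): a runtime call that
// must not lazily deoptimize its caller frames can be guarded as below. The
// enumerator RuntimeCallDeoptAbility::kCannotLazyDeopt is assumed to be the
// non-default value of the enum declared earlier in this header.
//
//   {
//     RuntimeCallDeoptScope no_lazy_deopt(
//         Thread::Current(), RuntimeCallDeoptAbility::kCannotLazyDeopt);
//     // ... caller frames will not be lazily deoptimized in here ...
//   }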
#if defined(HOST_OS_WINDOWS)
// Clears the state of the current thread and frees the allocation.
void WindowsThreadCleanUp();
#endif
// Disable thread interrupts.
class DisableThreadInterruptsScope : public StackResource {
public:
explicit DisableThreadInterruptsScope(Thread* thread);
~DisableThreadInterruptsScope();
};
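
// Illustrative usage sketch (not part of this header): the scope keeps the
// thread from being interrupted (e.g. by profiler sampling) for its dynamic
// extent.
//
//   {
//     DisableThreadInterruptsScope no_interrupts(Thread::Current());
//     // ... work that must not observe thread interrupts ...
//   }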
// Within a NoSafepointScope, the thread must not reach any safepoint. Used
// around code that manipulates raw object pointers directly without handles.
#if defined(DEBUG)
class NoSafepointScope : public ThreadStackResource {
public:
explicit NoSafepointScope(Thread* thread = nullptr)
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
this->thread()->IncrementNoSafepointScopeDepth();
}
~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
private:
DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
};
#else // defined(DEBUG)
class NoSafepointScope : public ValueObject {
public:
explicit NoSafepointScope(Thread* thread = nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
};
#endif // defined(DEBUG)
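
// Illustrative usage sketch (not part of this header): in DEBUG builds the
// scope bumps the thread's no-safepoint depth so that reaching a safepoint
// inside it trips an assertion; in release builds it is a no-op.
//
//   {
//     NoSafepointScope no_safepoint;  // defaults to Thread::Current()
//     // ... touch raw object pointers; nothing here may allocate, block,
//     // ... or otherwise reach a safepoint ...
//   }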
// Within a NoReloadScope, isolate reload is not allowed to be performed
// (tracked via no_reload_scope_depth_ in non-product JIT builds).
class NoReloadScope : public ThreadStackResource {
public:
explicit NoReloadScope(Thread* thread) : ThreadStackResource(thread) {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
thread->no_reload_scope_depth_++;
ASSERT(thread->no_reload_scope_depth_ >= 0);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}
~NoReloadScope() {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
thread()->no_reload_scope_depth_ -= 1;
ASSERT(thread()->no_reload_scope_depth_ >= 0);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}
private:
DISALLOW_COPY_AND_ASSIGN(NoReloadScope);
};
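
// Illustrative usage sketch (not part of this header):
//
//   {
//     NoReloadScope no_reload(Thread::Current());
//     // ... code that must not be interleaved with an isolate reload ...
//   }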
// Within a StoppedMutatorsScope, all other mutator threads are expected to
// be stopped (tracked via stopped_mutators_scope_depth_ in non-product JIT
// builds).
class StoppedMutatorsScope : public ThreadStackResource {
public:
explicit StoppedMutatorsScope(Thread* thread) : ThreadStackResource(thread) {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
thread->stopped_mutators_scope_depth_++;
ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}
~StoppedMutatorsScope() {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
thread()->stopped_mutators_scope_depth_ -= 1;
ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
}
private:
DISALLOW_COPY_AND_ASSIGN(StoppedMutatorsScope);
};
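
// Illustrative usage sketch (not part of this header):
//
//   {
//     StoppedMutatorsScope stopped_mutators(Thread::Current());
//     // ... code that relies on all other mutators being stopped ...
//   }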
// Within an EnterCompilerScope, the thread must operate on cloned fields.
#if defined(DEBUG)
class EnterCompilerScope : public ThreadStackResource {
public:
explicit EnterCompilerScope(Thread* thread = nullptr)
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
if (!previously_is_inside_compiler_) {
this->thread()->EnterCompiler();
}
}
~EnterCompilerScope() {
if (!previously_is_inside_compiler_) {
thread()->LeaveCompiler();
}
}
private:
bool previously_is_inside_compiler_;
DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
};
#else // defined(DEBUG)
class EnterCompilerScope : public ValueObject {
public:
explicit EnterCompilerScope(Thread* thread = nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
};
#endif // defined(DEBUG)
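
// Illustrative usage sketch (not part of this header): code running inside
// the compiler (DEBUG-only bookkeeping) marks itself so that field accesses
// can be checked against the cloned fields.
//
//   {
//     EnterCompilerScope inside_compiler;  // defaults to Thread::Current()
//     // ... compiler code; reads/writes go to the cloned fields ...
//   }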
// Within a LeaveCompilerScope, the thread must operate on the original
// (non-cloned) fields.
#if defined(DEBUG)
class LeaveCompilerScope : public ThreadStackResource {
public:
explicit LeaveCompilerScope(Thread* thread = nullptr)
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
if (previously_is_inside_compiler_) {
this->thread()->LeaveCompiler();
}
}
~LeaveCompilerScope() {
if (previously_is_inside_compiler_) {
thread()->EnterCompiler();
}
}
private:
bool previously_is_inside_compiler_;
DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
};
#else // defined(DEBUG)
class LeaveCompilerScope : public ValueObject {
public:
explicit LeaveCompilerScope(Thread* thread = nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
};
#endif // defined(DEBUG)
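
// Illustrative usage sketch (not part of this header): compiler code that
// has to call back into the runtime can temporarily mark itself as being
// outside the compiler.
//
//   {
//     LeaveCompilerScope outside_compiler;  // defaults to Thread::Current()
//     // ... runtime call that must see the original (non-cloned) fields ...
//   }
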
} // namespace dart
#endif // RUNTIME_VM_THREAD_H_