// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_THREAD_H_
#define RUNTIME_VM_THREAD_H_

#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif

#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/safe_stack.h"
#include "vm/bitfield.h"
#include "vm/compiler/runtime_api.h"
#include "vm/constants.h"
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/os_thread.h"
#include "vm/pending_deopts.h"
#include "vm/random.h"
#include "vm/runtime_entry_list.h"
#include "vm/thread_stack_resource.h"
#include "vm/thread_state.h"

namespace dart {

class AbstractType;
class ApiLocalScope;
class Array;
class CompilerState;
class CompilerTimings;
class Class;
class Code;
class Error;
class ExceptionHandlers;
class Field;
class FieldTable;
class Function;
class GrowableObjectArray;
class HandleScope;
class Heap;
class HierarchyInfo;
class Instance;
class Isolate;
class IsolateGroup;
class Library;
class Object;
class OSThread;
class JSONObject;
class PcDescriptors;
class RuntimeEntry;
class Smi;
class StackResource;
class StackTrace;
class String;
class TimelineStream;
class TypeArguments;
class TypeParameter;
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
class TypeUsageInfo;
class Zone;

namespace compiler {
namespace target {
class Thread;
}  // namespace target
}  // namespace compiler

#define REUSABLE_HANDLE_LIST(V) \
  V(AbstractType) \
  V(Array) \
  V(Class) \
  V(Code) \
  V(Error) \
  V(ExceptionHandlers) \
  V(Field) \
  V(Function) \
  V(GrowableObjectArray) \
  V(Instance) \
  V(Library) \
  V(Object) \
  V(PcDescriptors) \
  V(Smi) \
  V(String) \
  V(TypeParameters) \
  V(TypeArguments) \
  V(TypeParameter)
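
// A minimal sketch of how an X-macro list like REUSABLE_HANDLE_LIST is
// consumed (illustrative only; DECLARE_REUSABLE_HANDLE_FIELD is a
// hypothetical helper, not the VM's real expansion macro):
//
//   #define DECLARE_REUSABLE_HANDLE_FIELD(object) object* object##_handle_;
//   REUSABLE_HANDLE_LIST(DECLARE_REUSABLE_HANDLE_FIELD)
//   #undef DECLARE_REUSABLE_HANDLE_FIELD
//
// which expands to one cached handle field per listed class, e.g.
//   AbstractType* AbstractType_handle_;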

#define CACHED_VM_STUBS_LIST(V) \
  V(CodePtr, write_barrier_code_, StubCode::WriteBarrier().ptr(), nullptr) \
  V(CodePtr, array_write_barrier_code_, StubCode::ArrayWriteBarrier().ptr(), \
    nullptr) \
  V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().ptr(), \
    nullptr) \
  V(CodePtr, fix_allocation_stub_code_, \
    StubCode::FixAllocationStubTarget().ptr(), nullptr) \
  V(CodePtr, invoke_dart_code_stub_, StubCode::InvokeDartCode().ptr(), \
    nullptr) \
  V(CodePtr, call_to_runtime_stub_, StubCode::CallToRuntime().ptr(), nullptr) \
  V(CodePtr, late_initialization_error_shared_without_fpu_regs_stub_, \
    StubCode::LateInitializationErrorSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, late_initialization_error_shared_with_fpu_regs_stub_, \
    StubCode::LateInitializationErrorSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, null_error_shared_without_fpu_regs_stub_, \
    StubCode::NullErrorSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, null_error_shared_with_fpu_regs_stub_, \
    StubCode::NullErrorSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, null_arg_error_shared_without_fpu_regs_stub_, \
    StubCode::NullArgErrorSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, null_arg_error_shared_with_fpu_regs_stub_, \
    StubCode::NullArgErrorSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, null_cast_error_shared_without_fpu_regs_stub_, \
    StubCode::NullCastErrorSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, null_cast_error_shared_with_fpu_regs_stub_, \
    StubCode::NullCastErrorSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, range_error_shared_without_fpu_regs_stub_, \
    StubCode::RangeErrorSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, range_error_shared_with_fpu_regs_stub_, \
    StubCode::RangeErrorSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, allocate_mint_with_fpu_regs_stub_, \
    StubCode::AllocateMintSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, allocate_mint_without_fpu_regs_stub_, \
    StubCode::AllocateMintSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, allocate_object_stub_, StubCode::AllocateObject().ptr(), nullptr) \
  V(CodePtr, allocate_object_parameterized_stub_, \
    StubCode::AllocateObjectParameterized().ptr(), nullptr) \
  V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().ptr(), \
    nullptr) \
  V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_, \
    StubCode::StackOverflowSharedWithoutFPURegs().ptr(), nullptr) \
  V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_, \
    StubCode::StackOverflowSharedWithFPURegs().ptr(), nullptr) \
  V(CodePtr, switchable_call_miss_stub_, StubCode::SwitchableCallMiss().ptr(), \
    nullptr) \
  V(CodePtr, throw_stub_, StubCode::Throw().ptr(), nullptr) \
  V(CodePtr, re_throw_stub_, StubCode::ReThrow().ptr(), nullptr) \
  V(CodePtr, assert_boolean_stub_, StubCode::AssertBoolean().ptr(), nullptr) \
  V(CodePtr, optimize_stub_, StubCode::OptimizeFunction().ptr(), nullptr) \
  V(CodePtr, deoptimize_stub_, StubCode::Deoptimize().ptr(), nullptr) \
  V(CodePtr, lazy_deopt_from_return_stub_, \
    StubCode::DeoptimizeLazyFromReturn().ptr(), nullptr) \
  V(CodePtr, lazy_deopt_from_throw_stub_, \
    StubCode::DeoptimizeLazyFromThrow().ptr(), nullptr) \
  V(CodePtr, slow_type_test_stub_, StubCode::SlowTypeTest().ptr(), nullptr) \
  V(CodePtr, lazy_specialize_type_test_stub_, \
    StubCode::LazySpecializeTypeTest().ptr(), nullptr) \
  V(CodePtr, enter_safepoint_stub_, StubCode::EnterSafepoint().ptr(), nullptr) \
  V(CodePtr, exit_safepoint_stub_, StubCode::ExitSafepoint().ptr(), nullptr) \
  V(CodePtr, call_native_through_safepoint_stub_, \
    StubCode::CallNativeThroughSafepoint().ptr(), nullptr)
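
// Each entry above has the shape V(type, name, init-expression, default).
// A consumer macro picks the columns it needs; for example, declaring one
// cached member per stub could look like this (a sketch; DECLARE_MEMBER is
// a hypothetical helper, not the VM's real expansion macro):
//
//   #define DECLARE_MEMBER(type_name, member_name, init, default_init) \
//     type_name member_name;
//   CACHED_VM_STUBS_LIST(DECLARE_MEMBER)
//   #undef DECLARE_MEMBER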

#define CACHED_NON_VM_STUB_LIST(V) \
  V(ObjectPtr, object_null_, Object::null(), nullptr) \
  V(BoolPtr, bool_true_, Object::bool_true().ptr(), nullptr) \
  V(BoolPtr, bool_false_, Object::bool_false().ptr(), nullptr)

// List of VM-global objects/addresses cached in each Thread object.
// Important: constant false must immediately follow constant true.
#define CACHED_VM_OBJECTS_LIST(V) \
  CACHED_NON_VM_STUB_LIST(V) \
  CACHED_VM_STUBS_LIST(V)

// This assertion marks places which assume that boolean false immediately
// follows bool true in the CACHED_VM_OBJECTS_LIST.
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE() \
  ASSERT((Thread::bool_true_offset() + kWordSize) == \
         Thread::bool_false_offset());
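
// The layout invariant lets code select a cached boolean by offset
// arithmetic instead of branching between two loads, roughly (a sketch,
// not the exact VM code sequence):
//
//   intptr_t offset =
//       Thread::bool_true_offset() + (condition ? 0 : kWordSize);
//   // loads bool_true_ when condition is true, bool_false_ otherwise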

#define CACHED_VM_STUBS_ADDRESSES_LIST(V) \
  V(uword, write_barrier_entry_point_, StubCode::WriteBarrier().EntryPoint(), \
    0) \
  V(uword, array_write_barrier_entry_point_, \
    StubCode::ArrayWriteBarrier().EntryPoint(), 0) \
  V(uword, call_to_runtime_entry_point_, \
    StubCode::CallToRuntime().EntryPoint(), 0) \
  V(uword, allocate_mint_with_fpu_regs_entry_point_, \
    StubCode::AllocateMintSharedWithFPURegs().EntryPoint(), 0) \
  V(uword, allocate_mint_without_fpu_regs_entry_point_, \
    StubCode::AllocateMintSharedWithoutFPURegs().EntryPoint(), 0) \
  V(uword, allocate_object_entry_point_, \
    StubCode::AllocateObject().EntryPoint(), 0) \
  V(uword, allocate_object_parameterized_entry_point_, \
    StubCode::AllocateObjectParameterized().EntryPoint(), 0) \
  V(uword, allocate_object_slow_entry_point_, \
    StubCode::AllocateObjectSlow().EntryPoint(), 0) \
  V(uword, stack_overflow_shared_without_fpu_regs_entry_point_, \
    StubCode::StackOverflowSharedWithoutFPURegs().EntryPoint(), 0) \
  V(uword, stack_overflow_shared_with_fpu_regs_entry_point_, \
    StubCode::StackOverflowSharedWithFPURegs().EntryPoint(), 0) \
  V(uword, megamorphic_call_checked_entry_, \
    StubCode::MegamorphicCall().EntryPoint(), 0) \
  V(uword, switchable_call_miss_entry_, \
    StubCode::SwitchableCallMiss().EntryPoint(), 0) \
  V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0) \
  V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0) \
  V(uword, call_native_through_safepoint_entry_point_, \
    StubCode::CallNativeThroughSafepoint().EntryPoint(), 0) \
  V(uword, slow_type_test_entry_point_, StubCode::SlowTypeTest().EntryPoint(), \
    0)

#define CACHED_ADDRESSES_LIST(V) \
  CACHED_VM_STUBS_ADDRESSES_LIST(V) \
  V(uword, bootstrap_native_wrapper_entry_point_, \
    NativeEntry::BootstrapNativeCallWrapperEntry(), 0) \
  V(uword, no_scope_native_wrapper_entry_point_, \
    NativeEntry::NoScopeNativeCallWrapperEntry(), 0) \
  V(uword, auto_scope_native_wrapper_entry_point_, \
    NativeEntry::AutoScopeNativeCallWrapperEntry(), 0) \
  V(StringPtr*, predefined_symbols_address_, Symbols::PredefinedAddress(), \
    NULL) \
  V(uword, double_nan_address_, reinterpret_cast<uword>(&double_nan_constant), \
    0) \
  V(uword, double_negate_address_, \
    reinterpret_cast<uword>(&double_negate_constant), 0) \
  V(uword, double_abs_address_, reinterpret_cast<uword>(&double_abs_constant), \
    0) \
  V(uword, float_not_address_, reinterpret_cast<uword>(&float_not_constant), \
    0) \
  V(uword, float_negate_address_, \
    reinterpret_cast<uword>(&float_negate_constant), 0) \
  V(uword, float_absolute_address_, \
    reinterpret_cast<uword>(&float_absolute_constant), 0) \
  V(uword, float_zerow_address_, \
    reinterpret_cast<uword>(&float_zerow_constant), 0)

#define CACHED_CONSTANTS_LIST(V) \
  CACHED_VM_OBJECTS_LIST(V) \
  CACHED_ADDRESSES_LIST(V)

enum class ValidationPolicy {
  kValidateFrames = 0,
  kDontValidateFrames = 1,
};

enum class RuntimeCallDeoptAbility {
  // There was no leaf call, or there was a leaf call that can cause lazy
  // deoptimization after the call.
  kCanLazyDeopt,
  // There was a leaf call after which the VM cannot lazily deoptimize.
  kCannotLazyDeopt,
};

// The safepoint level a thread is on, or that a safepoint operation is
// requested for.
//
// The higher the number, the stronger the guarantees:
//   * the time-to-safepoint latency increases with the level
//   * the frequency of hitting possible safepoints decreases with the level
enum SafepointLevel {
  // Safe to GC.
  kGC,
  // Safe to GC as well as to deopt.
  kGCAndDeopt,
  // Number of levels.
  kNumLevels,
};
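
// Since the levels are ordered by strength, a check for "is this thread at
// least at the required safepoint level" can compare them numerically
// (a sketch with a hypothetical helper, not VM code):
//
//   bool AtLeast(SafepointLevel current, SafepointLevel required) {
//     return current >= required;
//   }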

// A VM thread; may be executing Dart code or performing helper tasks like
// garbage collection or compilation. The Thread structure associated with
// a thread is allocated by EnsureInit before entering an isolate, and
// destroyed automatically when the underlying OS thread exits. NOTE: On
// Windows, CleanUp must currently be called manually (issue 23474).
class Thread : public ThreadState {
 public:
  // The kind of task this thread is performing. Sampled by the profiler.
  enum TaskKind {
    kUnknownTask = 0x0,
    kMutatorTask = 0x1,
    kCompilerTask = 0x2,
    kMarkerTask = 0x4,
    kSweeperTask = 0x8,
    kCompactorTask = 0x10,
    kScavengerTask = 0x20,
  };

  // Converts a TaskKind to its corresponding C-String name.
  static const char* TaskKindToCString(TaskKind kind);

  ~Thread();

  // The currently executing thread, or NULL if not yet initialized.
  static Thread* Current() {
#if defined(HAS_C11_THREAD_LOCAL)
    return static_cast<Thread*>(OSThread::CurrentVMThread());
#else
    BaseThread* thread = OSThread::GetCurrentTLS();
    if (thread == NULL || thread->is_os_thread()) {
      return NULL;
    }
    return static_cast<Thread*>(thread);
#endif
  }
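
  // Typical usage (a sketch; callers must handle the NULL result on OS
  // threads that have not entered the VM):
  //
  //   Thread* thread = Thread::Current();
  //   if (thread != NULL) {
  //     // ... use the thread, e.g. thread->zone() from ThreadState ...
  //   }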

  // Makes the current thread enter 'isolate'.
  static bool EnterIsolate(Isolate* isolate, bool is_nested_reenter = false);
  // Makes the current thread exit its isolate.
  static void ExitIsolate(bool is_nested_exit = false);

  // A VM thread other than the main mutator thread can enter an isolate as a
  // "helper" to gain limited concurrent access to the isolate. One example is
  // SweeperTask (which uses the class table, which is copy-on-write).
  // TODO(koda): Properly synchronize heap access to expand allowed operations.
  static bool EnterIsolateAsHelper(Isolate* isolate,
                                   TaskKind kind,
                                   bool bypass_safepoint = false);
  static void ExitIsolateAsHelper(bool bypass_safepoint = false);

  static bool EnterIsolateGroupAsHelper(IsolateGroup* isolate_group,
                                        TaskKind kind,
                                        bool bypass_safepoint);
  static void ExitIsolateGroupAsHelper(bool bypass_safepoint);
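
  // Example helper-task pattern (a sketch; the task kind is illustrative
  // and error handling is elided):
  //
  //   if (Thread::EnterIsolateGroupAsHelper(group, Thread::kSweeperTask,
  //                                         /*bypass_safepoint=*/false)) {
  //     // ... do helper work on Thread::Current() ...
  //     Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/false);
  //   }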

  // Empties the store buffer block into the isolate.
  void ReleaseStoreBuffer();
  void AcquireMarkingStack();
  void ReleaseMarkingStack();

  void SetStackLimit(uword value);
  void ClearStackLimit();

  // Access to the current stack limit for generated code. Either the true OS
  // thread's stack limit minus some headroom, or a special value to trigger
  // interrupts.
  uword stack_limit_address() const {
    return reinterpret_cast<uword>(&stack_limit_);
  }
  static intptr_t stack_limit_offset() {
    return OFFSET_OF(Thread, stack_limit_);
  }

  // The true stack limit for this OS thread.
  static intptr_t saved_stack_limit_offset() {
    return OFFSET_OF(Thread, saved_stack_limit_);
  }
  uword saved_stack_limit() const { return saved_stack_limit_; }

#if defined(USING_SAFE_STACK)
  uword saved_safestack_limit() const { return saved_safestack_limit_; }
  void set_saved_safestack_limit(uword limit) {
    saved_safestack_limit_ = limit;
  }
#endif

  static uword saved_shadow_call_stack_offset() {
    return OFFSET_OF(Thread, saved_shadow_call_stack_);
  }

  // Stack overflow flags
  enum {
    kOsrRequest = 0x1,  // Current stack overflow caused by OSR request.
  };

  uword write_barrier_mask() const { return write_barrier_mask_; }
  uword heap_base() const { return heap_base_; }

  static intptr_t write_barrier_mask_offset() {
    return OFFSET_OF(Thread, write_barrier_mask_);
  }
  static intptr_t heap_base_offset() { return OFFSET_OF(Thread, heap_base_); }
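
  // Generated code consults write_barrier_mask_ when deciding whether a
  // store needs the write-barrier slow path. In pseudocode (a sketch only;
  // the real barrier sequence is emitted by the compiler back end):
  //
  //   if ((object_header_tags & thread->write_barrier_mask_) != 0) {
  //     // call the write_barrier_code_ stub
  //   }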

  static intptr_t stack_overflow_flags_offset() {
    return OFFSET_OF(Thread, stack_overflow_flags_);
  }

  int32_t IncrementAndGetStackOverflowCount() {
    return ++stack_overflow_count_;
  }

  uint32_t IncrementAndGetRuntimeCallCount() { return ++runtime_call_count_; }

  static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
    return fpu_regs
               ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
               : stack_overflow_shared_without_fpu_regs_entry_point_offset();
  }

  static intptr_t safepoint_state_offset() {
    return OFFSET_OF(Thread, safepoint_state_);
  }

  static intptr_t callback_code_offset() {
    return OFFSET_OF(Thread, ffi_callback_code_);
  }

  static intptr_t callback_stack_return_offset() {
    return OFFSET_OF(Thread, ffi_callback_stack_return_);
  }

  // Tag state is maintained on transitions.
  enum {
    // Always true in generated state.
    kDidNotExit = 0,
    // The VM exited the generated state through FFI.
    // This can be true in both native and VM state.
    kExitThroughFfi = 1,
    // The VM exited the generated state through a runtime call.
    // This can be true in both native and VM state.
    kExitThroughRuntimeCall = 2,
  };

  static intptr_t exit_through_ffi_offset() {
    return OFFSET_OF(Thread, exit_through_ffi_);
  }

  TaskKind task_kind() const { return task_kind_; }

  // Retrieves and clears the stack overflow flags. These are set by
  // the generated code before the slow path runtime routine for a
  // stack overflow is called.
  uword GetAndClearStackOverflowFlags();

  // Interrupt bits.
  enum {
    kVMInterrupt = 0x1,  // Internal VM checks: safepoints, store buffers, etc.
    kMessageInterrupt = 0x2,  // An interrupt to process an out of band message.

    kInterruptsMask = (kVMInterrupt | kMessageInterrupt),
  };

  void ScheduleInterrupts(uword interrupt_bits);
  void ScheduleInterruptsLocked(uword interrupt_bits);
  ErrorPtr HandleInterrupts();
  uword GetAndClearInterrupts();
  bool HasScheduledInterrupts() const {
    return (stack_limit_ & kInterruptsMask) != 0;
  }
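
  // Interrupt delivery piggybacks on the stack check: ScheduleInterrupts()
  // ORs interrupt bits into stack_limit_, so the next stack-overflow check
  // in generated code fails and control diverts into the runtime, which
  // calls HandleInterrupts(). In pseudocode (a sketch):
  //
  //   // generated prologue: if (SP < thread->stack_limit_) goto slow_path;
  //   // slow path (runtime): HandleInterrupts() processes the bits from
  //   // GetAndClearInterrupts(); the limit reverts to saved_stack_limit_.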

  // Monitor corresponding to this thread.
  Monitor* thread_lock() const { return &thread_lock_; }

  // The reusable api local scope for this thread.
  ApiLocalScope* api_reusable_scope() const { return api_reusable_scope_; }
  void set_api_reusable_scope(ApiLocalScope* value) {
    ASSERT(value == NULL || api_reusable_scope_ == NULL);
    api_reusable_scope_ = value;
  }

  // The api local scope for this thread; this is where all local handles
  // are allocated.
  ApiLocalScope* api_top_scope() const { return api_top_scope_; }
  void set_api_top_scope(ApiLocalScope* value) { api_top_scope_ = value; }
  static intptr_t api_top_scope_offset() {
    return OFFSET_OF(Thread, api_top_scope_);
  }

  void EnterApiScope();
  void ExitApiScope();
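
  // Scopes are strictly paired (a usage sketch):
  //
  //   thread->EnterApiScope();
  //   // ... allocate local handles via the Dart_* embedding API ...
  //   thread->ExitApiScope();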

  // The isolate that this thread is operating on, or nullptr if none.
  Isolate* isolate() const { return isolate_; }
  static intptr_t isolate_offset() { return OFFSET_OF(Thread, isolate_); }
  static intptr_t isolate_group_offset() {
    return OFFSET_OF(Thread, isolate_group_);
  }

  // The isolate group that this thread is operating on, or nullptr if none.
  IsolateGroup* isolate_group() const { return isolate_group_; }

  static intptr_t field_table_values_offset() {
    return OFFSET_OF(Thread, field_table_values_);
  }

  bool IsMutatorThread() const { return is_mutator_thread_; }

#if defined(DEBUG)
  bool IsInsideCompiler() const { return inside_compiler_; }
#endif

  bool CanCollectGarbage() const;

  // Offset of Dart TimelineStream object.
  static intptr_t dart_stream_offset() {
    return OFFSET_OF(Thread, dart_stream_);
  }

  // Is |this| executing Dart code?
  bool IsExecutingDartCode() const;

  // Has |this| exited Dart code?
  bool HasExitedDartCode() const;

  CompilerState& compiler_state() {
    ASSERT(compiler_state_ != nullptr);
    return *compiler_state_;
  }

  HierarchyInfo* hierarchy_info() const {
    ASSERT(isolate_group_ != nullptr);
    return hierarchy_info_;
  }

  void set_hierarchy_info(HierarchyInfo* value) {
    ASSERT(isolate_group_ != nullptr);
    ASSERT((hierarchy_info_ == nullptr && value != nullptr) ||
           (hierarchy_info_ != nullptr && value == nullptr));
    hierarchy_info_ = value;
  }
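
  // The asserts above enforce a strict attach/detach protocol (a sketch):
  //
  //   thread->set_hierarchy_info(&info);    // attach: current value nullptr
  //   // ... use thread->hierarchy_info() ...
  //   thread->set_hierarchy_info(nullptr);  // detach before re-attaching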

  TypeUsageInfo* type_usage_info() const {
    ASSERT(isolate_group_ != nullptr);
    return type_usage_info_;
  }

  void set_type_usage_info(TypeUsageInfo* value) {
    ASSERT(isolate_group_ != nullptr);
    ASSERT((type_usage_info_ == nullptr && value != nullptr) ||
           (type_usage_info_ != nullptr && value == nullptr));
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever a
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object, only the [Instruction] object is necessary.
The type testing stubs do not setup a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialzed for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to stub for negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking it's type testing stub.
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialzed type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialzed type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid based comparisons for type parameters. (Note that this fast-case migth
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based imlementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to N=1 case for probing subtypeing tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
falled-through to TTS). We can make optimzed version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid
extra jump at last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially cause an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set of
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
|
|
|
type_usage_info_ = value;
|
|
|
|
}
|
|
|
|
|
  CompilerTimings* compiler_timings() const { return compiler_timings_; }
  void set_compiler_timings(CompilerTimings* stats) {
    compiler_timings_ = stats;
  }

  int32_t no_callback_scope_depth() const { return no_callback_scope_depth_; }
  void IncrementNoCallbackScopeDepth() {
    ASSERT(no_callback_scope_depth_ < INT_MAX);
    no_callback_scope_depth_ += 1;
  }
  void DecrementNoCallbackScopeDepth() {
    ASSERT(no_callback_scope_depth_ > 0);
    no_callback_scope_depth_ -= 1;
  }

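  // Illustrative sketch (not part of this header): callers typically manage
  // this counter with an RAII guard rather than calling the increment and
  // decrement methods directly. A hypothetical guard would look like:
  //
  //   class NoCallbackScopeGuard {
  //    public:
  //     explicit NoCallbackScopeGuard(Thread* t) : thread_(t) {
  //       thread_->IncrementNoCallbackScopeDepth();
  //     }
  //     ~NoCallbackScopeGuard() { thread_->DecrementNoCallbackScopeDepth(); }
  //    private:
  //     Thread* const thread_;
  //   };
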
#if defined(DEBUG)
  void EnterCompiler() {
    ASSERT(!IsInsideCompiler());
    inside_compiler_ = true;
  }
  void LeaveCompiler() {
    ASSERT(IsInsideCompiler());
    inside_compiler_ = false;
  }
#endif

  void StoreBufferAddObject(ObjectPtr obj);
  void StoreBufferAddObjectGC(ObjectPtr obj);
#if defined(TESTING)
  bool StoreBufferContains(ObjectPtr obj) const {
    return store_buffer_block_->Contains(obj);
  }
#endif
  void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
  static intptr_t store_buffer_block_offset() {
    return OFFSET_OF(Thread, store_buffer_block_);
  }

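  // Illustrative sketch (assumed usage, not a definition from this header):
  // a generational write barrier records old-to-new stores by adding the
  // source object to the thread-local store buffer block, conceptually:
  //
  //   void StoreIntoObject(Thread* thread, ObjectPtr obj, ObjectPtr value) {
  //     // ... perform the raw store of 'value' into a slot of 'obj' ...
  //     if (IsOldObject(obj) && IsNewObject(value)) {
  //       thread->StoreBufferAddObject(obj);  // remember 'obj' for the GC
  //     }
  //   }
  //
  // IsOldObject/IsNewObject above are stand-ins for the VM's real checks.
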
  bool is_marking() const { return marking_stack_block_ != nullptr; }
  void MarkingStackAddObject(ObjectPtr obj);
  void DeferredMarkingStackAddObject(ObjectPtr obj);
  void MarkingStackBlockProcess();
  void DeferredMarkingStackBlockProcess();
  static intptr_t marking_stack_block_offset() {
    return OFFSET_OF(Thread, marking_stack_block_);
  }

  uword top_exit_frame_info() const { return top_exit_frame_info_; }
  void set_top_exit_frame_info(uword top_exit_frame_info) {
    top_exit_frame_info_ = top_exit_frame_info;
  }
  static intptr_t top_exit_frame_info_offset() {
    return OFFSET_OF(Thread, top_exit_frame_info_);
  }

  // Heap of the isolate that this thread is operating on.
  Heap* heap() const { return heap_; }
  static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }

  uword top() const { return top_; }
  uword end() const { return end_; }
  void set_top(uword top) { top_ = top; }
  void set_end(uword end) { end_ = end; }
  static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
  static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }

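  // Illustrative sketch: top_ and end_ delimit the thread's bump-allocation
  // region, so the conceptual fast path for allocating 'size' bytes is:
  //
  //   uword TryAllocate(Thread* thread, intptr_t size) {
  //     uword result = thread->top();
  //     if (result + size <= thread->end()) {
  //       thread->set_top(result + size);  // bump the pointer; fast path hit
  //       return result;
  //     }
  //     return 0;  // fall back to the runtime allocation slow path
  //   }
  //
  // This is a simplified model; the real fast path lives in generated code
  // and addresses these fields directly via top_offset()/end_offset().
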
  int32_t no_safepoint_scope_depth() const {
#if defined(DEBUG)
    return no_safepoint_scope_depth_;
#else
    return 0;
#endif
  }
  void IncrementNoSafepointScopeDepth() {
#if defined(DEBUG)
    ASSERT(no_safepoint_scope_depth_ < INT_MAX);
    no_safepoint_scope_depth_ += 1;
#endif
  }
  void DecrementNoSafepointScopeDepth() {
#if defined(DEBUG)
    ASSERT(no_safepoint_scope_depth_ > 0);
    no_safepoint_scope_depth_ -= 1;
#endif
  }

  bool IsInNoReloadScope() const { return no_reload_scope_depth_ > 0; }

  bool IsInStoppedMutatorsScope() const {
    return stopped_mutators_scope_depth_ > 0;
  }

#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
  static intptr_t member_name##offset() {                                      \
    return OFFSET_OF(Thread, member_name);                                     \
  }
  CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
    defined(TARGET_ARCH_X64)
  static intptr_t write_barrier_wrappers_thread_offset(Register reg) {
    ASSERT((kDartAvailableCpuRegs & (1 << reg)) != 0);
    intptr_t index = 0;
    for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
      if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
      if (i == reg) break;
      ++index;
    }
    return OFFSET_OF(Thread, write_barrier_wrappers_entry_points_) +
           index * sizeof(uword);
  }

  static intptr_t WriteBarrierWrappersOffsetForRegister(Register reg) {
    intptr_t index = 0;
    for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
      if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
      if (i == reg) {
        return index * kStoreBufferWrapperSize;
      }
      ++index;
    }
    UNREACHABLE();
    return 0;
  }
#endif

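  // Illustrative note: both helpers above derive a register's slot index by
  // counting the allocatable registers below it in kDartAvailableCpuRegs.
  // For example, if the mask had bits 0, 2 and 3 set, the register with code
  // 3 would map to index 2 (codes 0 and 2 precede it), i.e. its entry point
  // lives at write_barrier_wrappers_entry_points_[2].
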
#define DEFINE_OFFSET_METHOD(name)                                             \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD

#define DEFINE_OFFSET_METHOD(returntype, name, ...)                            \
  static intptr_t name##_entry_point_offset() {                                \
    return OFFSET_OF(Thread, name##_entry_point_);                             \
  }
  LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD

  ObjectPoolPtr global_object_pool() const { return global_object_pool_; }
  void set_global_object_pool(ObjectPoolPtr raw_value) {
    global_object_pool_ = raw_value;
  }

  const uword* dispatch_table_array() const { return dispatch_table_array_; }
  void set_dispatch_table_array(const uword* array) {
    dispatch_table_array_ = array;
  }

  static bool CanLoadFromThread(const Object& object);
  static intptr_t OffsetFromThread(const Object& object);
  static bool ObjectAtOffset(intptr_t offset, Object* object);
  static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);

#if defined(DEBUG)
  // For asserts only. Has false positives when running with a simulator or
  // SafeStack.
  bool TopErrorHandlerIsSetJump() const;
  bool TopErrorHandlerIsExitFrame() const;
#endif

  uword vm_tag() const { return vm_tag_; }
  void set_vm_tag(uword tag) { vm_tag_ = tag; }
  static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }

  int64_t unboxed_int64_runtime_arg() const {
    return unboxed_int64_runtime_arg_;
  }
  void set_unboxed_int64_runtime_arg(int64_t value) {
    unboxed_int64_runtime_arg_ = value;
  }
  static intptr_t unboxed_int64_runtime_arg_offset() {
    return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
  }

  GrowableObjectArrayPtr pending_functions();
  void clear_pending_functions();

  static intptr_t global_object_pool_offset() {
    return OFFSET_OF(Thread, global_object_pool_);
  }

  static intptr_t dispatch_table_array_offset() {
    return OFFSET_OF(Thread, dispatch_table_array_);
  }

  ObjectPtr active_exception() const { return active_exception_; }
  void set_active_exception(const Object& value);
  static intptr_t active_exception_offset() {
    return OFFSET_OF(Thread, active_exception_);
  }

  ObjectPtr active_stacktrace() const { return active_stacktrace_; }
  void set_active_stacktrace(const Object& value);
  static intptr_t active_stacktrace_offset() {
    return OFFSET_OF(Thread, active_stacktrace_);
  }

  uword resume_pc() const { return resume_pc_; }
  void set_resume_pc(uword value) { resume_pc_ = value; }
  static uword resume_pc_offset() { return OFFSET_OF(Thread, resume_pc_); }

  ErrorPtr sticky_error() const;
  void set_sticky_error(const Error& value);
  void ClearStickyError();
  DART_WARN_UNUSED_RESULT ErrorPtr StealStickyError();

#if defined(DEBUG)
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object)                                \
  void set_reusable_##object##_handle_scope_active(bool value) {               \
    reusable_##object##_handle_scope_active_ = value;                          \
  }                                                                            \
  bool reusable_##object##_handle_scope_active() const {                       \
    return reusable_##object##_handle_scope_active_;                           \
  }
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS

  bool IsAnyReusableHandleScopeActive() const {
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object)                                \
  if (reusable_##object##_handle_scope_active_) {                              \
    return true;                                                               \
  }
    REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
    return false;
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
  }
#endif  // defined(DEBUG)

  void ClearReusableHandles();

#define REUSABLE_HANDLE(object)                                                \
  object& object##Handle() const { return *object##_handle_; }
  REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
#undef REUSABLE_HANDLE

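  // Illustrative usage (assuming REUSABLE_HANDLE_LIST includes e.g. String):
  // the expansion above provides accessors such as
  //
  //   String& str = thread->StringHandle();
  //   str = name.ptr();  // reuse the preallocated handle; no new allocation
  //
  // giving fast access to a preallocated handle instead of allocating a new
  // zone handle on every use. The DEBUG accessors earlier guard against two
  // users sharing the same reusable handle at once.
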
  /*
   * Fields used to support safepointing a thread.
   *
   * - Bit 0 of the safepoint_state_ field is used to indicate if the thread
   *   is already at a safepoint,
   * - Bit 1 of the safepoint_state_ field is used to indicate if a safepoint
   *   is requested for this thread.
   * - Bit 2 of the safepoint_state_ field is used to indicate if the thread
   *   is already at a deopt safepoint,
   * - Bit 3 of the safepoint_state_ field is used to indicate if a deopt
   *   safepoint is requested for this thread.
   * - Bit 4 of the safepoint_state_ field is used to indicate that the thread
   *   is blocked at a (deopt)safepoint and has to be woken up once the
   *   (deopt)safepoint operation is complete.
   *
   * The execution state for a thread is stored in the execution_state_ field.
   * Potential execution states a thread could be in:
   *   kThreadInGenerated - The thread is running jitted dart/stub code.
   *   kThreadInVM - The thread is running VM code.
   *   kThreadInNative - The thread is running native code.
   *   kThreadInBlockedState - The thread is blocked waiting for a resource.
   */

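  // Illustrative summary of the safepoint_state_ layout described above; the
  // names mirror the BitField helpers used by the methods below:
  //
  //   bit 0: AtSafepointField              - thread is at a GC safepoint
  //   bit 1: SafepointRequestedField       - GC safepoint requested
  //   bit 2: AtDeoptSafepointField         - thread is at a deopt safepoint
  //   bit 3: DeoptSafepointRequestedField  - deopt safepoint requested
  //   bit 4: BlockedForSafepointField      - blocked, must be woken up
  //
  // For example, a thread parked at a full (GC + deopt) safepoint has bits 0
  // and 2 set, which is exactly what full_safepoint_state_acquired() encodes.
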
  static bool IsAtSafepoint(SafepointLevel level, uword state) {
    const uword mask = AtSafepointBits(level);
    return (state & mask) == mask;
  }
  bool IsAtSafepoint() const {
    return IsAtSafepoint(current_safepoint_level());
  }
  bool IsAtSafepoint(SafepointLevel level) const {
    return IsAtSafepoint(level, safepoint_state_.load());
  }

  void SetAtSafepoint(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    if (value) {
      safepoint_state_ |= AtSafepointBits(current_safepoint_level());
    } else {
      safepoint_state_ &= ~AtSafepointBits(current_safepoint_level());
    }
  }

  bool IsSafepointRequestedLocked() const {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    return IsSafepointRequested();
  }
  bool IsSafepointRequested() const {
    const uword state = safepoint_state_.load();
    for (intptr_t level = current_safepoint_level(); level >= 0; --level) {
      if (IsSafepointLevelRequested(state, static_cast<SafepointLevel>(level)))
        return true;
    }
    return false;
  }

  bool IsSafepointLevelRequestedLocked(SafepointLevel level) const {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    if (level > current_safepoint_level()) return false;
    const uword state = safepoint_state_.load();
    return IsSafepointLevelRequested(state, level);
  }

  static bool IsSafepointLevelRequested(uword state, SafepointLevel level) {
    switch (level) {
      case SafepointLevel::kGC:
        return (state & SafepointRequestedField::mask_in_place()) != 0;
      case SafepointLevel::kGCAndDeopt:
        return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
      case SafepointLevel::kNumLevels:
        UNREACHABLE();
    }
  }

  void BlockForSafepoint();

  uword SetSafepointRequested(SafepointLevel level, bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());

    const uword mask = level == SafepointLevel::kGC
                           ? SafepointRequestedField::mask_in_place()
                           : DeoptSafepointRequestedField::mask_in_place();

    if (value) {
      // acquire pulls from the release in TryEnterSafepoint.
      return safepoint_state_.fetch_or(mask, std::memory_order_acquire);
    } else {
      // release pushes to the acquire in TryExitSafepoint.
      return safepoint_state_.fetch_and(~mask, std::memory_order_release);
    }
  }

  static bool IsBlockedForSafepoint(uword state) {
    return BlockedForSafepointField::decode(state);
  }
  bool IsBlockedForSafepoint() const {
    return BlockedForSafepointField::decode(safepoint_state_);
  }
  void SetBlockedForSafepoint(bool value) {
    ASSERT(thread_lock()->IsOwnedByCurrentThread());
    safepoint_state_ =
        BlockedForSafepointField::update(value, safepoint_state_);
  }

  bool BypassSafepoints() const {
    return BypassSafepointsField::decode(safepoint_state_);
  }
  static uword SetBypassSafepoints(bool value, uword state) {
    return BypassSafepointsField::update(value, state);
  }

  enum ExecutionState {
    kThreadInVM = 0,
    kThreadInGenerated,
    kThreadInNative,
    kThreadInBlockedState
  };

  ExecutionState execution_state() const {
    return static_cast<ExecutionState>(execution_state_);
  }
  // Normally execution state is only accessed for the current thread.
  NO_SANITIZE_THREAD
  ExecutionState execution_state_cross_thread_for_testing() const {
    return static_cast<ExecutionState>(execution_state_);
  }
  void set_execution_state(ExecutionState state) {
    execution_state_ = static_cast<uword>(state);
  }
  static intptr_t execution_state_offset() {
    return OFFSET_OF(Thread, execution_state_);
  }

  virtual bool MayAllocateHandles() {
    return (execution_state() == kThreadInVM) ||
           (execution_state() == kThreadInGenerated);
  }

  static uword full_safepoint_state_unacquired() {
    return (0 << AtSafepointField::shift()) |
           (0 << AtDeoptSafepointField::shift());
  }
  static uword full_safepoint_state_acquired() {
    return (1 << AtSafepointField::shift()) |
           (1 << AtDeoptSafepointField::shift());
  }

  bool TryEnterSafepoint() {
    uword old_state = 0;
    uword new_state = AtSafepointField::encode(true);
    if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
      new_state |= AtDeoptSafepointField::encode(true);
    }
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_release);
  }

  void EnterSafepoint() {
    ASSERT(no_safepoint_scope_depth() == 0);
    // First try a fast update of the thread state to indicate it is at a
    // safepoint.
    if (!TryEnterSafepoint()) {
      // Fast update failed which means we could potentially be in the middle
      // of a safepoint operation.
      EnterSafepointUsingLock();
    }
  }

  bool TryExitSafepoint() {
    uword old_state = AtSafepointField::encode(true);
    if (current_safepoint_level() == SafepointLevel::kGCAndDeopt) {
      old_state |= AtDeoptSafepointField::encode(true);
    }
    uword new_state = 0;
    return safepoint_state_.compare_exchange_strong(old_state, new_state,
                                                    std::memory_order_acquire);
  }

  void ExitSafepoint() {
    // First try a fast update of the thread state to indicate it is not at a
    // safepoint anymore.
    if (!TryExitSafepoint()) {
      // Fast update failed which means we could potentially be in the middle
      // of a safepoint operation.
      ExitSafepointUsingLock();
    }
  }

  void CheckForSafepoint() {
    // If we are in a runtime call that doesn't support lazy deopt, we will
    // only respond to gc safepointing requests.
    ASSERT(no_safepoint_scope_depth() == 0);
    if (IsSafepointRequested()) {
      BlockForSafepoint();
    }
  }

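  // Illustrative usage sketch: long-running VM-internal loops are expected
  // to poll for safepoint requests so a requesting thread is not blocked
  // indefinitely, e.g.:
  //
  //   for (intptr_t i = 0; i < length; ++i) {
  //     ProcessElement(i);            // hypothetical unit of work
  //     thread->CheckForSafepoint();  // park here if a safepoint is due
  //   }
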
  int32_t AllocateFfiCallbackId();

  // Store 'code' for the native callback identified by 'callback_id'.
  //
  // Expands the callback code array as necessary to accommodate the callback
  // ID.
  void SetFfiCallbackCode(int32_t callback_id, const Code& code);

  // Store 'stack_return' for the native callback identified by 'callback_id'.
  //
  // Expands the callback stack return array as necessary to accommodate the
  // callback ID.
  void SetFfiCallbackStackReturn(int32_t callback_id,
                                 intptr_t stack_return_delta);

  // Ensure that 'callback_id' refers to a valid callback in this isolate.
  //
  // If "entry != 0", additionally checks that entry is inside the
  // instructions of this callback.
  //
  // Aborts if any of these conditions fails.
  void VerifyCallbackIsolate(int32_t callback_id, uword entry);

  Thread* next() const { return next_; }

  // Visit all object pointers.
  void VisitObjectPointers(ObjectPointerVisitor* visitor,
                           ValidationPolicy validate_frames);
  void RememberLiveTemporaries();
  void DeferredMarkLiveTemporaries();

  bool IsValidHandle(Dart_Handle object) const;
  bool IsValidLocalHandle(Dart_Handle object) const;
  intptr_t CountLocalHandles() const;
  int ZoneSizeInBytes() const;
  void UnwindScopes(uword stack_marker);

  void InitVMConstants();

  Random* random() { return &thread_random_; }

  uint64_t* GetFfiMarshalledArguments(intptr_t size) {
    if (ffi_marshalled_arguments_size_ < size) {
      if (ffi_marshalled_arguments_size_ > 0) {
        free(ffi_marshalled_arguments_);
      }
      // Remember the new capacity so the buffer is reused (and eventually
      // freed) on later calls instead of leaking a fresh allocation each time.
      ffi_marshalled_arguments_size_ = size;
      ffi_marshalled_arguments_ =
          reinterpret_cast<uint64_t*>(malloc(size * sizeof(uint64_t)));
    }
    return ffi_marshalled_arguments_;
  }

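  // Illustrative usage sketch (hypothetical call site): the returned buffer
  // is a per-thread scratch area, so a caller marshalling 'n' arguments
  // would do something like:
  //
  //   uint64_t* args = thread->GetFfiMarshalledArguments(n);
  //   for (intptr_t i = 0; i < n; ++i) {
  //     args[i] = MarshallArgument(i);  // stand-in for real marshalling
  //   }
  //
  // The buffer is reused across calls and freed only when it must grow, so
  // callers must not retain the pointer across another call.
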
#ifndef PRODUCT
  void PrintJSON(JSONStream* stream) const;
#endif

  PendingDeopts& pending_deopts() { return pending_deopts_; }

  SafepointLevel current_safepoint_level() const {
    return runtime_call_deopt_ability_ ==
                   RuntimeCallDeoptAbility::kCannotLazyDeopt
               ? SafepointLevel::kGC
               : SafepointLevel::kGCAndDeopt;
  }

 private:
  template <class T>
  T* AllocateReusableHandle();

  enum class RestoreWriteBarrierInvariantOp {
    kAddToRememberedSet,
    kAddToDeferredMarkingStack
  };
  friend class RestoreWriteBarrierInvariantVisitor;
  void RestoreWriteBarrierInvariant(RestoreWriteBarrierInvariantOp op);

  // Set the current compiler state and return the previous compiler state.
  CompilerState* SetCompilerState(CompilerState* state) {
    CompilerState* previous = compiler_state_;
    compiler_state_ = state;
    return previous;
  }

  // Accessed from generated code.
  // ** This block of fields must come first! **
  // For AOT cross-compilation, we rely on these members having the same
  // offsets in SIMARM(IA32) and ARM, and the same offsets in SIMARM64(X64)
  // and ARM64. We use only word-sized fields to avoid differences in struct
  // packing on the different architectures. See also CheckOffsets in dart.cc.
  RelaxedAtomic<uword> stack_limit_;
  uword write_barrier_mask_;
  uword heap_base_;
  Isolate* isolate_;
  const uword* dispatch_table_array_;
  uword top_ = 0;
  uword end_ = 0;

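  // Illustrative sketch: the offset guarantees described above are enforced
  // by startup checks along the lines of
  //
  //   CHECK_OFFSET(OFFSET_OF(Thread, isolate_), /* expected offset */ ...);
  //
  // (see CheckOffsets in dart.cc for the authoritative list; the macro name
  // here is a stand-in). This is why the fields in this block are word-sized
  // and must not be reordered casually.
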
  // Offsets up to this point can all fit in a byte on X64. All of the above
  // fields are very abundantly accessed from code. Thus, keeping them first
  // is important for code size (although code size on X64 is not a priority).
  uword saved_stack_limit_;
  uword stack_overflow_flags_;
  InstancePtr* field_table_values_;
  Heap* heap_;
  uword volatile top_exit_frame_info_;
  StoreBufferBlock* store_buffer_block_;
  MarkingStackBlock* marking_stack_block_;
  MarkingStackBlock* deferred_marking_stack_block_;
  uword volatile vm_tag_;

  // Memory location dedicated for passing unboxed int64 values from
  // generated code to runtime.
  // TODO(dartbug.com/33549): Clean this up when unboxed values
  // can be passed as arguments.
  ALIGN8 int64_t unboxed_int64_runtime_arg_;

  // State that is cached in the TLS for fast access in generated code.
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value)      \
  type_name member_name;
  CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(name) uword name##_entry_point_;
  RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

#define DECLARE_MEMBERS(returntype, name, ...) uword name##_entry_point_;
  LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
#undef DECLARE_MEMBERS

#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) ||                  \
    defined(TARGET_ARCH_X64)
  uword write_barrier_wrappers_entry_points_[kNumberOfDartAvailableCpuRegs];
#endif

// JumpToExceptionHandler state:
|
2020-04-25 05:21:27 +00:00
|
|
|
ObjectPtr active_exception_;
|
|
|
|
ObjectPtr active_stacktrace_;
|
|
|
|
ObjectPoolPtr global_object_pool_;
|
2018-10-25 18:08:18 +00:00
|
|
|
uword resume_pc_;
|
2019-10-02 05:33:29 +00:00
|
|
|
uword saved_shadow_call_stack_ = 0;
|
2019-04-11 11:59:54 +00:00
|
|
|
uword execution_state_;
|
2019-10-21 16:26:39 +00:00
|
|
|
std::atomic<uword> safepoint_state_;
|
2020-04-25 05:21:27 +00:00
|
|
|
GrowableObjectArrayPtr ffi_callback_code_;
|
[vm/ffi] Support passing structs by value
This CL adds passing structs by value in FFI trampolines.
Nested structs and inline arrays are future work.
C defines passing empty structs as undefined behavior, so that is not
supported in this CL.
Suggested review order:
1) commit message
2) ffi/marshaller (decisions for what is done in IL and what in MC)
3) frontend/kernel_to_il (IL construction)
4) backend/il (MC generation from IL)
5) rest in VM
Overall architecture is that structs are split up into word-size chunks
in IL when this is possible: 1 definition in IL per chunk, 1 Location in
IL per chunk, and 1 NativeLocation for the backend per chunk.
In some cases it is not possible or less convenient to split into
chunks. In these cases TypedDataBase objects are stored into and loaded
from directly in machine code.
The various cases:
- FFI call arguments which are not passed as pointers: pass individual
chunks to FFI call which already have the right location.
- FFI call arguments which are passed as pointers: Pass in TypedDataBase
to FFI call, allocate space on the stack, and make a copy on the stack
and pass the copies' address to the callee.
- FFI call return value: pass in TypedData to FFI call, and copy result
in machine code.
- FFI callback arguments which are not passed as pointers: IL definition
for each chunk, and populate a new TypedData with those chunks.
- FFI callback arguments which are passed as pointer: IL definition for
the pointer, and copying of contents in IL.
- FFI return value when location is pointer: Copy data to callee result
location in IL.
- FFI return value when location is not a pointer: Copy data in machine
code to the right registers.
Some other notes about the implementation:
- Due to Store/LoadIndexed loading doubles from float arrays, we use
a int32 instead and use the BitCastInstr.
- Linux ia32 uses `ret 4` when returning structs by value. This requires
special casing in the FFI callback trampolines to either use `ret` or
`ret 4` when returning.
- The 1 IL definition, 1 Location, and 1 NativeLocation approach does
not remove the need for special casing PairLocations in the machine
code generation because they are 1 Location belonging to 1 definition.
Because of the amount of corner cases in the calling conventions that
need to be covered, the tests are generated, rather than hand-written.
ABIs tested on CQ: x64 (Linux, MacOS, Windows), ia32 (Linux, Windows),
arm (Android softFP, Linux hardFP), arm64 Android.
ABIs tested locally through Flutter: ia32 Android (emulator), x64 iOS
(simulator), arm64 iOS.
ABIs not tested: arm iOS.
TEST=runtime/bin/ffi_test/ffi_test_functions_generated.cc
TEST=runtime/bin/ffi_test/ffi_test_functions.cc
TEST=tests/{ffi,ffi_2}/function_structs_by_value_generated_test.dart
TEST=tests/{ffi,ffi_2}/function_callbacks_structs_by_value_generated_tes
TEST=tests/{ffi,ffi_2}/function_callbacks_structs_by_value_test.dart
TEST=tests/{ffi,ffi_2}/vmspecific_static_checks_test.dart
Closes https://github.com/dart-lang/sdk/issues/36730.
Change-Id: I474d3a4ee1faadbe767ddadd1b696e24d8dc364c
Cq-Include-Trybots: luci.dart.try:dart-sdk-linux-try,dart-sdk-mac-try,dart-sdk-win-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64-try,vm-kernel-asan-linux-release-x64-try,vm-kernel-mac-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-nnbd-mac-release-x64-try,vm-kernel-nnbd-win-debug-x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-linux-debug-simarm_x64-try,vm-kernel-precomp-nnbd-linux-debug-x64-try,vm-kernel-precomp-win-release-x64-try,vm-kernel-reload-linux-debug-x64-try,vm-kernel-reload-rollback-linux-debug-x64-try,vm-kernel-win-debug-x64-try,vm-kernel-win-debug-ia32-try,vm-precomp-ffi-qemu-linux-release-arm-try,vm-kernel-precomp-obfuscate-linux-release-x64-try,vm-kernel-msan-linux-release-x64-try,vm-kernel-precomp-msan-linux-release-x64-try,vm-kernel-precomp-android-release-arm_x64-try,analyzer-analysis-server-linux-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/140290
Commit-Queue: Daco Harkes <dacoharkes@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
2020-12-14 16:22:48 +00:00
|
|
|
TypedDataPtr ffi_callback_stack_return_;
|
[vm/ffi] Convert Objects to Dart_Handles in FFI calls
This includes support for calling Dart_PropagateError in native code
when doing FFI calls, and catching uncaught exceptions with Dart_IsError
when doing FFI callbacks.
The support for Dart_PropagateError adds a catch entry to the FFI
trampoline, which prevents inlining these trampolines in AOT. This
regresses the FfiCall benchmarks by 1-2% in AOT.
In addition, Dart_PropagateError requires maintaining a bit whether we
entered native/VM code from generated code through FFI or not. That way
we can do the proper transition on the exception path. When entering
generated code, we store this bit on the stack, right after the entry
frame.
Design: http://go/dart-ffi-handles
Issue: https://github.com/dart-lang/sdk/issues/36858
Issue: https://github.com/dart-lang/sdk/issues/41319
Change-Id: Idfd7ff69132fb29cc730931a4113d914d4437396
Cq-Include-Trybots: luci.dart.try:vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64-try,app-kernel-linux-debug-x64-try,vm-kernel-linux-debug-ia32-try,vm-kernel-win-debug-x64-try,vm-kernel-win-debug-ia32-try,vm-kernel-precomp-linux-debug-x64-try,vm-dartkb-linux-release-x64-abi-try,vm-kernel-precomp-android-release-arm64-try,vm-kernel-asan-linux-release-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-precomp-android-release-arm_x64-try,vm-kernel-precomp-obfuscate-linux-release-x64-try,dart-sdk-linux-try,analyzer-analysis-server-linux-try,analyzer-linux-release-try,front-end-linux-release-x64-try,vm-kernel-precomp-win-release-x64-try,vm-kernel-mac-debug-x64-try,vm-precomp-ffi-qemu-linux-release-arm-try,vm-kernel-nnbd-linux-debug-x64-try,analyzer-nnbd-linux-release-try,front-end-nnbd-linux-release-x64-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/145591
Commit-Queue: Daco Harkes <dacoharkes@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
2020-06-12 11:14:22 +00:00
|
|
|
uword exit_through_ffi_ = 0;
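// Hedged sketch of the save/restore described in the commit message above
// (InvokeGeneratedCode is a hypothetical helper name; the real spill of
// this bit happens in the stubs that enter generated code, right after
// the entry frame):
//
//   uword saved_exit_through_ffi = thread->exit_through_ffi_;
//   InvokeGeneratedCode(...);  // FFI trampolines set the bit before
//                              // calling out to native code
//   thread->exit_through_ffi_ = saved_exit_through_ffi;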
|
|
|
|
ApiLocalScope* api_top_scope_;
|
2018-10-25 18:08:18 +00:00
|
|
|
|
|
|
|
// ---- End accessed from generated code. ----
|
|
|
|
|
|
|
|
// The layout of the Thread object up to this point should not depend
|
|
|
|
// on DART_PRECOMPILED_RUNTIME, as it is accessed from generated code.
|
|
|
|
// The code is generated without DART_PRECOMPILED_RUNTIME, but used with
|
|
|
|
// DART_PRECOMPILED_RUNTIME.
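// (Illustrative sketch only.) Layout stability of the fields above is
// typically enforced by exposing them to the compiler through offset
// accessors rather than direct member access, e.g.:
//
//   static intptr_t exit_through_ffi_offset() {
//     return OFFSET_OF(Thread, exit_through_ffi_);
//   }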
|
2018-07-27 18:51:20 +00:00
|
|
|
|
2018-09-21 21:30:33 +00:00
|
|
|
TaskKind task_kind_;
|
2016-05-16 17:58:21 +00:00
|
|
|
TimelineStream* dart_stream_;
|
2019-07-23 10:58:11 +00:00
|
|
|
IsolateGroup* isolate_group_ = nullptr;
|
2019-05-13 21:17:51 +00:00
|
|
|
mutable Monitor thread_lock_;
|
2015-11-25 19:07:22 +00:00
|
|
|
ApiLocalScope* api_reusable_scope_;
|
2015-11-19 21:45:10 +00:00
|
|
|
int32_t no_callback_scope_depth_;
|
2021-03-02 14:11:42 +00:00
|
|
|
intptr_t no_reload_scope_depth_ = 0;
|
2021-04-03 03:36:32 +00:00
|
|
|
intptr_t stopped_mutators_scope_depth_ = 0;
|
2015-11-19 21:45:10 +00:00
|
|
|
#if defined(DEBUG)
|
|
|
|
int32_t no_safepoint_scope_depth_;
|
|
|
|
#endif
|
|
|
|
VMHandles reusable_handles_;
|
2016-05-17 19:19:06 +00:00
|
|
|
intptr_t defer_oob_messages_count_;
|
2016-03-17 19:57:36 +00:00
|
|
|
uint16_t deferred_interrupts_mask_;
|
|
|
|
uint16_t deferred_interrupts_;
|
|
|
|
int32_t stack_overflow_count_;
|
2021-03-09 11:27:00 +00:00
|
|
|
uint32_t runtime_call_count_ = 0;
|
2015-11-19 21:45:10 +00:00
|
|
|
|
2021-03-03 09:31:53 +00:00
|
|
|
// Deoptimization of stack frames.
|
2021-05-10 08:58:29 +00:00
|
|
|
RuntimeCallDeoptAbility runtime_call_deopt_ability_ =
|
|
|
|
RuntimeCallDeoptAbility::kCanLazyDeopt;
|
2021-03-03 09:31:53 +00:00
|
|
|
PendingDeopts pending_deopts_;
|
|
|
|
|
2015-11-19 21:45:10 +00:00
|
|
|
// Compiler state:
|
2018-09-03 16:01:24 +00:00
|
|
|
CompilerState* compiler_state_ = nullptr;
|
2018-01-25 07:13:42 +00:00
|
|
|
HierarchyInfo* hierarchy_info_;
|
Reland "[VM] Introduction of type testing stubs - Part 1-4"
Relands 165c583d57af613836cf7d08242ce969521db00b
[VM] Introduction of type testing stubs - Part 1
This CL:
* Adds a field to [RawAbstractType] which will always hold a pointer
to the entrypoint of a type testing stub
* Makes this new field be initialized to a default stub whenever
instances are created (e.g. via Type::New(), snapshot reader, ...)
* Makes the clustered snapshotter write a reference to the
corresponding [RawInstructions] object when writing the field and do
the reverse when reading it.
* Makes us call the type testing stub for performing assert-assignable
checks.
To reduce unnecessary loads on callsites, we store the entrypoint of the
type testing stubs directly in the type objects. This means that the
caller of type testing stubs can simply branch there without populating
a code object first. This also means that the type testing stubs
themselves have no access to a pool and we therefore also don't hold on
to the [Code] object; only the [Instructions] object is necessary.
The type testing stubs do not set up a frame themselves and also have no
safepoint. In the case when the type testing stubs could not determine
a positive answer they will tail-call a general-purpose stub.
The general-purpose stub sets up a stub frame, tries to consult a
[SubtypeTestCache] and bails out to runtime if this was unsuccessful.
This CL is just the first, for ease of reviewing. The actual
type-specialized type testing stubs will be generated in later CLs.
Reviewed-on: https://dart-review.googlesource.com/44787
Relands f226c22424c483d65499545e560efc059f9dde1c
[VM] Introduction of type testing stubs - Part 2
This CL starts building type testing stubs specialized for [Type] objects
we test against.
More specifically, it adds support for:
* Handling obvious fast cases on the call sites (while still having a
call to the stub for the negative case)
* Handling type tests against type parameters, by loading the value
of the type parameter on the call sites and invoking its type testing stub.
* Specialized type testing stubs for instantiated types where we can
do [CidRange]-based subtype-checks.
==> e.g. String/List<dynamic>
* Specialized type testing stubs for instantiated types where we can
do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the type arguments.
==> e.g. Widget<State>, where we know [Widget] is only extended and not
implemented.
* Specialized type testing stubs for certain non-instantiated types where we
can do [CidRange]-based subclass-checks for the class and
[CidRange]-based subtype-checks for the instantiated type arguments and
cid-based comparisons for type parameters. (Note that this fast case might
result in some false-negatives!)
==> e.g. _HashMapEntry<K, V>, where we know [_HashMapEntry] is only
extended and not implemented.
This optimizes cases where the caller uses `new HashMap<A, B>()` and only
uses `A` and `B` as key/values (and not subclasses of it). The false-negative
can occur when subtypes of A or B are used. In such cases we fall back to the
[SubtypeTestCache]-based implementation.
Reviewed-on: https://dart-review.googlesource.com/44788
Relands 25f98bcc7561006d70a487ba3de55551658ac683
[VM] Introduction of type testing stubs - Part 3
The changes include:
* Make AssertAssignableInstr no longer have a call-summary, which
helps methods with several parameter checks by not having to
re-load/re-initialize type arguments registers
* Lazily create SubtypeTestCaches: We already go to runtime to warm up
the caches, so we now also create the caches on the first runtime
call and patch the pool entries.
* No longer load the destination name into a register: We only need
the name when we throw an exception, so it is not on the hot path.
Instead we let the runtime look at the call site, decoding a pool
index from the instructions stream. The destination name will be
available in the pool, at a consecutive index to the subtype cache.
* Remove the fall-through to the N=1 case for probing subtyping tests,
since those will always be handled by the optimized stubs.
* Do not generate optimized stubs for FutureOr<T> (so far it just
fell through to the TTS). We can make an optimized version of that later,
but it requires special subtyping rules.
* Local code quality improvement in the type-testing-stubs: Avoid an
extra jump at the last case of cid-class-range checks.
There are still a number of optimization opportunities we can do in
future changes.
Reviewed-on: https://dart-review.googlesource.com/46984
Relands 2c52480ec87392992a1388517c46ccc97bdc9b2b
[VM] Introduction of type testing stubs - Part 4
In order to avoid generating type testing stubs for too many types in
the system - and thereby potentially causing an increase in code size -
this change introduces a smarter way to decide for which types we should
generate optimized type testing stubs.
The precompiler creates a [TypeUsageInfo] which we use to collect
information. More specifically:
a) We collect the destination types for all type checks we emit
(we do this inside AssertAssignableInstr::EmitNativeCode).
-> These are types we might want to generate optimized type testing
stubs for.
b) We collect type argument vectors used in instance creations (we do
this inside AllocateObjectInstr::EmitNativeCode) and keep a set
of used type argument vectors for each class.
After the precompiler has finished compiling normal code we scan the set
of destination types collected in a) for uninstantiated types (or more
specifically, type parameter types).
We then propagate the type argument vectors used on object allocation sites,
which were collected in b), in order to find out what kind of types are flowing
into those type parameters.
This allows us to extend the set of types which we test against, by
adding the types that flow into type parameters.
We use this final augmented set of destination types as a "filter" when
making the decision whether to generate an optimized type testing stub
for a given type.
Reviewed-on: https://dart-review.googlesource.com/48640
Issue https://github.com/dart-lang/sdk/issues/32603
Closes https://github.com/dart-lang/sdk/issues/32852
Change-Id: Ib79fbe7f043aa88f32bddad62d7656c638914b44
Reviewed-on: https://dart-review.googlesource.com/50944
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
2018-04-13 09:06:56 +00:00
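The cid-range checks described above reduce a subtype test to an unsigned
range comparison on the receiver's class id. A hedged sketch in C++ of the
check the stubs emit in assembly (`range` stands for a [CidRange]-like
start/end pair; names are illustrative):

//   intptr_t cid = instance->GetClassId();
//   if (static_cast<uword>(cid - range.cid_start) <=
//       static_cast<uword>(range.cid_end - range.cid_start)) {
//     return true;  // class id falls inside the subtype's cid range
//   }
//   // Otherwise: tail-call the general-purpose (SubtypeTestCache) stub.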
|
|
|
TypeUsageInfo* type_usage_info_;
|
2020-04-25 05:21:27 +00:00
|
|
|
GrowableObjectArrayPtr pending_functions_;
|
2015-11-19 21:45:10 +00:00
|
|
|
|
2021-05-11 11:15:53 +00:00
|
|
|
CompilerTimings* compiler_timings_ = nullptr;
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
ErrorPtr sticky_error_;
|
2016-02-05 23:46:55 +00:00
|
|
|
|
2019-04-05 10:15:44 +00:00
|
|
|
Random thread_random_;
|
|
|
|
|
2019-05-09 11:46:03 +00:00
|
|
|
intptr_t ffi_marshalled_arguments_size_ = 0;
|
|
|
|
uint64_t* ffi_marshalled_arguments_;
|
|
|
|
|
2020-04-25 05:21:27 +00:00
|
|
|
InstancePtr* field_table_values() const { return field_table_values_; }
|
2020-01-17 18:12:24 +00:00
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
// Reusable handles support.
|
|
|
|
#define REUSABLE_HANDLE_FIELDS(object) object* object##_handle_;
|
2015-10-09 17:10:34 +00:00
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
|
|
|
|
#undef REUSABLE_HANDLE_FIELDS
|
|
|
|
|
|
|
|
#if defined(DEBUG)
|
|
|
|
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
|
|
|
|
bool reusable_##object##_handle_scope_active_;
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
|
|
|
|
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
|
|
|
|
#endif // defined(DEBUG)
|
|
|
|
|
2019-10-21 16:26:39 +00:00
|
|
|
class AtSafepointField : public BitField<uword, bool, 0, 1> {};
|
2021-05-10 09:13:09 +00:00
|
|
|
class SafepointRequestedField
|
|
|
|
: public BitField<uword, bool, AtSafepointField::kNextBit, 1> {};
|
|
|
|
class AtDeoptSafepointField
|
|
|
|
: public BitField<uword, bool, SafepointRequestedField::kNextBit, 1> {};
|
|
|
|
class DeoptSafepointRequestedField
|
|
|
|
: public BitField<uword, bool, AtDeoptSafepointField::kNextBit, 1> {};
|
|
|
|
class BlockedForSafepointField
|
|
|
|
: public BitField<uword,
|
|
|
|
bool,
|
|
|
|
DeoptSafepointRequestedField::kNextBit,
|
|
|
|
1> {};
|
|
|
|
class BypassSafepointsField
|
|
|
|
: public BitField<uword, bool, BlockedForSafepointField::kNextBit, 1> {};
|
|
|
|
|
|
|
|
static uword AtSafepointBits(SafepointLevel level) {
|
|
|
|
switch (level) {
|
|
|
|
case SafepointLevel::kGC:
|
|
|
|
return AtSafepointField::mask_in_place();
|
|
|
|
case SafepointLevel::kGCAndDeopt:
|
|
|
|
return AtSafepointField::mask_in_place() |
|
|
|
|
AtDeoptSafepointField::mask_in_place();
|
|
|
|
case SafepointLevel::kNumLevels:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
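// Worked example (follows directly from the bit layout declared above):
// for SafepointLevel::kGCAndDeopt the returned mask is
//
//   AtSafepointField::mask_in_place()         // bit 0 -> 0b001
//   | AtDeoptSafepointField::mask_in_place()  // bit 2 -> 0b100
//
// i.e. 0b101. A caller can then, as a sketch, test whether
// (safepoint_state_ & bits) == bits to see if the thread is at a
// safepoint for both levels at once.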
|
2016-02-01 18:57:34 +00:00
|
|
|
|
2017-11-22 19:27:54 +00:00
|
|
|
#if defined(USING_SAFE_STACK)
|
|
|
|
uword saved_safestack_limit_;
|
|
|
|
#endif
|
|
|
|
|
2015-11-19 21:45:10 +00:00
|
|
|
Thread* next_; // Used to chain the thread structures in an isolate.
|
2020-02-20 21:08:35 +00:00
|
|
|
bool is_mutator_thread_ = false;
|
2015-10-13 16:49:20 +00:00
|
|
|
|
2020-12-17 00:22:13 +00:00
|
|
|
#if defined(DEBUG)
|
|
|
|
bool inside_compiler_ = false;
|
|
|
|
#endif
|
|
|
|
|
2019-07-11 13:36:02 +00:00
|
|
|
explicit Thread(bool is_vm_isolate);
|
2015-07-09 18:22:26 +00:00
|
|
|
|
2015-09-17 19:21:55 +00:00
|
|
|
void StoreBufferRelease(
|
|
|
|
StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
|
2015-08-18 14:23:17 +00:00
|
|
|
void StoreBufferAcquire();
|
|
|
|
|
2018-09-21 21:30:33 +00:00
|
|
|
void MarkingStackRelease();
|
|
|
|
void MarkingStackAcquire();
|
2018-12-14 02:02:54 +00:00
|
|
|
void DeferredMarkingStackRelease();
|
|
|
|
void DeferredMarkingStackAcquire();
|
2018-09-21 21:30:33 +00:00
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
void set_safepoint_state(uint32_t value) { safepoint_state_ = value; }
|
2016-02-01 18:57:34 +00:00
|
|
|
void EnterSafepointUsingLock();
|
|
|
|
void ExitSafepointUsingLock();
|
|
|
|
|
2020-02-11 20:17:44 +00:00
|
|
|
void FinishEntering(TaskKind kind);
|
|
|
|
void PrepareLeaving();
|
|
|
|
|
2018-07-11 23:52:43 +00:00
|
|
|
static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
|
2015-07-09 18:22:26 +00:00
|
|
|
|
2016-03-17 19:57:36 +00:00
|
|
|
void DeferOOBMessageInterrupts();
|
|
|
|
void RestoreOOBMessageInterrupts();
|
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
#define REUSABLE_FRIEND_DECLARATION(name) \
|
|
|
|
friend class Reusable##name##HandleScope;
|
2016-11-08 21:54:47 +00:00
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
|
2015-10-09 17:10:34 +00:00
|
|
|
#undef REUSABLE_FRIEND_DECLARATION
|
|
|
|
|
2015-07-21 16:37:23 +00:00
|
|
|
friend class ApiZone;
|
[vm/concurrency] Final support for hot-reload of multi-isolate groups
This is the initial implementation of hot reload with multi-isolate
groups.
Implementation:
As before, when a service API call triggers a reload it will be routed
as an OOB message to a specific isolate (**). As opposed to before, that
isolate has now to coordinate with all other isolates, ensuring that it
"owns" the reload and all other isolates are waiting in a state that
allows reload.
This is implemented as a [ReloadOperationScope] which first participates
in other reloads (if there are any) and then owns the reload. It will
send a new kind of service message to all other registered isolates. All
of them have to check in before reload can proceed. If a new isolate
is about to join the group, it will participate when registering the
isolate. If an old isolate wants to die, it will participate when
unregistering the isolate.
This means that in addition to the existing StackOverFlow checks that
can process OOB messages and therefore reload, we'll have isolate
registration and unregistration as well as a new
Isolate::kCheckForReload OOB message handler where an isolate can
participate in a reload.
We consider the isolate group to be reloadable if the main isolate has
loaded the program and set the root library. Helper isolates don't need
to load any more kernel code and only initialize core libraries, so it's
fine to reload them during this time.
(**) The reason we continue to send reload service API calls to any
isolate in an isolate group is that reloading might involve calling out
to the embedder's tag handler. Doing so currently requires an active
isolate.
If we allowed a subset of dart_api.h (the subset needed by the tag
handler) to be used only with an active IsolateGroup instead of an
active Isolate we could remove this requirement.
Edge cases:
There are various edge cases to consider. The main one is that we currently
maintain an upper limit on the number of isolates executing in parallel
(to ensure each can have a big enough chunk of new space, i.e. a TLAB).
If there are more isolates with active work, they wait until one
of the existing ones "yields". To ensure progress, if any such actively
running isolate gets a request to participate in a reload, it will mark
its own thread as "blocked" and therefore "yields", so another isolate
can make progress until all isolates are participating and the reload
can start.
Marking an isolate as "blocked" happens by exiting that isolate. It will
free up its TLAB, decrease the active mutator count and (if running on the VM's
thread pool) also temporarily increase the thread pool size.
The side-effect of this is that it will use one pthread per isolate
during reload. In the future we can extend this first implementation by
specially handling isolates that don't have a message handler running.
Doing so would require careful consideration to avoid races.
Testing:
In order to test this we use a small helper framework for reload tests.
The helper framework will, similar to real world reload e.g. in flutter,
will spawn a subprocess. It will use the service API to trigger reloads
in this subproces.
To synchronize between the reload driver and the application being
reloaded it allows watching for events to be printed to stdout/stderr.
The reload test itself can be written - similar to multitests - with
annotations such as `// @include-in-reload-0` in them. The testing
framework will then generate multiple application versions that all get
compiled to kernel.
For simplicity we generate the kernel using the standalone VM with
`--snapshot-kind=kernel` and avoid using the incremental compiler.
There are 4 different tests exercising different aspects of
multi-isolate reload:
vm/dart_2/isolates/reload_active_stack_test:
Performs a reload while a fixed number of isolates have an active
stack, thereby ensuring e.g. that all frames of all isolate mutator
stacks get deoptimized, ...
vm/dart_2/isolates/reload_no_active_stack_test:
Similar to the test above, but instead of having an active stack the
isolates can yield to the event loop, possibly even be descheduled.
vm/dart_2/isolates/reload_many_isolates_test:
Similar to the test above, but this test uses many more isolates.
vm/dart_2/isolates/reload_many_isolates_live_and_die_test:
Performs a reload where isolates get spawned and die all the time.
There are always P isolates alive at any given point in time, each
of them spawns children when their parent has died.
Performing a reload catches isolates at various stages of their
lifecycle and can therefore cover a lot of corner cases.
TEST=vm/dart_2/isolates/reload_*_test.dart
Issue https://github.com/dart-lang/sdk/issues/36097
Change-Id: I97039b4084de040b7f2e22f5832a40d57ba398d5
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/187461
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
2021-03-02 18:57:02 +00:00
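A hedged sketch of the coordination flow described above (method names are
hypothetical, not the actual [ReloadOperationScope] API):

//   {
//     ReloadOperationScope reload_operation(thread);
//     // 1. Participate in any reload another isolate already owns.
//     // 2. Take ownership; send the Isolate::kCheckForReload OOB message
//     //    to all registered isolates and wait until each checks in.
//     // 3. Perform the reload; ownership is released on scope exit.
//   }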
|
|
|
friend class DisabledNoActiveIsolateScope;
|
2016-03-17 19:57:36 +00:00
|
|
|
friend class InterruptChecker;
|
2015-07-09 18:22:26 +00:00
|
|
|
friend class Isolate;
|
2019-07-23 10:58:11 +00:00
|
|
|
friend class IsolateGroup;
|
2016-03-17 19:57:36 +00:00
|
|
|
friend class IsolateTestHelper;
|
2021-03-02 18:57:02 +00:00
|
|
|
friend class NoActiveIsolateScope;
|
2016-03-17 19:57:36 +00:00
|
|
|
friend class NoOOBMessageScope;
|
2021-03-02 14:11:42 +00:00
|
|
|
friend class NoReloadScope;
|
2015-10-20 18:20:22 +00:00
|
|
|
friend class Simulator;
|
2015-07-09 18:22:26 +00:00
|
|
|
friend class StackZone;
|
2021-04-03 03:36:32 +00:00
|
|
|
friend class StoppedMutatorsScope;
|
2015-08-11 16:41:06 +00:00
|
|
|
friend class ThreadRegistry;
|
2018-09-03 16:01:24 +00:00
|
|
|
friend class CompilerState;
|
2019-08-21 13:33:37 +00:00
|
|
|
friend class compiler::target::Thread;
|
2020-01-17 18:12:24 +00:00
|
|
|
friend class FieldTable;
|
2021-05-10 08:58:29 +00:00
|
|
|
friend class RuntimeCallDeoptScope;
|
2021-05-10 09:13:09 +00:00
|
|
|
friend class
|
|
|
|
TransitionGeneratedToVM; // IsSafepointRequested/BlockForSafepoint
|
|
|
|
friend class
|
|
|
|
TransitionVMToGenerated; // IsSafepointRequested/BlockForSafepoint
|
|
|
|
friend class MonitorLocker; // ExitSafepointUsingLock
|
2020-02-28 15:43:51 +00:00
|
|
|
friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup*,
|
|
|
|
const char*,
|
|
|
|
char**);
|
2015-01-22 14:14:16 +00:00
|
|
|
DISALLOW_COPY_AND_ASSIGN(Thread);
|
|
|
|
};
|
|
|
|
|
2021-05-10 08:58:29 +00:00
|
|
|
class RuntimeCallDeoptScope : public StackResource {
|
|
|
|
public:
|
|
|
|
RuntimeCallDeoptScope(Thread* thread, RuntimeCallDeoptAbility kind)
|
|
|
|
: StackResource(thread) {
|
|
|
|
// We cannot have nested calls into the VM without deopt support.
|
|
|
|
ASSERT(thread->runtime_call_deopt_ability_ ==
|
|
|
|
RuntimeCallDeoptAbility::kCanLazyDeopt);
|
|
|
|
thread->runtime_call_deopt_ability_ = kind;
|
|
|
|
}
|
|
|
|
virtual ~RuntimeCallDeoptScope() {
|
|
|
|
thread()->runtime_call_deopt_ability_ =
|
|
|
|
RuntimeCallDeoptAbility::kCanLazyDeopt;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
Thread* thread() {
|
|
|
|
return reinterpret_cast<Thread*>(StackResource::thread());
|
|
|
|
}
|
|
|
|
};
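// Usage sketch (assuming the enum's other value is
// RuntimeCallDeoptAbility::kCannotLazyDeopt, and DoLeafRuntimeWork is a
// hypothetical helper):
//
//   {
//     RuntimeCallDeoptScope scope(Thread::Current(),
//                                 RuntimeCallDeoptAbility::kCannotLazyDeopt);
//     DoLeafRuntimeWork();  // runtime code during which frames must not
//                           // be lazily deoptimized
//   }
//   // The destructor restores RuntimeCallDeoptAbility::kCanLazyDeopt.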
|
|
|
|
|
2017-03-15 20:11:05 +00:00
|
|
|
#if defined(HOST_OS_WINDOWS)
|
2015-10-14 14:59:33 +00:00
|
|
|
// Clears the state of the current thread and frees the allocation.
|
|
|
|
void WindowsThreadCleanUp();
|
|
|
|
#endif
|
|
|
|
|
2015-11-04 15:59:16 +00:00
|
|
|
// Disable thread interrupts.
|
|
|
|
class DisableThreadInterruptsScope : public StackResource {
|
|
|
|
public:
|
|
|
|
explicit DisableThreadInterruptsScope(Thread* thread);
|
|
|
|
~DisableThreadInterruptsScope();
|
|
|
|
};
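// Usage sketch (illustrative): bracket short sections that must not be
// interrupted by the thread interrupter:
//
//   {
//     DisableThreadInterruptsScope no_interrupts(Thread::Current());
//     // ... brief work that must not observe a thread interrupt ...
//   }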
|
|
|
|
|
2019-01-11 20:47:10 +00:00
|
|
|
// Within a NoSafepointScope, the thread must not reach any safepoint. Used
|
|
|
|
// around code that manipulates raw object pointers directly without handles.
|
|
|
|
#if defined(DEBUG)
|
|
|
|
class NoSafepointScope : public ThreadStackResource {
|
|
|
|
public:
|
|
|
|
explicit NoSafepointScope(Thread* thread = nullptr)
|
|
|
|
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
|
|
|
|
this->thread()->IncrementNoSafepointScopeDepth();
|
|
|
|
}
|
|
|
|
~NoSafepointScope() { thread()->DecrementNoSafepointScopeDepth(); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
|
|
|
|
};
|
|
|
|
#else // defined(DEBUG)
|
|
|
|
class NoSafepointScope : public ValueObject {
|
|
|
|
public:
|
|
|
|
explicit NoSafepointScope(Thread* thread = nullptr) {}
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(NoSafepointScope);
|
|
|
|
};
|
|
|
|
#endif // defined(DEBUG)
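// Usage sketch (illustrative; `obj` is a hypothetical handle): guard a
// region that touches raw object pointers without handles or allocation:
//
//   {
//     NoSafepointScope no_safepoint;
//     ObjectPtr raw = obj.ptr();  // raw pointer, valid only while no
//                                 // safepoint (and thus no GC) can occur
//     // ... must not allocate or check into a safepoint here ...
//   }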
|
|
|
|
|
2021-03-02 14:11:42 +00:00
|
|
|
class NoReloadScope : public ThreadStackResource {
|
|
|
|
public:
|
2021-04-10 00:03:23 +00:00
|
|
|
explicit NoReloadScope(Thread* thread) : ThreadStackResource(thread) {
|
2021-03-02 14:11:42 +00:00
|
|
|
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
thread->no_reload_scope_depth_++;
|
|
|
|
ASSERT(thread->no_reload_scope_depth_ >= 0);
|
|
|
|
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
}
|
|
|
|
|
|
|
|
~NoReloadScope() {
|
|
|
|
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
2021-04-10 00:03:23 +00:00
|
|
|
thread()->no_reload_scope_depth_ -= 1;
|
|
|
|
ASSERT(thread()->no_reload_scope_depth_ >= 0);
|
2021-03-02 14:11:42 +00:00
|
|
|
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(NoReloadScope);
|
|
|
|
};
|
|
|
|
|
2021-04-03 03:36:32 +00:00
|
|
|
class StoppedMutatorsScope : public ThreadStackResource {
|
|
|
|
public:
|
2021-04-10 00:03:23 +00:00
|
|
|
explicit StoppedMutatorsScope(Thread* thread) : ThreadStackResource(thread) {
|
2021-04-03 03:36:32 +00:00
|
|
|
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
thread->stopped_mutators_scope_depth_++;
|
|
|
|
ASSERT(thread->stopped_mutators_scope_depth_ >= 0);
|
|
|
|
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
}
|
|
|
|
|
|
|
|
~StoppedMutatorsScope() {
|
|
|
|
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
2021-04-10 00:03:23 +00:00
|
|
|
thread()->stopped_mutators_scope_depth_ -= 1;
|
|
|
|
ASSERT(thread()->stopped_mutators_scope_depth_ >= 0);
|
2021-04-03 03:36:32 +00:00
|
|
|
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(StoppedMutatorsScope);
|
|
|
|
};
|
|
|
|
|
2020-12-17 00:22:13 +00:00
|
|
|
// Within an EnterCompilerScope, the thread must operate on cloned fields.
|
|
|
|
#if defined(DEBUG)
|
|
|
|
class EnterCompilerScope : public ThreadStackResource {
|
|
|
|
public:
|
|
|
|
explicit EnterCompilerScope(Thread* thread = nullptr)
|
|
|
|
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
|
|
|
|
previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
|
|
|
|
if (!previously_is_inside_compiler_) {
|
|
|
|
this->thread()->EnterCompiler();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
~EnterCompilerScope() {
|
|
|
|
if (!previously_is_inside_compiler_) {
|
|
|
|
thread()->LeaveCompiler();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
bool previously_is_inside_compiler_;
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
|
|
|
|
};
|
|
|
|
#else // defined(DEBUG)
|
|
|
|
class EnterCompilerScope : public ValueObject {
|
|
|
|
public:
|
|
|
|
explicit EnterCompilerScope(Thread* thread = nullptr) {}
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(EnterCompilerScope);
|
|
|
|
};
|
|
|
|
#endif // defined(DEBUG)
|
|
|
|
|
|
|
|
// Within a LeaveCompilerScope, the thread must operate on original fields.
|
|
|
|
#if defined(DEBUG)
|
|
|
|
class LeaveCompilerScope : public ThreadStackResource {
|
|
|
|
public:
|
|
|
|
explicit LeaveCompilerScope(Thread* thread = nullptr)
|
|
|
|
: ThreadStackResource(thread != nullptr ? thread : Thread::Current()) {
|
|
|
|
previously_is_inside_compiler_ = this->thread()->IsInsideCompiler();
|
|
|
|
if (previously_is_inside_compiler_) {
|
|
|
|
this->thread()->LeaveCompiler();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
~LeaveCompilerScope() {
|
|
|
|
if (previously_is_inside_compiler_) {
|
|
|
|
thread()->EnterCompiler();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
bool previously_is_inside_compiler_;
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
|
|
|
|
};
|
|
|
|
#else // defined(DEBUG)
|
|
|
|
class LeaveCompilerScope : public ValueObject {
|
|
|
|
public:
|
|
|
|
explicit LeaveCompilerScope(Thread* thread = nullptr) {}
|
|
|
|
|
|
|
|
private:
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(LeaveCompilerScope);
|
|
|
|
};
|
|
|
|
#endif // defined(DEBUG)
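// Usage sketch (illustrative) tying the two scopes together: compilation
// work brackets itself with EnterCompilerScope, and code inside it that
// must see the original (non-cloned) fields again temporarily uses
// LeaveCompilerScope:
//
//   {
//     EnterCompilerScope enter_compiler;  // operate on cloned fields
//     // ... compile ...
//     {
//       LeaveCompilerScope leave_compiler;  // back to original fields
//       // ... e.g. run runtime code from within the compiler ...
//     }
//   }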
|
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
} // namespace dart
|
|
|
|
|
2016-10-26 07:26:03 +00:00
|
|
|
#endif // RUNTIME_VM_THREAD_H_
|