// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef VM_THREAD_H_
#define VM_THREAD_H_

#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/os_thread.h"
#include "vm/store_buffer.h"
#include "vm/runtime_entry_list.h"

namespace dart {
class AbstractType;
class Array;
class CHA;
class Class;
class Code;
class Error;
class ExceptionHandlers;
class Field;
class Function;
class GrowableObjectArray;
class HandleScope;
class Heap;
class Instance;
class Isolate;
class Library;
class Log;
class LongJumpScope;
class Object;
class PcDescriptors;
class RawBool;
class RawObject;
class RawCode;
class RawGrowableObjectArray;
class RawString;
class RuntimeEntry;
class StackResource;
class String;
class TimelineEventBlock;
class TypeArguments;
class TypeParameter;
class Zone;
2015-10-09 17:10:34 +00:00
|
|
|
#define REUSABLE_HANDLE_LIST(V) \
|
|
|
|
V(AbstractType) \
|
|
|
|
V(Array) \
|
|
|
|
V(Class) \
|
|
|
|
V(Code) \
|
|
|
|
V(Error) \
|
|
|
|
V(ExceptionHandlers) \
|
|
|
|
V(Field) \
|
|
|
|
V(Function) \
|
|
|
|
V(GrowableObjectArray) \
|
|
|
|
V(Instance) \
|
|
|
|
V(Library) \
|
|
|
|
V(Object) \
|
|
|
|
V(PcDescriptors) \
|
|
|
|
V(String) \
|
|
|
|
V(TypeArguments) \
|
|
|
|
V(TypeParameter) \
|
|
|
|
|
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
// List of VM-global objects/addresses cached in each Thread object.
|
|
|
|
#define CACHED_VM_OBJECTS_LIST(V) \
|
|
|
|
V(RawObject*, object_null_, Object::null(), NULL) \
|
|
|
|
V(RawBool*, bool_true_, Object::bool_true().raw(), NULL) \
|
|
|
|
V(RawBool*, bool_false_, Object::bool_false().raw(), NULL) \
|
2015-09-19 11:21:09 +00:00
|
|
|
V(RawCode*, update_store_buffer_code_, \
|
|
|
|
StubCode::UpdateStoreBuffer_entry()->code(), NULL) \
|
|
|
|
V(RawCode*, fix_callers_target_code_, \
|
|
|
|
StubCode::FixCallersTarget_entry()->code(), NULL) \
|
|
|
|
V(RawCode*, fix_allocation_stub_code_, \
|
|
|
|
StubCode::FixAllocationStubTarget_entry()->code(), NULL) \
|
|
|
|
V(RawCode*, invoke_dart_code_stub_, \
|
|
|
|
StubCode::InvokeDartCode_entry()->code(), NULL) \
|
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
|
|
|
|
#define CACHED_ADDRESSES_LIST(V) \
|
|
|
|
V(uword, update_store_buffer_entry_point_, \
|
2015-09-02 21:58:26 +00:00
|
|
|
StubCode::UpdateStoreBuffer_entry()->EntryPoint(), 0) \
|
2015-09-20 20:48:17 +00:00
|
|
|
V(uword, native_call_wrapper_entry_point_, \
|
|
|
|
NativeEntry::NativeCallWrapperEntry(), 0) \
|
2015-09-04 17:32:46 +00:00
|
|
|
V(RawString**, predefined_symbols_address_, \
|
|
|
|
Symbols::PredefinedAddress(), NULL) \
|
2015-07-08 11:37:47 +00:00
|
|
|
|
|
|
|
#define CACHED_CONSTANTS_LIST(V) \
|
|
|
|
CACHED_VM_OBJECTS_LIST(V) \
|
2015-09-02 21:58:26 +00:00
|
|
|
CACHED_ADDRESSES_LIST(V) \
|
2015-07-08 11:37:47 +00:00
|
|
|
|
2015-08-20 21:38:38 +00:00
|
|
|
struct InterruptedThreadState {
|
|
|
|
ThreadId tid;
|
|
|
|
uintptr_t pc;
|
|
|
|
uintptr_t csp;
|
|
|
|
uintptr_t dsp;
|
|
|
|
uintptr_t fp;
|
|
|
|
uintptr_t lr;
|
|
|
|
};
|
|
|
|
|
|
|
|
// When a thread is interrupted the thread specific interrupt callback will be
|
|
|
|
// invoked. Each callback is given an InterruptedThreadState and the user data
|
|
|
|
// pointer. When inside a thread interrupt callback doing any of the following
|
|
|
|
// is forbidden:
|
|
|
|
// * Accessing TLS -- Because on Windows the callback will be running in a
|
|
|
|
// different thread.
|
|
|
|
// * Allocating memory -- Because this takes locks which may already be held,
|
|
|
|
// resulting in a dead lock.
|
|
|
|
// * Taking a lock -- See above.
|
|
|
|
typedef void (*ThreadInterruptCallback)(const InterruptedThreadState& state,
|
|
|
|
void* data);
|
2015-03-25 22:41:33 +00:00
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
// A VM thread; may be executing Dart code or performing helper tasks like
|
2015-04-01 17:48:11 +00:00
|
|
|
// garbage collection or compilation. The Thread structure associated with
|
2015-05-15 17:30:33 +00:00
|
|
|
// a thread is allocated by EnsureInit before entering an isolate, and destroyed
|
|
|
|
// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
|
|
|
|
// must currently be called manually (issue 23474).
|
2015-01-22 14:14:16 +00:00
|
|
|
class Thread {
|
|
|
|
public:
|
2015-04-01 17:48:11 +00:00
|
|
|
// The currently executing thread, or NULL if not yet initialized.
|
2015-01-22 14:14:16 +00:00
|
|
|
static Thread* Current() {
|
2015-03-27 18:11:51 +00:00
|
|
|
return reinterpret_cast<Thread*>(OSThread::GetThreadLocal(thread_key_));
|
2015-03-17 19:24:26 +00:00
|
|
|
}
|
2015-04-01 17:48:11 +00:00
|
|
|
|
2015-05-15 17:30:33 +00:00
|
|
|
// Initializes the current thread as a VM thread, if not already done.
|
|
|
|
static void EnsureInit();
|
|
|
|
|
|
|
|
// Makes the current thread enter 'isolate'.
|
2015-04-01 17:48:11 +00:00
|
|
|
static void EnterIsolate(Isolate* isolate);
|
2015-05-15 12:48:49 +00:00
|
|
|
// Makes the current thread exit its isolate.
|
2015-04-01 17:48:11 +00:00
|
|
|
static void ExitIsolate();
|
|
|
|
|
2015-05-15 12:48:49 +00:00
|
|
|
// A VM thread other than the main mutator thread can enter an isolate as a
|
|
|
|
// "helper" to gain limited concurrent access to the isolate. One example is
|
|
|
|
// SweeperTask (which uses the class table, which is copy-on-write).
|
|
|
|
// TODO(koda): Properly synchronize heap access to expand allowed operations.
|
2015-08-28 17:00:05 +00:00
|
|
|
static void EnterIsolateAsHelper(Isolate* isolate,
|
|
|
|
bool bypass_safepoint = false);
|
|
|
|
static void ExitIsolateAsHelper(bool bypass_safepoint = false);
|
2015-05-15 12:48:49 +00:00
|
|
|
|
2015-06-09 16:33:36 +00:00
|
|
|
// Called when the current thread transitions from mutator to collector.
|
|
|
|
// Empties the store buffer block into the isolate.
|
|
|
|
// TODO(koda): Always run GC in separate thread.
|
|
|
|
static void PrepareForGC();
|
|
|
|
|
2015-04-01 17:48:11 +00:00
|
|
|
// Called at VM startup.
|
2015-07-08 11:37:47 +00:00
|
|
|
static void InitOnceBeforeIsolate();
|
|
|
|
static void InitOnceAfterObjectAndStubCode();
|
2015-01-22 14:14:16 +00:00
|
|
|
|
2015-10-13 16:49:20 +00:00
|
|
|
// Called at VM shutdown
|
|
|
|
static void Shutdown();
|
2015-07-14 00:49:49 +00:00
|
|
|
~Thread();
|
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
// The topmost zone used for allocation in this thread.
|
2015-07-09 18:22:26 +00:00
|
|
|
Zone* zone() const { return state_.zone; }
|
2015-01-22 14:14:16 +00:00
|
|
|
|
2015-03-25 22:41:33 +00:00
|
|
|
// The isolate that this thread is operating on, or NULL if none.
|
|
|
|
Isolate* isolate() const { return isolate_; }
|
2015-05-26 20:46:12 +00:00
|
|
|
static intptr_t isolate_offset() {
|
|
|
|
return OFFSET_OF(Thread, isolate_);
|
|
|
|
}
|
2015-02-09 18:54:20 +00:00
|
|
|
|
2015-10-13 21:29:43 +00:00
|
|
|
// The (topmost) CHA for the compilation in this thread.
|
2015-04-13 20:59:51 +00:00
|
|
|
CHA* cha() const;
|
|
|
|
void set_cha(CHA* value);
|
2015-03-18 00:29:26 +00:00
|
|
|
|
2015-10-13 21:29:43 +00:00
|
|
|
int32_t no_callback_scope_depth() const {
|
|
|
|
return no_callback_scope_depth_;
|
|
|
|
}
|
|
|
|
|
|
|
|
void IncrementNoCallbackScopeDepth() {
|
|
|
|
ASSERT(no_callback_scope_depth_ < INT_MAX);
|
|
|
|
no_callback_scope_depth_ += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void DecrementNoCallbackScopeDepth() {
|
|
|
|
ASSERT(no_callback_scope_depth_ > 0);
|
|
|
|
no_callback_scope_depth_ -= 1;
|
|
|
|
}
|
|
|
|
|
2015-06-09 16:33:36 +00:00
|
|
|
void StoreBufferAddObject(RawObject* obj);
|
|
|
|
void StoreBufferAddObjectGC(RawObject* obj);
|
|
|
|
#if defined(TESTING)
|
|
|
|
bool StoreBufferContains(RawObject* obj) const {
|
|
|
|
return store_buffer_block_->Contains(obj);
|
|
|
|
}
|
|
|
|
#endif
|
2015-09-17 19:21:55 +00:00
|
|
|
void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
|
2015-06-09 16:33:36 +00:00
|
|
|
static intptr_t store_buffer_block_offset() {
|
|
|
|
return OFFSET_OF(Thread, store_buffer_block_);
|
|
|
|
}
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
uword top_exit_frame_info() const { return state_.top_exit_frame_info; }
|
|
|
|
static intptr_t top_exit_frame_info_offset() {
|
|
|
|
return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_exit_frame_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
StackResource* top_resource() const { return state_.top_resource; }
|
|
|
|
void set_top_resource(StackResource* value) {
|
|
|
|
state_.top_resource = value;
|
|
|
|
}
|
|
|
|
static intptr_t top_resource_offset() {
|
|
|
|
return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_resource);
|
|
|
|
}
|
|
|
|
|
2015-08-03 14:26:23 +00:00
|
|
|
static intptr_t heap_offset() {
|
|
|
|
return OFFSET_OF(Thread, heap_);
|
|
|
|
}
|
|
|
|
|
2015-07-17 02:17:30 +00:00
|
|
|
int32_t no_handle_scope_depth() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.no_handle_scope_depth;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void IncrementNoHandleScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_handle_scope_depth < INT_MAX);
|
|
|
|
state_.no_handle_scope_depth += 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void DecrementNoHandleScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_handle_scope_depth > 0);
|
|
|
|
state_.no_handle_scope_depth -= 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
HandleScope* top_handle_scope() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.top_handle_scope;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_top_handle_scope(HandleScope* handle_scope) {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
state_.top_handle_scope = handle_scope;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2015-07-22 22:09:54 +00:00
|
|
|
int32_t no_safepoint_scope_depth() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.no_safepoint_scope_depth;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void IncrementNoSafepointScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_safepoint_scope_depth < INT_MAX);
|
|
|
|
state_.no_safepoint_scope_depth += 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void DecrementNoSafepointScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_safepoint_scope_depth > 0);
|
|
|
|
state_.no_safepoint_scope_depth -= 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
// Collection of isolate-specific state of a thread that is saved/restored
|
|
|
|
// on isolate exit/re-entry.
|
|
|
|
struct State {
|
|
|
|
Zone* zone;
|
|
|
|
uword top_exit_frame_info;
|
|
|
|
StackResource* top_resource;
|
2015-08-17 20:29:17 +00:00
|
|
|
LongJumpScope* long_jump_base;
|
2015-07-17 02:17:30 +00:00
|
|
|
#if defined(DEBUG)
|
|
|
|
HandleScope* top_handle_scope;
|
|
|
|
intptr_t no_handle_scope_depth;
|
2015-07-22 22:09:54 +00:00
|
|
|
int32_t no_safepoint_scope_depth;
|
2015-07-17 02:17:30 +00:00
|
|
|
#endif
|
2015-07-09 18:22:26 +00:00
|
|
|
};
|
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
|
|
|
|
static intptr_t member_name##offset() { \
|
|
|
|
return OFFSET_OF(Thread, member_name); \
|
|
|
|
}
|
|
|
|
CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
|
2015-09-02 21:58:26 +00:00
|
|
|
#undef DEFINE_OFFSET_METHOD
|
|
|
|
|
|
|
|
#define DEFINE_OFFSET_METHOD(name) \
|
|
|
|
static intptr_t name##_entry_point_offset() { \
|
|
|
|
return OFFSET_OF(Thread, name##_entry_point_); \
|
|
|
|
}
|
|
|
|
RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
|
|
|
|
#undef DEFINE_OFFSET_METHOD
|
|
|
|
|
|
|
|
#define DEFINE_OFFSET_METHOD(returntype, name, ...) \
|
|
|
|
static intptr_t name##_entry_point_offset() { \
|
|
|
|
return OFFSET_OF(Thread, name##_entry_point_); \
|
|
|
|
}
|
|
|
|
LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
|
2015-07-08 11:37:47 +00:00
|
|
|
#undef DEFINE_OFFSET_METHOD
|
|
|
|
|
|
|
|
static bool CanLoadFromThread(const Object& object);
|
|
|
|
static intptr_t OffsetFromThread(const Object& object);
|
2015-09-02 21:58:26 +00:00
|
|
|
static intptr_t OffsetFromThread(const RuntimeEntry* runtime_entry);
|
2015-07-08 11:37:47 +00:00
|
|
|
|
2015-09-24 18:32:21 +00:00
|
|
|
Mutex* timeline_block_lock() {
|
|
|
|
return &timeline_block_lock_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Only safe to access when holding |timeline_block_lock_|.
|
2015-08-07 17:55:13 +00:00
|
|
|
TimelineEventBlock* timeline_block() const {
|
2015-10-16 20:36:58 +00:00
|
|
|
return timeline_block_;
|
2015-08-07 17:55:13 +00:00
|
|
|
}
|
|
|
|
|
2015-09-24 18:32:21 +00:00
|
|
|
// Only safe to access when holding |timeline_block_lock_|.
|
2015-08-07 17:55:13 +00:00
|
|
|
void set_timeline_block(TimelineEventBlock* block) {
|
2015-10-16 20:36:58 +00:00
|
|
|
timeline_block_ = block;
|
2015-08-07 17:55:13 +00:00
|
|
|
}
|
|
|
|
|
2015-09-09 22:30:38 +00:00
|
|
|
class Log* log() const;
|
|
|
|
|
2015-10-13 17:08:14 +00:00
|
|
|
static const intptr_t kNoDeoptId = -1;
|
|
|
|
static const intptr_t kDeoptIdStep = 2;
|
|
|
|
static const intptr_t kDeoptIdBeforeOffset = 0;
|
|
|
|
static const intptr_t kDeoptIdAfterOffset = 1;
|
|
|
|
intptr_t deopt_id() const { return deopt_id_; }
|
|
|
|
void set_deopt_id(int value) {
|
|
|
|
ASSERT(value >= 0);
|
|
|
|
deopt_id_ = value;
|
|
|
|
}
|
|
|
|
intptr_t GetNextDeoptId() {
|
|
|
|
ASSERT(deopt_id_ != kNoDeoptId);
|
|
|
|
const intptr_t id = deopt_id_;
|
|
|
|
deopt_id_ += kDeoptIdStep;
|
|
|
|
return id;
|
|
|
|
}
|
|
|
|
|
|
|
|
static intptr_t ToDeoptAfter(intptr_t deopt_id) {
|
|
|
|
ASSERT(IsDeoptBefore(deopt_id));
|
|
|
|
return deopt_id + kDeoptIdAfterOffset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool IsDeoptBefore(intptr_t deopt_id) {
|
|
|
|
return (deopt_id % kDeoptIdStep) == kDeoptIdBeforeOffset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool IsDeoptAfter(intptr_t deopt_id) {
|
|
|
|
return (deopt_id % kDeoptIdStep) == kDeoptIdAfterOffset;
|
|
|
|
}
|
|
|
|
|
2015-08-17 20:29:17 +00:00
|
|
|
LongJumpScope* long_jump_base() const { return state_.long_jump_base; }
|
|
|
|
void set_long_jump_base(LongJumpScope* value) {
|
|
|
|
state_.long_jump_base = value;
|
|
|
|
}
|
|
|
|
|
2015-10-05 19:50:17 +00:00
|
|
|
uword vm_tag() const {
|
|
|
|
return vm_tag_;
|
|
|
|
}
|
|
|
|
void set_vm_tag(uword tag) {
|
|
|
|
vm_tag_ = tag;
|
|
|
|
}
|
|
|
|
static intptr_t vm_tag_offset() {
|
|
|
|
return OFFSET_OF(Thread, vm_tag_);
|
|
|
|
}
|
|
|
|
|
2015-08-20 21:38:38 +00:00
|
|
|
ThreadId id() const {
|
|
|
|
ASSERT(id_ != OSThread::kInvalidThreadId);
|
|
|
|
return id_;
|
|
|
|
}
|
|
|
|
|
2015-10-26 18:07:16 +00:00
|
|
|
ThreadId join_id() const {
|
|
|
|
ASSERT(join_id_ != OSThread::kInvalidThreadJoinId);
|
|
|
|
return join_id_;
|
|
|
|
}
|
|
|
|
|
2015-08-20 21:38:38 +00:00
|
|
|
void SetThreadInterrupter(ThreadInterruptCallback callback, void* data);
|
|
|
|
|
|
|
|
bool IsThreadInterrupterEnabled(ThreadInterruptCallback* callback,
|
|
|
|
void** data) const;
|
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
#if defined(DEBUG)
|
|
|
|
#define REUSABLE_HANDLE_SCOPE_ACCESSORS(object) \
|
|
|
|
void set_reusable_##object##_handle_scope_active(bool value) { \
|
|
|
|
reusable_##object##_handle_scope_active_ = value; \
|
|
|
|
} \
|
|
|
|
bool reusable_##object##_handle_scope_active() const { \
|
|
|
|
return reusable_##object##_handle_scope_active_; \
|
|
|
|
}
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_ACCESSORS)
|
|
|
|
#undef REUSABLE_HANDLE_SCOPE_ACCESSORS
|
|
|
|
|
2015-10-09 22:52:09 +00:00
|
|
|
bool IsAnyReusableHandleScopeActive() const {
|
2015-10-09 17:10:34 +00:00
|
|
|
#define IS_REUSABLE_HANDLE_SCOPE_ACTIVE(object) \
|
2015-10-09 22:52:09 +00:00
|
|
|
if (reusable_##object##_handle_scope_active_) return true;
|
|
|
|
REUSABLE_HANDLE_LIST(IS_REUSABLE_HANDLE_SCOPE_ACTIVE)
|
|
|
|
return false;
|
2015-10-09 17:10:34 +00:00
|
|
|
#undef IS_REUSABLE_HANDLE_SCOPE_ACTIVE
|
2015-10-09 22:52:09 +00:00
|
|
|
}
|
2015-10-09 17:10:34 +00:00
|
|
|
#endif // defined(DEBUG)
|
|
|
|
|
2015-10-09 22:52:09 +00:00
|
|
|
void ClearReusableHandles();
|
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
#define REUSABLE_HANDLE(object) \
|
|
|
|
object& object##Handle() const { \
|
|
|
|
return *object##_handle_; \
|
|
|
|
}
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE)
|
|
|
|
#undef REUSABLE_HANDLE
|
|
|
|
|
2015-10-17 00:02:43 +00:00
|
|
|
RawGrowableObjectArray* pending_functions();
|
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
void VisitObjectPointers(ObjectPointerVisitor* visitor);
|
|
|
|
|
2015-10-26 18:07:16 +00:00
|
|
|
static bool IsThreadInList(ThreadId join_id);
|
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
private:
|
2015-10-09 17:10:34 +00:00
|
|
|
template<class T> T* AllocateReusableHandle();
|
|
|
|
|
2015-03-27 18:11:51 +00:00
|
|
|
static ThreadLocalKey thread_key_;
|
2015-03-25 22:41:33 +00:00
|
|
|
|
2015-08-20 21:38:38 +00:00
|
|
|
const ThreadId id_;
|
2015-10-26 18:07:16 +00:00
|
|
|
const ThreadId join_id_;
|
2015-08-20 21:38:38 +00:00
|
|
|
ThreadInterruptCallback thread_interrupt_callback_;
|
|
|
|
void* thread_interrupt_data_;
|
2015-03-25 22:41:33 +00:00
|
|
|
Isolate* isolate_;
|
2015-08-03 14:26:23 +00:00
|
|
|
Heap* heap_;
|
2015-07-09 18:22:26 +00:00
|
|
|
State state_;
|
2015-09-24 18:32:21 +00:00
|
|
|
Mutex timeline_block_lock_;
|
2015-10-16 20:36:58 +00:00
|
|
|
TimelineEventBlock* timeline_block_;
|
2015-06-09 16:33:36 +00:00
|
|
|
StoreBufferBlock* store_buffer_block_;
|
2015-09-09 22:30:38 +00:00
|
|
|
class Log* log_;
|
2015-07-08 11:37:47 +00:00
|
|
|
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
|
|
|
|
type_name member_name;
|
|
|
|
CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
|
2015-09-02 21:58:26 +00:00
|
|
|
#undef DECLARE_MEMBERS
|
|
|
|
|
|
|
|
#define DECLARE_MEMBERS(name) \
|
|
|
|
uword name##_entry_point_;
|
|
|
|
RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
|
|
|
|
#undef DECLARE_MEMBERS
|
|
|
|
|
|
|
|
#define DECLARE_MEMBERS(returntype, name, ...) \
|
|
|
|
uword name##_entry_point_;
|
|
|
|
LEAF_RUNTIME_ENTRY_LIST(DECLARE_MEMBERS)
|
2015-07-08 11:37:47 +00:00
|
|
|
#undef DECLARE_MEMBERS
|
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
// Reusable handles support.
|
|
|
|
#define REUSABLE_HANDLE_FIELDS(object) \
|
|
|
|
object* object##_handle_;
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_FIELDS)
|
|
|
|
#undef REUSABLE_HANDLE_FIELDS
|
|
|
|
|
|
|
|
#if defined(DEBUG)
|
|
|
|
#define REUSABLE_HANDLE_SCOPE_VARIABLE(object) \
|
|
|
|
bool reusable_##object##_handle_scope_active_;
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_VARIABLE);
|
|
|
|
#undef REUSABLE_HANDLE_SCOPE_VARIABLE
|
|
|
|
#endif // defined(DEBUG)
|
|
|
|
|
|
|
|
VMHandles reusable_handles_;
|
|
|
|
|
2015-10-17 00:02:43 +00:00
|
|
|
// Compiler state:
|
2015-10-13 21:29:43 +00:00
|
|
|
CHA* cha_;
|
2015-10-17 00:02:43 +00:00
|
|
|
intptr_t deopt_id_; // Compilation specific counter.
|
|
|
|
uword vm_tag_;
|
|
|
|
RawGrowableObjectArray* pending_functions_;
|
|
|
|
|
2015-10-13 21:29:43 +00:00
|
|
|
int32_t no_callback_scope_depth_;
|
|
|
|
|
2015-10-13 16:49:20 +00:00
|
|
|
// All |Thread|s are registered in the thread list.
|
|
|
|
Thread* thread_list_next_;
|
|
|
|
|
|
|
|
static Thread* thread_list_head_;
|
|
|
|
static Mutex* thread_list_lock_;
|
|
|
|
|
|
|
|
static void AddThreadToList(Thread* thread);
|
|
|
|
static void RemoveThreadFromList(Thread* thread);
|
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
explicit Thread(bool init_vm_constants = true);
|
2015-03-25 22:41:33 +00:00
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
void InitVMConstants();
|
2015-04-01 17:48:11 +00:00
|
|
|
|
2015-10-17 00:02:43 +00:00
|
|
|
void ClearState();
|
2015-07-09 18:22:26 +00:00
|
|
|
|
2015-09-17 19:21:55 +00:00
|
|
|
void StoreBufferRelease(
|
|
|
|
StoreBuffer::ThresholdPolicy policy = StoreBuffer::kCheckThreshold);
|
2015-08-18 14:23:17 +00:00
|
|
|
void StoreBufferAcquire();
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
void set_zone(Zone* zone) {
|
|
|
|
state_.zone = zone;
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_top_exit_frame_info(uword top_exit_frame_info) {
|
|
|
|
state_.top_exit_frame_info = top_exit_frame_info;
|
|
|
|
}
|
|
|
|
|
2015-04-01 17:48:11 +00:00
|
|
|
static void SetCurrent(Thread* current);
|
|
|
|
|
2015-08-28 17:00:05 +00:00
|
|
|
void Schedule(Isolate* isolate, bool bypass_safepoint = false);
|
|
|
|
void Unschedule(bool bypass_safepoint = false);
|
2015-07-09 18:22:26 +00:00
|
|
|
|
2015-10-09 17:10:34 +00:00
|
|
|
#define REUSABLE_FRIEND_DECLARATION(name) \
|
|
|
|
friend class Reusable##name##HandleScope;
|
|
|
|
REUSABLE_HANDLE_LIST(REUSABLE_FRIEND_DECLARATION)
|
|
|
|
#undef REUSABLE_FRIEND_DECLARATION
|
|
|
|
|
2015-07-21 16:37:23 +00:00
|
|
|
friend class ApiZone;
|
2015-07-09 18:22:26 +00:00
|
|
|
friend class Isolate;
|
2015-10-20 18:20:22 +00:00
|
|
|
friend class Simulator;
|
2015-07-09 18:22:26 +00:00
|
|
|
friend class StackZone;
|
2015-10-13 16:49:20 +00:00
|
|
|
friend class ThreadIterator;
|
|
|
|
friend class ThreadIteratorTestHelper;
|
2015-08-11 16:41:06 +00:00
|
|
|
friend class ThreadRegistry;
|
2015-10-13 16:49:20 +00:00
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
DISALLOW_COPY_AND_ASSIGN(Thread);
|
|
|
|
};
|
|
|
|
|
2015-10-13 16:49:20 +00:00
|
|
|
|
|
|
|
// Note that this takes the thread list lock, prohibiting threads from coming
|
|
|
|
// on- or off-line.
|
|
|
|
class ThreadIterator : public ValueObject {
|
|
|
|
public:
|
|
|
|
ThreadIterator();
|
|
|
|
~ThreadIterator();
|
|
|
|
|
|
|
|
// Returns false when there are no more threads left.
|
|
|
|
bool HasNext() const;
|
|
|
|
|
|
|
|
// Returns the current thread and moves forward.
|
|
|
|
Thread* Next();
|
|
|
|
|
|
|
|
private:
|
|
|
|
Thread* next_;
|
|
|
|
};
|
|
|
|
|
2015-10-14 14:59:33 +00:00
|
|
|
#if defined(TARGET_OS_WINDOWS)
|
|
|
|
// Clears the state of the current thread and frees the allocation.
|
|
|
|
void WindowsThreadCleanUp();
|
|
|
|
#endif
|
|
|
|
|
}  // namespace dart

#endif  // VM_THREAD_H_