// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef VM_THREAD_H_
|
|
|
|
#define VM_THREAD_H_
|
|
|
|
|
2015-03-25 22:41:33 +00:00
|
|
|
#include "vm/globals.h"
|
|
|
|
#include "vm/os_thread.h"
|
2015-06-09 16:33:36 +00:00
|
|
|
#include "vm/store_buffer.h"
|
2015-01-22 14:14:16 +00:00
|
|
|
|
|
|
|
namespace dart {
|
|
|
|
|
2015-03-25 22:41:33 +00:00
|
|
|
// Forward declarations.
class CHA;
class HandleScope;
class Heap;
class InterruptableThreadState;
class Isolate;
class Object;
class RawBool;
class RawObject;
class StackResource;
class TimelineEventBlock;
class Zone;
2015-08-03 14:26:23 +00:00
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
// List of VM-global objects/addresses cached in each Thread object.
// Each entry V(type, member_name, initializer_expr, default_init_value) is
// expanded inside class Thread into a member declaration and an offset
// accessor (see DEFINE_OFFSET_METHOD / DECLARE_MEMBERS).
#define CACHED_VM_OBJECTS_LIST(V)                                              \
  V(RawObject*, object_null_, Object::null(), NULL)                            \
  V(RawBool*, bool_true_, Object::bool_true().raw(), NULL)                     \
  V(RawBool*, bool_false_, Object::bool_false().raw(), NULL)

// VM-global code addresses cached per thread so generated code can load them
// with a single Thread-relative access.
#define CACHED_ADDRESSES_LIST(V)                                               \
  V(uword, update_store_buffer_entry_point_,                                   \
    StubCode::UpdateStoreBuffer_entry()->EntryPoint(), 0)

#define CACHED_CONSTANTS_LIST(V)                                               \
  CACHED_VM_OBJECTS_LIST(V)                                                    \
  CACHED_ADDRESSES_LIST(V)
|
|
|
|
|
2015-03-25 22:41:33 +00:00
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
// A VM thread; may be executing Dart code or performing helper tasks like
|
2015-04-01 17:48:11 +00:00
|
|
|
// garbage collection or compilation. The Thread structure associated with
|
2015-05-15 17:30:33 +00:00
|
|
|
// a thread is allocated by EnsureInit before entering an isolate, and destroyed
|
|
|
|
// automatically when the underlying OS thread exits. NOTE: On Windows, CleanUp
|
|
|
|
// must currently be called manually (issue 23474).
|
2015-01-22 14:14:16 +00:00
|
|
|
class Thread {
|
|
|
|
public:
|
2015-04-01 17:48:11 +00:00
|
|
|
// The currently executing thread, or NULL if not yet initialized.
|
2015-01-22 14:14:16 +00:00
|
|
|
static Thread* Current() {
|
2015-03-27 18:11:51 +00:00
|
|
|
return reinterpret_cast<Thread*>(OSThread::GetThreadLocal(thread_key_));
|
2015-03-17 19:24:26 +00:00
|
|
|
}
|
2015-04-01 17:48:11 +00:00
|
|
|
|
2015-05-15 17:30:33 +00:00
|
|
|
// Initializes the current thread as a VM thread, if not already done.
|
|
|
|
static void EnsureInit();
|
|
|
|
|
|
|
|
// Makes the current thread enter 'isolate'.
|
2015-04-01 17:48:11 +00:00
|
|
|
static void EnterIsolate(Isolate* isolate);
|
2015-05-15 12:48:49 +00:00
|
|
|
// Makes the current thread exit its isolate.
|
2015-04-01 17:48:11 +00:00
|
|
|
static void ExitIsolate();
|
|
|
|
|
2015-05-15 12:48:49 +00:00
|
|
|
// A VM thread other than the main mutator thread can enter an isolate as a
|
|
|
|
// "helper" to gain limited concurrent access to the isolate. One example is
|
|
|
|
// SweeperTask (which uses the class table, which is copy-on-write).
|
|
|
|
// TODO(koda): Properly synchronize heap access to expand allowed operations.
|
|
|
|
static void EnterIsolateAsHelper(Isolate* isolate);
|
|
|
|
static void ExitIsolateAsHelper();
|
|
|
|
|
2015-06-09 16:33:36 +00:00
|
|
|
// Called when the current thread transitions from mutator to collector.
|
|
|
|
// Empties the store buffer block into the isolate.
|
|
|
|
// TODO(koda): Always run GC in separate thread.
|
|
|
|
static void PrepareForGC();
|
|
|
|
|
2015-05-15 17:30:33 +00:00
|
|
|
#if defined(TARGET_OS_WINDOWS)
|
|
|
|
// Clears the state of the current thread and frees the allocation.
|
2015-04-01 17:48:11 +00:00
|
|
|
static void CleanUp();
|
2015-05-15 17:30:33 +00:00
|
|
|
#endif
|
2015-04-01 17:48:11 +00:00
|
|
|
|
|
|
|
// Called at VM startup.
|
2015-07-08 11:37:47 +00:00
|
|
|
static void InitOnceBeforeIsolate();
|
|
|
|
static void InitOnceAfterObjectAndStubCode();
|
2015-01-22 14:14:16 +00:00
|
|
|
|
2015-07-14 00:49:49 +00:00
|
|
|
~Thread();
|
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
// The topmost zone used for allocation in this thread.
|
2015-07-09 18:22:26 +00:00
|
|
|
Zone* zone() const { return state_.zone; }
|
2015-01-22 14:14:16 +00:00
|
|
|
|
2015-03-25 22:41:33 +00:00
|
|
|
// The isolate that this thread is operating on, or NULL if none.
|
|
|
|
Isolate* isolate() const { return isolate_; }
|
2015-05-26 20:46:12 +00:00
|
|
|
static intptr_t isolate_offset() {
|
|
|
|
return OFFSET_OF(Thread, isolate_);
|
|
|
|
}
|
2015-02-09 18:54:20 +00:00
|
|
|
|
2015-04-13 20:59:51 +00:00
|
|
|
// The (topmost) CHA for the compilation in the isolate of this thread.
|
|
|
|
// TODO(23153): Move this out of Isolate/Thread.
|
|
|
|
CHA* cha() const;
|
|
|
|
void set_cha(CHA* value);
|
2015-03-18 00:29:26 +00:00
|
|
|
|
2015-06-09 16:33:36 +00:00
|
|
|
void StoreBufferAddObject(RawObject* obj);
|
|
|
|
void StoreBufferAddObjectGC(RawObject* obj);
|
|
|
|
#if defined(TESTING)
|
|
|
|
bool StoreBufferContains(RawObject* obj) const {
|
|
|
|
return store_buffer_block_->Contains(obj);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
void StoreBufferBlockProcess(bool check_threshold);
|
|
|
|
static intptr_t store_buffer_block_offset() {
|
|
|
|
return OFFSET_OF(Thread, store_buffer_block_);
|
|
|
|
}
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
uword top_exit_frame_info() const { return state_.top_exit_frame_info; }
|
|
|
|
static intptr_t top_exit_frame_info_offset() {
|
|
|
|
return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_exit_frame_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
StackResource* top_resource() const { return state_.top_resource; }
|
|
|
|
void set_top_resource(StackResource* value) {
|
|
|
|
state_.top_resource = value;
|
|
|
|
}
|
|
|
|
static intptr_t top_resource_offset() {
|
|
|
|
return OFFSET_OF(Thread, state_) + OFFSET_OF(State, top_resource);
|
|
|
|
}
|
|
|
|
|
2015-08-07 17:28:56 +00:00
|
|
|
void set_thread_state(InterruptableThreadState* state) {
|
|
|
|
ASSERT((thread_state() == NULL) || (state == NULL));
|
|
|
|
state_.thread_state = state;
|
|
|
|
}
|
|
|
|
|
|
|
|
InterruptableThreadState* thread_state() const {
|
|
|
|
return state_.thread_state;
|
|
|
|
}
|
|
|
|
|
2015-08-03 14:26:23 +00:00
|
|
|
static intptr_t heap_offset() {
|
|
|
|
return OFFSET_OF(Thread, heap_);
|
|
|
|
}
|
|
|
|
|
2015-07-17 02:17:30 +00:00
|
|
|
int32_t no_handle_scope_depth() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.no_handle_scope_depth;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void IncrementNoHandleScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_handle_scope_depth < INT_MAX);
|
|
|
|
state_.no_handle_scope_depth += 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void DecrementNoHandleScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_handle_scope_depth > 0);
|
|
|
|
state_.no_handle_scope_depth -= 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
HandleScope* top_handle_scope() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.top_handle_scope;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_top_handle_scope(HandleScope* handle_scope) {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
state_.top_handle_scope = handle_scope;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2015-07-22 22:09:54 +00:00
|
|
|
int32_t no_safepoint_scope_depth() const {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
return state_.no_safepoint_scope_depth;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void IncrementNoSafepointScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_safepoint_scope_depth < INT_MAX);
|
|
|
|
state_.no_safepoint_scope_depth += 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
void DecrementNoSafepointScopeDepth() {
|
|
|
|
#if defined(DEBUG)
|
|
|
|
ASSERT(state_.no_safepoint_scope_depth > 0);
|
|
|
|
state_.no_safepoint_scope_depth -= 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
// Collection of isolate-specific state of a thread that is saved/restored
|
|
|
|
// on isolate exit/re-entry.
|
|
|
|
struct State {
|
|
|
|
Zone* zone;
|
|
|
|
uword top_exit_frame_info;
|
|
|
|
StackResource* top_resource;
|
2015-08-07 17:55:13 +00:00
|
|
|
TimelineEventBlock* timeline_block;
|
2015-08-07 17:28:56 +00:00
|
|
|
// TODO(koda): Migrate individual fields of InterruptableThreadState.
|
|
|
|
InterruptableThreadState* thread_state;
|
2015-07-17 02:17:30 +00:00
|
|
|
#if defined(DEBUG)
|
|
|
|
HandleScope* top_handle_scope;
|
|
|
|
intptr_t no_handle_scope_depth;
|
2015-07-22 22:09:54 +00:00
|
|
|
int32_t no_safepoint_scope_depth;
|
2015-07-17 02:17:30 +00:00
|
|
|
#endif
|
2015-07-09 18:22:26 +00:00
|
|
|
};
|
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
#define DEFINE_OFFSET_METHOD(type_name, member_name, expr, default_init_value) \
|
|
|
|
static intptr_t member_name##offset() { \
|
|
|
|
return OFFSET_OF(Thread, member_name); \
|
|
|
|
}
|
|
|
|
CACHED_CONSTANTS_LIST(DEFINE_OFFSET_METHOD)
|
|
|
|
#undef DEFINE_OFFSET_METHOD
|
|
|
|
|
|
|
|
static bool CanLoadFromThread(const Object& object);
|
|
|
|
static intptr_t OffsetFromThread(const Object& object);
|
|
|
|
|
2015-08-07 17:55:13 +00:00
|
|
|
TimelineEventBlock* timeline_block() const {
|
|
|
|
return state_.timeline_block;
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_timeline_block(TimelineEventBlock* block) {
|
|
|
|
state_.timeline_block = block;
|
|
|
|
}
|
|
|
|
|
2015-01-22 14:14:16 +00:00
|
|
|
private:
|
2015-03-27 18:11:51 +00:00
|
|
|
static ThreadLocalKey thread_key_;
|
2015-03-25 22:41:33 +00:00
|
|
|
|
|
|
|
Isolate* isolate_;
|
2015-08-03 14:26:23 +00:00
|
|
|
Heap* heap_;
|
2015-07-09 18:22:26 +00:00
|
|
|
State state_;
|
2015-06-09 16:33:36 +00:00
|
|
|
StoreBufferBlock* store_buffer_block_;
|
2015-07-08 11:37:47 +00:00
|
|
|
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \
|
|
|
|
type_name member_name;
|
|
|
|
CACHED_CONSTANTS_LIST(DECLARE_MEMBERS)
|
|
|
|
#undef DECLARE_MEMBERS
|
|
|
|
|
|
|
|
explicit Thread(bool init_vm_constants = true);
|
2015-03-25 22:41:33 +00:00
|
|
|
|
2015-07-08 11:37:47 +00:00
|
|
|
void InitVMConstants();
|
2015-04-01 17:48:11 +00:00
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
void ClearState() {
|
|
|
|
memset(&state_, 0, sizeof(state_));
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_zone(Zone* zone) {
|
|
|
|
state_.zone = zone;
|
|
|
|
}
|
|
|
|
|
|
|
|
void set_top_exit_frame_info(uword top_exit_frame_info) {
|
|
|
|
state_.top_exit_frame_info = top_exit_frame_info;
|
|
|
|
}
|
|
|
|
|
2015-04-01 17:48:11 +00:00
|
|
|
static void SetCurrent(Thread* current);
|
|
|
|
|
2015-07-09 18:22:26 +00:00
|
|
|
void Schedule(Isolate* isolate);
|
|
|
|
void Unschedule();
|
|
|
|
|
2015-07-21 16:37:23 +00:00
|
|
|
friend class ApiZone;
|
2015-07-09 18:22:26 +00:00
|
|
|
friend class Isolate;
|
|
|
|
friend class StackZone;
|
2015-01-22 14:14:16 +00:00
|
|
|
DISALLOW_COPY_AND_ASSIGN(Thread);
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace dart
|
|
|
|
|
|
|
|
#endif // VM_THREAD_H_
|