[ VM ] Add support for heap sampling profiler

This CL introduces new embedding APIs to support heap sample
profiling. A registered sampling callback is invoked approximately once
every N allocated bytes, based on an exponential distribution, and
provides the isolate group the allocation occurred in, the user-visible
name of the allocated object's type, a weak persistent handle to the
allocated object, and the size of the allocation.

Sampling is triggered by setting artificial TLAB boundaries, which
force allocations that should be sampled onto the allocation slow path,
where the registered callback can be invoked with the allocation
information.
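
The mechanism can be pictured as a bump allocator whose fast-path limit
is lowered to the next sampling point. A minimal sketch of the idea
(hypothetical standalone code, not the VM's actual allocator, though
top/end/true_end mirror the Thread fields added in this CL):

  uintptr_t top;       // Next free byte in the TLAB.
  uintptr_t end;       // Artificial boundary: the next sampling point.
  uintptr_t true_end;  // Actual end of the TLAB.

  uintptr_t TryAllocateFast(intptr_t size) {
    if (top + size > end) {
      // Take the slow path, which may invoke the sampling callback even
      // though the TLAB may still have room up to true_end.
      return 0;
    }
    uintptr_t result = top;
    top += size;
    return result;
  }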

Only new space allocations are currently traced, with old space
allocation support to be added in a future CL.

TEST=DartAPI_HeapSampling

Change-Id: I22bcdeec6e823bc1ab44898d4c596fbed7169fa1
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/264520
Commit-Queue: Ben Konyi <bkonyi@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Ben Konyi 2022-11-22 18:05:24 +00:00 committed by Commit Queue
parent aa252e907e
commit 8caeaf7a6c
18 changed files with 3208 additions and 2670 deletions


@ -1270,6 +1270,52 @@ DART_EXPORT void Dart_KillIsolate(Dart_Isolate isolate);
*/
DART_EXPORT void Dart_NotifyIdle(int64_t deadline);
typedef void (*Dart_HeapSamplingCallback)(void* isolate_group_data,
Dart_Handle cls_name,
Dart_WeakPersistentHandle obj,
uintptr_t size);
/**
* Starts the heap sampling profiler for each thread in the VM.
*/
DART_EXPORT void Dart_EnableHeapSampling();
/**
* Stops the heap sampling profiler for each thread in the VM.
*/
DART_EXPORT void Dart_DisableHeapSampling();
/**
* Registers a callback that is invoked once per sampled allocation.
*
* Important notes:
*
* - When invoked, |cls_name| will be a handle to a Dart String representing
* the class name of the allocated object. This handle is stable and can be
* used as an identifier as it has the lifetime of its isolate group.
*
* - |obj| is a weak persistent handle to the object which caused the
* allocation. The value of this handle will be set to null when the object is
* garbage collected. |obj| should only be used to determine whether the
* object has been collected as there is no guarantee that it has been fully
* initialized. This handle should eventually be freed with
* Dart_DeleteWeakPersistentHandle once the embedder no longer needs it.
*
* - The provided callback must not call into the VM and should do as little
* work as possible to avoid performance penalties.
*/
DART_EXPORT void Dart_RegisterHeapSamplingCallback(
Dart_HeapSamplingCallback callback);
/**
* Sets the average heap sampling period based on a number of |bytes|
* allocated for each thread.
*
* In other words, approximately every |bytes| allocated will create a sample.
* Defaults to 512 KiB.
*/
DART_EXPORT void Dart_SetHeapSamplingPeriod(intptr_t bytes);
/**
* Notifies the VM that the embedder expects the application's working set has
* recently shrunk significantly and is not expected to rise in the near future.
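
For illustration, a minimal embedder-side sketch of wiring these hooks
together. RecordSample and StartHeapProfiling are hypothetical names;
only the APIs declared above are assumed:

  static void OnHeapSample(void* isolate_group_data,
                           Dart_Handle cls_name,
                           Dart_WeakPersistentHandle obj,
                           uintptr_t size) {
    // Per the notes above, this must not call back into the VM; just
    // hand the data off as cheaply as possible.
    RecordSample(isolate_group_data, cls_name, obj, size);  // Hypothetical.
  }

  void StartHeapProfiling() {
    Dart_RegisterHeapSamplingCallback(OnHeapSample);
    Dart_SetHeapSamplingPeriod(256 * 1024);  // Sample roughly every 256 KiB.
    Dart_EnableHeapSampling();
  }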


@ -32,6 +32,7 @@ class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class PersistentHandle;
// A 64-bit bitmap describing unboxed fields in a class.
//
@ -424,6 +425,13 @@ class ClassTable : public MallocAllocated {
classes_.GetColumn<kAllocationTracingStateIndex>());
}
PersistentHandle* UserVisibleNameFor(intptr_t cid) {
return classes_.At<kClassNameIndex>(cid);
}
void SetUserVisibleNameFor(intptr_t cid, PersistentHandle* name) {
classes_.At<kClassNameIndex>(cid) = name;
}
#else
void UpdateCachedAllocationTracingStateTablePointer() {}
#endif // !defined(PRODUCT)
@ -542,7 +550,8 @@ class ClassTable : public MallocAllocated {
kSizeIndex,
kUnboxedFieldBitmapIndex,
#if !defined(PRODUCT)
kAllocationTracingStateIndex
kAllocationTracingStateIndex,
kClassNameIndex,
#endif
};
@ -551,7 +560,8 @@ class ClassTable : public MallocAllocated {
ClassPtr,
uint32_t,
UnboxedFieldBitmap,
uint8_t>
uint8_t,
PersistentHandle*>
classes_;
#else
CidIndexedTable<ClassIdTagType, ClassPtr, uint32_t, UnboxedFieldBitmap>

File diff suppressed because it is too large.


@ -1826,6 +1826,45 @@ DART_EXPORT void Dart_NotifyDestroyed() {
T->heap()->NotifyDestroyed();
}
DART_EXPORT void Dart_EnableHeapSampling() {
#if !defined(PRODUCT)
IsolateGroup::ForEach([&](IsolateGroup* group) {
group->thread_registry()->ForEachThread(
[&](Thread* thread) { thread->heap_sampler().Enable(true); });
});
#endif
}
DART_EXPORT void Dart_DisableHeapSampling() {
#if !defined(PRODUCT)
IsolateGroup::ForEach([&](IsolateGroup* group) {
group->thread_registry()->ForEachThread(
[&](Thread* thread) { thread->heap_sampler().Enable(false); });
});
#endif
}
DART_EXPORT void Dart_RegisterHeapSamplingCallback(
Dart_HeapSamplingCallback callback) {
#if !defined(PRODUCT)
IsolateGroup::ForEach([&](IsolateGroup* group) {
group->thread_registry()->ForEachThread([&](Thread* thread) {
thread->heap_sampler().SetSamplingCallback(callback);
});
});
#endif
}
DART_EXPORT void Dart_SetHeapSamplingPeriod(intptr_t bytes) {
#if !defined(PRODUCT)
IsolateGroup::ForEach([&](IsolateGroup* group) {
group->thread_registry()->ForEachThread([&](Thread* thread) {
thread->heap_sampler().SetSamplingInterval(bytes);
});
});
#endif
}
DART_EXPORT void Dart_NotifyLowMemory() {
API_TIMELINE_BEGIN_END(Thread::Current());
Page::ClearCache();


@ -10494,6 +10494,96 @@ TEST_CASE(DartAPI_UserTags) {
"Dart_SetCurrentUserTag expects argument 'user_tag' to be non-null");
}
void* last_isolate_group_data = nullptr;
Dart_PersistentHandle last_allocation_cls = nullptr;
intptr_t heap_samples = 0;
void HeapSamplingCallback(void* isolate_group_data,
Dart_PersistentHandle cls_type,
Dart_WeakPersistentHandle obj,
uintptr_t size) {
last_isolate_group_data = isolate_group_data;
last_allocation_cls = cls_type;
heap_samples++;
}
TEST_CASE(DartAPI_HeapSampling) {
Dart_RegisterHeapSamplingCallback(HeapSamplingCallback);
Dart_EnableHeapSampling();
// Start with sampling on every byte allocated.
Dart_SetHeapSamplingPeriod(1);
auto isolate_group_data = Dart_CurrentIsolateGroupData();
// Some simple allocations
USE(Dart_NewList(100));
const char* name = nullptr;
Dart_Handle result = Dart_StringToCString(last_allocation_cls, &name);
EXPECT_VALID(result);
EXPECT(heap_samples > 0);
EXPECT_STREQ("List", name);
EXPECT_EQ(last_isolate_group_data, isolate_group_data);
heap_samples = 0;
USE(Dart_NewStringFromCString("Foo"));
result = Dart_StringToCString(last_allocation_cls, &name);
EXPECT_VALID(result);
EXPECT(heap_samples > 0);
EXPECT_STREQ("String", name);
EXPECT_EQ(last_isolate_group_data, isolate_group_data);
// Increase the sampling period and check that we don't sample each
// allocation. This should cause samples to be collected for approximately
// every 1KiB allocated.
Dart_SetHeapSamplingPeriod(1 << 10);
heap_samples = 0;
const intptr_t kNumAllocations = 1000;
for (intptr_t i = 0; i < kNumAllocations; ++i) {
USE(Dart_NewList(10));
}
EXPECT(heap_samples > 0);
EXPECT(heap_samples < kNumAllocations);
heap_samples = 0;
last_allocation_cls = nullptr;
const char* kScriptChars = R"(
foo() {
final list = [];
for (int i = 0; i < 1000; ++i) {
list.add(List.filled(100, 0));
}
}
)";
Dart_DisableHeapSampling();
Dart_Handle lib = TestCase::LoadTestScript(kScriptChars, nullptr);
EXPECT_VALID(lib);
Dart_EnableHeapSampling();
result = Dart_Invoke(lib, NewString("foo"), 0, nullptr);
EXPECT_VALID(result);
EXPECT(heap_samples > 0);
EXPECT(heap_samples < kNumAllocations);
Dart_DisableHeapSampling();
// Sampling on every byte allocated.
Dart_SetHeapSamplingPeriod(1);
// Ensure no more samples are collected.
heap_samples = 0;
last_allocation_cls = nullptr;
last_isolate_group_data = nullptr;
USE(Dart_NewList(10));
EXPECT_EQ(heap_samples, 0);
EXPECT_EQ(last_allocation_cls, nullptr);
EXPECT_EQ(last_isolate_group_data, nullptr);
// Clear heap sampling callback state.
Dart_RegisterHeapSamplingCallback(nullptr);
}
#endif // !PRODUCT
} // namespace dart


@ -23,6 +23,8 @@ heap_sources = [
"pointer_block.h",
"safepoint.cc",
"safepoint.h",
"sampler.cc",
"sampler.h",
"scavenger.cc",
"scavenger.h",
"spaces.h",


@ -248,6 +248,7 @@ class Page {
owner_ = thread;
thread->set_top(top_);
thread->set_end(end_);
thread->set_true_end(end_);
}
void Release(Thread* thread) {
ASSERT(owner_ == thread);
@ -255,6 +256,7 @@ class Page {
top_ = thread->top();
thread->set_top(0);
thread->set_end(0);
thread->set_true_end(0);
}
void Release() {
if (owner_ != nullptr) {

runtime/vm/heap/sampler.cc (new file, 170 lines)

@ -0,0 +1,170 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#if !defined(PRODUCT)
#include <math.h>
#include <algorithm>
#include "vm/heap/safepoint.h"
#include "vm/heap/sampler.h"
#include "vm/isolate.h"
#include "vm/lockers.h"
#include "vm/os.h"
#include "vm/random.h"
#include "vm/thread.h"
namespace dart {
HeapProfileSampler::HeapProfileSampler(Thread* thread)
: lock_(new RwLock()),
interval_to_next_sample_(kUninitialized),
thread_(thread) {}
HeapProfileSampler::~HeapProfileSampler() {
delete lock_;
lock_ = nullptr;
}
void HeapProfileSampler::Enable(bool enabled) {
WriteRwLocker locker(Thread::Current(), lock_);
enabled_ = enabled;
if (enabled) {
SetNextSamplingIntervalLocked(GetNextSamplingIntervalLocked());
} else {
// Reset the TLAB boundaries to the true end to avoid unnecessary slow
// path invocations when sampling is disabled.
thread_->set_end(thread_->true_end());
}
}
void HeapProfileSampler::HandleNewTLAB(intptr_t old_tlab_remaining_space) {
ReadRwLocker locker(thread_, lock_);
if (!enabled_ || next_tlab_offset_ == kUninitialized) {
return;
}
thread_->set_end(next_tlab_offset_ + old_tlab_remaining_space +
thread_->top());
next_tlab_offset_ = kUninitialized;
}
void HeapProfileSampler::SetSamplingInterval(intptr_t bytes_interval) {
WriteRwLocker locker(Thread::Current(), lock_);
ASSERT(bytes_interval >= 0);
sampling_interval_ = bytes_interval;
// Force reset the next sampling point.
thread_->set_end(thread_->true_end());
SetNextSamplingIntervalLocked(GetNextSamplingIntervalLocked());
}
void HeapProfileSampler::SetSamplingCallback(
Dart_HeapSamplingCallback callback) {
// Protect against the callback being changed in the middle of a sample.
WriteRwLocker locker(Thread::Current(), lock_);
callback_ = callback;
}
void HeapProfileSampler::InvokeCallbackForLastSample(
Dart_PersistentHandle type_name,
Dart_WeakPersistentHandle obj) {
ReadRwLocker locker(thread_, lock_);
if (!enabled_) {
return;
}
if (callback_ != nullptr) {
callback_(
reinterpret_cast<void*>(thread_->isolate_group()->embedder_data()),
type_name, obj, last_sample_size_);
}
last_sample_size_ = kUninitialized;
}
void HeapProfileSampler::SampleSize(intptr_t allocation_size) {
ReadRwLocker locker(thread_, lock_);
if (!enabled_) {
return;
}
// We should never be sampling an allocation that won't fit in the
// current TLAB.
ASSERT(allocation_size <=
static_cast<intptr_t>(thread_->true_end() - thread_->top()));
ASSERT(sampling_interval_ >= 0);
if (UNLIKELY(allocation_size >= sampling_interval_)) {
last_sample_size_ = allocation_size;
// Reset the sampling interval, but only count the sample once.
NumberOfSamplesLocked(allocation_size);
return;
}
last_sample_size_ =
sampling_interval_ * NumberOfSamplesLocked(allocation_size);
}
// Determines the next sampling interval by sampling from a Poisson process.
intptr_t HeapProfileSampler::GetNextSamplingIntervalLocked() {
ASSERT(thread_->isolate_group() != nullptr);
double u = thread_->isolate_group()->random()->NextDouble();
ASSERT(u >= 0.0 && u <= 1.0);
// Approximate sampling from a poisson distribution using an exponential
// distribution. We take the sample by feeding in a random uniform value in
// the range [0,1] to the inverse of the exponential CDF.
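// For an exponential distribution with mean m, the CDF is
// F(x) = 1 - e^(-x / m), so feeding u into its inverse gives
// F^-1(u) = -m * ln(1 - u); the line below uses m = sampling_interval_.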
double next = -log(1 - u) * sampling_interval_;
ASSERT(next > 0);
// + 1 since the sample implies the number of "failures" before the next
// success, which should be included in our interval.
return std::max(kObjectAlignment, static_cast<intptr_t>(next) + 1);
}
intptr_t HeapProfileSampler::NumberOfSamplesLocked(intptr_t allocation_size) {
// There's always at least a single sample if we've reached this point.
intptr_t sample_count = 1;
intptr_t next_interval = GetNextSamplingIntervalLocked();
intptr_t total_next_interval = next_interval;
// The remaining portion of the allocation that hasn't been accounted for yet.
intptr_t remaining_size =
allocation_size - static_cast<intptr_t>(thread_->end() - thread_->top());
while (remaining_size > 0) {
if (remaining_size > next_interval) {
// The allocation is large enough to be counted again.
sample_count++;
}
remaining_size =
std::max(remaining_size - next_interval, static_cast<intptr_t>(0));
next_interval = GetNextSamplingIntervalLocked();
total_next_interval += next_interval;
}
// Update the TLAB boundary to account for the potential multiple samples
// the last allocation generated.
SetNextSamplingIntervalLocked(total_next_interval);
return sample_count;
}
intptr_t HeapProfileSampler::SetNextSamplingIntervalLocked(
intptr_t next_interval) {
intptr_t new_end = thread_->end();
const intptr_t top = static_cast<intptr_t>(thread_->top());
const intptr_t true_end = static_cast<intptr_t>(thread_->true_end());
if (new_end == true_end) {
// Sampling was likely just enabled.
new_end = top;
}
new_end += next_interval;
if (new_end > true_end) {
// The next sampling point is in the next TLAB.
next_tlab_offset_ = new_end - true_end;
new_end = true_end;
}
ASSERT(top <= new_end);
thread_->set_end(new_end);
return next_interval;
}
} // namespace dart
#endif // !defined(PRODUCT)
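
As a sanity check on the accounting above, a standalone sketch (plain
C++, independent of the VM) showing that attributing one sampling
interval's worth of bytes per sample keeps the expected attributed bytes
close to the bytes actually allocated:

  #include <cmath>
  #include <cstdio>
  #include <random>

  int main() {
    const double kInterval = 512 * 1024;  // Default 512 KiB period.
    std::mt19937_64 rng(42);
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    // Inverse-CDF draw from Exp(mean = kInterval), mirroring
    // GetNextSamplingIntervalLocked().
    auto next_interval = [&] {
      return -std::log(1.0 - uniform(rng)) * kInterval;
    };
    const double kTotalBytes = 1e9;  // Simulate ~1 GB of small allocations.
    double allocated = 0.0, attributed = 0.0, boundary = next_interval();
    while (allocated < kTotalBytes) {
      allocated += 64.0;  // One small allocation.
      if (allocated >= boundary) {
        attributed += kInterval;  // Account one sample.
        boundary += next_interval();
      }
    }
    // Expect the two totals to agree within a few percent.
    printf("allocated: %.0f attributed: %.0f\n", allocated, attributed);
    return 0;
  }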

runtime/vm/heap/sampler.h (new file, 84 lines)

@ -0,0 +1,84 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_HEAP_SAMPLER_H_
#define RUNTIME_VM_HEAP_SAMPLER_H_
#if !defined(PRODUCT)
#include "include/dart_api.h"
#include "vm/globals.h"
namespace dart {
// Forward declarations.
class RwLock;
class Thread;
// Poisson sampler for memory allocations. We apply sampling individually to
// each byte. The whole allocation gets accounted as often as the number of
// sampled bytes it contains.
//
// Heavily inspired by Perfetto's Sampler class:
// https://source.chromium.org/chromium/chromium/src/+/531db6ec90bd7194d4d8064588966a0118d1495c:third_party/perfetto/src/profiling/memory/sampler.h;l=34
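//
// Example: with the default 512 KiB sampling interval, a 1 MiB allocation
// contains on average two sampled bytes and is therefore accounted twice on
// average, so expected attributed bytes match allocated bytes.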
class HeapProfileSampler {
public:
explicit HeapProfileSampler(Thread* thread);
~HeapProfileSampler();
void Enable(bool enabled);
void HandleNewTLAB(intptr_t old_tlab_remaining_space);
void SetSamplingInterval(intptr_t bytes_interval);
void SetSamplingCallback(Dart_HeapSamplingCallback callback);
void InvokeCallbackForLastSample(Dart_PersistentHandle type_name,
Dart_WeakPersistentHandle obj);
bool HasOutstandingSample() const {
return last_sample_size_ != kUninitialized;
}
// Computes the number of bytes that should be attributed to the sample.
// If the computed size is 0, the allocation should not be sampled.
//
// Due to how the Poisson sampling works, some samples should be accounted
// multiple times if they cover allocations larger than the average sampling
// rate.
void SampleSize(intptr_t allocation_size);
private:
intptr_t SetNextSamplingIntervalLocked(intptr_t next_interval);
intptr_t GetNextSamplingIntervalLocked();
intptr_t NumberOfSamplesLocked(intptr_t allocation_size);
// Protects sampling logic from modifications of callback_,
// sampling_interval_, and enabled_ while collecting a sample.
RwLock* lock_;
bool enabled_ = false;
Dart_HeapSamplingCallback callback_;
const intptr_t kUninitialized = -1;
const intptr_t kDefaultSamplingInterval = 1 << 19; // 512KiB
intptr_t sampling_interval_ = kDefaultSamplingInterval;
intptr_t interval_to_next_sample_;
intptr_t next_tlab_offset_ = kUninitialized;
intptr_t last_sample_size_ = kUninitialized;
Thread* thread_;
DISALLOW_COPY_AND_ASSIGN(HeapProfileSampler);
};
} // namespace dart
#endif // !defined(PRODUCT)
#endif // RUNTIME_VM_HEAP_SAMPLER_H_


@ -1580,8 +1580,19 @@ void Scavenger::TryAllocateNewTLAB(Thread* thread,
ASSERT(heap_ != Dart::vm_isolate_group()->heap());
ASSERT(!scavenging_);
AbandonRemainingTLAB(thread);
#if !defined(PRODUCT)
// Find the remaining space available in the TLAB before abandoning it so we
// can reset the heap sampling offset in the new TLAB.
intptr_t remaining = thread->true_end() - thread->top();
const bool heap_sampling_enabled = thread->end() != thread->true_end();
if (heap_sampling_enabled && remaining > min_size) {
// This is a sampling point and the TLAB isn't actually full.
thread->heap_sampler().SampleSize(min_size);
return;
}
#endif
AbandonRemainingTLAB(thread);
if (can_safepoint && !thread->force_growth()) {
ASSERT(thread->no_safepoint_scope_depth() == 0);
heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace, kPageSize);
@ -1594,6 +1605,9 @@ void Scavenger::TryAllocateNewTLAB(Thread* thread,
(page->end() - kAllocationRedZoneSize) - page->object_end();
if (available >= min_size) {
page->Acquire(thread);
#if !defined(PRODUCT)
thread->heap_sampler().HandleNewTLAB(remaining);
#endif
return;
}
}
@ -1603,6 +1617,9 @@ void Scavenger::TryAllocateNewTLAB(Thread* thread,
return;
}
page->Acquire(thread);
#if !defined(PRODUCT)
thread->heap_sampler().HandleNewTLAB(remaining);
#endif
}
void Scavenger::AbandonRemainingTLABForDebugging(Thread* thread) {


@ -251,7 +251,6 @@ class Scavenger {
if (UNLIKELY(remaining < size)) {
return 0;
}
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
thread->set_top(result + size);


@ -40,6 +40,7 @@
#include "vm/hash_table.h"
#include "vm/heap/become.h"
#include "vm/heap/heap.h"
#include "vm/heap/sampler.h"
#include "vm/heap/weak_code.h"
#include "vm/image_snapshot.h"
#include "vm/isolate_reload.h"
@ -2750,6 +2751,10 @@ void Object::CheckHandle() const {
#endif
}
#if !defined(PRODUCT)
static void NoopFinalizer(void* isolate_callback_data, void* peer) {}
#endif
ObjectPtr Object::Allocate(intptr_t cls_id,
intptr_t size,
Heap::Space space,
@ -2779,8 +2784,9 @@ ObjectPtr Object::Allocate(intptr_t cls_id,
OUT_OF_MEMORY();
}
}
NoSafepointScope no_safepoint(thread);
ObjectPtr raw_obj;
NoSafepointScope no_safepoint(thread);
InitializeObject(address, cls_id, size, compressed);
raw_obj = static_cast<ObjectPtr>(address + kHeapObjectTag);
ASSERT(cls_id == UntaggedObject::ClassIdTag::decode(raw_obj->untag()->tags_));
@ -2794,14 +2800,38 @@ ObjectPtr Object::Allocate(intptr_t cls_id,
raw_obj->untag()->SetMarkBitRelease();
heap->old_space()->AllocateBlack(size);
}
#ifndef PRODUCT
#if !defined(PRODUCT)
HeapProfileSampler& heap_sampler = thread->heap_sampler();
auto class_table = thread->isolate_group()->class_table();
if (heap_sampler.HasOutstandingSample()) {
IsolateGroup* isolate_group = thread->isolate_group();
Api::Scope api_scope(thread);
PersistentHandle* type_name = class_table->UserVisibleNameFor(cls_id);
if (type_name == nullptr) {
// Try the vm-isolate's class table for core types.
type_name =
Dart::vm_isolate_group()->class_table()->UserVisibleNameFor(cls_id);
}
// If type_name is still null, then we haven't finished initializing yet and
// should drop the sample.
if (type_name != nullptr) {
thread->IncrementNoCallbackScopeDepth();
Object& obj = Object::Handle(raw_obj);
auto weak_obj = FinalizablePersistentHandle::New(
isolate_group, obj, nullptr, NoopFinalizer, 0, /*auto_delete=*/false);
heap_sampler.InvokeCallbackForLastSample(
type_name->apiHandle(), weak_obj->ApiWeakPersistentHandle());
thread->DecrementNoCallbackScopeDepth();
}
}
if (class_table->ShouldTraceAllocationFor(cls_id)) {
uint32_t hash =
HeapSnapshotWriter::GetHeapSnapshotIdentityHash(thread, raw_obj);
Profiler::SampleAllocation(thread, cls_id, hash);
}
#endif // !PRODUCT
#endif // !defined(PRODUCT)
return raw_obj;
}
@ -5129,6 +5159,11 @@ void Class::set_name(const String& value) const {
const String& user_name = String::Handle(
Symbols::New(Thread::Current(), GenerateUserVisibleName()));
set_user_name(user_name);
IsolateGroup* isolate_group = IsolateGroup::Current();
PersistentHandle* type_name =
isolate_group->api_state()->AllocatePersistentHandle();
type_name->set_ptr(UserVisibleName());
isolate_group->class_table()->SetUserVisibleNameFor(id(), type_name);
}
#endif // !defined(PRODUCT)
}


@ -97,4 +97,11 @@ uint64_t Random::GlobalNextUInt64() {
return global_random->NextUInt64();
}
double Random::NextDouble() {
uint64_t mantissa = NextUInt64() & 0xFFFFFFFFFFFFF;
// The exponent value 0 in biased form.
const uint64_t exp = 1023;
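// With the exponent fixed at 1023 (biased zero) and a uniformly random
// 52-bit mantissa, the bit pattern encodes a double uniformly distributed
// in [1.0, 2.0); subtracting 1.0 maps it onto [0.0, 1.0).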
return bit_cast<double>(exp << 52 | mantissa) - 1.0;
}
} // namespace dart


@ -32,6 +32,9 @@ class Random {
static void Init();
static void Cleanup();
// Generates a uniform random variable in the range [0, 1).
double NextDouble();
private:
uint64_t NextState();
void Initialize(uint64_t seed);


@ -108,7 +108,13 @@ Thread::Thread(bool is_vm_isolate)
#if defined(USING_SAFE_STACK)
saved_safestack_limit_(0),
#endif
next_(nullptr) {
#if !defined(PRODUCT)
next_(nullptr),
heap_sampler_(this) {
#else
next_(nullptr) {
#endif
#if defined(SUPPORT_TIMELINE)
dart_stream_ = Timeline::GetDartStream();
ASSERT(dart_stream_ != nullptr);


@ -21,6 +21,7 @@
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/heap/sampler.h"
#include "vm/os_thread.h"
#include "vm/pending_deopts.h"
#include "vm/random.h"
@ -660,10 +661,20 @@ class Thread : public ThreadState {
Heap* heap() const { return heap_; }
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
// The TLAB memory boundaries.
//
// When the heap sampling profiler is enabled, we use the TLAB boundary to
// trigger slow path allocations so we can take a sample. This means that
// true_end() >= end(), where true_end() is the actual end address of the
// TLAB and end() is the chosen sampling boundary for the thread.
//
// When the heap sampling profiler is disabled, true_end() == end().
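//
// Sketch of the layout when sampling is enabled:
//
//   top()                 end()                     true_end()
//    |-- fast-path bumps ---|-- slow path (sample) ---|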
uword top() const { return top_; }
uword end() const { return end_; }
uword true_end() const { return true_end_; }
void set_top(uword top) { top_ = top; }
void set_end(uword end) { end_ = end; }
void set_true_end(uword true_end) { true_end_ = true_end; }
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
@ -1120,6 +1131,7 @@ class Thread : public ThreadState {
#ifndef PRODUCT
void PrintJSON(JSONStream* stream) const;
HeapProfileSampler& heap_sampler() { return heap_sampler_; }
#endif
PendingDeopts& pending_deopts() { return pending_deopts_; }
@ -1165,6 +1177,7 @@ class Thread : public ThreadState {
// Offsets up to this point can all fit in a byte on X64. All of the above
// fields are very abundantly accessed from code. Thus, keeping them first
// is important for code size (although code size on X64 is not a priority).
uword true_end_ = 0;
uword saved_stack_limit_;
uword stack_overflow_flags_;
ObjectPtr* field_table_values_;
@ -1316,6 +1329,10 @@ class Thread : public ThreadState {
bool inside_compiler_ = false;
#endif
#if !defined(PRODUCT)
HeapProfileSampler heap_sampler_;
#endif
explicit Thread(bool is_vm_isolate);
void StoreBufferRelease(


@ -59,6 +59,16 @@ void ThreadRegistry::VisitObjectPointers(
}
}
void ThreadRegistry::ForEachThread(
std::function<void(Thread* thread)> callback) {
MonitorLocker ml(threads_lock());
Thread* thread = active_list_;
while (thread != nullptr) {
callback(thread);
thread = thread->next_;
}
}
void ThreadRegistry::ReleaseStoreBuffers() {
MonitorLocker ml(threads_lock());
Thread* thread = active_list_;


@ -29,6 +29,7 @@ class ThreadRegistry {
ObjectPointerVisitor* visitor,
ValidationPolicy validate_frames);
void ForEachThread(std::function<void(Thread* thread)> callback);
void ReleaseStoreBuffers();
void AcquireMarkingStacks();
void ReleaseMarkingStacks();