[ VM / Service ] Allow for CpuSamples to be streamed when samples are about to be overwritten.

TEST=pkg/vm_service/test/cpu_samples_stream_test.dart

Change-Id: I1fcb49b6a79cde725a1f0622d1327b9a86165ae9
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/206920
Commit-Queue: Ben Konyi <bkonyi@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
This commit is contained in:
Ben Konyi 2021-07-16 20:50:58 +00:00 committed by commit-bot@chromium.org
parent 1e7db45b31
commit 4c9e322dd4
23 changed files with 893 additions and 328 deletions

View file

@ -11,7 +11,7 @@
"constraint, update this by running tools/generate_package_config.dart."
],
"configVersion": 2,
"generated": "2021-07-14T10:43:41.119864",
"generated": "2021-07-15T08:50:03.843620",
"generator": "tools/generate_package_config.dart",
"packages": [
{

View file

@ -66,6 +66,7 @@ front_end/tool/incremental_perf_test: Slow, Pass
kernel/testcases/*: Skip # These are not tests but input for tests.
vm/test/transformations/type_flow/transformer_test: Slow, Pass
vm/testcases/*: SkipByDesign # These are not tests but input for tests.
vm_service/test/cpu_samples_stream_test: Slow, Pass # Requires CPU sample buffer to fill.
wasm/*: SkipByDesign # These can't be run without running wasm:setup first.
[ $compiler == dart2analyzer ]

View file

@ -137,6 +137,7 @@ String assertEventKind(String obj) {
if (obj == "BreakpointRemoved") return obj;
if (obj == "BreakpointResolved") return obj;
if (obj == "BreakpointUpdated") return obj;
if (obj == "CpuSamples") return obj;
if (obj == "Extension") return obj;
if (obj == "GC") return obj;
if (obj == "Inspect") return obj;

View file

@ -1 +1 @@
version=3.48
version=3.49

View file

@ -26,7 +26,7 @@ export 'snapshot_graph.dart'
HeapSnapshotObjectNoData,
HeapSnapshotObjectNullData;
const String vmServiceVersion = '3.48.0';
const String vmServiceVersion = '3.49.0';
/// @optional
const String optional = 'optional';
@ -1149,7 +1149,7 @@ abstract class VmServiceInterface {
/// Debug | PauseStart, PauseExit, PauseBreakpoint, PauseInterrupted,
/// PauseException, PausePostRequest, Resume, BreakpointAdded,
/// BreakpointResolved, BreakpointRemoved, BreakpointUpdated, Inspect, None
/// Profiler | UserTagChanged
/// Profiler | CpuSamples, UserTagChanged
/// GC | GC
/// Extension | Extension
/// Timeline | TimelineEvents, TimelineStreamsSubscriptionUpdate
@ -1690,7 +1690,7 @@ class VmService implements VmServiceInterface {
// PauseStart, PauseExit, PauseBreakpoint, PauseInterrupted, PauseException, PausePostRequest, Resume, BreakpointAdded, BreakpointResolved, BreakpointRemoved, BreakpointUpdated, Inspect, None
Stream<Event> get onDebugEvent => _getEventController('Debug').stream;
// UserTagChanged
// CpuSamples, UserTagChanged
Stream<Event> get onProfilerEvent => _getEventController('Profiler').stream;
// GC
@ -2502,6 +2502,9 @@ class EventKind {
/// Notification that the UserTag for an isolate has been changed.
static const String kUserTagChanged = 'UserTagChanged';
/// A block of recently collected CPU samples.
static const String kCpuSamples = 'CpuSamples';
}
/// Adding new values to `InstanceKind` is considered a backwards compatible
@ -3899,6 +3902,10 @@ class Event extends Response {
@optional
String? previousTag;
/// A CPU profile containing recent samples.
@optional
CpuSamples? cpuSamples;
/// Binary data associated with the event.
///
/// This is provided for the event kinds:
@ -3933,6 +3940,7 @@ class Event extends Response {
this.last,
this.updatedTag,
this.previousTag,
this.cpuSamples,
this.data,
});
@ -3977,6 +3985,8 @@ class Event extends Response {
last = json['last'];
updatedTag = json['updatedTag'];
previousTag = json['previousTag'];
cpuSamples = createServiceObject(json['cpuSamples'], const ['CpuSamples'])
as CpuSamples?;
data = json['data'];
}
@ -4018,6 +4028,7 @@ class Event extends Response {
_setIfNotNull(json, 'last', last);
_setIfNotNull(json, 'updatedTag', updatedTag);
_setIfNotNull(json, 'previousTag', previousTag);
_setIfNotNull(json, 'cpuSamples', cpuSamples?.toJson());
_setIfNotNull(json, 'data', data);
return json;
}

View file

@ -0,0 +1,71 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
import 'dart:async';
import 'package:test/test.dart';
import 'package:vm_service/vm_service.dart';
import 'common/service_test_common.dart';
import 'common/test_helper.dart';
// Naive doubly-recursive Fibonacci. Deliberately inefficient: the testee uses
// it to burn CPU with progressively deeper call stacks so the profiler's
// sample buffer fills quickly.
fib(int n) => n <= 1 ? n : fib(n - 1) + fib(n - 2);
// Testee entry point: loops forever computing Fibonacci numbers of growing
// size so the profiler keeps collecting samples and its buffer fills,
// forcing completed sample blocks to be streamed.
void testMain() async {
  int i = 10;
  while (true) {
    ++i;
    // Create progressively deeper stacks to more quickly fill the sample
    // buffer.
    fib(i);
  }
}
// Subscription to the Profiler event stream; kept at top level so the
// listener callback can cancel it once enough events have arrived.
late StreamSubscription sub;

var tests = <IsolateTest>[
  (VmService service, IsolateRef isolate) async {
    final completer = Completer<void>();
    int count = 0;
    int previousOrigin = 0;
    sub = service.onProfilerEvent.listen((event) async {
      count++;
      // Every streamed event must be a CpuSamples event carrying a non-empty
      // batch of samples.
      expect(event.kind, EventKind.kCpuSamples);
      expect(event.cpuSamples, isNotNull);
      expect(event.cpuSamples!.samples!.isNotEmpty, true);
      // Blocks should arrive in (non-strict) chronological order: each
      // event's time origin must not precede the previous event's.
      if (previousOrigin != 0) {
        expect(
          event.cpuSamples!.timeOriginMicros! >= previousOrigin,
          true,
        );
      }
      previousOrigin = event.cpuSamples!.timeOriginMicros!;
      // Two events are enough to verify both delivery and ordering.
      if (count == 2) {
        await sub.cancel();
        completer.complete();
      }
    });
    await service.streamListen(EventStreams.kProfiler);
    await completer.future;
    await service.streamCancel(EventStreams.kProfiler);
  },
];
// Runs the test with a tiny 1-second sample buffer so it fills (and blocks
// start streaming) quickly, and with --profile-vm so native VM frames are
// sampled as well.
main([args = const <String>[]]) async => await runIsolateTests(
      args,
      tests,
      'cpu_samples_stream_test.dart',
      testeeConcurrent: testMain,
      extraArgs: [
        '--sample-buffer-duration=1',
        '--profile-vm',
      ],
    );

View file

@ -12,7 +12,7 @@ var tests = <VMTest>[
final result = await vm.invokeRpcNoUpgrade('getVersion', {});
expect(result['type'], 'Version');
expect(result['major'], 3);
expect(result['minor'], 48);
expect(result['minor'], 49);
expect(result['_privateMajor'], 0);
expect(result['_privateMinor'], 0);
},

View file

@ -12,7 +12,7 @@ var tests = <VMTest>[
final result = await vm.invokeRpcNoUpgrade('getVersion', {});
expect(result['type'], equals('Version'));
expect(result['major'], equals(3));
expect(result['minor'], equals(48));
expect(result['minor'], equals(49));
expect(result['_privateMajor'], equals(0));
expect(result['_privateMinor'], equals(0));
},

View file

@ -45,6 +45,10 @@ class RelaxedAtomic {
return value_.fetch_and(arg, order);
}
// Atomically replaces the stored value with |arg| and returns the value that
// was held before the swap (relaxed ordering by default, matching this
// class's other operations).
T exchange(T arg, std::memory_order order = std::memory_order_relaxed) {
  return value_.exchange(arg, order);
}
bool compare_exchange_weak(
T& expected, // NOLINT
T desired,
@ -76,6 +80,10 @@ class RelaxedAtomic {
}
T operator+=(T arg) { return fetch_add(arg) + arg; }
T operator-=(T arg) { return fetch_sub(arg) - arg; }
// Pre-increment/decrement: return the updated value *by value*.
// (Returning T& here is ill-formed: `fetch_add(1) + 1` is a temporary, and a
// non-const lvalue reference cannot bind to it. There is also no stable
// storage holding the post-update value to reference.)
T operator++() { return fetch_add(1) + 1; }
T operator--() { return fetch_sub(1) - 1; }
// Post-increment/decrement: return the value observed before the update.
T operator++(int) { return fetch_add(1); }
T operator--(int) { return fetch_sub(1); }
private:
std::atomic<T> value_;

View file

@ -2383,6 +2383,25 @@ void Isolate::Run() {
reinterpret_cast<uword>(this));
}
#if !defined(PRODUCT)
// Installs |current| as this isolate's active CPU sample block.
//
// Callers must already hold current_sample_block_lock(). A non-null block is
// tagged as a CPU (non-allocation) block and has its owner recorded so that
// completed blocks can be attributed back to this isolate when streamed.
void Isolate::set_current_sample_block(SampleBlock* current) {
  ASSERT(current_sample_block_lock_.IsOwnedByCurrentThread());
  if (current != nullptr) {
    current->set_is_allocation_block(false);
    current->set_owner(this);
  }
  current_sample_block_ = current;
}
// Installs |current| as this isolate's active Dart-allocation sample block.
//
// No lock is taken here: allocations only occur on the isolate's mutator
// thread, so this pointer is not contended (see the matching note on the
// accessor in isolate.h).
void Isolate::set_current_allocation_sample_block(SampleBlock* current) {
  if (current != nullptr) {
    current->set_is_allocation_block(true);
    current->set_owner(this);
  }
  current_allocation_sample_block_ = current;
}
#endif // !defined(PRODUCT)
// static
void Isolate::NotifyLowMemory() {
Isolate::KillAllIsolates(Isolate::kLowMemoryMsg);
@ -2516,11 +2535,12 @@ void Isolate::Shutdown() {
}
void Isolate::LowLevelCleanup(Isolate* isolate) {
#if !defined(DART_PECOMPILED_RUNTIME)
#if !defined(DART_PRECOMPILED_RUNTIME)
if (KernelIsolate::IsKernelIsolate(isolate)) {
KernelIsolate::SetKernelIsolate(nullptr);
}
#endif
} else if (ServiceIsolate::IsServiceIsolate(isolate)) {
if (ServiceIsolate::IsServiceIsolate(isolate)) {
ServiceIsolate::SetServiceIsolate(nullptr);
}
@ -2538,6 +2558,26 @@ void Isolate::LowLevelCleanup(Isolate* isolate) {
// requests anymore.
Thread::ExitIsolate();
#if !defined(PRODUCT)
// Cleanup profiler state.
SampleBlock* cpu_block = isolate->current_sample_block();
if (cpu_block != nullptr) {
cpu_block->release_block();
}
SampleBlock* allocation_block = isolate->current_allocation_sample_block();
if (allocation_block != nullptr) {
allocation_block->release_block();
}
// Process the previously assigned sample blocks if we're using the
// profiler's sample buffer. Some tests create their own SampleBlockBuffer
// and handle block processing themselves.
if ((cpu_block != nullptr || allocation_block != nullptr) &&
Profiler::sample_block_buffer() != nullptr) {
Profiler::sample_block_buffer()->ProcessCompletedBlocks();
}
#endif // !defined(PRODUCT)
// Now it's safe to delete the isolate.
delete isolate;

View file

@ -74,6 +74,8 @@ class RwLock;
class SafepointRwLock;
class SafepointHandler;
class SampleBuffer;
class SampleBlock;
class SampleBlockBuffer;
class SendPort;
class SerializedObjectBuffer;
class ServiceIdZone;
@ -1084,6 +1086,27 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
#if !defined(PRODUCT)
Debugger* debugger() const { return debugger_; }
// NOTE: this lock should only be acquired within the profiler signal handler.
Mutex* current_sample_block_lock() const {
return const_cast<Mutex*>(&current_sample_block_lock_);
}
// Returns the current SampleBlock used to track CPU profiling samples.
//
// NOTE: current_sample_block_lock() should be held when accessing this
// block.
SampleBlock* current_sample_block() const { return current_sample_block_; }
void set_current_sample_block(SampleBlock* current);
// Returns the current SampleBlock used to track Dart allocation samples.
//
// Allocations should only occur on the mutator thread for an isolate, so we
// don't need to worry about grabbing a lock while accessing this block.
SampleBlock* current_allocation_sample_block() const {
return current_allocation_sample_block_;
}
void set_current_allocation_sample_block(SampleBlock* current);
void set_single_step(bool value) { single_step_ = value; }
bool single_step() const { return single_step_; }
static intptr_t single_step_offset() {
@ -1528,6 +1551,21 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
// the top.
#if !defined(PRODUCT)
Debugger* debugger_ = nullptr;
// SampleBlock containing CPU profiling samples.
//
// Can be accessed by multiple threads, so current_sample_block_lock_ should
// be acquired before accessing.
SampleBlock* current_sample_block_ = nullptr;
Mutex current_sample_block_lock_;
// SampleBlock containing Dart allocation profiling samples.
//
// Allocations should only occur on the mutator thread for an isolate, so we
// shouldn't need to worry about grabbing a lock for the allocation sample
// block.
SampleBlock* current_allocation_sample_block_ = nullptr;
int64_t last_resume_timestamp_;
VMTagCounters vm_tag_counters_;

View file

@ -176,7 +176,7 @@ ISOLATE_UNIT_TEST_CASE(StackTraceMallocHookSimpleJSONTest) {
EnableMallocHooksAndStacksScope scope;
ClearProfileVisitor cpv(Isolate::Current());
Profiler::sample_buffer()->VisitSamples(&cpv);
Profiler::sample_block_buffer()->VisitSamples(&cpv);
char* var = static_cast<char*>(malloc(16 * sizeof(char)));
JSONStream js;

View file

@ -19,6 +19,7 @@
#include "vm/object.h"
#include "vm/os.h"
#include "vm/profiler.h"
#include "vm/profiler_service.h"
#include "vm/reusable_handles.h"
#include "vm/signal_handler.h"
#include "vm/simulator.h"
@ -61,8 +62,8 @@ DEFINE_FLAG(
#ifndef PRODUCT
RelaxedAtomic<bool> Profiler::initialized_ = false;
SampleBuffer* Profiler::sample_buffer_ = NULL;
AllocationSampleBuffer* Profiler::allocation_sample_buffer_ = NULL;
SampleBlockBuffer* Profiler::sample_block_buffer_ = nullptr;
AllocationSampleBuffer* Profiler::allocation_sample_buffer_ = nullptr;
ProfilerCounters Profiler::counters_ = {};
void Profiler::Init() {
@ -75,9 +76,9 @@ void Profiler::Init() {
SetSamplePeriod(FLAG_profile_period);
// The profiler may have been shutdown previously, in which case the sample
// buffer will have already been initialized.
if (sample_buffer_ == NULL) {
intptr_t capacity = CalculateSampleBufferCapacity();
sample_buffer_ = new SampleBuffer(capacity);
if (sample_block_buffer_ == nullptr) {
intptr_t num_blocks = CalculateSampleBufferCapacity();
sample_block_buffer_ = new SampleBlockBuffer(num_blocks);
Profiler::InitAllocationSampleBuffer();
}
ThreadInterrupter::Init();
@ -92,14 +93,30 @@ void Profiler::InitAllocationSampleBuffer() {
}
}
// Visitor that detaches every live isolate from its current CPU and
// allocation sample blocks, so the blocks' backing storage can be freed
// during profiler shutdown without leaving dangling pointers behind.
class SampleBlockCleanupVisitor : public IsolateVisitor {
 public:
  SampleBlockCleanupVisitor() = default;
  virtual ~SampleBlockCleanupVisitor() = default;

  void VisitIsolate(Isolate* isolate) {
    // The allocation block is only touched by the mutator thread, so no lock
    // is needed; the CPU block can be accessed concurrently (e.g. from the
    // profiler signal handler) and requires the lock.
    isolate->set_current_allocation_sample_block(nullptr);
    {
      MutexLocker ml(isolate->current_sample_block_lock());
      isolate->set_current_sample_block(nullptr);
    }
  }
};
// Shuts the profiler down: stops the thread interrupter, unlinks every
// isolate from its sample blocks, and frees the shared sample block buffer.
void Profiler::Cleanup() {
  if (!FLAG_profiler) {
    return;
  }
  ASSERT(initialized_);
  ThreadInterrupter::Cleanup();
  // Clear per-isolate block pointers *before* deleting the buffer that backs
  // them, so no isolate is left holding a dangling SampleBlock pointer.
  SampleBlockCleanupVisitor visitor;
  Isolate::VisitIsolates(&visitor);
  delete sample_block_buffer_;
  sample_block_buffer_ = nullptr;
  initialized_ = false;
}
@ -130,16 +147,16 @@ static intptr_t SamplesPerSecond() {
intptr_t Profiler::CalculateSampleBufferCapacity() {
if (FLAG_sample_buffer_duration <= 0) {
return SampleBuffer::kDefaultBufferCapacity;
return SampleBlockBuffer::kDefaultBlockCount;
}
// Deeper stacks require more than a single Sample object to be represented
// correctly. These samples are chained, so we need to determine the worst
// case sample chain length for a single stack.
const intptr_t max_sample_chain_length =
FLAG_max_profile_depth / kMaxSamplesPerTick;
const intptr_t buffer_size = FLAG_sample_buffer_duration *
SamplesPerSecond() * max_sample_chain_length;
return buffer_size;
const intptr_t sample_count = FLAG_sample_buffer_duration *
SamplesPerSecond() * max_sample_chain_length;
return (sample_count / SampleBlock::kSamplesPerBlock) + 1;
}
void Profiler::SetSamplePeriod(intptr_t period) {
@ -156,7 +173,166 @@ void Profiler::UpdateSamplePeriod() {
SetSamplePeriod(FLAG_profile_period);
}
SampleBuffer::SampleBuffer(intptr_t capacity) {
// Allocates a single page-aligned region large enough for
// |blocks| * |samples_per_block| Sample slots, then carves it into |blocks|
// SampleBlocks, each viewing a disjoint slice of the shared array.
SampleBlockBuffer::SampleBlockBuffer(intptr_t blocks,
                                     intptr_t samples_per_block) {
  const intptr_t size = Utils::RoundUp(
      blocks * samples_per_block * sizeof(Sample), VirtualMemory::PageSize());
  const bool kNotExecutable = false;
  memory_ = VirtualMemory::Allocate(size, kNotExecutable, "dart-profiler");
  if (memory_ == NULL) {
    OUT_OF_MEMORY();
  }
  sample_buffer_ = reinterpret_cast<Sample*>(memory_->address());
  blocks_ = new SampleBlock[blocks];
  for (intptr_t i = 0; i < blocks; ++i) {
    // Point block i at its slice of the shared sample array.
    blocks_[i].Init(&sample_buffer_[i * samples_per_block], samples_per_block);
  }
  capacity_ = blocks;
  cursor_ = 0;
  free_list_head_ = nullptr;
  free_list_tail_ = nullptr;
}
SampleBlockBuffer::~SampleBlockBuffer() {
  // Release the VirtualMemory region reserved in the constructor; without
  // this the sample storage leaks on every profiler shutdown/re-init cycle
  // (only the block descriptors were freed before).
  delete memory_;
  memory_ = nullptr;
  sample_buffer_ = nullptr;
  delete[] blocks_;
  blocks_ = nullptr;
  capacity_ = 0;
  cursor_ = 0;
}
// Hands out the next never-used SampleBlock; once all blocks have been handed
// out, falls back to re-using a previously freed block. Returns nullptr when
// no block is available at all.
SampleBlock* SampleBlockBuffer::ReserveSampleBlock() {
  // Don't increment right away to avoid unlikely wrap-around errors.
  if (cursor_.load() < capacity_) {
    intptr_t index = cursor_.fetch_add(1u);
    // Check the index again to make sure the last block hasn't been snatched
    // from underneath us.
    if (index < capacity_) {
      return &blocks_[index];
    }
  }
  // Try to re-use a previously freed SampleBlock once we've handed out each
  // block at least once. Freed blocks aren't cleared immediately and are still
  // valid until they're re-allocated, similar to how a ring buffer would clear
  // the oldest samples.
  SampleBlock* block = GetFreeBlock();
  if (block != nullptr) {
    // Only cleared at re-allocation time so stale samples stay readable until
    // the block is actually re-used.
    block->Clear();
  }
  return block;
}
// Streams every full, not-yet-evictable block to clients subscribed to the
// Profiler service stream, then marks it evictable and returns it to the
// free list so it can be re-used before its samples are overwritten.
void SampleBlockBuffer::ProcessCompletedBlocks() {
  Thread* thread = Thread::Current();
  int64_t start = Dart_TimelineGetMicros();
  for (intptr_t i = 0; i < capacity_; ++i) {
    SampleBlock* block = &blocks_[i];
    if (block->is_full() && !block->evictable()) {
      if (Service::profiler_stream.enabled()) {
        // Build a CpuSamples profile for just this block and post it as a
        // kCpuSamples event attributed to the block's owning isolate.
        Profile profile(block->owner());
        profile.Build(thread, nullptr, block);
        ServiceEvent event(block->owner(), ServiceEvent::kCpuSamples);
        event.set_cpu_profile(&profile);
        Service::HandleEvent(&event);
      }
      block->evictable_ = true;
      FreeBlock(block);
    }
  }
  int64_t end = Dart_TimelineGetMicros();
  // Record how long block processing took on the timeline for diagnostics.
  Dart_TimelineEvent("SampleBlockBuffer::ProcessCompletedBlocks", start, end,
                     Dart_Timeline_Event_Duration, 0, nullptr, nullptr);
}
// Aggregates the samples of every block into |buffer| (allocating a new
// ProcessedSampleBuffer in the current zone when none is supplied), letting
// each block apply |filter| to its own samples.
ProcessedSampleBuffer* SampleBlockBuffer::BuildProcessedSampleBuffer(
    SampleFilter* filter,
    ProcessedSampleBuffer* buffer) {
  ASSERT(filter != NULL);
  Thread* thread = Thread::Current();
  Zone* zone = thread->zone();

  if (buffer == nullptr) {
    buffer = new (zone) ProcessedSampleBuffer();
  }

  for (intptr_t i = 0; i < capacity_; ++i) {
    (&blocks_[i])->BuildProcessedSampleBuffer(filter, buffer);
  }
  return buffer;
}
// Atomically claims the next free sample slot in this block. Returns nullptr
// when the block is (or just became) full.
Sample* SampleBlock::ReserveSample() {
  if (full_.load()) {
    return nullptr;
  }
  intptr_t slot = cursor_.fetch_add(1u);
  // The thread that claims the final slot marks the block full; racing
  // threads that overshoot (slot >= capacity_) simply get nullptr below.
  if (slot + 1 == capacity_) {
    full_ = true;
  }
  return (slot < capacity_) ? At(slot) : nullptr;
}
// Reserves a continuation Sample of the same kind as |previous| (allocation
// vs. CPU) from the owning isolate's current block and chains it onto
// |previous|. Returns the new sample, or nullptr when the buffer is
// exhausted and the tail of the chain must be dropped.
Sample* SampleBlock::ReserveSampleAndLink(Sample* previous) {
  ASSERT(previous != nullptr);
  SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
  Isolate* isolate = owner_;
  ASSERT(isolate != nullptr);
  Sample* next = previous->is_allocation_sample()
                     ? buffer->ReserveAllocationSample(isolate)
                     : buffer->ReserveCPUSample(isolate);
  // Reservation fails when no blocks remain (ReserveSampleImpl returns
  // nullptr); propagate the failure instead of dereferencing a null sample.
  if (next == nullptr) {
    return nullptr;
  }
  next->Init(previous->port(), previous->timestamp(), previous->tid());
  next->set_head_sample(false);
  // Mark that previous continues at next.
  previous->SetContinuation(next);
  return next;
}
// Reserves a slot for a CPU (timer-tick) sample attributed to |isolate|.
Sample* SampleBlockBuffer::ReserveCPUSample(Isolate* isolate) {
  return ReserveSampleImpl(isolate, false);
}

// Reserves a slot for a Dart allocation sample attributed to |isolate|.
Sample* SampleBlockBuffer::ReserveAllocationSample(Isolate* isolate) {
  return ReserveSampleImpl(isolate, true);
}
// Reserves a Sample from the isolate's current (CPU or allocation) block,
// installing a fresh block and retrying when the current one is full or
// absent. Returns nullptr when no blocks remain and the sample is dropped.
Sample* SampleBlockBuffer::ReserveSampleImpl(Isolate* isolate,
                                             bool allocation_sample) {
  SampleBlock* block = allocation_sample
                           ? isolate->current_allocation_sample_block()
                           : isolate->current_sample_block();
  Sample* sample = nullptr;
  if (block != nullptr) {
    sample = block->ReserveSample();
  }
  if (sample != nullptr) {
    return sample;
  }
  // Current block is full (or unset): swap in a replacement block.
  SampleBlock* next = nullptr;
  if (allocation_sample) {
    // We only need to be locked while accessing the CPU sample block since
    // Dart allocations can only occur on the mutator thread.
    next = ReserveSampleBlock();
    if (next == nullptr) {
      // We're out of blocks to reserve. Drop the sample.
      return nullptr;
    }
    isolate->set_current_allocation_sample_block(next);
  } else {
    MutexLocker locker(isolate->current_sample_block_lock());
    next = ReserveSampleBlock();
    if (next == nullptr) {
      // We're out of blocks to reserve. Drop the sample.
      return nullptr;
    }
    isolate->set_current_sample_block(next);
  }
  next->set_is_allocation_block(allocation_sample);
  // Nudge the mutator thread so completed blocks get processed/streamed
  // before their samples can be overwritten.
  can_process_block_.store(true);
  isolate->mutator_thread()->ScheduleInterrupts(Thread::kVMInterrupt);
  // Retry with the freshly installed block.
  return ReserveSampleImpl(isolate, allocation_sample);
}
AllocationSampleBuffer::AllocationSampleBuffer(intptr_t capacity) {
const intptr_t size =
Utils::RoundUp(capacity * sizeof(Sample), VirtualMemory::PageSize());
const bool kNotExecutable = false;
@ -164,85 +340,19 @@ SampleBuffer::SampleBuffer(intptr_t capacity) {
if (memory_ == NULL) {
OUT_OF_MEMORY();
}
samples_ = reinterpret_cast<Sample*>(memory_->address());
capacity_ = capacity;
Init(reinterpret_cast<Sample*>(memory_->address()), capacity);
free_sample_list_ = nullptr;
cursor_ = 0;
if (FLAG_trace_profiler) {
OS::PrintErr("Profiler holds %" Pd " samples\n", capacity);
OS::PrintErr("Profiler sample is %" Pd " bytes\n", sizeof(Sample));
OS::PrintErr("Profiler memory usage = %" Pd " bytes\n", size);
}
if (FLAG_sample_buffer_duration != 0) {
OS::PrintErr(
"** WARNING ** Custom sample buffer size provided via "
"--sample-buffer-duration\n");
OS::PrintErr(
"The sample buffer can hold at least %ds worth of "
"samples with stacks depths of up to %d, collected at "
"a sample rate of %" Pd "Hz.\n",
FLAG_sample_buffer_duration, FLAG_max_profile_depth,
SamplesPerSecond());
OS::PrintErr("The resulting sample buffer size is %" Pd " bytes.\n", size);
}
}
AllocationSampleBuffer::AllocationSampleBuffer(intptr_t capacity)
: SampleBuffer(capacity), mutex_(), free_sample_list_(NULL) {}
SampleBuffer::~SampleBuffer() {
delete memory_;
}
AllocationSampleBuffer::~AllocationSampleBuffer() {
}
Sample* SampleBuffer::At(intptr_t idx) const {
ASSERT(idx >= 0);
ASSERT(idx < capacity_);
return &samples_[idx];
}
intptr_t SampleBuffer::ReserveSampleSlot() {
ASSERT(samples_ != NULL);
uintptr_t cursor = cursor_.fetch_add(1u);
// Map back into sample buffer range.
cursor = cursor % capacity_;
return cursor;
}
Sample* SampleBuffer::ReserveSample() {
return At(ReserveSampleSlot());
}
Sample* SampleBuffer::ReserveSampleAndLink(Sample* previous) {
ASSERT(previous != NULL);
intptr_t next_index = ReserveSampleSlot();
Sample* next = At(next_index);
next->Init(previous->port(), previous->timestamp(), previous->tid());
next->set_head_sample(false);
// Mark that previous continues at next.
previous->SetContinuationIndex(next_index);
return next;
}
void AllocationSampleBuffer::FreeAllocationSample(Sample* sample) {
MutexLocker ml(&mutex_);
while (sample != NULL) {
intptr_t continuation_index = -1;
if (sample->is_continuation_sample()) {
continuation_index = sample->continuation_index();
}
while (sample != nullptr) {
Sample* next = sample->continuation_sample();
sample->Clear();
sample->set_next_free(free_sample_list_);
free_sample_list_ = sample;
if (continuation_index != -1) {
sample = At(continuation_index);
} else {
sample = NULL;
}
sample = next;
}
}
@ -255,7 +365,7 @@ intptr_t AllocationSampleBuffer::ReserveSampleSlotLocked() {
uint8_t* free_sample_ptr = reinterpret_cast<uint8_t*>(free_sample);
return static_cast<intptr_t>((free_sample_ptr - samples_array_ptr) /
sizeof(Sample));
} else if (cursor_ < static_cast<uintptr_t>(capacity_ - 1)) {
} else if (cursor_ < static_cast<intptr_t>(capacity_ - 1)) {
return cursor_ += 1;
} else {
return -1;
@ -277,7 +387,7 @@ Sample* AllocationSampleBuffer::ReserveSampleAndLink(Sample* previous) {
previous->native_allocation_size_bytes());
next->set_head_sample(false);
// Mark that previous continues at next.
previous->SetContinuationIndex(next_index);
previous->SetContinuation(next);
return next;
}
@ -957,12 +1067,16 @@ static bool InitialRegisterCheck(uintptr_t pc, uintptr_t fp, uintptr_t sp) {
}
static Sample* SetupSample(Thread* thread,
SampleBuffer* sample_buffer,
bool allocation_sample,
ThreadId tid) {
ASSERT(thread != NULL);
Isolate* isolate = thread->isolate();
ASSERT(sample_buffer != NULL);
Sample* sample = sample_buffer->ReserveSample();
SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
Sample* sample = allocation_sample ? buffer->ReserveAllocationSample(isolate)
: buffer->ReserveCPUSample(isolate);
if (sample == nullptr) {
return nullptr;
}
sample->Init(isolate->main_port(), OS::GetCurrentMonotonicMicros(), tid);
uword vm_tag = thread->vm_tag();
#if defined(USING_SIMULATOR)
@ -980,7 +1094,8 @@ static Sample* SetupSample(Thread* thread,
return sample;
}
static Sample* SetupSampleNative(SampleBuffer* sample_buffer, ThreadId tid) {
static Sample* SetupSampleNative(AllocationSampleBuffer* sample_buffer,
ThreadId tid) {
Sample* sample = sample_buffer->ReserveSample();
if (sample == NULL) {
return NULL;
@ -1129,11 +1244,10 @@ void Profiler::SampleAllocation(Thread* thread,
if (!CheckIsolate(isolate)) {
return;
}
const bool exited_dart_code = thread->HasExitedDartCode();
SampleBuffer* sample_buffer = Profiler::sample_buffer();
if (sample_buffer == NULL) {
SampleBlockBuffer* buffer = Profiler::sample_block_buffer();
if (buffer == nullptr) {
// Profiler not initialized.
return;
}
@ -1157,24 +1271,30 @@ void Profiler::SampleAllocation(Thread* thread,
return;
}
Sample* sample = SetupSample(thread, sample_buffer, os_thread->trace_id());
Sample* sample =
SetupSample(thread, /*allocation_block*/ true, os_thread->trace_id());
if (sample == nullptr) {
// We were unable to assign a sample for this allocation.
counters_.sample_allocation_failure++;
return;
}
sample->SetAllocationCid(cid);
sample->set_allocation_identity_hash(identity_hash);
if (FLAG_profile_vm_allocation) {
ProfilerNativeStackWalker native_stack_walker(
&counters_, (isolate != NULL) ? isolate->main_port() : ILLEGAL_PORT,
sample, sample_buffer, stack_lower, stack_upper, pc, fp, sp);
sample, isolate->current_allocation_sample_block(), stack_lower,
stack_upper, pc, fp, sp);
native_stack_walker.walk();
} else if (exited_dart_code) {
ProfilerDartStackWalker dart_exit_stack_walker(
thread, sample, sample_buffer, pc, fp, /* allocation_sample*/ true);
thread, sample, isolate->current_allocation_sample_block(), pc, fp,
/* allocation_sample*/ true);
dart_exit_stack_walker.walk();
} else {
// Fall back.
uintptr_t pc = OS::GetProgramCounter();
Sample* sample = SetupSample(thread, sample_buffer, os_thread->trace_id());
sample->SetAllocationCid(cid);
sample->SetAt(0, pc);
}
}
@ -1231,20 +1351,16 @@ Sample* Profiler::SampleNativeAllocation(intptr_t skip_count,
return sample;
}
void Profiler::SampleThreadSingleFrame(Thread* thread, uintptr_t pc) {
void Profiler::SampleThreadSingleFrame(Thread* thread,
Sample* sample,
uintptr_t pc) {
ASSERT(thread != NULL);
OSThread* os_thread = thread->os_thread();
ASSERT(os_thread != NULL);
Isolate* isolate = thread->isolate();
SampleBuffer* sample_buffer = Profiler::sample_buffer();
if (sample_buffer == NULL) {
// Profiler not initialized.
return;
}
ASSERT(Profiler::sample_block_buffer() != nullptr);
// Setup sample.
Sample* sample = SetupSample(thread, sample_buffer, os_thread->trace_id());
// Increment counter for vm tag.
VMTagCounters* counters = isolate->vm_tag_counters();
ASSERT(counters != NULL);
@ -1306,22 +1422,37 @@ void Profiler::SampleThread(Thread* thread,
return;
}
SampleBlockBuffer* sample_block_buffer = Profiler::sample_block_buffer();
if (sample_block_buffer == nullptr) {
// Profiler not initialized.
return;
}
// Setup sample.
Sample* sample =
SetupSample(thread, /*allocation_block*/ false, os_thread->trace_id());
if (sample == nullptr) {
// We were unable to assign a sample for this profiler tick.
counters_.sample_allocation_failure++;
return;
}
if (thread->IsMutatorThread()) {
if (isolate->IsDeoptimizing()) {
counters_.single_frame_sample_deoptimizing.fetch_add(1);
SampleThreadSingleFrame(thread, pc);
SampleThreadSingleFrame(thread, sample, pc);
return;
}
if (isolate->group()->compaction_in_progress()) {
// The Dart stack isn't fully walkable.
SampleThreadSingleFrame(thread, pc);
SampleThreadSingleFrame(thread, sample, pc);
return;
}
}
if (!InitialRegisterCheck(pc, fp, sp)) {
counters_.single_frame_sample_register_check.fetch_add(1);
SampleThreadSingleFrame(thread, pc);
SampleThreadSingleFrame(thread, sample, pc);
return;
}
@ -1331,20 +1462,13 @@ void Profiler::SampleThread(Thread* thread,
&stack_upper)) {
counters_.single_frame_sample_get_and_validate_stack_bounds.fetch_add(1);
// Could not get stack boundary.
SampleThreadSingleFrame(thread, pc);
SampleThreadSingleFrame(thread, sample, pc);
return;
}
// At this point we have a valid stack boundary for this isolate and
// know that our initial stack and frame pointers are within the boundary.
SampleBuffer* sample_buffer = Profiler::sample_buffer();
if (sample_buffer == NULL) {
// Profiler not initialized.
return;
}
// Setup sample.
Sample* sample = SetupSample(thread, sample_buffer, os_thread->trace_id());
// Increment counter for vm tag.
VMTagCounters* counters = isolate->vm_tag_counters();
ASSERT(counters != NULL);
@ -1354,9 +1478,11 @@ void Profiler::SampleThread(Thread* thread,
ProfilerNativeStackWalker native_stack_walker(
&counters_, (isolate != NULL) ? isolate->main_port() : ILLEGAL_PORT,
sample, sample_buffer, stack_lower, stack_upper, pc, fp, sp);
sample, isolate->current_sample_block(), stack_lower, stack_upper, pc, fp,
sp);
const bool exited_dart_code = thread->HasExitedDartCode();
ProfilerDartStackWalker dart_stack_walker(thread, sample, sample_buffer, pc,
ProfilerDartStackWalker dart_stack_walker(thread, sample,
isolate->current_sample_block(), pc,
fp, /* allocation_sample*/ false);
// All memory access is done inside CollectSample.
@ -1480,12 +1606,14 @@ const CodeDescriptor* CodeLookupTable::FindCode(uword pc) const {
}
ProcessedSampleBuffer* SampleBuffer::BuildProcessedSampleBuffer(
SampleFilter* filter) {
ASSERT(filter != NULL);
SampleFilter* filter,
ProcessedSampleBuffer* buffer) {
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
ProcessedSampleBuffer* buffer = new (zone) ProcessedSampleBuffer();
if (buffer == nullptr) {
buffer = new (zone) ProcessedSampleBuffer();
}
const intptr_t length = capacity();
for (intptr_t i = 0; i < length; i++) {
@ -1498,12 +1626,6 @@ ProcessedSampleBuffer* SampleBuffer::BuildProcessedSampleBuffer(
// An inner sample in a chain of samples.
continue;
}
// If we're requesting all the native allocation samples, we don't care
// whether or not we're in the same isolate as the sample.
if (sample->port() != filter->port()) {
// Another isolate.
continue;
}
if (sample->timestamp() == 0) {
// Empty.
continue;
@ -1512,17 +1634,25 @@ ProcessedSampleBuffer* SampleBuffer::BuildProcessedSampleBuffer(
// No frames.
continue;
}
if (!filter->TimeFilterSample(sample)) {
// Did not pass time filter.
continue;
}
if (!filter->TaskFilterSample(sample)) {
// Did not pass task filter.
continue;
}
if (!filter->FilterSample(sample)) {
// Did not pass filter.
continue;
if (filter != nullptr) {
// If we're requesting all the native allocation samples, we don't care
// whether or not we're in the same isolate as the sample.
if (sample->port() != filter->port()) {
// Another isolate.
continue;
}
if (!filter->TimeFilterSample(sample)) {
// Did not pass time filter.
continue;
}
if (!filter->TaskFilterSample(sample)) {
// Did not pass task filter.
continue;
}
if (!filter->FilterSample(sample)) {
// Did not pass filter.
continue;
}
}
buffer->Add(BuildProcessedSample(sample, buffer->code_lookup_table()));
}
@ -1577,7 +1707,7 @@ ProcessedSample* SampleBuffer::BuildProcessedSample(
Sample* SampleBuffer::Next(Sample* sample) {
if (!sample->is_continuation_sample()) return NULL;
Sample* next_sample = At(sample->continuation_index());
Sample* next_sample = sample->continuation_sample();
// Sanity check.
ASSERT(sample != next_sample);
// Detect invalid chaining.

View file

@ -29,7 +29,7 @@ class ProcessedSampleBuffer;
class Sample;
class AllocationSampleBuffer;
class SampleBuffer;
class SampleBlock;
class ProfileTrieNode;
#define PROFILER_COUNTERS(V) \
@ -46,7 +46,8 @@ class ProfileTrieNode;
V(incomplete_sample_fp_bounds) \
V(incomplete_sample_fp_step) \
V(incomplete_sample_bad_pc) \
V(failure_native_allocation_sample)
V(failure_native_allocation_sample) \
V(sample_allocation_failure)
struct ProfilerCounters {
#define DECLARE_PROFILER_COUNTER(name) RelaxedAtomic<int64_t> name;
@ -69,7 +70,9 @@ class Profiler : public AllStatic {
// service protocol.
static void UpdateRunningState();
static SampleBuffer* sample_buffer() { return sample_buffer_; }
static SampleBlockBuffer* sample_block_buffer() {
return sample_block_buffer_;
}
static AllocationSampleBuffer* allocation_sample_buffer() {
return allocation_sample_buffer_;
}
@ -111,10 +114,12 @@ class Profiler : public AllStatic {
static intptr_t CalculateSampleBufferCapacity();
// Does not walk the thread's stack.
static void SampleThreadSingleFrame(Thread* thread, uintptr_t pc);
static void SampleThreadSingleFrame(Thread* thread,
Sample* sample,
uintptr_t pc);
static RelaxedAtomic<bool> initialized_;
static SampleBuffer* sample_buffer_;
static SampleBlockBuffer* sample_block_buffer_;
static AllocationSampleBuffer* allocation_sample_buffer_;
static ProfilerCounters counters_;
@ -129,6 +134,8 @@ class SampleVisitor : public ValueObject {
virtual void VisitSample(Sample* sample) = 0;
virtual void Reset() { visited_ = 0; }
intptr_t visited() const { return visited_; }
void IncrementVisited() { visited_++; }
@ -189,11 +196,14 @@ class ClearProfileVisitor : public SampleVisitor {
// Each Sample holds a stack trace from an isolate.
class Sample {
public:
Sample() = default;
void Init(Dart_Port port, int64_t timestamp, ThreadId tid) {
Clear();
timestamp_ = timestamp;
tid_ = tid;
port_ = port;
next_ = nullptr;
}
Dart_Port port() const { return port_; }
@ -214,7 +224,7 @@ class Sample {
vm_tag_ = VMTag::kInvalidTagId;
user_tag_ = UserTags::kDefaultUserTag;
state_ = 0;
continuation_index_ = -1;
next_ = nullptr;
allocation_identity_hash_ = 0;
#if defined(DART_USE_TCMALLOC) && defined(DEBUG)
native_allocation_address_ = 0;
@ -352,18 +362,14 @@ class Sample {
return ContinuationSampleBit::decode(state_);
}
void SetContinuationIndex(intptr_t index) {
void SetContinuation(Sample* next) {
ASSERT(!is_continuation_sample());
ASSERT(continuation_index_ == -1);
ASSERT(next_ == nullptr);
state_ = ContinuationSampleBit::update(true, state_);
continuation_index_ = index;
ASSERT(is_continuation_sample());
next_ = next;
}
intptr_t continuation_index() const {
ASSERT(is_continuation_sample());
return continuation_index_;
}
Sample* continuation_sample() const { return next_; }
intptr_t allocation_cid() const {
ASSERT(is_allocation_sample());
@ -431,7 +437,7 @@ class Sample {
uword vm_tag_;
uword user_tag_;
uint32_t state_;
int32_t continuation_index_;
Sample* next_;
uint32_t allocation_identity_hash_;
#if defined(DART_USE_TCMALLOC) && defined(DEBUG)
@ -623,21 +629,26 @@ class CodeLookupTable : public ZoneAllocated {
DISALLOW_COPY_AND_ASSIGN(CodeLookupTable);
};
// Ring buffer of Samples that is (usually) shared by many isolates.
class SampleBuffer {
// Interface for a class that can create a ProcessedSampleBuffer.
class ProcessedSampleBufferBuilder {
public:
// Up to 1 minute @ 1000Hz, less if samples are deep.
static const intptr_t kDefaultBufferCapacity = 60000;
virtual ~ProcessedSampleBufferBuilder() = default;
virtual ProcessedSampleBuffer* BuildProcessedSampleBuffer(
SampleFilter* filter,
ProcessedSampleBuffer* buffer = nullptr) = 0;
};
explicit SampleBuffer(intptr_t capacity = kDefaultBufferCapacity);
virtual ~SampleBuffer();
class SampleBuffer : public ProcessedSampleBufferBuilder {
public:
SampleBuffer() = default;
virtual ~SampleBuffer() = default;
intptr_t capacity() const { return capacity_; }
Sample* At(intptr_t idx) const;
intptr_t ReserveSampleSlot();
virtual Sample* ReserveSample();
virtual Sample* ReserveSampleAndLink(Sample* previous);
virtual void Init(Sample* samples, intptr_t capacity) {
ASSERT(samples != nullptr);
ASSERT(capacity > 0);
samples_ = samples;
capacity_ = capacity;
}
void VisitSamples(SampleVisitor* visitor) {
ASSERT(visitor != NULL);
@ -669,45 +680,218 @@ class SampleBuffer {
}
}
ProcessedSampleBuffer* BuildProcessedSampleBuffer(SampleFilter* filter);
virtual Sample* ReserveSample() = 0;
virtual Sample* ReserveSampleAndLink(Sample* previous) = 0;
intptr_t Size() { return memory_->size(); }
Sample* At(intptr_t idx) const {
ASSERT(idx >= 0);
ASSERT(idx < capacity_);
return &samples_[idx];
}
intptr_t capacity() const { return capacity_; }
virtual ProcessedSampleBuffer* BuildProcessedSampleBuffer(
SampleFilter* filter,
ProcessedSampleBuffer* buffer = nullptr);
protected:
ProcessedSample* BuildProcessedSample(Sample* sample,
const CodeLookupTable& clt);
Sample* Next(Sample* sample);
VirtualMemory* memory_;
ProcessedSample* BuildProcessedSample(Sample* sample,
const CodeLookupTable& clt);
Sample* samples_;
intptr_t capacity_;
RelaxedAtomic<uintptr_t> cursor_;
DISALLOW_COPY_AND_ASSIGN(SampleBuffer);
};
class SampleBlock : public SampleBuffer {
public:
// The default number of samples per block. Overridden by some tests.
static const intptr_t kSamplesPerBlock = 1000;
SampleBlock() = default;
virtual ~SampleBlock() = default;
void Clear() {
allocation_block_ = false;
cursor_ = 0;
full_ = false;
evictable_ = false;
next_free_ = nullptr;
}
// Returns the number of samples contained within this block.
intptr_t capacity() const { return capacity_; }
// Specify whether or not this block is used for assigning allocation
// samples.
void set_is_allocation_block(bool is_allocation_block) {
allocation_block_ = is_allocation_block;
}
Isolate* owner() const { return owner_; }
void set_owner(Isolate* isolate) { owner_ = isolate; }
// Manually marks the block as full so it can be processed and added back to
// the pool of available blocks.
void release_block() { full_.store(true); }
// When true, this sample block is considered complete and will no longer be
// used to assign new Samples. This block is **not** available for
// re-allocation simply because it's full. It must be processed by
// SampleBlockBuffer::ProcessCompletedBlocks before it can be considered
// evictable and available for re-allocation.
bool is_full() const { return full_.load(); }
// When true, this sample block is available for re-allocation.
bool evictable() const { return evictable_.load(); }
virtual Sample* ReserveSample();
virtual Sample* ReserveSampleAndLink(Sample* previous);
protected:
Isolate* owner_ = nullptr;
bool allocation_block_ = false;
intptr_t index_;
RelaxedAtomic<int> cursor_ = 0;
RelaxedAtomic<bool> full_ = false;
RelaxedAtomic<bool> evictable_ = false;
SampleBlock* next_free_ = nullptr;
private:
DISALLOW_COPY_AND_ASSIGN(SampleBuffer);
friend class SampleBlockBuffer;
DISALLOW_COPY_AND_ASSIGN(SampleBlock);
};
class SampleBlockBuffer : public ProcessedSampleBufferBuilder {
public:
static const intptr_t kDefaultBlockCount = 60;
// Creates a SampleBlockBuffer with a predetermined number of blocks.
//
// Defaults to kDefaultBlockCount blocks. Block size is fixed to
// SampleBlock::kSamplesPerBlock samples per block, except for in tests.
explicit SampleBlockBuffer(
intptr_t blocks = kDefaultBlockCount,
intptr_t samples_per_block = SampleBlock::kSamplesPerBlock);
virtual ~SampleBlockBuffer();
void VisitSamples(SampleVisitor* visitor) {
ASSERT(visitor != NULL);
for (intptr_t i = 0; i < cursor_.load(); ++i) {
(&blocks_[i])->VisitSamples(visitor);
}
}
// Returns true when there is at least a single block that needs to be
// processed.
//
// NOTE: this should only be called from the interrupt handler as
// invocation will have the side effect of clearing the underlying flag.
bool process_blocks() { return can_process_block_.exchange(false); }
// Iterates over the blocks in the buffer and processes blocks marked as
// full. Processing consists of sending a service event with the samples from
// completed, unprocessed blocks and marking these blocks are evictable
// (i.e., safe to be re-allocated and re-used).
void ProcessCompletedBlocks();
// Reserves a sample for a CPU profile.
//
// Returns nullptr when a sample can't be reserved.
Sample* ReserveCPUSample(Isolate* isolate);
// Reserves a sample for a Dart object allocation profile.
//
// Returns nullptr when a sample can't be reserved.
Sample* ReserveAllocationSample(Isolate* isolate);
intptr_t Size() const { return memory_->size(); }
virtual ProcessedSampleBuffer* BuildProcessedSampleBuffer(
SampleFilter* filter,
ProcessedSampleBuffer* buffer = nullptr);
private:
Sample* ReserveSampleImpl(Isolate* isolate, bool allocation_sample);
// Returns nullptr if there are no available blocks.
SampleBlock* ReserveSampleBlock();
void FreeBlock(SampleBlock* block) {
ASSERT(block->next_free_ == nullptr);
MutexLocker ml(&free_block_lock_);
if (free_list_head_ == nullptr) {
free_list_head_ = block;
free_list_tail_ = block;
return;
}
free_list_tail_->next_free_ = block;
free_list_tail_ = block;
}
SampleBlock* GetFreeBlock() {
MutexLocker ml(&free_block_lock_);
if (free_list_head_ == nullptr) {
return nullptr;
}
SampleBlock* block = free_list_head_;
free_list_head_ = block->next_free_;
if (free_list_head_ == nullptr) {
free_list_tail_ = nullptr;
}
block->next_free_ = nullptr;
return block;
}
Mutex free_block_lock_;
RelaxedAtomic<bool> can_process_block_ = false;
// Sample block management.
RelaxedAtomic<int> cursor_;
SampleBlock* blocks_;
intptr_t capacity_;
SampleBlock* free_list_head_;
SampleBlock* free_list_tail_;
// Sample buffer management.
VirtualMemory* memory_;
Sample* sample_buffer_;
DISALLOW_COPY_AND_ASSIGN(SampleBlockBuffer);
};
class AllocationSampleBuffer : public SampleBuffer {
public:
explicit AllocationSampleBuffer(intptr_t capacity = kDefaultBufferCapacity);
virtual ~AllocationSampleBuffer();
explicit AllocationSampleBuffer(intptr_t capacity = 60000);
virtual ~AllocationSampleBuffer() = default;
intptr_t ReserveSampleSlotLocked();
virtual Sample* ReserveSample();
virtual Sample* ReserveSampleAndLink(Sample* previous);
void FreeAllocationSample(Sample* sample);
intptr_t Size() { return memory_->size(); }
private:
intptr_t ReserveSampleSlotLocked();
Mutex mutex_;
Sample* free_sample_list_;
VirtualMemory* memory_;
RelaxedAtomic<int> cursor_ = 0;
DISALLOW_COPY_AND_ASSIGN(AllocationSampleBuffer);
};
intptr_t Profiler::Size() {
intptr_t size = 0;
if (sample_buffer_ != nullptr) {
size += sample_buffer_->Size();
if (sample_block_buffer_ != nullptr) {
size += sample_block_buffer_->Size();
}
if (allocation_sample_buffer_ != nullptr) {
size += allocation_sample_buffer_->Size();

View file

@ -7,6 +7,7 @@
#include "platform/text_buffer.h"
#include "vm/growable_array.h"
#include "vm/hash_map.h"
#include "vm/heap/safepoint.h"
#include "vm/log.h"
#include "vm/malloc_hooks.h"
#include "vm/native_symbol.h"
@ -15,6 +16,8 @@
#include "vm/profiler.h"
#include "vm/reusable_handles.h"
#include "vm/scope_timer.h"
#include "vm/service.h"
#include "vm/service_event.h"
#include "vm/timeline.h"
namespace dart {
@ -919,7 +922,7 @@ class ProfileBuilder : public ValueObject {
ProfileBuilder(Thread* thread,
SampleFilter* filter,
SampleBuffer* sample_buffer,
ProcessedSampleBufferBuilder* sample_buffer,
Profile* profile)
: thread_(thread),
vm_isolate_(Dart::vm_isolate()),
@ -932,8 +935,6 @@ class ProfileBuilder : public ValueObject {
inlined_functions_cache_(new ProfileCodeInlinedFunctionsCache()),
samples_(NULL),
info_kind_(kNone) {
ASSERT((sample_buffer_ == Profiler::sample_buffer()) ||
(sample_buffer_ == Profiler::allocation_sample_buffer()));
ASSERT(profile_ != NULL);
}
@ -975,7 +976,7 @@ class ProfileBuilder : public ValueObject {
bool FilterSamples() {
ScopeTimer sw("ProfileBuilder::FilterSamples", FLAG_trace_profiler);
ASSERT(sample_buffer_ != NULL);
ASSERT(sample_buffer_ != nullptr);
samples_ = sample_buffer_->BuildProcessedSampleBuffer(filter_);
profile_->samples_ = samples_;
profile_->sample_count_ = samples_->length();
@ -1439,7 +1440,7 @@ class ProfileBuilder : public ValueObject {
Thread* thread_;
Isolate* vm_isolate_;
SampleFilter* filter_;
SampleBuffer* sample_buffer_;
ProcessedSampleBufferBuilder* sample_buffer_;
Profile* profile_;
const AbstractCode null_code_;
const Function& null_function_;
@ -1466,11 +1467,10 @@ Profile::Profile(Isolate* isolate)
void Profile::Build(Thread* thread,
SampleFilter* filter,
SampleBuffer* sample_buffer) {
ProcessedSampleBufferBuilder* sample_buffer) {
// Disable thread interrupts while processing the buffer.
DisableThreadInterruptsScope dtis(thread);
ThreadInterrupter::SampleBufferReaderScope scope;
ProfileBuilder builder(thread, filter, sample_buffer, this);
builder.Build();
}
@ -1722,12 +1722,16 @@ ProfileFunction* Profile::FindFunction(const Function& function) {
}
void Profile::PrintProfileJSON(JSONStream* stream, bool include_code_samples) {
ScopeTimer sw("Profile::PrintProfileJSON", FLAG_trace_profiler);
JSONObject obj(stream);
obj.AddProperty("type", "CpuSamples");
PrintHeaderJSON(&obj);
PrintProfileJSON(&obj, include_code_samples);
}
void Profile::PrintProfileJSON(JSONObject* obj, bool include_code_samples) {
ScopeTimer sw("Profile::PrintProfileJSON", FLAG_trace_profiler);
obj->AddProperty("type", "CpuSamples");
PrintHeaderJSON(obj);
if (include_code_samples) {
JSONArray codes(&obj, "_codes");
JSONArray codes(obj, "_codes");
for (intptr_t i = 0; i < live_code_->length(); i++) {
ProfileCode* code = live_code_->At(i);
ASSERT(code != NULL);
@ -1746,30 +1750,30 @@ void Profile::PrintProfileJSON(JSONStream* stream, bool include_code_samples) {
}
{
JSONArray functions(&obj, "functions");
JSONArray functions(obj, "functions");
for (intptr_t i = 0; i < functions_->length(); i++) {
ProfileFunction* function = functions_->At(i);
ASSERT(function != NULL);
function->PrintToJSONArray(&functions);
}
}
PrintSamplesJSON(&obj, include_code_samples);
PrintSamplesJSON(obj, include_code_samples);
}
void ProfilerService::PrintJSONImpl(Thread* thread,
JSONStream* stream,
SampleFilter* filter,
SampleBuffer* sample_buffer,
ProcessedSampleBufferBuilder* buffer,
bool include_code_samples) {
Isolate* isolate = thread->isolate();
// We should bail out in service.cc if the profiler is disabled.
ASSERT(sample_buffer != NULL);
ASSERT(buffer != nullptr);
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile(isolate);
profile.Build(thread, filter, sample_buffer);
profile.Build(thread, filter, buffer);
profile.PrintProfileJSON(stream, include_code_samples);
}
@ -1795,7 +1799,7 @@ void ProfilerService::PrintJSON(JSONStream* stream,
Isolate* isolate = thread->isolate();
NoAllocationSampleFilter filter(isolate->main_port(), Thread::kMutatorTask,
time_origin_micros, time_extent_micros);
PrintJSONImpl(thread, stream, &filter, Profiler::sample_buffer(),
PrintJSONImpl(thread, stream, &filter, Profiler::sample_block_buffer(),
include_code_samples);
}
@ -1820,7 +1824,7 @@ void ProfilerService::PrintAllocationJSON(JSONStream* stream,
Isolate* isolate = thread->isolate();
AllocationSampleFilter filter(isolate->main_port(), Thread::kMutatorTask,
time_origin_micros, time_extent_micros);
PrintJSONImpl(thread, stream, &filter, Profiler::sample_buffer(), true);
PrintJSONImpl(thread, stream, &filter, Profiler::sample_block_buffer(), true);
}
class ClassAllocationSampleFilter : public SampleFilter {
@ -1856,7 +1860,7 @@ void ProfilerService::PrintAllocationJSON(JSONStream* stream,
ClassAllocationSampleFilter filter(isolate->main_port(), cls,
Thread::kMutatorTask, time_origin_micros,
time_extent_micros);
PrintJSONImpl(thread, stream, &filter, Profiler::sample_buffer(), true);
PrintJSONImpl(thread, stream, &filter, Profiler::sample_block_buffer(), true);
}
void ProfilerService::PrintNativeAllocationJSON(JSONStream* stream,
@ -1870,8 +1874,8 @@ void ProfilerService::PrintNativeAllocationJSON(JSONStream* stream,
}
void ProfilerService::ClearSamples() {
SampleBuffer* sample_buffer = Profiler::sample_buffer();
if (sample_buffer == NULL) {
SampleBlockBuffer* sample_block_buffer = Profiler::sample_block_buffer();
if (sample_block_buffer == nullptr) {
return;
}
@ -1883,7 +1887,7 @@ void ProfilerService::ClearSamples() {
ThreadInterrupter::SampleBufferReaderScope scope;
ClearProfileVisitor clear_profile(isolate);
sample_buffer->VisitSamples(&clear_profile);
sample_block_buffer->VisitSamples(&clear_profile);
}
#endif // !PRODUCT

View file

@ -367,7 +367,9 @@ class Profile : public ValueObject {
explicit Profile(Isolate* isolate);
// Build a filtered model using |filter|.
void Build(Thread* thread, SampleFilter* filter, SampleBuffer* sample_buffer);
void Build(Thread* thread,
SampleFilter* filter,
ProcessedSampleBufferBuilder* sample_block_buffer);
// After building:
int64_t min_time() const { return min_time_; }
@ -383,6 +385,7 @@ class Profile : public ValueObject {
ProfileCode* GetCodeFromPC(uword pc, int64_t timestamp);
void PrintProfileJSON(JSONStream* stream, bool include_code_samples);
void PrintProfileJSON(JSONObject* obj, bool include_code_samples);
ProfileFunction* FindFunction(const Function& function);
@ -445,7 +448,7 @@ class ProfilerService : public AllStatic {
static void PrintJSONImpl(Thread* thread,
JSONStream* stream,
SampleFilter* filter,
SampleBuffer* sample_buffer,
ProcessedSampleBufferBuilder* buffer,
bool include_code_samples);
};

View file

@ -22,6 +22,12 @@ DECLARE_FLAG(bool, profile_vm_allocation);
DECLARE_FLAG(int, max_profile_depth);
DECLARE_FLAG(int, optimization_counter_threshold);
// SampleVisitor ignores samples with timestamp == 0.
const int64_t kValidTimeStamp = 1;
// SampleVisitor ignores samples with pc == 0.
const uword kValidPc = 0xFF;
// Some tests are written assuming native stack trace profiling is disabled.
class DisableNativeProfileScope : public ValueObject {
public:
@ -71,87 +77,123 @@ class MaxProfileDepthScope : public ValueObject {
const intptr_t FLAG_max_profile_depth_;
};
class ProfileSampleBufferTestHelper {
class ProfileSampleBufferTestHelper : public SampleVisitor {
public:
static intptr_t IterateCount(const Dart_Port port,
const SampleBuffer& sample_buffer) {
intptr_t c = 0;
for (intptr_t i = 0; i < sample_buffer.capacity(); i++) {
Sample* sample = sample_buffer.At(i);
if (sample->port() != port) {
continue;
}
c++;
}
return c;
explicit ProfileSampleBufferTestHelper(Dart_Port port)
: SampleVisitor(port) {}
void VisitSample(Sample* sample) { sum_ += sample->At(0); }
void Reset() {
sum_ = 0;
SampleVisitor::Reset();
}
static intptr_t IterateSumPC(const Dart_Port port,
const SampleBuffer& sample_buffer) {
intptr_t c = 0;
for (intptr_t i = 0; i < sample_buffer.capacity(); i++) {
Sample* sample = sample_buffer.At(i);
if (sample->port() != port) {
continue;
}
c += sample->At(0);
}
return c;
}
intptr_t sum() const { return sum_; }
private:
intptr_t sum_ = 0;
};
void VisitSamples(SampleBlockBuffer* buffer, SampleVisitor* visitor) {
// Mark the completed blocks as free so they can be re-used.
buffer->ProcessCompletedBlocks();
visitor->Reset();
buffer->VisitSamples(visitor);
}
TEST_CASE(Profiler_SampleBufferWrapTest) {
SampleBuffer* sample_buffer = new SampleBuffer(3);
Isolate* isolate = Isolate::Current();
SampleBlockBuffer* sample_buffer = new SampleBlockBuffer(3, 1);
Dart_Port i = 123;
EXPECT_EQ(0, ProfileSampleBufferTestHelper::IterateSumPC(i, *sample_buffer));
ProfileSampleBufferTestHelper visitor(i);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(0, visitor.sum());
Sample* s;
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, 2);
EXPECT_EQ(2, ProfileSampleBufferTestHelper::IterateSumPC(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(2, visitor.sum());
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, 4);
EXPECT_EQ(6, ProfileSampleBufferTestHelper::IterateSumPC(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(6, visitor.sum());
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, 6);
EXPECT_EQ(12, ProfileSampleBufferTestHelper::IterateSumPC(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(12, visitor.sum());
// Mark the completed blocks as free so they can be re-used.
sample_buffer->ProcessCompletedBlocks();
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, 8);
EXPECT_EQ(18, ProfileSampleBufferTestHelper::IterateSumPC(i, *sample_buffer));
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(18, visitor.sum());
{
MutexLocker ml(isolate->current_sample_block_lock());
isolate->set_current_sample_block(nullptr);
}
delete sample_buffer;
}
TEST_CASE(Profiler_SampleBufferIterateTest) {
SampleBuffer* sample_buffer = new SampleBuffer(3);
Isolate* isolate = Isolate::Current();
SampleBlockBuffer* sample_buffer = new SampleBlockBuffer(3, 1);
Dart_Port i = 123;
EXPECT_EQ(0, ProfileSampleBufferTestHelper::IterateCount(i, *sample_buffer));
ProfileSampleBufferTestHelper visitor(i);
sample_buffer->VisitSamples(&visitor);
EXPECT_EQ(0, visitor.visited());
Sample* s;
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
EXPECT_EQ(1, ProfileSampleBufferTestHelper::IterateCount(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
EXPECT_EQ(2, ProfileSampleBufferTestHelper::IterateCount(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
EXPECT_EQ(3, ProfileSampleBufferTestHelper::IterateCount(i, *sample_buffer));
s = sample_buffer->ReserveSample();
s->Init(i, 0, 0);
EXPECT_EQ(3, ProfileSampleBufferTestHelper::IterateCount(i, *sample_buffer));
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, kValidPc);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(1, visitor.visited());
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, kValidPc);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(2, visitor.visited());
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, kValidPc);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(3, visitor.visited());
s = sample_buffer->ReserveCPUSample(isolate);
s->Init(i, kValidTimeStamp, 0);
s->SetAt(0, kValidPc);
VisitSamples(sample_buffer, &visitor);
EXPECT_EQ(3, visitor.visited());
{
MutexLocker ml(isolate->current_sample_block_lock());
isolate->set_current_sample_block(nullptr);
}
delete sample_buffer;
}
TEST_CASE(Profiler_AllocationSampleTest) {
Isolate* isolate = Isolate::Current();
SampleBuffer* sample_buffer = new SampleBuffer(3);
Sample* sample = sample_buffer->ReserveSample();
SampleBlockBuffer* sample_buffer = new SampleBlockBuffer(1, 1);
Sample* sample = sample_buffer->ReserveAllocationSample(isolate);
sample->Init(isolate->main_port(), 0, 0);
sample->set_metadata(99);
sample->set_is_allocation_sample(true);
EXPECT_EQ(99, sample->allocation_cid());
isolate->set_current_allocation_sample_block(nullptr);
delete sample_buffer;
}
@ -443,7 +485,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TrivialRecordAllocation) {
AllocationFilter filter(isolate->main_port(), class_a.id(),
before_allocations_micros,
allocation_extent_micros);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have 1 allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -471,7 +513,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TrivialRecordAllocation) {
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id(),
Dart_TimelineGetMicros(), 16000);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples because none occured within
// the specified time range.
EXPECT_EQ(0, profile.sample_count());
@ -561,7 +603,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_NativeAllocation) {
// Filter for the class in the time range.
NativeAllocationSampleFilter filter(before_allocations_micros,
allocation_extent_micros);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have 0 allocation samples since we freed the memory.
EXPECT_EQ(0, profile.sample_count());
}
@ -574,7 +616,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_NativeAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
NativeAllocationSampleFilter filter(Dart_TimelineGetMicros(), 16000);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples because none occured within
// the specified time range.
EXPECT_EQ(0, profile.sample_count());
@ -620,7 +662,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -637,7 +679,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -667,7 +709,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -705,7 +747,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_CodeTicks) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -725,7 +767,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_CodeTicks) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have three allocation samples.
EXPECT_EQ(3, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -780,7 +822,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionTicks) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -800,7 +842,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionTicks) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have three allocation samples.
EXPECT_EQ(3, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -850,7 +892,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -863,7 +905,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -885,7 +927,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -912,7 +954,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -925,7 +967,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -947,7 +989,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -969,7 +1011,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples, since empty
// growable lists use a shared backing.
EXPECT_EQ(0, profile.sample_count());
@ -999,7 +1041,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1012,7 +1054,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1032,7 +1074,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -1073,7 +1115,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ClosureAllocation) {
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), closure_class.id());
filter.set_enable_vm_ticks(true);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1097,7 +1139,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ClosureAllocation) {
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), closure_class.id());
filter.set_enable_vm_ticks(true);
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -1127,7 +1169,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1140,7 +1182,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1162,7 +1204,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -1175,7 +1217,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should now have two allocation samples.
EXPECT_EQ(2, profile.sample_count());
}
@ -1207,7 +1249,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1220,7 +1262,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1240,7 +1282,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -1253,7 +1295,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should now have two allocation samples.
EXPECT_EQ(2, profile.sample_count());
}
@ -1285,7 +1327,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1298,7 +1340,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1324,7 +1366,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should still only have one allocation sample.
EXPECT_EQ(1, profile.sample_count());
}
@ -1337,7 +1379,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should now have two allocation samples.
EXPECT_EQ(2, profile.sample_count());
}
@ -1393,7 +1435,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionInline) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1411,7 +1453,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionInline) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have 50,000 allocation samples.
EXPECT_EQ(50000, profile.sample_count());
{
@ -1540,7 +1582,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_InliningIntervalBoundry) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have no allocation samples.
EXPECT_EQ(0, profile.sample_count());
}
@ -1557,7 +1599,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_InliningIntervalBoundry) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -1633,7 +1675,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ChainedSamples) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have 1 allocation sample.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile);
@ -1728,7 +1770,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BasicSourcePosition) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -1810,7 +1852,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BasicSourcePositionOptimized) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -1888,7 +1930,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_SourcePosition) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -1998,7 +2040,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_SourcePositionOptimized) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -2093,7 +2135,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BinaryOperatorSourcePosition) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -2211,7 +2253,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BinaryOperatorSourcePositionOptimized) {
HANDLESCOPE(thread);
Profile profile(isolate);
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_buffer());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
// We should have one allocation samples.
EXPECT_EQ(1, profile.sample_count());
ProfileStackWalker walker(&profile, true);
@ -2256,10 +2298,10 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BinaryOperatorSourcePositionOptimized) {
}
}
static void InsertFakeSample(SampleBuffer* sample_buffer, uword* pc_offsets) {
ASSERT(sample_buffer != NULL);
static void InsertFakeSample(uword* pc_offsets) {
Isolate* isolate = Isolate::Current();
Sample* sample = sample_buffer->ReserveSample();
ASSERT(Profiler::sample_block_buffer() != nullptr);
Sample* sample = Profiler::sample_block_buffer()->ReserveCPUSample(isolate);
ASSERT(sample != NULL);
sample->Init(isolate->main_port(), OS::GetCurrentMonotonicMicros(),
OSThread::Current()->trace_id());
@ -2319,8 +2361,8 @@ ISOLATE_UNIT_TEST_CASE(Profiler_GetSourceReport) {
DisableBackgroundCompilationScope dbcs;
SampleBuffer* sample_buffer = Profiler::sample_buffer();
EXPECT(sample_buffer != NULL);
SampleBlockBuffer* sample_block_buffer = Profiler::sample_block_buffer();
ASSERT(sample_block_buffer != nullptr);
const Library& root_library = Library::Handle(LoadTestScript(kScript));
@ -2330,7 +2372,7 @@ ISOLATE_UNIT_TEST_CASE(Profiler_GetSourceReport) {
{
// Clear the profile for this isolate.
ClearProfileVisitor cpv(Isolate::Current());
sample_buffer->VisitSamples(&cpv);
sample_block_buffer->VisitSamples(&cpv);
}
// Query the code object for main and determine the PC at some token
@ -2390,9 +2432,9 @@ ISOLATE_UNIT_TEST_CASE(Profiler_GetSourceReport) {
callPositionPc, // main.
0};
InsertFakeSample(sample_buffer, &sample1[0]);
InsertFakeSample(sample_buffer, &sample2[0]);
InsertFakeSample(sample_buffer, &sample3[0]);
InsertFakeSample(&sample1[0]);
InsertFakeSample(&sample2[0]);
InsertFakeSample(&sample3[0]);
// Generate source report for main.
JSONStream js;

View file

@ -15,7 +15,7 @@
namespace dart {
#define SERVICE_PROTOCOL_MAJOR_VERSION 3
#define SERVICE_PROTOCOL_MINOR_VERSION 48
#define SERVICE_PROTOCOL_MINOR_VERSION 49
class Array;
class EmbedderServiceHandler;

View file

@ -1,8 +1,8 @@
# Dart VM Service Protocol 3.48
# Dart VM Service Protocol 3.49
> Please post feedback to the [observatory-discuss group][discuss-list]
This document describes of _version 3.48_ of the Dart VM Service Protocol. This
This document describes of _version 3.49_ of the Dart VM Service Protocol. This
protocol is used to communicate with a running Dart Virtual Machine.
To use the Service Protocol, start the VM with the *--observe* flag.
@ -1477,7 +1477,7 @@ streamId | event types provided
VM | VMUpdate, VMFlagUpdate
Isolate | IsolateStart, IsolateRunnable, IsolateExit, IsolateUpdate, IsolateReload, ServiceExtensionAdded
Debug | PauseStart, PauseExit, PauseBreakpoint, PauseInterrupted, PauseException, PausePostRequest, Resume, BreakpointAdded, BreakpointResolved, BreakpointRemoved, BreakpointUpdated, Inspect, None
Profiler | UserTagChanged
Profiler | CpuSamples, UserTagChanged
GC | GC
Extension | Extension
Timeline | TimelineEvents, TimelineStreamsSubscriptionUpdate
@ -2161,6 +2161,9 @@ class Event extends Response {
// The previous UserTag label.
string previousTag [optional];
// A CPU profile containing recent samples.
CpuSamples cpuSamples [optional];
}
```
@ -2274,6 +2277,9 @@ enum EventKind {
// Notification that the UserTag for an isolate has been changed.
UserTagChanged,
// A block of recently collected CPU samples.
CpuSamples,
}
```
@ -4069,5 +4075,6 @@ version | comments
3.46 | Moved `sourceLocation` property into reference types for `Class`, `Field`, and `Function`.
3.47 | Added `shows` and `hides` properties to `LibraryDependency`.
3.48 | Added `Profiler` stream, `UserTagChanged` event kind, and `updatedTag` and `previousTag` properties to `Event`.
3.49 | Added `CpuSamples` event kind, and `cpuSamples` property to `Event`.
[discuss-list]: https://groups.google.com/a/dartlang.org/forum/#!forum/observatory-discuss

View file

@ -64,7 +64,8 @@ ServiceEvent::ServiceEvent(IsolateGroup* isolate_group,
event_kind == ServiceEvent::kNone ||
// VM service can print Observatory information to Stdout or Stderr
// which are embedder streams.
event_kind == ServiceEvent::kEmbedder)));
event_kind == ServiceEvent::kEmbedder ||
event_kind == ServiceEvent::kCpuSamples)));
if ((event_kind == ServiceEvent::kPauseStart) ||
(event_kind == ServiceEvent::kPauseExit)) {
@ -141,6 +142,8 @@ const char* ServiceEvent::KindAsCString() const {
return "TimelineStreamSubscriptionsUpdate";
case kUserTagChanged:
return "UserTagChanged";
case kCpuSamples:
return "CpuSamples";
default:
UNREACHABLE();
return "Unknown";
@ -193,6 +196,7 @@ const StreamInfo* ServiceEvent::stream_info() const {
case kEmbedder:
return nullptr;
case kCpuSamples:
case kUserTagChanged:
return &Service::profiler_stream;
@ -300,6 +304,11 @@ void ServiceEvent::PrintJSON(JSONStream* js) const {
js->AppendSerializedObject("extensionData",
extension_event_.event_data->ToCString());
}
if (kind() == kCpuSamples) {
JSONObject cpu_profile(&jsobj, "cpuSamples");
cpu_profile_->PrintProfileJSON(&cpu_profile, false);
}
}
void ServiceEvent::PrintJSONHeader(JSONObject* jsobj) const {

View file

@ -7,6 +7,7 @@
#include "vm/globals.h"
#include "vm/heap/heap.h"
#include "vm/profiler_service.h"
namespace dart {
@ -62,6 +63,8 @@ class ServiceEvent {
kUserTagChanged,
kCpuSamples,
kIllegal,
};
@ -213,6 +216,9 @@ class ServiceEvent {
timeline_event_block_ = block;
}
Profile* cpu_profile() const { return cpu_profile_; }
void set_cpu_profile(Profile* profile) { cpu_profile_ = profile; }
void PrintJSON(JSONStream* js) const;
void PrintJSONHeader(JSONObject* jsobj) const;
@ -246,6 +252,7 @@ class ServiceEvent {
intptr_t bytes_length_;
LogRecord log_record_;
ExtensionEvent extension_event_;
Profile* cpu_profile_;
int64_t timestamp_;
};

View file

@ -59,7 +59,7 @@ void SourceReport::Init(Thread* thread,
// Build the profile.
SampleFilter samplesForIsolate(thread_->isolate()->main_port(),
Thread::kMutatorTask, -1, -1);
profile_.Build(thread, &samplesForIsolate, Profiler::sample_buffer());
profile_.Build(thread, &samplesForIsolate, Profiler::sample_block_buffer());
}
}

View file

@ -448,6 +448,15 @@ ErrorPtr Thread::HandleInterrupts() {
}
heap()->CollectGarbage(Heap::kNew);
}
#if !defined(PRODUCT)
// Processes completed SampleBlocks and sends CPU sample events over the
// service protocol when applicable.
SampleBlockBuffer* sample_buffer = Profiler::sample_block_buffer();
if (sample_buffer != nullptr && sample_buffer->process_blocks()) {
sample_buffer->ProcessCompletedBlocks();
}
#endif // !defined(PRODUCT)
}
if ((interrupt_bits & kMessageInterrupt) != 0) {
MessageHandler::MessageStatus status =