Reapply "[vm] Streamline Zones."

Fix imprecision in Zone::SizeInBytes, which was finally noticed by the vm/cc/AllocateZone test because the size of the initial inline buffer changed.

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/47399
Change-Id: I152d24d03a59b21267a9a24e5d929b51af57af71
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/215980
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Author: Ryan Macnak
Date: 2021-10-14 00:10:12 +00:00
Committed-by: commit-bot@chromium.org
Parent: b5c911e2a3
Commit: fbcacd7c05

28 changed files with 2200 additions and 2377 deletions
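For illustration, here is a minimal standalone sketch of the accounting scheme the diff below moves to. It is not the VM's actual Zone class (the name SketchZone and its members are hypothetical); it only shows the idea that every allocation bumps an exact size_ counter, so SizeInBytes() returns that counter directly instead of being re-derived from segment lists plus the inline buffer, which is where the earlier imprecision crept in when the inline buffer size changed.

#include <cstdint>
#include <cstdio>

class SketchZone {
 public:
  SketchZone() : position_(buffer_), limit_(buffer_ + kInitialChunkSize) {}

  // Bump-allocate out of the inline buffer. A real zone would fall back to
  // allocating a new segment here instead of returning nullptr.
  void* Alloc(intptr_t size) {
    size = (size + kAlignment - 1) & ~(kAlignment - 1);
    if (position_ + size > limit_) return nullptr;
    void* result = position_;
    position_ += size;
    size_ += size;  // Exact running total of bytes handed out.
    return result;
  }

  // Bytes handed out to callers (what the streamlined SizeInBytes reports).
  intptr_t SizeInBytes() const { return size_; }
  // Bytes reserved, including unused slack in the buffer.
  intptr_t CapacityInBytes() const { return kInitialChunkSize; }

 private:
  static constexpr intptr_t kInitialChunkSize = 128;
  static constexpr intptr_t kAlignment = 8;
  char buffer_[kInitialChunkSize];
  char* position_;
  char* limit_;
  intptr_t size_ = 0;
};

int main() {
  SketchZone zone;
  zone.Alloc(16);
  zone.Alloc(40);
  std::printf("size=%ld capacity=%ld\n",
              static_cast<long>(zone.SizeInBytes()),
              static_cast<long>(zone.CapacityInBytes()));
  return 0;
}

Compiled as ordinary C++ this prints size=56 capacity=128 (the capacity matching the new kInitialChunkSize of 128 in the diff); the size figure never depends on where the inline buffer lives or how large it is, which is presumably the property the vm/cc/AllocateZone test noticed when the buffer size changed.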

@ -48,7 +48,6 @@ BENCHMARK(CorelibCompileAll) {
bin::Builtin::SetNativeResolver(bin::Builtin::kCLILibrary);
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Timer timer;
timer.Start();
const Error& error =
@ -413,7 +412,6 @@ BENCHMARK_SIZE(CoreSnapshotSize) {
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Api::CheckAndFinalizePendingClasses(thread);
@ -451,7 +449,6 @@ BENCHMARK_SIZE(StandaloneSnapshotSize) {
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Api::CheckAndFinalizePendingClasses(thread);
@ -496,7 +493,6 @@ BENCHMARK(EnterExitIsolate) {
{
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Api::CheckAndFinalizePendingClasses(thread);
}
Dart_Isolate isolate = Dart_CurrentIsolate();
@ -514,7 +510,6 @@ BENCHMARK(EnterExitIsolate) {
BENCHMARK(SerializeNull) {
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
const Object& null_object = Object::Handle();
const intptr_t kLoopCount = 1000000;
Timer timer;
@ -536,7 +531,6 @@ BENCHMARK(SerializeNull) {
BENCHMARK(SerializeSmi) {
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
const Integer& smi_object = Integer::Handle(Smi::New(42));
const intptr_t kLoopCount = 1000000;
Timer timer;
@ -558,7 +552,6 @@ BENCHMARK(SerializeSmi) {
BENCHMARK(SimpleMessage) {
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
const Array& array_object = Array::Handle(Array::New(2));
array_object.SetAt(0, Integer::Handle(Smi::New(42)));
array_object.SetAt(1, Object::Handle());
@ -592,7 +585,6 @@ BENCHMARK(LargeMap) {
EXPECT_VALID(h_result);
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Instance& map = Instance::Handle();
map ^= Api::UnwrapHandle(h_result);
const intptr_t kLoopCount = 100;

@ -1731,7 +1731,6 @@ void ClassFinalizer::ClearAllCode(bool including_nonchanging_cids) {
auto const isolate_group = thread->isolate_group();
SafepointWriteRwLocker ml(thread, isolate_group->program_lock());
StackZone stack_zone(thread);
HANDLESCOPE(thread);
auto const zone = thread->zone();
class ClearCodeVisitor : public FunctionVisitor {

@ -3028,7 +3028,6 @@ void Precompiler::FinalizeAllClasses() {
// otherwise unreachable constants of dropped classes, which would
// cause assertion failures during GC after classes are dropped.
StackZone stack_zone(thread());
HANDLESCOPE(thread());
error_ = Library::FinalizeAllClasses();
if (!error_.IsNull()) {

@ -1133,7 +1133,6 @@ void BackgroundCompiler::Run() {
Thread* thread = Thread::Current();
StackZone stack_zone(thread);
Zone* zone = stack_zone.GetZone();
HANDLESCOPE(thread);
Function& function = Function::Handle(zone);
{
SafepointMonitorLocker ml(&queue_monitor_);

File diff suppressed because it is too large.

@ -1301,7 +1301,6 @@ static Dart_Isolate CreateIsolate(IsolateGroup* group,
bool success = false;
{
StackZone zone(T);
HANDLESCOPE(T);
// We enter an API scope here as InitializeIsolate could compile some
// bootstrap library files which call out to a tag handler that may create
// Api Handles when an error is encountered.
@ -2053,7 +2052,6 @@ DART_EXPORT bool Dart_RunLoopAsync(bool errors_are_fatal,
auto thread = Thread::Current();
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
if (on_error_port != ILLEGAL_PORT) {
const auto& port =

View file

@ -3128,7 +3128,6 @@ VM_UNIT_TEST_CASE(DartAPI_PersistentHandles) {
{
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
for (int i = 0; i < 500; i++) {
String& str = String::Handle();
str ^= PersistentHandle::Cast(handles[i])->ptr();
@ -4551,7 +4550,6 @@ VM_UNIT_TEST_CASE(DartAPI_LocalHandles) {
{
TransitionNativeToVM transition1(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
Smi& val = Smi::Handle();
TransitionVMToNative transition2(thread);
@ -4619,9 +4617,11 @@ VM_UNIT_TEST_CASE(DartAPI_LocalZoneMemory) {
Thread* thread = Thread::Current();
EXPECT(thread != NULL);
ApiLocalScope* scope = thread->api_top_scope();
EXPECT_EQ(0, thread->ZoneSizeInBytes());
{
// Start a new scope and allocate some memory.
Dart_EnterScope();
EXPECT_EQ(0, thread->ZoneSizeInBytes());
for (int i = 0; i < 100; i++) {
Dart_ScopeAllocate(16);
}

@ -14,6 +14,4 @@
namespace dart {
RelaxedAtomic<intptr_t> ApiNativeScope::current_memory_usage_ = 0;
} // namespace dart

@ -110,7 +110,7 @@ class ApiZone {
if ((thread != NULL) && (thread->zone() == &zone_)) {
thread->set_zone(zone_.previous_);
}
zone_.DeleteAll();
zone_.Reset();
}
private:
@ -656,18 +656,11 @@ class ApiNativeScope {
ASSERT(Current() == NULL);
OSThread::SetThreadLocal(Api::api_native_key_,
reinterpret_cast<uword>(this));
// We manually increment the memory usage counter since there is memory
// initially allocated within the zone on creation.
IncrementNativeScopeMemoryCapacity(zone_.GetZone()->CapacityInBytes());
}
~ApiNativeScope() {
ASSERT(Current() == this);
OSThread::SetThreadLocal(Api::api_native_key_, 0);
// We must also manually decrement the memory usage counter since the native
// is still holding it's initial memory and ~Zone() won't be able to
// determine which memory usage counter to decrement.
DecrementNativeScopeMemoryCapacity(zone_.GetZone()->CapacityInBytes());
}
static inline ApiNativeScope* Current() {
@ -675,16 +668,6 @@ class ApiNativeScope {
OSThread::GetThreadLocal(Api::api_native_key_));
}
static uintptr_t current_memory_usage() { return current_memory_usage_; }
static void IncrementNativeScopeMemoryCapacity(intptr_t size) {
current_memory_usage_.fetch_add(size);
}
static void DecrementNativeScopeMemoryCapacity(intptr_t size) {
current_memory_usage_.fetch_sub(size);
}
Zone* zone() {
Zone* result = zone_.GetZone();
ASSERT(result->handles()->CountScopedHandles() == 0);
@ -693,9 +676,6 @@ class ApiNativeScope {
}
private:
// The current total memory usage within ApiNativeScopes.
static RelaxedAtomic<intptr_t> current_memory_usage_;
ApiZone zone_;
};

@ -19,16 +19,6 @@ namespace dart {
DEFINE_FLAG(bool, verify_handles, false, "Verify handles.");
VMHandles::~VMHandles() {
if (FLAG_trace_handles) {
OS::PrintErr("*** Handle Counts for 0x(%" Px "):Zone = %d,Scoped = %d\n",
reinterpret_cast<intptr_t>(this), CountZoneHandles(),
CountScopedHandles());
OS::PrintErr("*** Deleting VM handle block 0x%" Px "\n",
reinterpret_cast<intptr_t>(this));
}
}
void VMHandles::VisitObjectPointers(ObjectPointerVisitor* visitor) {
return Handles<kVMHandleSizeInWords, kVMHandlesPerChunk,
kOffsetOfRawPtr>::VisitObjectPointers(visitor);

@ -107,6 +107,24 @@ class Handles {
return true;
}
intptr_t ZoneHandlesCapacityInBytes() const {
intptr_t capacity = 0;
for (HandlesBlock* block = zone_blocks_; block != nullptr;
block = block->next_block()) {
capacity += sizeof(*block);
}
return capacity;
}
intptr_t ScopedHandlesCapacityInBytes() const {
intptr_t capacity = 0;
for (HandlesBlock* block = scoped_blocks_; block != nullptr;
block = block->next_block()) {
capacity += sizeof(*block);
}
return capacity;
}
protected:
// Returns a count of active handles (used for testing purposes).
int CountScopedHandles() const;
@ -126,7 +144,7 @@ class Handles {
class HandlesBlock : public MallocAllocated {
public:
explicit HandlesBlock(HandlesBlock* next)
: next_handle_slot_(0), next_block_(next) {}
: next_block_(next), next_handle_slot_(0) {}
~HandlesBlock();
// Reinitializes handle block for reuse.
@ -175,9 +193,9 @@ class Handles {
void set_next_block(HandlesBlock* next) { next_block_ = next; }
private:
uword data_[kHandleSizeInWords * kHandlesPerChunk]; // Handles area.
intptr_t next_handle_slot_; // Next slot for allocation in current block.
HandlesBlock* next_block_; // Link to next block of handles.
intptr_t next_handle_slot_; // Next slot for allocation in current block.
uword data_[kHandleSizeInWords * kHandlesPerChunk]; // Handles area.
DISALLOW_COPY_AND_ASSIGN(HandlesBlock);
};
@ -222,7 +240,7 @@ class Handles {
};
static const int kVMHandleSizeInWords = 2;
static const int kVMHandlesPerChunk = 64;
static const int kVMHandlesPerChunk = 63;
static const int kOffsetOfRawPtr = kWordSize;
class VMHandles : public Handles<kVMHandleSizeInWords,
kVMHandlesPerChunk,
@ -232,12 +250,25 @@ class VMHandles : public Handles<kVMHandleSizeInWords,
VMHandles()
: Handles<kVMHandleSizeInWords, kVMHandlesPerChunk, kOffsetOfRawPtr>() {
#if defined(DEBUG)
if (FLAG_trace_handles) {
OS::PrintErr("*** Starting a new VM handle block 0x%" Px "\n",
reinterpret_cast<intptr_t>(this));
}
#endif
}
~VMHandles() {
#if defined(DEBUG)
if (FLAG_trace_handles) {
OS::PrintErr("*** Handle Counts for 0x(%" Px
"):Zone = %d,Scoped = %d\n",
reinterpret_cast<intptr_t>(this), CountZoneHandles(),
CountScopedHandles());
OS::PrintErr("*** Deleting VM handle block 0x%" Px "\n",
reinterpret_cast<intptr_t>(this));
}
#endif
}
~VMHandles();
// Visit all object pointers stored in the various handles.
void VisitObjectPointers(ObjectPointerVisitor* visitor);

@ -81,7 +81,6 @@ uword Handles<kHandleSizeInWords, kHandlesPerChunk, kOffsetOfRawPtr>::
AllocateHandle(Zone* zone) {
#if defined(DEBUG)
Thread* thread = Thread::Current();
ASSERT(thread->top_handle_scope() != NULL);
ASSERT(thread->MayAllocateHandles());
#endif // DEBUG
Handles* handles = zone->handles();

@ -592,7 +592,6 @@ VM_UNIT_TEST_CASE(CleanupBequestNeverReceived) {
Thread* thread = Thread::Current();
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
String& string = String::Handle(String::New(TEST_MESSAGE));
PersistentHandle* handle =
@ -626,7 +625,6 @@ VM_UNIT_TEST_CASE(ReceivesSendAndExitMessage) {
Thread* thread = Thread::Current();
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
String& string = String::Handle(String::New(TEST_MESSAGE));
@ -644,7 +642,6 @@ VM_UNIT_TEST_CASE(ReceivesSendAndExitMessage) {
Thread* thread = Thread::Current();
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
EXPECT_EQ(MessageHandler::kOK, handler.HandleNextMessage());
}

@ -174,7 +174,6 @@ class RunKernelTask : public ThreadPool::Task {
Thread* T = Thread::Current();
ASSERT(I == T->isolate());
StackZone zone(T);
HANDLESCOPE(T);
// Invoke main which will return the port to which load requests are sent.
const Library& root_library =
Library::Handle(Z, I->group()->object_store()->root_library());

@ -53,7 +53,6 @@ VM_UNIT_TEST_CASE(Metric_OnDemand) {
Thread* thread = Thread::Current();
TransitionNativeToVM transition(thread);
StackZone zone(thread);
HANDLESCOPE(thread);
MyMetric metric;
metric.InitInstance(Isolate::Current(), "a.b.c", "foobar", Metric::kByte);

@ -1777,7 +1777,6 @@ void ProfilerService::PrintJSONImpl(Thread* thread,
ASSERT(buffer != nullptr);
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
profile.Build(thread, filter, buffer);
profile.PrintProfileJSON(stream, include_code_samples);

@ -501,7 +501,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TrivialRecordAllocation) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
// Filter for the class in the time range.
AllocationFilter filter(isolate->main_port(), class_a.id(),
@ -531,7 +530,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TrivialRecordAllocation) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id(),
Dart_TimelineGetMicros(), 16000);
@ -578,7 +576,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_NativeAllocation) {
{
Thread* thread = Thread::Current();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
// Filter for the class in the time range.
@ -617,7 +614,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_NativeAllocation) {
{
Thread* thread = Thread::Current();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
// Filter for the class in the time range.
@ -632,7 +628,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_NativeAllocation) {
{
Thread* thread = Thread::Current();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
NativeAllocationSampleFilter filter(Dart_TimelineGetMicros(), 16000);
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -678,7 +673,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -695,7 +689,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -725,7 +718,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ToggleRecordAllocation) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -763,7 +755,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_CodeTicks) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -783,7 +774,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_CodeTicks) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -838,7 +828,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionTicks) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -858,7 +847,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionTicks) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -908,7 +896,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -921,7 +908,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -943,7 +929,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_IntrinsicAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), double_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -970,7 +955,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -983,7 +967,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1005,7 +988,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1027,7 +1009,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), array_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1057,7 +1038,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1070,7 +1050,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1090,7 +1069,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ContextAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), context_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1130,7 +1108,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ClosureAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), closure_class.id());
filter.set_enable_vm_ticks(true);
@ -1154,7 +1131,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ClosureAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), closure_class.id());
filter.set_enable_vm_ticks(true);
@ -1185,7 +1161,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1198,7 +1173,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1220,7 +1194,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1233,7 +1206,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_TypedArrayAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), float32_list_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1265,7 +1237,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1278,7 +1249,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1298,7 +1268,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1311,7 +1280,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringAllocation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1343,7 +1311,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1356,7 +1323,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1382,7 +1348,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1395,7 +1360,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_StringInterpolation) {
{
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), one_byte_string_class.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1451,7 +1415,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionInline) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1469,7 +1432,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_FunctionInline) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1598,7 +1560,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_InliningIntervalBoundry) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1615,7 +1576,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_InliningIntervalBoundry) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1691,7 +1651,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_ChainedSamples) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1786,7 +1745,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BasicSourcePosition) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1868,7 +1826,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BasicSourcePositionOptimized) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -1946,7 +1903,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_SourcePosition) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -2056,7 +2012,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_SourcePositionOptimized) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -2151,7 +2106,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BinaryOperatorSourcePosition) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());
@ -2269,7 +2223,6 @@ ISOLATE_UNIT_TEST_CASE(Profiler_BinaryOperatorSourcePositionOptimized) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
Profile profile;
AllocationFilter filter(isolate->main_port(), class_a.id());
profile.Build(thread, &filter, Profiler::sample_block_buffer());

@ -1504,7 +1504,6 @@ class ProgramHashVisitor : public CodeVisitor {
uint32_t ProgramVisitor::Hash(Thread* thread) {
StackZone stack_zone(thread);
HANDLESCOPE(thread);
Zone* zone = thread->zone();
ProgramHashVisitor visitor(zone);

@ -3265,7 +3265,6 @@ DEFINE_LEAF_RUNTIME_ENTRY(intptr_t,
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
// All registers have been saved below last-fp as if they were locals.
const uword last_fp =
@ -3337,7 +3336,6 @@ DEFINE_LEAF_RUNTIME_ENTRY(void, DeoptimizeFillFrame, 1, uword last_fp) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
StackZone zone(thread);
HANDLESCOPE(thread);
DeoptContext* deopt_context = isolate->deopt_context();
DartFrameIterator iterator(last_fp, thread,

@ -120,7 +120,6 @@ class RuntimeEntry : public BaseRuntimeEntry {
Isolate* isolate = thread->isolate(); \
TransitionGeneratedToVM transition(thread); \
StackZone zone(thread); \
HANDLESCOPE(thread); \
CHECK_SIMULATOR_STACK_OVERFLOW(); \
if (FLAG_deoptimize_on_runtime_call_every > 0) { \
OnEveryRuntimeEntryCall(thread, "" #name, can_lazy_deopt); \

@ -844,7 +844,6 @@ void Service::PostError(const String& method_name,
const Error& error) {
Thread* T = Thread::Current();
StackZone zone(T);
HANDLESCOPE(T);
JSONStream js;
js.Setup(zone.GetZone(), SendPort::Cast(reply_port).Id(), id, method_name,
parameter_keys, parameter_values);
@ -865,7 +864,6 @@ ErrorPtr Service::InvokeMethod(Isolate* I,
{
StackZone zone(T);
HANDLESCOPE(T);
Instance& reply_port = Instance::Handle(Z);
Instance& seq = String::Handle(Z);
@ -3227,7 +3225,6 @@ static void GetInstances(Thread* thread, JSONStream* js) {
// Ensure the array and handles created below are promptly destroyed.
StackZone zone(thread);
HANDLESCOPE(thread);
ZoneGrowableHandlePtrArray<Object> storage(thread->zone(), limit);
GetInstancesVisitor visitor(&storage, limit);
@ -3278,7 +3275,6 @@ static void GetInstancesAsArray(Thread* thread, JSONStream* js) {
Array& instances = Array::Handle();
{
StackZone zone(thread);
HANDLESCOPE(thread);
ZoneGrowableHandlePtrArray<Object> storage(thread->zone(), 1024);
GetInstancesVisitor visitor(&storage, kSmiMax);
@ -3306,7 +3302,6 @@ static const MethodParameter* const get_ports_params[] = {
static void GetPorts(Thread* thread, JSONStream* js) {
// Ensure the array and handles created below are promptly destroyed.
StackZone zone(thread);
HANDLESCOPE(thread);
const GrowableObjectArray& ports = GrowableObjectArray::Handle(
GrowableObjectArray::RawCast(DartLibraryCalls::LookupOpenPorts()));
JSONObject jsobj(js);
@ -4896,8 +4891,6 @@ void Service::PrintJSONForVM(JSONStream* js, bool ref) {
#endif
jsobj.AddProperty("_features", Dart::FeaturesString(nullptr, true, kind));
jsobj.AddProperty("_profilerMode", FLAG_profile_vm ? "VM" : "Dart");
jsobj.AddProperty64("_nativeZoneMemoryUsage",
ApiNativeScope::current_memory_usage());
jsobj.AddProperty64("pid", OS::ProcessId());
jsobj.AddPropertyTimeMillis(
"startTime", OS::GetCurrentTimeMillis() - Dart::UptimeMillis());

@ -438,7 +438,6 @@ class RunServiceTask : public ThreadPool::Task {
Thread* T = Thread::Current();
ASSERT(I == T->isolate());
StackZone zone(T);
HANDLESCOPE(T);
// Invoke main which will set up the service port.
const Library& root_library =
Library::Handle(Z, I->group()->object_store()->root_library());

@ -9,21 +9,7 @@
namespace dart {
ThreadState::ThreadState(bool is_os_thread) : BaseThread(is_os_thread) {
// This thread should not yet own any zones. If it does, we need to make sure
// we've accounted for any memory it has already allocated.
if (zone_ == nullptr) {
ASSERT(current_zone_capacity_ == 0);
} else {
Zone* current = zone_;
uintptr_t total_zone_capacity = 0;
while (current != nullptr) {
total_zone_capacity += current->CapacityInBytes();
current = current->previous();
}
ASSERT(current_zone_capacity_ == total_zone_capacity);
}
}
ThreadState::ThreadState(bool is_os_thread) : BaseThread(is_os_thread) {}
ThreadState::~ThreadState() {}

@ -40,17 +40,6 @@ class ThreadState : public BaseThread {
bool ZoneIsOwnedByThread(Zone* zone) const;
void IncrementMemoryCapacity(uintptr_t value) {
current_zone_capacity_ += value;
}
void DecrementMemoryCapacity(uintptr_t value) {
ASSERT(current_zone_capacity_ >= value);
current_zone_capacity_ -= value;
}
uintptr_t current_zone_capacity() const { return current_zone_capacity_; }
StackResource* top_resource() const { return top_resource_; }
void set_top_resource(StackResource* value) { top_resource_ = value; }
static intptr_t top_resource_offset() {
@ -86,7 +75,6 @@ class ThreadState : public BaseThread {
OSThread* os_thread_ = nullptr;
Zone* zone_ = nullptr;
uintptr_t current_zone_capacity_ = 0;
StackResource* top_resource_ = nullptr;
LongJumpScope* long_jump_base_ = nullptr;

@ -121,7 +121,6 @@ class TaskWithZoneAllocation : public ThreadPool::Task {
Thread* thread = Thread::Current();
// Create a zone (which is also a stack resource) and exercise it a bit.
StackZone stack_zone(thread);
HANDLESCOPE(thread);
Zone* zone = thread->zone();
EXPECT_EQ(zone, stack_zone.GetZone());
ZoneGrowableArray<bool>* a0 = new (zone) ZoneGrowableArray<bool>(zone, 1);
@ -255,7 +254,6 @@ class SimpleTaskWithZoneAllocation : public ThreadPool::Task {
*thread_ptr_ = thread;
StackZone stack_zone(thread);
HANDLESCOPE(thread);
Zone* zone = thread->zone();
EXPECT_EQ(zone, stack_zone.GetZone());
@ -390,7 +388,6 @@ class ICDataTestTask : public ThreadPool::Task {
{
StackZone stack_zone(thread);
HANDLESCOPE(thread);
ICData& ic_data = ICData::Handle();
Array& arr = Array::Handle();
@ -536,7 +533,6 @@ class SafepointTestTask : public ThreadPool::Task {
for (int i = reinterpret_cast<intptr_t>(thread);; ++i) {
StackZone stack_zone(thread);
Zone* zone = thread->zone();
HANDLESCOPE(thread);
const intptr_t kUniqueSmi = 928327281;
Smi& smi = Smi::Handle(zone, Smi::New(kUniqueSmi));
if ((i % 100) != 0) {
@ -852,7 +848,6 @@ class AllocAndGCTask : public ThreadPool::Task {
Thread* thread = Thread::Current();
StackZone stack_zone(thread);
Zone* zone = stack_zone.GetZone();
HANDLESCOPE(thread);
String& old_str = String::Handle(zone, String::New("old", Heap::kOld));
isolate_->group()->heap()->CollectAllGarbage();
EXPECT(old_str.Equals("old"));
@ -900,7 +895,6 @@ class AllocateGlobsOfMemoryTask : public ThreadPool::Task {
Thread* thread = Thread::Current();
StackZone stack_zone(thread);
Zone* zone = stack_zone.GetZone();
HANDLESCOPE(thread);
int count = 100 * 1000;
while (count-- > 0) {
String::Handle(zone, String::New("abc"));

@ -33,8 +33,6 @@ class Zone::Segment {
// Allocate or delete individual segments.
static Segment* New(intptr_t size, Segment* next);
static void DeleteSegmentList(Segment* segment);
static void IncrementMemoryCapacity(uintptr_t size);
static void DecrementMemoryCapacity(uintptr_t size);
private:
Segment* next_;
@ -107,7 +105,6 @@ Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
LSAN_REGISTER_ROOT_REGION(result, sizeof(*result));
IncrementMemoryCapacity(size);
return result;
}
@ -115,7 +112,6 @@ void Zone::Segment::DeleteSegmentList(Segment* head) {
Segment* current = head;
while (current != NULL) {
intptr_t size = current->size();
DecrementMemoryCapacity(size);
Segment* next = current->next();
VirtualMemory* memory = current->memory();
#ifdef DEBUG
@ -141,38 +137,13 @@ void Zone::Segment::DeleteSegmentList(Segment* head) {
}
}
void Zone::Segment::IncrementMemoryCapacity(uintptr_t size) {
ThreadState* current_thread = ThreadState::Current();
if (current_thread != NULL) {
current_thread->IncrementMemoryCapacity(size);
} else if (ApiNativeScope::Current() != NULL) {
// If there is no current thread, we might be inside of a native scope.
ApiNativeScope::IncrementNativeScopeMemoryCapacity(size);
}
}
void Zone::Segment::DecrementMemoryCapacity(uintptr_t size) {
ThreadState* current_thread = ThreadState::Current();
if (current_thread != NULL) {
current_thread->DecrementMemoryCapacity(size);
} else if (ApiNativeScope::Current() != NULL) {
// If there is no current thread, we might be inside of a native scope.
ApiNativeScope::DecrementNativeScopeMemoryCapacity(size);
}
}
// TODO(bkonyi): We need to account for the initial chunk size when a new zone
// is created within a new thread or ApiNativeScope when calculating high
// watermarks or memory consumption.
Zone::Zone()
: position_(reinterpret_cast<uword>(&buffer_)),
limit_(position_ + kInitialChunkSize),
head_(NULL),
large_segments_(NULL),
previous_(NULL),
segments_(nullptr),
previous_(nullptr),
handles_() {
ASSERT(Utils::IsAligned(position_, kAlignment));
Segment::IncrementMemoryCapacity(kInitialChunkSize);
#ifdef DEBUG
// Zap the entire initial buffer.
memset(&buffer_, kZapUninitializedByte, kInitialChunkSize);
@ -181,70 +152,57 @@ Zone::Zone()
Zone::~Zone() {
if (FLAG_trace_zones) {
DumpZoneSizes();
Print();
}
DeleteAll();
Segment::DecrementMemoryCapacity(kInitialChunkSize);
Segment::DeleteSegmentList(segments_);
}
void Zone::DeleteAll() {
void Zone::Reset() {
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every zone segment.
if (head_ != NULL) {
Segment::DeleteSegmentList(head_);
}
if (large_segments_ != NULL) {
Segment::DeleteSegmentList(large_segments_);
}
// Reset zone state.
Segment::DeleteSegmentList(segments_);
segments_ = nullptr;
#ifdef DEBUG
memset(&buffer_, kZapDeletedByte, kInitialChunkSize);
#endif
position_ = reinterpret_cast<uword>(&buffer_);
limit_ = position_ + kInitialChunkSize;
size_ = 0;
small_segment_capacity_ = 0;
head_ = NULL;
large_segments_ = NULL;
previous_ = NULL;
previous_ = nullptr;
handles_.Reset();
}
uintptr_t Zone::SizeInBytes() const {
uintptr_t size = 0;
for (Segment* s = large_segments_; s != NULL; s = s->next()) {
size += s->size();
}
if (head_ == NULL) {
return size + (position_ - reinterpret_cast<uword>(&buffer_));
}
size += kInitialChunkSize;
for (Segment* s = head_->next(); s != NULL; s = s->next()) {
size += s->size();
}
return size + (position_ - head_->start());
return size_;
}
uintptr_t Zone::CapacityInBytes() const {
uintptr_t size = 0;
for (Segment* s = large_segments_; s != NULL; s = s->next()) {
size += s->size();
}
if (head_ == NULL) {
return size + kInitialChunkSize;
}
size += kInitialChunkSize;
for (Segment* s = head_; s != NULL; s = s->next()) {
uintptr_t size = kInitialChunkSize;
for (Segment* s = segments_; s != nullptr; s = s->next()) {
size += s->size();
}
return size;
}
void Zone::Print() const {
intptr_t segment_size = CapacityInBytes();
intptr_t scoped_handle_size = handles_.ScopedHandlesCapacityInBytes();
intptr_t zone_handle_size = handles_.ZoneHandlesCapacityInBytes();
intptr_t total_size = segment_size + scoped_handle_size + zone_handle_size;
OS::PrintErr("Zone(%p, segments: %" Pd ", scoped_handles: %" Pd
", zone_handles: %" Pd ", total: %" Pd ")\n",
this, segment_size, scoped_handle_size, zone_handle_size,
total_size);
}
uword Zone::AllocateExpand(intptr_t size) {
ASSERT(size >= 0);
if (FLAG_trace_zones) {
OS::PrintErr("*** Expanding zone 0x%" Px "\n",
reinterpret_cast<intptr_t>(this));
DumpZoneSizes();
Print();
}
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
@ -274,13 +232,14 @@ uword Zone::AllocateExpand(intptr_t size) {
ASSERT(next_size >= kSegmentSize);
// Allocate another segment and chain it up.
head_ = Segment::New(next_size, head_);
segments_ = Segment::New(next_size, segments_);
small_segment_capacity_ += next_size;
// Recompute 'position' and 'limit' based on the new head segment.
uword result = Utils::RoundUp(head_->start(), kAlignment);
uword result = Utils::RoundUp(segments_->start(), kAlignment);
position_ = result + size;
limit_ = head_->end();
limit_ = segments_->end();
size_ += size;
ASSERT(position_ <= limit_);
return result;
}
@ -295,10 +254,11 @@ uword Zone::AllocateLargeSegment(intptr_t size) {
// Create a new large segment and chain it up.
// Account for book keeping fields in size.
size_ += size;
size += Utils::RoundUp(sizeof(Segment), kAlignment);
large_segments_ = Segment::New(size, large_segments_);
segments_ = Segment::New(size, segments_);
uword result = Utils::RoundUp(large_segments_->start(), kAlignment);
uword result = Utils::RoundUp(segments_->start(), kAlignment);
return result;
}
@ -337,17 +297,6 @@ char* Zone::ConcatStrings(const char* a, const char* b, char join) {
return copy;
}
void Zone::DumpZoneSizes() {
intptr_t size = 0;
for (Segment* s = large_segments_; s != NULL; s = s->next()) {
size += s->size();
}
OS::PrintErr("*** Zone(0x%" Px
") size in bytes,"
" Total = %" Pd " Large Segments = %" Pd "\n",
reinterpret_cast<intptr_t>(this), SizeInBytes(), size);
}
void Zone::VisitObjectPointers(ObjectPointerVisitor* visitor) {
Zone* zone = this;
while (zone != NULL) {

@ -60,13 +60,15 @@ class Zone {
char* PrintToString(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
char* VPrint(const char* format, va_list args);
// Compute the total size of this zone. This includes wasted space that is
// due to internal fragmentation in the segments.
// Compute the total size of allocations in this zone.
uintptr_t SizeInBytes() const;
// Computes the amount of space used in the zone.
// Computes the amount of space used by the zone.
uintptr_t CapacityInBytes() const;
// Dump the current allocated sizes in the zone object.
void Print() const;
// Structure for managing handles allocation.
VMHandles* handles() { return &handles_; }
@ -95,7 +97,7 @@ class Zone {
~Zone(); // Delete all memory associated with the zone.
// Default initial chunk size.
static const intptr_t kInitialChunkSize = 1 * KB;
static const intptr_t kInitialChunkSize = 128;
// Default segment size.
static const intptr_t kSegmentSize = 64 * KB;
@ -119,7 +121,7 @@ class Zone {
void Link(Zone* current_zone) { previous_ = current_zone; }
// Delete all objects and free all memory allocated in the zone.
void DeleteAll();
void Reset();
// Does not actually free any memory. Enables templated containers like
// BaseGrowableArray to use different allocators.
@ -134,9 +136,6 @@ class Zone {
#endif
}
// Dump the current allocated sizes in the zone object.
void DumpZoneSizes();
// Overflow check (FATAL) for array length.
template <class ElementType>
static inline void CheckLength(intptr_t len);
@ -152,14 +151,14 @@ class Zone {
// implementation is in zone.cc.
class Segment;
// Total size of all allocations in this zone.
intptr_t size_ = 0;
// Total size of all segments in [head_].
intptr_t small_segment_capacity_ = 0;
// The current head segment; may be NULL.
Segment* head_;
// List of large segments allocated in this zone; may be NULL.
Segment* large_segments_;
// List of all segments allocated in this zone; may be NULL.
Segment* segments_;
// Used for chaining zones in order to allow unwinding of stacks.
Zone* previous_;
@ -243,6 +242,7 @@ inline uword Zone::AllocUnsafe(intptr_t size) {
if (free_size >= size) {
result = position_;
position_ += size;
size_ += size;
} else {
result = AllocateExpand(size);
}
@ -284,6 +284,7 @@ inline ElementType* Zone::Realloc(ElementType* old_data,
if (new_end <= limit_) {
ASSERT(new_len >= old_len);
position_ = Utils::RoundUp(new_end, kAlignment);
size_ += (new_end - old_end);
return old_data;
}
}

@ -165,21 +165,6 @@ TEST_CASE(PrintToString) {
EXPECT_STREQ("Hello World!", result);
}
VM_UNIT_TEST_CASE(NativeScopeZoneAllocation) {
ASSERT(ApiNativeScope::Current() == NULL);
ASSERT(Thread::Current() == NULL);
EXPECT_EQ(0UL, ApiNativeScope::current_memory_usage());
{
ApiNativeScope scope;
EXPECT_EQ(scope.zone()->CapacityInBytes(),
ApiNativeScope::current_memory_usage());
(void)Dart_ScopeAllocate(2048);
EXPECT_EQ(scope.zone()->CapacityInBytes(),
ApiNativeScope::current_memory_usage());
}
EXPECT_EQ(0UL, ApiNativeScope::current_memory_usage());
}
#if !defined(PRODUCT)
// Allow for pooling in the malloc implementation.
static const int64_t kRssSlack = 20 * MB;