Revert "[vm, gc] Address data race when recycling TLABs."

This reverts commit 8241779d83.

Revert "[vm, gc] Divide new-space into pages like old-space."

This reverts commit 3495158ec9.

Revert "[vm, gc] Set the scavenger's forwarding bit to equal to the pointer tag bit."

This reverts commit 6e37605661.

Bug: b/155227688
Change-Id: I7e26bd86e188fb3658b82ba311a261468db96b78
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/145444
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Ryan Macnak 2020-04-29 19:47:24 +00:00 committed by commit-bot@chromium.org
parent 70f8e1810c
commit 3388e5765b
7 changed files with 530 additions and 445 deletions

@@ -776,6 +776,9 @@ bool Heap::VerifyGC(MarkExpectation mark_expectation) {
auto thread = Thread::Current();
StackZone stack_zone(thread);
// Sync the new space's top_ with the thread's more up-to-date view of top_.
new_space_.MakeNewSpaceIterable();
ObjectSet* allocated_set =
CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
VerifyPointersVisitor visitor(isolate_group(), allocated_set);
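
The MakeNewSpaceIterable() call is needed because each scheduled mutator bump-allocates from its own TLAB, so the scavenger's space-wide top_ lags behind the threads' views and the unused TLAB tails are not yet walkable. A hedged sketch of the per-TLAB step (MakeTLABIterable is declared in the scavenger.h hunk below; using a forwarding-corpse filler is an assumption here, since the real body lives in the suppressed scavenger.cc diff):

  void Scavenger::MakeTLABIterable(const TLAB& tlab) {
    const intptr_t remaining = tlab.end - tlab.top;
    if (remaining > 0) {
      // Overwrite the unused tail with a filler object so heap iteration
      // can parse and step over it.
      ForwardingCorpse::AsForwarder(tlab.top, remaining);
    }
  }
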

File diff suppressed because it is too large.

@@ -12,6 +12,7 @@
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/heap/spaces.h"
#include "vm/heap/tlab.h"
#include "vm/lockers.h"
#include "vm/raw_object.h"
#include "vm/ring_buffer.h"
@@ -24,40 +25,47 @@ namespace dart {
class Heap;
class Isolate;
class JSONObject;
class NewPage;
class ObjectSet;
template <bool parallel>
class ScavengerVisitorBase;
// Wrapper around VirtualMemory that adds caching and handles the empty case.
class SemiSpace {
public:
static void Init();
static void Cleanup();
explicit SemiSpace(intptr_t max_capacity_in_words);
~SemiSpace();
// Get a space of the given size. Returns NULL on out of memory. If size is 0,
// returns an empty space: pointer(), start() and end() all return NULL.
// The name parameter may be NULL. If non-NULL it is used to give the OS a name
// for the underlying virtual memory region.
static SemiSpace* New(intptr_t size_in_words, const char* name);
NewPage* TryAllocatePageLocked(bool link);
// Hand back an unused space.
void Delete();
bool Contains(uword addr) const;
void* pointer() const { return region_.pointer(); }
uword start() const { return region_.start(); }
uword end() const { return region_.end(); }
intptr_t size_in_words() const {
return static_cast<intptr_t>(region_.size()) >> kWordSizeLog2;
}
bool Contains(uword address) const { return region_.Contains(address); }
// Set write protection mode for this space. The space must not be protected
// when Delete is called.
// TODO(koda): Remember protection mode in VirtualMemory and assert this.
void WriteProtect(bool read_only);
intptr_t capacity_in_words() const { return capacity_in_words_; }
intptr_t max_capacity_in_words() const { return max_capacity_in_words_; }
NewPage* head() const { return head_; }
void AddList(NewPage* head, NewPage* tail);
private:
// Size of NewPages in this semi-space.
intptr_t capacity_in_words_ = 0;
explicit SemiSpace(VirtualMemory* reserved);
~SemiSpace();
// Size of NewPages before we trigger a scavenge.
intptr_t max_capacity_in_words_;
VirtualMemory* reserved_; // NULL for an empty space.
MemoryRegion region_;
NewPage* head_ = nullptr;
NewPage* tail_ = nullptr;
static SemiSpace* cache_;
static Mutex* mutex_;
};
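
This hunk interleaves the removed page-based SemiSpace (NewPage, head_/tail_) with the restored reservation-based one (region_, cache_, mutex_). The static cache_/mutex_ pair in the restored version implies that a retired semi-space is kept around for reuse instead of being unmapped immediately. A hedged sketch of that caching, assuming New() prefers the cached reservation when it is large enough (the real body is in the suppressed part of the diff, and the VirtualMemory::Allocate signature is taken from vm/virtual_memory.h):

  SemiSpace* SemiSpace::New(intptr_t size_in_words, const char* name) {
    {
      MutexLocker locker(mutex_);
      // Reuse the cached reservation when it is big enough.
      if (cache_ != nullptr && cache_->size_in_words() >= size_in_words) {
        SemiSpace* result = cache_;
        cache_ = nullptr;
        return result;
      }
    }
    // Otherwise reserve fresh virtual memory; NULL signals out of memory.
    VirtualMemory* memory = VirtualMemory::Allocate(
        size_in_words << kWordSizeLog2, /*is_executable=*/false, name);
    return (memory == nullptr) ? nullptr : new SemiSpace(memory);
  }
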
// Statistics for a particular scavenge.
@@ -129,11 +137,28 @@ class Scavenger {
if (LIKELY(addr != 0)) {
return addr;
}
TryAllocateNewTLAB(thread, size);
TryAllocateNewTLAB(thread);
return TryAllocateFromTLAB(thread, size);
}
void AbandonRemainingTLAB(Thread* thread);
void MakeTLABIterable(const TLAB& tlab);
void AbandonRemainingTLABForDebugging(Thread* thread);
template <bool parallel>
bool TryAllocateNewTLAB(ScavengerVisitorBase<parallel>* visitor);
// When a thread gets scheduled it will try to acquire a TLAB.
void TryAcquireCachedTLAB(Thread* thread) {
MutexLocker ml(&space_lock_);
thread->set_tlab(TryAcquireCachedTLABLocked());
}
TLAB TryAcquireCachedTLABLocked();
// When a thread gets unscheduled it will release its TLAB.
void ReleaseAndCacheTLAB(Thread* thread) {
MutexLocker ml(&space_lock_);
CacheTLABLocked(thread->tlab());
thread->set_tlab(TLAB());
}
void CacheTLABLocked(TLAB tlab);
// Collect the garbage in this scavenger.
void Scavenge();
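
These two helpers pair with thread scheduling (see the isolate.cc hunk further down): a thread picks up a cached TLAB when it is scheduled and hands its TLAB back when it is unscheduled, so a partially used block is not abandoned just because the thread left the isolate. A hedged sketch of the locked halves, assuming free_tlabs_ works as a simple LIFO cache (the real bodies are in the suppressed scavenger.cc diff):

  TLAB Scavenger::TryAcquireCachedTLABLocked() {
    if (free_tlabs_.length() == 0) {
      return TLAB();  // Empty TLAB; the first allocation will request a new one.
    }
    return free_tlabs_.RemoveLast();
  }

  void Scavenger::CacheTLABLocked(TLAB tlab) {
    if (!tlab.IsAbandoned()) {
      free_tlabs_.Add(tlab);
    }
  }
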
@@ -141,11 +166,23 @@ class Scavenger {
// Promote all live objects.
void Evacuate();
// Report (TLAB) abandoned bytes that should be taken into account when
// deciding whether to grow new space or not.
void AddAbandonedInBytes(intptr_t value) {
MutexLocker ml(&space_lock_);
AddAbandonedInBytesLocked(value);
}
int64_t GetAndResetAbandonedInBytes() {
int64_t result = abandoned_;
abandoned_ = 0;
return result;
}
int64_t UsedInWords() const {
MutexLocker ml(&space_lock_);
return to_->capacity_in_words();
return (top_ - FirstObjectStart()) >> kWordSizeLog2;
}
int64_t CapacityInWords() const { return to_->max_capacity_in_words(); }
int64_t CapacityInWords() const { return to_->size_in_words(); }
int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; }
SpaceUsage GetCurrentUsage() const {
SpaceUsage usage;
@@ -155,8 +192,8 @@ class Scavenger {
return usage;
}
void VisitObjects(ObjectVisitor* visitor) const;
void VisitObjectPointers(ObjectPointerVisitor* visitor) const;
void VisitObjects(ObjectVisitor* visitor);
void VisitObjectPointers(ObjectPointerVisitor* visitor);
void AddRegionsToObjectSet(ObjectSet* set) const;
@@ -195,6 +232,8 @@ class Scavenger {
bool scavenging() const { return scavenging_; }
private:
static const intptr_t kTLABSize = 512 * KB;
// Ids for time and data records in Heap::GCStats.
enum {
// Time
@@ -214,20 +253,27 @@ class Scavenger {
uword TryAllocateFromTLAB(Thread* thread, intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
const uword result = thread->top();
const intptr_t remaining = thread->end() - result;
TLAB tlab = thread->tlab();
const intptr_t remaining = tlab.RemainingSize();
if (UNLIKELY(remaining < size)) {
return 0;
}
const uword result = tlab.top;
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
thread->set_top(result + size);
const uword new_top = tlab.top + size;
ASSERT(to_->Contains(new_top) || new_top == to_->end());
thread->set_tlab(tlab.BumpAllocate(size));
return result;
}
void TryAllocateNewTLAB(Thread* thread, intptr_t size);
void TryAllocateNewTLAB(Thread* thread);
void AddAbandonedInBytesLocked(intptr_t value) { abandoned_ += value; }
void AbandonTLABsLocked();
uword FirstObjectStart() const {
return to_->start() + kNewObjectAlignmentOffset;
}
SemiSpace* Prologue();
intptr_t ParallelScavenge(SemiSpace* from);
intptr_t SerialScavenge(SemiSpace* from);
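
The fast path above relies on every TLAB top staying at the new-object offset: FirstObjectStart() begins the space at start() + kNewObjectAlignmentOffset, and each bump adds a multiple of kObjectAlignment, so the offset the VM uses to distinguish new-space objects is preserved. A small worked check, assuming the usual 64-bit constants (kObjectAlignment = 16, kNewObjectAlignmentOffset = 8):

  uword top = 0x10000000 + kNewObjectAlignmentOffset;  // 0x10000008
  top += 32;                                           // 0x10000028: still 8 mod 16
  ASSERT((top & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
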
@@ -253,17 +299,33 @@ class Scavenger {
intptr_t NewSizeInWords(intptr_t old_size_in_words) const;
Heap* heap_;
uword top_;
uword end_;
MallocGrowableArray<TLAB> abandoned_tlabs_;
MallocGrowableArray<TLAB> free_tlabs_;
SemiSpace* to_;
Heap* heap_;
// A pointer to the first unscanned object. Scanning completes when
// this value meets the allocation top.
uword resolved_top_;
// Objects below this address have survived a scavenge.
uword survivor_end_;
// Abandoned (TLAB) bytes that need to be accounted for when deciding
// whether to grow new space or not.
intptr_t abandoned_ = 0;
PromotionStack promotion_stack_;
intptr_t max_semi_capacity_in_words_;
// Keep track whether a scavenge is currently running.
bool scavenging_;
bool early_tenure_ = false;
RelaxedAtomic<intptr_t> root_slices_started_;
StoreBufferBlock* blocks_;

runtime/vm/heap/tlab.h (new file, 37 lines)
@@ -0,0 +1,37 @@
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_HEAP_TLAB_H_
#define RUNTIME_VM_HEAP_TLAB_H_
#include "platform/assert.h"
#include "platform/globals.h"
namespace dart {
struct TLAB {
TLAB() : top(0), end(0) {}
TLAB(uword top, uword end) : top(top), end(end) {}
TLAB(const TLAB& other) : top(other.top), end(other.end) {}
TLAB& operator=(const TLAB& other) {
top = other.top;
end = other.end;
return *this;
}
intptr_t RemainingSize() const { return end - top; }
bool IsAbandoned() const { return top == 0 && end == 0; }
TLAB BumpAllocate(intptr_t size) const {
ASSERT(RemainingSize() >= size);
return TLAB(top + size, end);
}
uword top;
uword end;
};
} // namespace dart
#endif // RUNTIME_VM_HEAP_TLAB_H_
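
TLAB is a plain value type, so an allocation window can be copied around (cached, abandoned, re-acquired) without touching the owning Thread until set_tlab() publishes it. A hedged usage sketch (the addresses are illustrative only):

  TLAB tlab(/*top=*/0x10000008, /*end=*/0x10080000);  // ~512 KB window.
  if (tlab.RemainingSize() >= 32) {
    const uword object_addr = tlab.top;  // New object starts at the old top.
    tlab = tlab.BumpAllocate(32);        // Copy returned with top advanced by 32.
    USE(object_addr);
  }
  ASSERT(!tlab.IsAbandoned());           // Abandoned means top == end == 0.
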

@@ -272,17 +272,18 @@ DART_FORCE_INLINE static bool TryAllocate(Thread* thread,
ASSERT(instance_size > 0);
ASSERT(Utils::IsAligned(instance_size, kObjectAlignment));
const TLAB tlab = thread->tlab();
#ifndef PRODUCT
auto table = thread->isolate_group()->shared_class_table();
if (UNLIKELY(table->TraceAllocationFor(class_id))) {
return false;
}
#endif
const uword top = thread->top();
const intptr_t remaining = thread->end() - top;
const intptr_t remaining = tlab.RemainingSize();
if (LIKELY(remaining >= instance_size)) {
thread->set_top(top + instance_size);
*result = InitializeHeader(top, class_id, instance_size);
const uword old_top = tlab.top;
thread->set_tlab(tlab.BumpAllocate(instance_size));
*result = InitializeHeader(old_top, class_id, instance_size);
return true;
}
return false;

@@ -422,6 +422,8 @@ Thread* IsolateGroup::ScheduleThreadLocked(MonitorLocker* ml,
os_thread->set_thread(thread);
Thread::SetCurrent(thread);
os_thread->EnableThreadInterrupts();
thread->heap()->new_space()->TryAcquireCachedTLAB(thread);
}
return thread;
}
@@ -430,7 +432,7 @@ void IsolateGroup::UnscheduleThreadLocked(MonitorLocker* ml,
Thread* thread,
bool is_mutator,
bool bypass_safepoint) {
thread->heap()->new_space()->AbandonRemainingTLAB(thread);
thread->heap()->new_space()->ReleaseAndCacheTLAB(thread);
// Clear since GC will not visit the thread once it is unscheduled. Do this
// under the thread lock to prevent races with the GC visiting thread roots.

@@ -19,6 +19,7 @@
#include "vm/globals.h"
#include "vm/handles.h"
#include "vm/heap/pointer_block.h"
#include "vm/heap/tlab.h"
#include "vm/os_thread.h"
#include "vm/random.h"
#include "vm/runtime_entry_list.h"
@@ -494,10 +495,13 @@ class Thread : public ThreadState {
Heap* heap() const { return heap_; }
static intptr_t heap_offset() { return OFFSET_OF(Thread, heap_); }
uword top() const { return top_; }
uword end() const { return end_; }
void set_top(uword top) { top_ = top; }
void set_end(uword end) { end_ = end; }
void set_tlab(TLAB tlab) {
top_ = tlab.top;
end_ = tlab.end;
}
TLAB tlab() { return TLAB(top_, end_); }
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }
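
top_ and end_ remain raw Thread fields so generated code can keep loading them through top_offset()/end_offset(); the TLAB struct is only a value-type view over the pair. A hedged round-trip sketch:

  Thread* thread = Thread::Current();
  TLAB saved = thread->tlab();     // Snapshot of {top_, end_}.
  thread->set_tlab(TLAB());        // Park the thread with an empty TLAB.
  ASSERT(thread->tlab().IsAbandoned());
  thread->set_tlab(saved);         // Restore the original window.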