Revert "Reland "[vm, gc] Parallel scavenge.""

This reverts commit 898b080d09.

Reason for revert: Seems to cause crashes in g3, see b/153524644.

Original change's description:
> Reland "[vm, gc] Parallel scavenge."
> 
> Remove header access from typed data case in RawObject::HeapSizeFromClass.
> 
> Address data race between visiting remembered cards and allocating new large pages for promotion.
> 
> Change-Id: I92cc4604ed38ab8f42b87b140a26a8118e7030d4
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/142565
> Reviewed-by: Martin Kustermann <kustermann@google.com>
> Commit-Queue: Ryan Macnak <rmacnak@google.com>

TBR=kustermann@google.com,aam@google.com,rmacnak@google.com,asiva@google.com

Change-Id: I3ef7bd5c3d0ca575f33cf5f712f58476037d2d90
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/142861
Reviewed-by: Martin Kustermann <kustermann@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
Martin Kustermann, 2020-04-08 14:21:32 +00:00, committed by commit-bot@chromium.org
parent afd7efb860
commit 13c84d8794
23 changed files with 513 additions and 995 deletions


@ -27,10 +27,6 @@ On 64-bit architectures, the header of heap objects also contains a 32-bit ident
See [Cheney's algorithm](https://en.wikipedia.org/wiki/Cheney's_algorithm).
## Parallel Scavenge
FLAG_scavenger_tasks (default 2) workers are started on separate threads. Each worker competes to process parts of the root set (including the remembered set). When a worker copies an object to to-space, it allocates from a worker-local bump allocation region. The same worker will process the copied object. When a worker promotes an object to old-space, it allocates from a worker-local freelist, which uses bump allocation for large free blocks. The promoted object is added to a work list that implements work stealing, so some other worker may process the promoted object. After the object is evacuated, the worker uses a compare-and-swap to install the forwarding pointer into the from-space object's header. If it loses the race, it un-allocates the to-space or old-space object it just allocated, and uses the winner's object to update the pointer it was processing. Workers run until all of the work has been processed, and every worker has processed its to-space objects and its local part of the promoted work list.
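A rough, hypothetical sketch of the forwarding race described above (this is not the VM's actual code; `ObjectHeader`, `TryForward`, and `unallocate` are illustrative names only):

```c++
#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical sketch of the forwarding race between scavenger workers; none
// of these names are the VM's real identifiers.
struct ObjectHeader {
  std::atomic<std::uintptr_t> forwarding{0};  // 0 means "not yet forwarded".
};

// Each worker first copies the object into its own bump-allocated region,
// then races to publish the copy's address in the from-space header.
std::uintptr_t TryForward(ObjectHeader* from_header, std::uintptr_t my_copy,
                          std::size_t size,
                          void (*unallocate)(std::uintptr_t, std::size_t)) {
  std::uintptr_t expected = 0;
  if (from_header->forwarding.compare_exchange_strong(expected, my_copy)) {
    return my_copy;  // Won the race; this worker also processes the copy.
  }
  // Lost the race: undo the speculative allocation and use the winner's copy
  // to update the pointer currently being processed.
  unallocate(my_copy, size);
  return expected;  // Filled in by the failed CAS with the winner's address.
}
```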
## Mark-Sweep
All objects have a bit in their header called the mark bit. At the start of a collection cycle, all objects have this bit clear.
@ -55,8 +51,6 @@ To perform these operations, all mutators need to temporarily stop accessing the
Note that a mutator can be at a safepoint without being suspended. It might be performing a long task that doesn't access the heap. It will, however, need to wait for any safepoint operation to complete in order to leave its safepoint and resume accessing the heap.
Because a safepoint operation excludes execution of Dart code, it is sometimes used for non-GC tasks that require only this property. For example, when a background compilation has completed and wants to install its result, it uses a safepoint operation to ensure no Dart execution sees the intermediate states during installation.
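A purely illustrative sketch of that pattern (not the VM's API; the safepoint rendezvous itself is stubbed out and every name below is hypothetical). The point is that a multi-step installation becomes atomic with respect to Dart execution:

```c++
#include <cstdio>
#include <functional>

// Hypothetical stand-in for "run op while every mutator is parked at a
// safepoint"; the real rendezvous with mutator threads is elided here.
static void RunAsSafepointOperation(const std::function<void()>& op) {
  // ... request a safepoint and wait for all mutators to park ...
  op();  // no Dart code executes while this runs
  // ... release the mutators ...
}

int main() {
  const int* installed_code = nullptr;  // what Dart execution would observe
  static const int kCompiledCode[] = {1, 2, 3};
  // A background compilation has finished; install its result inside a
  // safepoint operation so no intermediate state is ever observable.
  RunAsSafepointOperation([&]() { installed_code = kCompiledCode; });
  std::printf("installed at %p\n", static_cast<const void*>(installed_code));
  return 0;
}
```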
## Concurrent Marking
To reduce the time the mutator is paused for old-space GCs, we allow the mutator to continue running during most of the marking work.


@ -251,6 +251,14 @@ typedef simd128_value_t fpu_register_t;
#error Automatic compiler detection failed.
#endif
#ifdef _MSC_VER
#define DART_FLATTEN
#elif __GNUC__
#define DART_FLATTEN __attribute__((flatten))
#else
#error Automatic compiler detection failed.
#endif
#ifdef _MSC_VER
#elif __GNUC__
#define DART_HAS_COMPUTED_GOTO 1


@ -19,10 +19,6 @@
// VMOptions=--concurrent_mark --concurrent_sweep
// VMOptions=--concurrent_mark --use_compactor
// VMOptions=--concurrent_mark --use_compactor --force_evacuation
// VMOptions=--scavenger_tasks=0
// VMOptions=--scavenger_tasks=1
// VMOptions=--scavenger_tasks=2
// VMOptions=--scavenger_tasks=3
// VMOptions=--verify_before_gc
// VMOptions=--verify_after_gc
// VMOptions=--verify_before_gc --verify_after_gc


@ -6075,16 +6075,13 @@ void Deserializer::Deserialize() {
class HeapLocker : public StackResource {
public:
HeapLocker(Thread* thread, PageSpace* page_space)
: StackResource(thread),
page_space_(page_space),
freelist_(page_space->DataFreeList()) {
page_space_->AcquireLock(freelist_);
: StackResource(thread), page_space_(page_space) {
page_space_->AcquireDataLock();
}
~HeapLocker() { page_space_->ReleaseLock(freelist_); }
~HeapLocker() { page_space_->ReleaseDataLock(); }
private:
PageSpace* page_space_;
FreeList* freelist_;
};
void Deserializer::AddVMIsolateBaseObjects() {


@ -143,9 +143,6 @@ constexpr bool kDartUseBackgroundCompilation = true;
P(link_natives_lazily, bool, false, "Link native calls lazily") \
R(log_marker_tasks, false, bool, false, \
"Log debugging information for old gen GC marking tasks.") \
P(scavenger_tasks, int, 2, \
"The number of tasks to spawn during scavenging (0 means " \
"perform all marking on main thread).") \
P(marker_tasks, int, 2, \
"The number of tasks to spawn during old gen GC marking (0 means " \
"perform all marking on main thread).") \


@ -54,7 +54,8 @@ intptr_t FreeListElement::HeaderSizeFor(intptr_t size) {
return ((size > RawObject::SizeTag::kMaxSizeTag) ? 3 : 2) * kWordSize;
}
FreeList::FreeList() : mutex_() {
FreeList::FreeList()
: mutex_(), freelist_search_budget_(kInitialFreeListSearchBudget) {
Reset();
}


@ -117,46 +117,6 @@ class FreeList {
return 0;
}
uword TryAllocateBumpLocked(intptr_t size) {
ASSERT(mutex_.IsOwnedByCurrentThread());
uword result = top_;
uword new_top = result + size;
if (new_top <= end_) {
top_ = new_top;
unaccounted_size_ += size;
return result;
}
return 0;
}
intptr_t TakeUnaccountedSizeLocked() {
ASSERT(mutex_.IsOwnedByCurrentThread());
intptr_t result = unaccounted_size_;
unaccounted_size_ = 0;
return result;
}
// Ensures HeapPage::VisitObjects can successfully walk over a partially
// allocated bump region.
void MakeIterable() {
if (top_ < end_) {
FreeListElement::AsElement(top_, end_ - top_);
}
}
// Returns the bump region to the free list.
void AbandonBumpAllocation() {
if (top_ < end_) {
Free(top_, end_ - top_);
top_ = 0;
end_ = 0;
}
}
uword top() const { return top_; }
uword end() const { return end_; }
void set_top(uword value) { top_ = value; }
void set_end(uword value) { end_ = value; }
void AddUnaccountedSize(intptr_t size) { unaccounted_size_ += size; }
void MergeOtherFreelist(FreeList* freelist, bool is_protected);
private:
@ -201,15 +161,6 @@ class FreeList {
void PrintSmall() const;
void PrintLarge() const;
// Bump pointer region.
uword top_ = 0;
uword end_ = 0;
// Allocated from the bump pointer region, but not yet added to
// PageSpace::usage_. Used to avoid expensive atomic adds during parallel
// scavenge.
intptr_t unaccounted_size_ = 0;
// Lock protecting the free list data structures.
mutable Mutex mutex_;
@ -217,7 +168,7 @@ class FreeList {
FreeListElement* free_lists_[kNumLists + 1];
intptr_t freelist_search_budget_ = kInitialFreeListSearchBudget;
intptr_t freelist_search_budget_;
// The largest available small size in bytes, or negative if there is none.
intptr_t last_free_small_size_;


@ -22,6 +22,65 @@
namespace dart {
class MarkerWorkList : public ValueObject {
public:
explicit MarkerWorkList(MarkingStack* marking_stack)
: marking_stack_(marking_stack) {
work_ = marking_stack_->PopEmptyBlock();
}
~MarkerWorkList() {
ASSERT(work_ == NULL);
ASSERT(marking_stack_ == NULL);
}
// Returns NULL if no more work was found.
RawObject* Pop() {
ASSERT(work_ != NULL);
if (work_->IsEmpty()) {
// TODO(koda): Track over/underflow events and use in heuristics to
// distribute work and prevent degenerate flip-flopping.
MarkingStack::Block* new_work = marking_stack_->PopNonEmptyBlock();
if (new_work == NULL) {
return NULL;
}
marking_stack_->PushBlock(work_);
work_ = new_work;
// Generated code appends to marking stacks; tell MemorySanitizer.
MSAN_UNPOISON(work_, sizeof(*work_));
}
return work_->Pop();
}
void Push(RawObject* raw_obj) {
if (work_->IsFull()) {
// TODO(koda): Track over/underflow events and use in heuristics to
// distribute work and prevent degenerate flip-flopping.
marking_stack_->PushBlock(work_);
work_ = marking_stack_->PopEmptyBlock();
}
work_->Push(raw_obj);
}
void Finalize() {
ASSERT(work_->IsEmpty());
marking_stack_->PushBlock(work_);
work_ = NULL;
// Fail fast on attempts to mark after finalizing.
marking_stack_ = NULL;
}
void AbandonWork() {
marking_stack_->PushBlock(work_);
work_ = NULL;
marking_stack_ = NULL;
}
private:
MarkingStack::Block* work_;
MarkingStack* marking_stack_;
};
template <bool sync>
class MarkingVisitorBase : public ObjectPointerVisitor {
public:
@ -437,7 +496,7 @@ void GCMarker::ProcessRememberedSet(Thread* thread) {
TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessRememberedSet");
// Filter collected objects from the remembered set.
StoreBuffer* store_buffer = isolate_group_->store_buffer();
StoreBufferBlock* reading = store_buffer->TakeBlocks();
StoreBufferBlock* reading = store_buffer->Blocks();
StoreBufferBlock* writing = store_buffer->PopNonFullBlock();
while (reading != NULL) {
StoreBufferBlock* next = reading->next();


@ -118,8 +118,7 @@ void HeapPage::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
}
void HeapPage::VisitRememberedCards(ObjectPointerVisitor* visitor) {
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kScavengerTask));
ASSERT(Thread::Current()->IsAtSafepoint());
NoSafepointScope no_safepoint;
if (card_table_ == NULL) {
@ -219,10 +218,11 @@ void HeapPage::WriteProtect(bool read_only) {
static const intptr_t kConservativeInitialMarkSpeed = 20;
PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
: heap_(heap),
num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1),
freelists_(new FreeList[num_freelists_]),
: freelist_(),
heap_(heap),
pages_lock_(),
bump_top_(0),
bump_end_(0),
max_capacity_in_words_(max_capacity_in_words),
usage_(),
allocated_black_in_words_(0),
@ -245,10 +245,6 @@ PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
// We aren't holding the lock but no one can reference us yet.
UpdateMaxCapacityLocked();
UpdateMaxUsed();
for (intptr_t i = 0; i < num_freelists_; i++) {
freelists_[i].Reset();
}
}
PageSpace::~PageSpace() {
@ -263,7 +259,6 @@ PageSpace::~PageSpace() {
FreePages(large_pages_);
FreePages(image_pages_);
ASSERT(marker_ == NULL);
delete[] freelists_;
}
intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
@ -463,7 +458,6 @@ void PageSpace::EvaluateConcurrentMarking(GrowthPolicy growth_policy) {
}
uword PageSpace::TryAllocateInFreshPage(intptr_t size,
FreeList* freelist,
HeapPage::PageType type,
GrowthPolicy growth_policy,
bool is_locked) {
@ -491,9 +485,9 @@ uword PageSpace::TryAllocateInFreshPage(intptr_t size,
intptr_t free_size = page->object_end() - free_start;
if (free_size > 0) {
if (is_locked) {
freelist->FreeLocked(free_start, free_size);
freelist_[type].FreeLocked(free_start, free_size);
} else {
freelist->Free(free_start, free_size);
freelist_[type].Free(free_start, free_size);
}
}
}
@ -530,7 +524,6 @@ uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
}
uword PageSpace::TryAllocateInternal(intptr_t size,
FreeList* freelist,
HeapPage::PageType type,
GrowthPolicy growth_policy,
bool is_protected,
@ -540,13 +533,12 @@ uword PageSpace::TryAllocateInternal(intptr_t size,
uword result = 0;
if (Heap::IsAllocatableViaFreeLists(size)) {
if (is_locked) {
result = freelist->TryAllocateLocked(size, is_protected);
result = freelist_[type].TryAllocateLocked(size, is_protected);
} else {
result = freelist->TryAllocate(size, is_protected);
result = freelist_[type].TryAllocate(size, is_protected);
}
if (result == 0) {
result = TryAllocateInFreshPage(size, freelist, type, growth_policy,
is_locked);
result = TryAllocateInFreshPage(size, type, growth_policy, is_locked);
// usage_ is updated by the call above.
} else {
usage_.used_in_words += (size >> kWordSizeLog2);
@ -559,16 +551,20 @@ uword PageSpace::TryAllocateInternal(intptr_t size,
return result;
}
void PageSpace::AcquireLock(FreeList* freelist) {
freelist->mutex()->Lock();
void PageSpace::AcquireDataLock() {
freelist_[HeapPage::kData].mutex()->Lock();
}
void PageSpace::ReleaseLock(FreeList* freelist) {
intptr_t size = freelist->TakeUnaccountedSizeLocked();
usage_.used_in_words += (size >> kWordSizeLog2);
freelist->mutex()->Unlock();
void PageSpace::ReleaseDataLock() {
freelist_[HeapPage::kData].mutex()->Unlock();
}
#if defined(DEBUG)
bool PageSpace::CurrentThreadOwnsDataLock() {
return freelist_[HeapPage::kData].mutex()->IsOwnedByCurrentThread();
}
#endif
void PageSpace::AllocateExternal(intptr_t cid, intptr_t size) {
intptr_t size_in_words = size >> kWordSizeLog2;
usage_.external_in_words += size_in_words;
@ -685,14 +681,16 @@ void PageSpace::MakeIterable() const {
// Assert not called from concurrent sweeper task.
// TODO(koda): Use thread/task identity when implemented.
ASSERT(IsolateGroup::Current()->heap() != NULL);
for (intptr_t i = 0; i < num_freelists_; i++) {
freelists_[i].MakeIterable();
if (bump_top_ < bump_end_) {
FreeListElement::AsElement(bump_top_, bump_end_ - bump_top_);
}
}
void PageSpace::AbandonBumpAllocation() {
for (intptr_t i = 0; i < num_freelists_; i++) {
freelists_[i].AbandonBumpAllocation();
if (bump_top_ < bump_end_) {
freelist_[HeapPage::kData].Free(bump_top_, bump_end_ - bump_top_);
bump_top_ = 0;
bump_end_ = 0;
}
}
@ -810,8 +808,7 @@ void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
}
void PageSpace::VisitRememberedCards(ObjectPointerVisitor* visitor) const {
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kScavengerTask));
ASSERT(Thread::Current()->IsAtSafepoint());
// Wait for the sweeper to finish mutating the large page list.
MonitorLocker ml(tasks_lock());
@ -819,21 +816,8 @@ void PageSpace::VisitRememberedCards(ObjectPointerVisitor* visitor) const {
ml.Wait(); // No safepoint check.
}
// Large pages may be added concurrently due to promotion in another scavenge
// worker, so terminate the traversal when we hit the tail we saw while
// holding the pages lock, instead of at NULL, otherwise we are racing when we
// read HeapPage::next_ and HeapPage::remembered_cards_.
HeapPage* page;
HeapPage* tail;
{
MutexLocker ml(&pages_lock_);
page = large_pages_;
tail = large_pages_tail_;
}
while (page != nullptr) {
for (HeapPage* page = large_pages_; page != nullptr; page = page->next()) {
page->VisitRememberedCards(visitor);
if (page == tail) break;
page = page->next();
}
}
@ -1121,10 +1105,10 @@ void PageSpace::CollectGarbageAtSafepoint(bool compact,
NoSafepointScope no_safepoints;
if (FLAG_print_free_list_before_gc) {
for (intptr_t i = 0; i < num_freelists_; i++) {
OS::PrintErr("Before GC: Freelist %" Pd "\n", i);
freelists_[i].Print();
}
OS::PrintErr("Data Freelist (before GC):\n");
freelist_[HeapPage::kData].Print();
OS::PrintErr("Executable Freelist (before GC):\n");
freelist_[HeapPage::kExecutable].Print();
}
if (FLAG_verify_before_gc) {
@ -1165,9 +1149,8 @@ void PageSpace::CollectGarbageAtSafepoint(bool compact,
// Abandon the remainder of the bump allocation block.
AbandonBumpAllocation();
// Reset the freelists and setup sweeping.
for (intptr_t i = 0; i < num_freelists_; i++) {
freelists_[i].Reset();
}
freelist_[HeapPage::kData].Reset();
freelist_[HeapPage::kExecutable].Reset();
int64_t mid2 = OS::GetCurrentMonotonicMicros();
int64_t mid3 = 0;
@ -1186,7 +1169,7 @@ void PageSpace::CollectGarbageAtSafepoint(bool compact,
GCSweeper sweeper;
HeapPage* prev_page = NULL;
HeapPage* page = exec_pages_;
FreeList* freelist = &freelists_[HeapPage::kExecutable];
FreeList* freelist = &freelist_[HeapPage::kExecutable];
MutexLocker ml(freelist->mutex());
while (page != NULL) {
HeapPage* next_page = page->next();
@ -1232,10 +1215,10 @@ void PageSpace::CollectGarbageAtSafepoint(bool compact,
heap_->RecordTime(kSweepLargePages, end - mid3);
if (FLAG_print_free_list_after_gc) {
for (intptr_t i = 0; i < num_freelists_; i++) {
OS::PrintErr("After GC: Freelist %" Pd "\n", i);
freelists_[i].Print();
}
OS::PrintErr("Data Freelist (after GC):\n");
freelist_[HeapPage::kData].Print();
OS::PrintErr("Executable Freelist (after GC):\n");
freelist_[HeapPage::kExecutable].Print();
}
UpdateMaxUsed();
@ -1268,21 +1251,14 @@ void PageSpace::Sweep() {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Sweep");
GCSweeper sweeper;
intptr_t shard = 0;
const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
for (intptr_t i = 0; i < num_shards; i++) {
DataFreeList(i)->mutex()->Lock();
}
HeapPage* prev_page = nullptr;
HeapPage* page = pages_;
FreeList* freelist = &freelist_[HeapPage::kData];
MutexLocker ml(freelist_->mutex());
while (page != nullptr) {
HeapPage* next_page = page->next();
ASSERT(page->type() == HeapPage::kData);
shard = (shard + 1) % num_shards;
bool page_in_use =
sweeper.SweepPage(page, DataFreeList(shard), true /*is_locked*/);
bool page_in_use = sweeper.SweepPage(page, freelist, true /*is_locked*/);
if (page_in_use) {
prev_page = page;
} else {
@ -1292,10 +1268,6 @@ void PageSpace::Sweep() {
page = next_page;
}
for (intptr_t i = 0; i < num_shards; i++) {
DataFreeList(i)->mutex()->Unlock();
}
if (FLAG_verify_after_gc) {
OS::PrintErr("Verifying after sweeping...");
heap_->VerifyGC(kForbidMarked);
@ -1306,13 +1278,13 @@ void PageSpace::Sweep() {
void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) {
// Start the concurrent sweeper task now.
GCSweeper::SweepConcurrent(isolate_group, pages_, pages_tail_, large_pages_,
large_pages_tail_, &freelists_[HeapPage::kData]);
large_pages_tail_, &freelist_[HeapPage::kData]);
}
void PageSpace::Compact(Thread* thread) {
thread->isolate_group()->set_compaction_in_progress(true);
GCCompactor compactor(thread, heap_);
compactor.Compact(pages_, &freelists_[HeapPage::kData], &pages_lock_);
compactor.Compact(pages_, &freelist_[HeapPage::kData], &pages_lock_);
thread->isolate_group()->set_compaction_in_progress(false);
if (FLAG_verify_after_gc) {
@ -1322,57 +1294,63 @@ void PageSpace::Compact(Thread* thread) {
}
}
uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
uword PageSpace::TryAllocateDataBumpLocked(intptr_t size) {
ASSERT(size >= kObjectAlignment);
ASSERT(Utils::IsAligned(size, kObjectAlignment));
intptr_t remaining = freelist->end() - freelist->top();
intptr_t remaining = bump_end_ - bump_top_;
if (UNLIKELY(remaining < size)) {
// Checking this first would be logical, but needlessly slow.
if (!Heap::IsAllocatableViaFreeLists(size)) {
return TryAllocateDataLocked(freelist, size, kForceGrowth);
return TryAllocateDataLocked(size, kForceGrowth);
}
FreeListElement* block = freelist->TryAllocateLargeLocked(size);
FreeListElement* block =
freelist_[HeapPage::kData].TryAllocateLargeLocked(size);
if (block == NULL) {
// Allocating from a new page (if growth policy allows) will have the
// side-effect of populating the freelist with a large block. The next
// bump allocation request will have a chance to consume that block.
// TODO(koda): Could take freelist lock just once instead of twice.
return TryAllocateInFreshPage(size, freelist, HeapPage::kData,
kForceGrowth, true /* is_locked*/);
return TryAllocateInFreshPage(size, HeapPage::kData, kForceGrowth,
true /* is_locked*/);
}
intptr_t block_size = block->HeapSize();
if (remaining > 0) {
freelist->FreeLocked(freelist->top(), remaining);
freelist_[HeapPage::kData].FreeLocked(bump_top_, remaining);
}
freelist->set_top(reinterpret_cast<uword>(block));
freelist->set_end(freelist->top() + block_size);
bump_top_ = reinterpret_cast<uword>(block);
bump_end_ = bump_top_ + block_size;
remaining = block_size;
}
ASSERT(remaining >= size);
uword result = freelist->top();
freelist->set_top(result + size);
uword result = bump_top_;
bump_top_ += size;
freelist->AddUnaccountedSize(size);
// No need for atomic operation: This is either running during a scavenge or
// isolate snapshot loading. Note that operator+= is atomic.
usage_.used_in_words = usage_.used_in_words + (size >> kWordSizeLog2);
// Note: Remaining block is unwalkable until MakeIterable is called.
#ifdef DEBUG
if (freelist->top() < freelist->end()) {
if (bump_top_ < bump_end_) {
// Fail fast if we try to walk the remaining block.
COMPILE_ASSERT(kIllegalCid == 0);
*reinterpret_cast<uword*>(freelist->top()) = 0;
*reinterpret_cast<uword*>(bump_top_) = 0;
}
#endif // DEBUG
return result;
}
uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
DART_FLATTEN
uword PageSpace::TryAllocatePromoLocked(intptr_t size) {
FreeList* freelist = &freelist_[HeapPage::kData];
uword result = freelist->TryAllocateSmallLocked(size);
if (result != 0) {
freelist->AddUnaccountedSize(size);
// No need for atomic operation: we're at a safepoint. Note that
// operator+= is atomic.
usage_.used_in_words = usage_.used_in_words + (size >> kWordSizeLog2);
return result;
}
return TryAllocateDataBumpLocked(freelist, size);
return TryAllocateDataBumpLocked(size);
}
void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
@ -1460,19 +1438,18 @@ static void EnsureEqualImagePages(HeapPage* pages, HeapPage* other_pages) {
void PageSpace::MergeOtherPageSpace(PageSpace* other) {
other->AbandonBumpAllocation();
ASSERT(other->bump_top_ == 0 && other->bump_end_ == 0);
ASSERT(other->tasks_ == 0);
ASSERT(other->concurrent_marker_tasks_ == 0);
ASSERT(other->phase_ == kDone);
DEBUG_ASSERT(other->iterating_thread_ == nullptr);
ASSERT(other->marker_ == nullptr);
for (intptr_t i = 0; i < num_freelists_; ++i) {
ASSERT(other->freelists_[i].top() == 0);
ASSERT(other->freelists_[i].end() == 0);
for (intptr_t i = 0; i < HeapPage::kNumPageTypes; ++i) {
const bool is_protected =
FLAG_write_protect_code && i == HeapPage::kExecutable;
freelists_[i].MergeOtherFreelist(&other->freelists_[i], is_protected);
other->freelists_[i].Reset();
freelist_[i].MergeOtherFreelist(&other->freelist_[i], is_protected);
other->freelist_[i].Reset();
}
// The freelist locks will be taken in MergeOtherFreelist above, and the


@ -39,7 +39,7 @@ static const intptr_t kBlocksPerPage = kPageSize / kBlockSize;
// A page containing old generation objects.
class HeapPage {
public:
enum PageType { kExecutable = 0, kData };
enum PageType { kData = 0, kExecutable, kNumPageTypes };
HeapPage* next() const { return next_; }
void set_next(HeapPage* next) { next_ = next; }
@ -303,8 +303,8 @@ class PageSpace {
bool is_protected =
(type == HeapPage::kExecutable) && FLAG_write_protect_code;
bool is_locked = false;
return TryAllocateInternal(size, &freelists_[type], type, growth_policy,
is_protected, is_locked);
return TryAllocateInternal(size, type, growth_policy, is_protected,
is_locked);
}
bool NeedsGarbageCollection() const {
@ -415,18 +415,16 @@ class PageSpace {
void FreeExternal(intptr_t size);
// Bulk data allocation.
FreeList* DataFreeList(intptr_t i = 0) {
return &freelists_[HeapPage::kData + i];
}
void AcquireLock(FreeList* freelist);
void ReleaseLock(FreeList* freelist);
void AcquireDataLock();
void ReleaseDataLock();
#if defined(DEBUG)
bool CurrentThreadOwnsDataLock();
#endif
uword TryAllocateDataLocked(FreeList* freelist,
intptr_t size,
GrowthPolicy growth_policy) {
uword TryAllocateDataLocked(intptr_t size, GrowthPolicy growth_policy) {
bool is_protected = false;
bool is_locked = true;
return TryAllocateInternal(size, freelist, HeapPage::kData, growth_policy,
return TryAllocateInternal(size, HeapPage::kData, growth_policy,
is_protected, is_locked);
}
@ -445,19 +443,9 @@ class PageSpace {
void set_phase(Phase val) { phase_ = val; }
// Attempt to allocate from bump block rather than normal freelist.
uword TryAllocateDataBumpLocked(intptr_t size) {
return TryAllocateDataBumpLocked(&freelists_[HeapPage::kData], size);
}
uword TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size);
DART_FORCE_INLINE
uword TryAllocatePromoLocked(FreeList* freelist, intptr_t size) {
uword result = freelist->TryAllocateBumpLocked(size);
if (result != 0) {
return result;
}
return TryAllocatePromoLockedSlow(freelist, size);
}
uword TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size);
uword TryAllocateDataBumpLocked(intptr_t size);
// Prefer small freelist blocks, then chip away at the bump block.
uword TryAllocatePromoLocked(intptr_t size);
void SetupImagePage(void* pointer, uword size, bool is_executable);
@ -493,13 +481,11 @@ class PageSpace {
};
uword TryAllocateInternal(intptr_t size,
FreeList* freelist,
HeapPage::PageType type,
GrowthPolicy growth_policy,
bool is_protected,
bool is_locked);
uword TryAllocateInFreshPage(intptr_t size,
FreeList* freelist,
HeapPage::PageType type,
GrowthPolicy growth_policy,
bool is_locked);
@ -549,15 +535,9 @@ class PageSpace {
(increase_in_words <= free_capacity_in_words));
}
Heap* const heap_;
FreeList freelist_[HeapPage::kNumPageTypes];
// One list for executable pages at freelists_[HeapPage::kExecutable].
// FLAG_scavenger_tasks count of lists for data pages starting at
// freelists_[HeapPage::kData]. The sweeper inserts into the data page
// lists round-robin. The scavenger workers each use one of them without
// locking.
const intptr_t num_freelists_;
FreeList* freelists_;
Heap* heap_;
// Use ExclusivePageIterator for safe access to these.
mutable Mutex pages_lock_;
@ -569,6 +549,11 @@ class PageSpace {
HeapPage* large_pages_tail_ = nullptr;
HeapPage* image_pages_ = nullptr;
// A block of memory in a data page, managed by bump allocation. The remainder
// is kept formatted as a FreeListElement, but is not in any freelist.
uword bump_top_;
uword bump_end_;
// Various sizes being tracked for this generation.
intptr_t max_capacity_in_words_;


@ -69,7 +69,7 @@ void BlockStack<BlockSize>::Reset() {
}
template <int BlockSize>
typename BlockStack<BlockSize>::Block* BlockStack<BlockSize>::TakeBlocks() {
typename BlockStack<BlockSize>::Block* BlockStack<BlockSize>::Blocks() {
MutexLocker ml(&mutex_);
while (!partial_.IsEmpty()) {
full_.Push(partial_.Pop());


@ -24,7 +24,7 @@ class PointerBlock {
void Reset() {
top_ = 0;
next_ = nullptr;
next_ = NULL;
}
PointerBlock<Size>* next() const { return next_; }
@ -64,7 +64,7 @@ class PointerBlock {
void VisitObjectPointers(ObjectPointerVisitor* visitor);
private:
PointerBlock() : next_(nullptr), top_(0) {}
PointerBlock() : next_(NULL), top_(0) {}
~PointerBlock() {
ASSERT(IsEmpty()); // Guard against unintentionally discarding pointers.
}
@ -100,7 +100,7 @@ class BlockStack {
Block* PopNonEmptyBlock();
// Pops and returns all non-empty blocks as a linked list (owned by caller).
Block* TakeBlocks();
Block* Blocks();
// Discards the contents of all non-empty blocks.
void Reset();
@ -110,12 +110,12 @@ class BlockStack {
protected:
class List {
public:
List() : head_(nullptr), length_(0) {}
List() : head_(NULL), length_(0) {}
~List();
void Push(Block* block);
Block* Pop();
intptr_t length() const { return length_; }
bool IsEmpty() const { return head_ == nullptr; }
bool IsEmpty() const { return head_ == NULL; }
Block* PopAll();
Block* Peek() { return head_; }
@ -144,74 +144,6 @@ class BlockStack {
DISALLOW_COPY_AND_ASSIGN(BlockStack);
};
template <typename Stack>
class BlockWorkList : public ValueObject {
public:
typedef typename Stack::Block Block;
explicit BlockWorkList(Stack* stack) : stack_(stack) {
work_ = stack_->PopEmptyBlock();
}
~BlockWorkList() {
ASSERT(work_ == nullptr);
ASSERT(stack_ == nullptr);
}
// Returns nullptr if no more work was found.
RawObject* Pop() {
ASSERT(work_ != nullptr);
if (work_->IsEmpty()) {
// TODO(koda): Track over/underflow events and use in heuristics to
// distribute work and prevent degenerate flip-flopping.
Block* new_work = stack_->PopNonEmptyBlock();
if (new_work == nullptr) {
return nullptr;
}
stack_->PushBlock(work_);
work_ = new_work;
// Generated code appends to marking stacks; tell MemorySanitizer.
MSAN_UNPOISON(work_, sizeof(*work_));
}
return work_->Pop();
}
void Push(RawObject* raw_obj) {
if (work_->IsFull()) {
// TODO(koda): Track over/underflow events and use in heuristics to
// distribute work and prevent degenerate flip-flopping.
stack_->PushBlock(work_);
work_ = stack_->PopEmptyBlock();
}
work_->Push(raw_obj);
}
void Finalize() {
ASSERT(work_->IsEmpty());
stack_->PushBlock(work_);
work_ = nullptr;
// Fail fast on attempts to mark after finalizing.
stack_ = nullptr;
}
void AbandonWork() {
stack_->PushBlock(work_);
work_ = nullptr;
stack_ = nullptr;
}
bool IsEmpty() {
if (!work_->IsEmpty()) {
return false;
}
return stack_->IsEmpty();
}
private:
Block* work_;
Stack* stack_;
};
static const int kStoreBufferBlockSize = 1024;
class StoreBuffer : public BlockStack<kStoreBufferBlockSize> {
public:
@ -244,19 +176,6 @@ class MarkingStack : public BlockStack<kMarkingStackBlockSize> {
};
typedef MarkingStack::Block MarkingStackBlock;
typedef BlockWorkList<MarkingStack> MarkerWorkList;
static const int kPromotionStackBlockSize = 64;
class PromotionStack : public BlockStack<kPromotionStackBlockSize> {
public:
// Adds and transfers ownership of the block to the buffer.
void PushBlock(Block* block) {
BlockStack<Block::kSize>::PushBlockImpl(block);
}
};
typedef PromotionStack::Block PromotionStackBlock;
typedef BlockWorkList<PromotionStack> PromotionWorkList;
} // namespace dart

File diff suppressed because it is too large.


@ -24,8 +24,7 @@ class Heap;
class Isolate;
class JSONObject;
class ObjectSet;
template <bool parallel>
class ScavengerVisitorBase;
class ScavengerVisitor;
// Wrapper around VirtualMemory that adds caching and handles the empty case.
class SemiSpace {
@ -140,8 +139,23 @@ class Scavenger {
}
void MakeTLABIterable(Thread* thread);
void AbandonRemainingTLAB(Thread* thread);
template <bool parallel>
bool TryAllocateNewTLAB(ScavengerVisitorBase<parallel>* visitor);
uword AllocateGC(intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(scavenging_);
uword result = top_;
intptr_t remaining = end_ - top_;
// This allocation happens only in GC and only when copying objects to
// the new to_ space. It must succeed.
ASSERT(size <= remaining);
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
top_ += size;
ASSERT((to_->Contains(top_)) || (top_ == to_->end()));
return result;
}
// Collect the garbage in this scavenger.
void Scavenge();
@ -149,6 +163,15 @@ class Scavenger {
// Promote all live objects.
void Evacuate();
uword top() { return top_; }
uword end() { return end_; }
void set_top(uword value) { top_ = value; }
void set_end(uword value) {
ASSERT(to_->end() == value);
end_ = value;
}
// Report (TLAB) abandoned bytes that should be taken into account when
// deciding whether to grow new space or not.
void AddAbandonedInBytes(intptr_t value) {
@ -202,8 +225,6 @@ class Scavenger {
void MakeNewSpaceIterable() const;
int64_t FreeSpaceInWords(Isolate* isolate) const;
bool scavenging() const { return scavenging_; }
private:
static const intptr_t kTLABSize = 512 * KB;
@ -243,33 +264,58 @@ class Scavenger {
void TryAllocateNewTLAB(Thread* thread);
void AddAbandonedInBytesLocked(intptr_t value) { abandoned_ += value; }
void AbandonRemainingTLABLocked(Thread* thread);
void AbandonTLABsLocked();
void AbandonTLABsLocked(IsolateGroup* isolate_group);
uword FirstObjectStart() const {
return to_->start() + kNewObjectAlignmentOffset;
}
SemiSpace* Prologue();
intptr_t ParallelScavenge(SemiSpace* from);
intptr_t SerialScavenge(SemiSpace* from);
void IterateIsolateRoots(ObjectPointerVisitor* visitor);
template <bool parallel>
void IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor);
template <bool parallel>
void IterateRememberedCards(ScavengerVisitorBase<parallel>* visitor);
void IterateObjectIdTable(ObjectPointerVisitor* visitor);
template <bool parallel>
void IterateRoots(ScavengerVisitorBase<parallel>* visitor);
void MournWeakHandles();
void Epilogue(SemiSpace* from);
SemiSpace* Prologue(IsolateGroup* isolate_group);
void IterateStoreBuffers(IsolateGroup* isolate_group,
ScavengerVisitor* visitor);
void IterateObjectIdTable(IsolateGroup* isolate_group,
ScavengerVisitor* visitor);
void IterateRoots(IsolateGroup* isolate_group, ScavengerVisitor* visitor);
void IterateWeakProperties(IsolateGroup* isolate_group,
ScavengerVisitor* visitor);
void IterateWeakReferences(IsolateGroup* isolate_group,
ScavengerVisitor* visitor);
void IterateWeakRoots(IsolateGroup* isolate_group, HandleVisitor* visitor);
void ProcessToSpace(ScavengerVisitor* visitor);
void EnqueueWeakProperty(RawWeakProperty* raw_weak);
uword ProcessWeakProperty(RawWeakProperty* raw_weak,
ScavengerVisitor* visitor);
void Epilogue(IsolateGroup* isolate_group, SemiSpace* from);
bool IsUnreachable(RawObject** p);
void VerifyStoreBuffers();
// During a scavenge we need to remember the promoted objects.
// This is implemented as a stack of objects at the end of the to space. As
// object sizes are always greater than sizeof(uword) and promoted objects do
// not consume space in the to space they leave enough room for this stack.
void PushToPromotedStack(uword addr) {
ASSERT(scavenging_);
end_ -= sizeof(addr);
ASSERT(end_ > top_);
*reinterpret_cast<uword*>(end_) = addr;
}
uword PopFromPromotedStack() {
ASSERT(scavenging_);
uword result = *reinterpret_cast<uword*>(end_);
end_ += sizeof(result);
ASSERT(end_ <= to_->end());
return result;
}
bool PromotedStackHasMore() const {
ASSERT(scavenging_);
return end_ < to_->end();
}
void UpdateMaxHeapCapacity();
void UpdateMaxHeapUsage();
void MournWeakTables();
void ProcessWeakReferences();
intptr_t NewSizeInWords(intptr_t old_size_in_words) const;
@ -291,14 +337,13 @@ class Scavenger {
// whether to grow newspace or not.
intptr_t abandoned_ = 0;
PromotionStack promotion_stack_;
intptr_t max_semi_capacity_in_words_;
// Keep track whether a scavenge is currently running.
bool scavenging_;
RelaxedAtomic<intptr_t> root_slices_started_;
StoreBufferBlock* blocks_;
// Keep track of pending weak properties discovered while scavenging.
RawWeakProperty* delayed_weak_properties_;
int64_t gc_time_micros_;
intptr_t collections_;
@ -316,8 +361,7 @@ class Scavenger {
// Protects new space during the allocation of new TLABs
mutable Mutex space_lock_;
template <bool>
friend class ScavengerVisitorBase;
friend class ScavengerVisitor;
friend class ScavengerWeakVisitor;
DISALLOW_COPY_AND_ASSIGN(Scavenger);


@ -111,17 +111,20 @@ class ConcurrentSweeperTask : public ThreadPool::Task {
HeapPage* first,
HeapPage* last,
HeapPage* large_first,
HeapPage* large_last)
HeapPage* large_last,
FreeList* freelist)
: task_isolate_group_(isolate_group),
old_space_(old_space),
first_(first),
last_(last),
large_first_(large_first),
large_last_(large_last) {
large_last_(large_last),
freelist_(freelist) {
ASSERT(task_isolate_group_ != NULL);
ASSERT(first_ != NULL);
ASSERT(old_space_ != NULL);
ASSERT(last_ != NULL);
ASSERT(freelist_ != NULL);
MonitorLocker ml(old_space_->tasks_lock());
old_space_->set_tasks(old_space_->tasks() + 1);
old_space_->set_phase(PageSpace::kSweepingLarge);
@ -166,8 +169,6 @@ class ConcurrentSweeperTask : public ThreadPool::Task {
ml.NotifyAll();
}
intptr_t shard = 0;
const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
page = first_;
prev_page = NULL;
while (page != NULL) {
@ -180,9 +181,7 @@ class ConcurrentSweeperTask : public ThreadPool::Task {
next_page = page->next();
}
ASSERT(page->type() == HeapPage::kData);
shard = (shard + 1) % num_shards;
bool page_in_use =
sweeper.SweepPage(page, old_space_->DataFreeList(shard), false);
bool page_in_use = sweeper.SweepPage(page, freelist_, false);
if (page_in_use) {
prev_page = page;
} else {
@ -216,6 +215,7 @@ class ConcurrentSweeperTask : public ThreadPool::Task {
HeapPage* last_;
HeapPage* large_first_;
HeapPage* large_last_;
FreeList* freelist_;
};
void GCSweeper::SweepConcurrent(IsolateGroup* isolate_group,
@ -226,7 +226,7 @@ void GCSweeper::SweepConcurrent(IsolateGroup* isolate_group,
FreeList* freelist) {
bool result = Dart::thread_pool()->Run<ConcurrentSweeperTask>(
isolate_group, isolate_group->heap()->old_space(), first, last,
large_first, large_last);
large_first, large_last, freelist);
ASSERT(result);
}


@ -79,7 +79,6 @@ static void DeterministicModeHandler(bool value) {
FLAG_background_compilation = false; // Timing dependent.
FLAG_concurrent_mark = false; // Timing dependent.
FLAG_concurrent_sweep = false; // Timing dependent.
FLAG_scavenger_tasks = 0; // Timing dependent.
FLAG_random_seed = 0x44617274; // "Dart"
}
}
@ -2495,8 +2494,7 @@ void IsolateGroup::ForEachIsolate(
ASSERT(Thread::Current()->IsAtSafepoint() ||
(Thread::Current()->task_kind() == Thread::kMutatorTask) ||
(Thread::Current()->task_kind() == Thread::kMarkerTask) ||
(Thread::Current()->task_kind() == Thread::kCompactorTask) ||
(Thread::Current()->task_kind() == Thread::kScavengerTask));
(Thread::Current()->task_kind() == Thread::kCompactorTask));
for (Isolate* isolate : isolates_) {
function(isolate);
}


@ -2588,15 +2588,12 @@ void Object::CheckHandle() const {
ASSERT(vtable() == builtin_vtables_[cid]);
if (FLAG_verify_handles && raw_->IsHeapObject()) {
Heap* isolate_heap = IsolateGroup::Current()->heap();
if (!isolate_heap->new_space()->scavenging()) {
Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
uword addr = RawObject::ToAddr(raw_);
if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
ASSERT(FLAG_write_protect_code);
addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
ASSERT(isolate_heap->Contains(addr) ||
vm_isolate_heap->Contains(addr));
}
Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
uword addr = RawObject::ToAddr(raw_);
if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
ASSERT(FLAG_write_protect_code);
addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
}
}
}


@ -10855,16 +10855,12 @@ void Object::SetRaw(RawObject* value) {
if (FLAG_verify_handles && raw_->IsHeapObject()) {
Isolate* isolate = Isolate::Current();
Heap* isolate_heap = isolate->heap();
// TODO(rmacnak): Remove after rewriting StackFrame::VisitObjectPointers
// to not use handles.
if (!isolate_heap->new_space()->scavenging()) {
Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
uword addr = RawObject::ToAddr(raw_);
if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
ASSERT(FLAG_write_protect_code);
addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
}
Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
uword addr = RawObject::ToAddr(raw_);
if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
ASSERT(FLAG_write_protect_code);
addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
}
}
#endif


@ -73,7 +73,7 @@ void RawObject::Validate(IsolateGroup* isolate_group) const {
return;
}
intptr_t size_from_tags = SizeTag::decode(tags);
intptr_t size_from_class = HeapSizeFromClass(tags);
intptr_t size_from_class = HeapSizeFromClass();
if ((size_from_tags != 0) && (size_from_tags != size_from_class)) {
FATAL3(
"Inconsistent size encountered "
@ -85,12 +85,11 @@ void RawObject::Validate(IsolateGroup* isolate_group) const {
// Can't look at the class object because it can be called during
// compaction when the class objects are moving. Can use the class
// id in the header and the sizes in the Class Table.
// Cannot dereference ptr()->tags_. May dereference other parts of the object.
intptr_t RawObject::HeapSizeFromClass(uint32_t tags) const {
intptr_t RawObject::HeapSizeFromClass() const {
// Only reasonable to be called on heap objects.
ASSERT(IsHeapObject());
intptr_t class_id = ClassIdTag::decode(tags);
intptr_t class_id = GetClassId();
intptr_t instance_size = 0;
switch (class_id) {
case kCodeCid: {
@ -159,9 +158,9 @@ intptr_t RawObject::HeapSizeFromClass(uint32_t tags) const {
CLASS_LIST_TYPED_DATA(SIZE_FROM_CLASS) {
const RawTypedData* raw_obj =
reinterpret_cast<const RawTypedData*>(this);
intptr_t cid = raw_obj->GetClassId();
intptr_t array_len = Smi::Value(raw_obj->ptr()->length_);
intptr_t lengthInBytes =
array_len * TypedData::ElementSizeInBytes(class_id);
intptr_t lengthInBytes = array_len * TypedData::ElementSizeInBytes(cid);
instance_size = TypedData::InstanceSize(lengthInBytes);
break;
}
@ -246,7 +245,7 @@ intptr_t RawObject::HeapSizeFromClass(uint32_t tags) const {
if (!class_table->IsValidIndex(class_id) ||
(!class_table->HasValidClassAt(class_id) && !use_saved_class_table)) {
FATAL3("Invalid cid: %" Pd ", obj: %p, tags: %x. Corrupt heap?",
class_id, this, static_cast<uint32_t>(tags));
class_id, this, static_cast<uint32_t>(ptr()->tags_));
}
#endif // DEBUG
instance_size = isolate_group->GetClassSizeForHeapWalkAt(class_id);
@ -254,6 +253,7 @@ intptr_t RawObject::HeapSizeFromClass(uint32_t tags) const {
}
ASSERT(instance_size != 0);
#if defined(DEBUG)
uint32_t tags = ptr()->tags_;
intptr_t tags_size = SizeTag::decode(tags);
if ((class_id == kArrayCid) && (instance_size > tags_size && tags_size > 0)) {
// TODO(22501): Array::MakeFixedLength could be in the process of shrinking


@ -442,7 +442,7 @@ class RawObject {
// leading to inconsistency between HeapSizeFromClass() and
// SizeTag::decode(tags). We are working around it by reloading tags_ and
// recomputing size from tags.
const intptr_t size_from_class = HeapSizeFromClass(tags);
const intptr_t size_from_class = HeapSizeFromClass();
if ((result > size_from_class) && (GetClassId() == kArrayCid) &&
(ptr()->tags_) != tags) {
result = SizeTag::decode(ptr()->tags_);
@ -451,19 +451,7 @@ class RawObject {
#endif
return result;
}
result = HeapSizeFromClass(tags);
ASSERT(result > SizeTag::kMaxSizeTag);
return result;
}
// This variant must not dereference ptr()->tags_.
intptr_t HeapSize(uint32_t tags) const {
ASSERT(IsHeapObject());
intptr_t result = SizeTag::decode(tags);
if (result != 0) {
return result;
}
result = HeapSizeFromClass(tags);
result = HeapSizeFromClass();
ASSERT(result > SizeTag::kMaxSizeTag);
return result;
}
@ -622,7 +610,7 @@ class RawObject {
intptr_t VisitPointersPredefined(ObjectPointerVisitor* visitor,
intptr_t class_id);
intptr_t HeapSizeFromClass(uint32_t tags) const;
intptr_t HeapSizeFromClass() const;
void SetClassId(intptr_t new_cid) {
ptr()->tags_.UpdateUnsynchronized<ClassIdTag>(new_cid);
@ -778,8 +766,7 @@ class RawObject {
friend class OneByteString; // StoreSmi
friend class RawInstance;
friend class Scavenger;
template <bool>
friend class ScavengerVisitorBase;
friend class ScavengerVisitor;
friend class ImageReader; // tags_ check
friend class ImageWriter;
friend class AssemblyImageWriter;
@ -2620,8 +2607,7 @@ class RawTypedDataView : public RawTypedDataBase {
friend class ObjectPoolSerializationCluster;
friend class RawObjectPool;
friend class GCCompactor;
template <bool>
friend class ScavengerVisitorBase;
friend class ScavengerVisitor;
friend class SnapshotReader;
};
@ -2915,8 +2901,7 @@ class RawWeakProperty : public RawInstance {
template <bool>
friend class MarkingVisitorBase;
friend class Scavenger;
template <bool>
friend class ScavengerVisitorBase;
friend class ScavengerVisitor;
};
// MirrorReferences are used by mirrors to hold reflectees that are VM


@ -685,6 +685,18 @@ Object* SnapshotReader::GetBackRef(intptr_t id) {
return NULL;
}
class HeapLocker : public StackResource {
public:
HeapLocker(Thread* thread, PageSpace* page_space)
: StackResource(thread), page_space_(page_space) {
page_space_->AcquireDataLock();
}
~HeapLocker() { page_space_->ReleaseDataLock(); }
private:
PageSpace* page_space_;
};
RawApiError* SnapshotReader::VerifyVersionAndFeatures(Isolate* isolate) {
// If the version string doesn't match, return an error.
// Note: New things are allocated only if we're going to return an error.


@ -246,7 +246,6 @@ class Thread : public ThreadState {
kMarkerTask = 0x4,
kSweeperTask = 0x8,
kCompactorTask = 0x10,
kScavengerTask = 0x20,
};
// Converts a TaskKind to its corresponding C-String name.
static const char* TaskKindToCString(TaskKind kind);


@ -18,10 +18,6 @@
// VMOptions=--concurrent_mark --concurrent_sweep
// VMOptions=--concurrent_mark --use_compactor
// VMOptions=--concurrent_mark --use_compactor --force_evacuation
// VMOptions=--scavenger_tasks=0
// VMOptions=--scavenger_tasks=1
// VMOptions=--scavenger_tasks=2
// VMOptions=--scavenger_tasks=3
main() {
final List<List> arrays = [];