Mirror of https://github.com/dart-lang/sdk — synced 2024-10-14 23:09:51 +00:00
[vm, gc] Incremental compaction, take 2.

- Fix missing store buffer flush when --marker_tasks=0.
- Fix passing untagged pointer to store barrier check on ARM/ARM64 (6bc417dd17).
- Fix passing uninitialized header to store barrier check on ARM64/RISCV (1447193053).

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/52513
Bug: https://github.com/dart-lang/sdk/issues/55754
Change-Id: Id2aa95b6d776b82d83464cde0d00e6f3b29b7b77
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/367202
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>

Commit: 9077bf991f — parent: fab56db71b
@@ -95,18 +95,18 @@ But we combine the generational and incremental checks with a shift-and-mask.
 ```c++
 enum HeaderBits {
   ...
-  kNotMarkedBit,            // Incremental barrier target.
-  kNewBit,                  // Generational barrier target.
-  kAlwaysSetBit,            // Incremental barrier source.
-  kOldAndNotRememberedBit,  // Generational barrier source.
+  kNotMarkedBit,                 // Incremental barrier target.
+  kNewOrEvacuationCandidateBit,  // Generational barrier target.
+  kAlwaysSetBit,                 // Incremental barrier source.
+  kOldAndNotRememberedBit,       // Generational barrier source.
   ...
 };

-static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewBit;
+static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewOrEvacuationCandidateBit;
 static constexpr intptr_t kIncrementalBarrierMask = 1 << kNotMarkedBit;
 static constexpr intptr_t kBarrierOverlapShift = 2;
 COMPILE_ASSERT(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
-COMPILE_ASSERT(kNewBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
+COMPILE_ASSERT(kNewOrEvacuationCandidateBit + kBarrierOverlapShift == kOldAndNotRememberedBit);

 StorePointer(ObjectPtr source, ObjectPtr* slot, ObjectPtr target) {
   *slot = target;
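For orientation, here is an editor's sketch (not part of the commit) of the shift-and-mask the documentation above describes. The bit positions are illustrative values chosen only to satisfy the two COMPILE_ASSERTs, and `StoreNeedsBarrierSlowPath` is a hypothetical name; the real filter lives in the VM's store-barrier fast path, and its slow path still checks whether marking is actually in progress.

```c++
#include <cstdint>

using uword = uintptr_t;

// Bit positions restated from the (post-commit) enum above; values are
// illustrative, chosen so the two asserts below hold.
enum HeaderBits {
  kNotMarkedBit = 2,                 // Incremental barrier target.
  kNewOrEvacuationCandidateBit = 3,  // Generational barrier target.
  kAlwaysSetBit = 4,                 // Incremental barrier source.
  kOldAndNotRememberedBit = 5,       // Generational barrier source.
};

constexpr uword kGenerationalBarrierMask = 1 << kNewOrEvacuationCandidateBit;
constexpr uword kIncrementalBarrierMask = 1 << kNotMarkedBit;
constexpr int kBarrierOverlapShift = 2;

static_assert(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
static_assert(kNewOrEvacuationCandidateBit + kBarrierOverlapShift ==
              kOldAndNotRememberedBit);

// Shifting the source's tags down by kBarrierOverlapShift drops its
// kOldAndNotRememberedBit onto the target's kNewOrEvacuationCandidateBit
// position (generational check) and its kAlwaysSetBit onto the target's
// kNotMarkedBit position (incremental check), so one AND tests both.
bool StoreNeedsBarrierSlowPath(uword source_tags, uword target_tags) {
  return ((source_tags >> kBarrierOverlapShift) & target_tags &
          (kGenerationalBarrierMask | kIncrementalBarrierMask)) != 0;
}
```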
@@ -178,7 +178,6 @@ We can eliminate these checks when the compiler can prove these cases cannot hap
 * `value` is a constant. Constants are always old, and they will be marked via the constant pools even if we fail to mark them via `container`.
 * `value` has the static type bool. All possible values of the bool type (null, false, true) are constants.
 * `value` is known to be a Smi. Smis are not heap objects.
-* `container` is the same object as `value`. The GC never needs to retain an additional object if it sees a self-reference, so ignoring a self-reference cannot cause us to free a reachable object.
 * `container` is known to be a new object or known to be an old object that is in the remembered set and is marked if marking is in progress.

 We can know that `container` meets the last property if `container` is the result of an allocation (instead of a heap load), and there is no instruction that can trigger a GC between the allocation and the store. This is because the allocation stubs ensure the result of AllocateObject is either a new-space object (common case, bump pointer allocation succeeds), or has been preemptively added to the remembered set and marking worklist (uncommon case, entered runtime to allocate object, possibly triggering GC).
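An editor's illustration (not from the commit) of why the self-reference bullet is removed here, and why the matching `x.slot = x` / `x[slot] = x` eliminations disappear from StoreFieldInstr and StoreIndexedInstr later in this diff. The types below are hypothetical stand-ins, not VM types:

```c++
// A self-reference keeps no extra object alive, so it was safe to skip the
// barrier for retention purposes. With incremental compaction, the store can
// still create an old -> evacuation-candidate pointer, and that slot must be
// remembered so the compactor can forward it after the object moves.
struct HypotheticalObject {
  HypotheticalObject* slot = nullptr;
  bool on_evacuation_candidate_page = false;
};

void SelfStore(HypotheticalObject* x) {
  x->slot = x;
  if (x->on_evacuation_candidate_page) {
    // remember(x);  // now required — eliding the barrier would leave this
    //               // slot stale once x is evacuated
  }
}
```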
@@ -80,8 +80,8 @@ class RelaxedAtomic {
   }
   T operator+=(T arg) { return fetch_add(arg) + arg; }
   T operator-=(T arg) { return fetch_sub(arg) - arg; }
-  T& operator++() { return fetch_add(1) + 1; }
-  T& operator--() { return fetch_sub(1) - 1; }
+  T operator++() { return fetch_add(1) + 1; }
+  T operator--() { return fetch_sub(1) - 1; }
   T operator++(int) { return fetch_add(1); }
   T operator--(int) { return fetch_sub(1); }
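The `T&` to `T` change above fixes a latent bug: `fetch_add(1) + 1` is a temporary, and a `T&` return type would bind a reference to it. Because members of a class template are only instantiated when used, the broken prefix operators could sit in the header unnoticed until a caller touched them. A minimal standalone sketch (editor's example, not the VM's class):

```c++
#include <atomic>
#include <cstdint>

template <typename T>
struct RelaxedCounter {
  std::atomic<T> value{0};
  T fetch_add(T arg) { return value.fetch_add(arg, std::memory_order_relaxed); }
  // T& operator++() { return fetch_add(1) + 1; }  // ill-formed once
  //                                               // instantiated: binds a
  //                                               // reference to a temporary
  T operator++() { return fetch_add(1) + 1; }  // fixed: return by value
};

int main() {
  RelaxedCounter<intptr_t> c;
  return (++c == 1) ? 0 : 1;  // prefix increment now returns the new value
}
```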
@@ -893,7 +893,7 @@ void Deserializer::InitializeHeader(ObjectPtr raw,
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-  tags = UntaggedObject::NewBit::update(false, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
   tags = UntaggedObject::ImmutableBit::update(is_immutable, tags);
   raw->untag()->tags_ = tags;
 }

@@ -36,6 +36,9 @@ class AtomicBitFieldContainer : AtomicBitFieldContainerBase {
   }

   T load(std::memory_order order) const { return field_.load(order); }
+  NO_SANITIZE_THREAD T load_ignore_race() const {
+    return *reinterpret_cast<const T*>(&field_);
+  }
   void store(T value, std::memory_order order) { field_.store(value, order); }

   bool compare_exchange_weak(T old_tags, T new_tags, std::memory_order order) {

@@ -48,11 +51,6 @@ class AtomicBitFieldContainer : AtomicBitFieldContainerBase {
     return TargetBitField::decode(field_.load(order));
   }

-  template <class TargetBitField>
-  NO_SANITIZE_THREAD typename TargetBitField::Type ReadIgnoreRace() const {
-    return TargetBitField::decode(*reinterpret_cast<const T*>(&field_));
-  }
-
   template <class TargetBitField,
             std::memory_order order = std::memory_order_relaxed>
   void UpdateBool(bool value) {

@@ -1933,7 +1933,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
-  tst(TMP, Operand(1 << target::UntaggedObject::kNewBit));
+  tst(TMP, Operand(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   b(&done, ZERO);
   ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
   tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));

@@ -1231,7 +1231,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  tbz(&done, TMP, target::UntaggedObject::kNewBit);
+  tbz(&done, TMP, target::UntaggedObject::kNewOrEvacuationCandidateBit);
   ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
   Stop("Write barrier is required");

@@ -2207,7 +2207,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));

@@ -3518,7 +3518,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
-  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewBit);
+  andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewOrEvacuationCandidateBit);
   beqz(TMP2, &done, kNearJump);
   lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
   andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);

@@ -1684,7 +1684,7 @@ void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
   Label done;
   BranchIfSmi(value, &done, kNearJump);
   testb(FieldAddress(value, target::Object::tags_offset()),
-        Immediate(1 << target::UntaggedObject::kNewBit));
+        Immediate(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
   j(ZERO, &done, Assembler::kNearJump);
   testb(FieldAddress(object, target::Object::tags_offset()),
         Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));

@@ -1392,10 +1392,17 @@ bool Value::NeedsWriteBarrier() {

   // Strictly speaking, the incremental barrier can only be skipped for
   // immediate objects (Smis) or permanent objects (vm-isolate heap or
-  // image pages). Here we choose to skip the barrier for any constant on
-  // the assumption it will remain reachable through the object pool.
+  // image pages). For AOT, we choose to skip the barrier for any constant on
+  // the assumptions it will remain reachable through the object pool and it
+  // is on a page created by snapshot loading that is marked so as to never be
+  // evacuated.
   if (value->BindsToConstant()) {
-    return false;
+    if (FLAG_precompiled_mode) {
+      return false;
+    } else {
+      const Object& constant = value->BoundConstant();
+      return constant.ptr()->IsHeapObject() && !constant.InVMIsolateHeap();
+    }
   }

   // Follow the chain of redefinitions as redefined value could have a more
@@ -6417,11 +6417,6 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
       // The target field is native and unboxed, so not traversed by the GC.
       return false;
     }
-    if (instance()->definition() == value()->definition()) {
-      // `x.slot = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }

     if (value()->definition()->Type()->IsBool()) {
       return false;

@@ -7074,12 +7069,6 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
   bool aligned() const { return alignment_ == kAlignedAccess; }

   bool ShouldEmitStoreBarrier() const {
-    if (array()->definition() == value()->definition()) {
-      // `x[slot] = x` cannot create an old->new or old&marked->old&unmarked
-      // reference.
-      return false;
-    }
-
     if (value()->definition()->Type()->IsBool()) {
       return false;
     }

@@ -522,11 +522,9 @@ Fragment BaseFlowGraphBuilder::StoreNativeField(
     StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */,
     compiler::Assembler::MemoryOrder memory_order /* = kRelaxed */) {
   Value* value = Pop();
-  if (value->BindsToConstant()) {
-    emit_store_barrier = kNoStoreBarrier;
-  }
+  Value* instance = Pop();
   StoreFieldInstr* store = new (Z)
-      StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
+      StoreFieldInstr(slot, instance, value, emit_store_barrier,
                       stores_inner_pointer, InstructionSource(position), kind);
   return Fragment(store);
 }

@@ -361,7 +361,7 @@ uword MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
   return dart::UntaggedObject::SizeTag::encode(
              TranslateOffsetInWordsToHost(instance_size)) |
          dart::UntaggedObject::ClassIdTag::encode(cid) |
-         dart::UntaggedObject::NewBit::encode(true) |
+         dart::UntaggedObject::NewOrEvacuationCandidateBit::encode(true) |
          dart::UntaggedObject::AlwaysSetBit::encode(true) |
          dart::UntaggedObject::NotMarkedBit::encode(true) |
          dart::UntaggedObject::ImmutableBit::encode(

@@ -377,7 +377,8 @@ const word UntaggedObject::kCardRememberedBit =

 const word UntaggedObject::kCanonicalBit = dart::UntaggedObject::kCanonicalBit;

-const word UntaggedObject::kNewBit = dart::UntaggedObject::kNewBit;
+const word UntaggedObject::kNewOrEvacuationCandidateBit =
+    dart::UntaggedObject::kNewOrEvacuationCandidateBit;

 const word UntaggedObject::kOldAndNotRememberedBit =
     dart::UntaggedObject::kOldAndNotRememberedBit;

@@ -418,7 +418,7 @@ class UntaggedObject : public AllStatic {
  public:
  static const word kCardRememberedBit;
  static const word kCanonicalBit;
-  static const word kNewBit;
+  static const word kNewOrEvacuationCandidateBit;
  static const word kOldAndNotRememberedBit;
  static const word kNotMarkedBit;
  static const word kImmutableBit;

@@ -127,8 +127,8 @@ constexpr bool FLAG_support_il_printer = false;
   R(log_marker_tasks, false, bool, false,                                      \
     "Log debugging information for old gen GC marking tasks.")                 \
   P(scavenger_tasks, int, 2,                                                   \
-    "The number of tasks to spawn during scavenging (0 means "                 \
-    "perform all marking on main thread).")                                    \
+    "The number of tasks to spawn during scavenging and incremental "          \
+    "compaction (0 means perform all work on the main thread).")               \
   P(mark_when_idle, bool, false,                                               \
     "The Dart thread will assist in concurrent marking during idle time and "  \
     "is counted as one marker task")                                           \

@@ -216,6 +216,8 @@ constexpr bool FLAG_support_il_printer = false;
   P(truncating_left_shift, bool, true,                                         \
     "Optimize left shift to truncate if possible")                             \
   P(use_compactor, bool, false, "Compact the heap during old-space GC.")       \
+  P(use_incremental_compactor, bool, true,                                     \
+    "Compact the heap during old-space GC.")                                   \
   P(use_cha_deopt, bool, true,                                                 \
     "Use class hierarchy analysis even if it can cause deoptimization.")       \
   P(use_field_guards, bool, true, "Use field guards and track field types")    \

@@ -29,7 +29,7 @@ ForwardingCorpse* ForwardingCorpse::AsForwarder(uword addr, intptr_t size) {
   bool is_old = (addr & kNewObjectAlignmentOffset) == kOldObjectAlignmentOffset;
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags);
-  tags = UntaggedObject::NewBit::update(!is_old, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags);

   result->tags_ = tags;
   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {

@@ -6,9 +6,9 @@

 #include "platform/atomic.h"
 #include "vm/globals.h"
 #include "vm/heap/become.h"
 #include "vm/heap/heap.h"
 #include "vm/heap/pages.h"
+#include "vm/heap/sweeper.h"
 #include "vm/thread_barrier.h"
 #include "vm/timeline.h"
@@ -184,18 +184,52 @@ class CompactorTask : public ThreadPool::Task {
 void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   SetupImagePageBoundaries();

-  // Divide the heap.
+  Page* fixed_head = nullptr;
+  Page* fixed_tail = nullptr;
+
+  // Divide the heap, and set aside never-evacuate pages.
   // TODO(30978): Try to divide based on live bytes or with work stealing.
   intptr_t num_pages = 0;
-  for (Page* page = pages; page != nullptr; page = page->next()) {
-    num_pages++;
+  Page* page = pages;
+  Page* prev = nullptr;
+  while (page != nullptr) {
+    Page* next = page->next();
+    if (page->is_never_evacuate()) {
+      if (prev != nullptr) {
+        prev->set_next(next);
+      } else {
+        pages = next;
+      }
+      if (fixed_tail == nullptr) {
+        fixed_tail = page;
+      }
+      page->set_next(fixed_head);
+      fixed_head = page;
+    } else {
+      prev = page;
+      num_pages++;
+    }
+    page = next;
   }
+  fixed_pages_ = fixed_head;

   intptr_t num_tasks = FLAG_compactor_tasks;
   RELEASE_ASSERT(num_tasks >= 1);
   if (num_pages < num_tasks) {
     num_tasks = num_pages;
   }
+  if (num_tasks == 0) {
+    ASSERT(pages == nullptr);
+
+    // Move pages to sweeper work lists.
+    heap_->old_space()->pages_ = nullptr;
+    heap_->old_space()->pages_tail_ = nullptr;
+    heap_->old_space()->sweep_regular_ = fixed_head;
+
+    heap_->old_space()->Sweep(/*exclusive*/ true);
+    heap_->old_space()->SweepLarge();
+    return;
+  }

   Partition* partitions = new Partition[num_tasks];

@@ -206,6 +240,7 @@ void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
   Page* page = pages;
   Page* prev = nullptr;
   while (task_index < num_tasks) {
+    ASSERT(!page->is_never_evacuate());
     if (page_index % pages_per_task == 0) {
       partitions[task_index].head = page;
       partitions[task_index].tail = nullptr;

@@ -352,6 +387,12 @@ void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
     partitions[num_tasks - 1].tail->set_next(nullptr);
     heap_->old_space()->pages_ = pages = partitions[0].head;
     heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
+    if (fixed_head != nullptr) {
+      fixed_tail->set_next(heap_->old_space()->pages_);
+      heap_->old_space()->pages_ = fixed_head;
+
+      ASSERT(heap_->old_space()->pages_tail_ != nullptr);
+    }

     delete[] partitions;
   }

@@ -486,6 +527,7 @@ void CompactorTask::RunEnteredIsolateGroup() {
 }

 void CompactorTask::PlanPage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();

@@ -498,6 +540,7 @@ void CompactorTask::PlanPage(Page* page) {
 }

 void CompactorTask::SlidePage(Page* page) {
+  ASSERT(!page->is_never_evacuate());
   uword current = page->object_start();
   uword end = page->object_end();

@@ -667,6 +710,11 @@ void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }

   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));

@@ -703,6 +751,11 @@ void GCCompactor::ForwardCompressedPointer(uword heap_base,
   if (forwarding_page == nullptr) {
     return;  // Not moved (VM isolate, large page, code page).
   }
+  if (page->is_never_evacuate()) {
+    // Forwarding page is non-NULL since one is still reserved for use as a
+    // counting page, but it doesn't have forwarding information.
+    return;
+  }

   ObjectPtr new_target =
       UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));

@@ -796,6 +849,24 @@ void GCCompactor::ForwardLargePages() {
     page->VisitObjectPointers(this);
     ml.Lock();
   }
+  while (fixed_pages_ != nullptr) {
+    Page* page = fixed_pages_;
+    fixed_pages_ = page->next();
+    ml.Unlock();
+
+    GCSweeper sweeper;
+    FreeList* freelist = heap_->old_space()->DataFreeList(0);
+    bool page_in_use;
+    {
+      MutexLocker ml(freelist->mutex());
+      page_in_use = sweeper.SweepPage(page, freelist);
+    }
+    ASSERT(page_in_use);
+
+    page->VisitObjectPointers(this);
+
+    ml.Lock();
+  }
 }

 void GCCompactor::ForwardStackPointers() {
@@ -74,6 +74,7 @@ class GCCompactor : public ValueObject,

   Mutex large_pages_mutex_;
   Page* large_pages_ = nullptr;
+  Page* fixed_pages_ = nullptr;

   // The typed data views whose inner pointer must be updated after sliding is
   // complete.

@@ -28,7 +28,7 @@ FreeListElement* FreeListElement::AsElement(uword addr, intptr_t size) {
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-  tags = UntaggedObject::NewBit::update(false, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
   result->tags_ = tags;

   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {

@@ -53,7 +53,7 @@ FreeListElement* FreeListElement::AsElementNew(uword addr, intptr_t size) {
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(false, tags);
-  tags = UntaggedObject::NewBit::update(true, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(true, tags);
   result->tags_ = tags;

   if (size > UntaggedObject::SizeTag::kMaxSizeTag) {

@@ -224,6 +224,9 @@ class FreeList {
   // The largest available small size in bytes, or negative if there is none.
   intptr_t last_free_small_size_;

+  friend class GCIncrementalCompactor;
+  friend class PrologueTask;
+
   DISALLOW_COPY_AND_ASSIGN(FreeList);
 };

@@ -12,6 +12,7 @@
 #include "vm/compiler/jit/compiler.h"
 #include "vm/dart.h"
 #include "vm/flags.h"
+#include "vm/heap/incremental_compactor.h"
 #include "vm/heap/pages.h"
 #include "vm/heap/safepoint.h"
 #include "vm/heap/scavenger.h"
@@ -467,6 +468,16 @@ void Heap::CollectNewSpaceGarbage(Thread* thread,
   VMTagScope tagScope(thread, reason == GCReason::kIdle
                                   ? VMTag::kGCIdleTagId
                                   : VMTag::kGCNewSpaceTagId);
+  if (reason == GCReason::kStoreBuffer) {
+    // The remembered set may become too full, increasing the time of
+    // stop-the-world phases, if new-space or to-be-evacuated objects are
+    // pointed to by too many objects. This is resolved by evacuating
+    // new-space (so there are no old->new pointers) and aborting an
+    // incremental compaction (so there are no old->to-be-evacuated
+    // pointers). If we had separate remembered sets, we could do these
+    // actions separately.
+    GCIncrementalCompactor::Abort(old_space());
+  }
   TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
   new_space_.Scavenge(thread, type, reason);
   RecordAfterGC(type);
@@ -387,6 +387,7 @@ class Heap {
   friend class Serializer;  // VisitObjectsImagePages
   friend class HeapTestHelper;
   friend class GCTestHelper;
+  friend class GCIncrementalCompactor;

   DISALLOW_COPY_AND_ASSIGN(Heap);
 };

@@ -13,6 +13,8 @@ heap_sources = [
   "gc_shared.h",
   "heap.cc",
   "heap.h",
+  "incremental_compactor.cc",
+  "incremental_compactor.h",
   "marker.cc",
   "marker.h",
   "page.cc",
runtime/vm/heap/incremental_compactor.cc — new file, 1013 lines (diff suppressed because it is too large)

runtime/vm/heap/incremental_compactor.h — new file, 40 lines:
@@ -0,0 +1,40 @@
+// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+#ifndef RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
+#define RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
+
+#include "vm/allocation.h"
+
+namespace dart {
+
+// Forward declarations.
+class PageSpace;
+class ObjectVisitor;
+class IncrementalForwardingVisitor;
+
+// An evacuating compactor that is incremental in the sense that building the
+// remembered set is interleaved with the mutator. The evacuation and forwarding
+// is not interleaved with the mutator, which would require a read barrier.
+class GCIncrementalCompactor : public AllStatic {
+ public:
+  static void Prologue(PageSpace* old_space);
+  static bool Epilogue(PageSpace* old_space);
+  static void Abort(PageSpace* old_space);
+
+ private:
+  static bool SelectEvacuationCandidates(PageSpace* old_space);
+  static void CheckFreeLists(PageSpace* old_space);
+
+  static bool HasEvacuationCandidates(PageSpace* old_space);
+  static void CheckPreEvacuate(PageSpace* old_space);
+  static void Evacuate(PageSpace* old_space);
+  static void CheckPostEvacuate(PageSpace* old_space);
+  static void FreeEvacuatedPages(PageSpace* old_space);
+  static void VerifyAfterIncrementalCompaction(PageSpace* old_space);
+};
+
+}  // namespace dart
+
+#endif  // RUNTIME_VM_HEAP_INCREMENTAL_COMPACTOR_H_
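A hedged sketch of how these three entry points bracket a major GC, assembled by the editor from the pages.cc and heap.cc hunks later in this diff; `ConcurrentMark`, `SweepNew` (as a free function), and `MajorGCSketch` are placeholders, not VM API:

```c++
// Assumed declarations, stubbed so the sketch is self-contained.
class PageSpace;
struct GCIncrementalCompactor {
  static void Prologue(PageSpace* old_space);
  static bool Epilogue(PageSpace* old_space);
  static void Abort(PageSpace* old_space);
};
extern bool FLAG_use_incremental_compactor;
void ConcurrentMark();  // placeholder
void SweepNew();        // placeholder

void MajorGCSketch(PageSpace* old_space) {
  if (FLAG_use_incremental_compactor) {
    // When marking starts: select evacuation-candidate pages; the mutator's
    // barriers then remember stores into them while marking runs.
    GCIncrementalCompactor::Prologue(old_space);
  }

  ConcurrentMark();

  bool new_space_is_swept = false;
  if (FLAG_use_incremental_compactor) {
    // In the finalization pause: evacuate candidates and forward pointers;
    // returns whether new-space was already swept along the way.
    new_space_is_swept = GCIncrementalCompactor::Epilogue(old_space);
  }
  if (!new_space_is_swept) {
    SweepNew();
  }
  // A store-buffer-overflow scavenge instead calls
  // GCIncrementalCompactor::Abort(old_space) to drop the candidate set.
}
```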
@@ -39,7 +39,8 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
         deferred_work_list_(deferred_marking_stack),
         marked_bytes_(0),
         marked_micros_(0),
-        concurrent_(true) {}
+        concurrent_(true),
+        has_evacuation_candidate_(false) {}
   ~MarkingVisitorBase() { ASSERT(delayed_.IsEmpty()); }

   uintptr_t marked_bytes() const { return marked_bytes_; }

@@ -56,6 +57,11 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     return raw->untag()->IsMarked();
   }

+  void FinishedRoots() {
+    // Nothing to remember for roots. Don't carry over to objects.
+    has_evacuation_candidate_ = false;
+  }
+
   bool ProcessPendingWeakProperties() {
     bool more_to_mark = false;
     WeakPropertyPtr cur_weak = delayed_.weak_properties.Release();

@@ -74,6 +80,15 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
         // The key is marked so we make sure to properly visit all pointers
         // originating from this weak property.
         cur_weak->untag()->VisitPointersNonvirtual(this);
+        if (has_evacuation_candidate_) {
+          has_evacuation_candidate_ = false;
+          if (!cur_weak->untag()->IsCardRemembered()) {
+            if (cur_weak->untag()->TryAcquireRememberedBit()) {
+              Thread::Current()->StoreBufferAddObjectGC(cur_weak);
+            }
+          }
+        }
+
       } else {
         // Requeue this weak property to be handled later.
         ASSERT(IsMarked(cur_weak));

@@ -85,10 +100,24 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     return more_to_mark;
   }

+  DART_NOINLINE
+  void YieldConcurrentMarking() {
+    work_list_.Flush();
+    new_work_list_.Flush();
+    deferred_work_list_.Flush();
+    Thread* thread = Thread::Current();
+    thread->StoreBufferReleaseGC();
+    page_space_->YieldConcurrentMarking();
+    thread->StoreBufferAcquireGC();
+  }
+
   void DrainMarkingStackWithPauseChecks() {
+    Thread* thread = Thread::Current();
     do {
       ObjectPtr obj;
       while (work_list_.Pop(&obj)) {
+        ASSERT(!has_evacuation_candidate_);
+
         if (obj->IsNewObject()) {
           Page* page = Page::Of(obj);
           uword top = page->original_top();

@@ -97,10 +126,7 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
           if (top <= addr && addr < end) {
             new_work_list_.Push(obj);
             if (UNLIKELY(page_space_->pause_concurrent_marking())) {
-              work_list_.Flush();
-              new_work_list_.Flush();
-              deferred_work_list_.Flush();
-              page_space_->YieldConcurrentMarking();
+              YieldConcurrentMarking();
             }
             continue;
           }

@@ -124,18 +150,26 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
           // Shape changing is not compatible with concurrent marking.
           deferred_work_list_.Push(obj);
           size = obj->untag()->HeapSize();
+        } else if (obj->untag()->IsCardRemembered()) {
+          ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
+          size = VisitCards(static_cast<ArrayPtr>(obj));
         } else {
           size = obj->untag()->VisitPointersNonvirtual(this);
         }
+        if (has_evacuation_candidate_) {
+          has_evacuation_candidate_ = false;
+          if (!obj->untag()->IsCardRemembered()) {
+            if (obj->untag()->TryAcquireRememberedBit()) {
+              thread->StoreBufferAddObjectGC(obj);
+            }
+          }
+        }
         if (!obj->IsNewObject()) {
           marked_bytes_ += size;
         }

         if (UNLIKELY(page_space_->pause_concurrent_marking())) {
-          work_list_.Flush();
-          new_work_list_.Flush();
-          deferred_work_list_.Flush();
-          page_space_->YieldConcurrentMarking();
+          YieldConcurrentMarking();
         }
       }
     } while (ProcessPendingWeakProperties());
@@ -146,6 +180,44 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     deferred_work_list_.Flush();
   }

+  intptr_t VisitCards(ArrayPtr obj) {
+    ASSERT(obj->IsArray() || obj->IsImmutableArray());
+    ASSERT(obj->untag()->IsCardRemembered());
+    CompressedObjectPtr* obj_from = obj->untag()->from();
+    CompressedObjectPtr* obj_to =
+        obj->untag()->to(Smi::Value(obj->untag()->length()));
+    uword heap_base = obj.heap_base();
+
+    Page* page = Page::Of(obj);
+    for (intptr_t i = 0, n = page->card_table_size(); i < n; i++) {
+      CompressedObjectPtr* card_from =
+          reinterpret_cast<CompressedObjectPtr*>(page) +
+          (i << Page::kSlotsPerCardLog2);
+      CompressedObjectPtr* card_to =
+          reinterpret_cast<CompressedObjectPtr*>(card_from) +
+          (1 << Page::kSlotsPerCardLog2) - 1;
+      // Minus 1 because to is inclusive.
+
+      if (card_from < obj_from) {
+        // First card overlaps with header.
+        card_from = obj_from;
+      }
+      if (card_to > obj_to) {
+        // Last card(s) may extend past the object. Array truncation can make
+        // this happen for more than one card.
+        card_to = obj_to;
+      }
+
+      VisitCompressedPointers(heap_base, card_from, card_to);
+      if (has_evacuation_candidate_) {
+        has_evacuation_candidate_ = false;
+        page->RememberCard(card_from);
+      }
+    }
+
+    return obj->untag()->HeapSize();
+  }
+
   void DrainMarkingStack() {
     while (ProcessMarkingStack(kIntptrMax)) {
     }
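A small worked illustration of the card arithmetic in the hunk above (editor's sketch; `kSlotsPerCardLog2 == 5` comes from the page.h hunk further down — 1 card = 32 compressed slots, and the range is inclusive, hence the "minus 1"):

```c++
#include <cstdint>

constexpr intptr_t kSlotsPerCardLog2 = 5;  // 1 card = 32 compressed slots

// Inclusive slot-index range [first, last] covered by card i of a page.
// VisitCards then clamps this range to the array's own from()/to() span,
// because the first card overlaps the object header and the last card(s)
// can extend past a truncated array.
void CardSlotRange(intptr_t i, intptr_t* first, intptr_t* last) {
  *first = i << kSlotsPerCardLog2;                // e.g. card 2 -> slot 64
  *last = *first + (1 << kSlotsPerCardLog2) - 1;  // e.g. card 2 -> slot 95
}
```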
@@ -170,10 +242,13 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
   }

   bool ProcessMarkingStack(intptr_t remaining_budget) {
+    Thread* thread = Thread::Current();
     do {
       // First drain the marking stacks.
       ObjectPtr obj;
       while (work_list_.Pop(&obj)) {
+        ASSERT(!has_evacuation_candidate_);
+
         if (sync && concurrent_ && obj->IsNewObject()) {
           Page* page = Page::Of(obj);
           uword top = page->original_top();

@@ -218,7 +293,19 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
             return true;  // More to mark.
           }
         }
-        size = obj->untag()->VisitPointersNonvirtual(this);
+        if (obj->untag()->IsCardRemembered()) {
+          ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
+          size = VisitCards(static_cast<ArrayPtr>(obj));
+        } else {
+          size = obj->untag()->VisitPointersNonvirtual(this);
+        }
       }
+      if (has_evacuation_candidate_) {
+        has_evacuation_candidate_ = false;
+        if (!obj->untag()->IsCardRemembered() &&
+            obj->untag()->TryAcquireRememberedBit()) {
+          thread->StoreBufferAddObjectGC(obj);
+        }
+      }
       if (!obj->IsNewObject()) {
         marked_bytes_ += size;

@@ -254,19 +341,23 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
   }

   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+    bool has_evacuation_candidate = false;
     for (ObjectPtr* current = first; current <= last; current++) {
-      MarkObject(LoadPointerIgnoreRace(current));
+      has_evacuation_candidate |= MarkObject(LoadPointerIgnoreRace(current));
     }
+    has_evacuation_candidate_ |= has_evacuation_candidate;
   }

 #if defined(DART_COMPRESSED_POINTERS)
   void VisitCompressedPointers(uword heap_base,
                                CompressedObjectPtr* first,
                                CompressedObjectPtr* last) override {
+    bool has_evacuation_candidate = false;
     for (CompressedObjectPtr* current = first; current <= last; current++) {
-      MarkObject(
+      has_evacuation_candidate |= MarkObject(
           LoadCompressedPointerIgnoreRace(current).Decompress(heap_base));
     }
+    has_evacuation_candidate_ |= has_evacuation_candidate;
   }
 #endif

@@ -291,17 +382,25 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     ObjectPtr raw_target =
         LoadCompressedPointerIgnoreRace(&raw_weak->untag()->target_)
             .Decompress(raw_weak->heap_base());
-    if (raw_target->IsHeapObject() && !raw_target->untag()->IsMarked()) {
-      // Target was white. Enqueue the weak reference. It is potentially dead.
-      // It might still be made alive by weak properties in next rounds.
-      ASSERT(IsMarked(raw_weak));
-      delayed_.weak_references.Enqueue(raw_weak);
+    if (raw_target->IsHeapObject()) {
+      if (!raw_target->untag()->IsMarked()) {
+        // Target was white. Enqueue the weak reference. It is potentially dead.
+        // It might still be made alive by weak properties in next rounds.
+        ASSERT(IsMarked(raw_weak));
+        delayed_.weak_references.Enqueue(raw_weak);
+      } else {
+        if (raw_target->untag()->IsEvacuationCandidate()) {
+          has_evacuation_candidate_ = true;
+        }
+      }
     }
     // Always visit the type argument.
     ObjectPtr raw_type_arguments =
         LoadCompressedPointerIgnoreRace(&raw_weak->untag()->type_arguments_)
             .Decompress(raw_weak->heap_base());
-    MarkObject(raw_type_arguments);
+    if (MarkObject(raw_type_arguments)) {
+      has_evacuation_candidate_ = true;
+    }
     return raw_weak->untag()->HeapSize();
   }

@@ -314,18 +413,24 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     ASSERT(IsMarked(raw_entry));
     delayed_.finalizer_entries.Enqueue(raw_entry);
     // Only visit token and next.
-    MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
-                   .Decompress(raw_entry->heap_base()));
-    MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
-                   .Decompress(raw_entry->heap_base()));
+    if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
+                       .Decompress(raw_entry->heap_base()))) {
+      has_evacuation_candidate_ = true;
+    }
+    if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
+                       .Decompress(raw_entry->heap_base()))) {
+      has_evacuation_candidate_ = true;
+    }
     return raw_entry->untag()->HeapSize();
   }

   void ProcessDeferredMarking() {
-    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessDeferredMarking");
+    Thread* thread = Thread::Current();
+    TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessDeferredMarking");

     ObjectPtr obj;
     while (deferred_work_list_.Pop(&obj)) {
+      ASSERT(!has_evacuation_candidate_);
       ASSERT(obj->IsHeapObject());
       // We need to scan objects even if they were already scanned via ordinary
       // marking. An object may have changed since its ordinary scan and been

@@ -351,6 +456,13 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
           marked_bytes_ += size;
         }
       }
+      if (has_evacuation_candidate_) {
+        has_evacuation_candidate_ = false;
+        if (!obj->untag()->IsCardRemembered() &&
+            obj->untag()->TryAcquireRememberedBit()) {
+          thread->StoreBufferAddObjectGC(obj);
+        }
+      }
     }
   }
@@ -425,6 +537,15 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     if (target->untag()->IsMarked()) {
       // Object already null (which is permanently marked) or has survived this
       // GC.
+      if (target->untag()->IsEvacuationCandidate()) {
+        if (parent->untag()->IsCardRemembered()) {
+          Page::Of(parent)->RememberCard(slot);
+        } else {
+          if (parent->untag()->TryAcquireRememberedBit()) {
+            Thread::Current()->StoreBufferAddObjectGC(parent);
+          }
+        }
+      }
       return false;
     }
     *slot = Object::null();

@@ -485,16 +606,16 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
   }

   DART_FORCE_INLINE
-  void MarkObject(ObjectPtr obj) {
+  bool MarkObject(ObjectPtr obj) {
     if (obj->IsImmediateObject()) {
-      return;
+      return false;
     }

     if (sync && concurrent_ && obj->IsNewObject()) {
       if (TryAcquireMarkBit(obj)) {
         PushMarked(obj);
       }
-      return;
+      return false;
     }

     // While it might seem this is redundant with TryAcquireMarkBit, we must

@@ -507,26 +628,26 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     // was allocated after the concurrent marker started. It can read either a
     // zero or the header of an object allocated black, both of which appear
     // marked.
-    if (obj->untag()->IsMarkedIgnoreRace()) {
-      return;
+    uword tags = obj->untag()->tags_ignore_race();
+    if (UntaggedObject::IsMarked(tags)) {
+      return UntaggedObject::IsEvacuationCandidate(tags);
     }

-    intptr_t class_id = obj->GetClassId();
+    intptr_t class_id = UntaggedObject::ClassIdTag::decode(tags);
     ASSERT(class_id != kFreeListElement);

     if (sync && UNLIKELY(class_id == kInstructionsCid)) {
       // If this is the concurrent marker, this object may be non-writable due
       // to W^X (--write-protect-code).
       deferred_work_list_.Push(obj);
-      return;
+      return false;
     }

-    if (!TryAcquireMarkBit(obj)) {
-      // Already marked.
-      return;
+    if (TryAcquireMarkBit(obj)) {
+      PushMarked(obj);
     }

-    PushMarked(obj);
+    return UntaggedObject::IsEvacuationCandidate(tags);
   }

   PageSpace* page_space_;

@@ -537,6 +658,7 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
   uintptr_t marked_bytes_;
   int64_t marked_micros_;
   bool concurrent_;
+  bool has_evacuation_candidate_;

   DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitorBase);
 };

@@ -805,6 +927,7 @@ class ParallelMarkTask : public ThreadPool::Task {
       num_busy_->fetch_add(1u);
       visitor_->set_concurrent(false);
       marker_->IterateRoots(visitor_);
+      visitor_->FinishedRoots();

       visitor_->ProcessDeferredMarking();

@@ -855,6 +978,8 @@ class ParallelMarkTask : public ThreadPool::Task {
         // Don't MournFinalizerEntries here, do it on main thread, so that we
         // don't have to coordinate workers.

+        thread->ReleaseStoreBuffer();  // Ahead of IterateWeak
+
         barrier_->Sync();
         marker_->IterateWeakRoots(thread);
         int64_t stop = OS::GetCurrentMonotonicMicros();
         visitor_->AddMicros(stop - start);

@@ -901,6 +1026,7 @@ class ConcurrentMarkTask : public ThreadPool::Task {
       int64_t start = OS::GetCurrentMonotonicMicros();

       marker_->IterateRoots(visitor_);
+      visitor_->FinishedRoots();

       visitor_->DrainMarkingStackWithPauseChecks();
       int64_t stop = OS::GetCurrentMonotonicMicros();

@@ -1023,6 +1149,7 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
   TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ConcurrentMark");
   int64_t start = OS::GetCurrentMonotonicMicros();
   IterateRoots(visitor);
+  visitor->FinishedRoots();
   int64_t stop = OS::GetCurrentMonotonicMicros();
   visitor->AddMicros(stop - start);
   if (FLAG_log_marker_tasks) {

@@ -1173,6 +1300,7 @@ void GCMarker::MarkObjects(PageSpace* page_space) {
     visitor.set_concurrent(false);
     ResetSlices();
     IterateRoots(&visitor);
+    visitor.FinishedRoots();
     visitor.ProcessDeferredMarking();
     visitor.DrainMarkingStack();
     visitor.ProcessDeferredMarking();

@@ -1181,6 +1309,7 @@ void GCMarker::MarkObjects(PageSpace* page_space) {
     visitor.MournWeakReferences();
     visitor.MournWeakArrays();
     visitor.MournFinalizerEntries();
+    thread->ReleaseStoreBuffer();  // Ahead of IterateWeak
     IterateWeakRoots(thread);
     // All marking done; detach code, etc.
     int64_t stop = OS::GetCurrentMonotonicMicros();
@@ -114,6 +114,7 @@ Page* Page::Allocate(intptr_t size, uword flags) {
   result->end_ = 0;
   result->survivor_end_ = 0;
   result->resolved_top_ = 0;
+  result->live_bytes_ = 0;

   if ((flags & kNew) != 0) {
     uword top = result->object_start();

@@ -171,7 +172,8 @@ void Page::Deallocate() {
 }

 void Page::VisitObjects(ObjectVisitor* visitor) const {
-  ASSERT(Thread::Current()->OwnsGCSafepoint());
+  ASSERT(Thread::Current()->OwnsGCSafepoint() ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   NoSafepointScope no_safepoint;
   uword obj_addr = object_start();
   uword end_addr = object_end();

@@ -207,9 +209,11 @@ void Page::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
   ASSERT(obj_addr == end_addr);
 }

-void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor) {
+void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
+                                bool only_marked) {
   ASSERT(Thread::Current()->OwnsGCSafepoint() ||
-         (Thread::Current()->task_kind() == Thread::kScavengerTask));
+         (Thread::Current()->task_kind() == Thread::kScavengerTask) ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   NoSafepointScope no_safepoint;

   if (card_table_ == nullptr) {

@@ -220,6 +224,7 @@ void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor) {
       static_cast<ArrayPtr>(UntaggedObject::FromAddr(object_start()));
   ASSERT(obj->IsArray() || obj->IsImmutableArray());
   ASSERT(obj->untag()->IsCardRemembered());
+  if (only_marked && !obj->untag()->IsMarked()) return;
   CompressedObjectPtr* obj_from = obj->untag()->from();
   CompressedObjectPtr* obj_to =
       obj->untag()->to(Smi::Value(obj->untag()->length()));

@@ -72,6 +72,7 @@ class Page {
     kVMIsolate = 1 << 3,
     kNew = 1 << 4,
     kEvacuationCandidate = 1 << 5,
+    kNeverEvacuate = 1 << 6,
   };
   bool is_executable() const { return (flags_ & kExecutable) != 0; }
   bool is_large() const { return (flags_ & kLarge) != 0; }

@@ -82,6 +83,21 @@ class Page {
   bool is_evacuation_candidate() const {
     return (flags_ & kEvacuationCandidate) != 0;
   }
+  void set_evacuation_candidate(bool value) {
+    if (value) {
+      flags_ |= kEvacuationCandidate;
+    } else {
+      flags_ &= ~kEvacuationCandidate;
+    }
+  }
+  bool is_never_evacuate() const { return (flags_ & kNeverEvacuate) != 0; }
+  void set_never_evacuate(bool value) {
+    if (value) {
+      flags_ |= kNeverEvacuate;
+    } else {
+      flags_ &= ~kNeverEvacuate;
+    }
+  }

   Page* next() const { return next_; }
   void set_next(Page* next) { next_ = next; }

@@ -105,6 +121,11 @@ class Page {
   }
   intptr_t used() const { return object_end() - object_start(); }

+  intptr_t live_bytes() const { return live_bytes_; }
+  void set_live_bytes(intptr_t value) { live_bytes_ = value; }
+  void add_live_bytes(intptr_t value) { live_bytes_ += value; }
+  void sub_live_bytes(intptr_t value) { live_bytes_ -= value; }
+
   ForwardingPage* forwarding_page() const { return forwarding_page_; }
   void RegisterUnwindingRecords();
   void UnregisterUnwindingRecords();

@@ -142,11 +163,12 @@ class Page {
     ASSERT(obj->IsHeapObject());
     return reinterpret_cast<Page*>(static_cast<uword>(obj) & kPageMask);
   }

   // Warning: This does not work for addresses on image pages or on large pages.
   static Page* Of(uword addr) {
     return reinterpret_cast<Page*>(addr & kPageMask);
   }
+  static Page* Of(void* addr) {
+    return reinterpret_cast<Page*>(reinterpret_cast<uword>(addr) & kPageMask);
+  }

   // 1 card = 32 slots.
   static constexpr intptr_t kSlotsPerCardLog2 = 5;

@@ -173,7 +195,8 @@ class Page {
     return IsCardRemembered(reinterpret_cast<uword>(slot));
   }
 #endif
-  void VisitRememberedCards(PredicateObjectPointerVisitor* visitor);
+  void VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
+                            bool only_marked = false);
   void ResetProgressBar();

   Thread* owner() const { return owner_; }
@@ -263,7 +286,8 @@ class Page {
     intptr_t word_offset = index >> kBitsPerWordLog2;
     intptr_t bit_offset = index & (kBitsPerWord - 1);
     uword bit_mask = static_cast<uword>(1) << bit_offset;
-    card_table_[word_offset] |= bit_mask;
+    reinterpret_cast<std::atomic<uword>*>(&card_table_[word_offset])
+        ->fetch_or(bit_mask, std::memory_order_relaxed);
   }
   bool IsCardRemembered(uword slot) {
     ASSERT(Contains(slot));
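Why the plain `|=` above becomes an atomic `fetch_or` (editor's sketch, not VM source): with the incremental compactor, marking tasks now call RememberCard concurrently with mutator barriers, and two plain read-modify-writes on the same card-table word can lose a bit:

```c++
#include <atomic>
#include <cstdint>

using uword = uintptr_t;  // matches the VM's word type

// Racy: both threads can read the same old word, OR in different bits, and
// one store then overwrites the other's bit (a lost update).
void RememberCardRacy(uword* word, uword mask) {
  *word |= mask;
}

// Safe: the read-modify-write is one indivisible atomic operation, so
// concurrent RememberCard calls on the same word cannot drop bits.
void RememberCardAtomic(uword* word, uword mask) {
  reinterpret_cast<std::atomic<uword>*>(word)->fetch_or(
      mask, std::memory_order_relaxed);
}
```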
@@ -316,7 +340,10 @@ class Page {
   // value meets the allocation top. Called "SCAN" in the original Cheney paper.
   uword resolved_top_;

-  friend class CheckStoreBufferVisitor;
+  RelaxedAtomic<intptr_t> live_bytes_;
+
+  friend class CheckStoreBufferScavengeVisitor;
+  friend class CheckStoreBufferEvacuateVisitor;
   friend class GCCompactor;
   friend class PageSpace;
   template <bool>

@@ -10,6 +10,7 @@
 #include "vm/dart.h"
 #include "vm/heap/become.h"
 #include "vm/heap/compactor.h"
+#include "vm/heap/incremental_compactor.h"
 #include "vm/heap/marker.h"
 #include "vm/heap/safepoint.h"
 #include "vm/heap/sweeper.h"

@@ -346,6 +347,7 @@ uword PageSpace::TryAllocateInFreshPage(intptr_t size,
     // Start of the newly allocated page is the allocated object.
     result = page->object_start();
     // Note: usage_.capacity_in_words is increased by AllocatePage.
+    Page::Of(result)->add_live_bytes(size);
     usage_.used_in_words += (size >> kWordSizeLog2);
     // Enqueue the remainder in the free list.
     uword free_start = result + size;

@@ -387,6 +389,7 @@ uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
     if (page != nullptr) {
       result = page->object_start();
       // Note: usage_.capacity_in_words is increased by AllocateLargePage.
+      Page::Of(result)->add_live_bytes(size);
       usage_.used_in_words += (size >> kWordSizeLog2);
     }
   }

@@ -413,6 +416,9 @@ uword PageSpace::TryAllocateInternal(intptr_t size,
                                    is_locked);
       // usage_ is updated by the call above.
     } else {
+      if (!is_protected) {
+        Page::Of(result)->add_live_bytes(size);
+      }
       usage_.used_in_words += (size >> kWordSizeLog2);
     }
   } else {

@@ -1039,6 +1045,9 @@ void PageSpace::CollectGarbageHelper(Thread* thread,
   if (marker_ == nullptr) {
     ASSERT(phase() == kDone);
     marker_ = new GCMarker(isolate_group, heap_);
+    if (FLAG_use_incremental_compactor) {
+      GCIncrementalCompactor::Prologue(this);
+    }
   } else {
     ASSERT(phase() == kAwaitingFinalization);
   }

@@ -1059,15 +1068,26 @@ void PageSpace::CollectGarbageHelper(Thread* thread,
   delete marker_;
   marker_ = nullptr;

-  // Reset the freelists and setup sweeping.
-  for (intptr_t i = 0; i < num_freelists_; i++) {
-    freelists_[i].Reset();
+  if (FLAG_verify_store_buffer) {
+    VerifyStoreBuffers("Verifying remembered set after marking");
   }

+  if (FLAG_verify_before_gc) {
+    heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
+  }
+
+  bool has_reservation = MarkReservation();
+
+  bool new_space_is_swept = false;
+  if (FLAG_use_incremental_compactor) {
+    new_space_is_swept = GCIncrementalCompactor::Epilogue(this);
+  }
+
+  // Reset the freelists and setup sweeping.
+  for (intptr_t i = 0; i < num_freelists_; i++) {
+    freelists_[i].Reset();
+  }
+
   {
     // Executable pages are always swept immediately to simplify
     // code protection.

@@ -1090,8 +1110,6 @@ void PageSpace::CollectGarbageHelper(Thread* thread,
     }
   }

-  bool has_reservation = MarkReservation();
-
   {
     // Move pages to sweeper work lists.
     MutexLocker ml(&pages_lock_);

@@ -1105,23 +1123,24 @@ void PageSpace::CollectGarbageHelper(Thread* thread,
     }
   }

-  bool can_verify;
-  SweepNew();
+  if (!new_space_is_swept) {
+    SweepNew();
+  }
+  bool is_concurrent_sweep_running = false;
   if (compact) {
     Compact(thread);
     set_phase(kDone);
-    can_verify = true;
+    is_concurrent_sweep_running = true;
   } else if (FLAG_concurrent_sweep && has_reservation) {
     ConcurrentSweep(isolate_group);
-    can_verify = false;
+    is_concurrent_sweep_running = true;
   } else {
     SweepLarge();
     Sweep(/*exclusive*/ true);
     set_phase(kDone);
-    can_verify = true;
   }

-  if (FLAG_verify_after_gc && can_verify) {
+  if (FLAG_verify_after_gc && !is_concurrent_sweep_running) {
     heap_->VerifyGC("Verifying after sweeping", kForbidMarked);
   }
@@ -1149,6 +1168,163 @@ void PageSpace::CollectGarbageHelper(Thread* thread,
   }
 }

+class CollectStoreBufferEvacuateVisitor : public ObjectPointerVisitor {
+ public:
+  CollectStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
+      : ObjectPointerVisitor(IsolateGroup::Current()),
+        in_store_buffer_(in_store_buffer),
+        msg_(msg) {}
+
+  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
+    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = *ptr;
+      RELEASE_ASSERT_WITH_MSG(obj->untag()->IsRemembered(), msg_);
+      RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
+
+      RELEASE_ASSERT_WITH_MSG(!obj->untag()->IsCardRemembered(), msg_);
+      if (obj.GetClassId() == kArrayCid) {
+        const uword length =
+            Smi::Value(static_cast<UntaggedArray*>(obj.untag())->length());
+        RELEASE_ASSERT_WITH_MSG(!Array::UseCardMarkingForAllocation(length),
+                                msg_);
+      }
+      in_store_buffer_->Add(obj);
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* from,
+                               CompressedObjectPtr* to) override {
+    UNREACHABLE();  // Store buffer blocks are not compressed.
+  }
+#endif
+
+ private:
+  ObjectSet* const in_store_buffer_;
+  const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferEvacuateVisitor);
+};
+
+class CheckStoreBufferEvacuateVisitor : public ObjectVisitor,
+                                        public ObjectPointerVisitor {
+ public:
+  CheckStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
+      : ObjectVisitor(),
+        ObjectPointerVisitor(IsolateGroup::Current()),
+        in_store_buffer_(in_store_buffer),
+        msg_(msg) {}
+
+  void VisitObject(ObjectPtr obj) override {
+    if (obj->IsPseudoObject()) return;
+    RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
+    if (!obj->untag()->IsMarked()) return;
+
+    if (obj->untag()->IsRemembered()) {
+      RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
+    } else {
+      RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
+    }
+
+    visiting_ = obj;
+    is_remembered_ = obj->untag()->IsRemembered();
+    is_card_remembered_ = obj->untag()->IsCardRemembered();
+    if (is_card_remembered_) {
+      RELEASE_ASSERT_WITH_MSG(!is_remembered_, msg_);
+      RELEASE_ASSERT_WITH_MSG(Page::Of(obj)->progress_bar_ == 0, msg_);
+    }
+    obj->untag()->VisitPointers(this);
+  }
+
+  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
+    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = *ptr;
+      if (obj->IsHeapObject() && obj->untag()->IsEvacuationCandidate()) {
+        if (is_card_remembered_) {
+          if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
+            FATAL(
+                "%s: Old object %#" Px " references new object %#" Px
+                ", but the "
+                "slot's card is not remembered. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+          }
+        } else if (!is_remembered_) {
+          FATAL("%s: Old object %#" Px " references new object %#" Px
+                ", but it is "
+                "not in any store buffer. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+        }
+      }
+    }
+  }
+
+#if defined(DART_COMPRESSED_POINTERS)
+  void VisitCompressedPointers(uword heap_base,
+                               CompressedObjectPtr* from,
+                               CompressedObjectPtr* to) override {
+    for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
+      ObjectPtr obj = ptr->Decompress(heap_base);
+      if (obj->IsHeapObject() && obj->IsNewObject()) {
+        if (is_card_remembered_) {
+          if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
+            FATAL(
+                "%s: Old object %#" Px " references new object %#" Px
+                ", but the "
+                "slot's card is not remembered. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+          }
+        } else if (!is_remembered_) {
+          FATAL("%s: Old object %#" Px " references new object %#" Px
+                ", but it is "
+                "not in any store buffer. Consider using rr to watch the "
+                "slot %p and reverse-continue to find the store with a missing "
+                "barrier.\n",
+                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
+                ptr);
+        }
+      }
+    }
+  }
+#endif
+
+ private:
+  const ObjectSet* const in_store_buffer_;
+  ObjectPtr visiting_;
+  bool is_remembered_;
+  bool is_card_remembered_;
+  const char* msg_;
+};
+
+void PageSpace::VerifyStoreBuffers(const char* msg) {
+  ASSERT(msg != nullptr);
+  Thread* thread = Thread::Current();
+  StackZone stack_zone(thread);
+  Zone* zone = stack_zone.GetZone();
+
+  ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
+  heap_->AddRegionsToObjectSet(in_store_buffer);
+
+  {
+    CollectStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
+    heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
+  }
+
+  {
+    CheckStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
+    heap_->old_space()->VisitObjects(&visitor);
+  }
+}
+
 void PageSpace::SweepNew() {
   // TODO(rmacnak): Run in parallel with SweepExecutable.
   TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "SweepNew");
@ -1279,6 +1455,7 @@ uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
|
|||
intptr_t block_size = block->HeapSize();
|
||||
if (remaining > 0) {
|
||||
usage_.used_in_words -= (remaining >> kWordSizeLog2);
|
||||
Page::Of(freelist->top())->add_live_bytes(remaining);
|
||||
freelist->FreeLocked(freelist->top(), remaining);
|
||||
}
|
||||
freelist->set_top(reinterpret_cast<uword>(block));
|
||||
|
@ -1287,6 +1464,7 @@ uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
|
|||
// the size of the whole bump area here and subtract the remaining size
|
||||
// when switching to a new area.
|
||||
usage_.used_in_words += (block_size >> kWordSizeLog2);
|
||||
Page::Of(block)->add_live_bytes(block_size);
|
||||
remaining = block_size;
|
||||
}
|
||||
ASSERT(remaining >= size);
|
||||
|
@ -1307,6 +1485,7 @@ uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
|
|||
uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
|
||||
uword result = freelist->TryAllocateSmallLocked(size);
|
||||
if (result != 0) {
|
||||
Page::Of(result)->add_live_bytes(size);
|
||||
freelist->AddUnaccountedSize(size);
|
||||
return result;
|
||||
}
|
||||
|
@ -1349,6 +1528,7 @@ void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
|
|||
page->end_ = memory->end();
|
||||
page->survivor_end_ = 0;
|
||||
page->resolved_top_ = 0;
|
||||
page->live_bytes_ = 0;
|
||||
|
||||
MutexLocker ml(&pages_lock_);
|
||||
page->next_ = image_pages_;
|
||||
|
|
|
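The verification routine above follows a collect-then-check shape: one visitor records every object the store buffer mentions into an ObjectSet, then a second visitor walks the whole old space and cross-checks each object's remembered state against that set. A minimal standalone sketch of the same pattern, with invented toy types (std::unordered_set standing in for the VM's ObjectSet):

```c++
#include <cassert>
#include <unordered_set>
#include <vector>

// Toy stand-ins; the VM's ObjectSet is a bitvector over the heap's regions.
struct Obj {
  bool remembered = false;  // mirrors the header's remembered bit
};

int main() {
  Obj a, b, c;
  a.remembered = true;
  std::vector<Obj*> store_buffer = {&a};       // contents of the store buffer
  std::vector<Obj*> old_space = {&a, &b, &c};  // every old-space object

  // Phase 1 (the Collect visitor): record store-buffer contents in a set.
  std::unordered_set<Obj*> in_store_buffer(store_buffer.begin(),
                                           store_buffer.end());

  // Phase 2 (the Check visitor): each object's remembered state must agree
  // with its membership in the set, in both directions.
  for (Obj* obj : old_space) {
    assert(obj->remembered == (in_store_buffer.count(obj) != 0));
  }
  return 0;
}
```

Building the set first makes the second pass a pure read, so it can assert both directions: remembered implies present, and not-remembered implies absent.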
@@ -184,6 +184,11 @@ class PageSpace {
   }
   void EvaluateAfterLoading() {
     page_space_controller_.EvaluateAfterLoading(usage_);
+
+    MutexLocker ml(&pages_lock_);
+    for (Page* page = pages_; page != nullptr; page = page->next()) {
+      page->set_never_evacuate(true);
+    }
   }

   intptr_t UsedInWords() const { return usage_.used_in_words; }

@@ -412,6 +417,7 @@ class PageSpace {
   void FreePages(Page* pages);

   void CollectGarbageHelper(Thread* thread, bool compact, bool finalize);
+  void VerifyStoreBuffers(const char* msg);
   void SweepNew();
   void SweepLarge();
   void Sweep(bool exclusive);

@@ -496,6 +502,9 @@ class PageSpace {
   friend class PageSpaceController;
   friend class ConcurrentSweeperTask;
   friend class GCCompactor;
+  friend class GCIncrementalCompactor;
+  friend class PrologueTask;
+  friend class EpilogueTask;
   friend class CompactorTask;
   friend class Code;
@@ -266,6 +266,9 @@ void BlockStack<BlockSize>::TrimGlobalEmpty() {
   }
 }

+template class PointerBlock<kStoreBufferBlockSize>;
+template class PointerBlock<kMarkingStackBlockSize>;
+
 template class BlockStack<kStoreBufferBlockSize>;
 template class BlockStack<kMarkingStackBlockSize>;
@@ -129,7 +129,7 @@ class BlockStack {

   private:
     Block* head_;
-    intptr_t length_;
+    RelaxedAtomic<intptr_t> length_;
     DISALLOW_COPY_AND_ASSIGN(List);
   };
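Switching List::length_ from a plain intptr_t to a RelaxedAtomic suggests the block count is now read or bumped from more than one thread (mutator plus GC tasks) without a common lock. Relaxed ordering is enough for a counter like this: increments are never lost, while any required ordering comes from other synchronization. A self-contained sketch of that property using std::atomic directly (thread and iteration counts invented):

```c++
#include <atomic>
#include <cassert>
#include <thread>

// Plays the role of RelaxedAtomic<intptr_t>.
std::atomic<long> length{0};

void push_blocks(int n) {
  for (int i = 0; i < n; i++) {
    // Relaxed: no ordering guarantees, but increments are never lost.
    length.fetch_add(1, std::memory_order_relaxed);
  }
}

int main() {
  std::thread t1(push_blocks, 10000);
  std::thread t2(push_blocks, 10000);
  t1.join();
  t2.join();
  assert(length.load(std::memory_order_relaxed) == 20000);
  return 0;
}
```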
@@ -220,8 +220,11 @@ class ScavengerVisitorBase : public ObjectPointerVisitor,
   }

   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
+#if !defined(TARGET_ARCH_IA32)
+    // Pointers embedded in Instructions are not aligned.
     ASSERT(Utils::IsAligned(first, sizeof(*first)));
     ASSERT(Utils::IsAligned(last, sizeof(*last)));
+#endif
     for (ObjectPtr* current = first; current <= last; current++) {
       ScavengePointer(current);
     }

@@ -379,9 +382,12 @@ class ScavengerVisitorBase : public ObjectPointerVisitor,
     // ScavengePointer cannot be called recursively.
     ObjectPtr obj = *p;

-    if (obj->IsImmediateOrOldObject()) {
+    if (obj->IsImmediateObject()) {
       return false;
     }
+    if (obj->IsOldObject()) {
+      return obj->untag()->IsEvacuationCandidate();
+    }

     ObjectPtr new_obj = ScavengeObject(obj);

@@ -408,10 +414,12 @@ class ScavengerVisitorBase : public ObjectPointerVisitor,
     // ScavengePointer cannot be called recursively.
     ObjectPtr obj = p->Decompress(heap_base);

     // Could be tested without decompression.
-    if (obj->IsImmediateOrOldObject()) {
+    if (obj->IsImmediateObject()) {
       return false;
     }
+    if (obj->IsOldObject()) {
+      return obj->untag()->IsEvacuationCandidate();
+    }

     ObjectPtr new_obj = ScavengeObject(obj);

@@ -483,7 +491,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor,
       // Promoted: update age/barrier tags.
       uword tags = static_cast<uword>(header);
       tags = UntaggedObject::OldAndNotRememberedBit::update(true, tags);
-      tags = UntaggedObject::NewBit::update(false, tags);
+      tags = UntaggedObject::NewOrEvacuationCandidateBit::update(false, tags);
       new_obj->untag()->tags_.store(tags, std::memory_order_relaxed);
     }

@@ -506,6 +514,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor,
       if (new_obj->IsOldObject()) {
         // Abandon as a free list element.
         FreeListElement::AsElement(new_addr, size);
+        Page::Of(new_addr)->sub_live_bytes(size);
         bytes_promoted_ -= size;
       } else {
         // Undo to-space allocation.

@@ -862,9 +871,9 @@ intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words,
   return old_size_in_words;
 }

-class CollectStoreBufferVisitor : public ObjectPointerVisitor {
+class CollectStoreBufferScavengeVisitor : public ObjectPointerVisitor {
  public:
-  CollectStoreBufferVisitor(ObjectSet* in_store_buffer, const char* msg)
+  CollectStoreBufferScavengeVisitor(ObjectSet* in_store_buffer, const char* msg)
       : ObjectPointerVisitor(IsolateGroup::Current()),
         in_store_buffer_(in_store_buffer),
         msg_(msg) {}

@@ -897,14 +906,16 @@ class CollectStoreBufferVisitor : public ObjectPointerVisitor {
  private:
   ObjectSet* const in_store_buffer_;
   const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferScavengeVisitor);
 };

-class CheckStoreBufferVisitor : public ObjectVisitor,
-                                public ObjectPointerVisitor {
+class CheckStoreBufferScavengeVisitor : public ObjectVisitor,
+                                        public ObjectPointerVisitor {
  public:
-  CheckStoreBufferVisitor(ObjectSet* in_store_buffer,
-                          const SemiSpace* to,
-                          const char* msg)
+  CheckStoreBufferScavengeVisitor(ObjectSet* in_store_buffer,
+                                  const SemiSpace* to,
+                                  const char* msg)
       : ObjectVisitor(),
         ObjectPointerVisitor(IsolateGroup::Current()),
         in_store_buffer_(in_store_buffer),

@@ -915,8 +926,11 @@ class CheckStoreBufferVisitor : public ObjectVisitor,
     if (obj->IsPseudoObject()) return;
     RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);

-    RELEASE_ASSERT_WITH_MSG(
-        obj->untag()->IsRemembered() == in_store_buffer_->Contains(obj), msg_);
+    if (obj->untag()->IsRemembered()) {
+      RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
+    } else {
+      RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
+    }

     visiting_ = obj;
     is_remembered_ = obj->untag()->IsRemembered();

@@ -999,6 +1013,8 @@ class CheckStoreBufferVisitor : public ObjectVisitor,
   bool is_remembered_;
   bool is_card_remembered_;
   const char* msg_;
+
+  DISALLOW_COPY_AND_ASSIGN(CheckStoreBufferScavengeVisitor);
 };

 void Scavenger::VerifyStoreBuffers(const char* msg) {

@@ -1011,12 +1027,12 @@ void Scavenger::VerifyStoreBuffers(const char* msg) {
   heap_->AddRegionsToObjectSet(in_store_buffer);

   {
-    CollectStoreBufferVisitor visitor(in_store_buffer, msg);
+    CollectStoreBufferScavengeVisitor visitor(in_store_buffer, msg);
     heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
   }

   {
-    CheckStoreBufferVisitor visitor(in_store_buffer, to_, msg);
+    CheckStoreBufferScavengeVisitor visitor(in_store_buffer, to_, msg);
     heap_->old_space()->VisitObjects(&visitor);
   }
 }

@@ -1612,10 +1628,20 @@ bool ScavengerVisitorBase<parallel>::ForwardOrSetNullIfCollected(
     ObjectPtr parent,
     CompressedObjectPtr* slot) {
   ObjectPtr target = slot->Decompress(parent->heap_base());
-  if (target->IsImmediateOrOldObject()) {
+  if (target->IsImmediateObject()) {
     // Object already null (which is old) or not touched during this GC.
     return false;
   }
+  if (target->IsOldObject()) {
+    if (parent->IsOldObject() && target->untag()->IsEvacuationCandidate()) {
+      if (!parent->untag()->IsCardRemembered()) {
+        if (parent->untag()->TryAcquireRememberedBit()) {
+          Thread::Current()->StoreBufferAddObjectGC(parent);
+        }
+      }
+    }
+    return false;
+  }
   uword header = ReadHeaderRelaxed(target);
   if (IsForwarding(header)) {
     // Get the new location of the object.

@@ -1644,7 +1670,8 @@ void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const {

 void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
   ASSERT(Thread::Current()->OwnsGCSafepoint() ||
-         (Thread::Current()->task_kind() == Thread::kMarkerTask));
+         (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
+         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
   for (Page* page = to_->head(); page != nullptr; page = page->next()) {
     page->VisitObjects(visitor);
   }

@@ -1814,7 +1841,6 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
     }
   }
   ASSERT(promotion_stack_.IsEmpty());
-  heap_->old_space()->ResumeConcurrentMarking();

   // Scavenge finished. Run accounting.
   int64_t end = OS::GetCurrentMonotonicMicros();

@@ -1822,6 +1848,7 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
       start, end, usage_before, GetCurrentUsage(), promo_candidate_words,
       bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2));
   Epilogue(from);
+  heap_->old_space()->ResumeConcurrentMarking();

   if (FLAG_verify_after_gc) {
     heap_->WaitForSweeperTasksAtSafepoint(thread);

@@ -1908,7 +1935,8 @@ void Scavenger::ReverseScavenge(SemiSpace** from) {
       uword from_header = static_cast<uword>(to_header);
       from_header =
           UntaggedObject::OldAndNotRememberedBit::update(false, from_header);
-      from_header = UntaggedObject::NewBit::update(true, from_header);
+      from_header = UntaggedObject::NewOrEvacuationCandidateBit::update(
+          true, from_header);

       WriteHeaderRelaxed(from_obj, from_header);
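One detail worth calling out in the ForwardOrSetNullIfCollected hunk above: when a weak slot still points at an old-space evacuation candidate, the parent is re-added to the store buffer only if it wins TryAcquireRememberedBit, so parallel scavenger workers cannot enqueue the same parent twice. A toy model of that claim-once pattern (all types and counts here are invented):

```c++
#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

struct Obj {
  std::atomic<bool> remembered{false};
  // Returns true for exactly one caller, the property the VM's
  // TryAcquireRememberedBit provides.
  bool TryAcquireRememberedBit() {
    return !remembered.exchange(true, std::memory_order_relaxed);
  }
};

int main() {
  Obj parent;
  std::atomic<int> enqueued{0};
  auto worker = [&]() {
    if (parent.TryAcquireRememberedBit()) {
      enqueued.fetch_add(1);  // stands in for StoreBufferAddObjectGC(parent)
    }
  };
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) threads.emplace_back(worker);
  for (auto& t : threads) t.join();
  assert(enqueued.load() == 1);  // the parent was enqueued exactly once
  return 0;
}
```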
@@ -102,6 +102,7 @@ bool GCSweeper::SweepPage(Page* page, FreeList* freelist) {
   }
   // Only add to the free list if not covering the whole page.
   if ((current == start) && (free_end == end)) {
+    page->set_live_bytes(0);
     return false;  // Not in use.
   }
   obj_size = free_end - current;

@@ -131,6 +132,7 @@ bool GCSweeper::SweepPage(Page* page, FreeList* freelist) {
   }
   ASSERT(current == end);
   ASSERT(used_in_bytes != 0);
+  page->set_live_bytes(used_in_bytes);
   return true;  // In use.
 }
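Both sweeper hunks above feed the new per-page live-byte counters: a fully free page records zero, and a partially used page records exactly the bytes its surviving objects occupy, which the incremental compactor can presumably use to rank sparsely occupied pages as evacuation candidates. A toy sweep over a fixed-slot page showing the same bookkeeping (slot size and layout invented):

```c++
#include <cassert>
#include <cstddef>

// Toy page: eight fixed-size slots; a marked slot holds a live object.
struct Page {
  static const size_t kSlotSize = 64;
  bool marked[8] = {};
  size_t live_bytes = 0;
};

// Mirrors SweepPage's contract: returns whether the page is still in use.
bool SweepPage(Page* page) {
  size_t used_in_bytes = 0;
  for (bool m : page->marked) {
    if (m) used_in_bytes += Page::kSlotSize;  // live object survives
    // A real sweep would link dead ranges onto the free list here.
  }
  page->live_bytes = used_in_bytes;  // zero when the whole page is free
  return used_in_bytes != 0;
}

int main() {
  Page p;
  p.marked[2] = p.marked[5] = true;
  assert(SweepPage(&p));
  assert(p.live_bytes == 2 * Page::kSlotSize);
  return 0;
}
```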
@@ -601,7 +601,7 @@ static constexpr uword kReadOnlyGCBits =
     UntaggedObject::AlwaysSetBit::encode(true) |
     UntaggedObject::NotMarkedBit::encode(false) |
     UntaggedObject::OldAndNotRememberedBit::encode(true) |
-    UntaggedObject::NewBit::encode(false);
+    UntaggedObject::NewOrEvacuationCandidateBit::encode(false);

 uword ImageWriter::GetMarkedTags(classid_t cid,
                                  intptr_t size,
@@ -2804,7 +2804,8 @@ void IsolateGroup::ForEachIsolate(
          (thread->task_kind() == Thread::kMutatorTask) ||
          (thread->task_kind() == Thread::kMarkerTask) ||
          (thread->task_kind() == Thread::kCompactorTask) ||
-         (thread->task_kind() == Thread::kScavengerTask));
+         (thread->task_kind() == Thread::kScavengerTask) ||
+         (thread->task_kind() == Thread::kIncrementalCompactorTask));
   for (Isolate* isolate : isolates_) {
     function(isolate);
   }
@@ -2735,7 +2735,7 @@ void Object::InitializeObject(uword address,
   tags = UntaggedObject::AlwaysSetBit::update(true, tags);
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(is_old, tags);
-  tags = UntaggedObject::NewBit::update(!is_old, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(!is_old, tags);
   tags = UntaggedObject::ImmutableBit::update(
       Object::ShouldHaveImmutabilityBitSet(class_id), tags);
 #if defined(HASH_IN_OBJECT_HEADER)
@@ -235,7 +235,7 @@ void SetNewSpaceTaggingWord(ObjectPtr to, classid_t cid, uint32_t size) {
   tags = UntaggedObject::NotMarkedBit::update(true, tags);
   tags = UntaggedObject::OldAndNotRememberedBit::update(false, tags);
   tags = UntaggedObject::CanonicalBit::update(false, tags);
-  tags = UntaggedObject::NewBit::update(true, tags);
+  tags = UntaggedObject::NewOrEvacuationCandidateBit::update(true, tags);
   tags = UntaggedObject::ImmutableBit::update(
       IsUnmodifiableTypedDataViewClassId(cid), tags);
 #if defined(HASH_IN_OBJECT_HEADER)
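Both InitializeObject and SetNewSpaceTaggingWord assemble the header word through chained BitField update calls. A simplified re-implementation of the encode/update/decode mechanics those calls rely on (bit positions 2 and 3 match the TagBits enum in the raw_object.h hunk below; everything else is a stripped-down stand-in for the VM's BitField):

```c++
#include <cassert>
#include <cstdint>

// Stripped-down BitField: S bits starting at position P within a tags word.
template <typename T, int P, int S = 1>
struct BitField {
  static constexpr uintptr_t mask = ((uintptr_t{1} << S) - 1) << P;
  static uintptr_t update(T value, uintptr_t tags) {
    return (tags & ~mask) | ((static_cast<uintptr_t>(value) << P) & mask);
  }
  static T decode(uintptr_t tags) {
    return static_cast<T>((tags & mask) >> P);
  }
};

using NotMarkedBit = BitField<bool, 2>;                 // kNotMarkedBit
using NewOrEvacuationCandidateBit = BitField<bool, 3>;  // from TagBits

int main() {
  uintptr_t tags = 0;
  tags = NotMarkedBit::update(true, tags);
  tags = NewOrEvacuationCandidateBit::update(true, tags);   // a new object
  assert(NotMarkedBit::decode(tags));
  assert(NewOrEvacuationCandidateBit::decode(tags));
  tags = NewOrEvacuationCandidateBit::update(false, tags);  // promoted to old
  assert(!NewOrEvacuationCandidateBit::decode(tags));
  return 0;
}
```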
@@ -48,16 +48,12 @@ void UntaggedObject::Validate(IsolateGroup* isolate_group) const {
   // Validate that the tags_ field is sensible.
   uword tags = tags_;
   if (IsNewObject()) {
-    if (!NewBit::decode(tags)) {
+    if (!NewOrEvacuationCandidateBit::decode(tags)) {
       FATAL("New object missing kNewBit: %" Px "\n", tags);
     }
     if (OldAndNotRememberedBit::decode(tags)) {
       FATAL("New object has kOldAndNotRememberedBit: %" Px "\n", tags);
     }
-  } else {
-    if (NewBit::decode(tags)) {
-      FATAL("Old object has kNewBit: %" Px "\n", tags);
-    }
   }
   const intptr_t class_id = ClassIdTag::decode(tags);
   if (!isolate_group->class_table()->IsValidIndex(class_id)) {
@@ -10,6 +10,7 @@
 #endif

 #include "platform/assert.h"
+#include "platform/thread_sanitizer.h"
 #include "vm/class_id.h"
 #include "vm/compiler/method_recognizer.h"
 #include "vm/compiler/runtime_api.h"

@@ -162,10 +163,10 @@ class UntaggedObject {
   enum TagBits {
     kCardRememberedBit = 0,
     kCanonicalBit = 1,
-    kNotMarkedBit = 2,            // Incremental barrier target.
-    kNewBit = 3,                  // Generational barrier target.
-    kAlwaysSetBit = 4,            // Incremental barrier source.
-    kOldAndNotRememberedBit = 5,  // Generational barrier source.
+    kNotMarkedBit = 2,                 // Incremental barrier target.
+    kNewOrEvacuationCandidateBit = 3,  // Generational barrier target.
+    kAlwaysSetBit = 4,                 // Incremental barrier source.
+    kOldAndNotRememberedBit = 5,       // Generational barrier source.
     kImmutableBit = 6,
     kReservedBit = 7,

@@ -177,11 +178,13 @@ class UntaggedObject {
     kHashTagSize = 32,
   };

-  static constexpr intptr_t kGenerationalBarrierMask = 1 << kNewBit;
+  static constexpr intptr_t kGenerationalBarrierMask =
+      1 << kNewOrEvacuationCandidateBit;
   static constexpr intptr_t kIncrementalBarrierMask = 1 << kNotMarkedBit;
   static constexpr intptr_t kBarrierOverlapShift = 2;
   COMPILE_ASSERT(kNotMarkedBit + kBarrierOverlapShift == kAlwaysSetBit);
-  COMPILE_ASSERT(kNewBit + kBarrierOverlapShift == kOldAndNotRememberedBit);
+  COMPILE_ASSERT(kNewOrEvacuationCandidateBit + kBarrierOverlapShift ==
+                 kOldAndNotRememberedBit);

   // The bit in the Smi tag position must be something that can be set to 0
   // for a dead filler object of either generation.

@@ -246,7 +249,8 @@ class UntaggedObject {

   class NotMarkedBit : public BitField<uword, bool, kNotMarkedBit, 1> {};

-  class NewBit : public BitField<uword, bool, kNewBit, 1> {};
+  class NewOrEvacuationCandidateBit
+      : public BitField<uword, bool, kNewOrEvacuationCandidateBit, 1> {};

   class CanonicalBit : public BitField<uword, bool, kCanonicalBit, 1> {};

@@ -292,14 +296,12 @@ class UntaggedObject {
   }

   uword tags() const { return tags_; }
+  uword tags_ignore_race() const { return tags_.load_ignore_race(); }

   // Support for GC marking bit. Marked objects are either grey (not yet
   // visited) or black (already visited).
   static bool IsMarked(uword tags) { return !NotMarkedBit::decode(tags); }
   bool IsMarked() const { return !tags_.Read<NotMarkedBit>(); }
-  bool IsMarkedIgnoreRace() const {
-    return !tags_.ReadIgnoreRace<NotMarkedBit>();
-  }
   void SetMarkBit() {
     ASSERT(!IsMarked());
     tags_.UpdateBool<NotMarkedBit>(false);

@@ -324,6 +326,25 @@ class UntaggedObject {
   DART_WARN_UNUSED_RESULT
   bool TryAcquireMarkBit() { return tags_.TryClear<NotMarkedBit>(); }

+  static bool IsEvacuationCandidate(uword tags) {
+    return NewOrEvacuationCandidateBit::decode(tags);
+  }
+  bool IsEvacuationCandidate() {
+    return tags_.Read<NewOrEvacuationCandidateBit>();
+  }
+  void SetIsEvacuationCandidate() {
+    ASSERT(IsOldObject());
+    tags_.UpdateBool<NewOrEvacuationCandidateBit>(true);
+  }
+  void SetIsEvacuationCandidateUnsynchronized() {
+    ASSERT(IsOldObject());
+    tags_.UpdateUnsynchronized<NewOrEvacuationCandidateBit>(true);
+  }
+  void ClearIsEvacuationCandidateUnsynchronized() {
+    ASSERT(IsOldObject());
+    tags_.UpdateUnsynchronized<NewOrEvacuationCandidateBit>(false);
+  }
+
   // Canonical objects have the property that two canonical objects are
   // logically equal iff they are the same object (pointer equal).
   bool IsCanonical() const { return tags_.Read<CanonicalBit>(); }
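The accessors above make the renaming concrete: SetIsEvacuationCandidate flips the very bit that kGenerationalBarrierMask tests, so flagging an old object for evacuation makes existing barrier checks treat stores to it like stores to new objects, with no change to compiled code. A toy check of just that bit-level property (bit position from the TagBits enum above; the rest is invented):

```c++
#include <cassert>
#include <cstdint>

constexpr int kNewOrEvacuationCandidateBit = 3;  // from the TagBits enum above
constexpr uintptr_t kGenerationalBarrierMask =
    uintptr_t{1} << kNewOrEvacuationCandidateBit;

int main() {
  // An ordinary old object: the bit is clear, so barrier checks skip it.
  uintptr_t old_obj_tags = 0;
  assert((old_obj_tags & kGenerationalBarrierMask) == 0);

  // The incremental compactor flags the object for evacuation...
  old_obj_tags |= kGenerationalBarrierMask;

  // ...and the unchanged generational-barrier test now fires for stores
  // that create pointers to it, so those slots get recorded for updating.
  assert((old_obj_tags & kGenerationalBarrierMask) != 0);
  return 0;
}
```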
@@ -3223,6 +3244,8 @@ class UntaggedArray : public UntaggedInstance {
   template <typename Table, bool kAllCanonicalObjectsAreIncludedIntoSet>
   friend class CanonicalSetDeserializationCluster;
+  friend class Page;
+  template <bool>
   friend class MarkingVisitorBase;
   friend class FastObjectCopy;  // For initializing fields.
   friend void UpdateLengthField(intptr_t, ObjectPtr, ObjectPtr);  // length_
 };
@@ -670,7 +670,7 @@ void Thread::FreeActiveThread(Thread* thread, bool bypass_safepoint) {
 }

 void Thread::ReleaseStoreBuffer() {
-  ASSERT(IsAtSafepoint() || OwnsSafepoint());
+  ASSERT(IsAtSafepoint() || OwnsSafepoint() || task_kind_ == kMarkerTask);
   if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) {
     return;  // Nothing to release.
   }

@@ -813,6 +813,17 @@ void Thread::StoreBufferAcquire() {
   store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
 }

+void Thread::StoreBufferReleaseGC() {
+  StoreBufferBlock* block = store_buffer_block_;
+  store_buffer_block_ = nullptr;
+  isolate_group()->store_buffer()->PushBlock(block,
+                                             StoreBuffer::kIgnoreThreshold);
+}
+
+void Thread::StoreBufferAcquireGC() {
+  store_buffer_block_ = isolate_group()->store_buffer()->PopNonFullBlock();
+}
+
 void Thread::MarkingStackBlockProcess() {
   MarkingStackRelease();
   MarkingStackAcquire();

@@ -965,7 +976,6 @@ class RestoreWriteBarrierInvariantVisitor : public ObjectPointerVisitor {
   void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
     for (; first != last + 1; first++) {
       ObjectPtr obj = *first;
-      // Stores into new-space objects don't need a write barrier.
       if (obj->IsImmediateObject()) continue;

       // To avoid adding too much work into the remembered set, skip large
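The new StoreBufferReleaseGC/StoreBufferAcquireGC pair above flushes a GC thread's block with kIgnoreThreshold, presumably so that a GC task emptying its own buffer can never trip the fullness threshold that schedules more GC work. A self-contained sketch of why the policy flag matters (toy types; the real StoreBuffer signals through different machinery):

```c++
#include <cassert>
#include <vector>

struct Block {};

struct StoreBuffer {
  enum ThresholdPolicy { kCheckThreshold, kIgnoreThreshold };
  std::vector<Block*> full_blocks;
  bool gc_requested = false;

  void PushBlock(Block* block, ThresholdPolicy policy) {
    full_blocks.push_back(block);
    // Mutators push with kCheckThreshold, which may request a GC once the
    // buffer grows; GC tasks push with kIgnoreThreshold so that flushing
    // their own blocks can never schedule further GC work.
    if (policy == kCheckThreshold && full_blocks.size() > 4) {
      gc_requested = true;
    }
  }
};

int main() {
  StoreBuffer sb;
  Block blocks[8];
  for (Block& b : blocks) {
    sb.PushBlock(&b, StoreBuffer::kIgnoreThreshold);  // GC-side flushes
  }
  assert(!sb.gc_requested);  // a flushing GC task never re-triggers GC
  return 0;
}
```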
@@ -351,6 +351,7 @@ class Thread : public ThreadState {
     kCompactorTask = 0x10,
     kScavengerTask = 0x20,
     kSampleBlockTask = 0x40,
+    kIncrementalCompactorTask = 0x80,
   };
   // Converts a TaskKind to its corresponding C-String name.
   static const char* TaskKindToCString(TaskKind kind);

@@ -662,6 +663,8 @@ class Thread : public ThreadState {
   }
 #endif
   void StoreBufferBlockProcess(StoreBuffer::ThresholdPolicy policy);
+  void StoreBufferReleaseGC();
+  void StoreBufferAcquireGC();
   static intptr_t store_buffer_block_offset() {
     return OFFSET_OF(Thread, store_buffer_block_);
   }