Spelling runtime vm heap

Closes https://github.com/dart-lang/sdk/pull/50785

TEST=ci

GitOrigin-RevId: a09e4d5c6ccf5514fe7fccab26c220525d3a9feb
Change-Id: I0896a003fab240f8edf828955cd827f63dcd4a31
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/276683
Reviewed-by: Slava Egorov <vegorov@google.com>
Commit-Queue: Slava Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
This commit is contained in:
Josh Soref 2023-01-02 10:36:03 +00:00 committed by Commit Queue
parent 23c0ff5dba
commit a11bd0ce91
9 changed files with 11 additions and 11 deletions

View file

@@ -398,7 +398,7 @@ void Heap::NotifyIdle(int64_t deadline) {
if (old_space_.ShouldPerformIdleMarkCompact(deadline)) { if (old_space_.ShouldPerformIdleMarkCompact(deadline)) {
// We prefer mark-compact over other old space GCs if we have enough time, // We prefer mark-compact over other old space GCs if we have enough time,
// since it removes old space fragmentation and frees up most memory. // since it removes old space fragmentation and frees up most memory.
// Blocks for O(heap), roughtly twice as costly as mark-sweep. // Blocks for O(heap), roughly twice as costly as mark-sweep.
CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle); CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
} else if (old_space_.ReachedHardThreshold()) { } else if (old_space_.ReachedHardThreshold()) {
// Even though the following GC may exceed our idle deadline, we need to // Even though the following GC may exceed our idle deadline, we need to
@@ -977,7 +977,7 @@ void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) {
GetWeakTable(Heap::kOld, selector)->Forward(visitor); GetWeakTable(Heap::kOld, selector)->Forward(visitor);
} }
// Isolates might have forwarding tables (used for during snapshoting in // Isolates might have forwarding tables (used for during snapshotting in
// isolate communication). // isolate communication).
isolate_group()->ForEachIsolate( isolate_group()->ForEachIsolate(
[&](Isolate* isolate) { [&](Isolate* isolate) {

View file

@@ -256,7 +256,7 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
// failing to acquire the mark bit here doesn't reliably indicate the // failing to acquire the mark bit here doesn't reliably indicate the
// object was already encountered through the deferred marking stack. Our // object was already encountered through the deferred marking stack. Our
// processing here is idempotent, so repeated visits only hurt performance // processing here is idempotent, so repeated visits only hurt performance
// but not correctness. Duplicatation is expected to be low. // but not correctness. Duplication is expected to be low.
// By the absence of a special case, we are treating WeakProperties as // By the absence of a special case, we are treating WeakProperties as
// strong references here. This guarantees a WeakProperty will only be // strong references here. This guarantees a WeakProperty will only be
// added to the delayed_weak_properties_ list of the worker that // added to the delayed_weak_properties_ list of the worker that
@@ -880,7 +880,7 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
{ {
// Bulk increase task count before starting any task, instead of // Bulk increase task count before starting any task, instead of
// incrementing as each task is started, to prevent a task which // incrementing as each task is started, to prevent a task which
// races ahead from falsly beleiving it was the last task to complete. // races ahead from falsely believing it was the last task to complete.
MonitorLocker ml(page_space->tasks_lock()); MonitorLocker ml(page_space->tasks_lock());
ASSERT(page_space->phase() == PageSpace::kDone); ASSERT(page_space->phase() == PageSpace::kDone);
page_space->set_phase(PageSpace::kMarking); page_space->set_phase(PageSpace::kMarking);

View file

@@ -25,7 +25,7 @@ class Thread;
// The class GCMarker is used to mark reachable old generation objects as part // The class GCMarker is used to mark reachable old generation objects as part
// of the mark-sweep collection. The marking bit used is defined in RawObject. // of the mark-sweep collection. The marking bit used is defined in RawObject.
// Instances have a lifetime that spans from the beginining of concurrent // Instances have a lifetime that spans from the beginning of concurrent
// marking (or stop-the-world marking) until marking is complete. In particular, // marking (or stop-the-world marking) until marking is complete. In particular,
// an instance may be created and destroyed on different threads if the isolate // an instance may be created and destroyed on different threads if the isolate
// is exited during concurrent marking. // is exited during concurrent marking.

View file

@@ -22,7 +22,7 @@
namespace dart { namespace dart {
// This cache needs to be at least as big as FLAG_new_gen_semi_max_size or // This cache needs to be at least as big as FLAG_new_gen_semi_max_size or
// munmap will noticably impact performance. // munmap will noticeably impact performance.
static constexpr intptr_t kPageCacheCapacity = 8 * kWordSize; static constexpr intptr_t kPageCacheCapacity = 8 * kWordSize;
static Mutex* page_cache_mutex = nullptr; static Mutex* page_cache_mutex = nullptr;
static VirtualMemory* page_cache[kPageCacheCapacity] = {nullptr}; static VirtualMemory* page_cache[kPageCacheCapacity] = {nullptr};

View file

@@ -22,7 +22,7 @@ class Thread;
// Pages are allocated with kPageSize alignment so that the Page of any object // Pages are allocated with kPageSize alignment so that the Page of any object
// can be computed by masking the object with kPageMask. This does not apply to // can be computed by masking the object with kPageMask. This does not apply to
// image pages, whose address is choosen by the system loader rather than the // image pages, whose address is chosen by the system loader rather than the
// Dart VM. // Dart VM.
static constexpr intptr_t kPageSize = 512 * KB; static constexpr intptr_t kPageSize = 512 * KB;
static constexpr intptr_t kPageSizeInWords = kPageSize / kWordSize; static constexpr intptr_t kPageSizeInWords = kPageSize / kWordSize;

View file

@@ -47,7 +47,7 @@ class HeapProfileSampler {
// Returns number of bytes that should be be attributed to the sample. // Returns number of bytes that should be be attributed to the sample.
// If returned size is 0, the allocation should not be sampled. // If returned size is 0, the allocation should not be sampled.
// //
// Due to how the poission sampling works, some samples should be accounted // Due to how the poisson sampling works, some samples should be accounted
// multiple times if they cover allocations larger than the average sampling // multiple times if they cover allocations larger than the average sampling
// rate. // rate.
void SampleSize(intptr_t allocation_size); void SampleSize(intptr_t allocation_size);

View file

@@ -157,7 +157,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
// update is needed. If the underlying typed data is internal, the pointer // update is needed. If the underlying typed data is internal, the pointer
// must be updated if the typed data was copied or promoted. We cannot // must be updated if the typed data was copied or promoted. We cannot
// safely dereference the underlying typed data to make this distinction. // safely dereference the underlying typed data to make this distinction.
// It may have been forwarded by a different scavanger worker, so the access // It may have been forwarded by a different scavenger worker, so the access
// could have a data race. Rather than checking the CID of the underlying // could have a data race. Rather than checking the CID of the underlying
// typed data, which requires dereferencing the copied/promoted header, we // typed data, which requires dereferencing the copied/promoted header, we
// compare the view's internal pointer to what it should be if the // compare the view's internal pointer to what it should be if the

View file

@@ -86,7 +86,7 @@ class ScavengeStats {
// Of all data before scavenge, what fraction was found to be garbage? // Of all data before scavenge, what fraction was found to be garbage?
// If this scavenge included growth, assume the extra capacity would become // If this scavenge included growth, assume the extra capacity would become
// garbage to give the scavenger a chance to stablize at the new capacity. // garbage to give the scavenger a chance to stabilize at the new capacity.
double ExpectedGarbageFraction() const { double ExpectedGarbageFraction() const {
double work = double work =
after_.used_in_words + promoted_in_words_ + abandoned_in_words_; after_.used_in_words + promoted_in_words_ + abandoned_in_words_;

View file

@@ -30,7 +30,7 @@ bool GCSweeper::SweepPage(Page* page, FreeList* freelist, bool locked) {
ObjectPtr raw_obj = UntaggedObject::FromAddr(current); ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
ASSERT(Page::Of(raw_obj) == page); ASSERT(Page::Of(raw_obj) == page);
// These acquire operations balance release operations in array // These acquire operations balance release operations in array
// truncaton, ensuring the writes creating the filler object are ordered // truncation, ensuring the writes creating the filler object are ordered
// before the writes inserting the filler object into the freelist. // before the writes inserting the filler object into the freelist.
uword tags = raw_obj->untag()->tags_.load(std::memory_order_acquire); uword tags = raw_obj->untag()->tags_.load(std::memory_order_acquire);
intptr_t obj_size = raw_obj->untag()->HeapSize(tags); intptr_t obj_size = raw_obj->untag()->HeapSize(tags);