Reverted due to a VM test failure.

Revert "Makes new space iterable by filling up the mutator's TLAB"

This reverts commit 8b6fcf50e8.

R=rmacnak@google.com

Review-Url: https://codereview.chromium.org/2998453002 .
This commit is contained in:
Diogenes Nunez 2017-08-04 10:56:33 -07:00
parent 544713bced
commit 0b0f07d303
6 changed files with 42 additions and 147 deletions

View file

@ -57,72 +57,23 @@ Heap::~Heap() {
}
}
void Heap::FillRemainingTLAB(Thread* thread) {
uword start = thread->top();
uword end = thread->end();
ASSERT(end >= start);
intptr_t size = end - start;
ASSERT(Utils::IsAligned(size, kObjectAlignment));
if (size >= kObjectAlignment) {
FreeListElement::AsElement(start, size);
ASSERT(RawObject::FromAddr(start)->Size() == size);
ASSERT((start + size) == new_space_.top());
}
}
void Heap::AbandonRemainingTLAB(Thread* thread) {
FillRemainingTLAB(thread);
thread->set_top(0);
thread->set_end(0);
}
intptr_t Heap::CalculateTLABSize() {
intptr_t size = new_space_.end() - new_space_.top();
return Utils::RoundDown(size, kObjectAlignment);
}
uword Heap::AllocateNew(intptr_t size) {
ASSERT(Thread::Current()->no_safepoint_scope_depth() == 0);
// Currently, only the Dart thread may allocate in new space.
isolate()->AssertCurrentThreadIsMutator();
Thread* thread = Thread::Current();
uword addr = new_space_.TryAllocateInTLAB(thread, size);
if (addr != 0) {
return addr;
}
intptr_t tlab_size = CalculateTLABSize();
if ((tlab_size > 0) && (size > tlab_size)) {
return AllocateOld(size, HeapPage::kData);
}
AbandonRemainingTLAB(thread);
if (tlab_size > 0) {
uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
if (tlab_top != 0) {
addr = new_space_.TryAllocateInTLAB(thread, size);
ASSERT(addr != 0);
return addr;
}
}
ASSERT(!thread->HasActiveTLAB());
// This call to CollectGarbage might end up "reusing" a collection spawned
// from a different thread and will be racing to allocate the requested
// memory with other threads being released after the collection.
CollectGarbage(kNew);
tlab_size = CalculateTLABSize();
uword tlab_top = new_space_.TryAllocateNewTLAB(thread, tlab_size);
if (tlab_top != 0) {
if (addr == 0) {
// This call to CollectGarbage might end up "reusing" a collection spawned
// from a different thread and will be racing to allocate the requested
// memory with other threads being released after the collection.
CollectGarbage(kNew);
addr = new_space_.TryAllocateInTLAB(thread, size);
// It is possible a GC doesn't clear enough space.
// In that case, we must fall through and allocate into old space.
if (addr != 0) {
return addr;
if (addr == 0) {
return AllocateOld(size, HeapPage::kData);
}
}
return AllocateOld(size, HeapPage::kData);
return addr;
}
uword Heap::AllocateOld(intptr_t size, HeapPage::PageType type) {
@ -604,14 +555,13 @@ bool Heap::VerifyGC(MarkExpectation mark_expectation) const {
StackZone stack_zone(Thread::Current());
// Change the new space's top_ with the more up-to-date thread's view of top_
uword saved_top = new_space_.FlushTLS();
new_space_.FlushTLS();
ObjectSet* allocated_set =
CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
VerifyPointersVisitor visitor(isolate(), allocated_set);
VisitObjectPointers(&visitor);
new_space_.UnflushTLS(saved_top);
// Only returning a value so that Heap::Validate can be called from an ASSERT.
return true;
}

View file

@ -256,10 +256,6 @@ class Heap {
old_space_.SetupImagePage(pointer, size, is_executable);
}
intptr_t CalculateTLABSize();
void FillRemainingTLAB(Thread* thread);
void AbandonRemainingTLAB(Thread* thread);
private:
class GCStats : public ValueObject {
public:

View file

@ -2574,10 +2574,9 @@ Thread* Isolate::ScheduleThread(bool is_mutator, bool bypass_safepoint) {
os_thread->set_thread(thread);
if (is_mutator) {
mutator_thread_ = thread;
if ((Dart::vm_isolate() != NULL) &&
(heap() != Dart::vm_isolate()->heap())) {
mutator_thread_->set_top(0);
mutator_thread_->set_end(0);
if (this != Dart::vm_isolate()) {
mutator_thread_->set_top(heap()->new_space()->top());
mutator_thread_->set_end(heap()->new_space()->end());
}
}
Thread::SetCurrent(thread);
@ -2618,11 +2617,11 @@ void Isolate::UnscheduleThread(Thread* thread,
OSThread::SetCurrent(os_thread);
if (is_mutator) {
if (this != Dart::vm_isolate()) {
if (mutator_thread_->HasActiveTLAB()) {
heap()->AbandonRemainingTLAB(mutator_thread_);
}
heap()->new_space()->set_top(mutator_thread_->top_);
heap()->new_space()->set_end(mutator_thread_->end_);
}
ASSERT(!mutator_thread_->HasActiveTLAB());
mutator_thread_->top_ = 0;
mutator_thread_->end_ = 0;
mutator_thread_ = NULL;
}
thread->isolate_ = NULL;

View file

@ -394,6 +394,13 @@ SemiSpace* Scavenger::Prologue(Isolate* isolate, bool invoke_api_callbacks) {
resolved_top_ = top_;
end_ = to_->end();
// Throw out the old information about the from space
if (isolate->IsMutatorThreadScheduled()) {
Thread* mutator_thread = isolate->mutator_thread();
mutator_thread->set_top(top_);
mutator_thread->set_end(end_);
}
return from;
}
@ -403,11 +410,12 @@ void Scavenger::Epilogue(Isolate* isolate,
// All objects in the to space have been copied from the from space at this
// moment.
// Ensure the mutator thread will fail the next allocation. This will force
// mutator to allocate a new TLAB
// Ensure the mutator thread now has the up-to-date top_ and end_ of the
// semispace
if (isolate->IsMutatorThreadScheduled()) {
Thread* mutator_thread = isolate->mutator_thread();
ASSERT(!mutator_thread->HasActiveTLAB());
Thread* thread = isolate->mutator_thread();
thread->set_top(top_);
thread->set_end(end_);
}
double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
@ -714,49 +722,31 @@ void Scavenger::ProcessWeakReferences() {
}
}
uword Scavenger::FlushTLS() const {
void Scavenger::FlushTLS() const {
ASSERT(heap_ != NULL);
uword saved_top = top_;
if (heap_->isolate()->IsMutatorThreadScheduled() && !scavenging_) {
if (heap_->isolate()->IsMutatorThreadScheduled()) {
Thread* mutator_thread = heap_->isolate()->mutator_thread();
saved_top = mutator_thread->heap()->new_space()->top();
if (mutator_thread->HasActiveTLAB()) {
ASSERT(mutator_thread->top() <=
mutator_thread->heap()->new_space()->top());
heap_->FillRemainingTLAB(mutator_thread);
}
}
return saved_top;
}
void Scavenger::UnflushTLS(uword value) const {
ASSERT(heap_ != NULL);
if (heap_->isolate()->IsMutatorThreadScheduled() && !scavenging_) {
Thread* mutator_thread = heap_->isolate()->mutator_thread();
mutator_thread->heap()->new_space()->set_top(value);
ASSERT(mutator_thread->top() <= mutator_thread->heap()->new_space()->top());
mutator_thread->heap()->new_space()->set_top(mutator_thread->top());
}
}
void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
uword saved_top = FlushTLS();
FlushTLS();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
cur += raw_obj->VisitPointers(visitor);
}
UnflushTLS(saved_top);
}
void Scavenger::VisitObjects(ObjectVisitor* visitor) const {
uword saved_top = FlushTLS();
FlushTLS();
uword cur = FirstObjectStart();
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
visitor->VisitObject(raw_obj);
cur += raw_obj->Size();
}
UnflushTLS(saved_top);
}
void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const {
@ -765,21 +755,19 @@ void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const {
RawObject* Scavenger::FindObject(FindObjectVisitor* visitor) const {
ASSERT(!scavenging_);
uword saved_top = FlushTLS();
FlushTLS();
uword cur = FirstObjectStart();
if (visitor->VisitRange(cur, top_)) {
while (cur < top_) {
RawObject* raw_obj = RawObject::FromAddr(cur);
uword next = cur + raw_obj->Size();
if (visitor->VisitRange(cur, next) && raw_obj->FindObject(visitor)) {
UnflushTLS(saved_top);
return raw_obj; // Found object, return it.
}
cur = next;
}
ASSERT(cur == top_);
}
UnflushTLS(saved_top);
return Object::null();
}
@ -814,13 +802,6 @@ void Scavenger::Scavenge(bool invoke_api_callbacks) {
int64_t post_safe_point = OS::GetCurrentMonotonicMicros();
heap_->RecordTime(kSafePoint, post_safe_point - pre_safe_point);
if (isolate->IsMutatorThreadScheduled()) {
Thread* mutator_thread = isolate->mutator_thread();
if (mutator_thread->HasActiveTLAB()) {
heap_->AbandonRemainingTLAB(mutator_thread);
}
}
// TODO(koda): Make verification more compatible with concurrent sweep.
if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) {
OS::PrintErr("Verifying before Scavenge...");
@ -934,9 +915,7 @@ void Scavenger::Evacuate() {
if (heap_->isolate()->IsMutatorThreadScheduled()) {
Thread* mutator_thread = heap_->isolate()->mutator_thread();
if (mutator_thread->HasActiveTLAB()) {
survivor_end_ = mutator_thread->top();
}
survivor_end_ = mutator_thread->top();
}
Scavenge();
@ -946,11 +925,4 @@ void Scavenger::Evacuate() {
ASSERT((UsedInWords() == 0) || failed_to_promote_);
}
int64_t Scavenger::UsedInWords() const {
uword saved_top = FlushTLS();
int64_t used_in_words = (top_ - FirstObjectStart()) >> kWordSizeLog2;
UnflushTLS(saved_top);
return used_in_words;
}
} // namespace dart

View file

@ -121,25 +121,6 @@ class Scavenger {
RawObject* FindObject(FindObjectVisitor* visitor) const;
uword TryAllocateNewTLAB(Thread* thread, intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(!scavenging_);
uword result = top_;
intptr_t remaining = end_ - top_;
if (remaining < size) {
return 0;
}
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top_ += size;
ASSERT(to_->Contains(top_) || (top_ == to_->end()));
ASSERT(result < top_);
thread->set_top(result);
thread->set_end(top_);
return result;
}
uword AllocateGC(intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
ASSERT(heap_ != Dart::vm_isolate()->heap());
@ -153,7 +134,7 @@ class Scavenger {
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top_ += size;
ASSERT((to_->Contains(top_)) || (top_ == to_->end()));
ASSERT(to_->Contains(top_) || (top_ == to_->end()));
return result;
}
@ -162,8 +143,6 @@ class Scavenger {
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(thread->IsMutatorThread());
ASSERT(thread->isolate()->IsMutatorThreadScheduled());
ASSERT(thread->top() <= top_);
ASSERT((thread->end() == 0) || (thread->end() == top_));
#if defined(DEBUG)
if (FLAG_gc_at_alloc) {
ASSERT(!scavenging_);
@ -180,7 +159,7 @@ class Scavenger {
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == object_alignment_);
top += size;
ASSERT((to_->Contains(top)) || (top == to_->end()));
ASSERT(to_->Contains(top) || (top == to_->end()));
thread->set_top(top);
return result;
}
@ -201,7 +180,9 @@ class Scavenger {
end_ = value;
}
int64_t UsedInWords() const;
int64_t UsedInWords() const {
return (top_ - FirstObjectStart()) >> kWordSizeLog2;
}
int64_t CapacityInWords() const { return to_->size_in_words(); }
int64_t ExternalInWords() const { return external_size_ >> kWordSizeLog2; }
SpaceUsage GetCurrentUsage() const {
@ -234,9 +215,7 @@ class Scavenger {
void AllocateExternal(intptr_t size);
void FreeExternal(intptr_t size);
uword FlushTLS() const;
void UnflushTLS(uword value) const;
uword FirstObjectStart() const { return to_->start() | object_alignment_; }
void FlushTLS() const;
private:
// Ids for time and data records in Heap::GCStats.
@ -255,6 +234,7 @@ class Scavenger {
kToKBAfterStoreBuffer = 3
};
uword FirstObjectStart() const { return to_->start() | object_alignment_; }
SemiSpace* Prologue(Isolate* isolate, bool invoke_api_callbacks);
void IterateStoreBuffers(Isolate* isolate, ScavengerVisitor* visitor);
void IterateObjectIdTable(Isolate* isolate, ScavengerVisitor* visitor);

View file

@ -377,8 +377,6 @@ class Thread : public BaseThread {
uword top() { return top_; }
uword end() { return end_; }
bool HasActiveTLAB() { return end_ > 0; }
static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }