During promotion, use bump allocation whenever there are no small blocks available.

R=iposva@google.com

Review URL: https://codereview.chromium.org//534653002

git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@39893 260f80e4-7a28-3924-810f-c04153c831b5
This commit is contained in:
koda@google.com 2014-09-05 01:07:57 +00:00
parent ce242e4d44
commit cdafe57cac
8 changed files with 121 additions and 20 deletions

View file

@@ -299,6 +299,7 @@ const uword kUwordMax = kMaxUint64;
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerWord = kWordSize * kBitsPerByte;
const int kBitsPerWordLog2 = kWordSizeLog2 + kBitsPerByteLog2;
// System-wide named constants.
const intptr_t KB = 1024;

View file

@@ -34,6 +34,7 @@ int Utils::CountOneBits(uint32_t x) {
}
// TODO(koda): Compare to flsll call/intrinsic.
int Utils::HighestBit(int64_t v) {
uint64_t x = static_cast<uint64_t>((v > 0) ? v : -v);
uint64_t t;

View file

@@ -22,33 +22,44 @@ class BitSet {
void Set(intptr_t i, bool value) {
ASSERT(i >= 0);
ASSERT(i < N);
uword mask = (static_cast<uword>(1) << (i % kBitsPerWord));
uword mask = (static_cast<uword>(1) << (i & (kBitsPerWord - 1)));
if (value) {
data_[i / kBitsPerWord] |= mask;
data_[i >> kBitsPerWordLog2] |= mask;
} else {
data_[i / kBitsPerWord] &= ~mask;
data_[i >> kBitsPerWordLog2] &= ~mask;
}
}
bool Test(intptr_t i) {
bool Test(intptr_t i) const {
ASSERT(i >= 0);
ASSERT(i < N);
uword mask = (static_cast<uword>(1) << (i % kBitsPerWord));
return (data_[i / kBitsPerWord] & mask) != 0;
uword mask = (static_cast<uword>(1) << (i & (kBitsPerWord - 1)));
return (data_[i >> kBitsPerWordLog2] & mask) != 0;
}
intptr_t Next(intptr_t i) {
intptr_t Next(intptr_t i) const {
ASSERT(i >= 0);
ASSERT(i < N);
intptr_t w = i / kBitsPerWord;
intptr_t w = i >> kBitsPerWordLog2;
uword mask = ~static_cast<uword>(0) << i;
if ((data_[w] & mask) != 0) {
uword tz = Utils::CountTrailingZeros(data_[w] & mask);
return kBitsPerWord*w + tz;
return (w << kBitsPerWordLog2) + tz;
}
while (++w < (1 + ((N - 1) / kBitsPerWord))) {
while (++w < kLengthInWords) {
if (data_[w] != 0) {
return kBitsPerWord*w + Utils::CountTrailingZeros(data_[w]);
return (w << kBitsPerWordLog2) + Utils::CountTrailingZeros(data_[w]);
}
}
return -1;
}
intptr_t Last() const {
for (int w = kLengthInWords - 1; w >= 0; --w) {
uword d = data_[w];
if (d != 0) {
// TODO(koda): Define HighestBit(uword) or use uint64_t[] for data_.
return (w << kBitsPerWordLog2) + Utils::HighestBit(d);
}
}
return -1;
@@ -63,7 +74,8 @@ class BitSet {
}
private:
uword data_[1 + ((N - 1) / kBitsPerWord)];
static const int kLengthInWords = 1 + ((N - 1) / kBitsPerWord);
uword data_[kLengthInWords];
};
} // namespace dart

View file

@@ -196,6 +196,7 @@ void FreeList::FreeLocked(uword addr, intptr_t size) {
void FreeList::Reset() {
MutexLocker ml(mutex_);
free_map_.Reset();
last_free_small_size_ = -1;
for (int i = 0; i < (kNumLists + 1); i++) {
free_lists_[i] = NULL;
}
@@ -206,7 +207,7 @@ intptr_t FreeList::IndexForSize(intptr_t size) {
ASSERT(size >= kObjectAlignment);
ASSERT(Utils::IsAligned(size, kObjectAlignment));
intptr_t index = size / kObjectAlignment;
intptr_t index = size >> kObjectAlignmentLog2;
if (index >= kNumLists) {
index = kNumLists;
}
@@ -218,6 +219,8 @@ void FreeList::EnqueueElement(FreeListElement* element, intptr_t index) {
FreeListElement* next = free_lists_[index];
if (next == NULL && index != kNumLists) {
free_map_.Set(index, true);
last_free_small_size_ = Utils::Maximum(last_free_small_size_,
index << kObjectAlignmentLog2);
}
element->set_next(next);
free_lists_[index] = element;
@@ -229,6 +232,12 @@ FreeListElement* FreeList::DequeueElement(intptr_t index) {
FreeListElement* next = result->next();
if (next == NULL && index != kNumLists) {
free_map_.Set(index, false);
intptr_t size = index << kObjectAlignmentLog2;
if (size == last_free_small_size_) {
// Note: Last() returns -1 if none are set; avoid shift of negative.
last_free_small_size_ = free_map_.Last() * kObjectAlignment;
// TODO(koda): Consider adding BitSet::Previous(i).
}
}
free_lists_[index] = next;
return result;
@@ -344,6 +353,12 @@ void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element,
// Returns a large element of at least 'minimum_size' bytes, or NULL if none
// exists. Acquires the free-list mutex, then delegates to the locked variant.
FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) {
MutexLocker ml(mutex_);
return TryAllocateLargeLocked(minimum_size);
}
FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size) {
DEBUG_ASSERT(mutex_->Owner() == Isolate::Current());
FreeListElement* previous = NULL;
FreeListElement* current = free_lists_[kNumLists];
// TODO(koda): Find largest.
@@ -363,4 +378,25 @@ FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) {
return NULL;
}
// Allocates 'size' bytes from the small (fixed-size-class) free lists only;
// returns 0 on failure. Never touches the large-element list (kNumLists) and
// never grows the heap, which keeps it cheap for the promotion fast path.
// The caller must already hold mutex_.
uword FreeList::TryAllocateSmallLocked(intptr_t size) {
  DEBUG_ASSERT(mutex_->Owner() == Isolate::Current());
  // Fast reject: last_free_small_size_ caches the largest small size
  // currently available (negative if there is none).
  if (size > last_free_small_size_) {
    return 0;
  }
  // intptr_t matches IndexForSize's return type; avoids implicit narrowing.
  intptr_t index = IndexForSize(size);
  if (index != kNumLists && free_map_.Test(index)) {
    // Exact-fit list is non-empty; take its head.
    return reinterpret_cast<uword>(DequeueElement(index));
  }
  if ((index + 1) < kNumLists) {
    // Find the next larger non-empty small list and split off the remainder.
    intptr_t next_index = free_map_.Next(index + 1);
    if (next_index != -1) {
      FreeListElement* element = DequeueElement(next_index);
      SplitElementAfterAndEnqueue(element, size, false);
      return reinterpret_cast<uword>(element);
    }
  }
  return 0;
}
} // namespace dart

View file

@@ -94,6 +94,11 @@ class FreeList {
// Returns a large element, at least 'minimum_size', or NULL if none exists.
FreeListElement* TryAllocateLarge(intptr_t minimum_size);
FreeListElement* TryAllocateLargeLocked(intptr_t minimum_size);
// Allocates locked and unprotected memory, but only from small elements
// (i.e., fixed size lists).
uword TryAllocateSmallLocked(intptr_t size);
private:
static const int kNumLists = 128;
@@ -119,6 +124,9 @@ class FreeList {
FreeListElement* free_lists_[kNumLists + 1];
// The largest available small size in bytes, or negative if there is none.
intptr_t last_free_small_size_;
DISALLOW_COPY_AND_ASSIGN(FreeList);
};

View file

@@ -770,17 +770,22 @@ void PageSpace::MarkSweep(bool invoke_api_callbacks) {
}
uword PageSpace::TryAllocateDataBump(intptr_t size,
GrowthPolicy growth_policy) {
uword PageSpace::TryAllocateDataBumpInternal(intptr_t size,
GrowthPolicy growth_policy,
bool is_locked) {
ASSERT(size >= kObjectAlignment);
ASSERT(Utils::IsAligned(size, kObjectAlignment));
intptr_t remaining = bump_end_ - bump_top_;
if (remaining < size) {
// Checking this first would be logical, but needlessly slow.
if (size >= kAllocatablePageSize) {
return TryAllocate(size, HeapPage::kData, growth_policy);
return is_locked ?
TryAllocateDataLocked(size, growth_policy) :
TryAllocate(size, HeapPage::kData, growth_policy);
}
FreeListElement* block = freelist_[HeapPage::kData].TryAllocateLarge(size);
FreeListElement* block = is_locked ?
freelist_[HeapPage::kData].TryAllocateLargeLocked(size) :
freelist_[HeapPage::kData].TryAllocateLarge(size);
if (block == NULL) {
// Allocating from a new page (if growth policy allows) will have the
// side-effect of populating the freelist with a large block. The next
@@ -789,9 +794,16 @@ uword PageSpace::TryAllocateDataBump(intptr_t size,
return TryAllocateInFreshPage(size,
HeapPage::kData,
growth_policy,
/* is_locked = */ false);
is_locked);
}
intptr_t block_size = block->Size();
if (remaining > 0) {
if (is_locked) {
freelist_[HeapPage::kData].FreeLocked(bump_top_, remaining);
} else {
freelist_[HeapPage::kData].Free(bump_top_, remaining);
}
}
bump_top_ = reinterpret_cast<uword>(block);
bump_end_ = bump_top_ + block_size;
remaining = block_size;
@@ -808,6 +820,32 @@ uword PageSpace::TryAllocateDataBump(intptr_t size,
}
// Unlocked entry point for bump allocation in the data space: delegates to
// the shared implementation with is_locked = false.
uword PageSpace::TryAllocateDataBump(intptr_t size,
GrowthPolicy growth_policy) {
return TryAllocateDataBumpInternal(size, growth_policy, false);
}
// Locked entry point for bump allocation: delegates with is_locked = true so
// the implementation uses the *Locked freelist/page-space calls (presumably
// the caller already holds the relevant lock — confirm against call sites).
uword PageSpace::TryAllocateDataBumpLocked(intptr_t size,
GrowthPolicy growth_policy) {
return TryAllocateDataBumpInternal(size, growth_policy, true);
}
// Allocation path used when promoting survivors out of new space.
// Tries, in order:
//   1. a small (fixed-size-class) free-list element — usage accounting is
//      done here because TryAllocateSmallLocked does not update usage_;
//   2. bump allocation (which falls back to fresh pages per growth_policy);
//   3. the regular locked data-space allocation.
// Returns 0 if all three fail.
uword PageSpace::TryAllocatePromoLocked(intptr_t size,
GrowthPolicy growth_policy) {
FreeList* freelist = &freelist_[HeapPage::kData];
uword result = freelist->TryAllocateSmallLocked(size);
if (result != 0) {
// Small-list hits bypass the normal accounting, so charge usage_ here.
usage_.used_in_words += size >> kWordSizeLog2;
return result;
}
result = TryAllocateDataBumpLocked(size, growth_policy);
if (result != 0) return result;
return TryAllocateDataLocked(size, growth_policy);
}
PageSpaceController::PageSpaceController(Heap* heap,
int heap_growth_ratio,
int heap_growth_max,

View file

@@ -305,6 +305,8 @@ class PageSpace {
// Attempt to allocate from bump block rather than normal freelist.
uword TryAllocateDataBump(intptr_t size, GrowthPolicy growth_policy);
uword TryAllocateDataBumpLocked(intptr_t size, GrowthPolicy growth_policy);
uword TryAllocatePromoLocked(intptr_t size, GrowthPolicy growth_policy);
private:
// Ids for time and data records in Heap::GCStats.
@@ -332,6 +334,9 @@ class PageSpace {
HeapPage::PageType type,
GrowthPolicy growth_policy,
bool is_locked);
uword TryAllocateDataBumpInternal(intptr_t size,
GrowthPolicy growth_policy,
bool is_locked);
HeapPage* AllocatePage(HeapPage::PageType type);
void FreePage(HeapPage* page, HeapPage* previous_page);
HeapPage* AllocateLargePage(intptr_t size, HeapPage::PageType type);

View file

@@ -205,8 +205,8 @@ class ScavengerVisitor : public ObjectPointerVisitor {
//
// This object is a survivor of a previous scavenge. Attempt to promote
// the object.
new_addr = page_space_->TryAllocateDataLocked(size,
PageSpace::kForceGrowth);
new_addr =
page_space_->TryAllocatePromoLocked(size, PageSpace::kForceGrowth);
if (new_addr != 0) {
// If promotion succeeded then we need to remember it so that it can
// be traversed later.