Reland "[vm] Align heap allocations to 2GB regions in comp ptr mode"

Fixed the mac build.

This reverts commit 0ed3fb3dee.

Bug: https://github.com/dart-lang/sdk/issues/45059
Change-Id: I0cd6710a641b195403dda717108bdbe3395a21fa
TEST=CI
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/186340
Commit-Queue: Liam Appelbe <liama@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Author: Liam Appelbe
Committed: 2021-02-22 19:58:49 +00:00 by commit-bot@chromium.org
Parent: 8f9cdb3b2f
Commit: be0978cbde
9 changed files with 229 additions and 40 deletions
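
Background for the change: under DART_COMPRESSED_POINTERS the VM stores heap pointers as 32-bit values, which only works if every heap address shares the same upper 32 bits. Reserving one 2GB heap region aligned to a 4GB boundary (kCompressedHeapSize and kCompressedHeapAlignment in the diff below) guarantees exactly that. A minimal sketch of how such an encoding can work, assuming a zero-shift scheme; Compress, Decompress, and heap_base are illustrative names, not the VM's API:

#include <cstdint>

// Because heap_base is 4GB-aligned and the heap is at most 4GB long, the
// upper 32 bits of every heap address equal those of heap_base, so the low
// 32 bits identify an object uniquely.
uint32_t Compress(uintptr_t address) {
  return static_cast<uint32_t>(address);  // keep only the low 32 bits
}

uintptr_t Decompress(uintptr_t heap_base, uint32_t compressed) {
  // heap_base has zero low 32 bits, so the addition restores the upper bits.
  return heap_base + compressed;
}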

runtime/vm/dart.cc

@@ -673,6 +673,7 @@ char* Dart::Cleanup() {
IsolateGroupReloadContext::SetFileModifiedCallback(NULL);
Service::SetEmbedderStreamCallbacks(NULL, NULL);
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
+VirtualMemory::Cleanup();
return NULL;
}

runtime/vm/heap/pages.cc

@@ -392,6 +392,11 @@ OldPage* PageSpace::AllocateLargePage(intptr_t size, OldPage::PageType type) {
if (page == nullptr) {
IncreaseCapacityInWordsLocked(-page_size_in_words);
return nullptr;
-}
+} else {
+  intptr_t actual_size_in_words = page->memory_->size() >> kWordSizeLog2;
+  if (actual_size_in_words != page_size_in_words) {
+    IncreaseCapacityInWordsLocked(actual_size_in_words - page_size_in_words);
+  }
+}
if (is_exec) {
AddExecPageLocked(page);

runtime/vm/object.cc

@@ -2510,8 +2510,12 @@ void Object::InitializeObject(uword address, intptr_t class_id, intptr_t size) {
// If the size is greater than both kNewAllocatableSize and
// kAllocatablePageSize, the object must have been allocated to a new
// large page, which must already have been zero initialized by the OS.
+#if defined(DART_COMPRESSED_POINTERS)
+needs_init = true;
+#else
needs_init = Heap::IsAllocatableInNewSpace(size) ||
             Heap::IsAllocatableViaFreeLists(size);
+#endif
} else {
initial_value = static_cast<uword>(null_);
needs_init = true;
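
One plausible reading of the new #if branch (the rationale is not spelled out in the diff): a fresh anonymous mapping is zero-filled by the kernel, but a large page carved out of the long-lived compressed-heap reservation may be a recycled page that still holds old data, so the object must be initialized unconditionally. A standalone POSIX illustration of that distinction:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstring>

int main() {
  const size_t kSize = 4096;
  // A fresh anonymous mapping is zero-filled by the kernel...
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* bytes = static_cast<char*>(mem);
  assert(bytes[0] == 0);
  // ...but memory recycled inside an existing reservation keeps whatever a
  // previous occupant wrote, so the allocator must clear it itself.
  memset(bytes, 0xAB, kSize);  // simulate the previous occupant
  assert(bytes[0] != 0);
  munmap(mem, kSize);
  return 0;
}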

runtime/vm/virtual_memory.cc

@@ -19,12 +19,13 @@ void VirtualMemory::Truncate(intptr_t new_size) {
ASSERT(new_size <= size());
if (reserved_.size() ==
region_.size()) { // Don't create holes in reservation.
-FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
-               size() - new_size);
-reserved_.set_size(new_size);
-if (AliasOffset() != 0) {
-  FreeSubSegment(reinterpret_cast<void*>(alias_.start() + new_size),
-                 alias_.size() - new_size);
-}
+if (FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
+                   size() - new_size)) {
+  reserved_.set_size(new_size);
+  if (AliasOffset() != 0) {
+    FreeSubSegment(reinterpret_cast<void*>(alias_.start() + new_size),
+                   alias_.size() - new_size);
+  }
+}
}
region_.Subregion(region_, 0, new_size);

runtime/vm/virtual_memory.h

@@ -32,6 +32,7 @@ class VirtualMemory {
intptr_t AliasOffset() const { return alias_.start() - region_.start(); }
static void Init();
+static void Cleanup();
// Returns true if dual mapping is enabled.
static bool DualMappingEnabled();
@@ -87,7 +88,7 @@ class VirtualMemory {
// Free a sub segment. On operating systems that support it this
// can give back the virtual memory to the system. Returns true on success.
-static void FreeSubSegment(void* address, intptr_t size);
+static bool FreeSubSegment(void* address, intptr_t size);
// These constructors are only used internally when reserving new virtual
// spaces. They do not reserve any virtual address space on their own.

runtime/vm/virtual_memory_fuchsia.cc

@@ -51,6 +51,8 @@ void VirtualMemory::Init() {
page_size_ = CalculatePageSize();
}
+void VirtualMemory::Cleanup() {}
static void Unmap(zx_handle_t vmar, uword start, uword end) {
ASSERT(start <= end);
const uword size = end - start;
@@ -174,10 +176,11 @@ VirtualMemory::~VirtualMemory() {
}
}
-void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
+bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
const uword start = reinterpret_cast<uword>(address);
Unmap(zx_vmar_root_self(), start, start + size);
LOG_INFO("zx_vmar_unmap(0x%p, 0x%lx) success\n", address, size);
+return true;
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {

runtime/vm/virtual_memory_posix.cc

@@ -43,6 +43,43 @@ DECLARE_FLAG(bool, generate_perf_jitdump);
uword VirtualMemory::page_size_ = 0;
static void unmap(uword start, uword end);
+#if defined(DART_COMPRESSED_POINTERS)
+static uword compressed_heap_base_ = 0;
+static uint8_t* compressed_heap_pages_ = nullptr;
+static uword compressed_heap_minimum_free_page_id_ = 0;
+static Mutex* compressed_heap_mutex_ = nullptr;
+static constexpr intptr_t kCompressedHeapSize = 2 * GB;
+static constexpr intptr_t kCompressedHeapAlignment = 4 * GB;
+static constexpr intptr_t kCompressedHeapPageSize = kOldPageSize;
+static constexpr intptr_t kCompressedHeapNumPages =
+    kCompressedHeapSize / kOldPageSize;
+static constexpr intptr_t kCompressedHeapBitmapSize =
+    kCompressedHeapNumPages / 8;
+#endif // defined(DART_COMPRESSED_POINTERS)
+
+static void* GenericMapAligned(int prot,
+                               intptr_t size,
+                               intptr_t alignment,
+                               intptr_t allocated_size,
+                               int map_flags) {
+  void* address = mmap(nullptr, allocated_size, prot, map_flags, -1, 0);
+  LOG_INFO("mmap(nullptr, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
+           address);
+  if (address == MAP_FAILED) {
+    return nullptr;
+  }
+  const uword base = reinterpret_cast<uword>(address);
+  const uword aligned_base = Utils::RoundUp(base, alignment);
+  unmap(base, aligned_base);
+  unmap(aligned_base + size, base + allocated_size);
+  return reinterpret_cast<void*>(aligned_base);
+}
intptr_t VirtualMemory::CalculatePageSize() {
const intptr_t page_size = getpagesize();
ASSERT(page_size != 0);
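
GenericMapAligned above produces an aligned mapping from an alignment-oblivious mmap by over-reserving and trimming: map allocated_size bytes, round the returned base up to the alignment, then unmap the slack before and after the aligned range. A worked example with hypothetical numbers, mirroring the size + alignment over-allocation used by the Init() call below (RoundUp is reimplemented here so the sketch is self-contained):

#include <cassert>
#include <cstdint>

// Same bit trick as the VM's Utils::RoundUp; align must be a power of two.
uint64_t RoundUp(uint64_t value, uint64_t align) {
  return (value + align - 1) & ~(align - 1);
}

int main() {
  const uint64_t kGB = 1ull << 30;
  // Hypothetical request: 2GB of memory at 4GB alignment over-maps 6GB.
  const uint64_t size = 2 * kGB;
  const uint64_t allocated_size = size + 4 * kGB;
  const uint64_t base = 0x123450000ull;  // pretend mmap returned this
  const uint64_t aligned_base = RoundUp(base, 4 * kGB);
  assert(aligned_base == 0x200000000ull);
  // unmap(base, aligned_base) trims the leading slack, and
  // unmap(aligned_base + size, base + allocated_size) trims the trailing
  // slack, leaving exactly [aligned_base, aligned_base + size) mapped.
  assert(aligned_base + size <= base + allocated_size);
  return 0;
}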
@@ -51,6 +88,20 @@ intptr_t VirtualMemory::CalculatePageSize() {
}
void VirtualMemory::Init() {
+#if defined(DART_COMPRESSED_POINTERS)
+if (compressed_heap_pages_ == nullptr) {
+  compressed_heap_pages_ = new uint8_t[kCompressedHeapBitmapSize];
+  memset(compressed_heap_pages_, 0, kCompressedHeapBitmapSize);
+  compressed_heap_base_ = reinterpret_cast<uword>(GenericMapAligned(
+      PROT_READ | PROT_WRITE, kCompressedHeapSize, kCompressedHeapAlignment,
+      kCompressedHeapSize + kCompressedHeapAlignment,
+      MAP_PRIVATE | MAP_ANONYMOUS));
+  ASSERT(compressed_heap_base_ != 0);
+  ASSERT(Utils::IsAligned(compressed_heap_base_, kCompressedHeapAlignment));
+  compressed_heap_mutex_ = new Mutex(NOT_IN_PRODUCT("compressed_heap_mutex"));
+}
+#endif // defined(DART_COMPRESSED_POINTERS)
if (page_size_ != 0) {
// Already initialized.
return;
@@ -78,7 +129,7 @@ void VirtualMemory::Init() {
intptr_t size = PageSize();
intptr_t alignment = kOldPageSize;
VirtualMemory* vm = AllocateAligned(size, alignment, true, "memfd-test");
-if (vm == NULL) {
+if (vm == nullptr) {
LOG_INFO("memfd_create not supported; disabling dual mapping of code.\n");
FLAG_dual_map_code = false;
return;
@@ -115,6 +166,18 @@ void VirtualMemory::Init() {
#endif
}
+void VirtualMemory::Cleanup() {
+#if defined(DART_COMPRESSED_POINTERS)
+  unmap(compressed_heap_base_, compressed_heap_base_ + kCompressedHeapSize);
+  delete[] compressed_heap_pages_;
+  delete compressed_heap_mutex_;
+  compressed_heap_base_ = 0;
+  compressed_heap_pages_ = nullptr;
+  compressed_heap_minimum_free_page_id_ = 0;
+  compressed_heap_mutex_ = nullptr;
+#endif // defined(DART_COMPRESSED_POINTERS)
+}
bool VirtualMemory::DualMappingEnabled() {
return FLAG_dual_map_code;
}
@@ -156,12 +219,12 @@ static void* MapAligned(int fd,
intptr_t size,
intptr_t alignment,
intptr_t allocated_size) {
-void* address =
-    mmap(NULL, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-LOG_INFO("mmap(NULL, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
+void* address = mmap(nullptr, allocated_size, PROT_NONE,
+                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+LOG_INFO("mmap(nullptr, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
address);
if (address == MAP_FAILED) {
-  return NULL;
+  return nullptr;
}
const uword base = reinterpret_cast<uword>(address);
@@ -176,7 +239,7 @@ static void* MapAligned(int fd,
prot, address);
if (address == MAP_FAILED) {
unmap(base, base + allocated_size);
-return NULL;
+return nullptr;
}
ASSERT(address == reinterpret_cast<void*>(aligned_base));
unmap(base, aligned_base);
@@ -185,6 +248,93 @@ static void* MapAligned(int fd,
}
#endif // defined(DUAL_MAPPING_SUPPORTED)
+#if defined(DART_COMPRESSED_POINTERS)
+uint8_t PageMask(uword page_id) {
+  return static_cast<uint8_t>(1 << (page_id % 8));
+}
+
+bool IsCompressedHeapPageUsed(uword page_id) {
+  if (page_id >= kCompressedHeapNumPages) return false;
+  return compressed_heap_pages_[page_id / 8] & PageMask(page_id);
+}
+
+void SetCompressedHeapPageUsed(uword page_id) {
+  ASSERT(page_id < kCompressedHeapNumPages);
+  compressed_heap_pages_[page_id / 8] |= PageMask(page_id);
+}
+
+void ClearCompressedHeapPageUsed(uword page_id) {
+  ASSERT(page_id < kCompressedHeapNumPages);
+  compressed_heap_pages_[page_id / 8] &= ~PageMask(page_id);
+}
+
+static MemoryRegion MapInCompressedHeap(intptr_t size, intptr_t alignment) {
+  ASSERT(alignment <= kCompressedHeapAlignment);
+  const intptr_t allocated_size = Utils::RoundUp(size, kCompressedHeapPageSize);
+  uword pages = allocated_size / kCompressedHeapPageSize;
+  uword page_alignment = alignment > kCompressedHeapPageSize
+                             ? alignment / kCompressedHeapPageSize
+                             : 1;
+  MutexLocker ml(compressed_heap_mutex_);
+  // Find a gap with enough empty pages, using the bitmap. Note that reading
+  // outside the bitmap range always returns 0, so this loop will terminate.
+  uword page_id =
+      Utils::RoundUp(compressed_heap_minimum_free_page_id_, page_alignment);
+  for (uword gap = 0;;) {
+    if (IsCompressedHeapPageUsed(page_id)) {
+      gap = 0;
+      page_id = Utils::RoundUp(page_id + 1, page_alignment);
+    } else {
+      ++gap;
+      if (gap >= pages) {
+        page_id += 1 - gap;
+        break;
+      }
+      ++page_id;
+    }
+  }
+  ASSERT(page_id % page_alignment == 0);
+  // Make sure we're not trying to allocate past the end of the heap.
+  uword end = page_id + pages;
+  if (end > kCompressedHeapSize / kCompressedHeapPageSize) {
+    return MemoryRegion();
+  }
+  // Mark all the pages in the bitmap as allocated.
+  for (uword i = page_id; i < end; ++i) {
+    ASSERT(!IsCompressedHeapPageUsed(i));
+    SetCompressedHeapPageUsed(i);
+  }
+  // Find the next free page, to speed up subsequent allocations.
+  while (IsCompressedHeapPageUsed(compressed_heap_minimum_free_page_id_)) {
+    ++compressed_heap_minimum_free_page_id_;
+  }
+  uword address = compressed_heap_base_ + page_id * kCompressedHeapPageSize;
+  ASSERT(Utils::IsAligned(address, kCompressedHeapPageSize));
+  return MemoryRegion(reinterpret_cast<void*>(address), allocated_size);
+}
+
+static void UnmapInCompressedHeap(uword start, uword size) {
+  ASSERT(Utils::IsAligned(start, kCompressedHeapPageSize));
+  ASSERT(Utils::IsAligned(size, kCompressedHeapPageSize));
+  MutexLocker ml(compressed_heap_mutex_);
+  ASSERT(start >= compressed_heap_base_);
+  uword page_id = (start - compressed_heap_base_) / kCompressedHeapPageSize;
+  uword end = page_id + size / kCompressedHeapPageSize;
+  for (uword i = page_id; i < end; ++i) {
+    ClearCompressedHeapPageUsed(i);
+  }
+  if (page_id < compressed_heap_minimum_free_page_id_) {
+    compressed_heap_minimum_free_page_id_ = page_id;
+  }
+}
+
+static bool IsInCompressedHeap(uword address) {
+  return address >= compressed_heap_base_ &&
+         address < compressed_heap_base_ + kCompressedHeapSize;
+}
+#endif // defined(DART_COMPRESSED_POINTERS)
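
MapInCompressedHeap is a first-fit scan over the page bitmap: advance through page ids, reset the free-run counter at every used page, and stop once the run reaches the requested page count. Ids past the end of the bitmap read as free, which is what lets the loop terminate; the past-the-end case is then rejected by the explicit bounds check. A stripped-down, single-threaded model of the same scan (alignment handling omitted; kNumPages is illustrative, not the heap's real page count, which would be 4096 assuming 512KB old pages):

#include <cstdint>

constexpr uintptr_t kNumPages = 64;  // illustrative only
uint8_t bitmap[kNumPages / 8] = {};  // 1 bit per page; 0 means free

bool IsUsed(uintptr_t id) {
  if (id >= kNumPages) return false;  // out-of-range reads as free
  return (bitmap[id / 8] >> (id % 8)) & 1;
}

// Returns the first id of a run of `count` free pages, or -1 if the run
// would extend past the end of the heap (mirrors the MemoryRegion() return).
intptr_t FindRun(uintptr_t count) {
  uintptr_t id = 0;
  uintptr_t gap = 0;
  for (;;) {
    if (IsUsed(id)) {
      gap = 0;  // a used page breaks the run
      ++id;
    } else {
      ++gap;
      if (gap >= count) {
        id += 1 - gap;  // back up to the first page of the run
        break;
      }
      ++id;
    }
  }
  if (id + count > kNumPages) return -1;
  return static_cast<intptr_t>(id);
}

On an empty bitmap FindRun(3) returns 0; once fewer than three consecutive free pages remain anywhere, it returns -1.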
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
intptr_t alignment,
bool is_executable,
@@ -199,6 +349,18 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
ASSERT(Utils::IsPowerOfTwo(alignment));
ASSERT(Utils::IsAligned(alignment, PageSize()));
ASSERT(name != nullptr);
+#if defined(DART_COMPRESSED_POINTERS)
+if (!is_executable) {
+  MemoryRegion region = MapInCompressedHeap(size, alignment);
+  if (region.pointer() == nullptr) {
+    return nullptr;
+  }
+  mprotect(region.pointer(), region.size(), PROT_READ | PROT_WRITE);
+  return new VirtualMemory(region, region);
+}
+#endif // defined(DART_COMPRESSED_POINTERS)
const intptr_t allocated_size = size + alignment - PageSize();
#if defined(DUAL_MAPPING_SUPPORTED)
const bool dual_mapping =
@@ -206,18 +368,18 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
if (dual_mapping) {
int fd = memfd_create(name, MFD_CLOEXEC);
if (fd == -1) {
-return NULL;
+return nullptr;
}
if (ftruncate(fd, size) == -1) {
close(fd);
-return NULL;
+return nullptr;
}
const int region_prot = PROT_READ | PROT_WRITE;
void* region_ptr =
MapAligned(fd, region_prot, size, alignment, allocated_size);
-if (region_ptr == NULL) {
+if (region_ptr == nullptr) {
close(fd);
-return NULL;
+return nullptr;
}
// The mapping will be RX and stays that way until it will eventually be
// unmapped.
@@ -228,10 +390,10 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
void* alias_ptr =
MapAligned(fd, alias_prot, size, alignment, allocated_size);
close(fd);
-if (alias_ptr == NULL) {
+if (alias_ptr == nullptr) {
const uword region_base = reinterpret_cast<uword>(region_ptr);
unmap(region_base, region_base + size);
-return NULL;
+return nullptr;
}
ASSERT(region_ptr != alias_ptr);
MemoryRegion alias(alias_ptr, size);
@@ -250,16 +412,16 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
if (FLAG_dual_map_code) {
int fd = memfd_create(name, MFD_CLOEXEC);
if (fd == -1) {
-return NULL;
+return nullptr;
}
if (ftruncate(fd, size) == -1) {
close(fd);
-return NULL;
+return nullptr;
}
void* region_ptr = MapAligned(fd, prot, size, alignment, allocated_size);
close(fd);
-if (region_ptr == NULL) {
-  return NULL;
+if (region_ptr == nullptr) {
+  return nullptr;
}
MemoryRegion region(region_ptr, size);
return new VirtualMemory(region, region);
@@ -272,24 +434,23 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
map_flags |= MAP_JIT;
}
#endif // defined(HOST_OS_MACOS)
-void* address = mmap(NULL, allocated_size, prot, map_flags, -1, 0);
-LOG_INFO("mmap(NULL, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
-         address);
-if (address == MAP_FAILED) {
-  return NULL;
+void* address =
+    GenericMapAligned(prot, size, alignment, allocated_size, map_flags);
+if (address == nullptr) {
+  return nullptr;
}
-const uword base = reinterpret_cast<uword>(address);
-const uword aligned_base = Utils::RoundUp(base, alignment);
-unmap(base, aligned_base);
-unmap(aligned_base + size, base + allocated_size);
-MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
+MemoryRegion region(reinterpret_cast<void*>(address), size);
return new VirtualMemory(region, region);
}
VirtualMemory::~VirtualMemory() {
+#if defined(DART_COMPRESSED_POINTERS)
+if (IsInCompressedHeap(reserved_.start())) {
+  UnmapInCompressedHeap(reserved_.start(), reserved_.size());
+  return;
+}
+#endif // defined(DART_COMPRESSED_POINTERS)
if (vm_owns_region()) {
unmap(reserved_.start(), reserved_.end());
const intptr_t alias_offset = AliasOffset();
@@ -299,10 +460,16 @@ VirtualMemory::~VirtualMemory() {
}
}
-void VirtualMemory::FreeSubSegment(void* address,
-                                   intptr_t size) {
+bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
const uword start = reinterpret_cast<uword>(address);
+#if defined(DART_COMPRESSED_POINTERS)
+// Don't free the sub segment if it's managed by the compressed pointer heap.
+if (IsInCompressedHeap(start)) {
+  return false;
+}
+#endif // defined(DART_COMPRESSED_POINTERS)
unmap(start, start + size);
+return true;
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {

runtime/vm/virtual_memory_test.cc

@@ -24,9 +24,14 @@ VM_UNIT_TEST_CASE(AllocateVirtualMemory) {
VirtualMemory::Allocate(kVirtualMemoryBlockSize, false, "test");
EXPECT(vm != NULL);
EXPECT(vm->address() != NULL);
-EXPECT_EQ(kVirtualMemoryBlockSize, vm->size());
EXPECT_EQ(vm->start(), reinterpret_cast<uword>(vm->address()));
+#if defined(DART_COMPRESSED_POINTERS)
+EXPECT_EQ(kOldPageSize, vm->size());
+EXPECT_EQ(vm->start() + kOldPageSize, vm->end());
+#else
+EXPECT_EQ(kVirtualMemoryBlockSize, vm->size());
+EXPECT_EQ(vm->start() + kVirtualMemoryBlockSize, vm->end());
+#endif // defined(DART_COMPRESSED_POINTERS)
EXPECT(vm->Contains(vm->start()));
EXPECT(vm->Contains(vm->start() + 1));
EXPECT(vm->Contains(vm->start() + kVirtualMemoryBlockSize - 1));
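
The forked expectations fall out of MapInCompressedHeap rounding every request up to whole compressed-heap pages. Assuming kVirtualMemoryBlockSize is 64KB and kOldPageSize is 512KB (both hypothetical here; the real constants live in the VM headers), the 64KB test allocation comes back as a single 512KB page:

#include <cstdint>

// Assumed values for illustration; the real constants are in the VM headers.
constexpr int64_t kOldPageSize = 512 * 1024;
constexpr int64_t kVirtualMemoryBlockSize = 64 * 1024;

// MapInCompressedHeap rounds the request up to whole heap pages, so
// vm->size() becomes kOldPageSize instead of the requested block size.
static_assert(((kVirtualMemoryBlockSize + kOldPageSize - 1) / kOldPageSize) *
                      kOldPageSize ==
                  kOldPageSize,
              "a sub-page request rounds up to exactly one old page");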

runtime/vm/virtual_memory_win.cc

@@ -31,6 +31,8 @@ void VirtualMemory::Init() {
page_size_ = CalculatePageSize();
}
+void VirtualMemory::Cleanup() {}
bool VirtualMemory::DualMappingEnabled() {
return false;
}
@@ -80,11 +82,11 @@ VirtualMemory::~VirtualMemory() {
}
}
-void VirtualMemory::FreeSubSegment(void* address,
-                                   intptr_t size) {
+bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
if (VirtualFree(address, size, MEM_DECOMMIT) == 0) {
FATAL1("VirtualFree failed: Error code %d\n", GetLastError());
}
+return true;
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {