[vm, gc] Try to handle virtual memory limitations on iOS.

iOS appears not to allow any single virtual memory allocation to exceed ~700MB.
iOS also appears to limit the total amount of allocated virtual memory to the amount of physical memory available, even for virtual memory that is reserved but not committed.

So instead of trying to allocate the full 4GB region for compressed pointers, allocate the largest power-of-two reservation that succeeds, and speculate that further allocations will fall into the same 4GB region (sketched below).

TEST=none
Change-Id: Ib45f7ece59e1adb96d175ae861b984c0c6737549
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/210640
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Author: Ryan Macnak <rmacnak@google.com>
Committed: 2021-08-31 00:10:08 +00:00 by commit-bot@chromium.org
Commit: 8f9113d9f1 (parent: 6433038907)
4 changed files, 103 insertions(+), 9 deletions(-)
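To make the strategy concrete, here is a minimal sketch of the fallback reservation, using raw mmap in place of Dart's GenericMapAligned; the function and parameter names are illustrative, not the Dart API. It asks for the ideal size, halves on each refusal, and reports what was actually obtained (assumes a 64-bit POSIX system):

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Reserve the largest power-of-two region the OS will grant, starting from
// ideal_size and halving down to minimum_size. PROT_NONE + MAP_NORESERVE
// requests address space only, without committing physical memory.
static void* ReserveLargestPowerOfTwo(size_t ideal_size, size_t minimum_size,
                                      size_t* actual_size) {
  for (size_t attempt = ideal_size; attempt >= minimum_size; attempt >>= 1) {
    void* ptr = mmap(nullptr, attempt, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (ptr != MAP_FAILED) {
      *actual_size = attempt;
      return ptr;
    }
  }
  return nullptr;  // even the minimum reservation failed
}

int main() {
  size_t actual = 0;
  void* region = ReserveLargestPowerOfTwo(4ull << 30, 1 << 20, &actual);
  if (region != nullptr) {
    printf("reserved %zu bytes at %p\n", actual, region);
    munmap(region, actual);
  }
  return 0;
}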

--- a/runtime/vm/virtual_memory_compressed.cc
+++ b/runtime/vm/virtual_memory_compressed.cc

@@ -11,6 +11,7 @@
 namespace dart {
 
 uword VirtualMemoryCompressedHeap::base_ = 0;
+uword VirtualMemoryCompressedHeap::size_ = 0;
 uint8_t* VirtualMemoryCompressedHeap::pages_ = nullptr;
 uword VirtualMemoryCompressedHeap::minimum_free_page_id_ = 0;
 Mutex* VirtualMemoryCompressedHeap::mutex_ = nullptr;
@@ -34,12 +35,29 @@ void VirtualMemoryCompressedHeap::ClearPageUsed(uword page_id) {
   pages_[page_id / 8] &= ~PageMask(page_id);
 }
 
-void VirtualMemoryCompressedHeap::Init(void* compressed_heap_region) {
+void VirtualMemoryCompressedHeap::Init(void* compressed_heap_region,
+                                       size_t size) {
   pages_ = new uint8_t[kCompressedHeapBitmapSize];
   memset(pages_, 0, kCompressedHeapBitmapSize);
+  ASSERT(size > 0);
+  ASSERT(size <= kCompressedHeapSize);
+  for (intptr_t page_id = size / kCompressedHeapPageSize;
+       page_id < kCompressedHeapNumPages; page_id++) {
+    SetPageUsed(page_id);
+  }
   base_ = reinterpret_cast<uword>(compressed_heap_region);
+  size_ = size;
   ASSERT(base_ != 0);
-  ASSERT(Utils::IsAligned(base_, kCompressedHeapSize));
+  ASSERT(size_ != 0);
+  ASSERT(size_ <= kCompressedHeapSize);
+  ASSERT(Utils::IsAligned(base_, kCompressedHeapPageSize));
+  ASSERT(Utils::IsAligned(size_, kCompressedHeapPageSize));
+  // base_ is not necessarily 4GB-aligned, because on some systems we can't
+  // make a large enough reservation to guarantee it. Instead, we have only
+  // the weaker property that all addresses in [base_, base_ + size_) have the
+  // same upper 32 bits, which is what we really need for compressed pointers.
+  intptr_t mask = ~(kCompressedHeapAlignment - 1);
+  ASSERT((base_ & mask) == ((base_ + size_ - 1) & mask));
   mutex_ = new Mutex(NOT_IN_PRODUCT("compressed_heap_mutex"));
 }
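Two properties that the new Init relies on can be exercised in isolation: pages beyond the actually-reserved size are pre-marked used, so the rest of the page allocator can keep assuming a full-sized heap, and the final assertion demands only that the region fit inside one aligned 4GB window. A standalone sketch with toy constants (not the Dart implementation; assumes a 64-bit build):

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr size_t kPageSize = 4096;
constexpr size_t kNumPages = 64;
uint8_t bitmap[kNumPages / 8];

// Equivalent of the SetPageUsed loop in Init: mark every page past the
// reserved size as used so the allocator never hands it out.
void MarkUnavailablePages(size_t reserved_size) {
  memset(bitmap, 0, sizeof(bitmap));
  for (size_t page_id = reserved_size / kPageSize; page_id < kNumPages;
       page_id++) {
    bitmap[page_id / 8] |= 1 << (page_id % 8);
  }
}

// The weaker-than-alignment property asserted at the end of Init: the first
// and last byte of [base, base + size) share their upper 32 bits.
bool SameUpper32Bits(uintptr_t base, size_t size) {
  const uintptr_t kAlignment = 4ull << 30;  // 4GB window
  uintptr_t mask = ~(kAlignment - 1);
  return (base & mask) == ((base + size - 1) & mask);
}

int main() {
  MarkUnavailablePages(16 * kPageSize);  // only 16 of 64 pages reserved
  assert((bitmap[1] & 1) == 0);          // pages 8..15 still free
  assert((bitmap[2] & 1) != 0);          // page 16 is pre-marked used
  assert(SameUpper32Bits(5ull << 30, 1ull << 30));   // [5GB, 6GB): one window
  assert(!SameUpper32Bits(7ull << 30, 2ull << 30));  // [7GB, 9GB): crosses 8GB
  return 0;
}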
@@ -47,6 +65,7 @@ void VirtualMemoryCompressedHeap::Cleanup() {
   delete[] pages_;
   delete mutex_;
   base_ = 0;
+  size_ = 0;
   pages_ = nullptr;
   minimum_free_page_id_ = 0;
   mutex_ = nullptr;
@@ -123,8 +142,7 @@ void VirtualMemoryCompressedHeap::Free(void* address, intptr_t size) {
 }
 
 bool VirtualMemoryCompressedHeap::Contains(void* address) {
-  return reinterpret_cast<uword>(address) >= base_ &&
-         reinterpret_cast<uword>(address) < base_ + kCompressedHeapSize;
+  return (reinterpret_cast<uword>(address) - base_) < size_;
 }
 
 }  // namespace dart
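The rewritten Contains is the classic single-comparison bounds check: with unsigned arithmetic, an address below base_ wraps around to a huge value, so one comparison covers both ends of the range. It also now tests against the actual reservation size rather than the compile-time kCompressedHeapSize, which matters once the reservation may be smaller. A minimal demonstration:

#include <cassert>
#include <cstddef>
#include <cstdint>

// One unsigned comparison replaces "address >= base && address < base + size".
bool InRange(uintptr_t address, uintptr_t base, size_t size) {
  return (address - base) < size;
}

int main() {
  assert(InRange(0x1000, 0x1000, 0x100));   // first byte: inside
  assert(!InRange(0x1100, 0x1000, 0x100));  // one past the end: outside
  assert(!InRange(0x0FFF, 0x1000, 0x100));  // below base wraps: outside
  return 0;
}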

--- a/runtime/vm/virtual_memory_compressed.h
+++ b/runtime/vm/virtual_memory_compressed.h

@@ -33,7 +33,7 @@ class VirtualMemoryCompressedHeap : public AllStatic {
  public:
   // Initializes the compressed heap. The callee must allocate a region of
   // kCompressedHeapSize bytes, aligned to kCompressedHeapSize.
-  static void Init(void* compressed_heap_region);
+  static void Init(void* compressed_heap_region, size_t size);
 
   // Cleans up the compressed heap. The callee is responsible for freeing the
   // region's memory.
@@ -58,6 +58,7 @@ class VirtualMemoryCompressedHeap : public AllStatic {
   static void ClearPageUsed(uword page_id);
 
   static uword base_;
+  static uword size_;
   static uint8_t* pages_;
   static uword minimum_free_page_id_;
   static Mutex* mutex_;

--- a/runtime/vm/virtual_memory_posix.cc
+++ b/runtime/vm/virtual_memory_posix.cc

@@ -35,6 +35,10 @@ namespace dart {
 #undef MAP_FAILED
 #define MAP_FAILED reinterpret_cast<void*>(-1)
 
+#if defined(DART_HOST_OS_IOS)
+#define LARGE_RESERVATIONS_MAY_FAIL
+#endif
+
 DECLARE_FLAG(bool, dual_map_code);
 DECLARE_FLAG(bool, write_protect_code);
@@ -76,12 +80,56 @@ intptr_t VirtualMemory::CalculatePageSize() {
   return page_size;
 }
 
+#if defined(DART_COMPRESSED_POINTERS) && defined(LARGE_RESERVATIONS_MAY_FAIL)
+// Truncate to the largest subregion in [region] that doesn't cross an
+// [alignment] boundary.
+static MemoryRegion ClipToAlignedRegion(MemoryRegion region, size_t alignment) {
+  uword base = region.start();
+  uword aligned_base = Utils::RoundUp(base, alignment);
+  uword size_below =
+      region.end() >= aligned_base ? aligned_base - base : region.size();
+  uword size_above =
+      region.end() >= aligned_base ? region.end() - aligned_base : 0;
+  ASSERT(size_below + size_above == region.size());
+  if (size_below >= size_above) {
+    unmap(aligned_base, aligned_base + size_above);
+    return MemoryRegion(reinterpret_cast<void*>(base), size_below);
+  }
+  unmap(base, base + size_below);
+  if (size_above > alignment) {
+    unmap(aligned_base + alignment, aligned_base + size_above);
+    size_above = alignment;
+  }
+  return MemoryRegion(reinterpret_cast<void*>(aligned_base), size_above);
+}
+#endif  // LARGE_RESERVATIONS_MAY_FAIL
+
 void VirtualMemory::Init() {
   page_size_ = CalculatePageSize();
 
 #if defined(DART_COMPRESSED_POINTERS)
   ASSERT(compressed_heap_ == nullptr);
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+  // Try to reserve a region for the compressed heap by requesting decreasing
+  // powers-of-two until one succeeds, and use the largest subregion that does
+  // not cross a 4GB boundary. The subregion itself is not necessarily
+  // 4GB-aligned.
+  for (size_t allocated_size = kCompressedHeapSize + kCompressedHeapAlignment;
+       allocated_size >= kCompressedHeapPageSize; allocated_size >>= 1) {
+    void* address = GenericMapAligned(
+        nullptr, PROT_NONE, allocated_size, kCompressedHeapPageSize,
+        allocated_size + kCompressedHeapPageSize,
+        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE);
+    if (address == MAP_FAILED) continue;
+
+    MemoryRegion region(address, allocated_size);
+    region = ClipToAlignedRegion(region, kCompressedHeapAlignment);
+    compressed_heap_ = new VirtualMemory(region, region);
+    break;
+  }
+#else
   compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
+#endif
   if (compressed_heap_ == nullptr) {
     int error = errno;
     const int kBufferSize = 1024;
@@ -89,7 +137,8 @@ void VirtualMemory::Init() {
     FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
   }
-  VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
+  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
+                                    compressed_heap_->size());
 #endif  // defined(DART_COMPRESSED_POINTERS)
 
 #if defined(DUAL_MAPPING_SUPPORTED)
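To see what ClipToAlignedRegion keeps, consider a grant that straddles a 4GB boundary. The loop above initially over-asks by kCompressedHeapAlignment so that a full-size grant always contains an aligned 4GB subregion; smaller grants keep whichever side of the boundary is larger, trimmed to at most one alignment unit. An arithmetic-only walkthrough with hypothetical values (no actual mapping; assumes a 64-bit build):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kGB = 1ull << 30;
  const uint64_t alignment = 4 * kGB;
  // A 3GB reservation starting at 3GB: 1GB lies below the 4GB boundary and
  // 2GB above it.
  uint64_t base = 3 * kGB;
  uint64_t end = base + 3 * kGB;
  uint64_t aligned_base = (base + alignment - 1) & ~(alignment - 1);  // 4GB
  uint64_t size_below = aligned_base - base;
  uint64_t size_above = end - aligned_base;
  assert(size_below == 1 * kGB);
  assert(size_above == 2 * kGB);
  // size_above wins: the clipped region is [4GB, 6GB) and the 1GB below the
  // boundary would be unmapped.
  assert(size_above >= size_below);
  return 0;
}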
@@ -248,6 +297,25 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
     MemoryRegion region =
         VirtualMemoryCompressedHeap::Allocate(size, alignment);
     if (region.pointer() == nullptr) {
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+      // Try a fresh allocation and hope it ends up in the right region. On
+      // macOS/iOS, this works surprisingly often.
+      void* address =
+          GenericMapAligned(nullptr, PROT_READ | PROT_WRITE, size, alignment,
+                            size + alignment, MAP_PRIVATE | MAP_ANONYMOUS);
+      if (address != nullptr) {
+        uword ok_start = Utils::RoundDown(compressed_heap_->start(),
+                                          kCompressedHeapAlignment);
+        uword ok_end = ok_start + kCompressedHeapSize;
+        uword start = reinterpret_cast<uword>(address);
+        uword end = start + size;
+        if ((start >= ok_start) && (end <= ok_end)) {
+          MemoryRegion region(address, size);
+          return new VirtualMemory(region, region);
+        }
+        munmap(address, size);
+      }
+#endif
       return nullptr;
     }
     Commit(region.pointer(), region.size());
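The fallback in AllocateAligned accepts a speculative mapping only when it lands entirely inside the aligned 4GB window that contains the original reservation; otherwise it is unmapped and the allocation fails normally. The acceptance test, extracted as a standalone sketch (illustrative names; assumes a 64-bit build):

#include <cassert>
#include <cstddef>
#include <cstdint>

// A fresh mapping at [start, start + size) is usable for compressed pointers
// only if it falls inside the 4GB window containing heap_start.
bool InCompressedWindow(uintptr_t heap_start, uintptr_t start, size_t size) {
  const uintptr_t kWindow = 4ull << 30;  // 4GB
  uintptr_t ok_start = heap_start & ~(kWindow - 1);  // RoundDown to 4GB
  uintptr_t ok_end = ok_start + kWindow;
  return start >= ok_start && start + size <= ok_end;
}

int main() {
  uintptr_t heap = 5ull << 30;  // reservation landed at 5GB: window [4GB, 8GB)
  assert(InCompressedWindow(heap, 6ull << 30, 1 << 20));   // 6GB: accepted
  assert(!InCompressedWindow(heap, 9ull << 30, 1 << 20));  // 9GB: rejected
  return 0;
}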
@@ -372,7 +440,10 @@ void VirtualMemory::Commit(void* address, intptr_t size) {
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to commit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to commit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }
@@ -384,7 +455,10 @@ void VirtualMemory::Decommit(void* address, intptr_t size) {
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to decommit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to decommit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }
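The Commit and Decommit failure paths now report errno as both a number and human-readable text, matching the reserve-failure path above. Utils::StrError is Dart's wrapper around a caller-supplied buffer; the same formatting can be sketched portably with C++ <system_error> (illustrative, not the Dart code):

#include <cerrno>
#include <cstdio>
#include <string>
#include <system_error>

int main() {
  // Provoke a failure so errno is set, then format it as "%d (%s)".
  if (fopen("/definitely/not/a/real/path", "r") == nullptr) {
    int error = errno;
    std::string text = std::generic_category().message(error);
    fprintf(stderr, "Failed to open: %d (%s)\n", error, text.c_str());
  }
  return 0;
}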

--- a/runtime/vm/virtual_memory_win.cc
+++ b/runtime/vm/virtual_memory_win.cc

@@ -62,7 +62,8 @@ void VirtualMemory::Init() {
     int error = GetLastError();
     FATAL("Failed to reserve region for compressed heap: %d", error);
   }
-  VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
+  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
+                                    compressed_heap_->size());
 #endif  // defined(DART_COMPRESSED_POINTERS)
 }