[standalone] Remove use of dart::VirtualMemory from the standalone embedder.

Fixes the standalone embedder's ELF loader using the VM's compressed heap after Dart_Cleanup.

This removes the last non-test layering violation of runtime/bin files including runtime/vm files.

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/46810
Bug: https://github.com/dart-lang/sdk/issues/46880
Change-Id: I6c430ae61c057016f7cd56188879a9ccb3699d30
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/209847
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
This commit is contained in:
Ryan Macnak 2021-08-12 21:30:10 +00:00 committed by commit-bot@chromium.org
parent 61109c447a
commit 5cec85d5ce
10 changed files with 400 additions and 36 deletions

View file

@@ -131,6 +131,10 @@ template("build_elf_loader") {
sources = [
"elf_loader.cc",
"elf_loader.h",
"virtual_memory.h",
"virtual_memory_fuchsia.cc",
"virtual_memory_posix.cc",
"virtual_memory_win.cc",
]
deps = invoker.deps
}

View file

@@ -2,13 +2,9 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include <bin/elf_loader.h>
#include <bin/file.h>
#include <platform/elf.h>
#include <platform/globals.h>
#include <vm/cpu.h>
#include <vm/virtual_memory.h>
#include "bin/elf_loader.h"
#include "platform/globals.h"
#if defined(DART_HOST_OS_FUCHSIA)
#include <sys/mman.h>
#endif
@ -16,6 +12,10 @@
#include <memory>
#include <utility>
#include "bin/file.h"
#include "bin/virtual_memory.h"
#include "platform/elf.h"
namespace dart {
namespace bin {
@ -369,7 +369,6 @@ bool LoadedElf::ReadSectionStringTable() {
bool LoadedElf::LoadSegments() {
// Calculate the total amount of virtual memory needed.
uword total_memory = 0;
uword maximum_alignment = PageSize();
for (uword i = 0; i < header_.num_program_headers; ++i) {
const dart::elf::ProgramHeader header = program_table_[i];
@ -381,14 +380,12 @@ bool LoadedElf::LoadSegments() {
total_memory);
CHECK_ERROR(Utils::IsPowerOfTwo(header.alignment),
"Alignment must be a power of two.");
maximum_alignment =
Utils::Maximum(maximum_alignment, static_cast<uword>(header.alignment));
}
total_memory = Utils::RoundUp(total_memory, PageSize());
base_.reset(VirtualMemory::AllocateAligned(
total_memory, /*alignment=*/maximum_alignment,
/*is_executable=*/false, "dart-compiled-image"));
base_.reset(VirtualMemory::Allocate(total_memory,
/*is_executable=*/false,
"dart-compiled-image"));
CHECK_ERROR(base_ != nullptr, "Could not reserve virtual memory.");
for (uword i = 0; i < header_.num_program_headers; ++i) {

View file

@@ -0,0 +1,73 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_BIN_VIRTUAL_MEMORY_H_
#define RUNTIME_BIN_VIRTUAL_MEMORY_H_
#include "platform/allocation.h"
#include "platform/globals.h"
namespace dart {
namespace bin {
// Embedder-local virtual-memory wrapper (replaces use of dart::VirtualMemory
// in the standalone embedder, per this CL). Owns one mapped region and unmaps
// it on destruction unless release() was called.
class VirtualMemory {
 public:
  // Portable page-protection modes; translated to OS-specific flags by the
  // per-platform Protect() implementations.
  enum Protection {
    kNoAccess,
    kReadOnly,
    kReadWrite,
    kReadExecute,
    kReadWriteExecute
  };

  // The reserved memory is unmapped on destruction.
  ~VirtualMemory();

  // Relinquishes ownership of the mapping: after this, the destructor is a
  // no-op and the caller is responsible for the region's lifetime.
  void release() {
    address_ = nullptr;
    size_ = 0;
  }

  uword start() const { return reinterpret_cast<uword>(address_); }
  uword end() const { return reinterpret_cast<uword>(address_) + size_; }
  void* address() const { return address_; }
  intptr_t size() const { return size_; }

  // Changes the protection of the virtual memory area.
  static void Protect(void* address, intptr_t size, Protection mode);
  void Protect(Protection mode) { return Protect(address(), size(), mode); }

  // Reserves and commits a virtual memory segment with size. If a segment of
  // the requested size cannot be allocated, NULL is returned.
  static VirtualMemory* Allocate(intptr_t size,
                                 bool is_executable,
                                 const char* name);

  // Caches the OS page size; must run before PageSize() or Allocate().
  static void Init() { page_size_ = CalculatePageSize(); }

  // Returns the cached page size. Use only if Init() has been called.
  static intptr_t PageSize() {
    ASSERT(page_size_ != 0);
    return page_size_;
  }

 private:
  static intptr_t CalculatePageSize();

  // These constructors are only used internally when reserving new virtual
  // spaces. They do not reserve any virtual address space on their own.
  VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}

  void* address_;  // Start of the mapping; nullptr after release().
  size_t size_;    // Size of the mapping in bytes.

  static uword page_size_;  // 0 until Init() is called.

  DISALLOW_IMPLICIT_CONSTRUCTORS(VirtualMemory);
};
} // namespace bin
} // namespace dart
#endif // RUNTIME_BIN_VIRTUAL_MEMORY_H_

View file

@@ -0,0 +1,110 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "platform/globals.h"
#if defined(DART_HOST_OS_FUCHSIA)
#include "bin/virtual_memory.h"
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#include "platform/assert.h"
#include "platform/utils.h"
namespace dart {
namespace bin {
uword VirtualMemory::page_size_ = 0;
// Queries the system page size and checks it is a nonzero power of two.
intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t result = getpagesize();
  ASSERT(result != 0);
  ASSERT(Utils::IsPowerOfTwo(result));
  return result;
}
// Reserves and commits `size` bytes (page-aligned) backed by a fresh VMO
// mapped into the root VMAR. Returns nullptr on any syscall failure. `name`
// labels the VMO for diagnostics when non-null.
VirtualMemory* VirtualMemory::Allocate(intptr_t size,
                                       bool is_executable,
                                       const char* name) {
  ASSERT(Utils::IsAligned(size, page_size_));
  zx_handle_t vmar = zx_vmar_root_self();
  zx_handle_t vmo = ZX_HANDLE_INVALID;
  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
  if (status != ZX_OK) {
    return nullptr;
  }

  if (name != nullptr) {
    // Label the VMO so the mapping is identifiable in diagnostic tooling.
    zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
  }

  if (is_executable) {
    // Add ZX_RIGHT_EXECUTE permission to VMO, so it can be mapped
    // into memory as executable (now or later). On success the old handle
    // is consumed and `vmo` is replaced with the new one.
    status = zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
    if (status != ZX_OK) {
      zx_handle_close(vmo);
      return nullptr;
    }
  }

  const zx_vm_option_t region_options =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
      (is_executable ? ZX_VM_PERM_EXECUTE : 0);
  uword base;
  status = zx_vmar_map(vmar, region_options, 0, vmo, 0u, size, &base);
  // The mapping keeps the pages alive; the VMO handle itself is no longer
  // needed whether or not the map succeeded.
  zx_handle_close(vmo);
  if (status != ZX_OK) {
    return nullptr;
  }

  return new VirtualMemory(reinterpret_cast<void*>(base), size);
}
// Unmaps the region unless ownership was relinquished via release().
VirtualMemory::~VirtualMemory() {
  if (address_ == nullptr) {
    return;  // Released: nothing to unmap.
  }
  const zx_status_t status = zx_vmar_unmap(
      zx_vmar_root_self(), reinterpret_cast<uword>(address_), size_);
  if (status != ZX_OK) {
    FATAL("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
  }
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
const uword start_address = reinterpret_cast<uword>(address);
const uword end_address = start_address + size;
const uword page_address = Utils::RoundDown(start_address, PageSize());
uint32_t prot = 0;
switch (mode) {
case kNoAccess:
prot = 0;
break;
case kReadOnly:
prot = ZX_VM_PERM_READ;
break;
case kReadWrite:
prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
break;
case kReadExecute:
prot = ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
break;
case kReadWriteExecute:
prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
break;
}
zx_status_t status = zx_vmar_protect(zx_vmar_root_self(), prot, page_address,
end_address - page_address);
if (status != ZX_OK) {
FATAL("zx_vmar_protect(0x%lx, 0x%lx) failed: %s\n", page_address,
end_address - page_address, zx_status_get_string(status));
}
}
} // namespace bin
} // namespace dart
#endif // defined(DART_HOST_OS_FUCHSIA)

View file

@@ -0,0 +1,110 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "platform/globals.h"
#if defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX) || \
defined(DART_HOST_OS_MACOS)
#include "bin/virtual_memory.h"
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include "platform/assert.h"
#include "platform/utils.h"
namespace dart {
namespace bin {
// standard MAP_FAILED causes "error: use of old-style cast" as it
// defines MAP_FAILED as ((void *) -1)
#undef MAP_FAILED
#define MAP_FAILED reinterpret_cast<void*>(-1)
uword VirtualMemory::page_size_ = 0;
// Determines the hardware page size; asserted to be a nonzero power of two.
intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t size = getpagesize();
  ASSERT(size != 0);
  ASSERT(Utils::IsPowerOfTwo(size));
  return size;
}
// Reserves and commits `size` bytes (page-aligned) of anonymous memory via
// mmap. Returns nullptr if the mapping fails. `name` is accepted for
// interface parity with other platforms but is unused here.
VirtualMemory* VirtualMemory::Allocate(intptr_t size,
                                       bool is_executable,
                                       const char* name) {
  ASSERT(Utils::IsAligned(size, PageSize()));
  const int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if (defined(DART_HOST_OS_MACOS) && !defined(DART_HOST_OS_IOS))
  // MAP_JIT is required on hardened macOS runtimes for writable+executable
  // mappings; only available from 10.14 on.
  if (is_executable && IsAtLeastOS10_14()) {
    map_flags |= MAP_JIT;
  }
#endif  // defined(DART_HOST_OS_MACOS)
  // Some 64-bit microarchitectures store only the low 32-bits of targets as
  // part of indirect branch prediction, predicting that the target's upper bits
  // will be same as the call instruction's address. This leads to misprediction
  // for indirect calls crossing a 4GB boundary. We ask mmap to place our
  // generated code near the VM binary to avoid this.
  void* hint = is_executable ? reinterpret_cast<void*>(&Allocate) : nullptr;
  void* address = mmap(hint, size, prot, map_flags, -1, 0);
  if (address == MAP_FAILED) {
    return nullptr;
  }
  return new VirtualMemory(address, size);
}
// Unmaps the region; a released (null) region is a no-op.
VirtualMemory::~VirtualMemory() {
  if (address_ == nullptr) {
    return;
  }
  if (munmap(address_, size_) != 0) {
    const int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("munmap error: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
uword start_address = reinterpret_cast<uword>(address);
uword end_address = start_address + size;
uword page_address = Utils::RoundDown(start_address, PageSize());
int prot = 0;
switch (mode) {
case kNoAccess:
prot = PROT_NONE;
break;
case kReadOnly:
prot = PROT_READ;
break;
case kReadWrite:
prot = PROT_READ | PROT_WRITE;
break;
case kReadExecute:
prot = PROT_READ | PROT_EXEC;
break;
case kReadWriteExecute:
prot = PROT_READ | PROT_WRITE | PROT_EXEC;
break;
}
if (mprotect(reinterpret_cast<void*>(page_address),
end_address - page_address, prot) != 0) {
int error = errno;
const int kBufferSize = 1024;
char error_buf[kBufferSize];
FATAL("mprotect error: %d (%s)", error,
Utils::StrError(error, error_buf, kBufferSize));
}
}
} // namespace bin
} // namespace dart
#endif // defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX) || \
// defined(DART_HOST_OS_MACOS)

View file

@@ -0,0 +1,79 @@
// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "platform/globals.h"
#if defined(DART_HOST_OS_WINDOWS)
#include "bin/virtual_memory.h"
#include "platform/assert.h"
#include "platform/utils.h"
namespace dart {
namespace bin {
uword VirtualMemory::page_size_ = 0;
// Reads the page size from the Win32 system-information block; asserted to
// be a nonzero power of two.
intptr_t VirtualMemory::CalculatePageSize() {
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  const intptr_t result = info.dwPageSize;
  ASSERT(result != 0);
  ASSERT(Utils::IsPowerOfTwo(result));
  return result;
}
// Reserves and commits `size` bytes (page-aligned) with VirtualAlloc.
// Returns nullptr on failure. `name` is accepted for interface parity with
// other platforms but is unused on Windows.
VirtualMemory* VirtualMemory::Allocate(intptr_t size,
                                       bool is_executable,
                                       const char* name) {
  ASSERT(Utils::IsAligned(size, PageSize()));
  // VirtualAlloc's flProtect parameter is a DWORD; use the API's own type
  // rather than int to avoid an implicit signed->unsigned conversion.
  const DWORD prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  void* address = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, prot);
  if (address == nullptr) {
    return nullptr;
  }
  return new VirtualMemory(address, size);
}
// Releases the reservation unless ownership was dropped via release().
VirtualMemory::~VirtualMemory() {
  if (address_ == nullptr) {
    return;
  }
  if (VirtualFree(address_, 0, MEM_RELEASE) == 0) {
    FATAL("VirtualFree failed: Error code %d\n", GetLastError());
  }
}
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
uword start_address = reinterpret_cast<uword>(address);
uword end_address = start_address + size;
uword page_address = Utils::RoundDown(start_address, PageSize());
DWORD prot = 0;
switch (mode) {
case kNoAccess:
prot = PAGE_NOACCESS;
break;
case kReadOnly:
prot = PAGE_READONLY;
break;
case kReadWrite:
prot = PAGE_READWRITE;
break;
case kReadExecute:
prot = PAGE_EXECUTE_READ;
break;
case kReadWriteExecute:
prot = PAGE_EXECUTE_READWRITE;
break;
}
DWORD old_prot = 0;
if (VirtualProtect(reinterpret_cast<void*>(page_address),
end_address - page_address, prot, &old_prot) == 0) {
FATAL("VirtualProtect failed %d\n", GetLastError());
}
}
} // namespace bin
} // namespace dart
#endif // defined(DART_HOST_OS_WINDOWS)

View file

@ -76,13 +76,6 @@ class VirtualMemory {
static VirtualMemory* ForImagePage(void* pointer, uword size);
void release() {
// Make sure no pages would be leaked.
const uword size_ = size();
ASSERT(address() == reserved_.pointer() && size_ == reserved_.size());
reserved_ = MemoryRegion(nullptr, 0);
}
private:
static intptr_t CalculatePageSize();

View file

@ -7,8 +7,6 @@
#include "vm/virtual_memory.h"
#include <sys/mman.h>
#include <unistd.h>
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
@ -160,6 +158,7 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
if (status != ZX_OK) {
LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
zx_status_get_string(status));
zx_handle_close(vmo);
return NULL;
}
}
@ -173,6 +172,7 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
if (status != ZX_OK) {
LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", region_options, base,
size, zx_status_get_string(status));
zx_handle_close(vmo);
return NULL;
}
void* region_ptr = reinterpret_cast<void*>(base);

View file

@ -80,17 +80,16 @@ void VirtualMemory::Init() {
page_size_ = CalculatePageSize();
#if defined(DART_COMPRESSED_POINTERS)
ASSERT(compressed_heap_ == nullptr);
compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
if (compressed_heap_ == nullptr) {
compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
if (compressed_heap_ == nullptr) {
int error = errno;
const int kBufferSize = 1024;
char error_buf[kBufferSize];
FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
Utils::StrError(error, error_buf, kBufferSize));
}
VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
int error = errno;
const int kBufferSize = 1024;
char error_buf[kBufferSize];
FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
Utils::StrError(error, error_buf, kBufferSize));
}
VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
#endif // defined(DART_COMPRESSED_POINTERS)
#if defined(DUAL_MAPPING_SUPPORTED)

View file

@ -56,14 +56,13 @@ void VirtualMemory::Init() {
page_size_ = CalculatePageSize();
#if defined(DART_COMPRESSED_POINTERS)
ASSERT(compressed_heap_ == nullptr);
compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
if (compressed_heap_ == nullptr) {
compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
if (compressed_heap_ == nullptr) {
int error = GetLastError();
FATAL("Failed to reserve region for compressed heap: %d", error);
}
VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
int error = GetLastError();
FATAL("Failed to reserve region for compressed heap: %d", error);
}
VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
#endif // defined(DART_COMPRESSED_POINTERS)
}