[vm] Remove dual mapping of code

We no longer use the Dart VM in a setting where this matters
as a security measure, and it just complicates portability
for no benefit.

There are indications that it causes problems when running
the Linux build of the Dart VM under Docker on Mac OS X.

Fixes https://github.com/dart-lang/sdk/issues/54446

TEST=ci

Cq-Include-Trybots: luci.dart.try:vm-fuchsia-release-arm64-try,vm-fuchsia-release-x64-try
Change-Id: I11bdaa8faebaca1df6fd59097049bdaea9cb8e12
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/344581
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Slava Egorov <vegorov@google.com>
Vyacheslav Egorov 2024-01-09 12:36:55 +00:00 committed by Commit Queue
parent b905c9a171
commit 3e0ae6da5f
28 changed files with 18 additions and 407 deletions
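
For context: "dual mapping of code" means the same underlying code pages are
mapped twice, as a writable (RW) view that the JIT emits into and as a
read-execute (RX) alias that the VM runs from. A minimal, Linux-only sketch of
the idea (illustrative only, not the VM's implementation; assumes a libc that
exposes SYS_memfd_create, and uses MFD_CLOEXEC == 0x1 as in the code below):

// Illustrative sketch, not VM code: back code pages with an anonymous memfd
// and map them twice, once RW for emitting and once RX for executing.
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const size_t size = 4096;
  const int fd = syscall(SYS_memfd_create, "code-pages", 1u /* MFD_CLOEXEC */);
  if (fd == -1 || ftruncate(fd, size) == -1) return 1;
  // Writable view: machine code would be emitted through this mapping.
  void* rw = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  // Executable alias of the very same pages: execution happens from here.
  void* rx = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  close(fd);
  if (rw == MAP_FAILED || rx == MAP_FAILED) return 1;
  std::printf("writable view at %p, executable alias at %p\n", rw, rx);
  return 0;
}

The probe deleted from VirtualMemory::Init below runs essentially this sequence
at startup and turns --dual-map-code off when the RX alias cannot be
established, e.g. in some Docker containers or when memfd_create is missing.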


@@ -403,14 +403,6 @@ struct simd128_value_t {
#endif
#endif
// Determine whether dual mapping of code pages is supported.
// We test dual mapping on linux x64 and deploy it on fuchsia.
#if !defined(DART_PRECOMPILED_RUNTIME) && \
(defined(DART_TARGET_OS_LINUX) && defined(TARGET_ARCH_X64) || \
defined(DART_TARGET_OS_FUCHSIA))
#define DUAL_MAPPING_SUPPORTED 1
#endif
// Short form printf format specifiers
#define Pd PRIdPTR
#define Pu PRIuPTR


@@ -305,9 +305,6 @@ dart/optimized_stacktrace_line_test: Skip
dart/*: SkipByDesign # VM specific tests
dart/catch_entry_state: SkipByDesign
[ $system != fuchsia && ($arch != x64 || $system != linux) ]
cc/CodeExecutability: SkipByDesign # --dual-map-code not supported on non-Linux/Fuchsia
[ $arch == arm || $arch == arm64 || $builder_tag == crossword || $builder_tag == crossword_ast || $compiler != dartkp || $system == linux && $simulator ]
dart/v8_snapshot_profile_writer_test: SkipByDesign # Only relevant for AOT. Doesn't work in cross-compilation (has to run on the host). On Linux/simarm64 and Linux/simarm this test requires buildtools/clang which is not always available on testing shards.


@@ -12,12 +12,6 @@ namespace dart {
DEFINE_FLAG(bool, write_protect_code, true, "Write protect jitted code");
#if defined(DUAL_MAPPING_SUPPORTED)
DEFINE_FLAG(bool, dual_map_code, true, "Dual map jitted code, RW and RX");
#else
DEFINE_FLAG(bool, dual_map_code, false, "Dual map jitted code, RW and RX");
#endif // defined(DUAL_MAPPING_SUPPORTED)
#if defined(TARGET_ARCH_IA32)
WritableInstructionsScope::WritableInstructionsScope(uword address,
intptr_t size)


@@ -343,9 +343,6 @@ void CodeRelocator::ResolveCallToDestination(UnresolvedCall* unresolved_call,
{
auto const caller = unresolved_call->caller;
uword addr = Code::PayloadStartOf(caller) + call_offset;
if (FLAG_write_protect_code) {
addr -= Page::Of(Code::InstructionsOf(caller))->AliasOffset();
}
if (unresolved_call->is_tail_call) {
PcRelativeTailCallPattern call(addr);
ASSERT(call.IsValid());


@@ -18,7 +18,6 @@ namespace dart {
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(int, lower_pc_relative_call_distance);
DECLARE_FLAG(int, upper_pc_relative_call_distance);
@@ -134,11 +133,6 @@ struct RelocatorTestHelper {
reinterpret_cast<void*>(assembler.CodeAddress(0)),
assembler.CodeSize());
if (FLAG_write_protect_code && FLAG_dual_map_code) {
auto& instructions = Instructions::Handle(code.instructions());
instructions ^= Page::ToExecutable(instructions.ptr());
code.set_instructions(instructions);
}
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
@@ -259,12 +253,8 @@ struct RelocatorTestHelper {
if (FLAG_write_protect_code) {
const uword address = UntaggedObject::ToAddr(instructions.ptr());
const auto size = instructions.ptr()->untag()->HeapSize();
instructions =
Instructions::RawCast(Page::ToExecutable(instructions.ptr()));
const auto prot = FLAG_dual_map_code ? VirtualMemory::kReadOnly
: VirtualMemory::kReadExecute;
VirtualMemory::Protect(reinterpret_cast<void*>(address), size, prot);
VirtualMemory::Protect(reinterpret_cast<void*>(address), size,
VirtualMemory::kReadExecute);
}
CPU::FlushICache(instructions.PayloadStart(), instructions.Size());
}


@@ -405,10 +405,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
}
static bool TryAcquireMarkBit(ObjectPtr raw_obj) {
if (FLAG_write_protect_code && raw_obj->IsInstructions()) {
// A non-writable alias mapping may exist for instruction pages.
raw_obj = Page::ToWritable(raw_obj);
}
if (!sync) {
raw_obj->untag()->SetMarkBitUnsynchronized();
return true;


@@ -284,7 +284,7 @@ void Page::WriteProtect(bool read_only) {
VirtualMemory::Protection prot;
if (read_only) {
if (is_executable() && (memory_->AliasOffset() == 0)) {
if (is_executable()) {
prot = VirtualMemory::kReadExecute;
} else {
prot = VirtualMemory::kReadOnly;


@@ -89,7 +89,6 @@ class Page {
uword start() const { return memory_->start(); }
uword end() const { return memory_->end(); }
bool Contains(uword addr) const { return memory_->Contains(addr); }
intptr_t AliasOffset() const { return memory_->AliasOffset(); }
uword object_start() const {
return is_new() ? new_object_start() : old_object_start();
@@ -141,40 +140,6 @@
return reinterpret_cast<Page*>(addr & kPageMask);
}
// Warning: This does not work for objects on image pages.
static ObjectPtr ToExecutable(ObjectPtr obj) {
Page* page = Of(obj);
VirtualMemory* memory = page->memory_;
const intptr_t alias_offset = memory->AliasOffset();
if (alias_offset == 0) {
return obj; // Not aliased.
}
uword addr = UntaggedObject::ToAddr(obj);
if (memory->Contains(addr)) {
return UntaggedObject::FromAddr(addr + alias_offset);
}
// obj is executable.
ASSERT(memory->ContainsAlias(addr));
return obj;
}
// Warning: This does not work for objects on image pages.
static ObjectPtr ToWritable(ObjectPtr obj) {
Page* page = Of(obj);
VirtualMemory* memory = page->memory_;
const intptr_t alias_offset = memory->AliasOffset();
if (alias_offset == 0) {
return obj; // Not aliased.
}
uword addr = UntaggedObject::ToAddr(obj);
if (memory->ContainsAlias(addr)) {
return UntaggedObject::FromAddr(addr - alias_offset);
}
// obj is writable.
ASSERT(memory->Contains(addr));
return obj;
}
// 1 card = 32 slots.
static constexpr intptr_t kSlotsPerCardLog2 = 5;
static constexpr intptr_t kBytesPerCardLog2 =
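
The two Page helpers deleted above reduce to shifting an address by the fixed
offset between the writable view and its executable alias. A stand-alone sketch
of that arithmetic (hypothetical AliasedRegion type, not the VM's Page class):

// Illustrative sketch, not VM code: translate addresses between two views of
// the same pages that live a fixed offset apart.
#include <cstddef>
#include <cstdint>

struct AliasedRegion {
  uintptr_t rw_base;  // start of the writable view
  uintptr_t rx_base;  // start of the executable alias (equal => not aliased)
  size_t size;

  intptr_t AliasOffset() const {
    return static_cast<intptr_t>(rx_base - rw_base);
  }
  uintptr_t ToExecutable(uintptr_t addr) const {
    // Only translate addresses that actually lie in the writable view.
    if (AliasOffset() == 0 || addr < rw_base || addr >= rw_base + size)
      return addr;
    return addr + AliasOffset();
  }
  uintptr_t ToWritable(uintptr_t addr) const {
    if (AliasOffset() == 0 || addr < rx_base || addr >= rx_base + size)
      return addr;
    return addr - AliasOffset();
  }
};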


@@ -48,10 +48,6 @@ void VerifyPointersVisitor::VisitPointers(ObjectPtr* from, ObjectPtr* to) {
ObjectPtr obj = *ptr;
if (obj->IsHeapObject()) {
if (!allocated_set_->Contains(obj)) {
if (obj->IsInstructions() &&
allocated_set_->Contains(Page::ToWritable(obj))) {
continue;
}
FATAL("%s: Invalid pointer: *0x%" Px " = 0x%" Px "\n", msg_,
reinterpret_cast<uword>(ptr), static_cast<uword>(obj));
}
@@ -67,10 +63,6 @@ void VerifyPointersVisitor::VisitCompressedPointers(uword heap_base,
ObjectPtr obj = ptr->Decompress(heap_base);
if (obj->IsHeapObject()) {
if (!allocated_set_->Contains(obj)) {
if (obj->IsInstructions() &&
allocated_set_->Contains(Page::ToWritable(obj))) {
continue;
}
FATAL("%s: Invalid pointer: *0x%" Px " = 0x%" Px "\n", msg_,
reinterpret_cast<uword>(ptr), static_cast<uword>(obj));
}


@@ -99,7 +99,6 @@ DEFINE_FLAG(bool,
false,
"Remove script timestamps to allow for deterministic testing.");
DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, intrinsify);
DECLARE_FLAG(bool, trace_deoptimization);
DECLARE_FLAG(bool, trace_deoptimization_verbose);
@@ -18172,27 +18171,9 @@ CodePtr Code::FinalizeCode(FlowGraphCompiler* compiler,
// for execution.
if (FLAG_write_protect_code) {
uword address = UntaggedObject::ToAddr(instrs.ptr());
// Check if a dual mapping exists.
instrs = Instructions::RawCast(Page::ToExecutable(instrs.ptr()));
uword exec_address = UntaggedObject::ToAddr(instrs.ptr());
const bool use_dual_mapping = exec_address != address;
ASSERT(use_dual_mapping == FLAG_dual_map_code);
// When dual mapping is enabled the executable mapping is RX from the
// point of allocation and never changes protection.
// Yet the writable mapping is still turned back from RW to R.
if (use_dual_mapping) {
VirtualMemory::Protect(reinterpret_cast<void*>(address),
instrs.ptr()->untag()->HeapSize(),
VirtualMemory::kReadOnly);
address = exec_address;
} else {
// If dual mapping is disabled and we write protect then we have to
// change the single mapping from RW -> RX.
VirtualMemory::Protect(reinterpret_cast<void*>(address),
instrs.ptr()->untag()->HeapSize(),
VirtualMemory::kReadExecute);
}
VirtualMemory::Protect(reinterpret_cast<void*>(address),
instrs.ptr()->untag()->HeapSize(),
VirtualMemory::kReadExecute);
}
// Hook up Code and Instructions objects.
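
After this change, --write-protect-code always uses the single-mapping path
kept above: code is emitted into an RW mapping which is then flipped to RX in
place. A minimal x86-64, Linux-only sketch of that W^X flip (illustrative only;
__builtin___clear_cache is the GCC/Clang builtin and is a no-op on x86):

// Illustrative sketch, not VM code: emit into RW memory, then mprotect the
// same pages to RX before executing them.
#include <sys/mman.h>
#include <cstdint>
#include <cstring>

using Fn = int (*)();

Fn EmitReturn42() {
  const size_t size = 4096;
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return nullptr;
  const uint8_t code[] = {0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3};  // mov eax, 42; ret
  std::memcpy(mem, code, sizeof(code));
  // RW -> RX on the same mapping; no alias is involved anymore.
  if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0) return nullptr;
  __builtin___clear_cache(static_cast<char*>(mem),
                          static_cast<char*>(mem) + size);
  return reinterpret_cast<Fn>(reinterpret_cast<uintptr_t>(mem));
}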


@@ -941,11 +941,6 @@ intptr_t HeapSnapshotWriter::GetObjectId(ObjectPtr obj) const {
return id;
}
if (FLAG_write_protect_code && obj->IsInstructions() && !OnImagePage(obj)) {
// A non-writable alias mapping may exist for instruction pages.
obj = Page::ToWritable(obj);
}
CountingPage* counting_page = FindCountingPage(obj);
intptr_t id;
if (counting_page != nullptr) {


@@ -39,7 +39,6 @@ namespace dart {
#define Z (thread->zone())
DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);
static ClassPtr CreateDummyClass(const String& class_name,
@@ -2819,44 +2818,6 @@ class CodeTestHelper {
}
};
// Test for executability of generated instructions. The test crashes with a
// segmentation fault when executing the writeable view.
ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(CodeExecutability, "Crash") {
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
SafepointWriteRwLocker locker(thread,
thread->isolate_group()->program_lock());
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
function, nullptr, &_assembler_, Code::PoolAttachment::kAttachPool));
function.AttachCode(code);
Instructions& instructions = Instructions::Handle(code.instructions());
uword payload_start = code.PayloadStart();
const uword unchecked_offset = code.UncheckedEntryPoint() - code.EntryPoint();
EXPECT_EQ(instructions.ptr(), Instructions::FromPayloadStart(payload_start));
// Execute the executable view of the instructions (default).
Object& result =
Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
EXPECT_EQ(1, Smi::Cast(result).Value());
// Switch to the writeable but non-executable view of the instructions.
instructions ^= Page::ToWritable(instructions.ptr());
payload_start = instructions.PayloadStart();
EXPECT_EQ(instructions.ptr(), Instructions::FromPayloadStart(payload_start));
// Hook up Code and Instructions objects.
CodeTestHelper::SetInstructions(code, instructions, unchecked_offset);
function.AttachCode(code);
// Try executing the generated code, expected to crash.
result = DartEntry::InvokeFunction(function, Array::empty_array());
EXPECT_EQ(1, Smi::Cast(result).Value());
if (!FLAG_dual_map_code) {
// Since this test is expected to crash, crash if dual mapping of code
// is switched off.
FATAL("Test requires --dual-map-code; skip by forcing expected crash");
}
}
// Test for Embedded String object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedStringInCode) {
extern void GenerateEmbedStringInCode(compiler::Assembler * assembler,


@@ -26,14 +26,9 @@ void VirtualMemory::Truncate(intptr_t new_size) {
if (FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
size() - new_size)) {
reserved_.set_size(new_size);
if (AliasOffset() != 0) {
FreeSubSegment(reinterpret_cast<void*>(alias_.start() + new_size),
alias_.size() - new_size);
}
}
}
region_.Subregion(region_, 0, new_size);
alias_.Subregion(alias_, 0, new_size);
}
VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {
@@ -42,7 +37,7 @@ VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {
MemoryRegion region(pointer, size);
MemoryRegion reserved(nullptr, 0); // null reservation indicates VM should
// not attempt to free this memory.
VirtualMemory* memory = new VirtualMemory(region, region, reserved);
VirtualMemory* memory = new VirtualMemory(region, reserved);
ASSERT(!memory->vm_owns_region());
return memory;
}


@@ -33,7 +33,6 @@ class VirtualMemory {
uword end() const { return region_.end(); }
void* address() const { return region_.pointer(); }
intptr_t size() const { return region_.size(); }
intptr_t AliasOffset() const { return alias_.start() - region_.start(); }
#if defined(DART_HOST_OS_FUCHSIA)
static void Init(zx_handle_t vmex_resource);
@@ -42,13 +41,7 @@
#endif
static void Cleanup();
// Returns true if dual mapping is enabled.
static bool DualMappingEnabled();
bool Contains(uword addr) const { return region_.Contains(addr); }
bool ContainsAlias(uword addr) const {
return (AliasOffset() != 0) && alias_.Contains(addr);
}
// Changes the protection of the virtual memory area.
static void Protect(void* address, intptr_t size, Protection mode);
@@ -113,20 +106,11 @@ class VirtualMemory {
// These constructors are only used internally when reserving new virtual
// spaces. They do not reserve any virtual address space on their own.
VirtualMemory(const MemoryRegion& region,
const MemoryRegion& alias,
const MemoryRegion& reserved)
: region_(region), alias_(alias), reserved_(reserved) {}
VirtualMemory(const MemoryRegion& region, const MemoryRegion& reserved)
: region_(region), alias_(region), reserved_(reserved) {}
: region_(region), reserved_(reserved) {}
MemoryRegion region_;
// Optional secondary mapping of region_ to a virtual space with different
// protection, e.g. allowing code execution.
MemoryRegion alias_;
// The underlying reservation not yet given back to the OS.
// Its address might disagree with region_ due to aligned allocations.
// Its size might disagree with region_ due to Truncate.


@@ -34,7 +34,6 @@
namespace dart {
DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);
uword VirtualMemory::page_size_ = 0;
@@ -126,10 +125,6 @@ static void Unmap(zx_handle_t vmar, uword start, uword end) {
}
}
bool VirtualMemory::DualMappingEnabled() {
return FLAG_dual_map_code;
}
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
intptr_t alignment,
bool is_executable,
@@ -139,14 +134,6 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
// is_executable = true) is allocated as non-executable and later
// changed to executable via VirtualMemory::Protect, which requires
// ZX_RIGHT_EXECUTE on the underlying VMO.
//
// If FLAG_dual_map_code is active, the executable mapping will be mapped RX
// immediately and never changes protection until it is eventually unmapped.
//
// In addition, dual mapping of the same underlying code memory is provided.
const bool dual_mapping =
is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
ASSERT(Utils::IsAligned(size, page_size_));
ASSERT(Utils::IsPowerOfTwo(alignment));
ASSERT(Utils::IsAligned(alignment, page_size_));
@@ -210,30 +197,7 @@
}
void* region_ptr = reinterpret_cast<void*>(base);
MemoryRegion region(region_ptr, size);
VirtualMemory* result;
if (dual_mapping) {
// The mapping will be RX and stays that way until it will eventually be
// unmapped.
const zx_vm_option_t alias_options =
ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE | align_flag;
status = zx_vmar_map(vmar, alias_options, 0, vmo, 0u, size, &base);
LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", alias_options, base, size);
if (status != ZX_OK) {
LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", alias_options, base,
size, zx_status_get_string(status));
const uword region_base = reinterpret_cast<uword>(region_ptr);
Unmap(vmar, region_base, region_base + size);
return nullptr;
}
void* alias_ptr = reinterpret_cast<void*>(base);
ASSERT(region_ptr != alias_ptr);
MemoryRegion alias(alias_ptr, size);
result = new VirtualMemory(region, alias, region);
} else {
result = new VirtualMemory(region, region, region);
}
VirtualMemory* result = new VirtualMemory(region, region);
zx_handle_close(vmo);
#if defined(DART_COMPRESSED_POINTERS)
@@ -253,14 +217,6 @@ VirtualMemory::~VirtualMemory() {
reserved_.end());
LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n", reserved_.start(),
reserved_.size());
const intptr_t alias_offset = AliasOffset();
if (alias_offset != 0) {
Unmap(getVmarForAddress(reserved_.start()),
reserved_.start() + alias_offset, reserved_.end() + alias_offset);
LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n",
reserved_.start() + alias_offset, reserved_.size());
}
}
}


@@ -43,7 +43,6 @@ namespace dart {
#define LARGE_RESERVATIONS_MAY_FAIL
#endif
DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);
#if defined(DART_TARGET_OS_LINUX)
@@ -189,46 +188,6 @@ void VirtualMemory::Init() {
compressed_heap_->size());
#endif // defined(DART_COMPRESSED_POINTERS)
#if defined(DUAL_MAPPING_SUPPORTED)
// Perf is Linux-specific and the flags aren't defined in Product.
#if defined(DART_TARGET_OS_LINUX) && !defined(PRODUCT)
// Perf interacts strangely with memfds, leading it to sometimes collect
// garbled return addresses.
if (FLAG_generate_perf_events_symbols || FLAG_generate_perf_jitdump) {
LOG_INFO(
"Dual code mapping disabled to generate perf events or jitdump.\n");
FLAG_dual_map_code = false;
return;
}
#endif
// Detect dual mapping exec permission limitation on some platforms,
// such as on docker containers, and disable dual mapping in this case.
// Also detect for missing support of memfd_create syscall.
if (FLAG_dual_map_code) {
intptr_t size = PageSize();
intptr_t alignment = kPageSize;
bool executable = true;
bool compressed = false;
VirtualMemory* vm =
AllocateAligned(size, alignment, executable, compressed, "memfd-test");
if (vm == nullptr) {
LOG_INFO("memfd_create not supported; disabling dual mapping of code.\n");
FLAG_dual_map_code = false;
return;
}
void* region = reinterpret_cast<void*>(vm->region_.start());
void* alias = reinterpret_cast<void*>(vm->alias_.start());
if (region == alias ||
mprotect(region, size, PROT_READ) != 0 || // Remove PROT_WRITE.
mprotect(alias, size, PROT_READ | PROT_EXEC) != 0) { // Add PROT_EXEC.
LOG_INFO("mprotect fails; disabling dual mapping of code.\n");
FLAG_dual_map_code = false;
}
delete vm;
}
#endif // defined(DUAL_MAPPING_SUPPORTED)
#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_ANDROID)
FILE* fp = fopen("/proc/sys/vm/max_map_count", "r");
if (fp != nullptr) {
@@ -260,58 +219,6 @@ void VirtualMemory::Cleanup() {
#endif // defined(DART_COMPRESSED_POINTERS)
}
bool VirtualMemory::DualMappingEnabled() {
return FLAG_dual_map_code;
}
#if defined(DUAL_MAPPING_SUPPORTED)
// Do not leak file descriptors to child processes.
#if !defined(MFD_CLOEXEC)
#define MFD_CLOEXEC 0x0001U
#endif
// Wrapper to call memfd_create syscall.
static inline int memfd_create(const char* name, unsigned int flags) {
#if !defined(__NR_memfd_create)
errno = ENOSYS;
return -1;
#else
return syscall(__NR_memfd_create, name, flags);
#endif
}
static void* MapAligned(void* hint,
int fd,
int prot,
intptr_t size,
intptr_t alignment,
intptr_t allocated_size) {
ASSERT(size <= allocated_size);
void* address =
Map(hint, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (address == MAP_FAILED) {
return nullptr;
}
const uword base = reinterpret_cast<uword>(address);
const uword aligned_base = Utils::RoundUp(base, alignment);
// Guarantee the alignment by mapping at a fixed address inside the above
// mapping. Overlapping region will be automatically discarded in the above
// mapping. Manually discard non-overlapping regions.
address = Map(reinterpret_cast<void*>(aligned_base), size, prot,
MAP_SHARED | MAP_FIXED, fd, 0);
if (address == MAP_FAILED) {
Unmap(base, base + allocated_size);
return nullptr;
}
ASSERT(address == reinterpret_cast<void*>(aligned_base));
Unmap(base, aligned_base);
Unmap(aligned_base + size, base + allocated_size);
return address;
}
#endif // defined(DUAL_MAPPING_SUPPORTED)
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
intptr_t alignment,
bool is_executable,
@@ -320,9 +227,6 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
// When FLAG_write_protect_code is active, code memory (indicated by
// is_executable = true) is allocated as non-executable and later
// changed to executable via VirtualMemory::Protect.
//
// If FLAG_dual_map_code is active, the executable mapping will be mapped RX
// immediately and never changes protection until it is eventually unmapped.
ASSERT(Utils::IsAligned(size, PageSize()));
ASSERT(Utils::IsPowerOfTwo(alignment));
ASSERT(Utils::IsAligned(alignment, PageSize()));
@@ -361,53 +265,6 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
#endif // defined(DART_COMPRESSED_POINTERS)
const intptr_t allocated_size = size + alignment - PageSize();
#if defined(DUAL_MAPPING_SUPPORTED)
const bool dual_mapping =
is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
if (dual_mapping) {
int fd = memfd_create(name, MFD_CLOEXEC);
if (fd == -1) {
int error = errno;
if (error != ENOMEM) {
const int kBufferSize = 1024;
char error_buf[kBufferSize];
FATAL("memfd_create failed: %d (%s)", error,
Utils::StrError(error, error_buf, kBufferSize));
}
return nullptr;
}
if (ftruncate(fd, size) == -1) {
close(fd);
return nullptr;
}
const int region_prot = PROT_READ | PROT_WRITE;
void* region_ptr =
MapAligned(nullptr, fd, region_prot, size, alignment, allocated_size);
if (region_ptr == nullptr) {
close(fd);
return nullptr;
}
// The mapping will be RX and stays that way until it will eventually be
// unmapped.
MemoryRegion region(region_ptr, size);
// DUAL_MAPPING_SUPPORTED is false in DART_TARGET_OS_MACOS and hence support
// for MAP_JIT is not required here.
const int alias_prot = PROT_READ | PROT_EXEC;
void* hint = reinterpret_cast<void*>(&Dart_Initialize);
void* alias_ptr =
MapAligned(hint, fd, alias_prot, size, alignment, allocated_size);
close(fd);
if (alias_ptr == nullptr) {
const uword region_base = reinterpret_cast<uword>(region_ptr);
Unmap(region_base, region_base + size);
return nullptr;
}
ASSERT(region_ptr != alias_ptr);
MemoryRegion alias(alias_ptr, size);
return new VirtualMemory(region, alias, region);
}
#endif // defined(DUAL_MAPPING_SUPPORTED)
const int prot =
PROT_READ | PROT_WRITE |
((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);
@@ -504,10 +361,6 @@ VirtualMemory::~VirtualMemory() {
#endif // defined(DART_COMPRESSED_POINTERS)
if (vm_owns_region()) {
Unmap(reserved_.start(), reserved_.end());
const intptr_t alias_offset = AliasOffset();
if (alias_offset != 0) {
Unmap(reserved_.start() + alias_offset, reserved_.end() + alias_offset);
}
}
}
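
The deleted MapAligned helper relied on a standard alignment trick: over-reserve
with PROT_NONE, MAP_FIXED the real mapping at an aligned address inside the
reservation, and unmap the unused slack. A simplified sketch using anonymous
memory (the removed code mapped a memfd instead; assumes alignment is a power
of two and a multiple of the page size):

// Illustrative sketch, not VM code: obtain a mapping whose start address is
// aligned to `alignment` by carving it out of an oversized reservation.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

void* MapAlignedAnon(size_t size, size_t alignment) {
  const size_t reserved_size = size + alignment;  // enough room to slide
  void* reservation = mmap(nullptr, reserved_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reservation == MAP_FAILED) return nullptr;
  const uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  const uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  // MAP_FIXED replaces the overlapped part of the reservation in place.
  void* result = mmap(reinterpret_cast<void*>(aligned), size,
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (result == MAP_FAILED) {
    munmap(reservation, reserved_size);
    return nullptr;
  }
  // Give back the head and tail that fall outside the aligned range.
  if (aligned > base) munmap(reservation, aligned - base);
  const uintptr_t end = aligned + size;
  if (end < base + reserved_size) {
    munmap(reinterpret_cast<void*>(end), base + reserved_size - end);
  }
  return result;
}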


@@ -92,10 +92,6 @@ void VirtualMemory::Cleanup() {
#endif // defined(DART_COMPRESSED_POINTERS)
}
bool VirtualMemory::DualMappingEnabled() {
return false;
}
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
intptr_t alignment,
bool is_executable,


@@ -4,13 +4,9 @@
// Dart test program for testing dart:ffi async callbacks.
//
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--dwarf_stack_traces --no-retain_function_objects --no-retain_code_objects
// VMOptions=--test_il_serialization
// VMOptions=--profiler


@@ -4,13 +4,10 @@
//
// Dart test program for testing dart:ffi function pointers with callbacks.
//
// VMOptions=
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -4,13 +4,10 @@
// Dart test program for testing dart:ffi function pointers with callbacks.
//
// VMOptions=
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--dwarf_stack_traces --no-retain_function_objects --no-retain_code_objects
// VMOptions=--test_il_serialization
// VMOptions=--profiler


@@ -4,14 +4,11 @@
// Dart test program for testing dart:ffi function pointers with callbacks.
//
// VMOptions=
// VMOptions=--deterministic --optimization-counter-threshold=90
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -8,9 +8,6 @@
// VMOptions=--deterministic --optimization-counter-threshold=10
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --use-slow-path
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -8,9 +8,6 @@
// VMOptions=--deterministic --optimization-counter-threshold=10
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --use-slow-path
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -8,9 +8,6 @@
// VMOptions=--deterministic --optimization-counter-threshold=10
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --use-slow-path
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -4,13 +4,10 @@
// Dart test program for testing dart:ffi async callbacks.
//
// VMOptions=
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--dwarf_stack_traces --no-retain_function_objects --no-retain_code_objects
// VMOptions=--test_il_serialization
// VMOptions=--profiler


@@ -5,13 +5,10 @@
//
// Dart test program for testing dart:ffi function pointers with callbacks.
//
// VMOptions=
// VMOptions=--stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code
// VMOptions=--use-slow-path --write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';


@@ -2,12 +2,10 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// VMOptions=
// VMOptions=--stacktrace-every=100
// VMOptions=--deterministic --optimization-counter-threshold=500
// VMOptions=--deterministic --optimization-counter-threshold=-1
// VMOptions=--deterministic --optimization-counter-threshold=500 --no-dual-map-code --write-protect-code
// VMOptions=--deterministic --optimization-counter-threshold=-1 --no-dual-map-code --write-protect-code
// VMOptions=--no-dual-map-code --write-protect-code
// VMOptions=--no-dual-map-code --write-protect-code --stacktrace-every=100
//
// Dart test program for stress-testing boxing and GC in return paths from FFI
// trampolines.


@@ -8,9 +8,6 @@
// VMOptions=--deterministic --optimization-counter-threshold=10
// VMOptions=--use-slow-path
// VMOptions=--use-slow-path --stacktrace-every=100
// VMOptions=--write-protect-code --no-dual-map-code
// VMOptions=--write-protect-code --no-dual-map-code --use-slow-path
// VMOptions=--write-protect-code --no-dual-map-code --stacktrace-every=100
// SharedObjects=ffi_test_functions
import 'dart:ffi';