Mirror of https://github.com/dart-lang/sdk (synced 2024-11-02 08:20:31 +00:00)
Reland "[VM runtime] Dual mapping of executable pages."
This is a reland of 44186dfdcd
Original change's description:
> [VM runtime] Dual mapping of executable pages.
>
> Change-Id: Iaad78d324e25462ce951f4df26974a6a368c50b7
> Reviewed-on: https://dart-review.googlesource.com/c/93377
> Commit-Queue: Régis Crelier <regis@google.com>
> Reviewed-by: Ryan Macnak <rmacnak@google.com>
Change-Id: I7a0caa078950637d9fe831732577fd2467061099
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/95263
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Parent: 2b501b21aa
Commit: 6da340bf76

19 changed files with 412 additions and 71 deletions
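For orientation before the diff: the change maps each page of jitted code twice, a writable (RW) view that the compiler and GC use and a read-execute (RX) alias that the CPU runs from, so no page is ever writable and executable at the same time. Below is a minimal standalone sketch of that idea on Linux; it is an illustration only, not code from this change, and it assumes glibc's memfd_create() and an x86-64 host, with error handling reduced to asserts.

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for memfd_create() in <sys/mman.h> (glibc >= 2.27)
#endif
#include <cassert>
#include <cstdint>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t size = getpagesize();
  int fd = memfd_create("dual_mapping_demo", 0);  // anonymous in-memory file
  assert(fd != -1 && ftruncate(fd, size) == 0);
  // Writable view: where code is emitted.
  auto* rw = static_cast<uint8_t*>(
      mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
  // Executable alias of the same physical bytes: where code is run.
  auto* rx = static_cast<uint8_t*>(
      mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0));
  assert(rw != MAP_FAILED && rx != MAP_FAILED);
  close(fd);
  const uint8_t code[] = {0xB8, 0x2A, 0x00, 0x00, 0x00, 0xC3};  // mov eax, 42; ret
  memcpy(rw, code, sizeof(code));              // write through the RW view
  auto fn = reinterpret_cast<int (*)()>(rx);   // execute through the RX view
  return fn() == 42 ? 0 : 1;
}

The VM's version, shown in the hunks below, additionally aligns both mappings and keeps the file descriptor only long enough to create the two views.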
@@ -352,6 +352,14 @@ typedef simd128_value_t fpu_register_t;
#error Unknown architecture.
#endif

// Determine whether dual mapping of code pages is supported.
#if !defined(USING_SIMULATOR) && \
    (defined(HOST_OS_LINUX) || defined(HOST_OS_FUCHSIA)) && \
    !defined(HOST_OS_ANDROID) && !defined(TARGET_OS_ANDROID) && \
    !defined(TARGET_ARCH_IA32)
#define DUAL_MAPPING_SUPPORTED 1
#endif

// Disable background threads by default on armv5te. The relevant
// implementations are uniprocessors.
#if !defined(TARGET_ARCH_ARM_5TE)

@@ -4,6 +4,7 @@
cc/AllocGeneric_Overflow: Crash, Fail # These tests are expected to crash on all platforms.
cc/ArrayNew_Overflow_Crash: Crash, Fail # These tests are expected to crash on all platforms.
cc/CodeExecutability: Crash, Fail # These tests are expected to crash on all platforms.
cc/CodeImmutability: Crash, Fail # These tests are expected to crash on all platforms.
cc/Dart2JSCompileAll: Fail, Crash # Issue 27369
cc/Dart2JSCompilerStats: Fail, Crash # Issue 27369

@@ -28,6 +29,7 @@ dart/use_bare_instructions_flag_test: Pass, Slow # Spawns several subprocesses
dart/appjit_cha_deopt_test: Pass, Slow # Quite slow in debug mode, uses --optimization-counter-threshold=100

[ $builder_tag == asan ]
cc/CodeExecutability: Fail, OK # Address Sanitizer turns a crash into a failure.
cc/CodeImmutability: Fail, OK # Address Sanitizer turns a crash into a failure.

[ $builder_tag == optimization_counter_threshold ]

@@ -12,6 +12,13 @@ namespace dart {
DEFINE_FLAG(bool, write_protect_code, true, "Write protect jitted code");

#if defined(DUAL_MAPPING_SUPPORTED)
DEFINE_FLAG(bool, dual_map_code, true, "Dual map jitted code, RW and RX");
#else
DEFINE_FLAG(bool, dual_map_code, false, "Dual map jitted code, RW and RX");
#endif  // defined(DUAL_MAPPING_SUPPORTED)

#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
WritableInstructionsScope::WritableInstructionsScope(uword address,
                                                     intptr_t size)
    : address_(address), size_(size) {

@@ -27,6 +34,7 @@ WritableInstructionsScope::~WritableInstructionsScope() {
                          VirtualMemory::kReadExecute);
  }
}
#endif  // defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)

bool MatchesPattern(uword end, const int16_t* pattern, intptr_t size) {
  // When breaking within generated code in GDB, it may overwrite individual

@@ -19,10 +19,12 @@ class RawCode;
class RawFunction;
class RawObject;

#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)
// Stack-allocated class to create a scope where the specified region
// [address, address + size] has write access enabled. This is used
// when patching generated code. Access is reset to read-execute in
// the destructor of this scope.
// Dual mapping of instructions pages is not supported on these target arch.
class WritableInstructionsScope : public ValueObject {
 public:
  WritableInstructionsScope(uword address, intptr_t size);

@@ -32,6 +34,7 @@ class WritableInstructionsScope : public ValueObject {
  const uword address_;
  const intptr_t size_;
};
#endif  // defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_DBC)

class CodePatcher : public AllStatic {
 public:

@@ -5,6 +5,7 @@
#include "vm/compiler/relocation.h"

#include "vm/code_patcher.h"
#include "vm/heap/pages.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/stub_code.h"

@@ -362,8 +363,11 @@ void CodeRelocator::ResolveCallToDestination(UnresolvedCall* unresolved_call,
  auto caller = Code::InstructionsOf(unresolved_call->caller);
  const int32_t distance = destination_text - call_text_offset;
  {
    PcRelativeCallPattern call(Instructions::PayloadStart(caller) +
                               call_offset);
    uword addr = Instructions::PayloadStart(caller) + call_offset;
    if (FLAG_write_protect_code) {
      addr -= HeapPage::Of(caller)->AliasOffset();
    }
    PcRelativeCallPattern call(addr);
    ASSERT(call.IsValid());
    call.set_distance(static_cast<int32_t>(distance));
    ASSERT(call.distance() == distance);

@@ -368,6 +368,10 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
  }

  static bool TryAcquireMarkBit(RawObject* raw_obj) {
    if (FLAG_write_protect_code && raw_obj->IsInstructions()) {
      // A non-writable alias mapping may exist for instruction pages.
      raw_obj = HeapPage::ToWritable(raw_obj);
    }
    if (!sync) {
      raw_obj->SetMarkBitUnsynchronized();
      return true;

@@ -57,11 +57,8 @@ DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions.");
HeapPage* HeapPage::Allocate(intptr_t size_in_words,
                             PageType type,
                             const char* name) {
  bool is_executable = (type == kExecutable);
  // Create the new page executable (RWX) only if we're not in W^X mode
  bool create_executable = !FLAG_write_protect_code && is_executable;
  VirtualMemory* memory = VirtualMemory::AllocateAligned(
      size_in_words << kWordSizeLog2, kPageSize, create_executable, name);
      size_in_words << kWordSizeLog2, kPageSize, type == kExecutable, name);
  if (memory == NULL) {
    return NULL;
  }

@@ -214,7 +211,7 @@ void HeapPage::WriteProtect(bool read_only) {

  VirtualMemory::Protection prot;
  if (read_only) {
    if (type_ == kExecutable) {
    if ((type_ == kExecutable) && (memory_->AliasOffset() == 0)) {
      prot = VirtualMemory::kReadExecute;
    } else {
      prot = VirtualMemory::kReadOnly;

@@ -40,7 +40,8 @@ class HeapPage {
  HeapPage* next() const { return next_; }
  void set_next(HeapPage* next) { next_ = next; }

  bool Contains(uword addr) { return memory_->Contains(addr); }
  bool Contains(uword addr) const { return memory_->Contains(addr); }
  intptr_t AliasOffset() const { return memory_->AliasOffset(); }

  uword object_start() const { return memory_->start() + ObjectStartOffset(); }
  uword object_end() const { return object_end_; }

@@ -70,7 +71,8 @@ class HeapPage {
  }

  // Warning: This does not work for objects on image pages because image pages
  // are not aligned.
  // are not aligned. However, it works for objects on large pages, because
  // only one object is allocated per large page.
  static HeapPage* Of(RawObject* obj) {
    ASSERT(obj->IsHeapObject());
    ASSERT(obj->IsOldObject());

@@ -78,10 +80,45 @@ class HeapPage {
                                        kPageMask);
  }

  static HeapPage* Of(uintptr_t addr) {
  // Warning: This does not work for addresses on image pages or on large pages.
  static HeapPage* Of(uword addr) {
    return reinterpret_cast<HeapPage*>(addr & kPageMask);
  }

  // Warning: This does not work for objects on image pages.
  static RawObject* ToExecutable(RawObject* obj) {
    HeapPage* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = RawObject::ToAddr(obj);
    if (memory->Contains(addr)) {
      return RawObject::FromAddr(addr + alias_offset);
    }
    // obj is executable.
    ASSERT(memory->ContainsAlias(addr));
    return obj;
  }

  // Warning: This does not work for objects on image pages.
  static RawObject* ToWritable(RawObject* obj) {
    HeapPage* page = Of(obj);
    VirtualMemory* memory = page->memory_;
    const intptr_t alias_offset = memory->AliasOffset();
    if (alias_offset == 0) {
      return obj;  // Not aliased.
    }
    uword addr = RawObject::ToAddr(obj);
    if (memory->ContainsAlias(addr)) {
      return RawObject::FromAddr(addr - alias_offset);
    }
    // obj is writable.
    ASSERT(memory->Contains(addr));
    return obj;
  }

  // 1 card = 128 slots.
  static const intptr_t kSlotsPerCardLog2 = 7;
  static const intptr_t kBytesPerCardLog2 = kWordSizeLog2 + kSlotsPerCardLog2;

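Illustration only, not part of the change: the two views of a dual-mapped page differ by a constant AliasOffset, so ToExecutable() and ToWritable() above reduce to adding or subtracting that offset. A tiny self-contained sketch with hypothetical addresses (the VM's real values come from the page's VirtualMemory):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical values; in the VM they come from memory_->AliasOffset()
  // and RawObject::ToAddr().
  const uintptr_t alias_offset = 0x30000000;   // distance between the two views
  const uintptr_t writable_addr = 0x20001040;  // object address in the RW view
  const uintptr_t executable_addr = writable_addr + alias_offset;  // ToExecutable
  assert(executable_addr - alias_offset == writable_addr);         // ToWritable
  return 0;
}
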
@@ -49,6 +49,10 @@ void VerifyPointersVisitor::VisitPointers(RawObject** first, RawObject** last) {
    RawObject* raw_obj = *current;
    if (raw_obj->IsHeapObject()) {
      if (!allocated_set_->Contains(raw_obj)) {
        if (raw_obj->IsInstructions() &&
            allocated_set_->Contains(HeapPage::ToWritable(raw_obj))) {
          continue;
        }
        uword raw_addr = RawObject::ToAddr(raw_obj);
        FATAL1("Invalid object pointer encountered %#" Px "\n", raw_addr);
      }

@@ -2015,7 +2015,12 @@ RawError* Object::Init(Isolate* isolate,
bool Object::IsReadOnly() const {
  if (FLAG_verify_handles && raw()->IsReadOnly()) {
    Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
    ASSERT(vm_isolate_heap->Contains(RawObject::ToAddr(raw())));
    uword addr = RawObject::ToAddr(raw());
    if (!vm_isolate_heap->Contains(addr)) {
      ASSERT(FLAG_write_protect_code);
      addr = RawObject::ToAddr(HeapPage::ToWritable(raw()));
      ASSERT(vm_isolate_heap->Contains(addr));
    }
  }
  return raw()->IsReadOnly();
}

@@ -2072,8 +2077,12 @@ void Object::CheckHandle() const {
    Isolate* isolate = Isolate::Current();
    Heap* isolate_heap = isolate->heap();
    Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
    ASSERT(isolate_heap->Contains(RawObject::ToAddr(raw_)) ||
           vm_isolate_heap->Contains(RawObject::ToAddr(raw_)));
    uword addr = RawObject::ToAddr(raw_);
    if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
      ASSERT(FLAG_write_protect_code);
      addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
      ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
    }
  }
}
#endif

@@ -14562,6 +14571,24 @@ RawCode* Code::FinalizeCode(FlowGraphCompiler* compiler,
                     object->raw());
    }

    // Write protect instructions and, if supported by OS, use dual mapping
    // for execution.
    if (FLAG_write_protect_code) {
      uword address = RawObject::ToAddr(instrs.raw());
      // Check if a dual mapping exists.
      instrs = Instructions::RawCast(HeapPage::ToExecutable(instrs.raw()));
      uword exec_address = RawObject::ToAddr(instrs.raw());
      if (exec_address != address) {
        VirtualMemory::Protect(reinterpret_cast<void*>(address),
                               instrs.raw()->HeapSize(),
                               VirtualMemory::kReadOnly);
        address = exec_address;
      }
      VirtualMemory::Protect(reinterpret_cast<void*>(address),
                             instrs.raw()->HeapSize(),
                             VirtualMemory::kReadExecute);
    }

    // Hook up Code and Instructions objects.
    code.SetActiveInstructions(instrs);
    code.set_instructions(instrs);

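Illustration only: when no dual mapping exists, the flow commented in the hunk above reduces to emitting into a read-write page and then flipping it to read-execute. A minimal standalone sketch of that flip using POSIX mprotect on an x86-64 host, not the VM's VirtualMemory::Protect:

#include <cassert>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t size = getpagesize();
  // Emit into an ordinary read-write page first (no execute permission yet).
  void* page = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);
  const unsigned char code[] = {0xB8, 0x07, 0x00, 0x00, 0x00, 0xC3};  // mov eax, 7; ret
  memcpy(page, code, sizeof(code));
  // Once emission is finished, drop write access and add execute access,
  // which is what FLAG_write_protect_code enforces for jitted code.
  assert(mprotect(page, size, PROT_READ | PROT_EXEC) == 0);
  auto fn = reinterpret_cast<int (*)()>(page);
  return fn() == 7 ? 0 : 1;
}
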
@@ -14572,13 +14599,6 @@ RawCode* Code::FinalizeCode(FlowGraphCompiler* compiler,
      code.set_object_pool(object_pool->raw());
    }

    if (FLAG_write_protect_code) {
      uword address = RawObject::ToAddr(instrs.raw());
      VirtualMemory::Protect(reinterpret_cast<void*>(address),
                             instrs.raw()->HeapSize(),
                             VirtualMemory::kReadExecute);
    }

#if defined(DART_PRECOMPILER)
    if (stats != nullptr) {
      stats->Finalize();

@@ -4651,6 +4651,7 @@ class ExceptionHandlers : public Object {

class Code : public Object {
 public:
  // When dual mapping, this returns the executable view.
  RawInstructions* active_instructions() const {
#if defined(DART_PRECOMPILED_RUNTIME)
    UNREACHABLE();

@@ -4660,6 +4661,7 @@ class Code : public Object {
#endif
  }

  // When dual mapping, these return the executable view.
  RawInstructions* instructions() const { return raw_ptr()->instructions_; }
  static RawInstructions* InstructionsOf(const RawCode* code) {
    return code->ptr()->instructions_;

@@ -5140,6 +5142,7 @@ class Code : public Object {

  FINAL_HEAP_OBJECT_IMPLEMENTATION(Code, Object);
  friend class Class;
  friend class CodeTestHelper;
  friend class SnapshotWriter;
  friend class StubCode;     // for set_object_pool
  friend class Precompiler;  // for set_object_pool

@@ -9140,8 +9143,12 @@ DART_FORCE_INLINE void Object::SetRaw(RawObject* value) {
    Isolate* isolate = Isolate::Current();
    Heap* isolate_heap = isolate->heap();
    Heap* vm_isolate_heap = Dart::vm_isolate()->heap();
    ASSERT(isolate_heap->Contains(RawObject::ToAddr(raw_)) ||
           vm_isolate_heap->Contains(RawObject::ToAddr(raw_)));
    uword addr = RawObject::ToAddr(raw_);
    if (!isolate_heap->Contains(addr) && !vm_isolate_heap->Contains(addr)) {
      ASSERT(FLAG_write_protect_code);
      addr = RawObject::ToAddr(HeapPage::ToWritable(raw_));
      ASSERT(isolate_heap->Contains(addr) || vm_isolate_heap->Contains(addr));
    }
  }
#endif
}

@@ -44,7 +44,12 @@ class ObjectGraph::Stack : public ObjectPointerVisitor {
      if (!include_vm_objects_ && !IsUserClass((*current)->GetClassId())) {
        continue;
      }
      (*current)->SetGraphMarked();
      if (FLAG_write_protect_code && (*current)->IsInstructions()) {
        // A non-writable alias mapping may exist for instruction pages.
        HeapPage::ToWritable(*current)->SetGraphMarked();
      } else {
        (*current)->SetGraphMarked();
      }
      Node node;
      node.ptr = current;
      node.obj = *current;

@@ -23,6 +23,7 @@

namespace dart {

DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);

static RawClass* CreateDummyClass(const String& class_name,

@@ -2507,8 +2508,56 @@ ISOLATE_UNIT_TEST_CASE(CodeImmutability) {
  if (!FLAG_write_protect_code) {
    // Since this test is expected to crash, crash if write protection of code
    // is switched off.
    // TODO(regis, fschneider): Should this be FATAL() instead?
    OS::DebugBreak();
    FATAL("Test requires --write-protect-code; skip by forcing expected crash");
  }
  MallocHooks::set_stack_trace_collection_enabled(
      stack_trace_collection_enabled);
}

class CodeTestHelper {
 public:
  static void SetInstructions(const Code& code,
                              const Instructions& instructions) {
    code.SetActiveInstructions(instructions);
    code.set_instructions(instructions);
  }
};

// Test for executability of generated instructions. The test crashes with a
// segmentation fault when executing the writeable view.
ISOLATE_UNIT_TEST_CASE(CodeExecutability) {
  bool stack_trace_collection_enabled =
      MallocHooks::stack_trace_collection_enabled();
  MallocHooks::set_stack_trace_collection_enabled(false);
  extern void GenerateIncrement(Assembler * assembler);
  ObjectPoolBuilder object_pool_builder;
  Assembler _assembler_(&object_pool_builder);
  GenerateIncrement(&_assembler_);
  const Function& function = Function::Handle(CreateFunction("Test_Code"));
  Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
      function, nullptr, &_assembler_, Code::PoolAttachment::kAttachPool));
  function.AttachCode(code);
  Instructions& instructions = Instructions::Handle(code.instructions());
  uword payload_start = instructions.PayloadStart();
  EXPECT_EQ(instructions.raw(), Instructions::FromPayloadStart(payload_start));
  // Execute the executable view of the instructions (default).
  Object& result =
      Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
  EXPECT_EQ(1, Smi::Cast(result).Value());
  // Switch to the writeable but non-executable view of the instructions.
  instructions ^= HeapPage::ToWritable(instructions.raw());
  payload_start = instructions.PayloadStart();
  EXPECT_EQ(instructions.raw(), Instructions::FromPayloadStart(payload_start));
  // Hook up Code and Instructions objects.
  CodeTestHelper::SetInstructions(code, instructions);
  function.AttachCode(code);
  // Try executing the generated code, expected to crash.
  result = DartEntry::InvokeFunction(function, Array::empty_array());
  EXPECT_EQ(1, Smi::Cast(result).Value());
  if (!FLAG_dual_map_code) {
    // Since this test is expected to crash, crash if dual mapping of code
    // is switched off.
    FATAL("Test requires --dual-map-code; skip by forcing expected crash");
  }
  MallocHooks::set_stack_trace_collection_enabled(
      stack_trace_collection_enabled);

@@ -558,9 +558,7 @@ void OS::SleepMicros(int64_t micros) {
  }
}

// TODO(regis, iposva): When this function is no longer called from the
// CodeImmutability test in object_test.cc, it will be called only from the
// simulator, which means that only the Intel implementation is needed.
// TODO(regis): Function called only from the simulator.
void OS::DebugBreak() {
  __builtin_trap();
}

@@ -17,12 +17,18 @@ bool VirtualMemory::InSamePage(uword address0, uword address1) {
void VirtualMemory::Truncate(intptr_t new_size) {
  ASSERT(Utils::IsAligned(new_size, PageSize()));
  ASSERT(new_size <= size());
  if (reserved_.size() == region_.size()) {  // Don't create holes in reservation.
  if (reserved_.size() ==
      region_.size()) {  // Don't create holes in reservation.
    FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
                   size() - new_size);
    reserved_.set_size(new_size);
    if (AliasOffset() != 0) {
      FreeSubSegment(reinterpret_cast<void*>(alias_.start() + new_size),
                     alias_.size() - new_size);
    }
  }
  region_.Subregion(region_, 0, new_size);
  alias_.Subregion(alias_, 0, new_size);
}

VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {

@@ -31,7 +37,7 @@ VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {
  MemoryRegion region(pointer, size);
  MemoryRegion reserved(0, 0);  // NULL reservation indicates VM should not
                                // attempt to free this memory.
  VirtualMemory* memory = new VirtualMemory(region, reserved);
  VirtualMemory* memory = new VirtualMemory(region, region, reserved);
  ASSERT(!memory->vm_owns_region());
  return memory;
}

@@ -28,10 +28,14 @@ class VirtualMemory {
  uword end() const { return region_.end(); }
  void* address() const { return region_.pointer(); }
  intptr_t size() const { return region_.size(); }
  intptr_t AliasOffset() const { return alias_.start() - region_.start(); }

  static void Init();

  bool Contains(uword addr) const { return region_.Contains(addr); }
  bool ContainsAlias(uword addr) const {
    return (AliasOffset() != 0) && alias_.Contains(addr);
  }

  // Changes the protection of the virtual memory area.
  static void Protect(void* address, intptr_t size, Protection mode);

@@ -72,14 +76,22 @@ class VirtualMemory {
  // can give back the virtual memory to the system. Returns true on success.
  static void FreeSubSegment(void* address, intptr_t size);

  // This constructor is only used internally when reserving new virtual spaces.
  // It does not reserve any virtual address space on its own.
  // These constructors are only used internally when reserving new virtual
  // spaces. They do not reserve any virtual address space on their own.
  VirtualMemory(const MemoryRegion& region,
                const MemoryRegion& alias,
                const MemoryRegion& reserved)
      : region_(region), reserved_(reserved) {}
      : region_(region), alias_(alias), reserved_(reserved) {}

  VirtualMemory(const MemoryRegion& region, const MemoryRegion& reserved)
      : region_(region), alias_(region), reserved_(reserved) {}

  MemoryRegion region_;

  // Optional secondary mapping of region_ to a virtual space with different
  // protection, e.g. allowing code execution.
  MemoryRegion alias_;

  // The underlying reservation not yet given back to the OS.
  // Its address might disagree with region_ due to aligned allocations.
  // Its size might disagree with region_ due to Truncate.

@@ -87,6 +99,10 @@ class VirtualMemory {

  static uword page_size_;

#if defined(HOST_OS_FUCHSIA)
  static uword base_;  // Cached base of root vmar.
#endif

  DISALLOW_IMPLICIT_CONSTRUCTORS(VirtualMemory);
};

|
|||
|
||||
namespace dart {
|
||||
|
||||
DECLARE_FLAG(bool, dual_map_code);
|
||||
DECLARE_FLAG(bool, write_protect_code);
|
||||
|
||||
uword VirtualMemory::page_size_ = 0;
|
||||
uword VirtualMemory::base_ = 0;
|
||||
|
||||
void VirtualMemory::Init() {
|
||||
page_size_ = getpagesize();
|
||||
|
||||
// Cache the base of zx_vmar_root_self() which is used to align mappings.
|
||||
zx_info_vmar_t buf[1];
|
||||
size_t actual;
|
||||
size_t avail;
|
||||
zx_status_t status =
|
||||
zx_object_get_info(zx_vmar_root_self(), ZX_INFO_VMAR, buf,
|
||||
sizeof(zx_info_vmar_t), &actual, &avail);
|
||||
if (status != ZX_OK) {
|
||||
FATAL1("zx_object_get_info failed: %s\n", zx_status_get_string(status));
|
||||
}
|
||||
base_ = buf[0].base;
|
||||
}
|
||||
|
||||
static void unmap(zx_handle_t vmar, uword start, uword end) {
|
||||
static void Unmap(zx_handle_t vmar, uword start, uword end) {
|
||||
ASSERT(start <= end);
|
||||
const uword size = end - start;
|
||||
if (size == 0) {
|
||||
|
@ -56,28 +70,61 @@ static void unmap(zx_handle_t vmar, uword start, uword end) {
|
|||
}
|
||||
}
|
||||
|
||||
static void* MapAligned(zx_handle_t vmar,
|
||||
zx_handle_t vmo,
|
||||
zx_vm_option_t options,
|
||||
uword size,
|
||||
uword alignment,
|
||||
uword vmar_base,
|
||||
uword padded_size) {
|
||||
uword base;
|
||||
zx_status_t status =
|
||||
zx_vmar_map(vmar, options, 0, vmo, 0u, padded_size, &base);
|
||||
LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", options, base, padded_size);
|
||||
|
||||
if (status != ZX_OK) {
|
||||
LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", options, base,
|
||||
padded_size, zx_status_get_string(status));
|
||||
return NULL;
|
||||
}
|
||||
const uword aligned_base = Utils::RoundUp(base, alignment);
|
||||
const zx_vm_option_t overwrite_options = options | ZX_VM_SPECIFIC_OVERWRITE;
|
||||
status = zx_vmar_map(vmar, overwrite_options, aligned_base - vmar_base, vmo,
|
||||
0u, size, &base);
|
||||
LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", overwrite_options,
|
||||
aligned_base - vmar_base, size);
|
||||
|
||||
if (status != ZX_OK) {
|
||||
LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", overwrite_options,
|
||||
aligned_base - vmar_base, size, zx_status_get_string(status));
|
||||
return NULL;
|
||||
}
|
||||
ASSERT(base == aligned_base);
|
||||
return reinterpret_cast<void*>(base);
|
||||
}
|
||||
|
||||
VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
|
||||
intptr_t alignment,
|
||||
bool is_executable,
|
||||
const char* name) {
|
||||
// When FLAG_write_protect_code is active, the VM allocates code
|
||||
// memory with !is_executable, and later changes to executable via
|
||||
// VirtualMemory::Protect, which requires ZX_RIGHT_EXECUTE on the
|
||||
// underlying VMO. Conservatively assume all memory needs to be
|
||||
// executable in this mode.
|
||||
// TODO(mdempsky): Make into parameter.
|
||||
const bool can_prot_exec = FLAG_write_protect_code;
|
||||
// When FLAG_write_protect_code is active, code memory (indicated by
|
||||
// is_executable = true) is allocated as non-executable and later
|
||||
// changed to executable via VirtualMemory::Protect, which requires
|
||||
// ZX_RIGHT_EXECUTE on the underlying VMO.
|
||||
// In addition, dual mapping of the same underlying code memory is provided.
|
||||
const bool dual_mapping =
|
||||
is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
|
||||
|
||||
ASSERT(Utils::IsAligned(size, page_size_));
|
||||
ASSERT(Utils::IsPowerOfTwo(alignment));
|
||||
ASSERT(Utils::IsAligned(alignment, page_size_));
|
||||
const intptr_t allocated_size = size + alignment - page_size_;
|
||||
const intptr_t padded_size = size + alignment - page_size_;
|
||||
|
||||
zx_handle_t vmar = zx_vmar_root_self();
|
||||
zx_handle_t vmo = ZX_HANDLE_INVALID;
|
||||
zx_status_t status = zx_vmo_create(allocated_size, 0u, &vmo);
|
||||
zx_status_t status = zx_vmo_create(size, 0u, &vmo);
|
||||
if (status != ZX_OK) {
|
||||
LOG_ERR("zx_vmo_create(%ld) failed: %s\n", size,
|
||||
LOG_ERR("zx_vmo_create(0x%lx) failed: %s\n", size,
|
||||
zx_status_get_string(status));
|
||||
return NULL;
|
||||
}
|
||||
|
@ -86,9 +133,9 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
|
|||
zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
|
||||
}
|
||||
|
||||
if (is_executable || can_prot_exec) {
|
||||
if (is_executable) {
|
||||
// Add ZX_RIGHT_EXECUTE permission to VMO, so it can be mapped
|
||||
// into memory as executable.
|
||||
// into memory as executable (now or later).
|
||||
status = zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
|
||||
if (status != ZX_OK) {
|
||||
LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
|
||||
|
@ -97,39 +144,59 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
|
|||
}
|
||||
}
|
||||
|
||||
const zx_vm_option_t options = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
|
||||
(is_executable ? ZX_VM_PERM_EXECUTE : 0);
|
||||
uword base;
|
||||
status = zx_vmar_map(vmar, options, 0u, vmo, 0u, allocated_size, &base);
|
||||
zx_handle_close(vmo);
|
||||
if (status != ZX_OK) {
|
||||
LOG_ERR("zx_vmar_map(%u, %ld) failed: %s\n", flags, size,
|
||||
zx_status_get_string(status));
|
||||
const zx_vm_option_t region_options =
|
||||
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
|
||||
((is_executable && !FLAG_write_protect_code) ? ZX_VM_PERM_EXECUTE : 0);
|
||||
void* region_ptr = MapAligned(vmar, vmo, region_options, size, alignment,
|
||||
base_, padded_size);
|
||||
if (region_ptr == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
MemoryRegion region(region_ptr, size);
|
||||
|
||||
const uword aligned_base = Utils::RoundUp(base, alignment);
|
||||
VirtualMemory* result;
|
||||
|
||||
unmap(vmar, base, aligned_base);
|
||||
unmap(vmar, aligned_base + size, base + allocated_size);
|
||||
|
||||
MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
|
||||
return new VirtualMemory(region, region);
|
||||
if (dual_mapping) {
|
||||
// ZX_VM_PERM_EXECUTE is added later via VirtualMemory::Protect.
|
||||
const zx_vm_option_t alias_options = ZX_VM_PERM_READ;
|
||||
void* alias_ptr = MapAligned(vmar, vmo, alias_options, size, alignment,
|
||||
base_, padded_size);
|
||||
if (alias_ptr == NULL) {
|
||||
const uword region_base = reinterpret_cast<uword>(region_ptr);
|
||||
Unmap(vmar, region_base, region_base + size);
|
||||
return NULL;
|
||||
}
|
||||
ASSERT(region_ptr != alias_ptr);
|
||||
MemoryRegion alias(alias_ptr, size);
|
||||
result = new VirtualMemory(region, alias, region);
|
||||
} else {
|
||||
result = new VirtualMemory(region, region, region);
|
||||
}
|
||||
zx_handle_close(vmo);
|
||||
return result;
|
||||
}
|
||||
|
||||
VirtualMemory::~VirtualMemory() {
|
||||
// Reserved region may be empty due to VirtualMemory::Truncate.
|
||||
if (vm_owns_region() && reserved_.size() != 0) {
|
||||
unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
|
||||
LOG_INFO("zx_vmar_unmap(%lx, %lx) success\n", reserved_.start(),
|
||||
Unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
|
||||
LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n", reserved_.start(),
|
||||
reserved_.size());
|
||||
|
||||
const intptr_t alias_offset = AliasOffset();
|
||||
if (alias_offset != 0) {
|
||||
Unmap(zx_vmar_root_self(), reserved_.start() + alias_offset,
|
||||
reserved_.end() + alias_offset);
|
||||
LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n",
|
||||
reserved_.start() + alias_offset, reserved_.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
|
||||
const uword start = reinterpret_cast<uword>(address);
|
||||
unmap(zx_vmar_root_self(), start, start + size);
|
||||
LOG_INFO("zx_vmar_unmap(%p, %lx) success\n", address, size);
|
||||
Unmap(zx_vmar_root_self(), start, start + size);
|
||||
LOG_INFO("zx_vmar_unmap(0x%p, 0x%lx) success\n", address, size);
|
||||
}
|
||||
|
||||
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
|
||||
|
@@ -161,12 +228,12 @@ void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
  }
  zx_status_t status = zx_vmar_protect(zx_vmar_root_self(), prot, page_address,
                                       end_address - page_address);
  LOG_INFO("zx_vmar_protect(%u, 0x%lx, 0x%lx)\n", prot, page_address,
           end_address - page_address);
  if (status != ZX_OK) {
    FATAL3("zx_vmar_protect(%lx, %lx) failed: %s\n", page_address,
    FATAL3("zx_vmar_protect(0x%lx, 0x%lx) failed: %s\n", page_address,
           end_address - page_address, zx_status_get_string(status));
  }
  LOG_INFO("zx_vmar_protect(%lx, %lx, %x) success\n", page_address,
           end_address - page_address, prot);
}

}  // namespace dart

@@ -9,6 +9,7 @@

#include <errno.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "platform/assert.h"

@@ -16,6 +17,13 @@

#include "vm/isolate.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_INFO(msg, ...) OS::PrintErr(msg, ##__VA_ARGS__)
#else
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

// standard MAP_FAILED causes "error: use of old-style cast" as it

@@ -23,6 +31,9 @@ namespace dart {
#undef MAP_FAILED
#define MAP_FAILED reinterpret_cast<void*>(-1)

DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);

uword VirtualMemory::page_size_ = 0;

void VirtualMemory::Init() {

@@ -45,17 +56,97 @@ static void unmap(uword start, uword end) {
  }
}

#if defined(DUAL_MAPPING_SUPPORTED)
// Wrapper to call memfd_create syscall.
static inline int memfd_create(const char* name, unsigned int flags) {
  return syscall(__NR_memfd_create, name, flags);
}

static void* MapAligned(int fd,
                        int prot,
                        intptr_t size,
                        intptr_t alignment,
                        intptr_t allocated_size) {
  void* address =
      mmap(NULL, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

  // Guarantee the alignment by mapping at a fixed address inside the above
  // mapping. Overlapping region will be automatically discarded in the above
  // mapping. Manually discard non-overlapping regions.
  address = mmap(reinterpret_cast<void*>(aligned_base), size, prot,
                 MAP_SHARED | MAP_FIXED, fd, 0);
  LOG_INFO("mmap(0x%" Px ", 0x%" Px ", %u, ...): %p\n", aligned_base, size,
           prot, address);
  if (address == MAP_FAILED) {
    unmap(base, base + allocated_size);
    return NULL;
  }
  ASSERT(address == reinterpret_cast<void*>(aligned_base));
  unmap(base, aligned_base);
  unmap(aligned_base + size, base + allocated_size);
  return address;
}
#endif  // defined(DUAL_MAPPING_SUPPORTED)

VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect.
  ASSERT(Utils::IsAligned(size, page_size_));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, page_size_));
  const intptr_t allocated_size = size + alignment - page_size_;
  const int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
#if defined(DUAL_MAPPING_SUPPORTED)
  int fd = -1;
  const bool dual_mapping =
      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
  if (dual_mapping) {
    fd = memfd_create("/dual_mapping", 0);
    if ((fd == -1) || (ftruncate(fd, size) == -1)) {
      close(fd);
      return NULL;
    }
    const int region_prot = PROT_READ | PROT_WRITE;
    void* region_ptr =
        MapAligned(fd, region_prot, size, alignment, allocated_size);
    if (region_ptr == NULL) {
      close(fd);
      return NULL;
    }
    MemoryRegion region(region_ptr, size);
    // PROT_EXEC is added later via VirtualMemory::Protect.
    const int alias_prot = PROT_READ;
    void* alias_ptr =
        MapAligned(fd, alias_prot, size, alignment, allocated_size);
    close(fd);
    if (alias_ptr == NULL) {
      const uword region_base = reinterpret_cast<uword>(region_ptr);
      unmap(region_base, region_base + size);
      return NULL;
    }
    ASSERT(region_ptr != alias_ptr);
    MemoryRegion alias(alias_ptr, size);
    return new VirtualMemory(region, alias, region);
  }
#endif  // defined(DUAL_MAPPING_SUPPORTED)
  const int prot =
      PROT_READ | PROT_WRITE |
      ((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);
  void* address =
      mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
      mmap(NULL, allocated_size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

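For orientation, the MapAligned() helper above over-reserves with PROT_NONE and then overlays the aligned sub-range with MAP_FIXED before unmapping the leftovers. A reduced standalone sketch of that trick, with hypothetical sizes and anonymous memory in place of the memfd, assuming POSIX mmap:

#include <cassert>
#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const uintptr_t page = static_cast<uintptr_t>(getpagesize());
  const uintptr_t size = 4 * page;        // hypothetical mapping size
  const uintptr_t alignment = 16 * page;  // hypothetical alignment requirement
  const uintptr_t padded_size = size + alignment - page;
  // Over-reserve with PROT_NONE so an aligned sub-range must exist inside.
  void* reservation = mmap(nullptr, padded_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(reservation != MAP_FAILED);
  const uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  const uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);
  // Overlay the real mapping at the aligned address; MAP_FIXED replaces the
  // overlapping part of the PROT_NONE reservation.
  void* region = mmap(reinterpret_cast<void*>(aligned_base), size,
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(region == reinterpret_cast<void*>(aligned_base));
  // Give back the unused head and tail of the reservation.
  if (aligned_base > base) {
    munmap(reservation, aligned_base - base);
  }
  const uintptr_t tail = base + padded_size - (aligned_base + size);
  if (tail > 0) {
    munmap(reinterpret_cast<void*>(aligned_base + size), tail);
  }
  return 0;
}
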
@@ -73,6 +164,10 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
VirtualMemory::~VirtualMemory() {
  if (vm_owns_region()) {
    unmap(reserved_.start(), reserved_.end());
    const intptr_t alias_offset = AliasOffset();
    if (alias_offset != 0) {
      unmap(reserved_.start() + alias_offset, reserved_.end() + alias_offset);
    }
  }
}

@@ -114,11 +209,15 @@ void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) failed\n", page_address,
             end_address - page_address, prot);
    FATAL2("mprotect error: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
  }
  LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) ok\n", page_address,
           end_address - page_address, prot);
}

}  // namespace dart

#endif  // defined(HOST_OS_ANDROID) || defined(HOST_OS_LINUX) || defined(HOST_OS_MACOS)
#endif  // defined(HOST_OS_ANDROID ... HOST_OS_LINUX ... HOST_OS_MACOS)

@@ -14,6 +14,8 @@

namespace dart {

DECLARE_FLAG(bool, write_protect_code);

uword VirtualMemory::page_size_ = 0;

void VirtualMemory::Init() {

@@ -26,11 +28,16 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect.
  ASSERT(Utils::IsAligned(size, page_size_));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, page_size_));
  intptr_t reserved_size = size + alignment - page_size_;
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  int prot = (is_executable && !FLAG_write_protect_code)
                 ? PAGE_EXECUTE_READWRITE
                 : PAGE_READWRITE;
  void* address = VirtualAlloc(NULL, reserved_size, MEM_RESERVE, prot);
  if (address == NULL) {
    return NULL;