Allocate from the root VMAR on Fuchsia.

Removes the need for the VmarList.

Overallocate VMOs instead of VMARs to get aligned memory.

Change-Id: I0c2c85e952b8c6958e28ac734b5ba54c7712a512
Reviewed-on: https://dart-review.googlesource.com/30383
Reviewed-by: Zach Anderson <zra@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Author: Ryan Macnak
Date: 2017-12-19 21:47:44 +00:00
Committed-by: commit-bot@chromium.org
Parent: d528e1651f
Commit: a144e263ce
7 changed files with 73 additions and 183 deletions
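
The trick the commit message refers to: instead of reserving a dedicated VMAR to obtain an aligned region, map a VMO of size + alignment bytes from the root VMAR and unmap the slack on either side of the first aligned address inside the mapping. Below is a minimal sketch of the same over-allocate-and-trim technique, written against POSIX mmap/munmap rather than the Zircon calls in the diff; the helper name AllocateAlignedSketch and the requirement that alignment be a nonzero power of two and a multiple of the page size are assumptions for illustration.

#include <stdint.h>
#include <sys/mman.h>

// Hypothetical helper: over-allocate, then trim down to the aligned sub-range.
// Assumes alignment is a nonzero power of two and a page-size multiple.
void* AllocateAlignedSketch(size_t size, size_t alignment) {
  size_t allocated_size = size + alignment;  // an aligned base must fit inside
  void* mem = mmap(nullptr, allocated_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return nullptr;

  uintptr_t base = reinterpret_cast<uintptr_t>(mem);
  uintptr_t aligned_base = (base + alignment - 1) & ~(alignment - 1);

  if (aligned_base != base) {
    munmap(mem, aligned_base - base);  // unmap the unaligned leading slack
  }
  size_t trailing = (base + allocated_size) - (aligned_base + size);
  if (trailing != 0) {
    // Unmap whatever is left past the aligned region.
    munmap(reinterpret_cast<void*>(aligned_base + size), trailing);
  }
  return reinterpret_cast<void*>(aligned_base);
}

AllocateAligned in virtual_memory_fuchsia.cc below applies the same arithmetic with zx_vmar_map and zx_vmar_unmap against zx_vmar_root_self(), which is what makes per-allocation VMAR handles, and therefore the VmarList, unnecessary.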

runtime/vm/virtual_memory.cc

@@ -20,7 +20,7 @@ void VirtualMemory::Truncate(intptr_t new_size, bool try_unmap) {
   if (try_unmap &&
       (reserved_.size() ==
        region_.size()) && /* Don't create holes in reservation. */
-      FreeSubSegment(handle(), reinterpret_cast<void*>(start() + new_size),
+      FreeSubSegment(reinterpret_cast<void*>(start() + new_size),
                      size() - new_size)) {
     reserved_.set_size(new_size);
   }

runtime/vm/virtual_memory.h

@@ -24,7 +24,6 @@ class VirtualMemory {
   // The reserved memory is unmapped on destruction.
   ~VirtualMemory();
 
-  int32_t handle() const { return handle_; }
   uword start() const { return region_.start(); }
   uword end() const { return region_.end(); }
   void* address() const { return region_.pointer(); }
@@ -71,14 +70,13 @@ class VirtualMemory {
  private:
   // Free a sub segment. On operating systems that support it this
   // can give back the virtual memory to the system. Returns true on success.
-  static bool FreeSubSegment(int32_t handle, void* address, intptr_t size);
+  static bool FreeSubSegment(void* address, intptr_t size);
 
   // This constructor is only used internally when reserving new virtual spaces.
   // It does not reserve any virtual address space on its own.
   VirtualMemory(const MemoryRegion& region,
-                const MemoryRegion& reserved,
-                int32_t handle = 0)
-      : region_(region), reserved_(reserved), handle_(handle) {}
+                const MemoryRegion& reserved)
+      : region_(region), reserved_(reserved) {}
 
   MemoryRegion region_;
@@ -87,8 +85,6 @@ class VirtualMemory {
   // Its size might disagree with region_ due to Truncate.
   MemoryRegion reserved_;
 
-  int32_t handle_;
-
   static uword page_size_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(VirtualMemory);
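
The header comment above notes that FreeSubSegment can give virtual memory back to the system where the OS supports it; dropping the handle parameter works because both Zircon's root VMAR and POSIX munmap allow unmapping part of an existing mapping. A small sketch of that partial-unmap behavior, in POSIX terms and assuming 4 KiB pages:

#include <cassert>
#include <sys/mman.h>

int main() {
  const size_t kPage = 4096;  // assumed page size
  // Reserve four pages, then return the last two to the system, the way
  // FreeSubSegment shrinks a reservation from the tail.
  void* mem = mmap(nullptr, 4 * kPage, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* base = static_cast<char*>(mem);
  int rc = munmap(base + 2 * kPage, 2 * kPage);  // partial unmap is allowed
  assert(rc == 0);
  munmap(base, 2 * kPage);  // release the remainder
  return 0;
}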

runtime/vm/virtual_memory_android.cc

@@ -90,8 +90,7 @@ VirtualMemory::~VirtualMemory() {
   }
 }
 
-bool VirtualMemory::FreeSubSegment(int32_t handle,
-                                   void* address,
+bool VirtualMemory::FreeSubSegment(void* address,
                                    intptr_t size) {
   unmap(address, size);
   return true;

runtime/vm/virtual_memory_fuchsia.cc

@@ -35,132 +35,41 @@
 namespace dart {
 
-// The Zircon system call to protect memory regions (zx_vmar_protect) takes a
-// VM area (vmar) handle as first argument. We call VirtualMemory::Protect()
-// from the memory freelist code in vm/freelist.cc where the vmar handle is not
-// available. Additionally, there is no zx_vmar system call to retrieve a handle
-// for the leaf vmar given an address. Thus, when memory protections are
-// enabled, we maintain a sorted list of our leaf vmar handles that we can
-// query by address in calls to VirtualMemory::Protect().
-class VmarList : public AllStatic {
- public:
-  static void AddVmar(zx_handle_t vmar, uword addr, intptr_t size);
-  static void RemoveVmar(uword addr);
-  static zx_handle_t LookupVmar(uword addr);
-
- private:
-  static intptr_t LookupVmarIndexLocked(uword addr);
-
-  struct VmarListElement {
-    zx_handle_t vmar;
-    uword addr;
-    intptr_t size;
-  };
-
-  static Mutex* vmar_array_lock_;
-  static MallocGrowableArray<VmarListElement> vmar_array_;
-};
-
-Mutex* VmarList::vmar_array_lock_ = new Mutex();
-MallocGrowableArray<VmarList::VmarListElement> VmarList::vmar_array_;
-
-void VmarList::AddVmar(zx_handle_t vmar, uword addr, intptr_t size) {
-  MutexLocker ml(vmar_array_lock_);
-  LOG_INFO("AddVmar(%d, %lx, %ld)\n", vmar, addr, size);
-  // Sorted insert in increasing order.
-  const intptr_t length = vmar_array_.length();
-  intptr_t idx;
-  for (idx = 0; idx < length; idx++) {
-    const VmarListElement& m = vmar_array_.At(idx);
-    if (m.addr >= addr) {
-      break;
-    }
-  }
-#if defined(DEBUG)
-  if ((length > 0) && (idx < (length - 1))) {
-    const VmarListElement& m = vmar_array_.At(idx);
-    ASSERT(m.addr != addr);
-  }
-#endif
-  LOG_INFO("AddVmar(%d, %lx, %ld) at index = %ld\n", vmar, addr, size, idx);
-  VmarListElement new_mapping;
-  new_mapping.vmar = vmar;
-  new_mapping.addr = addr;
-  new_mapping.size = size;
-  vmar_array_.InsertAt(idx, new_mapping);
-}
-
-intptr_t VmarList::LookupVmarIndexLocked(uword addr) {
-  // Binary search for the vmar containing addr.
-  intptr_t imin = 0;
-  intptr_t imax = vmar_array_.length();
-  while (imax >= imin) {
-    const intptr_t imid = ((imax - imin) / 2) + imin;
-    const VmarListElement& mapping = vmar_array_.At(imid);
-    if ((mapping.addr + mapping.size) <= addr) {
-      imin = imid + 1;
-    } else if (mapping.addr > addr) {
-      imax = imid - 1;
-    } else {
-      return imid;
-    }
-  }
-  return -1;
-}
-
-zx_handle_t VmarList::LookupVmar(uword addr) {
-  MutexLocker ml(vmar_array_lock_);
-  LOG_INFO("LookupVmar(%lx)\n", addr);
-  const intptr_t idx = LookupVmarIndexLocked(addr);
-  if (idx == -1) {
-    LOG_ERR("LookupVmar(%lx) NOT FOUND\n", addr);
-    return ZX_HANDLE_INVALID;
-  }
-  LOG_INFO("LookupVmar(%lx) found at %ld\n", addr, idx);
-  return vmar_array_[idx].vmar;
-}
-
-void VmarList::RemoveVmar(uword addr) {
-  MutexLocker ml(vmar_array_lock_);
-  LOG_INFO("RemoveVmar(%lx)\n", addr);
-  const intptr_t idx = LookupVmarIndexLocked(addr);
-  ASSERT(idx != -1);
-#if defined(DEBUG)
-  zx_handle_t vmar = vmar_array_[idx].vmar;
-#endif
-  // Swap idx to the end, and then RemoveLast()
-  const intptr_t length = vmar_array_.length();
-  for (intptr_t i = idx; i < length - 1; i++) {
-    vmar_array_.Swap(i, i + 1);
-  }
-#if defined(DEBUG)
-  const VmarListElement& mapping = vmar_array_.Last();
-  ASSERT(mapping.vmar == vmar);
-#endif
-  vmar_array_.RemoveLast();
-}
-
 uword VirtualMemory::page_size_ = 0;
 
 void VirtualMemory::InitOnce() {
   page_size_ = getpagesize();
 }
 
-static void CloseVmar(zx_handle_t vmar) {
-  zx_status_t status = zx_vmar_destroy(vmar);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_destroy failed: %s\n", zx_status_get_string(status));
-  }
-  status = zx_handle_close(vmar);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_handle_close failed: %s\n", zx_status_get_string(status));
-  }
-}
-
 VirtualMemory* VirtualMemory::Allocate(intptr_t size,
                                        bool is_executable,
                                        const char* name) {
-  return AllocateAligned(size, 0, is_executable, name);
+  ASSERT(Utils::IsAligned(size, page_size_));
+
+  zx_handle_t vmo = ZX_HANDLE_INVALID;
+  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
+  if (status != ZX_OK) {
+    LOG_ERR("zx_vmo_create(%ld) failed: %s\n", size,
+            zx_status_get_string(status));
+    return NULL;
+  }
+
+  if (name != NULL) {
+    zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
+  }
+
+  const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+                         (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
+  uword address;
+  status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, size, flags, &address);
+  zx_handle_close(vmo);
+  if (status != ZX_OK) {
LOG_ERR("zx_vmar_map(%ld, %ld, %u) failed: %s\n", offset, size, flags,
zx_status_get_string(status));
+    return NULL;
+  }
+
+  MemoryRegion region(reinterpret_cast<void*>(address), size);
+  return new VirtualMemory(region, region);
 }
 
 VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
@@ -170,26 +79,13 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
   ASSERT(Utils::IsAligned(size, page_size_));
   ASSERT(Utils::IsAligned(alignment, page_size_));
   intptr_t allocated_size = size + alignment;
-  zx_handle_t vmar = ZX_HANDLE_INVALID;
-  uword addr = 0;
-  const uint32_t alloc_flags =
-      ZX_VM_FLAG_COMPACT | ZX_VM_FLAG_CAN_MAP_SPECIFIC |
-      ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
-      ZX_VM_FLAG_CAN_MAP_EXECUTE;
-  zx_status_t status = zx_vmar_allocate(zx_vmar_root_self(), 0, allocated_size,
-                                        alloc_flags, &vmar, &addr);
-  if (status != ZX_OK) {
-    LOG_ERR("zx_vmar_allocate(size = %ld) failed: %s\n", size,
-            zx_status_get_string(status));
-    return NULL;
-  }
-
+  zx_handle_t vmar = zx_vmar_root_self();
   zx_handle_t vmo = ZX_HANDLE_INVALID;
-  status = zx_vmo_create(size, 0u, &vmo);
+  zx_status_t status = zx_vmo_create(allocated_size, 0u, &vmo);
   if (status != ZX_OK) {
     LOG_ERR("zx_vmo_create(%ld) failed: %s\n", size,
             zx_status_get_string(status));
-    CloseVmar(vmar);
     return NULL;
   }
@@ -197,49 +93,54 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
     zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
   }
 
-  uword aligned_addr = alignment == 0 ? addr : Utils::RoundUp(addr, alignment);
-  const size_t offset = aligned_addr - addr;
-  const uint32_t map_flags = ZX_VM_FLAG_SPECIFIC | ZX_VM_FLAG_PERM_READ |
-                             ZX_VM_FLAG_PERM_WRITE |
-                             (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
-  uintptr_t mapped_addr;
-  status = zx_vmar_map(vmar, offset, vmo, 0, size, map_flags, &mapped_addr);
+  const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+                         (is_executable ? ZX_VM_FLAG_PERM_EXECUTE : 0);
+  uword base;
+  status = zx_vmar_map(vmar, 0u, vmo, 0u, allocated_size, flags, &base);
   zx_handle_close(vmo);
   if (status != ZX_OK) {
LOG_ERR("zx_vmar_map(%ld, %ld, %u) failed: %s\n", offset, size, flags,
zx_status_get_string(status));
-    CloseVmar(vmar);
     return NULL;
   }
-  if (mapped_addr != aligned_addr) {
-    LOG_ERR("zx_vmar_map: mapped_addr != aligned_addr: %lx != %lx\n",
-            mapped_addr, aligned_addr);
-    CloseVmar(vmar);
-    return NULL;
-  }
LOG_INFO("Commit(%lx, %ld, %s): success\n", addr, size,
executable ? "executable" : "");
VmarList::AddVmar(vmar, aligned_addr, size);
MemoryRegion region(reinterpret_cast<void*>(aligned_addr), size);
MemoryRegion reserved(reinterpret_cast<void*>(addr), allocated_size);
return new VirtualMemory(region, reserved, vmar);
uword aligned_base = Utils::RoundUp(base, alignment);
ASSERT(base <= aligned_base);
if (base != aligned_base) {
uword extra_leading_size = aligned_base - base;
status = zx_vmar_unmap(vmar, base, extra_leading_size);
if (status != ZX_OK) {
FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
}
allocated_size -= extra_leading_size;
}
if (allocated_size != size) {
uword extra_trailing_size = allocated_size - size;
status = zx_vmar_unmap(vmar, aligned_base + size, extra_trailing_size);
if (status != ZX_OK) {
FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
}
}
MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
return new VirtualMemory(region, region);
}
 
 VirtualMemory::~VirtualMemory() {
   if (vm_owns_region()) {
-    zx_handle_t vmar = static_cast<zx_handle_t>(handle());
-    CloseVmar(vmar);
-    VmarList::RemoveVmar(start());
+    zx_status_t status =
+        zx_vmar_unmap(zx_vmar_root_self(), reserved_.start(), reserved_.size());
+    if (status != ZX_OK) {
+      FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
+    }
   }
 }
 
-bool VirtualMemory::FreeSubSegment(int32_t handle,
-                                   void* address,
-                                   intptr_t size) {
-  zx_handle_t vmar = static_cast<zx_handle_t>(handle);
-  zx_status_t status =
-      zx_vmar_unmap(vmar, reinterpret_cast<uintptr_t>(address), size);
+bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
+  zx_status_t status = zx_vmar_unmap(
+      zx_vmar_root_self(), reinterpret_cast<uintptr_t>(address), size);
   if (status != ZX_OK) {
     LOG_ERR("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
     return false;
@@ -253,13 +154,10 @@ bool VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
   const uword start_address = reinterpret_cast<uword>(address);
   const uword end_address = start_address + size;
   const uword page_address = Utils::RoundDown(start_address, PageSize());
-  zx_handle_t vmar = VmarList::LookupVmar(page_address);
-  ASSERT(vmar != ZX_HANDLE_INVALID);
   uint32_t prot = 0;
   switch (mode) {
     case kNoAccess:
-      // ZX-426: zx_vmar_protect() requires at least one permission.
-      prot = ZX_VM_FLAG_PERM_READ;
+      prot = 0;
       break;
     case kReadOnly:
       prot = ZX_VM_FLAG_PERM_READ;
@@ -275,8 +173,8 @@ bool VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
           ZX_VM_FLAG_PERM_EXECUTE;
       break;
   }
-  zx_status_t status =
-      zx_vmar_protect(vmar, page_address, end_address - page_address, prot);
+  zx_status_t status = zx_vmar_protect(zx_vmar_root_self(), page_address,
+                                       end_address - page_address, prot);
   if (status != ZX_OK) {
     LOG_ERR("zx_vmar_protect(%lx, %lx, %x) failed: %s\n", page_address,
             end_address - page_address, prot, zx_status_get_string(status));
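
Protect() rounds the start of the requested range down to a page boundary before calling zx_vmar_protect, since protections apply to whole pages. A worked example of that rounding, using hypothetical addresses:

#include <stdint.h>

int main() {
  const uintptr_t kPageSize = 4096;  // assumed page size
  uintptr_t start_address = 0x10001234;            // mid-page start
  uintptr_t end_address = start_address + 0x1000;  // 0x10002234
  // Utils::RoundDown(start_address, PageSize()) in the code above:
  uintptr_t page_address = start_address & ~(kPageSize - 1);  // 0x10001000
  // Length handed to zx_vmar_protect, as in the code above:
  uintptr_t len = end_address - page_address;  // 0x1234 bytes
  return (page_address == 0x10001000 && len == 0x1234) ? 0 : 1;
}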

runtime/vm/virtual_memory_linux.cc

@@ -90,8 +90,7 @@ VirtualMemory::~VirtualMemory() {
   }
 }
 
-bool VirtualMemory::FreeSubSegment(int32_t handle,
-                                   void* address,
+bool VirtualMemory::FreeSubSegment(void* address,
                                    intptr_t size) {
   unmap(address, size);
   return true;

runtime/vm/virtual_memory_macos.cc

@@ -90,8 +90,7 @@ VirtualMemory::~VirtualMemory() {
   }
 }
 
-bool VirtualMemory::FreeSubSegment(int32_t handle,
-                                   void* address,
+bool VirtualMemory::FreeSubSegment(void* address,
                                    intptr_t size) {
   unmap(address, size);
   return true;

runtime/vm/virtual_memory_win.cc

@@ -70,8 +70,7 @@ VirtualMemory::~VirtualMemory() {
   }
 }
 
-bool VirtualMemory::FreeSubSegment(int32_t handle,
-                                   void* address,
+bool VirtualMemory::FreeSubSegment(void* address,
                                    intptr_t size) {
   // On Windows only the entire segment returned by VirtualAlloc
   // can be freed. Therefore we will have to waste these unused