// Mirror of https://github.com/dart-lang/sdk
// (synced 2024-09-18 21:41:19 +00:00, commit 56853c6bc9).
// R=zra@google.com Review URL: https://codereview.chromium.org//885443012
// git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@43596
// 260f80e4-7a28-3924-810f-c04153c831b5
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
|
|
// for details. All rights reserved. Use of this source code is governed by a
|
|
// BSD-style license that can be found in the LICENSE file.
|
|
|
|
#include "vm/assembler.h"
|
|
|
|
#include "platform/utils.h"
|
|
#include "vm/cpu.h"
|
|
#include "vm/heap.h"
|
|
#include "vm/memory_region.h"
|
|
#include "vm/os.h"
|
|
#include "vm/zone.h"
|
|
|
|
namespace dart {
|
|
|
|
// When true, human-readable comments are recorded alongside emitted code
// (see Assembler::Comment / EmittingComments below).
DEFINE_FLAG(bool, code_comments, false,
            "Include comments into code and disassembly");
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_MIPS)
// Only meaningful on architectures with limited branch displacement.
DEFINE_FLAG(bool, use_far_branches, false,
            "Enable far branches for ARM and MIPS");
#endif

// Defined elsewhere; consulted by EmittingComments().
DECLARE_FLAG(bool, disassemble);
DECLARE_FLAG(bool, disassemble_optimized);
|
|
|
|
// Allocates a zone-backed buffer of |capacity| bytes for code emission.
// The memory is owned by the current isolate's zone, so it is never
// explicitly freed by the assembler.
static uword NewContents(intptr_t capacity) {
  uword contents =
      Isolate::Current()->current_zone()->AllocUnsafe(capacity);
#if defined(DEBUG)
  // Initialize the buffer with kBreakPointInstruction to force a break
  // point if we ever execute an uninitialized part of the code buffer.
  Assembler::InitializeMemoryWithBreakpoints(contents, capacity);
#endif
  return contents;
}
|
|
|
|
|
|
#if defined(DEBUG)
|
|
AssemblerBuffer::EnsureCapacity::EnsureCapacity(AssemblerBuffer* buffer) {
|
|
if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
|
|
// In debug mode, we save the assembler buffer along with the gap
|
|
// size before we start emitting to the buffer. This allows us to
|
|
// check that any single generated instruction doesn't overflow the
|
|
// limit implied by the minimum gap size.
|
|
buffer_ = buffer;
|
|
gap_ = ComputeGap();
|
|
// Make sure that extending the capacity leaves a big enough gap
|
|
// for any kind of instruction.
|
|
ASSERT(gap_ >= kMinimumGap);
|
|
// Mark the buffer as having ensured the capacity.
|
|
ASSERT(!buffer->HasEnsuredCapacity()); // Cannot nest.
|
|
buffer->has_ensured_capacity_ = true;
|
|
}
|
|
|
|
|
|
AssemblerBuffer::EnsureCapacity::~EnsureCapacity() {
|
|
// Unmark the buffer, so we cannot emit after this.
|
|
buffer_->has_ensured_capacity_ = false;
|
|
// Make sure the generated instruction doesn't take up more
|
|
// space than the minimum gap.
|
|
intptr_t delta = gap_ - ComputeGap();
|
|
ASSERT(delta <= kMinimumGap);
|
|
}
|
|
#endif
|
|
|
|
|
|
// Creates an empty assembler buffer backed by a zone-allocated 4KB
// contents area; pointer-offset bookkeeping starts with room for
// 16 entries.
AssemblerBuffer::AssemblerBuffer()
    : pointer_offsets_(new ZoneGrowableArray<intptr_t>(16)) {
  static const intptr_t kInitialBufferCapacity = 4 * KB;
  contents_ = NewContents(kInitialBufferCapacity);
  cursor_ = contents_;  // Nothing emitted yet.
  limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
  fixup_ = NULL;  // Empty fixup list.
#if defined(DEBUG)
  has_ensured_capacity_ = false;
  fixups_processed_ = false;
#endif

  // Verify internal state.
  ASSERT(Capacity() == kInitialBufferCapacity);
  ASSERT(Size() == 0);
}
|
|
|
|
|
|
AssemblerBuffer::~AssemblerBuffer() {
  // Nothing to free: contents_ is zone-allocated (see NewContents) and is
  // reclaimed with the zone.
}
|
|
|
|
|
|
void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
|
|
AssemblerFixup* fixup = fixup_;
|
|
while (fixup != NULL) {
|
|
fixup->Process(region, fixup->position());
|
|
fixup = fixup->previous();
|
|
}
|
|
}
|
|
|
|
|
|
void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
|
|
// Copy the instructions from the buffer.
|
|
MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
|
|
instructions.CopyFrom(0, from);
|
|
|
|
// Process fixups in the instructions.
|
|
ProcessFixups(instructions);
|
|
#if defined(DEBUG)
|
|
fixups_processed_ = true;
|
|
#endif
|
|
}
|
|
|
|
|
|
// Grows the underlying buffer (doubling, but never by more than 1MB per
// step) and relocates cursor_/limit_ into the new contents area.
void AssemblerBuffer::ExtendCapacity() {
  intptr_t old_size = Size();
  intptr_t old_capacity = Capacity();
  // Double the capacity, capped at an extra 1MB per extension.
  intptr_t new_capacity =
      Utils::Minimum(old_capacity * 2, old_capacity + 1 * MB);
  if (new_capacity < old_capacity) {
    FATAL("Unexpected overflow in AssemblerBuffer::ExtendCapacity");
  }

  // Allocate the new data area and copy contents of the old one to it.
  uword new_contents = NewContents(new_capacity);
  memmove(reinterpret_cast<void*>(new_contents),
          reinterpret_cast<void*>(contents_),
          old_size);

  // Compute the relocation delta and switch to the new contents area.
  intptr_t delta = new_contents - contents_;
  contents_ = new_contents;

  // Update the cursor and recompute the limit.
  cursor_ += delta;
  limit_ = ComputeLimit(new_contents, new_capacity);

  // Verify internal state.
  ASSERT(Capacity() == new_capacity);
  ASSERT(Size() == old_size);
}
|
|
|
|
|
|
class PatchCodeWithHandle : public AssemblerFixup {
|
|
public:
|
|
PatchCodeWithHandle(ZoneGrowableArray<intptr_t>* pointer_offsets,
|
|
const Object& object)
|
|
: pointer_offsets_(pointer_offsets), object_(object) {
|
|
}
|
|
|
|
void Process(const MemoryRegion& region, intptr_t position) {
|
|
// Patch the handle into the code. Once the instructions are installed into
|
|
// a raw code object and the pointer offsets are setup, the handle is
|
|
// resolved.
|
|
region.Store<const Object*>(position, &object_);
|
|
pointer_offsets_->Add(position);
|
|
}
|
|
|
|
virtual bool IsPointerOffset() const { return true; }
|
|
|
|
private:
|
|
ZoneGrowableArray<intptr_t>* pointer_offsets_;
|
|
const Object& object_;
|
|
};
|
|
|
|
|
|
// Returns how many of the pending fixups will contribute an entry to the
// pointer-offset table (i.e. how many embed an object pointer).
intptr_t AssemblerBuffer::CountPointerOffsets() const {
  intptr_t result = 0;
  for (AssemblerFixup* fixup = fixup_;
       fixup != NULL;
       fixup = fixup->previous_) {
    if (fixup->IsPointerOffset()) {
      ++result;
    }
  }
  return result;
}
|
|
|
|
|
|
// Emits a reference to |object| at the current cursor by queuing a
// PatchCodeWithHandle fixup and reserving one word of space that the
// fixup will later fill in (see FinalizeInstructions).
void AssemblerBuffer::EmitObject(const Object& object) {
  // Since we are going to store the handle as part of the fixup information
  // the handle needs to be a zone handle.
  ASSERT(object.IsNotTemporaryScopedHandle());
  // NOTE(review): only old-space objects may be embedded — presumably so
  // the embedded pointer stays valid across new-space GCs; confirm.
  ASSERT(object.IsOld());
  EmitFixup(new PatchCodeWithHandle(pointer_offsets_, object));
  cursor_ += kWordSize;  // Reserve space for pointer.
}
|
|
|
|
|
|
// Shared macros are implemented here.
|
|
void Assembler::Unimplemented(const char* message) {
|
|
const char* format = "Unimplemented: %s";
|
|
const intptr_t len = OS::SNPrint(NULL, 0, format, message);
|
|
char* buffer = reinterpret_cast<char*>(malloc(len + 1));
|
|
OS::SNPrint(buffer, len + 1, format, message);
|
|
Stop(buffer);
|
|
}
|
|
|
|
|
|
void Assembler::Untested(const char* message) {
|
|
const char* format = "Untested: %s";
|
|
const intptr_t len = OS::SNPrint(NULL, 0, format, message);
|
|
char* buffer = reinterpret_cast<char*>(malloc(len + 1));
|
|
OS::SNPrint(buffer, len + 1, format, message);
|
|
Stop(buffer);
|
|
}
|
|
|
|
|
|
void Assembler::Unreachable(const char* message) {
|
|
const char* format = "Unreachable: %s";
|
|
const intptr_t len = OS::SNPrint(NULL, 0, format, message);
|
|
char* buffer = reinterpret_cast<char*>(malloc(len + 1));
|
|
OS::SNPrint(buffer, len + 1, format, message);
|
|
Stop(buffer);
|
|
}
|
|
|
|
|
|
// Records a printf-style comment at the current emission position.
// No-op unless comment emission is enabled (see EmittingComments()).
// Comments longer than the 1KB local buffer are truncated by VSNPrint.
void Assembler::Comment(const char* format, ...) {
  if (EmittingComments()) {
    char buffer[1024];

    va_list args;
    va_start(args, format);
    OS::VSNPrint(buffer, sizeof(buffer), format, args);
    va_end(args);

    // Copy the text into an old-space zone-handled String so it survives
    // until GetCodeComments() materializes the list.
    comments_.Add(new CodeComment(buffer_.GetPosition(),
                                  String::ZoneHandle(String::New(buffer,
                                                                 Heap::kOld))));
  }
}
|
|
|
|
|
|
// Comments are recorded when explicitly requested via --code_comments or
// whenever any form of disassembly output is enabled.
bool Assembler::EmittingComments() {
  return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized;
}
|
|
|
|
|
|
// Materializes the accumulated comment list into a Code::Comments object
// holding parallel pc-offset / comment-string entries.
const Code::Comments& Assembler::GetCodeComments() const {
  const intptr_t count = comments_.length();
  Code::Comments& result = Code::Comments::New(count);

  for (intptr_t idx = 0; idx < count; idx++) {
    const CodeComment* entry = comments_[idx];
    result.SetPCOffsetAt(idx, entry->pc_offset());
    result.SetCommentAt(idx, entry->comment());
  }

  return result;
}
|
|
|
|
|
|
// Appends |obj| to the object pool and returns its index. Non-patchable
// entries are additionally recorded in a hash table so FindObject() can
// reuse them instead of growing the pool.
intptr_t ObjectPool::AddObject(const Object& obj, Patchability patchable) {
  // The object pool cannot be used in the vm isolate.
  ASSERT(Isolate::Current() != Dart::vm_isolate());
  // Lazily create the backing array on first use.
  if (object_pool_.IsNull()) {
    object_pool_ = GrowableObjectArray::New(Heap::kOld);
  }
  // The two arrays stay parallel: entry i's patchability is at index i.
  object_pool_.Add(obj, Heap::kOld);
  patchable_pool_entries_.Add(patchable);
  if (patchable == kNotPatchable) {
    // The object isn't patchable. Record the index for fast lookup.
    object_pool_index_table_.Insert(
        ObjIndexPair(&obj, object_pool_.Length() - 1));
  }
  return object_pool_.Length() - 1;
}
|
|
|
|
|
|
// Appends |label|'s target address to the pool, encoded as a Smi, and
// returns the new entry's index.
intptr_t ObjectPool::AddExternalLabel(const ExternalLabel* label,
                                      Patchability patchable) {
  ASSERT(Isolate::Current() != Dart::vm_isolate());
  const uword target = label->address();
  // The address must be 4-byte aligned so it can be reinterpreted as a
  // RawSmi below and stored in the object array.
  ASSERT(Utils::IsAligned(target, 4));
  const Smi& address_smi = Smi::Handle(reinterpret_cast<RawSmi*>(target));
  return AddObject(address_smi, patchable);
}
|
|
|
|
|
|
// Returns the pool index of |obj|, reusing an existing non-patchable
// entry when possible; otherwise appends a new entry via AddObject().
intptr_t ObjectPool::FindObject(const Object& obj, Patchability patchable) {
  // The object pool cannot be used in the vm isolate.
  ASSERT(Isolate::Current() != Dart::vm_isolate());

  // If the object is not patchable, check if we've already got it in the
  // object pool.
  if (patchable == kNotPatchable && !object_pool_.IsNull()) {
    intptr_t idx = object_pool_index_table_.Lookup(&obj);
    if (idx != ObjIndexPair::kNoIndex) {
      // Only non-patchable entries are inserted into the lookup table
      // (see AddObject), so the recorded patchability must agree.
      ASSERT(patchable_pool_entries_[idx] == kNotPatchable);
      return idx;
    }
  }

  return AddObject(obj, patchable);
}
|
|
|
|
|
|
// Returns the pool index for |label|'s target address (encoded as a
// Smi), reusing an existing entry when FindObject allows it.
intptr_t ObjectPool::FindExternalLabel(const ExternalLabel* label,
                                       Patchability patchable) {
  // The object pool cannot be used in the vm isolate.
  ASSERT(Isolate::Current() != Dart::vm_isolate());
  const uword target = label->address();
  // 4-byte alignment keeps the address representable as a RawSmi.
  ASSERT(Utils::IsAligned(target, 4));
  const Smi& address_smi = Smi::Handle(reinterpret_cast<RawSmi*>(target));
  return FindObject(address_smi, patchable);
}
|
|
|
|
|
|
} // namespace dart
|