Use parallel move resolver for optimized try-catch.

This CL has no change in functionality; it is purely cleanup and
refactoring. Instead of manually generating the moves at throwing
instructions in try-catch, construct a parallel move and use the resolver
to emit the native code. This eliminates a lot of duplicated code
across all platforms.
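
As a minimal sketch (condensed from the new architecture-independent
EmitTrySync in this CL; the skipped exception_var/stacktrace_var locals
and the safepoint bitmap update are elided), the shared path now amounts to:

  // Collect one move per live parameter/local into a single
  // ParallelMoveInstr instead of emitting per-platform stores.
  ParallelMoveInstr* move_instr = new ParallelMoveInstr();
  for (intptr_t i = 0; i < flow_graph().variable_count(); ++i) {
    if ((*idefs)[i]->IsConstant()) continue;  // Constants need no move.
    Location src = env->LocationAt(i);
    Location dest = Location::StackSlot(i - num_non_copied_params);
    move_instr->AddMove(dest, src);
  }
  // The parallel move resolver emits the native code for all moves at
  // once, handling ordering and temporaries on each platform.
  parallel_move_resolver()->EmitNativeCode(move_instr);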

It will also allow reusing stack space that is currently allocated
separately for each individual try-catch in a function.

I also added a few unrelated minor changes in various parts of the VM:
* Simplify guard code generation
* Resolve a refactoring TODO in deoptimization (see the sketch after this
  list)
* Improve names
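
The deoptimization TODO is resolved by passing the DeoptContext to
Materialize explicitly instead of fetching it through the current
isolate; roughly (condensed from the diff below):

  // Before: obtained globally inside each deferred slot.
  //   Isolate::Current()->deopt_context()->GetDeferredObject(index());
  // After: threaded through as a parameter, which also makes deferred
  // object materialization easier to exercise in a unit test.
  void DeferredObjectRef::Materialize(DeoptContext* deopt_context) {
    DeferredObject* obj = deopt_context->GetDeferredObject(index());
    *slot() = obj->object();
  }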

R=johnmccutchan@google.com

Review URL: https://codereview.chromium.org//119213002

git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@31326 260f80e4-7a28-3924-810f-c04153c831b5
fschneider@google.com 2013-12-20 09:41:42 +00:00
parent b5a2dea566
commit fe726e1971
15 changed files with 73 additions and 326 deletions

View file

@ -13,7 +13,7 @@ namespace dart {
DECLARE_FLAG(bool, trace_deoptimization_verbose);
void DeferredDouble::Materialize() {
void DeferredDouble::Materialize(DeoptContext* deopt_context) {
RawDouble** double_slot = reinterpret_cast<RawDouble**>(slot());
*double_slot = Double::New(value());
@ -24,7 +24,7 @@ void DeferredDouble::Materialize() {
}
void DeferredMint::Materialize() {
void DeferredMint::Materialize(DeoptContext* deopt_context) {
RawMint** mint_slot = reinterpret_cast<RawMint**>(slot());
ASSERT(!Smi::IsValid64(value()));
Mint& mint = Mint::Handle();
@ -38,7 +38,7 @@ void DeferredMint::Materialize() {
}
void DeferredFloat32x4::Materialize() {
void DeferredFloat32x4::Materialize(DeoptContext* deopt_context) {
RawFloat32x4** float32x4_slot = reinterpret_cast<RawFloat32x4**>(slot());
RawFloat32x4* raw_float32x4 = Float32x4::New(value());
*float32x4_slot = raw_float32x4;
@ -54,7 +54,7 @@ void DeferredFloat32x4::Materialize() {
}
void DeferredInt32x4::Materialize() {
void DeferredInt32x4::Materialize(DeoptContext* deopt_context) {
RawInt32x4** int32x4_slot = reinterpret_cast<RawInt32x4**>(slot());
RawInt32x4* raw_int32x4 = Int32x4::New(value());
*int32x4_slot = raw_int32x4;
@ -70,13 +70,8 @@ void DeferredInt32x4::Materialize() {
}
void DeferredObjectRef::Materialize() {
// TODO(turnidge): Consider passing the deopt_context to materialize
// instead of accessing it through the current isolate. It would
// make it easier to test deferred object materialization in a unit
// test eventually.
DeferredObject* obj =
Isolate::Current()->deopt_context()->GetDeferredObject(index());
void DeferredObjectRef::Materialize(DeoptContext* deopt_context) {
DeferredObject* obj = deopt_context->GetDeferredObject(index());
*slot() = obj->object();
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr("writing instance ref at %" Px ": %s\n",

View file

@ -13,6 +13,7 @@ namespace dart {
class Instance;
class RawInstance;
class RawObject;
class DeoptContext;
// Used by the deoptimization infrastructure to defer allocation of
// unboxed objects until frame is fully rewritten and GC is safe.
@ -27,7 +28,7 @@ class DeferredSlot {
RawInstance** slot() const { return slot_; }
DeferredSlot* next() const { return next_; }
virtual void Materialize() = 0;
virtual void Materialize(DeoptContext* deopt_context) = 0;
private:
RawInstance** const slot_;
@ -42,7 +43,7 @@ class DeferredDouble : public DeferredSlot {
DeferredDouble(double value, RawInstance** slot, DeferredSlot* next)
: DeferredSlot(slot, next), value_(value) { }
virtual void Materialize();
virtual void Materialize(DeoptContext* deopt_context);
double value() const { return value_; }
@ -58,7 +59,7 @@ class DeferredMint : public DeferredSlot {
DeferredMint(int64_t value, RawInstance** slot, DeferredSlot* next)
: DeferredSlot(slot, next), value_(value) { }
virtual void Materialize();
virtual void Materialize(DeoptContext* deopt_context);
int64_t value() const { return value_; }
@ -75,7 +76,7 @@ class DeferredFloat32x4 : public DeferredSlot {
DeferredSlot* next)
: DeferredSlot(slot, next), value_(value) { }
virtual void Materialize();
virtual void Materialize(DeoptContext* deopt_context);
simd128_value_t value() const { return value_; }
@ -92,7 +93,7 @@ class DeferredInt32x4 : public DeferredSlot {
DeferredSlot* next)
: DeferredSlot(slot, next), value_(value) { }
virtual void Materialize();
virtual void Materialize(DeoptContext* deopt_context);
simd128_value_t value() const { return value_; }
@ -111,7 +112,7 @@ class DeferredObjectRef : public DeferredSlot {
DeferredObjectRef(intptr_t index, RawInstance** slot, DeferredSlot* next)
: DeferredSlot(slot, next), index_(index) { }
virtual void Materialize();
virtual void Materialize(DeoptContext* deopt_context);
intptr_t index() const { return index_; }

View file

@ -301,7 +301,8 @@ void DeoptContext::FillDestFrame() {
}
static void FillDeferredSlots(DeferredSlot** slot_list) {
static void FillDeferredSlots(DeoptContext* deopt_context,
DeferredSlot** slot_list) {
DeferredSlot* slot = *slot_list;
*slot_list = NULL;
@ -309,7 +310,7 @@ static void FillDeferredSlots(DeferredSlot** slot_list) {
DeferredSlot* current = slot;
slot = slot->next();
current->Materialize();
current->Materialize(deopt_context);
delete current;
}
@ -325,8 +326,8 @@ intptr_t DeoptContext::MaterializeDeferredObjects() {
// objects can't be referencing other deferred objects because storing
// an object into a field is always conservatively treated as escaping by
// allocation sinking and load forwarding.
FillDeferredSlots(&deferred_boxes_);
FillDeferredSlots(&deferred_object_refs_);
FillDeferredSlots(this, &deferred_boxes_);
FillDeferredSlots(this, &deferred_object_refs_);
// Compute total number of artificial arguments used during deoptimization.
intptr_t deopt_arg_count = 0;

View file

@ -17,6 +17,7 @@
#include "vm/longjump.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
@ -297,6 +298,49 @@ void FlowGraphCompiler::Bailout(const char* reason) {
}
void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
ASSERT(is_optimizing());
Environment* env = instr->env();
CatchBlockEntryInstr* catch_block =
flow_graph().graph_entry()->GetCatchEntry(try_index);
const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
// Construct a ParallelMove instruction for parameters and locals. Skip the
// special locals exception_var and stacktrace_var since they will be filled
// when an exception is thrown. Constant locations are known to be the same
// at all instructions that may throw, and do not need to be materialized.
// Parameters first.
intptr_t i = 0;
const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
ParallelMoveInstr* move_instr = new ParallelMoveInstr();
for (; i < num_non_copied_params; ++i) {
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location src = env->LocationAt(i);
intptr_t dest_index = i - num_non_copied_params;
Location dest = Location::StackSlot(dest_index);
move_instr->AddMove(dest, src);
}
// Process locals. Skip exception_var and stacktrace_var.
intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
intptr_t ex_idx = local_base - catch_block->exception_var().index();
intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
for (; i < flow_graph().variable_count(); ++i) {
if (i == ex_idx || i == st_idx) continue;
if ((*idefs)[i]->IsConstant()) continue;
Location src = env->LocationAt(i);
intptr_t dest_index = i - num_non_copied_params;
Location dest = Location::StackSlot(dest_index);
move_instr->AddMove(dest, src);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
instr->locs()->stack_bitmap()->Set(dest_index, true);
}
parallel_move_resolver()->EmitNativeCode(move_instr);
}
intptr_t FlowGraphCompiler::StackSize() const {
if (is_optimizing_) {
return flow_graph_.graph_entry()->spill_slot_count();

View file

@ -473,8 +473,6 @@ class FlowGraphCompiler : public ValueObject {
void EmitFrameEntry();
void EmitTrySyncMove(intptr_t dest_offset, Location loc, bool* push_emitted);
void AddStaticCallTarget(const Function& function);
void GenerateDeferredCode();

View file

@ -718,74 +718,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
}
void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
Location loc,
bool* push_emitted) {
if (loc.IsConstant()) {
if (!*push_emitted) {
__ Push(R0);
*push_emitted = true;
}
__ LoadObject(R0, loc.constant());
__ StoreToOffset(kWord, R0, FP, dest_offset);
} else if (loc.IsRegister()) {
if (*push_emitted && (loc.reg() == R0)) {
__ ldr(R0, Address(SP, 0));
__ StoreToOffset(kWord, R0, FP, dest_offset);
} else {
__ StoreToOffset(kWord, loc.reg(), FP, dest_offset);
}
} else {
const intptr_t src_offset = loc.ToStackSlotOffset();
if (src_offset != dest_offset) {
if (!*push_emitted) {
__ Push(R0);
*push_emitted = true;
}
__ LoadFromOffset(kWord, R0, FP, src_offset);
__ StoreToOffset(kWord, R0, FP, dest_offset);
}
}
}
void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
ASSERT(is_optimizing());
Environment* env = instr->env();
CatchBlockEntryInstr* catch_block =
flow_graph().graph_entry()->GetCatchEntry(try_index);
const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
// Parameters.
intptr_t i = 0;
bool push_emitted = false;
const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
const intptr_t param_base =
kParamEndSlotFromFp + num_non_copied_params;
for (; i < num_non_copied_params; ++i) {
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location loc = env->LocationAt(i);
EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
}
// Process locals. Skip exception_var and stacktrace_var.
intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
intptr_t ex_idx = local_base - catch_block->exception_var().index();
intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
for (; i < flow_graph().variable_count(); ++i) {
if (i == ex_idx || i == st_idx) continue;
if ((*idefs)[i]->IsConstant()) continue;
Location loc = env->LocationAt(i);
EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
}
if (push_emitted) {
__ Pop(R0);
}
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) return;
Definition* defn = instr->AsDefinition();

View file

@ -742,75 +742,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
}
void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
Location loc,
bool* push_emitted) {
const Address dest(EBP, dest_offset);
if (loc.IsConstant()) {
if (!*push_emitted) {
__ pushl(EAX);
*push_emitted = true;
}
__ LoadObject(EAX, loc.constant());
__ movl(dest, EAX);
} else if (loc.IsRegister()) {
if (*push_emitted && loc.reg() == EAX) {
__ movl(EAX, Address(ESP, 0));
__ movl(dest, EAX);
} else {
__ movl(dest, loc.reg());
}
} else {
Address src = loc.ToStackSlotAddress();
if (!src.Equals(dest)) {
if (!*push_emitted) {
__ pushl(EAX);
*push_emitted = true;
}
__ movl(EAX, src);
__ movl(dest, EAX);
}
}
}
void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
ASSERT(is_optimizing());
Environment* env = instr->env();
CatchBlockEntryInstr* catch_block =
flow_graph().graph_entry()->GetCatchEntry(try_index);
const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
// Parameters.
intptr_t i = 0;
bool push_emitted = false;
const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
const intptr_t param_base =
kParamEndSlotFromFp + num_non_copied_params;
for (; i < num_non_copied_params; ++i) {
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location loc = env->LocationAt(i);
EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
}
// Process locals. Skip exception_var and stacktrace_var.
intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
intptr_t ex_idx = local_base - catch_block->exception_var().index();
intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
for (; i < flow_graph().variable_count(); ++i) {
if (i == ex_idx || i == st_idx) continue;
if ((*idefs)[i]->IsConstant()) continue;
Location loc = env->LocationAt(i);
EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
}
if (push_emitted) {
__ popl(EAX);
}
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) return;
Definition* defn = instr->AsDefinition();

View file

@ -741,74 +741,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
}
void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
Location loc,
bool* push_emitted) {
if (loc.IsConstant()) {
if (!*push_emitted) {
__ Push(T0);
*push_emitted = true;
}
__ LoadObject(T0, loc.constant());
__ StoreToOffset(T0, FP, dest_offset);
} else if (loc.IsRegister()) {
if (*push_emitted && loc.reg() == T0) {
__ lw(T0, Address(SP, 0));
__ StoreToOffset(T0, FP, dest_offset);
} else {
__ StoreToOffset(loc.reg(), FP, dest_offset);
}
} else {
const intptr_t src_offset = loc.ToStackSlotOffset();
if (src_offset != dest_offset) {
if (!*push_emitted) {
__ Push(T0);
*push_emitted = true;
}
__ LoadFromOffset(T0, FP, src_offset);
__ StoreToOffset(T0, FP, dest_offset);
}
}
}
void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
ASSERT(is_optimizing());
Environment* env = instr->env();
CatchBlockEntryInstr* catch_block =
flow_graph().graph_entry()->GetCatchEntry(try_index);
const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
// Parameters.
intptr_t i = 0;
bool push_emitted = false;
const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
const intptr_t param_base =
kParamEndSlotFromFp + num_non_copied_params;
for (; i < num_non_copied_params; ++i) {
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location loc = env->LocationAt(i);
EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
}
// Process locals. Skip exception_var and stacktrace_var.
intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
intptr_t ex_idx = local_base - catch_block->exception_var().index();
intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
for (; i < flow_graph().variable_count(); ++i) {
if (i == ex_idx || i == st_idx) continue;
if ((*idefs)[i]->IsConstant()) continue;
Location loc = env->LocationAt(i);
EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
}
if (push_emitted) {
__ Pop(T0);
}
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) return;
Definition* defn = instr->AsDefinition();

View file

@ -722,74 +722,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
}
void FlowGraphCompiler::EmitTrySyncMove(intptr_t dest_offset,
Location loc,
bool* push_emitted) {
const Address dest(RBP, dest_offset);
if (loc.IsConstant()) {
if (!*push_emitted) {
__ pushq(RAX);
*push_emitted = true;
}
__ LoadObject(RAX, loc.constant(), PP);
__ movq(dest, RAX);
} else if (loc.IsRegister()) {
if (*push_emitted && loc.reg() == RAX) {
__ movq(RAX, Address(RSP, 0));
__ movq(dest, RAX);
} else {
__ movq(dest, loc.reg());
}
} else {
Address src = loc.ToStackSlotAddress();
if (!src.Equals(dest)) {
if (!*push_emitted) {
__ pushq(RAX);
*push_emitted = true;
}
__ movq(RAX, src);
__ movq(dest, RAX);
}
}
}
void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
ASSERT(is_optimizing());
Environment* env = instr->env();
CatchBlockEntryInstr* catch_block =
flow_graph().graph_entry()->GetCatchEntry(try_index);
const GrowableArray<Definition*>* idefs = catch_block->initial_definitions();
// Parameters.
intptr_t i = 0;
bool push_emitted = false;
const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
const intptr_t param_base = kParamEndSlotFromFp + num_non_copied_params;
for (; i < num_non_copied_params; ++i) {
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location loc = env->LocationAt(i);
EmitTrySyncMove((param_base - i) * kWordSize, loc, &push_emitted);
}
// Process locals. Skip exception_var and stacktrace_var.
intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
intptr_t ex_idx = local_base - catch_block->exception_var().index();
intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
for (; i < flow_graph().variable_count(); ++i) {
if (i == ex_idx || i == st_idx) continue;
if ((*idefs)[i]->IsConstant()) continue;
Location loc = env->LocationAt(i);
EmitTrySyncMove((local_base - i) * kWordSize, loc, &push_emitted);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
instr->locs()->stack_bitmap()->Set(i - num_non_copied_params, true);
}
if (push_emitted) {
__ popq(RAX);
}
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) return;
Definition* defn = instr->AsDefinition();

View file

@ -4960,8 +4960,10 @@ class AliasedSet : public ZoneAllocated {
use = use->next_use()) {
Instruction* instr = use->instruction();
if (instr->IsPushArgument() ||
(instr->IsStoreVMField() && (use->use_index() != 1)) ||
(instr->IsStoreInstanceField() && (use->use_index() != 0)) ||
(instr->IsStoreVMField()
&& (use->use_index() != StoreVMFieldInstr::kObjectPos)) ||
(instr->IsStoreInstanceField()
&& (use->use_index() != StoreInstanceFieldInstr::kInstancePos)) ||
instr->IsStoreStaticField() ||
instr->IsPhi() ||
instr->IsAssertAssignable() ||

View file

@ -4251,19 +4251,19 @@ class StoreVMFieldInstr : public TemplateDefinition<2> {
: offset_in_bytes_(offset_in_bytes), type_(type) {
ASSERT(type.IsZoneHandle()); // May be null if field is not an instance.
SetInputAt(kValuePos, value);
SetInputAt(kDestPos, dest);
SetInputAt(kObjectPos, dest);
}
enum {
kValuePos = 0,
kDestPos = 1
kObjectPos = 1
};
DECLARE_INSTRUCTION(StoreVMField)
virtual CompileType* ComputeInitialType() const;
Value* value() const { return inputs_[kValuePos]; }
Value* dest() const { return inputs_[kDestPos]; }
Value* dest() const { return inputs_[kObjectPos]; }
intptr_t offset_in_bytes() const { return offset_in_bytes_; }
const AbstractType& type() const { return type_; }

View file

@ -1306,8 +1306,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* fail = (deopt != NULL) ? deopt : &fail_label;
const bool ok_is_fall_through = (deopt != NULL);
if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
// Currently we can't have different location summaries for optimized
@ -1492,12 +1490,10 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
}
if (!ok_is_fall_through) {
__ b(&ok);
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ b(&ok);
__ Bind(fail);
__ ldr(IP, FieldAddress(field_reg, Field::guarded_cid_offset()));
@ -1512,7 +1508,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
ASSERT(ok_is_fall_through);
// Field guard class has been initialized and is known.
if (field_reg != kNoRegister) {
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()));

View file

@ -1289,8 +1289,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* fail = (deopt != NULL) ? deopt : &fail_label;
const bool ok_is_fall_through = (deopt != NULL);
if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
// Currently we can't have different location summaries for optimized
@ -1491,12 +1489,9 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
if (!ok_is_fall_through) {
__ jmp(&ok);
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ jmp(&ok);
__ Bind(fail);
__ cmpl(FieldAddress(field_reg, Field::guarded_cid_offset()),
@ -1511,7 +1506,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
ASSERT(ok_is_fall_through);
// Field guard class has been initialized and is known.
if (field_reg != kNoRegister) {
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()));

View file

@ -1373,8 +1373,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* fail = (deopt != NULL) ? deopt : &fail_label;
const bool ok_is_fall_through = (deopt != NULL);
if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
// Currently we can't have different location summaries for optimized
@ -1556,12 +1554,10 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
}
if (!ok_is_fall_through) {
__ b(&ok);
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ b(&ok);
__ Bind(fail);
__ lw(CMPRES1, FieldAddress(field_reg, Field::guarded_cid_offset()));
@ -1576,7 +1572,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
ASSERT(ok_is_fall_through);
// Field guard class has been initialized and is known.
if (field_reg != kNoRegister) {
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()));

View file

@ -1188,8 +1188,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* fail = (deopt != NULL) ? deopt : &fail_label;
const bool ok_is_fall_through = (deopt != NULL);
if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
// Currently we can't have different location summaries for optimized
@ -1387,12 +1385,10 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
}
if (!ok_is_fall_through) {
__ jmp(&ok);
}
if (deopt == NULL) {
ASSERT(!compiler->is_optimizing());
__ jmp(&ok);
__ Bind(fail);
__ CompareImmediate(FieldAddress(field_reg, Field::guarded_cid_offset()),
@ -1407,7 +1403,6 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
ASSERT(compiler->is_optimizing());
ASSERT(deopt != NULL);
ASSERT(ok_is_fall_through);
// Field guard class has been initialized and is known.
if (field_reg != kNoRegister) {
__ LoadObject(field_reg, Field::ZoneHandle(field().raw()), PP);