Lazy deopt without code patching.

Keep a side table that maps a frame pointer (fp) back to a deopt pc.

R=fschneider@google.com

Review URL: https://codereview.chromium.org/2380403003 .
Ryan Macnak 2016-09-30 16:53:29 -07:00
parent 90b54ed78b
commit 6cff17c59a
47 changed files with 186 additions and 441 deletions
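In outline, the change replaces return-site patching with a per-isolate list of (fp, deopt pc) pairs: DeoptimizeAt records the pair and redirects the frame's saved return address at the DeoptimizeLazyFromReturn stub, and DeoptimizeCopyFrame later recovers the original pc from the list. Below is a minimal sketch of that mechanism using simplified standalone types (std::vector in place of the VM's MallocGrowableArray, free functions in place of the runtime entries); it is an illustration, not the VM code itself.

#include <cstdint>
#include <vector>

typedef uintptr_t uword;

struct PendingLazyDeopt {
  uword fp;  // Frame pointer identifying the optimized frame.
  uword pc;  // Return pc inside the optimized code to deopt at.
};

// Scheduling (cf. DeoptimizeAt): instead of patching the return site,
// record (fp, pc) and point the frame's saved return address at the
// lazy-deopt stub.
void ScheduleLazyDeopt(std::vector<PendingLazyDeopt>* table,
                       uword frame_fp, uword* saved_pc_slot,
                       uword lazy_deopt_stub_entry) {
  if (*saved_pc_slot == lazy_deopt_stub_entry) return;  // Already scheduled.
  table->push_back(PendingLazyDeopt{frame_fp, *saved_pc_slot});
  *saved_pc_slot = lazy_deopt_stub_entry;
}

// Consuming (cf. DeoptimizeCopyFrame): recover the original pc for this
// frame, then drop its entry and the entries of younger frames (the stack
// grows down, so those have smaller fps).
uword TakePendingDeoptPc(std::vector<PendingLazyDeopt>* table, uword fp) {
  uword deopt_pc = 0;
  for (intptr_t i = static_cast<intptr_t>(table->size()) - 1; i >= 0; i--) {
    if ((*table)[i].fp == fp) { deopt_pc = (*table)[i].pc; break; }
  }
  for (intptr_t i = static_cast<intptr_t>(table->size()) - 1; i >= 0; i--) {
    if ((*table)[i].fp <= fp) table->erase(table->begin() + i);
  }
  return deopt_pc;  // The VM asserts this is nonzero.
}

In the diff, the pairs live in a MallocGrowableArray owned by the Isolate (pending_deopts()), presumably so the entries stay off the GC-managed heap.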

View file

@ -1597,10 +1597,6 @@ class CodeDeserializationCluster : public DeserializationCluster {
code->ptr()->compile_timestamp_ = 0;
#endif
code->ptr()->state_bits_ = d->Read<int32_t>();
#if !defined(DART_PRECOMPILED_RUNTIME)
code->ptr()->lazy_deopt_return_pc_offset_ = -1;
code->ptr()->lazy_deopt_throw_pc_offset_ = -1;
#endif
}
}
};

View file

@ -1996,15 +1996,10 @@ DEOPT_REASONS(DEOPT_REASON_TO_TEXT)
}
void DeoptimizeAt(const Code& optimized_code, uword pc) {
void DeoptimizeAt(const Code& optimized_code, StackFrame* frame) {
ASSERT(optimized_code.is_optimized());
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
ICData::DeoptReasonId deopt_reason = ICData::kDeoptUnknown;
uint32_t deopt_flags = 0;
const TypedData& deopt_info = TypedData::Handle(zone,
optimized_code.GetDeoptInfoAtPc(pc, &deopt_reason, &deopt_flags));
ASSERT(!deopt_info.IsNull());
const Function& function = Function::Handle(zone, optimized_code.function());
const Error& error =
Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
@ -2018,24 +2013,18 @@ void DeoptimizeAt(const Code& optimized_code, uword pc) {
if (function.HasOptimizedCode()) {
function.SwitchToUnoptimizedCode();
}
// Patch call site (lazy deoptimization is quite rare, patching it twice
// is not a performance issue).
uword lazy_deopt_jump_return = optimized_code.GetLazyDeoptReturnPc();
uword lazy_deopt_jump_throw = optimized_code.GetLazyDeoptThrowPc();
#if !defined(TARGET_ARCH_DBC)
ASSERT(lazy_deopt_jump_return != 0);
ASSERT(lazy_deopt_jump_throw != 0);
#endif
#if defined(TARGET_ARCH_DBC)
const Instructions& instrs =
Instructions::Handle(zone, optimized_code.instructions());
{
WritableInstructionsScope writable(instrs.PayloadStart(), instrs.size());
CodePatcher::InsertDeoptimizationCallAt(pc, lazy_deopt_jump_return);
CodePatcher::InsertDeoptimizationCallAt(frame->pc());
if (FLAG_trace_patching) {
const String& name = String::Handle(function.name());
OS::PrintErr(
"InsertDeoptimizationCallAt: 0x%" Px " to 0x%" Px " for %s\n",
pc, lazy_deopt_jump_return, name.ToCString());
"InsertDeoptimizationCallAt: 0x%" Px " for %s\n",
frame->pc(), name.ToCString());
}
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(zone, optimized_code.exception_handlers());
@ -2043,12 +2032,34 @@ void DeoptimizeAt(const Code& optimized_code, uword pc) {
for (intptr_t i = 0; i < handlers.num_entries(); ++i) {
handlers.GetHandlerInfo(i, &info);
const uword patch_pc = instrs.PayloadStart() + info.handler_pc_offset;
CodePatcher::InsertDeoptimizationCallAt(patch_pc, lazy_deopt_jump_throw);
CodePatcher::InsertDeoptimizationCallAt(patch_pc);
if (FLAG_trace_patching) {
OS::PrintErr(" at handler 0x%" Px "\n", patch_pc);
}
}
}
#else // !DBC
uword lazy_deopt_entry =
StubCode::DeoptimizeLazyFromReturn_entry()->EntryPoint();
if (frame->pc() == lazy_deopt_entry) {
// Deopt already scheduled.
if (FLAG_trace_deoptimization) {
THR_Print("Lazy deopt already scheduled for fp=%" Pp "\n", frame->fp());
}
} else {
uword deopt_pc = frame->pc();
ASSERT(optimized_code.ContainsInstructionAt(deopt_pc));
PendingLazyDeopt pair(frame->fp(), deopt_pc);
thread->isolate()->pending_deopts()->Add(pair);
frame->set_pc(lazy_deopt_entry);
if (FLAG_trace_deoptimization) {
THR_Print("Lazy deopt scheduled for fp=%" Pp ", pc=%" Pp "\n",
frame->fp(), deopt_pc);
}
}
#endif // !DBC
// Mark code as dead (do not GC its embedded objects).
optimized_code.set_is_alive(false);
}
@ -2063,7 +2074,7 @@ void DeoptimizeFunctionsOnStack() {
while (frame != NULL) {
optimized_code = frame->LookupDartCode();
if (optimized_code.is_optimized()) {
DeoptimizeAt(optimized_code, frame->pc());
DeoptimizeAt(optimized_code, frame);
}
frame = iterator.NextFrame();
}
@ -2144,6 +2155,40 @@ DEFINE_LEAF_RUNTIME_ENTRY(intptr_t, DeoptimizeCopyFrame,
is_lazy_deopt ? "lazy-deopt" : "");
}
#if !defined(TARGET_ARCH_DBC)
if (is_lazy_deopt) {
uword deopt_pc = 0;
MallocGrowableArray<PendingLazyDeopt>* pending_deopts =
isolate->pending_deopts();
for (intptr_t i = pending_deopts->length() - 1; i >= 0; i--) {
if ((*pending_deopts)[i].fp() == caller_frame->fp()) {
deopt_pc = (*pending_deopts)[i].pc();
break;
}
}
for (intptr_t i = pending_deopts->length() - 1; i >= 0; i--) {
if ((*pending_deopts)[i].fp() <= caller_frame->fp()) {
pending_deopts->RemoveAt(i);
}
}
if (FLAG_trace_deoptimization) {
THR_Print("Lazy deopt fp=%" Pp " pc=%" Pp "\n",
caller_frame->fp(), deopt_pc);
THR_Print("%" Pd " pending lazy deopts\n",
pending_deopts->length());
}
ASSERT(deopt_pc != 0);
caller_frame->set_pc(deopt_pc);
ASSERT(caller_frame->pc() == deopt_pc);
} else {
if (FLAG_trace_deoptimization) {
THR_Print("Eager deopt fp=%" Pp " pc=%" Pp "\n",
caller_frame->fp(), caller_frame->pc());
}
}
ASSERT(optimized_code.ContainsInstructionAt(caller_frame->pc()));
#endif // !DBC
// Copy the saved registers from the stack.
fpu_register_t* fpu_registers;
intptr_t* cpu_registers;
@ -2204,6 +2249,7 @@ DEFINE_LEAF_RUNTIME_ENTRY(void, DeoptimizeFillFrame, 1, uword last_fp) {
deopt_context->set_dest_frame(caller_frame);
deopt_context->FillDestFrame();
#else
UNREACHABLE();
#endif // !DART_PRECOMPILED_RUNTIME

View file

@ -17,7 +17,7 @@ class Instance;
const char* DeoptReasonToCString(ICData::DeoptReasonId deopt_reason);
void DeoptimizeAt(const Code& optimized_code, uword pc);
void DeoptimizeAt(const Code& optimized_code, StackFrame* frame);
void DeoptimizeFunctionsOnStack();
double DartModulo(double a, double b);

View file

@ -68,7 +68,7 @@ class CodePatcher : public AllStatic {
static intptr_t InstanceCallSizeInBytes();
static void InsertDeoptimizationCallAt(uword start, uword target);
static void InsertDeoptimizationCallAt(uword start);
static void PatchPoolPointerCallAt(uword return_address,
const Code& code,

View file

@ -30,10 +30,8 @@ void CodePatcher::PatchStaticCallAt(uword return_address,
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
// The inserted call should not overlap the lazy deopt jump code.
ASSERT(start + CallPattern::DeoptCallPatternLengthInBytes() <= target);
CallPattern::InsertDeoptCallAt(start, target);
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}

View file

@ -70,10 +70,8 @@ void CodePatcher::PatchPoolPointerCallAt(uword return_address,
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
// The inserted call should not overlap the lazy deopt jump code.
ASSERT(start + CallPattern::kDeoptCallLengthInBytes <= target);
CallPattern::InsertDeoptCallAt(start, target);
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}

View file

@ -30,9 +30,8 @@ void CodePatcher::PatchStaticCallAt(uword return_address,
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
ASSERT(target == 0); // Always 0 on DBC.
CallPattern::InsertDeoptCallAt(start, target);
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
CallPattern::InsertDeoptCallAt(start);
}

View file

@ -177,13 +177,8 @@ void CodePatcher::PatchStaticCallAt(uword return_address,
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
// The inserted call should not overlap the lazy deopt jump code.
ASSERT(start + CallPattern::pattern_length_in_bytes() <= target);
*reinterpret_cast<uint8_t*>(start) = 0xE8;
CallPattern call(start);
call.SetTargetAddress(target);
CPU::FlushICache(start, CallPattern::pattern_length_in_bytes());
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}

View file

@ -29,10 +29,8 @@ void CodePatcher::PatchStaticCallAt(uword return_address,
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
// The inserted call should not overlap the lazy deopt jump code.
ASSERT(start + CallPattern::kDeoptCallLengthInBytes <= target);
CallPattern::InsertDeoptCallAt(start, target);
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}

View file

@ -293,13 +293,8 @@ intptr_t CodePatcher::InstanceCallSizeInBytes() {
}
void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
// The inserted call should not overlap the lazy deopt jump code.
ASSERT(start + ShortCallPattern::pattern_length_in_bytes() <= target);
*reinterpret_cast<uint8_t*>(start) = 0xE8;
ShortCallPattern call(start);
call.SetTargetAddress(target);
CPU::FlushICache(start, ShortCallPattern::pattern_length_in_bytes());
void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}

View file

@ -123,10 +123,12 @@ DeoptContext::DeoptContext(const StackFrame* frame,
if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) {
THR_Print(
"Deoptimizing (reason %d '%s') at pc %#" Px " '%s' (count %d)\n",
"Deoptimizing (reason %d '%s') at "
"pc=%" Pp " fp=%" Pp " '%s' (count %d)\n",
deopt_reason(),
DeoptReasonToCString(deopt_reason()),
frame->pc(),
frame->fp(),
function.ToFullyQualifiedCString(),
function.deoptimization_counter());
}
@ -343,11 +345,11 @@ void DeoptContext::FillDestFrame() {
if (FLAG_trace_deoptimization_verbose) {
for (intptr_t i = 0; i < frame_size; i++) {
intptr_t* to_addr = GetDestFrameAddressAt(i);
OS::PrintErr("*%" Pd ". [%p] 0x%" Px " [%s]\n",
i,
to_addr,
*to_addr,
deopt_instructions[i + (len - frame_size)]->ToCString());
THR_Print("*%" Pd ". [%p] 0x%" Px " [%s]\n",
i,
to_addr,
*to_addr,
deopt_instructions[i + (len - frame_size)]->ToCString());
}
}
}
@ -398,12 +400,12 @@ intptr_t DeoptContext::MaterializeDeferredObjects() {
intptr_t line, column;
script.GetTokenLocation(token_pos, &line, &column);
String& line_string = String::Handle(script.GetLine(line));
OS::PrintErr(" Function: %s\n", top_function.ToFullyQualifiedCString());
THR_Print(" Function: %s\n", top_function.ToFullyQualifiedCString());
char line_buffer[80];
OS::SNPrint(line_buffer, sizeof(line_buffer), " Line %" Pd ": '%s'",
line, line_string.ToCString());
OS::PrintErr("%s\n", line_buffer);
OS::PrintErr(" Deopt args: %" Pd "\n", deopt_arg_count);
THR_Print("%s\n", line_buffer);
THR_Print(" Deopt args: %" Pd "\n", deopt_arg_count);
}
return deopt_arg_count;

View file

@ -21,6 +21,7 @@
namespace dart {
DECLARE_FLAG(bool, trace_deoptimization);
DEFINE_FLAG(bool, print_stacktrace_at_throw, false,
"Prints a stack trace everytime a throw occurs.");
@ -128,6 +129,7 @@ static void BuildStackTrace(StacktraceBuilder* builder) {
while (frame != NULL) {
if (frame->IsDartFrame()) {
code = frame->LookupDartCode();
ASSERT(code.ContainsInstructionAt(frame->pc()));
offset = Smi::New(frame->pc() - code.PayloadStart());
builder->AddFrame(code, offset);
}
@ -214,6 +216,30 @@ static void JumpToExceptionHandler(Thread* thread,
RawObject* raw_exception = exception_object.raw();
RawObject* raw_stacktrace = stacktrace_object.raw();
#if !defined(TARGET_ARCH_DBC)
MallocGrowableArray<PendingLazyDeopt>* pending_deopts =
thread->isolate()->pending_deopts();
for (intptr_t i = pending_deopts->length() - 1; i >= 0; i--) {
if ((*pending_deopts)[i].fp() == frame_pointer) {
// Frame is scheduled for lazy deopt.
program_counter =
StubCode::DeoptimizeLazyFromThrow_entry()->EntryPoint();
if (FLAG_trace_deoptimization) {
THR_Print("Throwing to frame scheduled for lazy deopt fp=%" Pp "\n",
frame_pointer);
}
break;
}
}
for (intptr_t i = pending_deopts->length() - 1; i >= 0; i--) {
// Leave the mapping at fp itself for use in DeoptimizeCopyFrame.
if ((*pending_deopts)[i].fp() < frame_pointer) {
pending_deopts->RemoveAt(i);
}
}
#endif // !DBC
#if defined(USING_SIMULATOR)
// Unwinding of the C++ frames and destroying of their stack resources is done
// by the simulator, because the target stack_pointer is a simulated stack
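A condensed sketch of the JumpToExceptionHandler logic above, with the same simplified types as before: if the catching frame has a pending lazy deopt, control is redirected to the from-throw stub, and only entries strictly below the handler are pruned so the handler's own (fp, pc) mapping survives for DeoptimizeCopyFrame. SelectThrowTargetPc is a name invented here for illustration.

#include <cstdint>
#include <vector>

typedef uintptr_t uword;
struct PendingLazyDeopt { uword fp; uword pc; };

uword SelectThrowTargetPc(std::vector<PendingLazyDeopt>* table,
                          uword handler_fp, uword handler_pc,
                          uword lazy_deopt_from_throw_entry) {
  uword target_pc = handler_pc;
  for (intptr_t i = static_cast<intptr_t>(table->size()) - 1; i >= 0; i--) {
    if ((*table)[i].fp == handler_fp) {
      target_pc = lazy_deopt_from_throw_entry;  // Frame scheduled for deopt.
      break;
    }
  }
  // Strictly '<': keep the handler's own mapping for DeoptimizeCopyFrame.
  for (intptr_t i = static_cast<intptr_t>(table->size()) - 1; i >= 0; i--) {
    if ((*table)[i].fp < handler_fp) table->erase(table->begin() + i);
  }
  return target_pc;
}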

View file

@ -225,8 +225,6 @@ FlowGraphCompiler::FlowGraphCompiler(
LookupClass(Symbols::List()))),
parallel_move_resolver_(this),
pending_deoptimization_env_(NULL),
lazy_deopt_return_pc_offset_(Code::kInvalidPc),
lazy_deopt_throw_pc_offset_(Code::kInvalidPc),
deopt_id_to_ic_data_(NULL),
edge_counters_array_(Array::ZoneHandle()),
inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())),
@ -1031,8 +1029,6 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
code.set_pc_descriptors(descriptors);
code.set_lazy_deopt_return_pc_offset(lazy_deopt_return_pc_offset_);
code.set_lazy_deopt_throw_pc_offset(lazy_deopt_throw_pc_offset_);
}

View file

@ -819,9 +819,6 @@ class FlowGraphCompiler : public ValueObject {
// In future AddDeoptStub should be moved out of the instruction template.
Environment* pending_deoptimization_env_;
intptr_t lazy_deopt_return_pc_offset_;
intptr_t lazy_deopt_throw_pc_offset_;
ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;
Array& edge_counters_array_;

View file

@ -1124,21 +1124,6 @@ void FlowGraphCompiler::CompileGraph() {
__ bkpt(0);
ASSERT(assembler()->constant_pool_allowed());
GenerateDeferredCode();
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::DeoptCallPatternLengthInInstructions();
++i) {
__ nop();
}
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

View file

@ -1118,21 +1118,6 @@ void FlowGraphCompiler::CompileGraph() {
__ brk(0);
ASSERT(assembler()->constant_pool_allowed());
GenerateDeferredCode();
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ orr(R0, ZR, Operand(R0)); // nop
}
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ BranchPatchable(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ BranchPatchable(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

View file

@ -1135,17 +1135,6 @@ void FlowGraphCompiler::CompileGraph() {
__ int3();
GenerateDeferredCode();
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization.
__ nop(CallPattern::pattern_length_in_bytes());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

View file

@ -1138,21 +1138,6 @@ void FlowGraphCompiler::CompileGraph() {
__ break_(0);
GenerateDeferredCode();
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ nop();
}
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

View file

@ -1135,19 +1135,6 @@ void FlowGraphCompiler::CompileGraph() {
__ int3();
ASSERT(assembler()->constant_pool_allowed());
GenerateDeferredCode();
// Emit function patching code. This will be swapped with the first 13 bytes
// at entry point.
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization.
__ nop(ShortCallPattern::pattern_length_in_bytes());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromReturn_entry(), PP);
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromThrow_entry(), PP);
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

View file

@ -32,21 +32,6 @@ CallPattern::CallPattern(uword pc, const Code& code)
}
int CallPattern::DeoptCallPatternLengthInInstructions() {
const ARMVersion version = TargetCPUFeatures::arm_version();
if ((version == ARMv5TE) || (version == ARMv6)) {
return 5;
} else {
ASSERT(version == ARMv7);
return 3;
}
}
int CallPattern::DeoptCallPatternLengthInBytes() {
return DeoptCallPatternLengthInInstructions() * Instr::kInstrSize;
}
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
end_(pc),
@ -264,49 +249,6 @@ void CallPattern::SetTargetCode(const Code& target_code) const {
}
void CallPattern::InsertDeoptCallAt(uword pc, uword target_address) {
const ARMVersion version = TargetCPUFeatures::arm_version();
if ((version == ARMv5TE) || (version == ARMv6)) {
const uint32_t byte0 = (target_address & 0x000000ff);
const uint32_t byte1 = (target_address & 0x0000ff00) >> 8;
const uint32_t byte2 = (target_address & 0x00ff0000) >> 16;
const uint32_t byte3 = (target_address & 0xff000000) >> 24;
const uword mov_ip = 0xe3a0c400 | byte3; // mov ip, (byte3 rot 4)
const uword or1_ip = 0xe38cc800 | byte2; // orr ip, ip, (byte2 rot 8)
const uword or2_ip = 0xe38ccc00 | byte1; // orr ip, ip, (byte1 rot 12)
const uword or3_ip = 0xe38cc000 | byte0; // orr ip, ip, byte0
const uword blx_ip = 0xe12fff3c;
*reinterpret_cast<uword*>(pc + (0 * Instr::kInstrSize)) = mov_ip;
*reinterpret_cast<uword*>(pc + (1 * Instr::kInstrSize)) = or1_ip;
*reinterpret_cast<uword*>(pc + (2 * Instr::kInstrSize)) = or2_ip;
*reinterpret_cast<uword*>(pc + (3 * Instr::kInstrSize)) = or3_ip;
*reinterpret_cast<uword*>(pc + (4 * Instr::kInstrSize)) = blx_ip;
ASSERT(DeoptCallPatternLengthInBytes() == 5 * Instr::kInstrSize);
CPU::FlushICache(pc, DeoptCallPatternLengthInBytes());
} else {
ASSERT(version == ARMv7);
const uint16_t target_lo = target_address & 0xffff;
const uint16_t target_hi = target_address >> 16;
const uword movw_ip =
0xe300c000 | ((target_lo >> 12) << 16) | (target_lo & 0xfff);
const uword movt_ip =
0xe340c000 | ((target_hi >> 12) << 16) | (target_hi & 0xfff);
const uword blx_ip = 0xe12fff3c;
*reinterpret_cast<uword*>(pc + (0 * Instr::kInstrSize)) = movw_ip;
*reinterpret_cast<uword*>(pc + (1 * Instr::kInstrSize)) = movt_ip;
*reinterpret_cast<uword*>(pc + (2 * Instr::kInstrSize)) = blx_ip;
ASSERT(DeoptCallPatternLengthInBytes() == 3 * Instr::kInstrSize);
CPU::FlushICache(pc, DeoptCallPatternLengthInBytes());
}
}
SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
data_pool_index_(-1),

View file

@ -57,13 +57,6 @@ class CallPattern : public ValueObject {
RawCode* TargetCode() const;
void SetTargetCode(const Code& code) const;
// This constant length is only valid for inserted call patterns used for
// lazy deoptimization. Regular call pattern may vary in length.
static int DeoptCallPatternLengthInBytes();
static int DeoptCallPatternLengthInInstructions();
static void InsertDeoptCallAt(uword pc, uword target_address);
private:
const ObjectPool& object_pool_;

View file

@ -333,30 +333,6 @@ void CallPattern::SetTargetCode(const Code& target) const {
}
void CallPattern::InsertDeoptCallAt(uword pc, uword target_address) {
Instr* movz0 = Instr::At(pc + (0 * Instr::kInstrSize));
Instr* movk1 = Instr::At(pc + (1 * Instr::kInstrSize));
Instr* movk2 = Instr::At(pc + (2 * Instr::kInstrSize));
Instr* movk3 = Instr::At(pc + (3 * Instr::kInstrSize));
Instr* blr = Instr::At(pc + (4 * Instr::kInstrSize));
const uint32_t w0 = Utils::Low32Bits(target_address);
const uint32_t w1 = Utils::High32Bits(target_address);
const uint16_t h0 = Utils::Low16Bits(w0);
const uint16_t h1 = Utils::High16Bits(w0);
const uint16_t h2 = Utils::Low16Bits(w1);
const uint16_t h3 = Utils::High16Bits(w1);
movz0->SetMoveWideBits(MOVZ, IP0, h0, 0, kDoubleWord);
movk1->SetMoveWideBits(MOVK, IP0, h1, 1, kDoubleWord);
movk2->SetMoveWideBits(MOVK, IP0, h2, 2, kDoubleWord);
movk3->SetMoveWideBits(MOVK, IP0, h3, 3, kDoubleWord);
blr->SetUnconditionalBranchRegBits(BLR, IP0);
ASSERT(kDeoptCallLengthInBytes == 5 * Instr::kInstrSize);
CPU::FlushICache(pc, kDeoptCallLengthInBytes);
}
SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
data_pool_index_(-1),

View file

@ -64,14 +64,6 @@ class CallPattern : public ValueObject {
RawCode* TargetCode() const;
void SetTargetCode(const Code& target) const;
// This constant length is only valid for inserted call patterns used for
// lazy deoptimization. Regular call pattern may vary in length.
static const int kDeoptCallLengthInInstructions = 5;
static const int kDeoptCallLengthInBytes =
kDeoptCallLengthInInstructions * Instr::kInstrSize;
static void InsertDeoptCallAt(uword pc, uword target_address);
private:
const ObjectPool& object_pool_;

View file

@ -67,18 +67,6 @@ CallPattern::CallPattern(uword pc, const Code& code)
}
int CallPattern::DeoptCallPatternLengthInInstructions() {
UNIMPLEMENTED();
return 0;
}
int CallPattern::DeoptCallPatternLengthInBytes() {
UNIMPLEMENTED();
return 0;
}
NativeCallPattern::NativeCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
end_(pc),
@ -181,7 +169,7 @@ void CallPattern::SetTargetCode(const Code& target_code) const {
}
void CallPattern::InsertDeoptCallAt(uword pc, uword target_address) {
void CallPattern::InsertDeoptCallAt(uword pc) {
const uint8_t argc = Bytecode::IsCallOpcode(Bytecode::At(pc)) ?
Bytecode::DecodeArgc(Bytecode::At(pc)) : 0;
*reinterpret_cast<Instr*>(pc) = Bytecode::Encode(Bytecode::kDeopt, argc, 0);

View file

@ -57,12 +57,7 @@ class CallPattern : public ValueObject {
RawCode* TargetCode() const;
void SetTargetCode(const Code& code) const;
// This constant length is only valid for inserted call patterns used for
// lazy deoptimization. Regular call pattern may vary in length.
static int DeoptCallPatternLengthInBytes();
static int DeoptCallPatternLengthInInstructions();
static void InsertDeoptCallAt(uword pc, uword target_address);
static void InsertDeoptCallAt(uword pc);
private:
const ObjectPool& object_pool_;

View file

@ -217,24 +217,6 @@ void NativeCallPattern::set_native_function(NativeFunction func) const {
}
void CallPattern::InsertDeoptCallAt(uword pc, uword target_address) {
Instr* lui = Instr::At(pc + (0 * Instr::kInstrSize));
Instr* ori = Instr::At(pc + (1 * Instr::kInstrSize));
Instr* jr = Instr::At(pc + (2 * Instr::kInstrSize));
Instr* nop = Instr::At(pc + (3 * Instr::kInstrSize));
uint16_t target_lo = target_address & 0xffff;
uint16_t target_hi = target_address >> 16;
lui->SetImmInstrBits(LUI, ZR, T9, target_hi);
ori->SetImmInstrBits(ORI, T9, T9, target_lo);
jr->SetSpecialInstrBits(JALR, T9, ZR, RA);
nop->SetInstructionBits(Instr::kNopInstruction);
ASSERT(kDeoptCallLengthInBytes == 4 * Instr::kInstrSize);
CPU::FlushICache(pc, kDeoptCallLengthInBytes);
}
SwitchableCallPattern::SwitchableCallPattern(uword pc, const Code& code)
: object_pool_(ObjectPool::Handle(code.GetObjectPool())),
data_pool_index_(-1),

View file

@ -57,12 +57,6 @@ class CallPattern : public ValueObject {
RawCode* TargetCode() const;
void SetTargetCode(const Code& target) const;
static const int kDeoptCallLengthInInstructions = 4;
static const int kDeoptCallLengthInBytes =
kDeoptCallLengthInInstructions * Instr::kInstrSize;
static void InsertDeoptCallAt(uword pc, uword target_address);
private:
const ObjectPool& object_pool_;

View file

@ -12,13 +12,6 @@
namespace dart {
void ShortCallPattern::SetTargetAddress(uword target) const {
ASSERT(IsValid());
*reinterpret_cast<uint32_t*>(start() + 1) = target - start() - kLengthInBytes;
CPU::FlushICache(start() + 1, kWordSize);
}
bool DecodeLoadObjectFromPoolOrThread(uword pc,
const Code& code,
Object* obj) {

View file

@ -66,25 +66,6 @@ template<class P> class InstructionPattern : public ValueObject {
};
// 5 byte call instruction.
class ShortCallPattern : public InstructionPattern<ShortCallPattern> {
public:
explicit ShortCallPattern(uword pc) : InstructionPattern(pc) {}
void SetTargetAddress(uword new_target) const;
static int pattern_length_in_bytes() { return kLengthInBytes; }
static const int* pattern() {
static const int kCallPattern[kLengthInBytes] = {0xE8, -1, -1, -1, -1};
return kCallPattern;
}
private:
static const int kLengthInBytes = 5;
DISALLOW_COPY_AND_ASSIGN(ShortCallPattern);
};
class ReturnPattern : public InstructionPattern<ReturnPattern> {
public:
explicit ReturnPattern(uword pc) : InstructionPattern(pc) {}

View file

@ -2837,14 +2837,6 @@ LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Ensure space for patching return sites for lazy deopt.
if (!FLAG_precompiled_mode && compiler->is_optimizing()) {
for (intptr_t i = 0;
i < CallPattern::DeoptCallPatternLengthInInstructions();
++i) {
__ nop();
}
}
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),

View file

@ -2554,14 +2554,6 @@ LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Ensure space for patching return sites for lazy deopt.
if (!FLAG_precompiled_mode && compiler->is_optimizing()) {
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ orr(R0, ZR, Operand(R0)); // nop
}
}
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),

View file

@ -2548,10 +2548,6 @@ LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Ensure space for patching return sites for lazy deopt.
if (!FLAG_precompiled_mode && compiler->is_optimizing()) {
__ nop(CallPattern::pattern_length_in_bytes());
}
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),

View file

@ -2668,14 +2668,6 @@ LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Ensure space for patching return sites for lazy deopt.
if (!FLAG_precompiled_mode && compiler->is_optimizing()) {
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ nop();
}
}
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),

View file

@ -2564,10 +2564,6 @@ LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Ensure space for patching return sites for lazy deopt.
if (!FLAG_precompiled_mode && compiler->is_optimizing()) {
__ nop(ShortCallPattern::pattern_length_in_bytes());
}
__ Bind(compiler->GetJumpLabel(this));
compiler->AddExceptionHandler(catch_try_index(),
try_index(),

View file

@ -813,6 +813,7 @@ Isolate::Isolate(const Dart_IsolateFlags& api_flags)
gc_prologue_callback_(NULL),
gc_epilogue_callback_(NULL),
defer_finalization_count_(0),
pending_deopts_(new MallocGrowableArray<PendingLazyDeopt>),
deopt_context_(NULL),
is_service_isolate_(false),
stacktrace_(NULL),
@ -878,6 +879,8 @@ Isolate::~Isolate() {
constant_canonicalization_mutex_ = NULL;
delete megamorphic_lookup_mutex_;
megamorphic_lookup_mutex_ = NULL;
delete pending_deopts_;
pending_deopts_ = NULL;
delete message_handler_;
message_handler_ = NULL; // Fail fast if we send messages to a dead isolate.
ASSERT(deopt_context_ == NULL); // No deopt in progress when isolate deleted.

View file

@ -19,6 +19,7 @@
#include "vm/os_thread.h"
#include "vm/timer.h"
#include "vm/token_position.h"
#include "vm/growable_array.h"
namespace dart {
@ -70,6 +71,18 @@ class ThreadRegistry;
class UserTag;
class PendingLazyDeopt {
public:
PendingLazyDeopt(uword fp, uword pc) : fp_(fp), pc_(pc) { }
uword fp() { return fp_; }
uword pc() { return pc_; }
private:
uword fp_;
uword pc_;
};
class IsolateVisitor {
public:
IsolateVisitor() {}
@ -390,6 +403,9 @@ class Isolate : public BaseIsolate {
return object_id_ring_;
}
MallocGrowableArray<PendingLazyDeopt>* pending_deopts() {
return pending_deopts_;
}
bool IsDeoptimizing() const { return deopt_context_ != NULL; }
DeoptContext* deopt_context() const { return deopt_context_; }
void set_deopt_context(DeoptContext* value) {
@ -740,6 +756,7 @@ class Isolate : public BaseIsolate {
Dart_GcPrologueCallback gc_prologue_callback_;
Dart_GcEpilogueCallback gc_epilogue_callback_;
intptr_t defer_finalization_count_;
MallocGrowableArray<PendingLazyDeopt>* pending_deopts_;
DeoptContext* deopt_context_;
bool is_service_isolate_;

View file

@ -14229,8 +14229,6 @@ RawCode* Code::New(intptr_t pointer_offsets_length) {
result.set_is_alive(false);
result.set_comments(Comments::New(0));
result.set_compile_timestamp(0);
result.set_lazy_deopt_return_pc_offset(kInvalidPc);
result.set_lazy_deopt_throw_pc_offset(kInvalidPc);
result.set_pc_descriptors(Object::empty_descriptors());
}
return result.raw();
@ -14544,18 +14542,6 @@ void Code::SetActiveInstructions(RawInstructions* instructions) const {
}
uword Code::GetLazyDeoptReturnPc() const {
return (lazy_deopt_return_pc_offset() != kInvalidPc)
? PayloadStart() + lazy_deopt_return_pc_offset() : 0;
}
uword Code::GetLazyDeoptThrowPc() const {
return (lazy_deopt_throw_pc_offset() != kInvalidPc)
? PayloadStart() + lazy_deopt_throw_pc_offset() : 0;
}
RawStackmap* Code::GetStackmap(
uint32_t pc_offset, Array* maps, Stackmap* map) const {
// This code is used during iterating frames during a GC and hence it

View file

@ -4918,13 +4918,6 @@ class Code : public Object {
}
TokenPosition GetTokenIndexOfPC(uword pc) const;
enum {
kInvalidPc = -1
};
uword GetLazyDeoptReturnPc() const;
uword GetLazyDeoptThrowPc() const;
// Find pc, return 0 if not found.
uword GetPcForDeoptId(intptr_t deopt_id, RawPcDescriptors::Kind kind) const;
intptr_t GetDeoptIdForOsr(uword pc) const;
@ -4940,35 +4933,6 @@ class Code : public Object {
#endif
}
intptr_t lazy_deopt_return_pc_offset() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return 0;
#else
return raw_ptr()->lazy_deopt_return_pc_offset_;
#endif
}
void set_lazy_deopt_return_pc_offset(intptr_t pc) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
StoreNonPointer(&raw_ptr()->lazy_deopt_return_pc_offset_, pc);
#endif
}
intptr_t lazy_deopt_throw_pc_offset() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return 0;
#else
return raw_ptr()->lazy_deopt_throw_pc_offset_;
#endif
}
void set_lazy_deopt_throw_pc_offset(intptr_t pc) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
StoreNonPointer(&raw_ptr()->lazy_deopt_throw_pc_offset_, pc);
#endif
}
bool IsAllocationStubCode() const;
bool IsStubCode() const;
bool IsFunctionCode() const;

View file

@ -1165,10 +1165,6 @@ class RawCode : public RawObject {
// Alive: If true, the embedded object pointers will be visited during GC.
int32_t state_bits_;
// PC offsets for code patching.
NOT_IN_PRECOMPILED(int32_t lazy_deopt_return_pc_offset_);
NOT_IN_PRECOMPILED(int32_t lazy_deopt_throw_pc_offset_);
// Variable length data follows here.
int32_t* data() { OPEN_ARRAY_START(int32_t, int32_t); }
const int32_t* data() const { OPEN_ARRAY_START(int32_t, int32_t); }

View file

@ -53,6 +53,7 @@ class StackFrame : public ValueObject {
void set_pc(uword value) {
*reinterpret_cast<uword*>(sp() + (kSavedPcSlotFromSp * kWordSize)) = value;
pc_ = value;
}
void set_pc_marker(RawCode* code) {
@ -110,8 +111,21 @@ class StackFrame : public ValueObject {
}
uword GetCallerPc() const {
return *(reinterpret_cast<uword*>(
uword raw_pc = *(reinterpret_cast<uword*>(
fp() + (kSavedCallerPcSlotFromFp * kWordSize)));
ASSERT(raw_pc != StubCode::DeoptimizeLazyFromThrow_entry()->EntryPoint());
if (raw_pc == StubCode::DeoptimizeLazyFromReturn_entry()->EntryPoint()) {
uword fp = GetCallerFp();
MallocGrowableArray<PendingLazyDeopt>* pending_deopts =
isolate()->pending_deopts();
for (intptr_t i = 0; i < pending_deopts->length(); i++) {
if ((*pending_deopts)[i].fp() == fp) {
return (*pending_deopts)[i].pc();
}
}
UNREACHABLE();
}
return raw_pc;
}
uword fp_;
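Because a saved return address can now point at a stub rather than into the optimized code, the stack walker has to translate it back, as GetCallerPc does above. A self-contained sketch of that translation follows; TranslateCallerPc is a name invented here, and the types are simplified as in the earlier sketches.

#include <cstdint>
#include <vector>

typedef uintptr_t uword;
struct PendingLazyDeopt { uword fp; uword pc; };

// Map a raw saved return address back to a pc inside the optimized code so
// LookupDartCode and stack traces keep working. An entry must exist when
// the stub entry point is seen; the VM hits UNREACHABLE() otherwise.
uword TranslateCallerPc(const std::vector<PendingLazyDeopt>& table,
                        uword raw_pc, uword caller_fp,
                        uword lazy_deopt_from_return_entry) {
  if (raw_pc != lazy_deopt_from_return_entry) return raw_pc;
  for (size_t i = 0; i < table.size(); i++) {
    if (table[i].fp == caller_fp) return table[i].pc;
  }
  return 0;  // Unreachable in the VM: a pending entry is required.
}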

View file

@ -536,29 +536,27 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// LR: return address + call-instruction-size
// R0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, 0xf1f1f1f1);
__ Push(IP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, 0xe1e1e1e1);
__ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// LR: return address + call-instruction-size
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, 0xf1f1f1f1);
__ Push(IP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, 0xe1e1e1e1);
__ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

View file

@ -557,29 +557,27 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// LR: return address + call-instruction-size
// R0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, LR, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, 0xe1e1e1e1);
__ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// LR: return address + call-instruction-size
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, LR, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(LR, 0xe1e1e1e1);
__ ldr(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

View file

@ -461,27 +461,19 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// TOS: return address + call-instruction-size (5 bytes).
// EAX: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popl(EBX);
__ subl(EBX, Immediate(CallPattern::pattern_length_in_bytes()));
__ pushl(EBX);
// Return address for "call" to deopt stub.
__ pushl(Immediate(0xe1e1e1e1));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// TOS: return address + call-instruction-size (5 bytes).
// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popl(EBX);
__ subl(EBX, Immediate(CallPattern::pattern_length_in_bytes()));
__ pushl(EBX);
// Return address for "call" to deopt stub.
__ pushl(Immediate(0xe1e1e1e1));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

View file

@ -547,29 +547,27 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ Ret();
}
// RA: return address + call-instruction-size
// V0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(RA, 0xe1e1e1e1);
__ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// RA: return address + call-instruction-size
// V0: exception, must be preserved
// V1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
// Return address for "call" to deopt stub.
__ LoadImmediate(RA, 0xe1e1e1e1);
__ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

View file

@ -497,31 +497,25 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// TOS: return address + call-instruction-size (5 bytes).
// RAX: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popq(RBX);
__ subq(RBX, Immediate(ShortCallPattern::pattern_length_in_bytes()));
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(0xf1f1f1f1));
__ pushq(RBX);
// Return address for "call" to deopt stub.
__ pushq(Immediate(0xe1e1e1e1));
__ movq(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// TOS: return address + call-instruction-size (5 bytes).
// RAX: exception, must be preserved
// RDX: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popq(RBX);
__ subq(RBX, Immediate(ShortCallPattern::pattern_length_in_bytes()));
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(0xf1f1f1f1));
__ pushq(RBX);
// Return address for "call" to deopt stub.
__ pushq(Immediate(0xe1e1e1e1));
__ movq(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}
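The rewritten stubs no longer rewind the return address past a patched call; they push recognizable zap values in place of CODE_REG and the return address before entering the common deoptimization sequence. A sketch of the two synthetic slots, with the constants taken from the x64 diff above; the struct itself is illustrative only.

#include <cstdint>

// What DeoptimizeLazyFromReturn pushes before the shared
// GenerateDeoptimizationSequence runs; DeoptimizeCopyFrame later supplies
// the real deopt pc from the side table rather than trusting these slots.
struct LazyDeoptStubSlots {
  uint64_t zapped_code_reg;   // 0xf1f1f1f1: placeholder for CODE_REG.
  uint64_t zapped_return_pc;  // 0xe1e1e1e1: placeholder return address.
};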

View file

@ -92,6 +92,10 @@ class Zone;
StubCode::MonomorphicMiss_entry()->code(), NULL) \
V(RawCode*, ic_lookup_through_code_stub_, \
StubCode::ICCallThroughCode_entry()->code(), NULL) \
V(RawCode*, lazy_deopt_from_return_stub_, \
StubCode::DeoptimizeLazyFromReturn_entry()->code(), NULL) \
V(RawCode*, lazy_deopt_from_throw_stub_, \
StubCode::DeoptimizeLazyFromThrow_entry()->code(), NULL) \
#endif

View file

@ -76,7 +76,7 @@ void WeakCodeReferences::DisableCode() {
code = frame->LookupDartCode();
if (IsOptimizedCode(code_objects, code)) {
ReportDeoptimization(code);
DeoptimizeAt(code, frame->pc());
DeoptimizeAt(code, frame);
}
frame = iterator.NextFrame();
}