Fix throwing into a frame marked for lazy deopt when the catch handler captures the stacktrace.

A lazy deopt reached via a return needs to preserve one value (the result), but one reached via a throw needs to preserve two values (the exception and the stacktrace).

Fixes #27446.

R=fschneider@google.com

Review URL: https://codereview.chromium.org/2374273002 .
Ryan Macnak 2016-09-28 16:51:45 -07:00
parent 82ec8d6c9e
commit e1a252a017
21 changed files with 389 additions and 90 deletions
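For context, here is a minimal Dart sketch of the pattern this CL fixes; it is an informal reduction of the regression test added at the end of the diff (the names callee/caller are illustrative and not from the CL). An optimized caller is marked for lazy deoptimization while its callee throws, so the catch handler has to receive both the exception and the stacktrace across the deopt:

import 'package:expect/expect.dart';

class C {
  var x = 42;
}

callee(C c, bool b) {
  if (b) {
    c.x = 2.5; // Changing the field's type is what deoptimizes optimized callers lazily here.
    throw 123; // Control re-enters the caller through its catch handler.
  }
}

caller(C c, bool b) {
  try {
    callee(c, b);
  } catch (e, st) {
    // Both values must survive the lazy deopt of this frame.
    Expect.equals(123, e);
    Expect.isTrue(st is StackTrace);
  }
  return c.x + 1;
}

main() {
  var c = new C();
  for (var i = 0; i < 10000; ++i) caller(c, false); // Let caller get optimized.
  Expect.equals(3.5, caller(c, true)); // Deoptimize caller via the throw path.
}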

@ -1598,7 +1598,8 @@ class CodeDeserializationCluster : public DeserializationCluster {
#endif
code->ptr()->state_bits_ = d->Read<int32_t>();
#if !defined(DART_PRECOMPILED_RUNTIME)
code->ptr()->lazy_deopt_pc_offset_ = -1;
code->ptr()->lazy_deopt_return_pc_offset_ = -1;
code->ptr()->lazy_deopt_throw_pc_offset_ = -1;
#endif
}
}

@ -2020,20 +2020,22 @@ void DeoptimizeAt(const Code& optimized_code, uword pc) {
}
// Patch call site (lazy deoptimization is quite rare, patching it twice
// is not a performance issue).
uword lazy_deopt_jump = optimized_code.GetLazyDeoptPc();
uword lazy_deopt_jump_return = optimized_code.GetLazyDeoptReturnPc();
uword lazy_deopt_jump_throw = optimized_code.GetLazyDeoptThrowPc();
#if !defined(TARGET_ARCH_DBC)
ASSERT(lazy_deopt_jump != 0);
ASSERT(lazy_deopt_jump_return != 0);
ASSERT(lazy_deopt_jump_throw != 0);
#endif
const Instructions& instrs =
Instructions::Handle(zone, optimized_code.instructions());
{
WritableInstructionsScope writable(instrs.PayloadStart(), instrs.size());
CodePatcher::InsertDeoptimizationCallAt(pc, lazy_deopt_jump);
CodePatcher::InsertDeoptimizationCallAt(pc, lazy_deopt_jump_return);
if (FLAG_trace_patching) {
const String& name = String::Handle(function.name());
OS::PrintErr(
"InsertDeoptimizationCallAt: 0x%" Px " to 0x%" Px " for %s\n",
pc, lazy_deopt_jump, name.ToCString());
pc, lazy_deopt_jump_return, name.ToCString());
}
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(zone, optimized_code.exception_handlers());
@ -2041,7 +2043,7 @@ void DeoptimizeAt(const Code& optimized_code, uword pc) {
for (intptr_t i = 0; i < handlers.num_entries(); ++i) {
handlers.GetHandlerInfo(i, &info);
const uword patch_pc = instrs.PayloadStart() + info.handler_pc_offset;
CodePatcher::InsertDeoptimizationCallAt(patch_pc, lazy_deopt_jump);
CodePatcher::InsertDeoptimizationCallAt(patch_pc, lazy_deopt_jump_throw);
if (FLAG_trace_patching) {
OS::PrintErr(" at handler 0x%" Px "\n", patch_pc);
}

@ -697,7 +697,7 @@ class DeoptPcMarkerInstr : public DeoptInstr {
function ^= deopt_context->ObjectAt(object_table_index_);
if (function.IsNull()) {
*reinterpret_cast<RawObject**>(dest_addr) = deopt_context->is_lazy_deopt()
? StubCode::DeoptimizeLazy_entry()->code()
? StubCode::DeoptimizeLazyFromReturn_entry()->code()
: StubCode::Deoptimize_entry()->code();
return;
}

@ -225,7 +225,8 @@ FlowGraphCompiler::FlowGraphCompiler(
LookupClass(Symbols::List()))),
parallel_move_resolver_(this),
pending_deoptimization_env_(NULL),
lazy_deopt_pc_offset_(Code::kInvalidPc),
lazy_deopt_return_pc_offset_(Code::kInvalidPc),
lazy_deopt_throw_pc_offset_(Code::kInvalidPc),
deopt_id_to_ic_data_(NULL),
edge_counters_array_(Array::ZoneHandle()),
inlined_code_intervals_(Array::ZoneHandle(Object::empty_array().raw())),
@ -1030,7 +1031,8 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
code.set_pc_descriptors(descriptors);
code.set_lazy_deopt_pc_offset(lazy_deopt_pc_offset_);
code.set_lazy_deopt_return_pc_offset(lazy_deopt_return_pc_offset_);
code.set_lazy_deopt_throw_pc_offset(lazy_deopt_throw_pc_offset_);
}

@ -819,7 +819,8 @@ class FlowGraphCompiler : public ValueObject {
// In future AddDeoptStub should be moved out of the instruction template.
Environment* pending_deoptimization_env_;
intptr_t lazy_deopt_pc_offset_;
intptr_t lazy_deopt_return_pc_offset_;
intptr_t lazy_deopt_throw_pc_offset_;
ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data_;

@ -1127,15 +1127,16 @@ void FlowGraphCompiler::CompileGraph() {
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization from
// deferred code.
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::DeoptCallPatternLengthInInstructions();
++i) {
__ nop();
}
lazy_deopt_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazy_entry());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

@ -1121,15 +1121,16 @@ void FlowGraphCompiler::CompileGraph() {
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization from
// deferred code.
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ orr(R0, ZR, Operand(R0)); // nop
}
lazy_deopt_pc_offset_ = assembler()->CodeSize();
__ BranchPatchable(*StubCode::DeoptimizeLazy_entry());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ BranchPatchable(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ BranchPatchable(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

@ -1138,11 +1138,12 @@ void FlowGraphCompiler::CompileGraph() {
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization from
// deferred code.
// Leave enough space for patching in case of lazy deoptimization.
__ nop(CallPattern::pattern_length_in_bytes());
lazy_deopt_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazy_entry());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

@ -1141,15 +1141,16 @@ void FlowGraphCompiler::CompileGraph() {
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization from
// deferred code.
// Leave enough space for patching in case of lazy deoptimization.
for (intptr_t i = 0;
i < CallPattern::kDeoptCallLengthInInstructions;
++i) {
__ nop();
}
lazy_deopt_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazy_entry());
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromReturn_entry());
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Branch(*StubCode::DeoptimizeLazyFromThrow_entry());
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

@ -1140,11 +1140,12 @@ void FlowGraphCompiler::CompileGraph() {
BeginCodeSourceRange();
if (is_optimizing() && !FLAG_precompiled_mode) {
// Leave enough space for patching in case of lazy deoptimization from
// deferred code.
// Leave enough space for patching in case of lazy deoptimization.
__ nop(ShortCallPattern::pattern_length_in_bytes());
lazy_deopt_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazy_entry(), PP);
lazy_deopt_return_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromReturn_entry(), PP);
lazy_deopt_throw_pc_offset_ = assembler()->CodeSize();
__ Jmp(*StubCode::DeoptimizeLazyFromThrow_entry(), PP);
}
EndCodeSourceRange(TokenPosition::kDartCodeEpilogue);
}

@ -14229,7 +14229,8 @@ RawCode* Code::New(intptr_t pointer_offsets_length) {
result.set_is_alive(false);
result.set_comments(Comments::New(0));
result.set_compile_timestamp(0);
result.set_lazy_deopt_pc_offset(kInvalidPc);
result.set_lazy_deopt_return_pc_offset(kInvalidPc);
result.set_lazy_deopt_throw_pc_offset(kInvalidPc);
result.set_pc_descriptors(Object::empty_descriptors());
}
return result.raw();
@ -14543,9 +14544,15 @@ void Code::SetActiveInstructions(RawInstructions* instructions) const {
}
uword Code::GetLazyDeoptPc() const {
return (lazy_deopt_pc_offset() != kInvalidPc)
? PayloadStart() + lazy_deopt_pc_offset() : 0;
uword Code::GetLazyDeoptReturnPc() const {
return (lazy_deopt_return_pc_offset() != kInvalidPc)
? PayloadStart() + lazy_deopt_return_pc_offset() : 0;
}
uword Code::GetLazyDeoptThrowPc() const {
return (lazy_deopt_throw_pc_offset() != kInvalidPc)
? PayloadStart() + lazy_deopt_throw_pc_offset() : 0;
}

@ -4922,7 +4922,8 @@ class Code : public Object {
kInvalidPc = -1
};
uword GetLazyDeoptPc() const;
uword GetLazyDeoptReturnPc() const;
uword GetLazyDeoptThrowPc() const;
// Find pc, return 0 if not found.
uword GetPcForDeoptId(intptr_t deopt_id, RawPcDescriptors::Kind kind) const;
@ -4939,18 +4940,32 @@ class Code : public Object {
#endif
}
intptr_t lazy_deopt_pc_offset() const {
intptr_t lazy_deopt_return_pc_offset() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return 0;
#else
return raw_ptr()->lazy_deopt_pc_offset_;
return raw_ptr()->lazy_deopt_return_pc_offset_;
#endif
}
void set_lazy_deopt_pc_offset(intptr_t pc) const {
void set_lazy_deopt_return_pc_offset(intptr_t pc) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
StoreNonPointer(&raw_ptr()->lazy_deopt_pc_offset_, pc);
StoreNonPointer(&raw_ptr()->lazy_deopt_return_pc_offset_, pc);
#endif
}
intptr_t lazy_deopt_throw_pc_offset() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return 0;
#else
return raw_ptr()->lazy_deopt_throw_pc_offset_;
#endif
}
void set_lazy_deopt_throw_pc_offset(intptr_t pc) const {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
StoreNonPointer(&raw_ptr()->lazy_deopt_throw_pc_offset_, pc);
#endif
}

@ -1166,7 +1166,8 @@ class RawCode : public RawObject {
int32_t state_bits_;
// PC offsets for code patching.
NOT_IN_PRECOMPILED(int32_t lazy_deopt_pc_offset_);
NOT_IN_PRECOMPILED(int32_t lazy_deopt_return_pc_offset_);
NOT_IN_PRECOMPILED(int32_t lazy_deopt_throw_pc_offset_);
// Variable length data follows here.
int32_t* data() { OPEN_ARRAY_START(int32_t, int32_t); }

@ -45,7 +45,8 @@ class Deserializer;
V(MegamorphicCall) \
V(FixAllocationStubTarget) \
V(Deoptimize) \
V(DeoptimizeLazy) \
V(DeoptimizeLazyFromReturn) \
V(DeoptimizeLazyFromThrow) \
V(UnoptimizedIdenticalWithNumberCheck) \
V(OptimizedIdenticalWithNumberCheck) \
V(ICCallBreakpoint) \
@ -73,7 +74,8 @@ class Deserializer;
V(LazyCompile) \
V(FixCallersTarget) \
V(Deoptimize) \
V(DeoptimizeLazy) \
V(DeoptimizeLazyFromReturn) \
V(DeoptimizeLazyFromThrow) \
V(FrameAwaitingMaterialization) \
#endif // !defined(TARGET_ARCH_DBC)
@ -194,7 +196,8 @@ class StubCode : public AllStatic {
enum DeoptStubKind {
kLazyDeopt,
kLazyDeoptFromReturn,
kLazyDeoptFromThrow,
kEagerDeopt
};

@ -427,6 +427,10 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
const intptr_t saved_exception_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
const intptr_t saved_stacktrace_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R1);
// Result in R0 is preserved as part of pushing all registers below.
// Push registers in their enumeration order: lowest register number at
@ -459,15 +463,20 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
__ mov(R0, Operand(SP)); // Pass address of saved registers block.
__ mov(R1, Operand(kind == kLazyDeopt ? 1 : 0));
bool is_lazy = (kind == kLazyDeoptFromReturn) ||
(kind == kLazyDeoptFromThrow);
__ mov(R1, Operand(is_lazy ? 1 : 0));
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
__ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2 temporarily.
__ ldr(R1, Address(FP, saved_exception_slot_from_fp * kWordSize));
__ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize));
}
__ RestoreCodePointer();
@ -478,14 +487,21 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// is no need to set the correct PC marker or load PP, since they get patched.
__ EnterStubFrame();
__ mov(R0, Operand(FP)); // Get last FP address.
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(R1); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(R1); // Preserve exception as first local.
__ Push(R2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2.
__ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
__ ldr(R2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize));
}
// Code above cannot cause GC.
__ RestoreCodePointer();
@ -496,16 +512,22 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// require allocation.
// Enter stub frame with loading PP. The caller's PP is not materialized yet.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(R1); // Preserve result, it will be GC-d here.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(R1); // Preserve exception, it will be GC-d here.
__ Push(R2); // Preserve stacktrace, it will be GC-d here.
}
__ PushObject(Smi::ZoneHandle()); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
__ Pop(R1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Pop(R0); // Restore result.
} else if (kind == kLazyDeoptFromThrow) {
__ Pop(R1); // Restore stacktrace.
__ Pop(R0); // Restore exception.
}
__ LeaveStubFrame();
// Remove materialization arguments.
@ -514,14 +536,30 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
// LR: return address + call-instruction-size
// R0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, 0xf1f1f1f1);
__ Push(IP);
GenerateDeoptimizationSequence(assembler, kLazyDeopt);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// LR: return address + call-instruction-size
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes());
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, 0xf1f1f1f1);
__ Push(IP);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

@ -452,6 +452,10 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
const intptr_t saved_exception_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0);
const intptr_t saved_stacktrace_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R1);
// Result in R0 is preserved as part of pushing all registers below.
// Push registers in their enumeration order: lowest register number at
@ -475,15 +479,20 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
__ mov(R0, SP); // Pass address of saved registers block.
__ LoadImmediate(R1, kind == kLazyDeopt ? 1 : 0);
bool is_lazy = (kind == kLazyDeoptFromReturn) ||
(kind == kLazyDeoptFromThrow);
__ LoadImmediate(R1, is_lazy ? 1 : 0);
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
__ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize);
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2 temporarily.
__ LoadFromOffset(R1, FP, saved_exception_slot_from_fp * kWordSize);
__ LoadFromOffset(R2, FP, saved_stacktrace_slot_from_fp * kWordSize);
}
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -495,15 +504,22 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// is no need to set the correct PC marker or load PP, since they get patched.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(R1); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(R1); // Preserve exception as first local.
__ Push(R2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
__ mov(R0, FP); // Pass last FP as parameter in R0.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ LoadFromOffset(R1, FP, kFirstLocalSlotFromFp * kWordSize);
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into R1 and stacktrace into R2.
__ LoadFromOffset(R1, FP, kFirstLocalSlotFromFp * kWordSize);
__ LoadFromOffset(R2, FP, (kFirstLocalSlotFromFp - 1) * kWordSize);
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -515,17 +531,24 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// require allocation.
// Enter stub frame with loading PP. The caller's PP is not materialized yet.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(R1); // Preserve result, it will be GC-d here.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(R1); // Preserve exception, it will be GC-d here.
__ Push(R2); // Preserve stacktrace, it will be GC-d here.
}
__ Push(ZR); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
__ Pop(R1);
__ SmiUntag(R1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Pop(R0); // Restore result.
} else if (kind == kLazyDeoptFromThrow) {
__ Pop(R1); // Restore stacktrace.
__ Pop(R0); // Restore exception.
}
__ LeaveStubFrame();
// Remove materialization arguments.
@ -534,14 +557,30 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
// LR: return address + call-instruction-size
// R0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, LR, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
GenerateDeoptimizationSequence(assembler, kLazyDeopt);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// LR: return address + call-instruction-size
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(LR, LR, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

@ -54,7 +54,12 @@ void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
// These deoptimization stubs are only used to populate stack frames
// with something meaningful to make sure GC can scan the stack during
// the last phase of deoptimization which materializes objects.
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
__ Trap();
}
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
__ Trap();
}

@ -359,6 +359,10 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - EAX);
const intptr_t saved_exception_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - EAX);
const intptr_t saved_stacktrace_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - EDX);
// Result in EAX is preserved as part of pushing all registers below.
// Push registers in their enumeration order: lowest register number at
@ -383,14 +387,19 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ movl(ECX, ESP); // Preserve saved registers block.
__ ReserveAlignedFrameSpace(2 * kWordSize);
__ movl(Address(ESP, 0 * kWordSize), ECX); // Start of register block.
__ movl(Address(ESP, 1 * kWordSize), Immediate(kind == kLazyDeopt ? 1 : 0));
bool is_lazy = (kind == kLazyDeoptFromReturn) ||
(kind == kLazyDeoptFromThrow);
__ movl(Address(ESP, 1 * kWordSize), Immediate(is_lazy ? 1 : 0));
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (EAX) is stack-size (FP - SP) in bytes.
const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX temporarily.
__ movl(EBX, Address(EBP, saved_result_slot_from_fp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into EBX and stacktrace into ECX temporarily.
__ movl(EBX, Address(EBP, saved_exception_slot_from_fp * kWordSize));
__ movl(ECX, Address(EBP, saved_stacktrace_slot_from_fp * kWordSize));
}
__ LeaveFrame();
@ -401,15 +410,22 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// Leaf runtime function DeoptimizeFillFrame expects a Dart frame.
__ EnterDartFrame(0);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ pushl(EBX); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ pushl(EBX); // Preserve exception as first local.
__ pushl(ECX); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(1 * kWordSize);
__ movl(Address(ESP, 0), EBP); // Pass last FP as parameter on stack.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX.
__ movl(EBX, Address(EBP, kFirstLocalSlotFromFp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into EBX and stacktrace into ECX.
__ movl(EBX, Address(EBP, kFirstLocalSlotFromFp * kWordSize));
__ movl(ECX, Address(EBP, (kFirstLocalSlotFromFp - 1) * kWordSize));
}
// Code above cannot cause GC.
__ LeaveFrame();
@ -418,8 +434,11 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// Materialize any objects that were deferred by FillFrame because they
// require allocation.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ pushl(EBX); // Preserve result, it will be GC-d here.
} else if (kind == kLazyDeoptFromThrow) {
__ pushl(EBX); // Preserve exception, it will be GC-d here.
__ pushl(ECX); // Preserve stacktrace, it will be GC-d here.
}
__ pushl(Immediate(Smi::RawValue(0))); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
@ -427,8 +446,11 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// of the bottom-most frame. They were used as materialization arguments.
__ popl(EBX);
__ SmiUntag(EBX);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ popl(EAX); // Restore result.
} else if (kind == kLazyDeoptFromThrow) {
__ popl(EDX); // Restore stacktrace.
__ popl(EAX); // Restore exception.
}
__ LeaveFrame();
@ -441,13 +463,26 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// TOS: return address + call-instruction-size (5 bytes).
// EAX: result, must be preserved
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popl(EBX);
__ subl(EBX, Immediate(CallPattern::pattern_length_in_bytes()));
__ pushl(EBX);
GenerateDeoptimizationSequence(assembler, kLazyDeopt);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// TOS: return address + call-instruction-size (5 bytes).
// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popl(EBX);
__ subl(EBX, Immediate(CallPattern::pattern_length_in_bytes()));
__ pushl(EBX);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

@ -444,6 +444,10 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
const intptr_t saved_exception_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
const intptr_t saved_stacktrace_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V1);
// Result in V0 is preserved as part of pushing all registers below.
// Push registers in their enumeration order: lowest register number at
@ -469,15 +473,20 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
__ mov(A0, SP); // Pass address of saved registers block.
__ LoadImmediate(A1, (kind == kLazyDeopt) ? 1 : 0);
bool is_lazy = (kind == kLazyDeoptFromReturn) ||
(kind == kLazyDeoptFromThrow);
__ LoadImmediate(A1, is_lazy ? 1 : 0);
__ ReserveAlignedFrameSpace(1 * kWordSize);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (V0) is stack-size (FP - SP) in bytes, incl. the return address.
const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into T1 temporarily.
__ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into T1 and stacktrace into T2 temporarily.
__ lw(T1, Address(FP, saved_exception_slot_from_fp * kWordSize));
__ lw(T2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize));
}
__ RestoreCodePointer();
@ -489,14 +498,21 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ EnterStubFrame();
__ mov(A0, FP); // Get last FP address.
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(T1); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(T1); // Preserve exception as first local.
__ Push(T2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(1 * kWordSize);
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0.
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into T1.
__ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into T1 and stacktrace into T2.
__ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
__ lw(T2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize));
}
// Code above cannot cause GC.
__ RestoreCodePointer();
@ -507,16 +523,22 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// require allocation.
// Enter stub frame with loading PP. The caller's PP is not materialized yet.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Push(T1); // Preserve result, it will be GC-d here.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(T1); // Preserve exception, it will be GC-d here.
__ Push(T2); // Preserve stacktrace, it will be GC-d here.
}
__ PushObject(Smi::ZoneHandle()); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
// Result tells stub how many bytes to remove from the expression stack
// of the bottom-most frame. They were used as materialization arguments.
__ Pop(T1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ Pop(V0); // Restore result.
} else if (kind == kLazyDeoptFromThrow) {
__ Pop(V1); // Restore stacktrace.
__ Pop(V0); // Restore exception.
}
__ LeaveStubFrame();
// Remove materialization arguments.
@ -525,15 +547,30 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ Ret();
}
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
// RA: return address + call-instruction-size
// V0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
GenerateDeoptimizationSequence(assembler, kLazyDeopt);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// RA: return address + call-instruction-size
// V0: exception, must be preserved
// V1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ AddImmediate(RA, -CallPattern::kDeoptCallLengthInBytes);
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, 0xf1f1f1f1);
__ Push(TMP);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

@ -385,6 +385,10 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - RAX);
const intptr_t saved_exception_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - RAX);
const intptr_t saved_stacktrace_slot_from_fp =
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - RDX);
// Result in RAX is preserved as part of pushing all registers below.
// Push registers in their enumeration order: lowest register number at
@ -408,15 +412,20 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// Pass address of saved registers block.
__ movq(CallingConventions::kArg1Reg, RSP);
__ movq(CallingConventions::kArg2Reg, Immediate(kind == kLazyDeopt ? 1 : 0));
bool is_lazy = (kind == kLazyDeoptFromReturn) ||
(kind == kLazyDeoptFromThrow);
__ movq(CallingConventions::kArg2Reg, Immediate(is_lazy ? 1 : 0));
__ ReserveAlignedFrameSpace(0); // Ensure stack is aligned before the call.
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (RAX) is stack-size (FP - SP) in bytes.
const bool preserve_result = (kind == kLazyDeopt);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX temporarily.
__ movq(RBX, Address(RBP, saved_result_slot_from_fp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into RBX and stacktrace into RDX temporarily.
__ movq(RBX, Address(RBP, saved_exception_slot_from_fp * kWordSize));
__ movq(RDX, Address(RBP, saved_stacktrace_slot_from_fp * kWordSize));
}
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -432,16 +441,24 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// is no need to set the correct PC marker or load PP, since they get patched.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ pushq(RBX); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ pushq(RBX); // Preserve exception as first local.
__ pushq(RDX); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
// Pass last FP as a parameter.
__ movq(CallingConventions::kArg1Reg, RBP);
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX.
__ movq(RBX, Address(RBP, kFirstLocalSlotFromFp * kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into RBX.
__ movq(RBX, Address(RBP, kFirstLocalSlotFromFp * kWordSize));
// Restore stacktrace into RDX.
__ movq(RDX, Address(RBP, (kFirstLocalSlotFromFp - 1) * kWordSize));
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -453,8 +470,11 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// require allocation.
// Enter stub frame with loading PP. The caller's PP is not materialized yet.
__ EnterStubFrame();
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ pushq(RBX); // Preserve result, it will be GC-d here.
} else if (kind == kLazyDeoptFromThrow) {
__ pushq(RBX); // Preserve exception.
__ pushq(RDX); // Preserve stacktrace.
}
__ pushq(Immediate(Smi::RawValue(0))); // Space for the result.
__ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
@ -462,8 +482,11 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// of the bottom-most frame. They were used as materialization arguments.
__ popq(RBX);
__ SmiUntag(RBX);
if (preserve_result) {
if (kind == kLazyDeoptFromReturn) {
__ popq(RAX); // Restore result.
} else if (kind == kLazyDeoptFromThrow) {
__ popq(RDX); // Restore stacktrace.
__ popq(RAX); // Restore exception.
}
__ LeaveStubFrame();
@ -476,7 +499,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// TOS: return address + call-instruction-size (5 bytes).
// RAX: result, must be preserved
void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popq(RBX);
@ -484,7 +507,22 @@ void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(0xf1f1f1f1));
__ pushq(RBX);
GenerateDeoptimizationSequence(assembler, kLazyDeopt);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
}
// TOS: return address + call-instruction-size (5 bytes).
// RAX: exception, must be preserved
// RDX: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
// Correct return address to point just after the call that is being
// deoptimized.
__ popq(RBX);
__ subq(RBX, Immediate(ShortCallPattern::pattern_length_in_bytes()));
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(0xf1f1f1f1));
__ pushq(RBX);
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
}

@ -0,0 +1,70 @@
// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

// Test deoptimization on an optimistically hoisted smi check.
// VMOptions=--optimization-counter-threshold=10 --no-background-compilation --enable-inlining-annotations

// Test that lazy deoptimization works if the program returns to a function
// that is scheduled for lazy deoptimization via an exception.

import 'package:expect/expect.dart';

class C {
  var x = 42;
}

const NeverInline = "NeverInline";

@NeverInline
AA(C c, bool b) {
  if (b) {
    c.x = 2.5;
    throw 123;
  }
}

@NeverInline
T1(C c, bool b) {
  try {
    AA(c, b);
  } on dynamic catch (e, st) {
    print(e);
    print(st);
    Expect.isTrue(st is StackTrace, "is StackTrace");
  }
  return c.x + 1;
}

@NeverInline
T2(C c, bool b) {
  try {
    AA(c, b);
  } on String catch (e, st) {
    print(e);
    print(st);
    Expect.isTrue(st is StackTrace, "is StackTrace");
    Expect.isTrue(false);
  } on int catch (e, st) {
    Expect.equals(e, 123);
    Expect.equals(b, true);
    Expect.equals(c.x, 2.5);
    print(st);
    Expect.isTrue(st is StackTrace, "is StackTrace");
  }
  return c.x + 1;
}

main() {
  var c = new C();
  for (var i = 0; i < 10000; ++i) {
    T1(c, false);
    T2(c, false);
  }
  Expect.equals(43, T1(c, false));
  Expect.equals(43, T2(c, false));
  Expect.equals(3.5, T1(c, true));
  Expect.equals(3.5, T2(c, true));
}