VM: Make optimized try-catch work in DBC.

The catch entry block has all locals in fixed locations
(Rj) where j = kNumberOfCpuRegisters - 1 - i for parameter i.

This means we reserve a range of DBC registers at the top-end of the frame.
Those registers are blocked for general allocation to avoid any overlap
with the rest of the registers that are allocated from the bottom.

Each optimized frame with a try-catch will be kNumberOfCpuRegisters wide.

BUG=
R=vegorov@google.com

Review URL: https://codereview.chromium.org/2388093003 .
This commit is contained in:
Florian Schneider 2016-10-13 11:36:23 -07:00
parent d483b0d2af
commit 6d66f3dea8
10 changed files with 190 additions and 19 deletions

View file

@ -747,7 +747,7 @@ namespace dart {
V(IfEqNull, A, reg, ___, ___) \
V(IfNeNull, A, reg, ___, ___) \
V(CreateArrayTOS, 0, ___, ___, ___) \
V(CreateArrayOpt, A_B_C, reg, reg, ___) \
V(CreateArrayOpt, A_B_C, reg, reg, reg) \
V(Allocate, D, lit, ___, ___) \
V(AllocateT, 0, ___, ___, ___) \
V(AllocateOpt, A_D, reg, lit, ___) \

View file

@ -11,6 +11,7 @@
#include "vm/flow_graph_compiler.h"
#include "vm/log.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
namespace dart {
@ -612,10 +613,6 @@ void FlowGraphAllocator::BuildLiveRanges() {
} else if (block->IsCatchBlockEntry()) {
// Process initial definitions.
CatchBlockEntryInstr* catch_entry = block->AsCatchBlockEntry();
#if defined(TARGET_ARCH_DBC)
// TODO(vegorov) support try-catch/finally for DBC.
flow_graph_.parsed_function().Bailout("FlowGraphAllocator", "Catch");
#endif
ProcessEnvironmentUses(catch_entry, catch_entry); // For lazy deopt
@ -631,10 +628,19 @@ void FlowGraphAllocator::BuildLiveRanges() {
// block start to until the end of the instruction so that they are
// preserved.
intptr_t start = catch_entry->start_pos();
BlockLocation(Location::RegisterLocation(kExceptionObjectReg),
#if !defined(TARGET_ARCH_DBC)
const Register exception_reg = kExceptionObjectReg;
const Register stacktrace_reg = kStackTraceObjectReg;
#else
const intptr_t exception_reg =
LocalVarIndex(0, catch_entry->exception_var().index());
const intptr_t stacktrace_reg =
LocalVarIndex(0, catch_entry->stacktrace_var().index());
#endif
BlockLocation(Location::RegisterLocation(exception_reg),
start,
ToInstructionEnd(start));
BlockLocation(Location::RegisterLocation(kStackTraceObjectReg),
BlockLocation(Location::RegisterLocation(stacktrace_reg),
start,
ToInstructionEnd(start));
}
@ -654,9 +660,52 @@ void FlowGraphAllocator::BuildLiveRanges() {
}
// Splits [range] at [pos] if it extends past that position. The tail
// created by the split is completed as a register-class live range so the
// allocator processes it normally; the head keeps the fixed initial
// location assigned by the caller.
void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
intptr_t pos) {
if (range->End() > pos) {
LiveRange* tail = range->SplitAt(pos);
CompleteRange(tail, Location::kRegister);
}
}
void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
LiveRange* range,
BlockEntryInstr* block) {
#if defined(TARGET_ARCH_DBC)
if (block->IsCatchBlockEntry()) {
if (defn->IsParameter()) {
ParameterInstr* param = defn->AsParameter();
intptr_t slot_index = param->index();
AssignSafepoints(defn, range);
range->finger()->Initialize(range);
slot_index = kNumberOfCpuRegisters - 1 - slot_index;
range->set_assigned_location(Location::RegisterLocation(slot_index));
SplitInitialDefinitionAt(range, block->lifetime_position() + 2);
ConvertAllUses(range);
BlockLocation(Location::RegisterLocation(slot_index), 0, kMaxPosition);
} else {
ConstantInstr* constant = defn->AsConstant();
ASSERT(constant != NULL);
range->set_assigned_location(Location::Constant(constant));
range->set_spill_slot(Location::Constant(constant));
AssignSafepoints(defn, range);
range->finger()->Initialize(range);
UsePosition* use =
range->finger()->FirstRegisterBeneficialUse(block->start_pos());
if (use != NULL) {
LiveRange* tail =
SplitBetween(range, block->start_pos(), use->pos());
// Parameters and constants are tagged, so allocated to CPU registers.
ASSERT(constant->representation() == kTagged);
CompleteRange(tail, Location::kRegister);
}
ConvertAllUses(range);
}
return;
}
#endif
// Save the range end because it may change below.
intptr_t range_end = range->End();
if (defn->IsParameter()) {
@ -679,10 +728,7 @@ void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
AssignSafepoints(defn, range);
range->finger()->Initialize(range);
range->set_assigned_location(Location::RegisterLocation(slot_index));
if (range->End() > kNormalEntryPos) {
LiveRange* tail = range->SplitAt(kNormalEntryPos);
CompleteRange(tail, Location::kRegister);
}
SplitInitialDefinitionAt(range, kNormalEntryPos);
ConvertAllUses(range);
return;
}

View file

@ -248,6 +248,8 @@ class FlowGraphAllocator : public ValueObject {
return Location::MachineRegisterLocation(register_kind_, reg);
}
void SplitInitialDefinitionAt(LiveRange* range, intptr_t pos);
void PrintLiveRanges();
const FlowGraph& flow_graph_;

View file

@ -670,8 +670,17 @@ void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
if (flow_graph().captured_parameters()->Contains(i)) continue;
if ((*idefs)[i]->IsConstant()) continue; // Common constants
Location src = env->LocationAt(i);
#if defined(TARGET_ARCH_DBC)
intptr_t dest_index = kNumberOfCpuRegisters - 1 - i;
Location dest = Location::RegisterLocation(dest_index);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer. With DBC parameters are copied into
// the locals area.
instr->locs()->SetStackBit(dest_index);
#else
intptr_t dest_index = i - num_non_copied_params;
Location dest = Location::StackSlot(dest_index);
#endif
move_instr->AddMove(dest, src);
}
@ -687,8 +696,13 @@ void FlowGraphCompiler::EmitTrySync(Instruction* instr, intptr_t try_index) {
Location src = env->LocationAt(i);
ASSERT(!src.IsFpuRegister());
ASSERT(!src.IsDoubleStackSlot());
#if defined(TARGET_ARCH_DBC)
intptr_t dest_index = kNumberOfCpuRegisters - 1 - i;
Location dest = Location::RegisterLocation(dest_index);
#else
intptr_t dest_index = i - num_non_copied_params;
Location dest = Location::StackSlot(dest_index);
#endif
move_instr->AddMove(dest, src);
// Update safepoint bitmap to indicate that the target location
// now contains a pointer.
@ -845,12 +859,18 @@ void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
// with the same instruction (and same location summary) sees a bitmap that
// is larger that StackSize(). It will never be larger than StackSize() +
// live_registers_size.
ASSERT(bitmap->Length() <= (spill_area_size + live_registers_size));
// The first safepoint will grow the bitmap to be the size of
// spill_area_size but the second safepoint will truncate the bitmap and
// append the live registers to it again. The bitmap produced by both calls
// will be the same.
#if !defined(TARGET_ARCH_DBC)
ASSERT(bitmap->Length() <= (spill_area_size + live_registers_size));
bitmap->SetLength(spill_area_size);
#else
if (bitmap->Length() <= (spill_area_size + live_registers_size)) {
bitmap->SetLength(Utils::Maximum(bitmap->Length(), spill_area_size));
}
#endif
// Mark the bits in the stack map in the same order we push registers in
// slow path code (see FlowGraphCompiler::SaveLiveRegisters).

View file

@ -194,10 +194,12 @@ void FlowGraphCompiler::RecordAfterCall(Instruction* instr) {
// hence the difference.
pending_deoptimization_env_->DropArguments(instr->ArgumentCount());
AddDeoptIndexAtCall(deopt_id_after);
// This descriptor is needed for exception handling in optimized code.
AddCurrentDescriptor(RawPcDescriptors::kOther,
deopt_id_after, instr->token_pos());
} else {
// Add deoptimization continuation point after the call and before the
// arguments are removed.
// In optimized code this descriptor is needed for exception handling.
AddCurrentDescriptor(RawPcDescriptors::kDeopt,
deopt_id_after,
instr->token_pos());

View file

@ -1129,10 +1129,27 @@ EMIT_NATIVE_CODE(CatchBlockEntry, 0) {
compiler->assembler()->CodeSize(),
catch_handler_types_,
needs_stacktrace());
__ MoveSpecial(-exception_var().index()-1,
Simulator::kExceptionSpecialIndex);
__ MoveSpecial(-stacktrace_var().index()-1,
Simulator::kStacktraceSpecialIndex);
if (HasParallelMove()) {
compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
}
if (compiler->is_optimizing()) {
// In optimized code, variables at the catch block entry reside at the top
// of the allocatable register range.
const intptr_t num_non_copied_params =
compiler->flow_graph().num_non_copied_params();
const intptr_t exception_reg = kNumberOfCpuRegisters -
(-exception_var().index() + num_non_copied_params);
const intptr_t stacktrace_reg = kNumberOfCpuRegisters -
(-stacktrace_var().index() + num_non_copied_params);
__ MoveSpecial(exception_reg, Simulator::kExceptionSpecialIndex);
__ MoveSpecial(stacktrace_reg, Simulator::kStacktraceSpecialIndex);
} else {
__ MoveSpecial(LocalVarIndex(0, exception_var().index()),
Simulator::kExceptionSpecialIndex);
__ MoveSpecial(LocalVarIndex(0, stacktrace_var().index()),
Simulator::kStacktraceSpecialIndex);
}
__ SetFrame(compiler->StackSize());
}
@ -1410,10 +1427,15 @@ EMIT_NATIVE_CODE(CheckClass, 1) {
(unary_checks().NumberOfChecks() > 1));
const intptr_t may_be_smi =
(unary_checks().GetReceiverClassIdAt(0) == kSmiCid) ? 1 : 0;
bool is_dense_switch = false;
intptr_t cid_mask = 0;
if (IsDenseSwitch()) {
ASSERT(cids_[0] < cids_[cids_.length() - 1]);
cid_mask = ComputeCidMask();
is_dense_switch = Smi::IsValid(cid_mask);
}
if (is_dense_switch) {
const intptr_t low_cid = cids_[0];
const intptr_t cid_mask = ComputeCidMask();
__ CheckDenseSwitch(value, may_be_smi);
__ Nop(compiler->ToEmbeddableCid(low_cid, this));
__ Nop(__ AddConstant(Smi::Handle(Smi::New(cid_mask))));

View file

@ -12164,7 +12164,7 @@ RawStackmap* Stackmap::New(intptr_t length,
const char* Stackmap::ToCString() const {
#define FORMAT "%#x: "
#define FORMAT "%#05x: "
if (IsNull()) {
return "{null}";
} else {

View file

@ -5,6 +5,7 @@
// Testing Bigints with and without intrinsics.
// VMOptions=
// VMOptions=--no_intrinsify
// VMOptions=--no-background-compilation
// VMOptions=--optimization_counter_threshold=10 --no-background_compilation
library big_integer_test;

View file

@ -208,3 +208,4 @@ http_resource_test: Skip # Resolve URI not supported yet in product mode.
[ $arch == simdbc || $arch == simdbc64 ]
regexp/stack-overflow_test: RuntimeError, OK # Smaller limit with irregex interpreter
big_integer_arith_vm_test/gcd: Pass, RuntimeError # Issue #27474
big_integer_arith_vm_test/modPow: Pass, RuntimeError # Issue #27474

View file

@ -0,0 +1,77 @@
// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// Dart test program for testing try/catch statement without any exceptions
// being thrown.
// VMOptions=--optimization-counter-threshold=100 --no-background-compilation --enable-inlining-annotations
// Test optional parameters updated inside try-catch
import "package:expect/expect.dart";
// Pragma string recognized when run with --enable-inlining-annotations
// (see the VMOptions line above); annotated functions are not inlined.
const noInline = "NeverInline";
// Throws 123 when [b] == 1; otherwise returns normally (null).
@noInline
m1(int b) {
  if (b != 1) return;
  throw 123;
}
// Throws 456 when [b] == 2; otherwise returns normally (null).
@noInline
m2(int b) {
  if (b != 2) return;
  throw 456;
}
// Bumps [state] around each potentially-throwing call so the catch block can
// verify exactly how far the try body ran before the exception. Returns the
// thrown value, or "no throw" when neither m1 nor m2 throws.
@noInline
test1(int b, [int state = 0]) {
  try {
    state++;
    m1(b);
    state++;
    m2(b);
    state++;
  } on dynamic catch (error, trace) {
    switch (b) {
      case 1:
        if (state != 1) throw "fail1";
        break;
      case 2:
        if (state != 2) throw "fail2";
        break;
      case 3:
        if (state != 3) throw "fail3";
        break;
    }
    if (trace is! StackTrace) throw "fail4";
    return error;
  }
  return "no throw";
}
// Same shape as test1, but [state] starts as an unset optional parameter and
// is assigned inside the function body before the try block. Returns the
// thrown value, or "no throw" when neither m1 nor m2 throws.
@noInline
test2(int b, [int state]) {
  state = 0;
  try {
    state++;
    m1(b);
    state++;
    m2(b);
    state++;
  } on dynamic catch (err, st) {
    if (b == 1) {
      if (state != 1) throw "fail1";
    }
    if (b == 2) {
      if (state != 2) throw "fail2";
    }
    if (b == 3) {
      if (state != 3) throw "fail3";
    }
    if (!(st is StackTrace)) throw "fail4";
    return err;
  }
  return "no throw";
}
main() {
  // Warm up test1 past the optimization counter threshold, then exercise
  // the no-throw path and both throw sites in the optimized code.
  var round = 0;
  while (round < 300) {
    Expect.equals("no throw", test1(0));
    round++;
  }
  Expect.equals("no throw", test1(0));
  Expect.equals(123, test1(1));
  Expect.equals(456, test1(2));
  // Repeat the same sequence for test2.
  round = 0;
  while (round < 300) {
    Expect.equals("no throw", test2(0));
    round++;
  }
  Expect.equals("no throw", test2(0));
  Expect.equals(123, test2(1));
  Expect.equals(456, test2(2));
}