[vm/compiler] Remove dangerous FlowGraphCompiler::GenerateRuntimeCall

Making a runtime call inside an `<XXX>Instr::EmitNativeCode()` is
dangerous, because it almost always requires adjusting the lazy-deopt
environment to ensure that the continuation in unoptimized code has the
right stack state when returning from the runtime call.
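
The adjustment in question can be seen in the (unchanged) slow-path code
surrounding one of the remaining calls in the diffs below: before going to
the runtime, a lazy-deopt environment describing the expected stack state
has to be installed.

```cpp
// Excerpt from CheckStackOverflowSlowPath (quoted from the diffs below):
// install the deopt environment before making the runtime call.
Environment* env = compiler->SlowPathEnvironmentFor(
    instruction(), /*num_slow_path_args=*/0);
compiler->pending_deoptimization_env_ = env;
```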

=> Going to runtime should almost always happen via a stub call.

To discourage runtime calls inside `EmitNativeCode()` functions, we
remove the `FlowGraphCompiler::GenerateRuntimeCall()` helper method.
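
For reference, the removed helper was a thin wrapper that emitted the
call and then recorded the call-site metadata (quoted from the
per-architecture diffs below):

```cpp
void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
                                            intptr_t deopt_id,
                                            const RuntimeEntry& entry,
                                            intptr_t argument_count,
                                            LocationSummary* locs) {
  __ CallRuntime(entry, argument_count);
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
```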

The remaining runtime calls made from `EmitNativeCode()` implementations
fall into three categories:

  * `CheckStackOverflowInstr`: in this particular case the deopt
    environment doesn't need to be modified, so the call and its metadata
    are emitted inline (see the sketch after this list).
  * leaf runtime calls: these need no metadata (neither deopt info nor
    stackmaps), since leaf runtime entries cannot safepoint.
  * unopt-only runtime calls: these need no metadata (neither deopt info
    nor stackmaps), since all slots in unoptimized frames are GC-able and
    unoptimized frames cannot be lazy-deoptimized.
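
For `CheckStackOverflowInstr`, the helper's two steps are now simply
spelled out at the call site (quoted from the diffs below):

```cpp
// The runtime call plus explicit call-site metadata, i.e. exactly what
// GenerateRuntimeCall used to bundle:
__ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
compiler->EmitCallsiteMetadata(
    instruction()->source(), instruction()->deopt_id(),
    UntaggedPcDescriptors::kOther, instruction()->locs());
```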

We add assertions, each with a comment, for the latter two cases to make
this clear.
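
Representative excerpts of the two assertion patterns (the runtime entry
and argument count vary per call site):

```cpp
// Unopt-only runtime call (e.g. the GuardFieldClassInstr slow case): no
// deopt info needed because the frame is unoptimized.
ASSERT(!compiler->is_optimizing());  // No deopt info needed.
__ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
__ Drop(2);  // Drop the field and the value.

// Leaf runtime call (e.g. CaseInsensitiveCompareInstr): no metadata
// needed because leaf runtime entries cannot safepoint.
ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
```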

Issue https://github.com/dart-lang/sdk/issues/45213

TEST=Only adds assertions.

Change-Id: Idb8badfbe65fff55585959338129405c71908d25
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/199042
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
Martin Kustermann 2021-05-12 09:14:50 +00:00 committed by commit-bot@chromium.org
parent 64cf9574e2
commit 535b53659b
9 changed files with 38 additions and 70 deletions

View file

@@ -632,12 +632,6 @@ class FlowGraphCompiler : public ValueObject {
                                  intptr_t sub_type_cache_index);
 #endif
 
-  void GenerateRuntimeCall(const InstructionSource& source,
-                           intptr_t deopt_id,
-                           const RuntimeEntry& entry,
-                           intptr_t argument_count,
-                           LocationSummary* locs);
-
   void GenerateStubCall(const InstructionSource& source,
                         const Code& stub,
                         UntaggedPcDescriptors::Kind kind,

View file

@@ -465,15 +465,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   }
 }
 
-void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
-                                            intptr_t deopt_id,
-                                            const RuntimeEntry& entry,
-                                            intptr_t argument_count,
-                                            LocationSummary* locs) {
-  __ CallRuntime(entry, argument_count);
-  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
-}
-
 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   // We do not check for overflow when incrementing the edge counter. The
   // function should normally be optimized long before the counter can

View file

@@ -458,15 +458,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   }
 }
 
-void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
-                                            intptr_t deopt_id,
-                                            const RuntimeEntry& entry,
-                                            intptr_t argument_count,
-                                            LocationSummary* locs) {
-  __ CallRuntime(entry, argument_count);
-  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
-}
-
 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
   // We do not check for overflow when incrementing the edge counter. The
   // function should normally be optimized long before the counter can

View file

@@ -500,15 +500,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   AddStaticCallTarget(target, entry_kind);
 }
 
-void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
-                                            intptr_t deopt_id,
-                                            const RuntimeEntry& entry,
-                                            intptr_t argument_count,
-                                            LocationSummary* locs) {
-  __ CallRuntime(entry, argument_count);
-  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
-}
-
 void FlowGraphCompiler::EmitUnoptimizedStaticCall(
     intptr_t size_with_type_args,
     intptr_t deopt_id,

View file

@@ -461,15 +461,6 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
   }
 }
 
-void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
-                                            intptr_t deopt_id,
-                                            const RuntimeEntry& entry,
-                                            intptr_t argument_count,
-                                            LocationSummary* locs) {
-  __ CallRuntime(entry, argument_count);
-  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
-}
-
 void FlowGraphCompiler::EmitUnoptimizedStaticCall(
     intptr_t size_with_type_args,
     intptr_t deopt_id,

View file

@@ -2401,7 +2401,6 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   }
 
   if (deopt == NULL) {
-    ASSERT(!compiler->is_optimizing());
     __ Bind(fail);
 
     __ ldrh(IP,
@@ -2412,6 +2411,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Push(field_reg);
     __ Push(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2525,6 +2525,7 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Push(field_reg);
     __ Push(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -3586,9 +3587,10 @@ class CheckStackOverflowSlowPath
           instruction()->deopt_id(), instruction()->source(),
           compiler->CurrentTryIndex());
     } else {
-      compiler->GenerateRuntimeCall(
-          instruction()->source(), instruction()->deopt_id(),
-          kStackOverflowRuntimeEntry, kNumSlowPathArgs, instruction()->locs());
+      __ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
+      compiler->EmitCallsiteMetadata(
+          instruction()->source(), instruction()->deopt_id(),
+          UntaggedPcDescriptors::kOther, instruction()->locs());
     }
 
     if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
@@ -5587,9 +5589,8 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
 }
 
 void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   // Call the function.
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
 }
@@ -6057,6 +6058,7 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
   // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
   __ vmovd(D1, D2);
   if (TargetCPUFeatures::hardfp_supported()) {
+    ASSERT(instr->TargetFunction().is_leaf());  // No deopt info needed.
     __ CallRuntime(instr->TargetFunction(), kInputCount);
   } else {
     // If the ABI is not "hardfp", then we have to move the double arguments
@@ -6064,6 +6066,7 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
     // registers.
     __ vmovrrd(R0, R1, D0);
     __ vmovrrd(R2, R3, D1);
+    ASSERT(instr->TargetFunction().is_leaf());  // No deopt info needed.
     __ CallRuntime(instr->TargetFunction(), kInputCount);
     __ vmovdrr(D0, R0, R1);
     __ vmovdrr(D1, R2, R3);
@@ -6072,8 +6075,6 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
 }
 
 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
     InvokeDoublePow(compiler, this);
     return;
@@ -6084,6 +6085,7 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ vmovd(D1, D2);
   }
   if (TargetCPUFeatures::hardfp_supported()) {
+    ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
     __ CallRuntime(TargetFunction(), InputCount());
   } else {
     // If the ABI is not "hardfp", then we have to move the double arguments
@@ -6091,6 +6093,7 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     // registers.
     __ vmovrrd(R0, R1, D0);
     __ vmovrrd(R2, R3, D1);
+    ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
     __ CallRuntime(TargetFunction(), InputCount());
     __ vmovdrr(D0, R0, R1);
     __ vmovdrr(D1, R2, R3);

View file

@@ -2172,7 +2172,6 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   }
 
   if (deopt == NULL) {
-    ASSERT(!compiler->is_optimizing());
     __ Bind(fail);
 
     __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
@@ -2181,6 +2180,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ b(&ok, EQ);
     __ PushPair(value_reg, field_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2285,6 +2285,7 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ b(&ok, EQ);
     __ PushPair(value_reg, field_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -3181,9 +3182,10 @@ class CheckStackOverflowSlowPath
           instruction()->deopt_id(), instruction()->source(),
           compiler->CurrentTryIndex());
     } else {
-      compiler->GenerateRuntimeCall(
-          instruction()->source(), instruction()->deopt_id(),
-          kStackOverflowRuntimeEntry, kNumSlowPathArgs, locs);
+      __ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
+      compiler->EmitCallsiteMetadata(
+          instruction()->source(), instruction()->deopt_id(),
+          UntaggedPcDescriptors::kOther, instruction()->locs());
     }
 
     if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
@@ -4673,9 +4675,8 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
 }
 
 void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   // Call the function.
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
 }
@@ -5134,17 +5135,17 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
   __ Bind(&do_pow);
   __ fmovdd(base, saved_base);  // Restore base.
+  ASSERT(instr->TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(instr->TargetFunction(), kInputCount);
   __ Bind(&skip_call);
 }
 
 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
     InvokeDoublePow(compiler, this);
     return;
   }
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), InputCount());
 }

View file

@@ -1903,7 +1903,6 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   }
 
   if (deopt == NULL) {
-    ASSERT(!compiler->is_optimizing());
     __ Bind(fail);
 
     __ cmpw(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -1912,6 +1911,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ pushl(field_reg);
     __ pushl(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2021,6 +2021,7 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ pushl(field_reg);
     __ pushl(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2831,6 +2832,8 @@ LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
 class CheckStackOverflowSlowPath
     : public TemplateSlowPathCode<CheckStackOverflowInstr> {
  public:
+  static constexpr intptr_t kNumSlowPathArgs = 0;
+
   explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
       : TemplateSlowPathCode(instruction) {}
@@ -2850,9 +2853,11 @@ class CheckStackOverflowSlowPath
     Environment* env = compiler->SlowPathEnvironmentFor(
         instruction(), /*num_slow_path_args=*/0);
     compiler->pending_deoptimization_env_ = env;
-    compiler->GenerateRuntimeCall(
-        instruction()->source(), instruction()->deopt_id(),
-        kStackOverflowRuntimeEntry, 0, instruction()->locs());
+    __ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
+    compiler->EmitCallsiteMetadata(
+        instruction()->source(), instruction()->deopt_id(),
+        UntaggedPcDescriptors::kOther, instruction()->locs());
+
     if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
         instruction()->in_loop()) {
@@ -4787,8 +4792,6 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
 }
 
 void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   // Save ESP. EDI is chosen because it is callee saved so we do not need to
   // back it up before calling into the runtime.
   static const Register kSavedSPReg = EDI;
@@ -4801,6 +4804,7 @@ void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   __ movl(compiler::Address(ESP, +3 * kWordSize), locs()->in(3).reg());
 
   // Call the function.
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
 
   // Restore ESP and pop the old value off the stack.
@@ -5287,6 +5291,7 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
   for (intptr_t i = 0; i < kInputCount; i++) {
     __ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg());
   }
+  ASSERT(instr->TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(instr->TargetFunction(), kInputCount);
   __ fstpl(compiler::Address(ESP, 0));
   __ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0));
@@ -5296,8 +5301,6 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
 }
 
 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
     InvokeDoublePow(compiler, this);
     return;
@@ -5309,6 +5312,7 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ movsd(compiler::Address(ESP, kDoubleSize * i), locs()->in(i).fpu_reg());
   }
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), InputCount());
   __ fstpl(compiler::Address(ESP, 0));
   __ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0));

View file

@@ -2159,7 +2159,6 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   }
 
   if (deopt == NULL) {
-    ASSERT(!compiler->is_optimizing());
     __ Bind(fail);
 
     __ cmpw(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
@@ -2168,6 +2167,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ pushq(field_reg);
     __ pushq(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2273,6 +2273,7 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ pushq(field_reg);
     __ pushq(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);  // Drop the field and the value.
   } else {
@@ -2367,6 +2368,7 @@ void GuardFieldTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ Bind(&call_runtime);
     __ PushObject(original);
     __ pushq(value_reg);
+    ASSERT(!compiler->is_optimizing());  // No deopt info needed.
     __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
     __ Drop(2);
   }
@@ -3267,9 +3269,10 @@ class CheckStackOverflowSlowPath
           instruction()->deopt_id(), instruction()->source(),
          compiler->CurrentTryIndex());
     } else {
-      compiler->GenerateRuntimeCall(
-          instruction()->source(), instruction()->deopt_id(),
-          kStackOverflowRuntimeEntry, kNumSlowPathArgs, instruction()->locs());
+      __ CallRuntime(kStackOverflowRuntimeEntry, kNumSlowPathArgs);
+      compiler->EmitCallsiteMetadata(
+          instruction()->source(), instruction()->deopt_id(),
+          UntaggedPcDescriptors::kOther, instruction()->locs());
     }
 
     if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
@@ -4989,8 +4992,6 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
 }
 
 void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   // Save RSP. R13 is chosen because it is callee saved so we do not need to
   // back it up before calling into the runtime.
   static const Register kSavedSPReg = R13;
@@ -4998,6 +4999,7 @@ void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   __ ReserveAlignedFrameSpace(0);
 
   // Call the function. Parameters are already in their correct spots.
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), TargetFunction().argument_count());
 
   // Restore RSP.
@@ -5470,6 +5472,7 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
   __ movaps(XMM0, locs->in(0).fpu_reg());
   ASSERT(locs->in(1).fpu_reg() == XMM1);
+  ASSERT(instr->TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(instr->TargetFunction(), kInputCount);
   __ movaps(locs->out(0).fpu_reg(), XMM0);
 
   // Restore RSP.
@@ -5478,8 +5481,6 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
 }
 
 void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  ASSERT(TargetFunction().is_leaf());
-
   if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
     InvokeDoublePow(compiler, this);
     return;
@@ -5492,6 +5493,7 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(locs()->in(1).fpu_reg() == XMM1);
   }
+  ASSERT(TargetFunction().is_leaf());  // No deopt info needed.
   __ CallRuntime(TargetFunction(), InputCount());
   __ movaps(locs()->out(0).fpu_reg(), XMM0);
 
   // Restore RSP.