[vm] New async/async* implementation in JIT mode

The new implementation is based on suspend/resume stubs and does not
rely on desugaring async functions in the kernel AST.

Previously, the new async/async* implementation was only supported in
AOT mode. This change adds all the pieces needed for JIT mode:

 * Suspending variable-length frames (for unoptimized code).
 * Handling of Code and pool pointers in Dart stack frames.
 * OSR.
 * Deoptimization.
 * Hot reload.
 * Debugger.

The new implementation is not enabled in JIT mode yet.
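
To make the first bullet above concrete: in unoptimized JIT code the frame size of a suspended function can vary between suspension points, so the SuspendState object now records a frame capacity (see the new SuspendState frame_capacity field and the resize_suspend_state path in GenerateSuspendStub below) and is reused when it is large enough, or reallocated otherwise. The following standalone C++ sketch models only that reuse-or-grow decision; the struct and names are illustrative and are not the VM's actual object layout.

  // Simplified model of suspending a variable-length frame (JIT, unoptimized
  // code). Illustrative only -- not the VM's SuspendState layout.
  #include <cstdint>
  #include <cstring>
  #include <vector>

  struct SuspendStateModel {
    size_t frame_capacity = 0;       // allocated payload size, in words
    size_t frame_size = 0;           // size of the currently saved frame
    std::vector<uintptr_t> payload;  // copy of the suspended frame's words
  };

  // Save frame_size words starting at frame into state, growing the payload
  // only when the previously allocated capacity is too small (mirrors the
  // fast path / resize_suspend_state split in GenerateSuspendStub).
  void Suspend(SuspendStateModel& state,
               const uintptr_t* frame,
               size_t frame_size) {
    if (frame_size > state.frame_capacity) {
      // Slow path: "resize" by allocating a larger payload.
      state.payload.assign(frame, frame + frame_size);
      state.frame_capacity = state.payload.size();
    } else {
      // Fast path: capacity is sufficient, overwrite the payload in place.
      std::memcpy(state.payload.data(), frame,
                  frame_size * sizeof(uintptr_t));
    }
    state.frame_size = frame_size;
  }

On resume, the saved words are copied back onto the stack and execution continues at the recorded pc (see GenerateResumeStub in the diff).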

Design doc: go/compact-async-await.
TEST=ci

Issue: https://github.com/dart-lang/sdk/issues/48378
Change-Id: I477d6684bdce7cbc1edb179ae2271ff598b7dcc5
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/246081
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Johnni Winther <johnniwinther@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
Alexander Markov 2022-06-02 23:39:45 +00:00 committed by Commit Bot
parent 8f9e40b815
commit af4da780be
61 changed files with 4161 additions and 2484 deletions


@ -66,7 +66,6 @@ import 'package:front_end/src/fasta/util/parser_ast.dart'
import 'package:front_end/src/fasta/util/parser_ast_helper.dart';
import 'package:kernel/ast.dart'
show
AwaitExpression,
BasicLiteral,
Class,
Component,
@ -87,9 +86,7 @@ import 'package:kernel/ast.dart'
TreeNode,
UnevaluatedConstant,
VariableDeclaration,
Version,
Visitor,
VisitorVoidMixin;
Version;
import 'package:kernel/binary/ast_to_binary.dart' show BinaryPrinter;
import 'package:kernel/class_hierarchy.dart' show ClassHierarchy;
import 'package:kernel/core_types.dart' show CoreTypes;
@ -2421,13 +2418,6 @@ class Transform extends Step<ComponentResult, ComponentResult, FastaContext> {
backendTarget.performModularTransformations = false;
}
}
List<String> errors = VerifyTransformed.verify(component, backendTarget);
if (errors.isNotEmpty) {
return new Result<ComponentResult>(
result,
context.expectationSet["TransformVerificationError"],
errors.join('\n'));
}
if (backendTarget is TestTarget &&
backendTarget.hasGlobalTransformation) {
component =
@ -2492,34 +2482,6 @@ class Verify extends Step<ComponentResult, ComponentResult, FastaContext> {
}
}
/// Visitor that checks that the component has been transformed properly.
// TODO(johnniwinther): Add checks for all nodes that are unsupported after
// transformation.
class VerifyTransformed extends Visitor<void> with VisitorVoidMixin {
final Target target;
List<String> errors = [];
VerifyTransformed(this.target);
@override
void defaultNode(Node node) {
node.visitChildren(this);
}
@override
void visitAwaitExpression(AwaitExpression node) {
if (target is VmTarget) {
errors.add("ERROR: Untransformed await expression: $node");
}
}
static List<String> verify(Component component, Target target) {
VerifyTransformed visitor = new VerifyTransformed(target);
component.accept(visitor);
return visitor.errors;
}
}
mixin TestTarget on Target {
bool performModularTransformations = false;


@ -1587,7 +1587,7 @@ void Assembler::CheckCodePointer() {
target::Instructions::HeaderSize() - kHeapObjectTag;
mov(R0, Operand(PC));
AddImmediate(R0, -offset);
ldr(IP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
ldr(IP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
cmp(R0, Operand(IP));
b(&instructions_ok, EQ);
bkpt(1);
@ -3308,6 +3308,10 @@ void Assembler::Ret(Condition cond /* = AL */) {
READS_RETURN_ADDRESS_FROM_LR(bx(LR, cond));
}
void Assembler::SetReturnAddress(Register value) {
RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
}
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
// Reserve space for arguments and align frame before entering
// the C++ world.


@ -1300,6 +1300,11 @@ class Assembler : public AssemblerBase {
void EnterFrame(RegList regs, intptr_t frame_space);
void LeaveFrame(RegList regs, bool allow_pop_pc = false);
void Ret(Condition cond = AL);
// Sets the return address to [value] as if there was a call.
// On ARM sets LR.
void SetReturnAddress(Register value);
void ReserveAlignedFrameSpace(intptr_t frame_space);
// In debug mode, this generates code to check that:


@ -1540,7 +1540,7 @@ void Assembler::CheckCodePointer() {
const intptr_t entry_offset =
CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
adr(R0, Immediate(-entry_offset));
ldr(TMP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
ldr(TMP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
cmp(R0, Operand(TMP));
b(&instructions_ok, EQ);
brk(1);
@ -1583,6 +1583,10 @@ void Assembler::RestoreCSP() {
mov(CSP, SP);
}
void Assembler::SetReturnAddress(Register value) {
RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));
}
void Assembler::EnterFrame(intptr_t frame_size) {
SPILLS_LR_TO_FRAME(PushPair(FP, LR)); // low: FP, high: LR.
mov(FP, SP);


@ -2121,6 +2121,10 @@ class Assembler : public AssemblerBase {
void LeaveFrame();
void Ret() { ret(); }
// Sets the return address to [value] as if there was a call.
// On ARM64 sets LR.
void SetReturnAddress(Register value);
// Emit code to transition between generated mode and native mode.
//
// These require and ensure that CSP and SP are equal and aligned and require


@ -573,6 +573,13 @@ class Assembler : public AssemblerBase {
*/
void Ret() { ret(); }
// Sets the return address to [value] as if there was a call.
// On IA32 pushes [value].
void SetReturnAddress(Register value) {
PushRegister(value);
}
void CompareRegisters(Register a, Register b);
void CompareObjectRegisters(Register a, Register b) {
CompareRegisters(a, b);


@ -2727,6 +2727,9 @@ void Assembler::AddImmediate(Register rd,
Register rs1,
intx_t imm,
OperandSize sz) {
if ((imm == 0) && (rd == rs1)) {
return;
}
if (IsITypeImm(imm)) {
addi(rd, rs1, imm);
} else {
@ -3646,7 +3649,7 @@ void Assembler::CheckCodePointer() {
intx_t hi = (imm - lo) << (XLEN - 32) >> (XLEN - 32);
auipc(TMP, hi);
addi(TMP, TMP, lo);
lx(TMP2, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
lx(TMP2, FieldAddress(CODE_REG, target::Code::instructions_offset()));
beq(TMP, TMP2, &instructions_ok, kNearJump);
ebreak();
Bind(&instructions_ok);


@ -1231,6 +1231,12 @@ class Assembler : public MicroAssembler {
void LeaveFrame();
void Ret() { ret(); }
// Sets the return address to [value] as if there was a call.
// On RISC-V sets RA.
void SetReturnAddress(Register value) {
mv(RA, value);
}
// Emit code to transition between generated mode and native mode.
//
// These require and ensure that CSP and SP are equal and aligned and require


@ -2069,7 +2069,7 @@ void Assembler::CheckCodePointer() {
leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
}
cmpq(RAX, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
cmpq(RAX, FieldAddress(CODE_REG, target::Code::instructions_offset()));
j(EQUAL, &instructions_ok);
int3();
Bind(&instructions_ok);


@ -711,6 +711,13 @@ class Assembler : public AssemblerBase {
// Methods for High-level operations and implemented on all architectures.
void Ret() { ret(); }
// Sets the return address to [value] as if there was a call.
// On X64 pushes [value].
void SetReturnAddress(Register value) {
PushRegister(value);
}
void CompareRegisters(Register a, Register b);
void CompareObjectRegisters(Register a, Register b) { OBJ(cmp)(a, b); }
void BranchIf(Condition condition,


@ -1456,6 +1456,10 @@ void ConstantPropagator::VisitCall1ArgStub(Call1ArgStubInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitSuspend(SuspendInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitLoadThread(LoadThreadInstr* instr) {
SetValue(instr, non_constant_);
}


@ -373,7 +373,8 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
__ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
} else if (parsed_function().suspend_state_var() != nullptr &&
!flow_graph().IsCompiledForOsr()) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitSuspendableFunction stub is called.


@ -363,7 +363,8 @@ void FlowGraphCompiler::EmitPrologue() {
slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
__ StoreToOffset(value_reg, FP, slot_index * kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
} else if (parsed_function().suspend_state_var() != nullptr &&
!flow_graph().IsCompiledForOsr()) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitSuspendableFunction stub is called.


@ -450,7 +450,8 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
__ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
} else if (parsed_function().suspend_state_var() != nullptr &&
!flow_graph().IsCompiledForOsr()) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitSuspendableFunction stub is called.


@ -357,7 +357,8 @@ void FlowGraphCompiler::EmitPrologue() {
__ StoreToOffset(value_reg, SP,
(slot_index + fp_to_sp_delta) * kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
} else if (parsed_function().suspend_state_var() != nullptr &&
!flow_graph().IsCompiledForOsr()) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitSuspendableFunction stub is called.


@ -369,7 +369,8 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
__ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
} else if (parsed_function().suspend_state_var() != nullptr &&
!flow_graph().IsCompiledForOsr()) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitSuspendableFunction stub is called.


@ -7298,10 +7298,6 @@ LocationSummary* Call1ArgStubInstr::MakeLocationSummary(Zone* zone,
locs->set_in(0, Location::RegisterLocation(
InitSuspendableFunctionStubABI::kTypeArgsReg));
break;
case StubId::kAwait:
case StubId::kYieldAsyncStar:
locs->set_in(0, Location::RegisterLocation(SuspendStubABI::kArgumentReg));
break;
}
locs->set_out(0, Location::RegisterLocation(CallingConventions::kReturnReg));
return locs;
@ -7314,12 +7310,34 @@ void Call1ArgStubInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case StubId::kInitAsync:
stub = object_store->init_async_stub();
break;
case StubId::kAwait:
stub = object_store->await_stub();
break;
case StubId::kInitAsyncStar:
stub = object_store->init_async_star_stub();
break;
}
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), deopt_id(), env());
}
LocationSummary* SuspendInstr::MakeLocationSummary(Zone* zone, bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0, Location::RegisterLocation(SuspendStubABI::kArgumentReg));
locs->set_out(0, Location::RegisterLocation(CallingConventions::kReturnReg));
return locs;
}
void SuspendInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Use deopt_id as a yield index.
compiler->EmitYieldPositionMetadata(source(), deopt_id());
ObjectStore* object_store = compiler->isolate_group()->object_store();
Code& stub = Code::ZoneHandle(compiler->zone());
switch (stub_id_) {
case StubId::kAwait:
stub = object_store->await_stub();
break;
case StubId::kYieldAsyncStar:
stub = object_store->yield_async_star_stub();
break;
@ -7328,18 +7346,18 @@ void Call1ArgStubInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
locs(), deopt_id(), env());
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
if ((stub_id_ == StubId::kAwait) || (stub_id_ == StubId::kYieldAsyncStar)) {
// On x86 (X64 and IA32) mismatch between calls and returns
// significantly regresses performance. So suspend stub
// does not return directly to the caller. Instead, a small
// epilogue is generated right after the call to suspend stub,
// and resume stub adjusts resume PC to skip this epilogue.
const intptr_t start = compiler->assembler()->CodeSize();
__ LeaveFrame();
__ ret();
RELEASE_ASSERT(compiler->assembler()->CodeSize() - start ==
SuspendStubABI::kResumePcDistance);
}
// On x86 (X64 and IA32) mismatch between calls and returns
// significantly regresses performance. So suspend stub
// does not return directly to the caller. Instead, a small
// epilogue is generated right after the call to suspend stub,
// and resume stub adjusts resume PC to skip this epilogue.
const intptr_t start = compiler->assembler()->CodeSize();
__ LeaveFrame();
__ ret();
RELEASE_ASSERT(compiler->assembler()->CodeSize() - start ==
SuspendStubABI::kResumePcDistance);
compiler->EmitCallsiteMetadata(source(), resume_deopt_id(),
UntaggedPcDescriptors::kOther, locs(), env());
#endif
}
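
A note on the epilogue above: because unmatched call/return pairs defeat the return-address predictor on x86, the suspend stub returns here first, and the resume path skips the LeaveFrame/ret pair by advancing the saved return address by SuspendStubABI::kResumePcDistance (the resume stub does this with AddImmediate, shown later in this diff). A minimal illustrative sketch of that adjustment, using the constant's IA32 value from this commit; the real adjustment happens in generated stub code, not in C++:

  #include <cstdint>

  // kResumePcDistance is the size of the "LeaveFrame(); ret();" epilogue
  // emitted right after the call to the suspend stub (4 bytes on IA32 after
  // this change: mov esp, ebp; pop ebp; ret).
  constexpr intptr_t kResumePcDistance = 4;

  // Resume at the instruction following the epilogue instead of returning
  // into it (illustrative; the resume stub performs this on kResumePcReg).
  uintptr_t AdjustResumePc(uintptr_t saved_return_address) {
    return saved_return_address + kResumePcDistance;
  }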


@ -529,7 +529,8 @@ struct InstrAttrs {
M(Call1ArgStub, _) \
M(LoadThread, kNoGC) \
M(Deoptimize, kNoGC) \
M(SimdOp, kNoGC)
M(SimdOp, kNoGC) \
M(Suspend, _)
#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
M(Allocation, _) \
@ -9582,10 +9583,8 @@ class SimdOpInstr : public Definition {
class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
public:
enum class StubId {
kAwait,
kInitAsync,
kInitAsyncStar,
kYieldAsyncStar,
};
Call1ArgStubInstr(const InstructionSource& source,
@ -9605,6 +9604,9 @@ class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return true; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
return InputCount();
}
DECLARE_INSTRUCTION(Call1ArgStub);
PRINT_OPERANDS_TO_SUPPORT
@ -9616,6 +9618,49 @@ class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
DISALLOW_COPY_AND_ASSIGN(Call1ArgStubInstr);
};
// Suspends execution using the suspend stub specified using [StubId].
class SuspendInstr : public TemplateDefinition<1, Throws> {
public:
enum class StubId {
kAwait,
kYieldAsyncStar,
};
SuspendInstr(const InstructionSource& source,
StubId stub_id,
Value* operand,
intptr_t deopt_id,
intptr_t resume_deopt_id)
: TemplateDefinition(source, deopt_id),
stub_id_(stub_id),
resume_deopt_id_(resume_deopt_id),
token_pos_(source.token_pos) {
SetInputAt(0, operand);
}
Value* operand() const { return inputs_[0]; }
StubId stub_id() const { return stub_id_; }
intptr_t resume_deopt_id() const { return resume_deopt_id_; }
virtual TokenPosition token_pos() const { return token_pos_; }
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return true; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual intptr_t NumberOfInputsConsumedBeforeCall() const {
return InputCount();
}
DECLARE_INSTRUCTION(Suspend);
PRINT_OPERANDS_TO_SUPPORT
private:
const StubId stub_id_;
const intptr_t resume_deopt_id_;
const TokenPosition token_pos_;
DISALLOW_COPY_AND_ASSIGN(SuspendInstr);
};
#undef DECLARE_INSTRUCTION
class Environment : public ZoneAllocated {


@ -6503,8 +6503,8 @@ void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
EBP, compiler::target::frame_layout.code_from_fp * kWordSize));
// Load instructions object (active_instructions and Code::entry_point() may
// not point to this instruction object any more; see Code::DisableDartCode).
__ movl(target_reg, compiler::FieldAddress(
target_reg, Code::saved_instructions_offset()));
__ movl(target_reg,
compiler::FieldAddress(target_reg, Code::instructions_offset()));
__ addl(target_reg,
compiler::Immediate(Instructions::HeaderSize() - kHeapObjectTag));
__ addl(target_reg, offset);


@ -1370,12 +1370,21 @@ void Call1ArgStubInstr::PrintOperandsTo(BaseTextBuffer* f) const {
case StubId::kInitAsync:
name = "InitAsync";
break;
case StubId::kAwait:
name = "Await";
break;
case StubId::kInitAsyncStar:
name = "InitAsyncStar";
break;
}
f->Printf("%s(", name);
operand()->PrintTo(f);
f->AddString(")");
}
void SuspendInstr::PrintOperandsTo(BaseTextBuffer* f) const {
const char* name = "";
switch (stub_id_) {
case StubId::kAwait:
name = "Await";
break;
case StubId::kYieldAsyncStar:
name = "YieldAsyncStar";
break;


@ -645,7 +645,7 @@ void FlowGraphAllocator::BuildLiveRanges() {
Definition* defn = (*catch_entry->initial_definitions())[i];
LiveRange* range = GetLiveRange(defn->ssa_temp_index());
range->DefineAt(catch_entry->start_pos()); // Defined at block entry.
ProcessInitialDefinition(defn, range, catch_entry);
ProcessInitialDefinition(defn, range, catch_entry, i);
}
} else if (auto entry = block->AsBlockEntryWithInitialDefs()) {
ASSERT(block->IsFunctionEntry() || block->IsOsrEntry());
@ -658,13 +658,13 @@ void FlowGraphAllocator::BuildLiveRanges() {
GetLiveRange(ToSecondPairVreg(defn->ssa_temp_index()));
range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
range->DefineAt(entry->start_pos());
ProcessInitialDefinition(defn, range, entry,
ProcessInitialDefinition(defn, range, entry, i,
/*second_location_for_definition=*/true);
}
LiveRange* range = GetLiveRange(defn->ssa_temp_index());
range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
range->DefineAt(entry->start_pos());
ProcessInitialDefinition(defn, range, entry);
ProcessInitialDefinition(defn, range, entry, i);
}
}
}
@ -679,13 +679,13 @@ void FlowGraphAllocator::BuildLiveRanges() {
LiveRange* range = GetLiveRange(ToSecondPairVreg(defn->ssa_temp_index()));
range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
range->DefineAt(graph_entry->start_pos());
ProcessInitialDefinition(defn, range, graph_entry,
ProcessInitialDefinition(defn, range, graph_entry, i,
/*second_location_for_definition=*/true);
}
LiveRange* range = GetLiveRange(defn->ssa_temp_index());
range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
range->DefineAt(graph_entry->start_pos());
ProcessInitialDefinition(defn, range, graph_entry);
ProcessInitialDefinition(defn, range, graph_entry, i);
}
}
@ -697,10 +697,45 @@ void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
}
}
bool FlowGraphAllocator::IsSuspendStateParameter(Definition* defn) {
if (auto param = defn->AsParameter()) {
if ((param->GetBlock()->IsOsrEntry() ||
param->GetBlock()->IsCatchBlockEntry()) &&
flow_graph_.SuspendStateVar() != nullptr &&
param->index() == flow_graph_.SuspendStateEnvIndex()) {
return true;
}
}
return false;
}
void FlowGraphAllocator::AllocateSpillSlotForInitialDefinition(
intptr_t slot_index,
intptr_t range_end) {
if (slot_index < spill_slots_.length()) {
// Multiple initial definitions could exist for the same spill slot
// as function could have both OsrEntry and CatchBlockEntry.
spill_slots_[slot_index] =
Utils::Maximum(spill_slots_[slot_index], range_end);
ASSERT(!quad_spill_slots_[slot_index]);
ASSERT(!untagged_spill_slots_[slot_index]);
} else {
while (spill_slots_.length() < slot_index) {
spill_slots_.Add(kMaxPosition);
quad_spill_slots_.Add(false);
untagged_spill_slots_.Add(false);
}
spill_slots_.Add(range_end);
quad_spill_slots_.Add(false);
untagged_spill_slots_.Add(false);
}
}
void FlowGraphAllocator::ProcessInitialDefinition(
Definition* defn,
LiveRange* range,
BlockEntryInstr* block,
intptr_t initial_definition_index,
bool second_location_for_definition) {
// Save the range end because it may change below.
const intptr_t range_end = range->End();
@ -779,21 +814,30 @@ void FlowGraphAllocator::ProcessInitialDefinition(
Location spill_slot = range->spill_slot();
if (spill_slot.IsStackSlot() && spill_slot.base_reg() == FPREG &&
spill_slot.stack_index() <=
compiler::target::frame_layout.first_local_from_fp) {
compiler::target::frame_layout.first_local_from_fp &&
!IsSuspendStateParameter(defn)) {
// On entry to the function, range is stored on the stack above the FP in
// the same space which is used for spill slots. Update spill slot state to
// reflect that and prevent register allocator from reusing this space as a
// spill slot.
spill_slots_.Add(range_end);
quad_spill_slots_.Add(false);
untagged_spill_slots_.Add(false);
// Do not allocate spill slot for OSR parameter corresponding to
// a synthetic :suspend_state variable as it is already allocated
// in AllocateSpillSlotForSuspendState.
ASSERT(defn->IsParameter());
ASSERT(defn->AsParameter()->index() == initial_definition_index);
const intptr_t spill_slot_index =
-compiler::target::frame_layout.VariableIndexForFrameSlot(
spill_slot.stack_index());
AllocateSpillSlotForInitialDefinition(spill_slot_index, range_end);
// Note, all incoming parameters are assumed to be tagged.
MarkAsObjectAtSafepoints(range);
} else if (defn->IsConstant() && block->IsCatchBlockEntry()) {
} else if (defn->IsConstant() && block->IsCatchBlockEntry() &&
(initial_definition_index >=
flow_graph_.num_direct_parameters())) {
// Constants at catch block entries consume spill slots.
spill_slots_.Add(range_end);
quad_spill_slots_.Add(false);
untagged_spill_slots_.Add(false);
AllocateSpillSlotForInitialDefinition(
initial_definition_index - flow_graph_.num_direct_parameters(),
range_end);
}
}
@ -994,26 +1038,45 @@ void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
for (intptr_t i = 0; i < env->Length(); ++i) {
Value* value = env->ValueAt(i);
Definition* def = value->definition();
if (def->HasPairRepresentation()) {
locations[i] = Location::Pair(Location::Any(), Location::Any());
} else {
locations[i] = Location::Any();
}
if (env->outer() == nullptr && flow_graph_.SuspendStateVar() != nullptr &&
i == flow_graph_.SuspendStateEnvIndex()) {
// Make sure synthetic :suspend_state variable gets a correct
// location on the stack frame. It is used by deoptimization.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
flow_graph_.parsed_function().suspend_state_var());
locations[i] = Location::StackSlot(slot_index, FPREG);
if (!def->IsConstant()) {
// Update live intervals for Parameter/Phi definitions
// corresponding to :suspend_state in OSR and try/catch cases as
// they are still used when resolving control flow.
ASSERT(def->IsParameter() || def->IsPhi());
ASSERT(!def->HasPairRepresentation());
LiveRange* range = GetLiveRange(def->ssa_temp_index());
range->AddUseInterval(block_start_pos, use_pos);
}
continue;
}
if (def->IsPushArgument()) {
// Frame size is unknown until after allocation.
locations[i] = Location::NoLocation();
continue;
}
ConstantInstr* constant = def->AsConstant();
if (constant != NULL) {
if (auto constant = def->AsConstant()) {
locations[i] = Location::Constant(constant);
continue;
}
MaterializeObjectInstr* mat = def->AsMaterializeObject();
if (mat != NULL) {
if (auto mat = def->AsMaterializeObject()) {
// MaterializeObject itself produces no value. But its uses
// are treated as part of the environment: allocated locations
// will be used when building deoptimization data.


@ -147,6 +147,7 @@ class FlowGraphAllocator : public ValueObject {
void ProcessInitialDefinition(Definition* defn,
LiveRange* range,
BlockEntryInstr* block,
intptr_t initial_definition_index,
bool second_location_for_definition = false);
void ConnectIncomingPhiMoves(JoinEntryInstr* join);
void BlockLocation(Location loc, intptr_t from, intptr_t to);
@ -249,6 +250,15 @@ class FlowGraphAllocator : public ValueObject {
// at all safepoints.
void UpdateStackmapsForSuspendState();
// Returns true if [defn] is an OsrEntry or CatchBlockEntry parameter
// corresponding to a synthetic :suspend_state variable.
bool IsSuspendStateParameter(Definition* defn);
// Allocates spill slot [slot_index] for the initial definition of
// OsrEntry or CatchBlockEntry (Parameter or Constant).
void AllocateSpillSlotForInitialDefinition(intptr_t slot_index,
intptr_t range_end);
// Allocate the given live range to a spill slot.
void Spill(LiveRange* range);
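
A standalone sketch of the spill-slot bookkeeping declared above, mirroring the new AllocateSpillSlotForInitialDefinition in flow_graph_allocator.cc shown earlier in this diff. When both an OsrEntry and a CatchBlockEntry pin an initial definition to the same slot, the slot keeps the furthest live-range end. Plain std::vector stands in for the allocator's growable arrays, and kMaxPosition simply means "live to the end of the function"; names are illustrative.

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  constexpr int kMaxPosition = 0x7fffffff;  // "live until the end"

  struct SpillSlotsModel {
    std::vector<int> spill_slots;            // last position each slot is used
    std::vector<bool> quad_spill_slots;      // wide slots (always false here)
    std::vector<bool> untagged_spill_slots;  // untagged slots (always false here)

    // Reserve slot_index for an initial definition whose live range ends at
    // range_end. OsrEntry and CatchBlockEntry may both claim the same slot,
    // so an existing slot keeps the maximum of the two range ends.
    void AllocateForInitialDefinition(size_t slot_index, int range_end) {
      if (slot_index < spill_slots.size()) {
        spill_slots[slot_index] = std::max(spill_slots[slot_index], range_end);
        return;
      }
      // Pad any intermediate slots as live for the whole function, then add
      // the requested slot at the end.
      while (spill_slots.size() < slot_index) {
        spill_slots.push_back(kMaxPosition);
        quad_spill_slots.push_back(false);
        untagged_spill_slots.push_back(false);
      }
      spill_slots.push_back(range_end);
      quad_spill_slots.push_back(false);
      untagged_spill_slots.push_back(false);
    }
  };

For example, AllocateForInitialDefinition(2, 40) followed by AllocateForInitialDefinition(2, 75) leaves slot 2 reserved up to position 75.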


@ -699,8 +699,8 @@ Fragment StreamingFlowGraphBuilder::InitSuspendableFunction(
Call1ArgStubInstr::StubId::kInitAsyncStar);
body += Drop();
body += NullConstant();
body += B->Call1ArgStub(TokenPosition::kNoSource,
Call1ArgStubInstr::StubId::kYieldAsyncStar);
body += B->Suspend(TokenPosition::kNoSource,
SuspendInstr::StubId::kYieldAsyncStar);
body += Drop();
}
return body;
@ -4373,7 +4373,10 @@ Fragment StreamingFlowGraphBuilder::BuildAwaitExpression(
instructions += BuildExpression(); // read operand.
instructions += B->Call1ArgStub(pos, Call1ArgStubInstr::StubId::kAwait);
if (NeedsDebugStepCheck(parsed_function()->function(), pos)) {
instructions += DebugStepCheck(pos);
}
instructions += B->Suspend(pos, SuspendInstr::StubId::kAwait);
return instructions;
}
@ -5305,6 +5308,9 @@ Fragment StreamingFlowGraphBuilder::BuildYieldStatement(
instructions += LoadNativeField(Slot::SuspendState_function_data());
instructions += BuildExpression(); // read expression.
if (NeedsDebugStepCheck(parsed_function()->function(), pos)) {
instructions += DebugStepCheck(pos);
}
auto& add_method = Function::ZoneHandle(Z);
const bool is_yield_star = (flags & kYieldStatementFlagYieldStar) != 0;
@ -5314,15 +5320,15 @@ Fragment StreamingFlowGraphBuilder::BuildYieldStatement(
} else {
add_method = IG->object_store()->async_star_stream_controller_add();
}
instructions += StaticCall(pos, add_method, 2, ICData::kNoRebind);
instructions +=
StaticCall(TokenPosition::kNoSource, add_method, 2, ICData::kNoRebind);
if (is_yield_star) {
// Discard result of _AsyncStarStreamController.addStream().
instructions += Drop();
// Suspend and test value passed to the resumed async* body.
instructions += NullConstant();
instructions +=
B->Call1ArgStub(pos, Call1ArgStubInstr::StubId::kYieldAsyncStar);
instructions += B->Suspend(pos, SuspendInstr::StubId::kYieldAsyncStar);
} else {
// Test value returned by _AsyncStarStreamController.add().
}
@ -5339,8 +5345,7 @@ Fragment StreamingFlowGraphBuilder::BuildYieldStatement(
instructions = Fragment(instructions.entry, continue_execution);
if (!is_yield_star) {
instructions += NullConstant();
instructions +=
B->Call1ArgStub(pos, Call1ArgStubInstr::StubId::kYieldAsyncStar);
instructions += B->Suspend(pos, SuspendInstr::StubId::kYieldAsyncStar);
instructions += Drop();
}
@ -5586,9 +5591,6 @@ Fragment StreamingFlowGraphBuilder::BuildFunctionNode(
}
if (function_node_helper.async_marker_ == FunctionNodeHelper::kAsync) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async functions are only supported in AOT mode.");
}
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_debuggable(true);
function.set_is_inlinable(false);
@ -5596,9 +5598,6 @@ Fragment StreamingFlowGraphBuilder::BuildFunctionNode(
ASSERT(function.IsCompactAsyncFunction());
} else if (function_node_helper.async_marker_ ==
FunctionNodeHelper::kAsyncStar) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async* functions are only supported in AOT mode.");
}
function.set_modifier(UntaggedFunction::kAsyncGen);
function.set_is_debuggable(true);
function.set_is_inlinable(false);


@ -4254,6 +4254,15 @@ Fragment FlowGraphBuilder::Call1ArgStub(TokenPosition position,
return Fragment(instr);
}
Fragment FlowGraphBuilder::Suspend(TokenPosition position,
SuspendInstr::StubId stub_id) {
SuspendInstr* instr =
new (Z) SuspendInstr(InstructionSource(position), stub_id, Pop(),
GetNextDeoptId(), GetNextDeoptId());
Push(instr);
return Fragment(instr);
}
Fragment FlowGraphBuilder::WrapTypedDataBaseInCompound(
const AbstractType& compound_type) {
const auto& compound_sub_class =


@ -424,6 +424,9 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
Fragment Call1ArgStub(TokenPosition position,
Call1ArgStubInstr::StubId stub_id);
// Generates Suspend instruction.
Fragment Suspend(TokenPosition position, SuspendInstr::StubId stub_id);
LocalVariable* LookupVariable(intptr_t kernel_offset);
// Build type argument type checks for the current function.


@ -993,6 +993,7 @@ class StackTrace : public AllStatic {
class SuspendState : public AllStatic {
public:
static word frame_capacity_offset();
static word frame_size_offset();
static word pc_offset();
static word function_data_offset();
@ -1003,6 +1004,8 @@ class SuspendState : public AllStatic {
static word HeaderSize();
static word InstanceSize();
static word InstanceSize(word payload_size);
static word FrameSizeGrowthGap();
FINAL_CLASS();
};
@ -1203,6 +1206,10 @@ class Thread : public AllStatic {
static word null_cast_error_shared_with_fpu_regs_stub_offset();
static word range_error_shared_without_fpu_regs_stub_offset();
static word range_error_shared_with_fpu_regs_stub_offset();
static word resume_stub_offset();
static word return_async_not_future_stub_offset();
static word return_async_star_stub_offset();
static word return_async_stub_offset();
static word stack_overflow_shared_without_fpu_regs_entry_point_offset();
static word stack_overflow_shared_without_fpu_regs_stub_offset();
static word stack_overflow_shared_with_fpu_regs_entry_point_offset();
@ -1214,6 +1221,7 @@ class Thread : public AllStatic {
static word allocate_object_stub_offset();
static word allocate_object_parameterized_stub_offset();
static word allocate_object_slow_stub_offset();
static word async_exception_handler_stub_offset();
static word optimize_stub_offset();
static word deoptimize_stub_offset();
static word enter_safepoint_stub_offset();
@ -1276,6 +1284,15 @@ class ObjectStore : public AllStatic {
static word int_type_offset();
static word string_type_offset();
static word type_type_offset();
static word suspend_state_await_offset();
static word suspend_state_handle_exception_offset();
static word suspend_state_init_async_offset();
static word suspend_state_init_async_star_offset();
static word suspend_state_return_async_offset();
static word suspend_state_return_async_not_future_offset();
static word suspend_state_return_async_star_offset();
static word suspend_state_yield_async_star_offset();
};
class Isolate : public AllStatic {
@ -1287,6 +1304,7 @@ class Isolate : public AllStatic {
static word finalizers_offset();
#if !defined(PRODUCT)
static word single_step_offset();
static word has_resumption_breakpoints_offset();
#endif // !defined(PRODUCT)
};
@ -1355,7 +1373,8 @@ class Code : public AllStatic {
static word object_pool_offset();
static word entry_point_offset(CodeEntryKind kind = CodeEntryKind::kNormal);
static word saved_instructions_offset();
static word active_instructions_offset();
static word instructions_offset();
static word owner_offset();
static word HeaderSize();
static word InstanceSize();

(Diff for one file suppressed because it is too large.)


@ -111,8 +111,8 @@
FIELD(Closure, hash_offset) \
FIELD(Closure, instantiator_type_arguments_offset) \
FIELD(ClosureData, default_type_arguments_kind_offset) \
FIELD(Code, instructions_offset) \
FIELD(Code, object_pool_offset) \
FIELD(Code, saved_instructions_offset) \
FIELD(Code, owner_offset) \
FIELD(Context, num_variables_offset) \
FIELD(Context, parent_offset) \
@ -149,6 +149,7 @@
FIELD(Isolate, current_tag_offset) \
FIELD(Isolate, default_tag_offset) \
FIELD(Isolate, finalizers_offset) \
NOT_IN_PRODUCT(FIELD(Isolate, has_resumption_breakpoints_offset)) \
FIELD(Isolate, ic_miss_code_offset) \
FIELD(IsolateGroup, object_store_offset) \
FIELD(IsolateGroup, shared_class_table_offset) \
@ -176,6 +177,14 @@
FIELD(ObjectStore, int_type_offset) \
FIELD(ObjectStore, string_type_offset) \
FIELD(ObjectStore, type_type_offset) \
FIELD(ObjectStore, suspend_state_await_offset) \
FIELD(ObjectStore, suspend_state_handle_exception_offset) \
FIELD(ObjectStore, suspend_state_init_async_offset) \
FIELD(ObjectStore, suspend_state_init_async_star_offset) \
FIELD(ObjectStore, suspend_state_return_async_offset) \
FIELD(ObjectStore, suspend_state_return_async_not_future_offset) \
FIELD(ObjectStore, suspend_state_return_async_star_offset) \
FIELD(ObjectStore, suspend_state_yield_async_star_offset) \
FIELD(OneByteString, data_offset) \
FIELD(PointerBase, data_offset) \
FIELD(Pointer, type_arguments_offset) \
@ -189,6 +198,7 @@
FIELD(String, hash_offset) \
FIELD(String, length_offset) \
FIELD(SubtypeTestCache, cache_offset) \
FIELD(SuspendState, FrameSizeGrowthGap) \
FIELD(SuspendState, error_callback_offset) \
FIELD(SuspendState, frame_size_offset) \
FIELD(SuspendState, function_data_offset) \
@ -210,6 +220,7 @@
FIELD(Thread, allocate_object_slow_entry_point_offset) \
FIELD(Thread, allocate_object_slow_stub_offset) \
FIELD(Thread, api_top_scope_offset) \
FIELD(Thread, async_exception_handler_stub_offset) \
FIELD(Thread, auto_scope_native_wrapper_entry_point_offset) \
FIELD(Thread, bool_false_offset) \
FIELD(Thread, bool_true_offset) \
@ -263,6 +274,10 @@
FIELD(Thread, null_cast_error_shared_without_fpu_regs_stub_offset) \
FIELD(Thread, range_error_shared_with_fpu_regs_stub_offset) \
FIELD(Thread, range_error_shared_without_fpu_regs_stub_offset) \
FIELD(Thread, resume_stub_offset) \
FIELD(Thread, return_async_not_future_stub_offset) \
FIELD(Thread, return_async_star_stub_offset) \
FIELD(Thread, return_async_stub_offset) \
\
FIELD(Thread, object_null_offset) \
FIELD(Thread, predefined_symbols_address_offset) \
@ -459,8 +474,10 @@
#define JIT_OFFSETS_LIST(FIELD, ARRAY, SIZEOF, ARRAY_SIZEOF, PAYLOAD_SIZEOF, \
RANGE, CONSTANT) \
FIELD(Code, active_instructions_offset) \
FIELD(Function, usage_counter_offset) \
FIELD(ICData, receivers_static_type_offset)
FIELD(ICData, receivers_static_type_offset) \
FIELD(SuspendState, frame_capacity_offset)
#define AOT_OFFSETS_LIST(FIELD, ARRAY, SIZEOF, ARRAY_SIZEOF, PAYLOAD_SIZEOF, \
RANGE, CONSTANT) \


@ -1274,9 +1274,34 @@ static intptr_t SuspendStateFpOffset() {
compiler::target::kWordSize;
}
static void CallDartCoreLibraryFunction(
Assembler* assembler,
intptr_t entry_point_offset_in_thread,
intptr_t function_offset_in_object_store,
bool uses_args_desc = false) {
if (FLAG_precompiled_mode) {
__ Call(Address(THR, entry_point_offset_in_thread));
} else {
__ LoadIsolateGroup(FUNCTION_REG);
__ LoadFromOffset(
FUNCTION_REG,
Address(FUNCTION_REG, target::IsolateGroup::object_store_offset()));
__ LoadFromOffset(FUNCTION_REG,
Address(FUNCTION_REG, function_offset_in_object_store));
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
if (!uses_args_desc) {
// Load a GC-safe value for the arguments descriptor (unused but tagged).
__ LoadImmediate(ARGS_DESC_REG, 0);
}
__ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
}
void StubCodeCompiler::GenerateSuspendStub(
Assembler* assembler,
intptr_t suspend_entry_point_offset) {
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store) {
const Register kArgument = SuspendStubABI::kArgumentReg;
const Register kTemp = SuspendStubABI::kTempReg;
const Register kFrameSize = SuspendStubABI::kFrameSizeReg;
@ -1284,7 +1309,8 @@ void StubCodeCompiler::GenerateSuspendStub(
const Register kFunctionData = SuspendStubABI::kFunctionDataReg;
const Register kSrcFrame = SuspendStubABI::kSrcFrameReg;
const Register kDstFrame = SuspendStubABI::kDstFrameReg;
Label alloc_slow_case, alloc_done, init_done, old_gen_object, call_await;
Label alloc_slow_case, alloc_done, init_done, resize_suspend_state,
old_gen_object, call_dart;
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
@ -1300,10 +1326,29 @@ void StubCodeCompiler::GenerateSuspendStub(
__ EnterStubFrame();
__ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
__ BranchIf(EQUAL, &init_done);
__ MoveRegister(kFunctionData, kSuspendState);
if (FLAG_precompiled_mode) {
__ BranchIf(EQUAL, &init_done);
} else {
Label alloc_suspend_state;
__ BranchIf(NOT_EQUAL, &alloc_suspend_state);
__ CompareWithMemoryValue(
kFrameSize,
FieldAddress(kSuspendState,
target::SuspendState::frame_capacity_offset()));
__ BranchIf(UNSIGNED_GREATER, &resize_suspend_state);
__ StoreToOffset(
kFrameSize,
FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
__ Jump(&init_done);
__ Bind(&alloc_suspend_state);
}
__ Comment("Allocate SuspendState");
__ MoveRegister(kFunctionData, kSuspendState);
// Check for allocation tracing.
NOT_IN_PRODUCT(
@ -1312,6 +1357,7 @@ void StubCodeCompiler::GenerateSuspendStub(
// Compute the rounded instance size.
const intptr_t fixed_size_plus_alignment_padding =
(target::SuspendState::HeaderSize() +
target::SuspendState::FrameSizeGrowthGap() * target::kWordSize +
target::ObjectAlignment::kObjectAlignment - 1);
__ AddImmediate(kTemp, kFrameSize, fixed_size_plus_alignment_padding);
__ AndImmediate(kTemp, -target::ObjectAlignment::kObjectAlignment);
@ -1329,6 +1375,16 @@ void StubCodeCompiler::GenerateSuspendStub(
__ SubRegisters(kTemp, kSuspendState);
__ AddImmediate(kSuspendState, kHeapObjectTag);
if (!FLAG_precompiled_mode) {
// Use rounded object size to calculate and save frame capacity.
__ AddImmediate(kTemp, kTemp, -target::SuspendState::payload_offset());
__ StoreToOffset(
kTemp, FieldAddress(kSuspendState,
target::SuspendState::frame_capacity_offset()));
// Restore rounded object size.
__ AddImmediate(kTemp, kTemp, target::SuspendState::payload_offset());
}
// Calculate the size tag.
{
Label size_tag_overflow, done;
@ -1381,8 +1437,8 @@ void StubCodeCompiler::GenerateSuspendStub(
__ Bind(&alloc_done);
__ Comment("Save SuspendState to frame");
__ LoadFromOffset(kTemp, Address(FPREG, kSavedCallerFpSlotFromFp *
compiler::target::kWordSize));
__ LoadFromOffset(
kTemp, Address(FPREG, kSavedCallerFpSlotFromFp * target::kWordSize));
__ StoreToOffset(kSuspendState, Address(kTemp, SuspendStateFpOffset()));
__ Bind(&init_done);
@ -1437,22 +1493,32 @@ void StubCodeCompiler::GenerateSuspendStub(
}
#endif
// Push arguments for _SuspendState._await* method.
// Push arguments for suspend Dart function.
__ PushRegistersInOrder({kSuspendState, kArgument});
// Write barrier.
__ BranchIfBit(kSuspendState, target::ObjectAlignment::kNewObjectBitPosition,
ZERO, &old_gen_object);
__ Bind(&call_await);
__ Comment("Call _SuspendState._await method");
__ Call(Address(THR, suspend_entry_point_offset));
__ Bind(&call_dart);
__ Comment("Call suspend Dart function");
CallDartCoreLibraryFunction(assembler, suspend_entry_point_offset_in_thread,
suspend_function_offset_in_object_store);
__ LeaveStubFrame();
#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_IA32)
// Drop caller frame on all architectures except x86 which needs to maintain
// call/return balance to avoid performance regressions.
// Drop caller frame on all architectures except x86 (X64/IA32) which
// needs to maintain call/return balance to avoid performance regressions.
__ LeaveDartFrame();
#elif defined(TARGET_ARCH_X64)
// Restore PP in JIT mode on x64 as epilogue following SuspendStub call
// will only unwind frame and return.
if (!FLAG_precompiled_mode) {
__ LoadFromOffset(
PP, Address(FPREG, target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize));
}
#endif
__ Ret();
@ -1476,6 +1542,24 @@ void StubCodeCompiler::GenerateSuspendStub(
__ PopRegister(kArgument); // Restore argument.
__ Jump(&alloc_done);
__ Bind(&resize_suspend_state);
__ Comment("Resize SuspendState");
// Save argument and frame size.
__ PushRegistersInOrder({kArgument, kFrameSize});
__ PushObject(NullObject()); // Make space on stack for the return value.
__ SmiTag(kFrameSize);
// Pass frame size and old suspend state to runtime entry.
__ PushRegistersInOrder({kFrameSize, kSuspendState});
// It's okay to call runtime for resizing SuspendState objects
// as it can only happen in the unoptimized code if expression
// stack grows between suspends, or once after OSR transition.
__ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
__ Drop(2); // Drop arguments
__ PopRegister(kSuspendState); // Get result.
__ PopRegister(kFrameSize); // Restore frame size.
__ PopRegister(kArgument); // Restore argument.
__ Jump(&alloc_done);
__ Bind(&old_gen_object);
__ Comment("Old gen SuspendState slow case");
{
@ -1492,30 +1576,35 @@ void StubCodeCompiler::GenerateSuspendStub(
#endif
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
}
__ Jump(&call_await);
__ Jump(&call_dart);
}
void StubCodeCompiler::GenerateAwaitStub(Assembler* assembler) {
GenerateSuspendStub(assembler,
target::Thread::suspend_state_await_entry_point_offset());
target::Thread::suspend_state_await_entry_point_offset(),
target::ObjectStore::suspend_state_await_offset());
}
void StubCodeCompiler::GenerateYieldAsyncStarStub(Assembler* assembler) {
GenerateSuspendStub(
assembler,
target::Thread::suspend_state_yield_async_star_entry_point_offset());
target::Thread::suspend_state_yield_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_yield_async_star_offset());
}
void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset) {
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store) {
const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
__ EnterStubFrame();
__ LoadObject(ARGS_DESC_REG, ArgumentsDescriptorBoxed(/*type_args_len=*/1,
/*num_arguments=*/0));
__ PushRegister(kTypeArgs);
__ Call(Address(THR, init_entry_point_offset));
CallDartCoreLibraryFunction(assembler, init_entry_point_offset_in_thread,
init_function_offset_in_object_store,
/*uses_args_desc=*/true);
__ LeaveStubFrame();
// Set :suspend_state in the caller frame.
@ -1526,13 +1615,15 @@ void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
void StubCodeCompiler::GenerateInitAsyncStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
assembler, target::Thread::suspend_state_init_async_entry_point_offset());
assembler, target::Thread::suspend_state_init_async_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_offset());
}
void StubCodeCompiler::GenerateInitAsyncStarStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
assembler,
target::Thread::suspend_state_init_async_star_entry_point_offset());
target::Thread::suspend_state_init_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_star_offset());
}
void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
@ -1544,7 +1635,7 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
const Register kResumePc = ResumeStubABI::kResumePcReg;
const Register kException = ResumeStubABI::kExceptionReg;
const Register kStackTrace = ResumeStubABI::kStackTraceReg;
Label rethrow_exception;
Label call_runtime;
// Top of the stack on entry:
// ... [SuspendState] [value] [exception] [stackTrace] [ReturnAddress]
@ -1563,6 +1654,15 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
__ Breakpoint();
__ Bind(&okay);
}
{
Label okay;
__ LoadFromOffset(
kTemp, FieldAddress(kSuspendState, target::SuspendState::pc_offset()));
__ CompareImmediate(kTemp, 0);
__ BranchIf(NOT_EQUAL, &okay);
__ Breakpoint();
__ Bind(&okay);
}
#endif
__ LoadFromOffset(
@ -1582,16 +1682,48 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
__ Bind(&okay);
}
#endif
if (!FLAG_precompiled_mode) {
// Copy Code object (part of the fixed frame which is not copied below)
// and restore pool pointer.
__ MoveRegister(kTemp, kSuspendState);
__ AddRegisters(kTemp, kFrameSize);
__ LoadFromOffset(
CODE_REG,
Address(kTemp,
target::SuspendState::payload_offset() - kHeapObjectTag +
target::frame_layout.code_from_fp * target::kWordSize));
__ StoreToOffset(
CODE_REG,
Address(FPREG, target::frame_layout.code_from_fp * target::kWordSize));
#if !defined(TARGET_ARCH_IA32)
__ LoadPoolPointer(PP);
#endif
}
// Do not copy fixed frame between the first local and FP.
__ AddImmediate(kFrameSize, (target::frame_layout.first_local_from_fp + 1) *
target::kWordSize);
__ SubRegisters(SPREG, kFrameSize);
__ Comment("Copy frame from SuspendState");
intptr_t num_saved_regs = 0;
if (kSrcFrame == THR) {
__ PushRegister(THR);
++num_saved_regs;
}
if (kDstFrame == CODE_REG) {
__ PushRegister(CODE_REG);
++num_saved_regs;
}
__ AddImmediate(kSrcFrame, kSuspendState,
target::SuspendState::payload_offset() - kHeapObjectTag);
__ MoveRegister(kDstFrame, SPREG);
__ AddImmediate(kDstFrame, SPREG, num_saved_regs * target::kWordSize);
__ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
if (kDstFrame == CODE_REG) {
__ PopRegister(CODE_REG);
}
if (kSrcFrame == THR) {
__ PopRegister(THR);
}
__ Comment("Transfer control");
@ -1600,11 +1732,6 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
__ StoreZero(FieldAddress(kSuspendState, target::SuspendState::pc_offset()),
kTemp);
__ LoadFromOffset(kException,
Address(FPREG, param_offset + 2 * target::kWordSize));
__ CompareObject(kException, NullObject());
__ BranchIf(NOT_EQUAL, &rethrow_exception);
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
// Adjust resume PC to skip extra epilogue generated on x86
// right after the call to suspend stub in order to maintain
@ -1612,41 +1739,78 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
__ AddImmediate(kResumePc, SuspendStubABI::kResumePcDistance);
#endif
static_assert((kException != CODE_REG) && (kException != PP),
"should not interfere");
__ LoadFromOffset(kException,
Address(FPREG, param_offset + 2 * target::kWordSize));
__ CompareObject(kException, NullObject());
__ BranchIf(NOT_EQUAL, &call_runtime);
if (!FLAG_precompiled_mode) {
// Check if Code is disabled.
__ LoadFromOffset(
kTemp, FieldAddress(CODE_REG, target::Code::instructions_offset()));
__ CompareWithMemoryValue(
kTemp,
FieldAddress(CODE_REG, target::Code::active_instructions_offset()));
__ BranchIf(NOT_EQUAL, &call_runtime);
#if !defined(PRODUCT)
// Check if there is a breakpoint at resumption.
__ LoadIsolate(kTemp);
__ LoadFromOffset(
kTemp,
Address(kTemp, target::Isolate::has_resumption_breakpoints_offset()),
kUnsignedByte);
__ CompareImmediate(kTemp, 0);
__ BranchIf(NOT_EQUAL, &call_runtime);
#endif
}
__ LoadFromOffset(CallingConventions::kReturnReg,
Address(FPREG, param_offset + 3 * target::kWordSize));
__ Jump(kResumePc);
__ Comment("Rethrow exception");
__ Bind(&rethrow_exception);
__ Comment("Call runtime to throw exception or deopt");
__ Bind(&call_runtime);
__ LoadFromOffset(kStackTrace,
Address(FPREG, param_offset + 1 * target::kWordSize));
static_assert((kStackTrace != CODE_REG) && (kStackTrace != PP),
"should not interfere");
// Adjust stack/LR/RA as if suspended Dart function called
// Set return address as if suspended Dart function called
// stub with kResumePc as a return address.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
__ PushRegister(kResumePc);
#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(__ MoveRegister(LR, kResumePc));
#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
__ MoveRegister(RA, kResumePc);
#else
#error Unknown target
#endif
__ SetReturnAddress(kResumePc);
if (!FLAG_precompiled_mode) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::resume_stub_offset());
}
#if !defined(TARGET_ARCH_IA32)
__ set_constant_pool_allowed(false);
#endif
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegistersInOrder({kException, kStackTrace});
__ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/2);
__ Breakpoint();
__ CallRuntime(kResumeFrameRuntimeEntry, /*argument_count=*/2);
if (FLAG_precompiled_mode) {
__ Breakpoint();
} else {
__ LeaveStubFrame();
__ LoadFromOffset(CallingConventions::kReturnReg,
Address(FPREG, param_offset + 3 * target::kWordSize));
// Lazy deoptimize.
__ Ret();
}
}
void StubCodeCompiler::GenerateReturnStub(Assembler* assembler,
intptr_t return_entry_point_offset) {
void StubCodeCompiler::GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread) {
const Register kSuspendState = ReturnStubABI::kSuspendStateReg;
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
@ -1654,11 +1818,23 @@ void StubCodeCompiler::GenerateReturnStub(Assembler* assembler,
#endif
__ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
#ifdef DEBUG
{
Label okay;
__ CompareObject(kSuspendState, NullObject());
__ BranchIf(NOT_EQUAL, &okay);
__ Breakpoint();
__ Bind(&okay);
}
#endif
__ LeaveDartFrame();
if (!FLAG_precompiled_mode) {
__ LoadFromOffset(CODE_REG, THR, return_stub_offset_in_thread);
}
__ EnterStubFrame();
__ PushRegistersInOrder({kSuspendState, CallingConventions::kReturnReg});
__ Call(Address(THR, return_entry_point_offset));
CallDartCoreLibraryFunction(assembler, return_entry_point_offset_in_thread,
return_function_offset_in_object_store);
__ LeaveStubFrame();
__ Ret();
}
@ -1666,20 +1842,26 @@ void StubCodeCompiler::GenerateReturnStub(Assembler* assembler,
void StubCodeCompiler::GenerateReturnAsyncStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_entry_point_offset());
target::Thread::suspend_state_return_async_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_offset(),
target::Thread::return_async_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::
suspend_state_return_async_not_future_entry_point_offset());
suspend_state_return_async_not_future_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_not_future_offset(),
target::Thread::return_async_not_future_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncStarStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_star_entry_point_offset());
target::Thread::suspend_state_return_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_star_offset(),
target::Thread::return_async_star_stub_offset());
}
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
@ -1701,12 +1883,17 @@ void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
__ BranchIf(EQUAL, &rethrow_exception);
__ LeaveDartFrame();
if (!FLAG_precompiled_mode) {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::async_exception_handler_stub_offset());
}
__ EnterStubFrame();
__ PushRegistersInOrder(
{kSuspendState, kExceptionObjectReg, kStackTraceObjectReg});
__ Call(Address(
THR,
target::Thread::suspend_state_handle_exception_entry_point_offset()));
CallDartCoreLibraryFunction(
assembler,
target::Thread::suspend_state_handle_exception_entry_point_offset(),
target::ObjectStore::suspend_state_handle_exception_offset());
__ LeaveStubFrame();
__ Ret();
@ -1717,6 +1904,10 @@ void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
__ Comment("Rethrow exception");
__ Bind(&rethrow_exception);
__ LeaveDartFrame();
if (!FLAG_precompiled_mode) {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::async_exception_handler_stub_offset());
}
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegistersInOrder({kExceptionObjectReg, kStackTraceObjectReg});


@ -199,13 +199,19 @@ class StubCodeCompiler : public AllStatic {
static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs);
static void GenerateSuspendStub(Assembler* assembler,
intptr_t suspend_entry_point_offset);
static void GenerateSuspendStub(
Assembler* assembler,
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store);
static void GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset);
static void GenerateReturnStub(Assembler* assembler,
intptr_t return_entry_point_offset);
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store);
static void GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
};
} // namespace compiler
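
The *_offset_in_object_store parameters added above exist because the stubs can no longer call a fixed entry point in JIT mode: the target core-library function's code can change (lazy compilation, deoptimization, hot reload), so CallDartCoreLibraryFunction (earlier in this diff) loads the Function from the object store and calls its current entry point, keeping the Thread entry-point cache for AOT only. A simplified standalone C++ model of that dispatch choice; the types and field names here are illustrative, not the VM's:

  #include <cstdint>

  // Illustrative model only; in the VM this lookup is emitted as stub code
  // (see CallDartCoreLibraryFunction), not written in C++.
  struct FunctionModel {
    uintptr_t entry_point;  // current entry point; may change in JIT mode
  };

  struct ObjectStoreModel {
    const FunctionModel* suspend_state_await;  // e.g. _SuspendState._await
  };

  struct ThreadModel {
    uintptr_t cached_await_entry_point;   // fixed entry point, valid in AOT
    const ObjectStoreModel* object_store;
  };

  uintptr_t AwaitEntryPoint(const ThreadModel& thread, bool precompiled_mode) {
    if (precompiled_mode) {
      // AOT: code is immutable, so the entry point cached on the Thread
      // can be called directly.
      return thread.cached_await_entry_point;
    }
    // JIT: go through the Function object so that lazy compilation,
    // deoptimization and hot reload are observed.
    return thread.object_store->suspend_state_await->entry_point;
  }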


@ -560,6 +560,7 @@ struct ResumeStubABI {
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = R1;
// Can also reuse kSuspendStateReg but should not conflict with CODE_REG/PP.
static const Register kExceptionReg = R3;
static const Register kStackTraceReg = R4;
};


@ -394,6 +394,7 @@ struct ResumeStubABI {
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = R1;
// Can also reuse kSuspendStateReg but should not conflict with CODE_REG/PP.
static const Register kExceptionReg = R3;
static const Register kStackTraceReg = R4;
};


@ -276,7 +276,7 @@ struct SuspendStubABI {
// Number of bytes to skip after
// suspend stub return address in order to resume.
// IA32: mov esp, ebp; pop ebp; ret
static const intptr_t kResumePcDistance = 5;
static const intptr_t kResumePcDistance = 4;
};
// ABI for InitSuspendableFunctionStub (InitAsyncStub, InitAsyncStarStub).
@ -290,13 +290,16 @@ struct ResumeStubABI {
static const Register kTempReg = EDX;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = ECX;
// Can reuse THR.
static const Register kSrcFrameReg = ESI;
// Can reuse CODE_REG.
static const Register kDstFrameReg = EDI;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = ECX;
static const Register kExceptionReg = ESI;
static const Register kStackTraceReg = EDI;
// Can also reuse kSuspendStateReg but should not conflict with CODE_REG.
static const Register kExceptionReg = EAX;
static const Register kStackTraceReg = EBX;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub,


@ -404,6 +404,7 @@ struct ResumeStubABI {
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = T2;
// Can also reuse kSuspendStateReg but should not conflict with CODE_REG/PP.
static const Register kExceptionReg = T3;
static const Register kStackTraceReg = T4;
};


@ -368,6 +368,7 @@ struct ResumeStubABI {
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = RCX;
// Can also reuse kSuspendStateReg but should not conflict with CODE_REG/PP.
static const Register kExceptionReg = RSI;
static const Register kStackTraceReg = RDI;
};


@ -282,7 +282,8 @@ ActivationFrame::ActivationFrame(Kind kind)
desc_indices_(8),
pc_desc_(PcDescriptors::ZoneHandle()) {}
ActivationFrame::ActivationFrame(const Closure& async_activation)
ActivationFrame::ActivationFrame(const Closure& async_activation,
CallerClosureFinder* caller_closure_finder)
: pc_(0),
fp_(0),
sp_(0),
@ -306,6 +307,17 @@ ActivationFrame::ActivationFrame(const Closure& async_activation)
pc_desc_(PcDescriptors::ZoneHandle()) {
// Extract the function and the code from the asynchronous activation.
function_ = async_activation.function();
if (caller_closure_finder->IsCompactAsyncCallback(function_)) {
const auto& suspend_state = SuspendState::Handle(
caller_closure_finder->GetSuspendStateFromAsyncCallback(
async_activation));
if (suspend_state.pc() != 0) {
pc_ = suspend_state.pc();
code_ = suspend_state.GetCodeObject();
function_ = code_.function();
return;
}
}
// Force-optimize functions should not be debuggable.
ASSERT(!function_.ForceOptimize());
function_.EnsureHasCompiledUnoptimizedCode();
@ -752,16 +764,24 @@ ObjectPtr ActivationFrame::GetAsyncContextVariable(const String& name) {
ObjectPtr ActivationFrame::GetAsyncAwaiter(
CallerClosureFinder* caller_closure_finder) {
if (fp() != 0 && !function_.IsNull() &&
(function_.IsAsyncClosure() || function_.IsAsyncGenClosure())) {
// Look up caller's closure on the stack.
ObjectPtr* last_caller_obj = reinterpret_cast<ObjectPtr*>(GetCallerSp());
Closure& closure = Closure::Handle();
closure = StackTraceUtils::FindClosureInFrame(last_caller_obj, function_);
if (fp() != 0 && !function_.IsNull()) {
if (function_.IsCompactAsyncFunction() ||
function_.IsCompactAsyncStarFunction()) {
const auto& suspend_state = Object::Handle(GetSuspendStateVar());
if (suspend_state.IsSuspendState()) {
return caller_closure_finder->FindCallerFromSuspendState(
SuspendState::Cast(suspend_state));
}
} else if (function_.IsAsyncClosure() || function_.IsAsyncGenClosure()) {
// Look up caller's closure on the stack.
ObjectPtr* last_caller_obj = reinterpret_cast<ObjectPtr*>(GetCallerSp());
Closure& closure = Closure::Handle();
closure = StackTraceUtils::FindClosureInFrame(last_caller_obj, function_);
if (!closure.IsNull() && caller_closure_finder->IsRunningAsync(closure)) {
closure = caller_closure_finder->FindCaller(closure);
return closure.ptr();
if (!closure.IsNull() && caller_closure_finder->IsRunningAsync(closure)) {
closure = caller_closure_finder->FindCaller(closure);
return closure.ptr();
}
}
}
@ -774,16 +794,13 @@ bool ActivationFrame::HandlesException(const Instance& exc_obj) {
return false;
}
intptr_t try_index = TryIndex();
if (try_index < 0) {
const auto& handlers = ExceptionHandlers::Handle(code().exception_handlers());
ASSERT(!handlers.IsNull());
if ((try_index < 0) && !handlers.has_async_handler()) {
return false;
}
ExceptionHandlers& handlers = ExceptionHandlers::Handle();
Array& handled_types = Array::Handle();
AbstractType& type = Type::Handle();
const bool is_async =
function().IsAsyncClosure() || function().IsAsyncGenClosure();
handlers = code().exception_handlers();
ASSERT(!handlers.IsNull());
intptr_t num_handlers_checked = 0;
while (try_index != kInvalidTryIndex) {
// Detect circles in the exception handler data.
@ -812,7 +829,8 @@ bool ActivationFrame::HandlesException(const Instance& exc_obj) {
}
// Async functions might have indirect exception handlers in the form of
// `Future.catchError`. Check the Closure's _FutureListeners.
if (fp() != 0 && is_async) {
if ((fp() != 0) &&
(function().IsAsyncClosure() || function().IsAsyncGenClosure())) {
CallerClosureFinder caller_closure_finder(Thread::Current()->zone());
ObjectPtr* last_caller_obj = reinterpret_cast<ObjectPtr*>(GetCallerSp());
Closure& closure = Closure::Handle(
@ -825,6 +843,17 @@ bool ActivationFrame::HandlesException(const Instance& exc_obj) {
futureOrListener =
caller_closure_finder.GetFutureFutureListener(futureOrListener);
return caller_closure_finder.HasCatchError(futureOrListener);
} else if ((fp() != 0) && function().IsCompactAsyncFunction()) {
CallerClosureFinder caller_closure_finder(Thread::Current()->zone());
auto& suspend_state = Object::Handle(GetSuspendStateVar());
if (!suspend_state.IsSuspendState()) {
return false;
}
Object& futureOrListener =
Object::Handle(SuspendState::Cast(suspend_state).function_data());
futureOrListener =
caller_closure_finder.GetFutureFutureListener(futureOrListener);
return caller_closure_finder.HasCatchError(futureOrListener);
}
return false;
@ -1074,6 +1103,19 @@ ClosurePtr ActivationFrame::GetClosure() {
return Closure::Cast(param).ptr();
}
ObjectPtr ActivationFrame::GetSuspendStateVar() {
ASSERT(function().IsSuspendableFunction());
return GetStackVar(VariableIndex(SuspendState::kSuspendStateVarIndex));
}
ObjectPtr ActivationFrame::GetSuspendableFunctionData() {
Object& suspend_state = Object::Handle(GetSuspendStateVar());
if (suspend_state.IsSuspendState()) {
return SuspendState::Cast(suspend_state).function_data();
}
return suspend_state.ptr();
}
ObjectPtr ActivationFrame::GetStackVar(VariableIndex variable_index) {
const intptr_t slot_index =
runtime_frame_layout.FrameSlotForVariableIndex(variable_index.value());
@ -1660,6 +1702,13 @@ bool Debugger::SetupStepOverAsyncSuspension(const char** error) {
}
return false;
}
if (top_frame->function().IsCompactAsyncFunction() ||
top_frame->function().IsCompactAsyncStarFunction()) {
const auto& function_data =
Object::Handle(top_frame->GetSuspendableFunctionData());
SetBreakpointAtResumption(function_data);
return true;
}
Object& closure = Object::Handle(top_frame->GetAsyncOperation());
ASSERT(!closure.IsNull());
ASSERT(closure.IsInstance());
@ -2057,6 +2106,20 @@ DebuggerStackTrace* DebuggerStackTrace::CollectAwaiterReturn() {
if (caller_closure_finder.IsRunningAsync(closure)) {
break;
}
} else if (function.IsCompactAsyncFunction() ||
function.IsCompactAsyncStarFunction()) {
ActivationFrame* activation = CollectDartFrame(
isolate, frame->pc(), frame, code, Object::null_array(), 0,
ActivationFrame::kAsyncActivation);
ASSERT(activation != nullptr);
stack_trace->AddActivation(activation);
stack_has_async_function = true;
// Grab the awaiter.
async_activation ^= activation->GetAsyncAwaiter(&caller_closure_finder);
// Bail if we've reached the end of the synchronous execution stack.
if (Object::Handle(activation->GetSuspendStateVar()).IsSuspendState()) {
break;
}
} else {
stack_trace->AddActivation(CollectDartFrame(
isolate, frame->pc(), frame, code, Object::null_array(), 0));
@ -2094,7 +2157,9 @@ DebuggerStackTrace* DebuggerStackTrace::CollectAwaiterReturn() {
}
intptr_t deopt_frame_offset = it.GetDeoptFpOffset();
if (function.IsAsyncClosure() || function.IsAsyncGenClosure()) {
if (function.IsAsyncClosure() || function.IsAsyncGenClosure() ||
function.IsCompactAsyncFunction() ||
function.IsCompactAsyncStarFunction()) {
ActivationFrame* activation = CollectDartFrame(
isolate, it.pc(), frame, inlined_code, deopt_frame,
deopt_frame_offset, ActivationFrame::kAsyncActivation);
@ -2125,7 +2190,8 @@ DebuggerStackTrace* DebuggerStackTrace::CollectAwaiterReturn() {
// Append the awaiter return call stack.
while (!async_activation.IsNull() &&
async_activation.context() != Object::null()) {
ActivationFrame* activation = new (zone) ActivationFrame(async_activation);
ActivationFrame* activation =
new (zone) ActivationFrame(async_activation, &caller_closure_finder);
if (activation->function().IsAsyncClosure() ||
activation->function().IsAsyncGenClosure()) {
activation->ExtractTokenPositionFromAsyncClosure();
@ -3107,6 +3173,39 @@ Breakpoint* Debugger::BreakpointAtActivation(const Instance& closure) {
return NULL;
}
void Debugger::SetBreakpointAtResumption(const Object& function_data) {
ASSERT(!function_data.IsNull());
ASSERT(function_data.IsInstance());
breakpoints_at_resumption_.Add(function_data.ptr());
isolate_->set_has_resumption_breakpoints(true);
}
void Debugger::ResumptionBreakpoint() {
ASSERT(!breakpoints_at_resumption_.is_empty());
ASSERT(isolate_->has_resumption_breakpoints());
ActivationFrame* top_frame = TopDartFrame();
ASSERT(top_frame->function().IsSuspendableFunction());
const auto& function_data =
Object::Handle(top_frame->GetSuspendableFunctionData());
for (intptr_t i = 0, n = breakpoints_at_resumption_.length(); i < n; ++i) {
if (breakpoints_at_resumption_[i] == function_data.ptr()) {
breakpoints_at_resumption_.RemoveAt(i);
if (breakpoints_at_resumption_.is_empty()) {
isolate_->set_has_resumption_breakpoints(false);
}
if (FLAG_verbose_debug) {
OS::PrintErr(
"ResumptionBreakpoint - hit a breakpoint, continue single "
"stepping\n");
}
EnterSingleStepMode();
return;
}
}
}
Breakpoint* Debugger::SetBreakpointAtLine(const String& script_url,
intptr_t line_number) {
// Prevent future tests from calling this function in the wrong
@ -3352,6 +3451,9 @@ void Debugger::VisitObjectPointers(ObjectPointerVisitor* visitor) {
loc = loc->next();
}
visitor->VisitPointer(reinterpret_cast<ObjectPtr*>(&top_frame_awaiter_));
for (intptr_t i = 0, n = breakpoints_at_resumption_.length(); i < n; ++i) {
visitor->VisitPointer(&breakpoints_at_resumption_[i]);
}
}
void Debugger::Pause(ServiceEvent* event) {
@ -3487,7 +3589,9 @@ void Debugger::HandleSteppingRequest(DebuggerStackTrace* stack_trace,
} else if (resume_action_ == kStepOut) {
if (FLAG_async_debugger) {
if (stack_trace->FrameAt(0)->function().IsAsyncClosure() ||
stack_trace->FrameAt(0)->function().IsAsyncGenClosure()) {
stack_trace->FrameAt(0)->function().IsAsyncGenClosure() ||
stack_trace->FrameAt(0)->function().IsCompactAsyncFunction() ||
stack_trace->FrameAt(0)->function().IsCompactAsyncStarFunction()) {
CallerClosureFinder caller_closure_finder(Thread::Current()->zone());
// Request to step out of an async/async* closure.
const Object& async_op = Object::Handle(
@ -3880,25 +3984,29 @@ void Debugger::SignalPausedEvent(ActivationFrame* top_frame, Breakpoint* bpt) {
static bool IsAtAsyncJump(ActivationFrame* top_frame) {
Zone* zone = Thread::Current()->zone();
Object& closure_or_null =
Object::Handle(zone, top_frame->GetAsyncOperation());
if (!closure_or_null.IsNull()) {
if (!top_frame->function().IsCompactAsyncFunction() &&
!top_frame->function().IsCompactAsyncStarFunction()) {
Object& closure_or_null =
Object::Handle(zone, top_frame->GetAsyncOperation());
if (closure_or_null.IsNull()) {
return false;
}
ASSERT(top_frame->function().IsAsyncClosure() ||
top_frame->function().IsAsyncGenClosure());
ASSERT(closure_or_null.IsInstance());
ASSERT(Instance::Cast(closure_or_null).IsClosure());
const auto& pc_descriptors =
PcDescriptors::Handle(zone, top_frame->code().pc_descriptors());
if (pc_descriptors.IsNull()) {
return false;
}
const TokenPosition looking_for = top_frame->TokenPos();
PcDescriptors::Iterator it(pc_descriptors, UntaggedPcDescriptors::kOther);
while (it.MoveNext()) {
if (it.TokenPos() == looking_for &&
it.YieldIndex() != UntaggedPcDescriptors::kInvalidYieldIndex) {
return true;
}
}
const auto& pc_descriptors =
PcDescriptors::Handle(zone, top_frame->code().pc_descriptors());
if (pc_descriptors.IsNull()) {
return false;
}
const TokenPosition looking_for = top_frame->TokenPos();
PcDescriptors::Iterator it(pc_descriptors, UntaggedPcDescriptors::kOther);
while (it.MoveNext()) {
if (it.TokenPos() == looking_for &&
it.YieldIndex() != UntaggedPcDescriptors::kInvalidYieldIndex) {
return true;
}
}
return false;
@ -3964,7 +4072,8 @@ ErrorPtr Debugger::PauseStepping() {
if (FLAG_lazy_async_stacks) {
// async and async* functions always contain synthetic async_ops.
if ((frame->function().IsAsyncFunction() ||
frame->function().IsAsyncGenerator())) {
frame->function().IsAsyncGenerator()) &&
!frame->function().IsSuspendableFunction()) {
ASSERT(!frame->GetSavedCurrentContext().IsNull());
ASSERT(frame->GetSavedCurrentContext().num_variables() >
Context::kAsyncFutureIndex);
@ -4003,6 +4112,14 @@ ErrorPtr Debugger::PauseStepping() {
return Error::null();
}
// TODO(dartbug.com/48378): Consider aligning async/async* functions with
// regular functions with respect to the first stop in the function prologue.
if ((frame->function().IsCompactAsyncFunction() ||
frame->function().IsCompactAsyncStarFunction()) &&
frame->GetSuspendStateVar() == Object::null()) {
return Error::null();
}
// We are stopping in this frame at the token pos.
last_stepping_fp_ = frame->fp();
last_stepping_pos_ = frame->TokenPos();
@ -4612,7 +4729,18 @@ void Debugger::MaybeAsyncStepInto(const Closure& async_op) {
}
void Debugger::AsyncStepInto(const Closure& async_op) {
SetBreakpointAtActivation(async_op, true);
Zone* zone = Thread::Current()->zone();
CallerClosureFinder caller_closure_finder(zone);
if (caller_closure_finder.IsCompactAsyncCallback(
Function::Handle(zone, async_op.function()))) {
const auto& suspend_state = SuspendState::Handle(
zone, caller_closure_finder.GetSuspendStateFromAsyncCallback(async_op));
const auto& function_data =
Object::Handle(zone, suspend_state.function_data());
SetBreakpointAtResumption(function_data);
} else {
SetBreakpointAtActivation(async_op, true);
}
Continue();
}

View file

@ -326,7 +326,8 @@ class ActivationFrame : public ZoneAllocated {
explicit ActivationFrame(Kind kind);
explicit ActivationFrame(const Closure& async_activation);
ActivationFrame(const Closure& async_activation,
CallerClosureFinder* caller_closure_finder);
uword pc() const { return pc_; }
uword fp() const { return fp_; }
@ -388,6 +389,8 @@ class ActivationFrame : public ZoneAllocated {
const Context& GetSavedCurrentContext();
ObjectPtr GetAsyncOperation();
ObjectPtr GetSuspendStateVar();
ObjectPtr GetSuspendableFunctionData();
TypeArgumentsPtr BuildParameters(
const GrowableObjectArray& param_names,
@ -816,6 +819,13 @@ class Debugger {
// Callback to the debugger to continue frame rewind, post-deoptimization.
void RewindPostDeopt();
// Sets a breakpoint at resumption of a suspendable function
// with the given function data (such as _Future or _AsyncStarStreamController).
void SetBreakpointAtResumption(const Object& function_data);
// Check breakpoints at frame resumption. Called from generated code.
void ResumptionBreakpoint();
private:
ErrorPtr PauseRequest(ServiceEvent::EventKind kind);
@ -968,6 +978,10 @@ class Debugger {
Dart_ExceptionPauseInfo exc_pause_info_;
// Holds function data corresponding to suspendable
// functions which should be stopped when resumed.
MallocGrowableArray<ObjectPtr> breakpoints_at_resumption_;
friend class Isolate;
friend class BreakpointLocation;
DISALLOW_COPY_AND_ASSIGN(Debugger);

View file

@ -580,14 +580,15 @@ static void JumpToExceptionHandler(Thread* thread,
uword frame_pointer,
const Object& exception_object,
const Object& stacktrace_object) {
bool clear_deopt = false;
uword remapped_pc = thread->pending_deopts().RemapExceptionPCForDeopt(
program_counter, frame_pointer);
program_counter, frame_pointer, &clear_deopt);
thread->set_active_exception(exception_object);
thread->set_active_stacktrace(stacktrace_object);
thread->set_resume_pc(remapped_pc);
uword run_exception_pc = StubCode::RunExceptionHandler().EntryPoint();
Exceptions::JumpToFrame(thread, run_exception_pc, stack_pointer,
frame_pointer, false /* do not clear deopt */);
frame_pointer, clear_deopt);
}
NO_SANITIZE_SAFE_STACK // This function manipulates the safestack pointer.

View file

@ -1150,6 +1150,16 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
return OFFSET_OF(Isolate, single_step_);
}
void set_has_resumption_breakpoints(bool value) {
has_resumption_breakpoints_ = value;
}
bool has_resumption_breakpoints() const {
return has_resumption_breakpoints_;
}
static intptr_t has_resumption_breakpoints_offset() {
return OFFSET_OF(Isolate, has_resumption_breakpoints_);
}
bool ResumeRequest() const { return LoadIsolateFlagsBit<ResumeRequestBit>(); }
// Lets the embedder know that a service message resulted in a resume request.
void SetResumeRequest() {
@ -1554,6 +1564,7 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
// shutdown to prevent usage of dangling pointers.
GrowableObjectArrayPtr finalizers_;
bool single_step_ = false;
bool has_resumption_breakpoints_ = false;
bool is_system_isolate_ = false;
// End accessed from generated code.

View file

@ -1882,11 +1882,13 @@ class InvalidationCollector : public ObjectVisitor {
GrowableArray<const Function*>* functions,
GrowableArray<const KernelProgramInfo*>* kernel_infos,
GrowableArray<const Field*>* fields,
GrowableArray<const SuspendState*>* suspend_states,
GrowableArray<const Instance*>* instances)
: zone_(zone),
functions_(functions),
kernel_infos_(kernel_infos),
fields_(fields),
suspend_states_(suspend_states),
instances_(instances) {}
virtual ~InvalidationCollector() {}
@ -1904,6 +1906,12 @@ class InvalidationCollector : public ObjectVisitor {
zone_, static_cast<KernelProgramInfoPtr>(obj)));
} else if (cid == kFieldCid) {
fields_->Add(&Field::Handle(zone_, static_cast<FieldPtr>(obj)));
} else if (cid == kSuspendStateCid) {
const auto& suspend_state =
SuspendState::Handle(zone_, static_cast<SuspendStatePtr>(obj));
if (suspend_state.pc() != 0) {
suspend_states_->Add(&suspend_state);
}
} else if (cid > kNumPredefinedCids) {
instances_->Add(&Instance::Handle(zone_, static_cast<InstancePtr>(obj)));
}
@ -1914,6 +1922,7 @@ class InvalidationCollector : public ObjectVisitor {
GrowableArray<const Function*>* const functions_;
GrowableArray<const KernelProgramInfo*>* const kernel_infos_;
GrowableArray<const Field*>* const fields_;
GrowableArray<const SuspendState*>* const suspend_states_;
GrowableArray<const Instance*>* const instances_;
};
@ -1928,16 +1937,18 @@ void ProgramReloadContext::RunInvalidationVisitors() {
GrowableArray<const Function*> functions(4 * KB);
GrowableArray<const KernelProgramInfo*> kernel_infos(KB);
GrowableArray<const Field*> fields(4 * KB);
GrowableArray<const SuspendState*> suspend_states(4 * KB);
GrowableArray<const Instance*> instances(4 * KB);
{
HeapIterationScope iteration(thread);
InvalidationCollector visitor(zone, &functions, &kernel_infos, &fields,
&instances);
&suspend_states, &instances);
iteration.IterateObjects(&visitor);
}
InvalidateKernelInfos(zone, kernel_infos);
InvalidateSuspendStates(zone, suspend_states);
InvalidateFunctions(zone, functions);
InvalidateFields(zone, fields, instances);
}
@ -2028,6 +2039,55 @@ void ProgramReloadContext::InvalidateFunctions(
}
}
void ProgramReloadContext::InvalidateSuspendStates(
Zone* zone,
const GrowableArray<const SuspendState*>& suspend_states) {
TIMELINE_SCOPE(InvalidateSuspendStates);
auto thread = Thread::Current();
HANDLESCOPE(thread);
CallSiteResetter resetter(zone);
Code& code = Code::Handle(zone);
Function& function = Function::Handle(zone);
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
for (intptr_t i = 0, n = suspend_states.length(); i < n; ++i) {
const SuspendState& suspend_state = *suspend_states[i];
ASSERT(suspend_state.pc() != 0);
code = suspend_state.GetCodeObject();
ASSERT(!code.IsNull());
if (code.is_optimized() && !code.is_force_optimized()) {
function = code.function();
// Before disabling [code], the function needs to
// switch to unoptimized code first.
function.SwitchToLazyCompiledUnoptimizedCode();
// Disable [code] in order to trigger lazy deoptimization.
// Unless [code] is compiled for OSR, it may already be
// disabled in SwitchToLazyCompiledUnoptimizedCode.
if (!code.IsDisabled()) {
code.DisableDartCode();
}
// Reset switchable calls and caches for unoptimized
// code (if any), as it is going to be used to continue
// execution of the suspended function.
code = function.unoptimized_code();
if (!code.IsNull()) {
resetter.ResetSwitchableCalls(code);
resetter.ResetCaches(code);
}
} else {
function = code.function();
// ResetSwitchableCalls uses the ICData array, which
// may have been cleared along with the code in InvalidateFunctions
// during previous hot reloads.
// Rebuild unoptimized code in order to recreate the ICData array.
function.EnsureHasCompiledUnoptimizedCode();
resetter.ResetSwitchableCalls(code);
resetter.ResetCaches(code);
}
}
}
// Finds fields that are initialized or have a value that does not conform to
// the field's static type, setting Field::needs_load_guard(). Accessors for
// such fields are compiled with additional checks to handle lazy initialization

View file

@ -281,7 +281,6 @@ class IsolateGroupReloadContext {
friend class Class; // AddStaticFieldMapping, AddEnumBecomeMapping.
friend class Library;
friend class ObjectLocator;
friend class MarkFunctionsForRecompilation; // IsDirty.
friend class ReasonForCancelling;
friend class ProgramReloadContext;
friend class IsolateGroup; // GetClassSizeForHeapWalkAt
@ -356,6 +355,9 @@ class ProgramReloadContext {
const GrowableArray<const KernelProgramInfo*>& kernel_infos);
void InvalidateFunctions(Zone* zone,
const GrowableArray<const Function*>& functions);
void InvalidateSuspendStates(
Zone* zone,
const GrowableArray<const SuspendState*>& suspend_states);
void InvalidateFields(Zone* zone,
const GrowableArray<const Field*>& fields,
const GrowableArray<const Instance*>& instances);
@ -411,7 +413,6 @@ class ProgramReloadContext {
friend class Class; // AddStaticFieldMapping, AddEnumBecomeMapping.
friend class Library;
friend class ObjectLocator;
friend class MarkFunctionsForRecompilation; // IsDirty.
friend class ReasonForCancelling;
friend class IsolateGroupReloadContext;
};

View file

@ -2037,9 +2037,6 @@ void KernelLoader::LoadProcedure(const Library& library,
FunctionNodeHelper function_node_helper(&helper_);
function_node_helper.ReadUntilIncluding(FunctionNodeHelper::kDartAsyncMarker);
if (function_node_helper.async_marker_ == FunctionNodeHelper::kAsync) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async functions are only supported in AOT mode.");
}
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_debuggable(true);
function.set_is_inlinable(false);
@ -2047,9 +2044,6 @@ void KernelLoader::LoadProcedure(const Library& library,
ASSERT(function.IsCompactAsyncFunction());
} else if (function_node_helper.async_marker_ ==
FunctionNodeHelper::kAsyncStar) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async* functions are only supported in AOT mode.");
}
function.set_modifier(UntaggedFunction::kAsyncGen);
function.set_is_debuggable(true);
function.set_is_inlinable(false);

View file

@ -26055,12 +26055,22 @@ SuspendStatePtr SuspendState::New(intptr_t frame_size,
const Instance& function_data,
Heap::Space space) {
SuspendState& result = SuspendState::Handle();
const intptr_t instance_size = SuspendState::InstanceSize(
frame_size + SuspendState::FrameSizeGrowthGap());
{
ObjectPtr raw = Object::Allocate(
SuspendState::kClassId, SuspendState::InstanceSize(frame_size), space,
SuspendState::ContainsCompressedPointers());
ObjectPtr raw =
Object::Allocate(SuspendState::kClassId, instance_size, space,
SuspendState::ContainsCompressedPointers());
NoSafepointScope no_safepoint;
result ^= raw;
#if !defined(DART_PRECOMPILED_RUNTIME)
// Include heap object alignment overhead into the frame capacity.
const intptr_t frame_capacity =
instance_size - SuspendState::payload_offset();
ASSERT(SuspendState::InstanceSize(frame_capacity) == instance_size);
ASSERT(frame_size <= frame_capacity);
result.set_frame_capacity(frame_capacity);
#endif
result.set_frame_size(frame_size);
result.set_pc(0);
result.set_function_data(function_data);
@ -26068,6 +26078,13 @@ SuspendStatePtr SuspendState::New(intptr_t frame_size,
return result.ptr();
}
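A minimal standalone sketch (not part of the diff) of the capacity computation asserted above; kObjectAlignment, kHeaderSize, and the values used in the check are assumed placeholders, not the VM's real constants.
#include <cstdint>
// Assumed placeholders; the real values come from the VM's target configuration.
constexpr std::intptr_t kObjectAlignment = 16;
constexpr std::intptr_t kHeaderSize = 32;  // stands in for SuspendState::payload_offset()
constexpr std::intptr_t RoundedAllocationSize(std::intptr_t unrounded) {
  return (unrounded + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
}
// Everything past the header of the rounded allocation becomes usable frame
// capacity, so capacity >= frame_size + growth_gap and rounding the allocation
// up does not waste the slack.
constexpr std::intptr_t FrameCapacity(std::intptr_t frame_size,
                                      std::intptr_t growth_gap) {
  return RoundedAllocationSize(kHeaderSize + frame_size + growth_gap) - kHeaderSize;
}
static_assert(FrameCapacity(/*frame_size=*/40, /*growth_gap=*/16) >= 40 + 16,
              "alignment slack is folded into the capacity");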
#if !defined(DART_PRECOMPILED_RUNTIME)
void SuspendState::set_frame_capacity(intptr_t frame_capacity) const {
ASSERT(frame_capacity >= 0);
StoreNonPointer(&untag()->frame_capacity_, frame_capacity);
}
#endif
void SuspendState::set_frame_size(intptr_t frame_size) const {
ASSERT(frame_size >= 0);
StoreNonPointer(&untag()->frame_size_, frame_size);
@ -26094,8 +26111,10 @@ CodePtr SuspendState::GetCodeObject() const {
ASSERT(code != Code::null());
return code;
#else
UNIMPLEMENTED();
return Code::null();
ObjectPtr code = *(reinterpret_cast<ObjectPtr*>(
untag()->payload() + untag()->frame_size_ +
runtime_frame_layout.code_from_fp * kWordSize));
return Code::RawCast(code);
#endif // defined(DART_PRECOMPILED_RUNTIME)
}
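For the JIT branch added above, a toy model (not part of the diff) of the address arithmetic: the copied payload is laid out so that offset frame_size_ corresponds to the suspended frame's FP, so FP-relative slots such as the saved Code pointer can be read from the copy at their usual offsets. kCodeFromFp below is an assumed slot index standing in for runtime_frame_layout.code_from_fp.
#include <cstddef>
#include <cstring>
constexpr std::ptrdiff_t kWordSize = sizeof(void*);
constexpr std::ptrdiff_t kCodeFromFp = 2;  // assumed slot index for illustration
// Reads the word stored at (payload + frame_size) + kCodeFromFp * kWordSize,
// i.e. the same computation GetCodeObject() performs on the copied frame.
const void* SavedCodeFromCopiedFrame(const unsigned char* payload,
                                     std::ptrdiff_t frame_size) {
  const void* code = nullptr;
  std::memcpy(&code, payload + frame_size + kCodeFromFp * kWordSize, sizeof(code));
  return code;
}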

View file

@ -6404,9 +6404,14 @@ class Code : public Object {
return code->untag()->instructions();
}
static intptr_t saved_instructions_offset() {
static intptr_t instructions_offset() {
return OFFSET_OF(UntaggedCode, instructions_);
}
#if !defined(DART_PRECOMPILED_RUNTIME)
static intptr_t active_instructions_offset() {
return OFFSET_OF(UntaggedCode, active_instructions_);
}
#endif
using EntryKind = CodeEntryKind;
@ -11813,20 +11818,31 @@ class SuspendState : public Instance {
static intptr_t HeaderSize() { return sizeof(UntaggedSuspendState); }
static intptr_t UnroundedSize(SuspendStatePtr ptr) {
return UnroundedSize(ptr->untag()->frame_size_);
return UnroundedSize(ptr->untag()->frame_capacity());
}
static intptr_t UnroundedSize(intptr_t frame_size) {
return HeaderSize() + frame_size;
static intptr_t UnroundedSize(intptr_t frame_capacity) {
return HeaderSize() + frame_capacity;
}
static intptr_t InstanceSize() {
ASSERT_EQUAL(sizeof(UntaggedSuspendState),
OFFSET_OF_RETURNED_VALUE(UntaggedSuspendState, payload));
return 0;
}
static intptr_t InstanceSize(intptr_t frame_size) {
return RoundedAllocationSize(UnroundedSize(frame_size));
static intptr_t InstanceSize(intptr_t frame_capacity) {
return RoundedAllocationSize(UnroundedSize(frame_capacity));
}
// Number of extra words reserved for growth of frame size
// during SuspendState allocation. Frames do not grow in AOT.
static intptr_t FrameSizeGrowthGap() {
return ONLY_IN_PRECOMPILED(0) NOT_IN_PRECOMPILED(2);
}
#if !defined(DART_PRECOMPILED_RUNTIME)
static intptr_t frame_capacity_offset() {
return OFFSET_OF(UntaggedSuspendState, frame_capacity_);
}
#endif
static intptr_t frame_size_offset() {
return OFFSET_OF(UntaggedSuspendState, frame_size_);
}
@ -11848,16 +11864,23 @@ class SuspendState : public Instance {
const Instance& function_data,
Heap::Space space = Heap::kNew);
InstancePtr function_data() const { return untag()->function_data(); }
uword pc() const { return untag()->pc_; }
InstancePtr function_data() const {
return untag()->function_data();
}
// Returns Code object corresponding to the suspended function.
CodePtr GetCodeObject() const;
private:
#if !defined(DART_PRECOMPILED_RUNTIME)
void set_frame_capacity(intptr_t frame_capacity) const;
#endif
void set_frame_size(intptr_t frame_size) const;
void set_pc(uword pc) const;
void set_function_data(const Instance& function_data) const;
void set_then_callback(const Closure& then_callback) const;
void set_error_callback(const Closure& error_callback) const;
FINAL_HEAP_OBJECT_IMPLEMENTATION(SuspendState, Instance);
friend class Class;

View file

@ -146,7 +146,8 @@ void CallSiteResetter::ResetSwitchableCalls(const Code& code) {
descriptors_ = code.pc_descriptors();
PcDescriptors::Iterator iter(descriptors_, UntaggedPcDescriptors::kIcCall);
while (iter.MoveNext()) {
FATAL1("%s has IC calls but no ic_data_array\n", object_.ToCString());
FATAL1("%s has IC calls but no ic_data_array\n",
function.ToFullyQualifiedCString());
}
#endif
return;

View file

@ -250,6 +250,7 @@ void ObjectStore::InitKnownObjects() {
String& function_name = String::Handle(zone);
Function& function = Function::Handle(zone);
Field& field = Field::Handle(zone);
function_name = async_lib.PrivateName(Symbols::AsyncStarMoveNextHelper());
ASSERT(!function_name.IsNull());
@ -297,6 +298,10 @@ void ObjectStore::InitKnownObjects() {
ASSERT(!function.IsNull());
set_async_star_stream_controller_add_stream(function);
field = cls.LookupFieldAllowPrivate(Symbols::asyncStarBody());
ASSERT(!field.IsNull());
set_async_star_stream_controller_async_star_body(field);
if (FLAG_async_debugger) {
// Disable debugging and inlining of all functions on the
// _AsyncStarStreamController class.

View file

@ -180,6 +180,7 @@ class ObjectPointerVisitor;
RW(Function, suspend_state_handle_exception) \
RW(Class, async_star_stream_controller) \
RW(Class, stream_class) \
RW(Field, async_star_stream_controller_async_star_body) \
ARW_RELAXED(Smi, future_timeout_future_index) \
ARW_RELAXED(Smi, future_wait_future_index) \
RW(CompressedStackMaps, canonicalized_stack_map_entries) \

View file

@ -73,7 +73,17 @@ void PendingDeopts::ClearPendingDeoptsAtOrBelow(uword fp, ClearReason reason) {
}
uword PendingDeopts::RemapExceptionPCForDeopt(uword program_counter,
uword frame_pointer) {
uword frame_pointer,
bool* clear_deopt) {
*clear_deopt = false;
// Do not attempt to deopt at the async exception handler as it doesn't
// belong to the function's code. The async handler never continues execution
// in the same frame - it either rethrows the exception to the caller or
// tail-calls the Dart handler, leaving the function frame before the call.
if (program_counter == StubCode::AsyncExceptionHandler().EntryPoint()) {
*clear_deopt = true;
return program_counter;
}
// Check if the target frame is scheduled for lazy deopt.
for (intptr_t i = 0; i < pending_deopts_->length(); i++) {
if ((*pending_deopts_)[i].fp() == frame_pointer) {

View file

@ -40,7 +40,9 @@ class PendingDeopts {
uword FindPendingDeopt(uword fp);
void ClearPendingDeoptsBelow(uword fp, ClearReason reason);
void ClearPendingDeoptsAtOrBelow(uword fp, ClearReason reason);
uword RemapExceptionPCForDeopt(uword program_counter, uword frame_pointer);
uword RemapExceptionPCForDeopt(uword program_counter,
uword frame_pointer,
bool* clear_deopt);
private:
MallocGrowableArray<PendingLazyDeopt>* pending_deopts_;

View file

@ -174,8 +174,8 @@ intptr_t UntaggedObject::HeapSizeFromClass(uword tags) const {
case kSuspendStateCid: {
const SuspendStatePtr raw_suspend_state =
static_cast<const SuspendStatePtr>(this);
intptr_t frame_size = raw_suspend_state->untag()->frame_size_;
instance_size = SuspendState::InstanceSize(frame_size);
intptr_t frame_capacity = raw_suspend_state->untag()->frame_capacity();
instance_size = SuspendState::InstanceSize(frame_capacity);
break;
}
case kTypeArgumentsCid: {
@ -658,7 +658,7 @@ intptr_t UntaggedSuspendState::VisitSuspendStatePointers(
}
}
return SuspendState::InstanceSize(raw_obj->untag()->frame_size_);
return SuspendState::InstanceSize(raw_obj->untag()->frame_capacity());
}
bool UntaggedCode::ContainsPC(const ObjectPtr raw_obj, uword pc) {

View file

@ -3307,6 +3307,7 @@ class UntaggedStackTrace : public UntaggedInstance {
class UntaggedSuspendState : public UntaggedInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(SuspendState);
NOT_IN_PRECOMPILED(intptr_t frame_capacity_);
intptr_t frame_size_;
uword pc_;
@ -3324,6 +3325,14 @@ class UntaggedSuspendState : public UntaggedInstance {
public:
uword pc() const { return pc_; }
intptr_t frame_capacity() const {
#if defined(DART_PRECOMPILED_RUNTIME)
return frame_size_;
#else
return frame_capacity_;
#endif
}
static intptr_t payload_offset() {
return OFFSET_OF_RETURNED_VALUE(UntaggedSuspendState, payload);
}

View file

@ -711,15 +711,36 @@ DEFINE_RUNTIME_ENTRY(CloneContext, 1) {
// Allocate a SuspendState object.
// Arg0: frame size.
// Arg1: function data.
// Arg1: existing SuspendState object or function data.
// Return value: newly allocated object.
DEFINE_RUNTIME_ENTRY(AllocateSuspendState, 2) {
const Smi& frame_size = Smi::CheckedHandle(zone, arguments.ArgAt(0));
const Instance& function_data =
Instance::CheckedHandle(zone, arguments.ArgAt(1));
const SuspendState& result = SuspendState::Handle(
zone, SuspendState::New(frame_size.Value(), function_data,
SpaceForRuntimeAllocation()));
const intptr_t frame_size =
Smi::CheckedHandle(zone, arguments.ArgAt(0)).Value();
const Object& previous_state = Object::Handle(zone, arguments.ArgAt(1));
SuspendState& result = SuspendState::Handle(zone);
if (previous_state.IsSuspendState()) {
const auto& suspend_state = SuspendState::Cast(previous_state);
const auto& function_data =
Instance::Handle(zone, suspend_state.function_data());
ObjectStore* object_store = thread->isolate_group()->object_store();
if (function_data.GetClassId() ==
Class::Handle(zone, object_store->async_star_stream_controller())
.id()) {
// Reset _AsyncStarStreamController.asyncStarBody to null in order
// to create a new callback closure during the next yield.
// The new callback closure will capture the reallocated SuspendState.
function_data.SetField(
Field::Handle(
zone,
object_store->async_star_stream_controller_async_star_body()),
Object::null_object());
}
result = SuspendState::New(frame_size, function_data,
SpaceForRuntimeAllocation());
} else {
result = SuspendState::New(frame_size, Instance::Cast(previous_state),
SpaceForRuntimeAllocation());
}
arguments.SetReturn(result);
}
@ -3111,6 +3132,20 @@ const char* DeoptReasonToCString(ICData::DeoptReasonId deopt_reason) {
}
}
static bool IsSuspendedFrame(Zone* zone,
const Function& function,
StackFrame* frame) {
if (!function.IsSuspendableFunction()) {
return false;
}
auto& suspend_state = Object::Handle(
zone, *reinterpret_cast<ObjectPtr*>(LocalVarAddress(
frame->fp(), runtime_frame_layout.FrameSlotForVariableIndex(
SuspendState::kSuspendStateVarIndex))));
return suspend_state.IsSuspendState() &&
(SuspendState::Cast(suspend_state).pc() != 0);
}
void DeoptimizeAt(Thread* mutator_thread,
const Code& optimized_code,
StackFrame* frame) {
@ -3136,7 +3171,12 @@ void DeoptimizeAt(Thread* mutator_thread,
function.SwitchToUnoptimizedCode();
}
if (frame->IsMarkedForLazyDeopt()) {
if (IsSuspendedFrame(zone, function, frame)) {
// Frame is suspended and going to be removed from the stack.
if (FLAG_trace_deoptimization) {
THR_Print("Not deoptimizing suspended frame, fp=%" Pp "\n", frame->fp());
}
} else if (frame->IsMarkedForLazyDeopt()) {
// Deopt already scheduled.
if (FLAG_trace_deoptimization) {
THR_Print("Lazy deopt already scheduled for fp=%" Pp "\n", frame->fp());
@ -3408,6 +3448,52 @@ DEFINE_RUNTIME_ENTRY(RewindPostDeopt, 0) {
UNREACHABLE();
}
// Handle slow path actions for the resumed frame after it was
// copied back to the stack:
// 1) deoptimization;
// 2) breakpoint at resumption;
// 3) throwing an exception.
//
// Arg0: exception
// Arg1: stack trace
DEFINE_RUNTIME_ENTRY(ResumeFrame, 2) {
const Instance& exception = Instance::CheckedHandle(zone, arguments.ArgAt(0));
const Instance& stacktrace =
Instance::CheckedHandle(zone, arguments.ArgAt(1));
#if !defined(DART_PRECOMPILED_RUNTIME)
#if !defined(PRODUCT)
if (isolate->has_resumption_breakpoints()) {
isolate->debugger()->ResumptionBreakpoint();
}
#endif
DartFrameIterator iterator(thread,
StackFrameIterator::kNoCrossThreadIteration);
StackFrame* frame = iterator.NextFrame();
ASSERT(frame->IsDartFrame());
ASSERT(Function::Handle(zone, frame->LookupDartFunction())
.IsSuspendableFunction());
const Code& caller_code = Code::Handle(zone, frame->LookupDartCode());
if (caller_code.IsDisabled() && caller_code.is_optimized() &&
!caller_code.is_force_optimized()) {
const uword deopt_pc = frame->pc();
thread->pending_deopts().AddPendingDeopt(frame->fp(), deopt_pc);
frame->MarkForLazyDeopt();
if (FLAG_trace_deoptimization) {
THR_Print("Lazy deopt scheduled for resumed frame fp=%" Pp ", pc=%" Pp
"\n",
frame->fp(), deopt_pc);
}
}
#endif
if (!exception.IsNull()) {
Exceptions::ReThrow(thread, exception, stacktrace);
}
}
void OnEveryRuntimeEntryCall(Thread* thread,
const char* runtime_call_name,
bool can_lazy_deopt) {

View file

@ -62,6 +62,7 @@ namespace dart {
V(LateFieldAssignedDuringInitializationError) \
V(LateFieldNotInitializedError) \
V(CompileFunction) \
V(ResumeFrame) \
V(SwitchableCallMiss) \
V(NotLoaded)

View file

@ -457,6 +457,7 @@ class ObjectPointerVisitor;
V(_yieldAsyncStar, "_yieldAsyncStar") \
V(add, "add") \
V(addStream, "addStream") \
V(asyncStarBody, "asyncStarBody") \
V(callback, "callback") \
V(capture_length, ":capture_length") \
V(capture_start_index, ":capture_start_index") \

View file

@ -129,6 +129,14 @@ class Thread;
StubCode::AllocateObjectParameterized().ptr(), nullptr) \
V(CodePtr, allocate_object_slow_stub_, StubCode::AllocateObjectSlow().ptr(), \
nullptr) \
V(CodePtr, async_exception_handler_stub_, \
StubCode::AsyncExceptionHandler().ptr(), nullptr) \
V(CodePtr, resume_stub_, StubCode::Resume().ptr(), nullptr) \
V(CodePtr, return_async_stub_, StubCode::ReturnAsync().ptr(), nullptr) \
V(CodePtr, return_async_not_future_stub_, \
StubCode::ReturnAsyncNotFuture().ptr(), nullptr) \
V(CodePtr, return_async_star_stub_, StubCode::ReturnAsyncStar().ptr(), \
nullptr) \
V(CodePtr, stack_overflow_shared_without_fpu_regs_stub_, \
StubCode::StackOverflowSharedWithoutFPURegs().ptr(), nullptr) \
V(CodePtr, stack_overflow_shared_with_fpu_regs_stub_, \

View file

@ -1140,6 +1140,8 @@ ISOLATE_UNIT_TEST_CASE(TTS_Future) {
(() => 3) as int Function()?;
)";
SetupCoreLibrariesForUnitTest();
const auto& class_future =
Class::Handle(IsolateGroup::Current()->object_store()->future_class());

View file

@ -110,6 +110,7 @@ void _asyncStarMoveNextHelper(var stream) {
class _AsyncStarStreamController<T> {
@pragma("vm:entry-point")
StreamController<T> controller;
@pragma("vm:entry-point")
Function? asyncStarBody;
bool isAdding = false;
bool onListenReceived = false;

View file

@ -2,7 +2,8 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
library async_await_test;
// VMOptions=
// VMOptions=--optimization-counter-threshold=5
import "package:expect/expect.dart";
import "package:async_helper/async_helper.dart";

View file

@ -2,6 +2,7 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// VMOptions=
// VMOptions=--optimization-counter-threshold=5
import 'package:expect/expect.dart';

View file

@ -4,7 +4,8 @@
// @dart = 2.9
library async_await_test;
// VMOptions=
// VMOptions=--optimization-counter-threshold=5
import "package:expect/expect.dart";
import "package:async_helper/async_helper.dart";

View file

@ -4,6 +4,7 @@
// @dart = 2.9
// VMOptions=
// VMOptions=--optimization-counter-threshold=5
import 'package:expect/expect.dart';