[vm] New async/await implementation in the VM, part 2

The new implementation moves away from desugaring async functions in
the kernel AST, generating a state machine in the flow graph, and
capturing all local variables in the context.

Instead, async/await is implemented using a few stubs
(InitSuspendableFunction, Suspend, Resume, Return and
AsyncExceptionHandler). The stubs are implemented in a
platform-independent way using (macro-)assembler helpers.
When a function is suspended, its frame is copied into a SuspendState
object; when it is resumed, the frame is copied back onto the stack.
No extra code is generated for accessing local variables.
Callback closures are created lazily on the first await.
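As a minimal sketch (not part of this CL), the lowering described above can be
pictured on a trivial async function; the function name below is hypothetical,
and the comments only indicate where the stubs listed above come into play:

// Hypothetical example, for illustration of the stub-based scheme only.
Future<int> addLater(Future<int> x) async {
  // Prologue: the synthetic :suspend_state slot is initialized and the
  // InitSuspendableFunction stub prepares the suspendable frame.
  final value = await x; // Suspend copies the frame into a SuspendState
                         // object; Resume later copies it back onto the
                         // stack, so locals need no extra access code.
  return value + 1;      // Return completes the function's future.
}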

Design doc: go/compact-async-await.

Part 1 (kernel): https://dart-review.googlesource.com/c/sdk/+/241842

TEST=ci

Issue: https://github.com/dart-lang/sdk/issues/48378
Change-Id: Ibad757035b7cc438ebdff80b460728b1d3eff1f5
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/242000
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
Alexander Markov 2022-04-29 01:03:50 +00:00 committed by Commit Bot
parent fcc9169885
commit bf4bb95308
112 changed files with 3730 additions and 854 deletions


@ -250,6 +250,7 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
MESSAGE_SNAPSHOT_ILLEGAL(Pointer);
MESSAGE_SNAPSHOT_ILLEGAL(ReceivePort);
MESSAGE_SNAPSHOT_ILLEGAL(UserTag);
MESSAGE_SNAPSHOT_ILLEGAL(SuspendState);
default:
if (cid >= kNumPredefinedCids) {


@ -3061,7 +3061,7 @@ class ExceptionHandlersSerializationCluster : public SerializationCluster {
ExceptionHandlersPtr handlers = objects_[i];
s->AssignRef(handlers);
AutoTraceObject(handlers);
const intptr_t length = handlers->untag()->num_entries_;
const intptr_t length = handlers->untag()->num_entries();
s->WriteUnsigned(length);
target_memory_size_ +=
compiler::target::ExceptionHandlers::InstanceSize(length);
@ -3073,8 +3073,10 @@ class ExceptionHandlersSerializationCluster : public SerializationCluster {
for (intptr_t i = 0; i < count; i++) {
ExceptionHandlersPtr handlers = objects_[i];
AutoTraceObject(handlers);
const intptr_t length = handlers->untag()->num_entries_;
s->WriteUnsigned(length);
const intptr_t packed_fields = handlers->untag()->packed_fields_;
const intptr_t length =
UntaggedExceptionHandlers::NumEntriesBits::decode(packed_fields);
s->WriteUnsigned(packed_fields);
WriteCompressedField(handlers, handled_types_data);
for (intptr_t j = 0; j < length; j++) {
const ExceptionHandlerInfo& info = handlers->untag()->data()[j];
@ -3117,10 +3119,12 @@ class ExceptionHandlersDeserializationCluster : public DeserializationCluster {
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
ExceptionHandlersPtr handlers =
static_cast<ExceptionHandlersPtr>(d.Ref(id));
const intptr_t length = d.ReadUnsigned();
const intptr_t packed_fields = d.ReadUnsigned();
const intptr_t length =
UntaggedExceptionHandlers::NumEntriesBits::decode(packed_fields);
Deserializer::InitializeHeader(handlers, kExceptionHandlersCid,
ExceptionHandlers::InstanceSize(length));
handlers->untag()->num_entries_ = length;
handlers->untag()->packed_fields_ = packed_fields;
handlers->untag()->handled_types_data_ =
static_cast<ArrayPtr>(d.ReadRef());
for (intptr_t j = 0; j < length; j++) {
@ -5683,6 +5687,8 @@ class VMSerializationRoots : public SerializationRoots {
"LocalVarDescriptors", "<empty>");
s->AddBaseObject(Object::empty_exception_handlers().ptr(),
"ExceptionHandlers", "<empty>");
s->AddBaseObject(Object::empty_async_exception_handlers().ptr(),
"ExceptionHandlers", "<empty async>");
for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
s->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i],
@ -5794,6 +5800,7 @@ class VMDeserializationRoots : public DeserializationRoots {
d->AddBaseObject(Object::empty_descriptors().ptr());
d->AddBaseObject(Object::empty_var_descriptors().ptr());
d->AddBaseObject(Object::empty_exception_handlers().ptr());
d->AddBaseObject(Object::empty_async_exception_handlers().ptr());
for (intptr_t i = 0; i < ArgumentsDescriptor::kCachedDescriptorCount; i++) {
d->AddBaseObject(ArgumentsDescriptor::cached_args_descriptors_[i]);


@ -95,6 +95,7 @@ typedef uint16_t ClassIdTagType;
V(ReceivePort) \
V(SendPort) \
V(StackTrace) \
V(SuspendState) \
V(RegExp) \
V(WeakProperty) \
V(WeakReference) \


@ -125,10 +125,12 @@ ExceptionHandlersPtr ExceptionHandlerList::FinalizeExceptionHandlers(
uword entry_point) const {
intptr_t num_handlers = Length();
if (num_handlers == 0) {
return Object::empty_exception_handlers().ptr();
return has_async_handler_ ? Object::empty_async_exception_handlers().ptr()
: Object::empty_exception_handlers().ptr();
}
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(ExceptionHandlers::New(num_handlers));
handlers.set_has_async_handler(has_async_handler_);
for (intptr_t i = 0; i < num_handlers; i++) {
// Assert that every element in the array has been initialized.
if (list_[i].handler_types == NULL) {


@ -76,7 +76,8 @@ class ExceptionHandlerList : public ZoneAllocated {
bool needs_stacktrace;
};
ExceptionHandlerList() : list_() {}
explicit ExceptionHandlerList(const Function& function)
: list_(), has_async_handler_(function.IsCompactAsyncFunction()) {}
intptr_t Length() const { return list_.length(); }
@ -137,6 +138,7 @@ class ExceptionHandlerList : public ZoneAllocated {
private:
GrowableArray<struct HandlerDesc> list_;
const bool has_async_handler_;
DISALLOW_COPY_AND_ASSIGN(ExceptionHandlerList);
};


@ -453,7 +453,7 @@ void DispatchTableGenerator::NumberSelectors() {
if (function.IsDynamicFunction(/*allow_abstract=*/false)) {
const bool on_null_interface = klass.IsObjectClass();
const bool requires_args_descriptor =
function.IsGeneric() || function.HasOptionalParameters();
function.PrologueNeedsArgumentsDescriptor();
// Get assigned selector ID for this function.
const int32_t sid = selector_map_.SelectorId(function);
if (sid == SelectorMap::kInvalidSelectorId) {


@ -1592,8 +1592,7 @@ static void TryAllocateString(Assembler* assembler,
__ cmp(length_reg, Operand(0));
__ b(failure, LT);
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R0, cid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R0, failure));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
__ mov(R8, Operand(length_reg)); // Save the length register.
if (cid == kOneByteStringCid) {
__ SmiUntag(length_reg);


@ -1819,7 +1819,7 @@ static void TryAllocateString(Assembler* assembler,
// negative length: call to runtime to produce error.
__ tbnz(failure, length_reg, compiler::target::kBitsPerWord - 1);
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R0, failure));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
__ mov(R6, length_reg); // Save the length register.
if (cid == kOneByteStringCid) {
// Untag length.


@ -1626,8 +1626,7 @@ static void TryAllocateString(Assembler* assembler,
__ cmpl(length_reg, Immediate(0));
__ j(LESS, failure);
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(cid, EAX, failure, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, EAX));
if (length_reg != EDI) {
__ movl(EDI, length_reg);
}


@ -1510,7 +1510,7 @@ static void TryAllocateString(Assembler* assembler,
// negative length: call to runtime to produce error.
__ bltz(length_reg, failure);
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, TMP, failure));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, TMP));
__ mv(T0, length_reg); // Save the length register.
if (cid == kOneByteStringCid) {
// Untag length.


@ -1702,7 +1702,7 @@ static void TryAllocateString(Assembler* assembler,
__ cmpq(length_reg, Immediate(0));
__ j(LESS, failure);
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure));
if (length_reg != RDI) {
__ movq(RDI, length_reg);
}


@ -3161,6 +3161,19 @@ void Assembler::AndImmediate(Register rd,
}
}
void Assembler::OrImmediate(Register rd,
Register rs,
int32_t imm,
Condition cond) {
Operand o;
if (Operand::CanHold(imm, &o)) {
orr(rd, rs, Operand(o), cond);
} else {
LoadImmediate(TMP, imm, cond);
orr(rd, rs, Operand(TMP), cond);
}
}
void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
Operand o;
if (Operand::CanHold(value, &o)) {
@ -3516,6 +3529,14 @@ void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
b(trace, NE);
}
void Assembler::MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
JumpDistance distance) {
LoadAllocationStatsAddress(temp_reg, cid);
MaybeTraceAllocation(temp_reg, trace);
}
void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) {
ASSERT(dest != kNoRegister);
ASSERT(dest != TMP);
@ -3623,6 +3644,21 @@ void Assembler::TryAllocateArray(intptr_t cid,
}
}
void Assembler::CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp) {
Label loop, done;
__ cmp(size, Operand(0));
__ b(&done, EQUAL);
__ Bind(&loop);
__ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
__ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
__ subs(size, size, Operand(target::kWordSize));
__ b(&loop, NOT_ZERO);
__ Bind(&done);
}
void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
intptr_t offset_into_target) {
// Emit "blr.cond <offset>".


@ -385,6 +385,8 @@ class Assembler : public AssemblerBase {
void Bind(Label* label);
// Unconditional jump to a given label. [distance] is ignored on ARM.
void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
// Unconditional jump to a given address in register.
void Jump(Register target) { bx(target); }
// Unconditional jump to a given address in memory.
void Jump(const Address& address) { Branch(address); }
@ -415,9 +417,6 @@ class Assembler : public AssemblerBase {
// We don't run TSAN bots on 32 bit.
}
void CompareWithFieldValue(Register value, FieldAddress address) {
CompareWithMemoryValue(value, address);
}
void CompareWithCompressedFieldFromOffset(Register value,
Register base,
int32_t offset) {
@ -818,6 +817,9 @@ class Assembler : public AssemblerBase {
Register rn,
int32_t value,
Condition cond = AL);
void AddRegisters(Register dest, Register src) {
add(dest, dest, Operand(src));
}
void SubImmediate(Register rd,
Register rn,
int32_t value,
@ -826,7 +828,24 @@ class Assembler : public AssemblerBase {
Register rn,
int32_t value,
Condition cond = AL);
void SubRegisters(Register dest, Register src) {
sub(dest, dest, Operand(src));
}
void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
void AndImmediate(Register rd, int32_t imm, Condition cond = AL) {
AndImmediate(rd, rd, imm, cond);
}
void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
void OrImmediate(Register rd, int32_t imm, Condition cond = AL) {
OrImmediate(rd, rd, imm, cond);
}
void LslImmediate(Register rd, Register rn, int32_t shift) {
ASSERT((shift >= 0) && (shift < kBitsPerInt32));
Lsl(rd, rn, Operand(shift));
}
void LslImmediate(Register rd, int32_t shift) {
LslImmediate(rd, rd, shift);
}
// Test rn and immediate. May clobber IP.
void TestImmediate(Register rn, int32_t imm, Condition cond = AL);
@ -1051,6 +1070,10 @@ class Assembler : public AssemblerBase {
Condition cond = AL) {
StoreToOffset(reg, base, offset - kHeapObjectTag, type, cond);
}
void StoreZero(const Address& address, Register temp) {
mov(temp, Operand(0));
str(temp, address);
}
void LoadSFromOffset(SRegister reg,
Register base,
int32_t offset,
@ -1137,6 +1160,14 @@ class Assembler : public AssemblerBase {
cmp(rn, Operand(0));
b(label, ZERO);
}
void BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance = kFarJump) {
tst(rn, Operand(1 << bit_number));
b(label, condition);
}
void MoveRegister(Register rd, Register rm, Condition cond) {
ExtendValue(rd, rm, kFourBytes, cond);
@ -1368,6 +1399,13 @@ class Assembler : public AssemblerBase {
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(Register stats_addr_reg, Label* trace);
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
JumpDistance distance = JumpDistance::kFarJump);
void TryAllocateObject(intptr_t cid,
intptr_t instance_size,
Label* failure,
@ -1383,6 +1421,14 @@ class Assembler : public AssemblerBase {
Register temp1,
Register temp2);
// Copy [size] bytes from [src] address to [dst] address.
// [size] should be a multiple of word size.
// Clobbers [src], [dst], [size] and [temp] registers.
void CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp);
// This emits an PC-relative call of the form "blr.<cond> <offset>". The
// offset is not yet known and needs therefore relocation to the right place
// before the code can be used.


@ -1998,8 +1998,9 @@ void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
#ifndef PRODUCT
void Assembler::MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
Label* trace) {
JumpDistance distance) {
ASSERT(cid > 0);
const intptr_t shared_table_offset =
@ -2034,7 +2035,7 @@ void Assembler::TryAllocateObject(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
target::Thread::end_offset());
ldp(instance_reg, temp_reg,
@ -2076,7 +2077,7 @@ void Assembler::TryAllocateArray(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
// Potential new object start.
ldr(instance, Address(THR, target::Thread::top_offset()));
AddImmediateSetFlags(end_address, instance, instance_size);
@ -2105,6 +2106,20 @@ void Assembler::TryAllocateArray(intptr_t cid,
}
}
void Assembler::CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp) {
Label loop, done;
__ cbz(&done, size);
__ Bind(&loop);
__ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
__ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
__ subs(size, size, Operand(target::kWordSize));
__ b(&loop, NOT_ZERO);
__ Bind(&done);
}
void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
// Emit "bl <offset>".
EmitUnconditionalBranchOp(BL, 0);


@ -545,6 +545,8 @@ class Assembler : public AssemblerBase {
void Bind(Label* label);
// Unconditional jump to a given label. [distance] is ignored on ARM.
void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
// Unconditional jump to a given address in register.
void Jump(Register target) { br(target); }
// Unconditional jump to a given address in memory. Clobbers TMP.
void Jump(const Address& address) {
ldr(TMP, address);
@ -629,9 +631,6 @@ class Assembler : public AssemblerBase {
#endif
}
void CompareWithFieldValue(Register value, FieldAddress address) {
CompareWithMemoryValue(value, address);
}
void CompareWithCompressedFieldFromOffset(Register value,
Register base,
int32_t offset) {
@ -1260,6 +1259,19 @@ class Assembler : public AssemblerBase {
JumpDistance distance = kFarJump) {
cbz(label, rn);
}
void BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance = kFarJump) {
if (condition == ZERO) {
tbz(label, rn, bit_number);
} else if (condition == NOT_ZERO) {
tbnz(label, rn, bit_number);
} else {
UNREACHABLE();
}
}
void cbz(Label* label, Register rt, OperandSize sz = kEightBytes) {
EmitCompareAndBranch(CBZ, rt, label, sz);
@ -1676,13 +1688,16 @@ class Assembler : public AssemblerBase {
void LslImmediate(Register rd,
Register rn,
int shift,
int32_t shift,
OperandSize sz = kEightBytes) {
const int reg_size =
const int32_t reg_size =
(sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
ASSERT((shift >= 0) && (shift < reg_size));
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1, sz);
}
void LslImmediate(Register rd, int32_t shift, OperandSize sz = kEightBytes) {
LslImmediate(rd, rd, shift, sz);
}
void LsrImmediate(Register rd,
Register rn,
int shift,
@ -1792,18 +1807,30 @@ class Assembler : public AssemblerBase {
Register rn,
int64_t imm,
OperandSize sz = kEightBytes);
void AddRegisters(Register dest, Register src) {
add(dest, dest, Operand(src));
}
void SubImmediateSetFlags(Register dest,
Register rn,
int64_t imm,
OperandSize sz = kEightBytes);
void SubRegisters(Register dest, Register src) {
sub(dest, dest, Operand(src));
}
void AndImmediate(Register rd,
Register rn,
int64_t imm,
OperandSize sz = kEightBytes);
void AndImmediate(Register rd, int64_t imm) {
AndImmediate(rd, rd, imm);
}
void OrImmediate(Register rd,
Register rn,
int64_t imm,
OperandSize sz = kEightBytes);
void OrImmediate(Register rd, int64_t imm) {
OrImmediate(rd, rd, imm);
}
void XorImmediate(Register rd,
Register rn,
int64_t imm,
@ -1884,6 +1911,9 @@ class Assembler : public AssemblerBase {
OperandSize sz = kEightBytes) {
StoreToOffset(src, base, offset - kHeapObjectTag, sz);
}
void StoreZero(const Address& address, Register temp = kNoRegister) {
str(ZR, address);
}
void StoreSToOffset(VRegister src, Register base, int32_t offset);
void StoreDToOffset(VRegister src, Register base, int32_t offset);
@ -2123,7 +2153,10 @@ class Assembler : public AssemblerBase {
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace);
void MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
JumpDistance distance = JumpDistance::kFarJump);
void TryAllocateObject(intptr_t cid,
intptr_t instance_size,
@ -2140,6 +2173,14 @@ class Assembler : public AssemblerBase {
Register temp1,
Register temp2);
// Copy [size] bytes from [src] address to [dst] address.
// [size] should be a multiple of word size.
// Clobbers [src], [dst], [size] and [temp] registers.
void CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp);
// This emits an PC-relative call of the form "bl <offset>". The offset
// is not yet known and needs therefore relocation to the right place before
// the code can be used.


@ -320,7 +320,7 @@ void Assembler::rep_movsw() {
EmitUint8(0xA5);
}
void Assembler::rep_movsl() {
void Assembler::rep_movsd() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
EmitUint8(0xA5);
@ -1889,6 +1889,18 @@ void Assembler::AddImmediate(Register reg, const Immediate& imm) {
}
}
void Assembler::AddImmediate(Register dest, Register src, int32_t value) {
if (dest == src) {
AddImmediate(dest, value);
return;
}
if (value == 0) {
MoveRegister(dest, src);
return;
}
leal(dest, Address(src, value));
}
void Assembler::SubImmediate(Register reg, const Immediate& imm) {
const intptr_t value = imm.value();
if (value == 0) {
@ -2598,8 +2610,8 @@ void Assembler::MoveMemoryToMemory(Address dst, Address src, Register tmp) {
#ifndef PRODUCT
void Assembler::MaybeTraceAllocation(intptr_t cid,
Register temp_reg,
Label* trace,
Register temp_reg,
JumpDistance distance) {
ASSERT(cid > 0);
Address state_address(kNoRegister, 0);
@ -2636,7 +2648,7 @@ void Assembler::TryAllocateObject(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, distance));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
movl(instance_reg, Address(THR, target::Thread::top_offset()));
addl(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
@ -2669,7 +2681,7 @@ void Assembler::TryAllocateArray(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, distance));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
movl(instance, Address(THR, target::Thread::top_offset()));
movl(end_address, instance);
@ -2696,6 +2708,17 @@ void Assembler::TryAllocateArray(intptr_t cid,
}
}
void Assembler::CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp) {
RELEASE_ASSERT(src == ESI);
RELEASE_ASSERT(dst == EDI);
RELEASE_ASSERT(size == ECX);
shrl(size, Immediate(target::kWordSizeLog2));
rep_movsd();
}
void Assembler::PushCodeObject() {
ASSERT(IsNotTemporaryScopedHandle(code_));
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@ -2713,6 +2736,10 @@ void Assembler::EnterDartFrame(intptr_t frame_size) {
}
}
void Assembler::LeaveDartFrame() {
LeaveFrame();
}
// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place. The frame
// pointer is already set up. There may be extra space for spill slots to
@ -2734,7 +2761,7 @@ void Assembler::EnterStubFrame() {
}
void Assembler::LeaveStubFrame() {
LeaveFrame();
LeaveDartFrame();
}
void Assembler::EnterCFrame(intptr_t frame_space) {


@ -297,7 +297,7 @@ class Assembler : public AssemblerBase {
void rep_movsb();
void rep_movsw();
void rep_movsl();
void rep_movsd();
void movss(XmmRegister dst, const Address& src);
void movss(const Address& dst, XmmRegister src);
@ -587,6 +587,14 @@ class Assembler : public AssemblerBase {
cmpl(src, Immediate(0));
j(ZERO, label, distance);
}
void BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance = kFarJump) {
testl(rn, Immediate(1 << bit_number));
j(condition, label, distance);
}
// Arch-specific LoadFromOffset to choose the right operation for [sz].
void LoadFromOffset(Register dst,
@ -645,6 +653,9 @@ class Assembler : public AssemblerBase {
OperandSize sz = kFourBytes) {
StoreToOffset(src, FieldAddress(base, offset), sz);
}
void StoreZero(const Address& address, Register temp = kNoRegister) {
movl(address, Immediate(0));
}
void LoadFromStack(Register dst, intptr_t depth);
void StoreToStack(Register src, intptr_t depth);
void CompareToStack(Register src, intptr_t depth);
@ -682,6 +693,10 @@ class Assembler : public AssemblerBase {
// We don't run TSAN on 32 bit systems.
}
void CompareWithMemoryValue(Register value, Address address) {
cmpl(value, address);
}
void ExtendValue(Register to, Register from, OperandSize sz) override;
void PushRegister(Register r);
void PopRegister(Register r);
@ -699,7 +714,24 @@ class Assembler : public AssemblerBase {
void AddImmediate(Register reg, int32_t value) {
AddImmediate(reg, Immediate(value));
}
void AddImmediate(Register dest, Register src, int32_t value);
void AddRegisters(Register dest, Register src) {
addl(dest, src);
}
void SubImmediate(Register reg, const Immediate& imm);
void SubRegisters(Register dest, Register src) {
subl(dest, src);
}
void AndImmediate(Register dst, int32_t value) {
andl(dst, Immediate(value));
}
void OrImmediate(Register dst, int32_t value) {
orl(dst, Immediate(value));
}
void LslImmediate(Register dst, int32_t shift) {
shll(dst, Immediate(shift));
}
void CompareImmediate(Register reg, int32_t immediate) {
cmpl(reg, Immediate(immediate));
@ -934,6 +966,10 @@ class Assembler : public AssemblerBase {
void Jump(Label* label, JumpDistance distance = kFarJump) {
jmp(label, distance);
}
// Unconditional jump to a given address in register.
void Jump(Register target) {
jmp(target);
}
// Moves one word from the memory at [from] to the memory at [to].
// Needs a temporary register.
@ -956,6 +992,7 @@ class Assembler : public AssemblerBase {
// L: <code to adjust saved pc if there is any intrinsification code>
// .....
void EnterDartFrame(intptr_t frame_size);
void LeaveDartFrame();
// Set up a Dart frame for a function compiled for on-stack replacement.
// The frame layout is a normal Dart frame, but the frame is partially set
@ -1002,9 +1039,9 @@ class Assembler : public AssemblerBase {
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(intptr_t cid,
Register temp_reg,
Label* trace,
JumpDistance distance);
Register temp_reg,
JumpDistance distance = JumpDistance::kFarJump);
void TryAllocateObject(intptr_t cid,
intptr_t instance_size,
@ -1021,6 +1058,16 @@ class Assembler : public AssemblerBase {
Register end_address,
Register temp);
// Copy [size] bytes from [src] address to [dst] address.
// [size] should be a multiple of word size.
// Clobbers [src], [dst], [size] and [temp] registers.
// IA32 requires fixed registers for memory copying:
// [src] = ESI, [dst] = EDI, [size] = ECX.
void CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp = kNoRegister);
// Debugging and bringup support.
void Breakpoint() override { int3(); }


@ -4821,7 +4821,7 @@ ASSEMBLER_TEST_GENERATE(TestRepMovsDwords, assembler) {
__ movl(ESI, Address(ESP, 4 * target::kWordSize)); // from.
__ movl(EDI, Address(ESP, 5 * target::kWordSize)); // to.
__ movl(ECX, Address(ESP, 6 * target::kWordSize)); // count.
__ rep_movsl();
__ rep_movsd();
__ popl(ECX);
__ popl(EDI);
__ popl(ESI);


@ -2373,10 +2373,9 @@ void Assembler::CompareWithCompressedFieldFromOffset(Register value,
UNIMPLEMENTED();
}
void Assembler::CompareWithMemoryValue(Register value,
Address address,
OperandSize sz) {
UNIMPLEMENTED();
void Assembler::CompareWithMemoryValue(Register value, Address address) {
lx(TMP2, address);
CompareRegisters(value, TMP2);
}
void Assembler::CompareFunctionTypeNullabilityWith(Register type,
@ -2648,6 +2647,22 @@ void Assembler::BranchIfZero(Register rn, Label* label, JumpDistance distance) {
beqz(rn, label, distance);
}
void Assembler::BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance) {
ASSERT(rn != TMP2);
andi(TMP2, rn, 1 << bit_number);
if (condition == ZERO) {
beqz(TMP2, label, distance);
} else if (condition == NOT_ZERO) {
bnez(TMP2, label, distance);
} else {
UNREACHABLE();
}
}
void Assembler::BranchIfNotSmi(Register reg,
Label* label,
JumpDistance distance) {
@ -3923,8 +3938,9 @@ void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
#ifndef PRODUCT
void Assembler::MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
Label* trace) {
JumpDistance distance) {
ASSERT(cid > 0);
const intptr_t shared_table_offset =
@ -3963,7 +3979,7 @@ void Assembler::TryAllocateObject(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
lx(instance_reg, Address(THR, target::Thread::top_offset()));
lx(temp_reg, Address(THR, target::Thread::end_offset()));
@ -4003,7 +4019,7 @@ void Assembler::TryAllocateArray(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
// Potential new object start.
lx(instance, Address(THR, target::Thread::top_offset()));
AddImmediate(end_address, instance, instance_size);
@ -4031,6 +4047,22 @@ void Assembler::TryAllocateArray(intptr_t cid,
}
}
void Assembler::CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp) {
Label loop, done;
beqz(size, &done, kNearJump);
Bind(&loop);
lx(temp, Address(src));
addi(src, src, target::kWordSize);
sx(temp, Address(dst));
addi(dst, dst, target::kWordSize);
subi(size, size, target::kWordSize);
bnez(size, &loop, kNearJump);
Bind(&done);
}
void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
// JAL only has a +/- 1MB range. AUIPC+JALR has a +/- 2GB range.
intx_t imm = offset_into_target;


@ -801,6 +801,8 @@ class Assembler : public MicroAssembler {
void Jump(Label* label, JumpDistance distance = kFarJump) {
j(label, distance);
}
// Unconditional jump to a given address in register.
void Jump(Register target) { jr(target); }
// Unconditional jump to a given address in memory. Clobbers TMP.
void Jump(const Address& address);
@ -834,16 +836,11 @@ class Assembler : public MicroAssembler {
Register address,
int32_t offset = 0);
void CompareWithFieldValue(Register value, FieldAddress address) {
CompareWithMemoryValue(value, address);
}
void CompareWithCompressedFieldFromOffset(Register value,
Register base,
int32_t offset);
void CompareWithMemoryValue(Register value,
Address address,
OperandSize sz = kWordBytes);
void CompareWithMemoryValue(Register value, Address address);
void CompareFunctionTypeNullabilityWith(Register type, int8_t value) override;
void CompareTypeNullabilityWith(Register type, int8_t value) override;
@ -888,6 +885,11 @@ class Assembler : public MicroAssembler {
void BranchIfZero(Register rn,
Label* label,
JumpDistance distance = kFarJump);
void BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance = kFarJump);
void SetIf(Condition condition, Register rd);
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
@ -932,6 +934,12 @@ class Assembler : public MicroAssembler {
void AddImmediate(Register dest, intx_t imm) {
AddImmediate(dest, dest, imm);
}
void AddRegisters(Register dest, Register src) {
add(dest, dest, src);
}
void SubRegisters(Register dest, Register src) {
sub(dest, dest, src);
}
// Macros accepting a pp Register argument may attempt to load values from
// the object pool when possible. Unless you are sure that the untagged object
@ -946,14 +954,23 @@ class Assembler : public MicroAssembler {
Register rn,
intx_t imm,
OperandSize sz = kWordBytes);
void AndImmediate(Register rd, intx_t imm) {
AndImmediate(rd, rd, imm);
}
void OrImmediate(Register rd,
Register rn,
intx_t imm,
OperandSize sz = kWordBytes);
void OrImmediate(Register rd, intx_t imm) {
OrImmediate(rd, rd, imm);
}
void XorImmediate(Register rd,
Register rn,
intx_t imm,
OperandSize sz = kWordBytes);
void LslImmediate(Register rd, int32_t shift) {
slli(rd, rd, shift);
}
void TestImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
void CompareImmediate(Register rn, intx_t imm, OperandSize sz = kWordBytes);
@ -1016,6 +1033,9 @@ class Assembler : public MicroAssembler {
OperandSize sz = kWordBytes) {
StoreToOffset(src, base, offset - kHeapObjectTag, sz);
}
void StoreZero(const Address& address, Register temp = kNoRegister) {
sx(ZR, address);
}
void StoreSToOffset(FRegister src, Register base, int32_t offset);
void StoreDToOffset(FRegister src, Register base, int32_t offset);
void StoreDFieldToOffset(FRegister src, Register base, int32_t offset) {
@ -1256,7 +1276,10 @@ class Assembler : public MicroAssembler {
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label* trace);
void MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
JumpDistance distance = JumpDistance::kFarJump);
void TryAllocateObject(intptr_t cid,
intptr_t instance_size,
@ -1273,6 +1296,14 @@ class Assembler : public MicroAssembler {
Register temp1,
Register temp2);
// Copy [size] bytes from [src] address to [dst] address.
// [size] should be a multiple of word size.
// Clobbers [src], [dst], [size] and [temp] registers.
void CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp);
// This emits an PC-relative call of the form "bl <offset>". The offset
// is not yet known and needs therefore relocation to the right place before
// the code can be used.


@ -1161,6 +1161,18 @@ void Assembler::AddImmediate(Register reg,
}
}
void Assembler::AddImmediate(Register dest, Register src, int32_t value) {
if (dest == src) {
AddImmediate(dest, value);
return;
}
if (value == 0) {
MoveRegister(dest, src);
return;
}
leaq(dest, Address(src, value));
}
void Assembler::AddImmediate(const Address& address, const Immediate& imm) {
const int64_t value = imm.value();
if (value == 0) {
@ -2185,6 +2197,7 @@ void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
#ifndef PRODUCT
void Assembler::MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg,
JumpDistance distance) {
ASSERT(cid > 0);
const intptr_t shared_table_offset =
@ -2193,7 +2206,9 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
target::SharedClassTable::class_heap_stats_table_offset();
const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid);
Register temp_reg = TMP;
if (temp_reg == kNoRegister) {
temp_reg = TMP;
}
LoadIsolateGroup(temp_reg);
movq(temp_reg, Address(temp_reg, shared_table_offset));
movq(temp_reg, Address(temp_reg, table_offset));
@ -2219,7 +2234,7 @@ void Assembler::TryAllocateObject(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, distance));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
movq(instance_reg, Address(THR, target::Thread::top_offset()));
addq(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
@ -2251,7 +2266,7 @@ void Assembler::TryAllocateArray(intptr_t cid,
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, distance));
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp, distance));
movq(instance, Address(THR, target::Thread::top_offset()));
movq(end_address, instance);
@ -2279,6 +2294,17 @@ void Assembler::TryAllocateArray(intptr_t cid,
}
}
void Assembler::CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp) {
RELEASE_ASSERT(src == RSI);
RELEASE_ASSERT(dst == RDI);
RELEASE_ASSERT(size == RCX);
shrq(size, Immediate(target::kWordSizeLog2));
rep_movsq();
}
void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
buffer_.Emit<uint8_t>(0xe8);


@ -407,7 +407,7 @@ class Assembler : public AssemblerBase {
SIMPLE(lock, 0xF0)
SIMPLE(rep_movsb, 0xF3, 0xA4)
SIMPLE(rep_movsw, 0xF3, 0x66, 0xA5)
SIMPLE(rep_movsl, 0xF3, 0xA5)
SIMPLE(rep_movsd, 0xF3, 0xA5)
SIMPLE(rep_movsq, 0xF3, 0x48, 0xA5)
#undef SIMPLE
// XmmRegister operations with another register or an address.
@ -583,8 +583,17 @@ class Assembler : public AssemblerBase {
OperandSize width = kEightBytes);
void AndImmediate(Register dst, const Immediate& imm);
void AndImmediate(Register dst, int32_t value) {
AndImmediate(dst, Immediate(value));
}
void OrImmediate(Register dst, const Immediate& imm);
void OrImmediate(Register dst, int32_t value) {
OrImmediate(dst, Immediate(value));
}
void XorImmediate(Register dst, const Immediate& imm);
void LslImmediate(Register dst, int32_t shift) {
shlq(dst, Immediate(shift));
}
void shldq(Register dst, Register src, Register shifter) {
ASSERT(shifter == RCX);
@ -715,6 +724,14 @@ class Assembler : public AssemblerBase {
cmpq(src, Immediate(0));
j(ZERO, label, distance);
}
void BranchIfBit(Register rn,
intptr_t bit_number,
Condition condition,
Label* label,
JumpDistance distance = kFarJump) {
testq(rn, Immediate(1 << bit_number));
j(condition, label, distance);
}
void ExtendValue(Register dst, Register src, OperandSize sz) override;
void PushRegister(Register r);
@ -740,11 +757,18 @@ class Assembler : public AssemblerBase {
OperandSize width = kEightBytes) {
AddImmediate(reg, Immediate(value), width);
}
void AddRegisters(Register dest, Register src) {
addq(dest, src);
}
void AddImmediate(Register dest, Register src, int32_t value);
void AddImmediate(const Address& address, const Immediate& imm);
void SubImmediate(Register reg,
const Immediate& imm,
OperandSize width = kEightBytes);
void SubImmediate(const Address& address, const Immediate& imm);
void SubRegisters(Register dest, Register src) {
subq(dest, src);
}
void Drop(intptr_t stack_elements, Register tmp = TMP);
@ -965,6 +989,10 @@ class Assembler : public AssemblerBase {
void Jump(Label* label, JumpDistance distance = kFarJump) {
jmp(label, distance);
}
// Unconditional jump to a given address in register.
void Jump(Register target) {
jmp(target);
}
// Unconditional jump to a given address in memory.
void Jump(const Address& address) { jmp(address); }
@ -1028,6 +1056,9 @@ class Assembler : public AssemblerBase {
OperandSize sz = kEightBytes) {
StoreToOffset(src, FieldAddress(base, offset), sz);
}
void StoreZero(const Address& address, Register temp = kNoRegister) {
movq(address, Immediate(0));
}
void LoadFromStack(Register dst, intptr_t depth);
void StoreToStack(Register src, intptr_t depth);
void CompareToStack(Register src, intptr_t depth);
@ -1097,7 +1128,7 @@ class Assembler : public AssemblerBase {
#endif
}
void CompareWithFieldValue(Register value, FieldAddress address) {
void CompareWithMemoryValue(Register value, Address address) {
cmpq(value, address);
}
void CompareWithCompressedFieldFromOffset(Register value,
@ -1178,7 +1209,10 @@ class Assembler : public AssemblerBase {
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
void MaybeTraceAllocation(intptr_t cid, Label* trace, JumpDistance distance);
void MaybeTraceAllocation(intptr_t cid,
Label* trace,
Register temp_reg = kNoRegister,
JumpDistance distance = JumpDistance::kFarJump);
void TryAllocateObject(intptr_t cid,
intptr_t instance_size,
@ -1195,6 +1229,16 @@ class Assembler : public AssemblerBase {
Register end_address,
Register temp);
// Copy [size] bytes from [src] address to [dst] address.
// [size] should be a multiple of word size.
// Clobbers [src], [dst], [size] and [temp] registers.
// X64 requires fixed registers for memory copying:
// [src] = RSI, [dst] = RDI, [size] = RCX.
void CopyMemoryWords(Register src,
Register dst,
Register size,
Register temp = kNoRegister);
// This emits an PC-relative call of the form "callq *[rip+<offset>]". The
// offset is not yet known and needs therefore relocation to the right place
// before the code can be used.


@ -5693,7 +5693,7 @@ ASSEMBLER_TEST_GENERATE(TestRepMovsDwords, assembler) {
__ movq(RSI, Address(RSP, 2 * target::kWordSize)); // from.
__ movq(RDI, Address(RSP, 1 * target::kWordSize)); // to.
__ movq(RCX, Address(RSP, 0 * target::kWordSize)); // count.
__ rep_movsl();
__ rep_movsd();
// Remove saved arguments.
__ popq(RAX);
__ popq(RAX);


@ -259,6 +259,10 @@ class CompileType : public ZoneAllocated {
// can be uninstantiated.
bool CanBeSmi();
// Returns true if a value of this CompileType can contain a Future
// instance.
bool CanBeFuture();
bool Specialize(GrowableArray<intptr_t>* class_ids);
void PrintTo(BaseTextBuffer* f) const;


@ -1460,6 +1460,10 @@ void ConstantPropagator::VisitBitCast(BitCastInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitCall1ArgStub(Call1ArgStubInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitLoadThread(LoadThreadInstr* instr) {
SetValue(instr, non_constant_);
}


@ -39,7 +39,7 @@ FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
current_ssa_temp_index_(0),
max_block_id_(max_block_id),
parsed_function_(parsed_function),
num_direct_parameters_(parsed_function.function().HasOptionalParameters()
num_direct_parameters_(parsed_function.function().MakesCopyOfParameters()
? 0
: parsed_function.function().NumParameters()),
direct_parameters_size_(0),
@ -54,7 +54,7 @@ FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
prologue_info_(prologue_info),
loop_hierarchy_(nullptr),
loop_invariant_loads_(nullptr),
captured_parameters_(new (zone()) BitVector(zone(), variable_count())),
captured_parameters_(new(zone()) BitVector(zone(), variable_count())),
inlining_id_(-1),
should_print_(false) {
should_print_ = FlowGraphPrinter::ShouldPrint(parsed_function.function(),
@ -1076,7 +1076,7 @@ void FlowGraph::InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder,
GrowableArray<BlockEntryInstr*> worklist;
for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) {
const bool always_live =
!FLAG_prune_dead_locals || (var_index == CurrentContextEnvIndex());
!FLAG_prune_dead_locals || IsImmortalVariable(var_index);
// Add to the worklist each block containing an assignment.
for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
if (assigned_vars[block_index]->Contains(var_index)) {
@ -1378,7 +1378,7 @@ void FlowGraph::RenameRecursive(
// TODO(fschneider): Make sure that live_in always contains the
// CurrentContext variable to avoid the special case here.
if (FLAG_prune_dead_locals && !live_in->Contains(i) &&
(i != CurrentContextEnvIndex())) {
!IsImmortalVariable(i)) {
(*env)[i] = constant_dead();
}
}
@ -1619,10 +1619,6 @@ void FlowGraph::ValidatePhis() {
return;
}
// Current_context_var is never pruned, it is artificially kept alive, so
// it should not be checked here.
const intptr_t current_context_var_index = CurrentContextEnvIndex();
for (intptr_t i = 0, n = preorder().length(); i < n; ++i) {
BlockEntryInstr* block_entry = preorder()[i];
Instruction* last_instruction = block_entry->last_instruction();
@ -1634,7 +1630,7 @@ void FlowGraph::ValidatePhis() {
if (successor->phis() != NULL) {
for (intptr_t j = 0; j < successor->phis()->length(); ++j) {
PhiInstr* phi = (*successor->phis())[j];
if (phi == nullptr && j != current_context_var_index) {
if (phi == nullptr && !IsImmortalVariable(j)) {
// We have no phi node for the this variable.
// Double check we do not have a different value in our env.
// If we do, we would have needed a phi-node in the successsor.


@ -165,6 +165,12 @@ class FlowGraph : public ZoneAllocated {
bool IsIrregexpFunction() const { return function().IsIrregexpFunction(); }
LocalVariable* SuspendStateVar() const {
return parsed_function().suspend_state_var();
}
intptr_t SuspendStateEnvIndex() const { return EnvIndex(SuspendStateVar()); }
LocalVariable* CurrentContextVar() const {
return parsed_function().current_context_var();
}
@ -186,6 +192,14 @@ class FlowGraph : public ZoneAllocated {
return num_direct_parameters_ - variable->index().value();
}
// Context and :suspend_state variables are never pruned and
// artificially kept alive.
bool IsImmortalVariable(intptr_t env_index) const {
return (env_index == CurrentContextEnvIndex()) ||
(SuspendStateVar() != nullptr &&
env_index == SuspendStateEnvIndex());
}
static bool NeedsPairLocation(Representation representation) {
return representation == kUnboxedInt64 &&
compiler::target::kIntSpillFactor == 2;


@ -208,7 +208,8 @@ void FlowGraphCompiler::InitCompiler() {
new (zone()) CompressedStackMapsBuilder(zone());
pc_descriptors_list_ = new (zone()) DescriptorList(
zone(), &code_source_map_builder_->inline_id_to_function());
exception_handlers_list_ = new (zone()) ExceptionHandlerList();
exception_handlers_list_ =
new (zone()) ExceptionHandlerList(parsed_function().function());
#if defined(DART_PRECOMPILER)
catch_entry_moves_maps_builder_ = new (zone()) CatchEntryMovesMapBuilder();
#endif
@ -1220,7 +1221,7 @@ ArrayPtr FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
// to spill slots. The deoptimization environment does not track them.
const Function& function = parsed_function().function();
const intptr_t incoming_arg_count =
function.HasOptionalParameters() ? 0 : function.num_fixed_parameters();
function.MakesCopyOfParameters() ? 0 : function.num_fixed_parameters();
DeoptInfoBuilder builder(zone(), incoming_arg_count, assembler);
intptr_t deopt_info_table_size = DeoptTable::SizeFor(deopt_infos_.length());


@ -814,6 +814,7 @@ class FlowGraphCompiler : public ValueObject {
void RecordCatchEntryMoves(Environment* env);
void EmitCallToStub(const Code& stub);
void EmitJumpToStub(const Code& stub);
void EmitTailCallToStub(const Code& stub);
// Emits the following metadata for the current PC:


@ -373,6 +373,15 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
__ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitAsync stub is called.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().suspend_state_var());
__ LoadObject(R0, Object::null_object());
__ StoreToOffset(R0, FP, slot_index * compiler::target::kWordSize);
}
EndCodeSourceRange(PrologueSource());
@ -389,10 +398,25 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
}
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
} else {
__ LoadObject(CODE_REG, stub);
__ ldr(PC, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
#if defined(DEBUG)
@ -400,7 +424,9 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
#endif
} else {
__ LoadObject(CODE_REG, stub);
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ ldr(PC, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
AddStubCallTarget(stub);
@ -645,7 +671,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R4, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {


@ -363,6 +363,14 @@ void FlowGraphCompiler::EmitPrologue() {
slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
__ StoreToOffset(value_reg, FP, slot_index * kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitAsync stub is called.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().suspend_state_var());
__ StoreToOffset(NULL_REG, FP, slot_index * kWordSize);
}
EndCodeSourceRange(PrologueSource());
@ -379,10 +387,26 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
}
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
} else {
__ LoadObject(CODE_REG, stub);
__ ldr(TMP, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
__ br(TMP);
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
#if defined(DEBUG)
@ -390,7 +414,9 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
#endif
} else {
__ LoadObject(CODE_REG, stub);
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ ldr(TMP, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
__ br(TMP);
@ -646,7 +672,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R4, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {


@ -450,6 +450,15 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
__ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitAsync stub is called.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().suspend_state_var());
__ LoadObject(EAX, Object::null_object());
__ movl(compiler::Address(EBP, slot_index * kWordSize), EAX);
}
EndCodeSourceRange(PrologueSource());
@ -464,6 +473,14 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
AddStubCallTarget(stub);
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
ASSERT(!stub.IsNull());
__ LoadObject(CODE_REG, stub);
__ jmp(compiler::FieldAddress(CODE_REG,
compiler::target::Code::entry_point_offset()));
AddStubCallTarget(stub);
}
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
@ -625,7 +642,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (function.HasOptionalParameters() || function.IsGeneric()) {
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(EDX, arguments_descriptor);
} else {
__ xorl(EDX, EDX); // GC safe smi zero because of stub.


@ -357,6 +357,14 @@ void FlowGraphCompiler::EmitPrologue() {
__ StoreToOffset(value_reg, SP,
(slot_index + fp_to_sp_delta) * kWordSize);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitAsync stub is called.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().suspend_state_var());
__ StoreToOffset(NULL_REG, FP, slot_index * kWordSize);
}
EndCodeSourceRange(PrologueSource());
@ -373,10 +381,26 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
}
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
} else {
__ LoadObject(CODE_REG, stub);
__ lx(TMP, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
__ jr(TMP);
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
#if defined(DEBUG)
@ -384,7 +408,9 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
#endif
} else {
__ LoadObject(CODE_REG, stub);
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ lx(TMP, compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
__ jr(TMP);
@ -616,7 +642,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {


@ -369,6 +369,15 @@ void FlowGraphCompiler::EmitPrologue() {
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
__ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
}
} else if (parsed_function().suspend_state_var() != nullptr) {
// Initialize synthetic :suspend_state variable early
// as it may be accessed by GC and exception handling before
// InitAsync stub is called.
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().suspend_state_var());
__ LoadObject(RAX, Object::null_object());
__ movq(compiler::Address(RBP, slot_index * kWordSize), RAX);
}
EndCodeSourceRange(PrologueSource());
@ -385,10 +394,25 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
}
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
} else {
__ LoadObject(CODE_REG, stub);
__ jmp(compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
ASSERT(!stub.IsNull());
if (CanPcRelativeCall(stub)) {
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ GenerateUnRelocatedPcRelativeTailCall();
AddPcRelativeTailCallStubTarget(stub);
#if defined(DEBUG)
@ -396,7 +420,9 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
#endif
} else {
__ LoadObject(CODE_REG, stub);
__ LeaveDartFrame();
if (flow_graph().graph_entry()->NeedsFrame()) {
__ LeaveDartFrame();
}
__ jmp(compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
AddStubCallTarget(stub);
@ -629,7 +655,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.HasOptionalParameters() || function.IsGeneric()) {
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R10, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {


@ -6900,6 +6900,19 @@ void RawStoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->assembler()->StoreMemoryValue(value_reg, base_reg, offset_);
}
const Code& ReturnInstr::GetReturnStub(FlowGraphCompiler* compiler) const {
ASSERT(compiler->parsed_function().function().IsCompactAsyncFunction());
if (!value()->Type()->CanBeFuture()) {
return Code::ZoneHandle(compiler->zone(),
compiler->isolate_group()
->object_store()
->return_async_not_future_stub());
}
return Code::ZoneHandle(
compiler->zone(),
compiler->isolate_group()->object_store()->return_async_stub());
}
void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) {
const auto& dst1 = marshaller_.Location(compiler::ffi::kResultIndex);
if (dst1.payload_type().IsVoid()) {
@ -7209,6 +7222,55 @@ bool SimdOpInstr::HasMask() const {
return simd_op_information[kind()].has_mask;
}
LocationSummary* Call1ArgStubInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
switch (stub_id_) {
case StubId::kInitAsync:
locs->set_in(0, Location::RegisterLocation(
InitSuspendableFunctionStubABI::kTypeArgsReg));
break;
case StubId::kAwaitAsync:
locs->set_in(0, Location::RegisterLocation(SuspendStubABI::kArgumentReg));
break;
}
locs->set_out(0, Location::RegisterLocation(CallingConventions::kReturnReg));
return locs;
}
void Call1ArgStubInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ObjectStore* object_store = compiler->isolate_group()->object_store();
Code& stub = Code::ZoneHandle(compiler->zone());
switch (stub_id_) {
case StubId::kInitAsync:
stub = object_store->init_async_stub();
break;
case StubId::kAwaitAsync:
stub = object_store->await_async_stub();
break;
}
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), deopt_id(), env());
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
if (stub_id_ == StubId::kAwaitAsync) {
// On x86 (X64 and IA32) a mismatch between calls and returns
// significantly regresses performance. So the suspend stub
// does not return directly to the caller. Instead, a small
// epilogue is generated right after the call to the suspend stub,
// and the resume stub adjusts the resume PC to skip this epilogue.
const intptr_t start = compiler->assembler()->CodeSize();
__ LeaveFrame();
__ ret();
RELEASE_ASSERT(compiler->assembler()->CodeSize() - start ==
SuspendStubABI::kResumePcDistance);
}
#endif
}
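To make the x86 call/return bookkeeping above concrete, here is a standalone sketch of the invariant the emitter and the resume stub agree on; kResumePcDistance and the addresses below are illustrative values, not the VM's.

#include <cassert>
#include <cstdint>

// Hypothetical epilogue size in bytes; the real value is whatever the
// LeaveFrame + ret sequence emitted right after the call occupies.
constexpr intptr_t kResumePcDistance = 5;

// A normal return from the suspend stub lands right after the call, i.e.
// on the small epilogue, so the call/return stack stays balanced.
intptr_t NormalReturnTarget(intptr_t saved_pc) {
  return saved_pc;
}

// The resume stub instead skips the epilogue by the agreed-upon distance.
intptr_t ResumeTarget(intptr_t saved_pc) {
  return saved_pc + kResumePcDistance;
}

int main() {
  const intptr_t saved_pc = 0x1000;  // illustrative return address
  assert(ResumeTarget(saved_pc) - NormalReturnTarget(saved_pc) ==
         kResumePcDistance);
  return 0;
}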
#undef __
} // namespace dart

View file

@ -528,6 +528,7 @@ struct InstrAttrs {
M(BoxSmallInt, kNoGC) \
M(IntConverter, kNoGC) \
M(BitCast, kNoGC) \
M(Call1ArgStub, _) \
M(LoadThread, kNoGC) \
M(Deoptimize, kNoGC) \
M(SimdOp, kNoGC)
@ -3099,6 +3100,8 @@ class ReturnInstr : public TemplateInstruction<1, NoThrow> {
const intptr_t yield_index_;
const Representation representation_;
const Code& GetReturnStub(FlowGraphCompiler* compiler) const;
DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
};
@ -9560,6 +9563,42 @@ class SimdOpInstr : public Definition {
DISALLOW_COPY_AND_ASSIGN(SimdOpInstr);
};
// Generic instruction to call 1-argument stubs specified using [StubId].
class Call1ArgStubInstr : public TemplateDefinition<1, Throws> {
public:
enum class StubId {
kInitAsync,
kAwaitAsync,
};
Call1ArgStubInstr(const InstructionSource& source,
StubId stub_id,
Value* operand,
intptr_t deopt_id)
: TemplateDefinition(source, deopt_id),
stub_id_(stub_id),
token_pos_(source.token_pos) {
SetInputAt(0, operand);
}
Value* operand() const { return inputs_[0]; }
StubId stub_id() const { return stub_id_; }
virtual TokenPosition token_pos() const { return token_pos_; }
virtual bool CanCallDart() const { return true; }
virtual bool ComputeCanDeoptimize() const { return true; }
virtual bool HasUnknownSideEffects() const { return true; }
DECLARE_INSTRUCTION(Call1ArgStub);
PRINT_OPERANDS_TO_SUPPORT
private:
const StubId stub_id_;
const TokenPosition token_pos_;
DISALLOW_COPY_AND_ASSIGN(Call1ArgStubInstr);
};
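As a rough illustration of how this instruction is wired up by the flow graph builder further below (one operand popped from the expression stack, one result pushed), here is a toy model with invented names; it is not the VM's IL.

#include <cassert>
#include <string>
#include <vector>

enum class StubId { kInitAsync, kAwaitAsync };

struct Node {
  StubId stub;
  std::string operand;
};

struct Builder {
  std::vector<std::string> stack;  // models the builder's expression stack

  Node Call1ArgStub(StubId id) {
    assert(!stack.empty());
    Node n{id, stack.back()};   // Pop() the single operand ...
    stack.pop_back();
    stack.push_back("result");  // ... and Push() the instruction's result.
    return n;
  }
};

int main() {
  Builder b;
  b.stack.push_back("typeArgs");
  Node n = b.Call1ArgStub(StubId::kInitAsync);
  assert(n.operand == "typeArgs" && b.stack.size() == 1);
  return 0;
}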
#undef DECLARE_INSTRUCTION
class Environment : public ZoneAllocated {

View file

@ -484,6 +484,13 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result == CallingConventions::kReturnFpuReg);
}
if (compiler->parsed_function().function().IsCompactAsyncFunction()) {
ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
const Code& stub = GetReturnStub(compiler);
compiler->EmitJumpToStub(stub);
return;
}
if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
__ Ret();
return;

View file

@ -411,6 +411,13 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result == CallingConventions::kReturnFpuReg);
}
if (compiler->parsed_function().function().IsCompactAsyncFunction()) {
ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
const Code& stub = GetReturnStub(compiler);
compiler->EmitJumpToStub(stub);
return;
}
if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
__ ret();
return;

View file

@ -116,7 +116,7 @@ void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case 4:
case 8:
case 16:
__ rep_movsl();
__ rep_movsd();
break;
}
@ -233,6 +233,13 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register result = locs()->in(0).reg();
ASSERT(result == EAX);
if (compiler->parsed_function().function().IsCompactAsyncFunction()) {
ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
const Code& stub = GetReturnStub(compiler);
compiler->EmitJumpToStub(stub);
return;
}
if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
__ ret();
return;
@ -256,7 +263,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(source(), yield_index());
}
__ LeaveFrame();
__ LeaveDartFrame();
__ ret();
}
@ -272,8 +279,7 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
return_in_st0 = true;
}
// Leave Dart frame.
__ LeaveFrame();
__ LeaveDartFrame();
// EDI is the only sane choice for a temporary register here because:
//
@ -1139,7 +1145,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Address(SPREG, marshaller_.RequiredStackSpaceInBytes()));
} else {
// Leave dummy exit frame.
__ LeaveFrame();
__ LeaveDartFrame();
// Instead of returning to the "fake" return address, we just pop it.
__ popl(temp);

View file

@ -1356,6 +1356,21 @@ void TailCallInstr::PrintOperandsTo(BaseTextBuffer* f) const {
f->AddString(")");
}
void Call1ArgStubInstr::PrintOperandsTo(BaseTextBuffer* f) const {
const char* name = "";
switch (stub_id_) {
case StubId::kInitAsync:
name = "InitAsync";
break;
case StubId::kAwaitAsync:
name = "AwaitAsync";
break;
}
f->Printf("%s(", name);
operand()->PrintTo(f);
f->AddString(")");
}
void PushArgumentInstr::PrintOperandsTo(BaseTextBuffer* f) const {
value()->PrintTo(f);
}

View file

@ -464,6 +464,13 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result == CallingConventions::kReturnFpuReg);
}
if (compiler->parsed_function().function().IsCompactAsyncFunction()) {
ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
const Code& stub = GetReturnStub(compiler);
compiler->EmitJumpToStub(stub);
return;
}
if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
__ ret();
return;

View file

@ -180,7 +180,7 @@ void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ rep_movsw();
break;
case 4:
__ rep_movsl();
__ rep_movsd();
break;
case 8:
case 16:
@ -338,6 +338,13 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result == CallingConventions::kReturnFpuReg);
}
if (compiler->parsed_function().function().IsCompactAsyncFunction()) {
ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
const Code& stub = GetReturnStub(compiler);
compiler->EmitJumpToStub(stub);
return;
}
if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
__ ret();
return;

View file

@ -2082,6 +2082,41 @@ void FlowGraphAllocator::Spill(LiveRange* range) {
ConvertAllUses(range);
}
void FlowGraphAllocator::AllocateSpillSlotForSuspendState() {
if (flow_graph_.parsed_function().suspend_state_var() == nullptr) {
return;
}
spill_slots_.Add(kMaxPosition);
quad_spill_slots_.Add(false);
untagged_spill_slots_.Add(false);
#if defined(DEBUG)
const intptr_t stack_index =
-compiler::target::frame_layout.VariableIndexForFrameSlot(
compiler::target::frame_layout.FrameSlotForVariable(
flow_graph_.parsed_function().suspend_state_var()));
ASSERT(stack_index == spill_slots_.length() - 1);
#endif
}
void FlowGraphAllocator::UpdateStackmapsForSuspendState() {
if (flow_graph_.parsed_function().suspend_state_var() == nullptr) {
return;
}
const intptr_t stack_index =
-compiler::target::frame_layout.VariableIndexForFrameSlot(
compiler::target::frame_layout.FrameSlotForVariable(
flow_graph_.parsed_function().suspend_state_var()));
ASSERT(stack_index >= 0);
for (intptr_t i = 0, n = safepoints_.length(); i < n; ++i) {
Instruction* safepoint_instr = safepoints_[i];
safepoint_instr->locs()->SetStackBit(stack_index);
}
}
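A minimal host-side model of what the two helpers above arrange, assuming the :suspend_state slot is the very first spill slot; Stackmap and the constants here are stand-ins, not VM types.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Stackmap {
  std::vector<bool> bits;
  void SetStackBit(std::size_t index) {
    if (bits.size() <= index) bits.resize(index + 1, false);
    bits[index] = true;
  }
};

int main() {
  std::vector<int> spill_slots;
  // Reserve slot 0 for :suspend_state before any other spill slot is taken.
  spill_slots.push_back(/*kMaxPosition=*/INT32_MAX);
  const std::size_t suspend_state_slot = spill_slots.size() - 1;

  // After live ranges are built, every safepoint reports the slot as holding
  // a tagged pointer so the GC visits the SuspendState object.
  std::vector<Stackmap> safepoints(3);
  for (Stackmap& map : safepoints) map.SetStackBit(suspend_state_slot);

  for (const Stackmap& map : safepoints) assert(map.bits[0]);
  return 0;
}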
intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated(
intptr_t reg,
LiveRange* unallocated) {
@ -3102,11 +3137,11 @@ void FlowGraphAllocator::RemoveFrameIfNotNeeded() {
return;
}
// Optional parameter handling needs special changes to become frameless.
// Copying of parameters needs special changes to become frameless.
// Specifically we need to rebase IL instructions which directly access frame
// ({Load,Store}IndexedUnsafeInstr) to use SP rather than FP.
// For now just always give such functions a frame.
if (flow_graph_.parsed_function().function().HasOptionalParameters()) {
if (flow_graph_.parsed_function().function().MakesCopyOfParameters()) {
return;
}
@ -3211,8 +3246,15 @@ void FlowGraphAllocator::AllocateRegisters() {
NumberInstructions();
// Reserve spill slot for :suspend_state synthetic variable before
// reserving spill slots for parameter variables.
AllocateSpillSlotForSuspendState();
BuildLiveRanges();
// Update stackmaps after all safepoints are collected.
UpdateStackmapsForSuspendState();
if (FLAG_print_ssa_liveranges && CompilerState::ShouldTrace()) {
const Function& function = flow_graph_.function();
THR_Print("-- [before ssa allocator] ranges [%s] ---------\n",

View file

@ -242,6 +242,13 @@ class FlowGraphAllocator : public ValueObject {
// Find a spill slot that can be used by the given live range.
void AllocateSpillSlotFor(LiveRange* range);
// Allocate spill slot for synthetic :suspend_state variable.
void AllocateSpillSlotForSuspendState();
// Mark synthetic :suspend_state variable as object in stackmaps
// at all safepoints.
void UpdateStackmapsForSuspendState();
// Allocate the given live range to a spill slot.
void Spill(LiveRange* range);

View file

@ -2814,6 +2814,9 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
case Slot::Kind::kFunctionType_parameter_types:
case Slot::Kind::kFunctionType_type_parameters:
case Slot::Kind::kInstance_native_fields_array:
case Slot::Kind::kSuspendState_future:
case Slot::Kind::kSuspendState_then_callback:
case Slot::Kind::kSuspendState_error_callback:
case Slot::Kind::kTypedDataView_typed_data:
case Slot::Kind::kType_arguments:
case Slot::Kind::kTypeArgumentsIndex:

View file

@ -243,6 +243,9 @@ bool Slot::IsImmutableLengthSlot() const {
case Slot::Kind::kFunctionType_named_parameter_names:
case Slot::Kind::kFunctionType_parameter_types:
case Slot::Kind::kFunctionType_type_parameters:
case Slot::Kind::kSuspendState_future:
case Slot::Kind::kSuspendState_then_callback:
case Slot::Kind::kSuspendState_error_callback:
case Slot::Kind::kType_arguments:
case Slot::Kind::kTypeArgumentsIndex:
case Slot::Kind::kTypeParameters_names:

View file

@ -73,6 +73,9 @@ class ParsedFunction;
V(ImmutableLinkedHashBase, UntaggedLinkedHashBase, index, \
TypedDataUint32Array, VAR) \
V(Instance, UntaggedInstance, native_fields_array, Dynamic, VAR) \
V(SuspendState, UntaggedSuspendState, future, Dynamic, VAR) \
V(SuspendState, UntaggedSuspendState, then_callback, Closure, VAR) \
V(SuspendState, UntaggedSuspendState, error_callback, Closure, VAR) \
V(Type, UntaggedType, arguments, TypeArguments, FINAL) \
V(TypeParameters, UntaggedTypeParameters, flags, Array, FINAL) \
V(TypeParameters, UntaggedTypeParameters, bounds, TypeArguments, FINAL) \

View file

@ -938,6 +938,43 @@ bool CompileType::CanBeSmi() {
return CanPotentiallyBeSmi(*ToAbstractType(), /*recurse=*/true);
}
bool CompileType::CanBeFuture() {
IsolateGroup* isolate_group = IsolateGroup::Current();
ObjectStore* object_store = isolate_group->object_store();
if (cid_ != kIllegalCid && cid_ != kDynamicCid) {
if ((cid_ == kNullCid) || (cid_ == kNeverCid)) {
return false;
}
const Class& cls = Class::Handle(isolate_group->class_table()->At(cid_));
return Class::IsSubtypeOf(
cls, TypeArguments::null_type_arguments(), Nullability::kNonNullable,
Type::Handle(object_store->non_nullable_future_rare_type()),
Heap::kNew);
}
AbstractType& type = AbstractType::Handle(ToAbstractType()->ptr());
if (type.IsTypeParameter()) {
type = TypeParameter::Cast(type).bound();
}
if (type.IsTypeParameter()) {
// Type parameter bounds can be cyclic, do not bother handling them here.
return true;
}
const intptr_t type_class_id = type.type_class_id();
if (type_class_id == kDynamicCid || type_class_id == kVoidCid ||
type_class_id == kInstanceCid || type_class_id == kFutureOrCid) {
return true;
}
if ((type_class_id == kNullCid) || (type_class_id == kNeverCid)) {
return false;
}
Type& future_type =
Type::Handle(object_store->non_nullable_future_rare_type());
future_type = future_type.ToNullability(Nullability::kNullable, Heap::kNew);
return type.IsSubtypeOf(future_type, Heap::kNew);
}
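The decision above can be summarized as a small classification table; the sketch below models it over a handful of example types (my own enum, not CompileType) and errs on the conservative side for anything unmodeled.

#include <cassert>

enum class TypeKind {
  kNull, kNever, kDynamic, kVoid, kObject, kFutureOr, kFuture, kInt
};

bool CanBeFuture(TypeKind t) {
  switch (t) {
    case TypeKind::kNull:
    case TypeKind::kNever:
      return false;  // no instances, so never a Future
    case TypeKind::kDynamic:
    case TypeKind::kVoid:
    case TypeKind::kObject:
    case TypeKind::kFutureOr:
    case TypeKind::kFuture:
      return true;   // may (or must) hold a Future subtype
    case TypeKind::kInt:
      return false;  // int is not a subtype of Future<...>
  }
  return true;       // stay conservative for anything not modeled here
}

int main() {
  assert(!CanBeFuture(TypeKind::kInt));
  assert(CanBeFuture(TypeKind::kFutureOr));
  return 0;
}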
void CompileType::PrintTo(BaseTextBuffer* f) const {
const char* type_name = "?";
if (IsNone()) {

View file

@ -284,7 +284,7 @@ Fragment BaseFlowGraphBuilder::MemoryCopy(classid_t src_cid,
Fragment BaseFlowGraphBuilder::TailCall(const Code& code) {
Value* arg_desc = Pop();
return Fragment(new (Z) TailCallInstr(code, arg_desc));
return Fragment(new (Z) TailCallInstr(code, arg_desc)).closed();
}
void BaseFlowGraphBuilder::InlineBailout(const char* reason) {

View file

@ -647,9 +647,9 @@ Fragment StreamingFlowGraphBuilder::SetupCapturedParameters(
LocalVariable* variable = pf.ParameterVariable(i);
if (variable->is_captured()) {
LocalVariable& raw_parameter = *pf.RawParameterVariable(i);
ASSERT((function.HasOptionalParameters() &&
ASSERT((function.MakesCopyOfParameters() &&
raw_parameter.owner() == scope) ||
(!function.HasOptionalParameters() &&
(!function.MakesCopyOfParameters() &&
raw_parameter.owner() == nullptr));
ASSERT(!raw_parameter.is_captured());
@ -666,6 +666,28 @@ Fragment StreamingFlowGraphBuilder::SetupCapturedParameters(
return body;
}
Fragment StreamingFlowGraphBuilder::InitSuspendableFunction(
const Function& dart_function) {
Fragment body;
if (dart_function.IsCompactAsyncFunction()) {
const auto& result_type =
AbstractType::Handle(Z, dart_function.result_type());
auto& type_args = TypeArguments::ZoneHandle(Z);
if (result_type.IsType() &&
(Class::Handle(Z, result_type.type_class()).IsFutureClass() ||
result_type.IsFutureOrType())) {
ASSERT(result_type.IsFinalized());
type_args = result_type.arguments();
}
body += TranslateInstantiatedTypeArguments(type_args);
body += B->Call1ArgStub(TokenPosition::kNoSource,
Call1ArgStubInstr::StubId::kInitAsync);
body += Drop();
}
return body;
}
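A sketch of the type-argument selection performed above, using stand-in types: for a declared return type Future<T> or FutureOr<T>, InitAsync is given <T>; anything else falls back to null type arguments (dynamic).

#include <cassert>
#include <optional>
#include <string>

struct ResultType {
  std::string clazz;                        // e.g. "Future", "FutureOr", "int"
  std::optional<std::string> type_argument;  // e.g. "int" for Future<int>
};

std::optional<std::string> InitAsyncTypeArgs(const ResultType& result) {
  if (result.clazz == "Future" || result.clazz == "FutureOr") {
    return result.type_argument;  // instantiate the future with <T>
  }
  return std::nullopt;            // fall back to dynamic
}

int main() {
  assert(InitAsyncTypeArgs({"Future", "int"}) ==
         std::optional<std::string>("int"));
  assert(InitAsyncTypeArgs({"int", std::nullopt}) == std::nullopt);
  return 0;
}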
Fragment StreamingFlowGraphBuilder::ShortcutForUserDefinedEquals(
const Function& dart_function,
LocalVariable* first_parameter) {
@ -885,6 +907,7 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfFunction(
// objects than necessary during GC.
const Fragment body =
ClearRawParameters(dart_function) + B->BuildNullAssertions() +
InitSuspendableFunction(dart_function) +
BuildFunctionBody(dart_function, first_parameter, is_constructor);
auto extra_entry_point_style = ChooseEntryPointStyle(
@ -1220,6 +1243,8 @@ Fragment StreamingFlowGraphBuilder::BuildExpression(TokenPosition* position) {
return BuildLibraryPrefixAction(position, Symbols::LoadLibrary());
case kCheckLibraryIsLoaded:
return BuildLibraryPrefixAction(position, Symbols::CheckLoaded());
case kAwaitExpression:
return BuildAwaitExpression(position);
case kConstStaticInvocation:
case kConstConstructorInvocation:
case kConstListLiteral:
@ -1442,6 +1467,10 @@ Value* StreamingFlowGraphBuilder::stack() {
return flow_graph_builder_->stack_;
}
void StreamingFlowGraphBuilder::set_stack(Value* top) {
flow_graph_builder_->stack_ = top;
}
void StreamingFlowGraphBuilder::Push(Definition* definition) {
flow_graph_builder_->Push(definition);
}
@ -4291,6 +4320,20 @@ Fragment StreamingFlowGraphBuilder::BuildLibraryPrefixAction(
return instructions;
}
Fragment StreamingFlowGraphBuilder::BuildAwaitExpression(
TokenPosition* position) {
ASSERT(parsed_function()->function().IsCompactAsyncFunction());
Fragment instructions;
const TokenPosition pos = ReadPosition(); // read file offset.
if (position != nullptr) *position = pos;
instructions += BuildExpression(); // read operand.
instructions += B->Call1ArgStub(pos, Call1ArgStubInstr::StubId::kAwaitAsync);
return instructions;
}
Fragment StreamingFlowGraphBuilder::BuildExpressionStatement(
TokenPosition* position) {
Fragment instructions = BuildExpression(position); // read expression.
@ -4468,7 +4511,6 @@ Fragment StreamingFlowGraphBuilder::BuildBreakStatement(
Fragment StreamingFlowGraphBuilder::BuildWhileStatement(
TokenPosition* position) {
ASSERT(block_expression_depth() == 0); // no while in block-expr
loop_depth_inc();
const TokenPosition pos = ReadPosition(); // read position.
if (position != nullptr) *position = pos;
@ -4485,8 +4527,7 @@ Fragment StreamingFlowGraphBuilder::BuildWhileStatement(
body_entry += Goto(join);
Fragment loop(join);
ASSERT(B->GetStackDepth() == 0);
loop += CheckStackOverflow(pos);
loop += CheckStackOverflow(pos); // may have non-empty stack
loop.current->LinkTo(condition.entry);
entry = Goto(join).entry;
@ -4499,7 +4540,6 @@ Fragment StreamingFlowGraphBuilder::BuildWhileStatement(
}
Fragment StreamingFlowGraphBuilder::BuildDoStatement(TokenPosition* position) {
ASSERT(block_expression_depth() == 0); // no do-while in block-expr
loop_depth_inc();
const TokenPosition pos = ReadPosition(); // read position.
if (position != nullptr) *position = pos;
@ -4516,8 +4556,7 @@ Fragment StreamingFlowGraphBuilder::BuildDoStatement(TokenPosition* position) {
JoinEntryInstr* join = BuildJoinEntry();
Fragment loop(join);
ASSERT(B->GetStackDepth() == 0);
loop += CheckStackOverflow(pos);
loop += CheckStackOverflow(pos); // may have non-empty stack
loop += body;
loop <<= condition.entry;
@ -5089,7 +5128,6 @@ Fragment StreamingFlowGraphBuilder::BuildTryCatch(TokenPosition* position) {
}
Fragment StreamingFlowGraphBuilder::BuildTryFinally(TokenPosition* position) {
ASSERT(block_expression_depth() == 0); // no try-finally in block-expr
// Note on streaming:
// We only stream this TryFinally if we can stream everything inside it,
// so creating a "TryFinallyBlock" with a kernel binary offset instead of an
@ -5152,6 +5190,7 @@ Fragment StreamingFlowGraphBuilder::BuildTryFinally(TokenPosition* position) {
// Fill in the body of the catch.
catch_depth_inc();
const Array& handler_types = Array::ZoneHandle(Z, Array::New(1, Heap::kOld));
handler_types.SetAt(0, Object::dynamic_type());
// Note: rethrow will actually force mark the handler as needing a stacktrace.
@ -5159,6 +5198,15 @@ Fragment StreamingFlowGraphBuilder::BuildTryFinally(TokenPosition* position) {
/* needs_stacktrace = */ false,
/* is_synthesized = */ true);
SetOffset(finalizer_offset);
// Try/finally might occur in control flow collections with a non-empty
// expression stack (via desugaring of 'await for'). Note that the catch
// block generated for the finally clause always throws, so there is no merge.
// Save and reset the expression stack around the catch body in order to
// maintain the correct stack depth, as catch entry drops the expression stack.
Value* const saved_stack_top = stack();
set_stack(nullptr);
finally_body += BuildStatementWithBranchCoverage(); // read finalizer
if (finally_body.is_open()) {
finally_body += LoadLocal(CurrentException());
@ -5167,6 +5215,9 @@ Fragment StreamingFlowGraphBuilder::BuildTryFinally(TokenPosition* position) {
RethrowException(TokenPosition::kNoSource, try_handler_index);
Drop();
}
ASSERT(stack() == nullptr);
set_stack(saved_stack_top);
catch_depth_dec();
return Fragment(try_body.entry, after_try);
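The save/reset/restore of the expression stack around the synthesized finally handler can be modeled in isolation as follows (names and types are mine, not the builder's):

#include <cassert>
#include <vector>

struct Builder {
  std::vector<int> stack;  // models the expression stack

  void BuildCatchBody() {
    // Catch entry conceptually starts with an empty expression stack; the
    // synthesized handler always rethrows, so nothing it pushes survives.
    assert(stack.empty());
  }
};

int main() {
  Builder b;
  b.stack = {1, 2};                  // non-empty stack from 'await for' desugaring
  std::vector<int> saved = b.stack;  // save ...
  b.stack.clear();                   // ... and reset around the catch body
  b.BuildCatchBody();
  b.stack = saved;                   // restore so outer code sees the right depth
  assert(b.stack.size() == 2);
  return 0;
}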
@ -5416,36 +5467,53 @@ Fragment StreamingFlowGraphBuilder::BuildFunctionNode(
lib.AddMetadata(function, func_decl_offset);
}
function.set_is_debuggable(function_node_helper.dart_async_marker_ ==
FunctionNodeHelper::kSync);
switch (function_node_helper.dart_async_marker_) {
case FunctionNodeHelper::kSyncStar:
function.set_modifier(UntaggedFunction::kSyncGen);
break;
case FunctionNodeHelper::kAsync:
function.set_modifier(UntaggedFunction::kAsync);
break;
case FunctionNodeHelper::kAsyncStar:
function.set_modifier(UntaggedFunction::kAsyncGen);
break;
default:
// no special modifier
break;
}
function.set_is_generated_body(function_node_helper.async_marker_ ==
FunctionNodeHelper::kSyncYielding);
// sync* functions contain two nested synthetic functions, the first of
// which (sync_op_gen) is a regular sync function so we need to manually
// label it generated:
if (function.parent_function() != Function::null()) {
const auto& parent = Function::Handle(function.parent_function());
if (parent.IsSyncGenerator()) {
function.set_is_generated_body(true);
if (function_node_helper.async_marker_ == FunctionNodeHelper::kAsync) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async functions are only supported in AOT mode.");
}
}
// Note: Is..() methods use the modifiers set above, so order matters.
if (function.IsAsyncClosure() || function.IsAsyncGenClosure()) {
function.set_is_inlinable(!FLAG_lazy_async_stacks);
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_debuggable(true);
function.set_is_inlinable(false);
function.set_is_visible(true);
ASSERT(function.IsCompactAsyncFunction());
} else {
ASSERT((function_node_helper.async_marker_ ==
FunctionNodeHelper::kSync) ||
(function_node_helper.async_marker_ ==
FunctionNodeHelper::kSyncYielding));
function.set_is_debuggable(function_node_helper.dart_async_marker_ ==
FunctionNodeHelper::kSync);
switch (function_node_helper.dart_async_marker_) {
case FunctionNodeHelper::kSyncStar:
function.set_modifier(UntaggedFunction::kSyncGen);
break;
case FunctionNodeHelper::kAsync:
function.set_modifier(UntaggedFunction::kAsync);
break;
case FunctionNodeHelper::kAsyncStar:
function.set_modifier(UntaggedFunction::kAsyncGen);
break;
default:
// no special modifier
break;
}
function.set_is_generated_body(function_node_helper.async_marker_ ==
FunctionNodeHelper::kSyncYielding);
// sync* functions contain two nested synthetic functions,
// the first of which (sync_op_gen) is a regular sync function so we
// need to manually label it generated:
if (function.parent_function() != Function::null()) {
const auto& parent = Function::Handle(function.parent_function());
if (parent.IsSyncGenerator()) {
function.set_is_generated_body(true);
}
}
// Note: Is..() methods use the modifiers set above, so order
// matters.
if (function.IsAsyncClosure() || function.IsAsyncGenClosure()) {
function.set_is_inlinable(!FLAG_lazy_async_stacks);
}
ASSERT(!function.IsCompactAsyncFunction());
}
// If the start token position is synthetic, the end token position

View file

@ -93,6 +93,7 @@ class StreamingFlowGraphBuilder : public KernelReaderHelper {
Fragment SetAsyncStackTrace(const Function& dart_function);
Fragment CheckStackOverflowInPrologue(const Function& dart_function);
Fragment SetupCapturedParameters(const Function& dart_function);
Fragment InitSuspendableFunction(const Function& dart_function);
Fragment ShortcutForUserDefinedEquals(const Function& dart_function,
LocalVariable* first_parameter);
Fragment TypeArgumentsHandling(const Function& dart_function);
@ -131,6 +132,7 @@ class StreamingFlowGraphBuilder : public KernelReaderHelper {
BreakableBlock* breakable_block();
GrowableArray<YieldContinuation>& yield_continuations();
Value* stack();
void set_stack(Value* top);
void Push(Definition* definition);
Value* Pop();
Class& GetSuperOrDie();
@ -335,6 +337,7 @@ class StreamingFlowGraphBuilder : public KernelReaderHelper {
Fragment BuildPartialTearoffInstantiation(TokenPosition* position);
Fragment BuildLibraryPrefixAction(TokenPosition* position,
const String& selector);
Fragment BuildAwaitExpression(TokenPosition* position);
Fragment BuildExpressionStatement(TokenPosition* position);
Fragment BuildBlock(TokenPosition* position);

View file

@ -617,6 +617,10 @@ void KernelFingerprintHelper::CalculateExpressionFingerprint() {
case kCheckLibraryIsLoaded:
ReadUInt(); // skip library index
return;
case kAwaitExpression:
ReadPosition(); // read position.
CalculateExpressionFingerprint(); // read operand.
return;
case kConstStaticInvocation:
case kConstConstructorInvocation:
case kConstListLiteral:

View file

@ -309,8 +309,7 @@ Fragment FlowGraphBuilder::TryCatch(int try_handler_index) {
// => We therefore create a block for the body (fresh try index) and another
// join block (with current try index).
Fragment body;
JoinEntryInstr* entry = new (Z)
JoinEntryInstr(AllocateBlockId(), try_handler_index, GetNextDeoptId());
JoinEntryInstr* entry = BuildJoinEntry(try_handler_index);
body += LoadLocal(parsed_function_->current_context_var());
body += StoreLocal(TokenPosition::kNoSource, CurrentCatchContext());
body += Drop();
@ -835,6 +834,9 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
V(LinkedHashBase_getIndex, LinkedHashBase_index) \
V(LinkedHashBase_getUsedData, LinkedHashBase_used_data) \
V(ObjectArrayLength, Array_length) \
V(SuspendState_getFuture, SuspendState_future) \
V(SuspendState_getThenCallback, SuspendState_then_callback) \
V(SuspendState_getErrorCallback, SuspendState_error_callback) \
V(TypedDataViewOffsetInBytes, TypedDataView_offset_in_bytes) \
V(TypedDataViewTypedData, TypedDataView_typed_data) \
V(TypedListBaseLength, TypedDataBase_length) \
@ -850,6 +852,9 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
V(NativeFinalizer_setCallback, NativeFinalizer_callback) \
V(LinkedHashBase_setData, LinkedHashBase_data) \
V(LinkedHashBase_setIndex, LinkedHashBase_index) \
V(SuspendState_setFuture, SuspendState_future) \
V(SuspendState_setThenCallback, SuspendState_then_callback) \
V(SuspendState_setErrorCallback, SuspendState_error_callback) \
V(WeakProperty_setKey, WeakProperty_key) \
V(WeakProperty_setValue, WeakProperty_value) \
V(WeakReference_setTarget, WeakReference_target)
@ -864,6 +869,7 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
const MethodRecognizer::Kind kind = function.recognized_kind();
switch (kind) {
case MethodRecognizer::kSuspendState_resume:
case MethodRecognizer::kTypedData_ByteDataView_factory:
case MethodRecognizer::kTypedData_Int8ArrayView_factory:
case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
@ -1012,6 +1018,13 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
const MethodRecognizer::Kind kind = function.recognized_kind();
switch (kind) {
case MethodRecognizer::kSuspendState_resume: {
const Code& resume_stub =
Code::ZoneHandle(Z, IG->object_store()->resume_stub());
body += NullConstant();
body += TailCall(resume_stub);
break;
}
case MethodRecognizer::kTypedData_ByteDataView_factory:
body += BuildTypedDataViewFactoryConstructor(function, kByteDataViewCid);
break;
@ -1704,7 +1717,10 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
}
}
body += Return(TokenPosition::kNoSource, /* omit_result_type_check = */ true);
if (body.is_open()) {
body +=
Return(TokenPosition::kNoSource, /* omit_result_type_check = */ true);
}
return new (Z) FlowGraph(*parsed_function_, graph_entry_, last_used_block_id_,
prologue_info);
@ -4184,6 +4200,14 @@ Fragment FlowGraphBuilder::BitCast(Representation from, Representation to) {
return Fragment(instr);
}
Fragment FlowGraphBuilder::Call1ArgStub(TokenPosition position,
Call1ArgStubInstr::StubId stub_id) {
Call1ArgStubInstr* instr = new (Z) Call1ArgStubInstr(
InstructionSource(position), stub_id, Pop(), GetNextDeoptId());
Push(instr);
return Fragment(instr);
}
Fragment FlowGraphBuilder::WrapTypedDataBaseInCompound(
const AbstractType& compound_type) {
const auto& compound_sub_class =

View file

@ -416,6 +416,10 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
// Currently only works with equal sizes and floating point <-> integer.
Fragment BitCast(Representation from, Representation to);
// Generates Call1ArgStub instruction.
Fragment Call1ArgStub(TokenPosition position,
Call1ArgStubInstr::StubId stub_id);
LocalVariable* LookupVariable(intptr_t kernel_offset);
// Build type argument type checks for the current function.

View file

@ -2620,6 +2620,10 @@ void KernelReaderHelper::SkipExpression() {
case kCheckLibraryIsLoaded:
ReadUInt(); // skip library index
return;
case kAwaitExpression:
ReadPosition(); // read position.
SkipExpression(); // read operand.
return;
case kConstStaticInvocation:
case kConstConstructorInvocation:
case kConstListLiteral:

View file

@ -35,12 +35,12 @@ static CompileType ParameterType(LocalVariable* param,
bool PrologueBuilder::PrologueSkippableOnUncheckedEntry(
const Function& function) {
return !function.HasOptionalParameters() &&
return !function.MakesCopyOfParameters() &&
!function.IsNonImplicitClosureFunction() && !function.IsGeneric();
}
bool PrologueBuilder::HasEmptyPrologue(const Function& function) {
return !function.HasOptionalParameters() && !function.IsGeneric() &&
return !function.MakesCopyOfParameters() && !function.IsGeneric() &&
!function.IsClosureFunction();
}
@ -51,14 +51,13 @@ BlockEntryInstr* PrologueBuilder::BuildPrologue(BlockEntryInstr* entry,
const intptr_t previous_block_id = last_used_block_id_;
const bool load_optional_arguments = function_.HasOptionalParameters();
const bool copy_parameters = function_.MakesCopyOfParameters();
const bool expect_type_args = function_.IsGeneric();
Fragment prologue = Fragment(entry);
if (load_optional_arguments) {
Fragment f =
BuildOptionalParameterHandling(parsed_function_->expression_temp_var());
if (copy_parameters) {
Fragment f = BuildParameterHandling();
if (link) prologue += f;
}
if (function_.IsClosureFunction()) {
@ -94,8 +93,7 @@ BlockEntryInstr* PrologueBuilder::BuildPrologue(BlockEntryInstr* entry,
}
}
Fragment PrologueBuilder::BuildOptionalParameterHandling(
LocalVariable* temp_var) {
Fragment PrologueBuilder::BuildParameterHandling() {
Fragment copy_args_prologue;
const int num_fixed_params = function_.num_fixed_parameters();
const int num_opt_pos_params = function_.NumOptionalPositionalParameters();
@ -111,18 +109,22 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue +=
LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
LocalVariable* count_var = nullptr;
LocalVariable* optional_count_var = nullptr;
if ((num_opt_pos_params > 0) || (num_opt_named_params > 0)) {
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue +=
LoadNativeField(Slot::ArgumentsDescriptor_positional_count());
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue += LoadNativeField(Slot::ArgumentsDescriptor_count());
LocalVariable* count_var = MakeTemporary();
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue += LoadNativeField(Slot::ArgumentsDescriptor_count());
count_var = MakeTemporary();
copy_args_prologue += LoadLocal(count_var);
copy_args_prologue += IntConstant(min_num_pos_args);
copy_args_prologue += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
LocalVariable* optional_count_var = MakeTemporary();
copy_args_prologue += LoadLocal(count_var);
copy_args_prologue += IntConstant(min_num_pos_args);
copy_args_prologue += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
optional_count_var = MakeTemporary();
}
// Copy mandatory parameters down.
intptr_t param = 0;
@ -157,7 +159,11 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
? FlowGraph::ParameterRepresentationAt(function_, param_index)
: kTagged);
copy_args_prologue += LoadLocal(optional_count_var);
if ((num_opt_pos_params > 0) || (num_opt_named_params > 0)) {
copy_args_prologue += LoadLocal(optional_count_var);
} else {
copy_args_prologue += IntConstant(0);
}
copy_args_prologue += LoadFpRelativeSlot(
compiler::target::kWordSize *
(compiler::target::frame_layout.param_end_from_fp +
@ -208,9 +214,8 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
}
copy_args_prologue += Goto(next_missing /* join good/not_good flows */);
copy_args_prologue.current = next_missing;
} else {
ASSERT(num_opt_named_params > 0);
} else if (num_opt_named_params > 0) {
const bool check_required_params =
IsolateGroup::Current()->use_strict_null_safety_checks();
const intptr_t first_name_offset =
@ -222,8 +227,9 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
SortOptionalNamedParametersInto(opt_param_position, num_fixed_params,
num_params);
ASSERT(temp_var != nullptr);
LocalVariable* optional_count_vars_processed = temp_var;
LocalVariable* optional_count_vars_processed =
parsed_function_->expression_temp_var();
ASSERT(optional_count_vars_processed != nullptr);
copy_args_prologue += IntConstant(0);
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, optional_count_vars_processed);
@ -332,9 +338,11 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
}
}
copy_args_prologue += Drop(); // optional_count_var
copy_args_prologue += Drop(); // count_var
copy_args_prologue += Drop(); // positional_count_var
if ((num_opt_pos_params > 0) || (num_opt_named_params > 0)) {
copy_args_prologue += Drop(); // optional_count_var
copy_args_prologue += Drop(); // count_var
copy_args_prologue += Drop(); // positional_count_var
}
return copy_args_prologue;
}
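A simplified model of the shape this prologue now takes: the count temporaries are only materialized when the function actually has optional parameters, and a constant zero is used otherwise. The arithmetic below is an illustration, not the exact arguments-descriptor layout.

#include <cassert>

struct Counts {
  int fixed;
  int optional_positional;
  int optional_named;
};

int OptionalArgumentCount(const Counts& c, int passed_positional) {
  const bool has_optional = c.optional_positional > 0 || c.optional_named > 0;
  if (!has_optional) {
    return 0;  // no temporaries pushed; a constant is used instead
  }
  return passed_positional - c.fixed;  // derived from the arguments descriptor
}

int main() {
  assert(OptionalArgumentCount({2, 0, 0}, 2) == 0);
  assert(OptionalArgumentCount({1, 2, 0}, 2) == 1);
  return 0;
}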

View file

@ -49,7 +49,7 @@ class PrologueBuilder : public BaseFlowGraphBuilder {
BlockEntryInstr* BuildPrologue(BlockEntryInstr* entry,
PrologueInfo* prologue_info);
Fragment BuildOptionalParameterHandling(LocalVariable* temp_var);
Fragment BuildParameterHandling();
static bool HasEmptyPrologue(const Function& function);
static bool PrologueSkippableOnUncheckedEntry(const Function& function);

View file

@ -89,6 +89,16 @@ ScopeBuildingResult* ScopeBuilder::BuildScopes() {
scope_->set_begin_token_pos(function.token_pos());
scope_->set_end_token_pos(function.end_token_pos());
if (function.IsCompactAsyncFunction()) {
LocalVariable* suspend_state_var =
MakeVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
Symbols::SuspendStateVar(), AbstractType::dynamic_type());
suspend_state_var->set_is_forced_stack();
suspend_state_var->set_invisible(true);
scope_->AddVariable(suspend_state_var);
parsed_function_->set_suspend_state_var(suspend_state_var);
}
// Add function type arguments variable before current context variable.
if (function.IsGeneric() || function.HasGenericParent()) {
LocalVariable* type_args_var = MakeVariable(
@ -438,6 +448,12 @@ ScopeBuildingResult* ScopeBuilder::BuildScopes() {
parsed_function_->AllocateVariables();
// :suspend_state variable should be allocated to a fixed location in
// the stack frame.
RELEASE_ASSERT((parsed_function_->suspend_state_var() == nullptr) ||
(parsed_function_->suspend_state_var()->index().value() ==
SuspendState::kSuspendStateVarIndex));
return result_;
}
@ -977,6 +993,10 @@ void ScopeBuilder::VisitExpression() {
case kCheckLibraryIsLoaded:
helper_.ReadUInt(); // library index
break;
case kAwaitExpression:
helper_.ReadPosition(); // read position.
VisitExpression(); // read operand.
return;
case kConstStaticInvocation:
case kConstConstructorInvocation:
case kConstListLiteral:

View file

@ -83,6 +83,19 @@ namespace dart {
V(::, copyRangeFromUint8ListToOneByteString, \
CopyRangeFromUint8ListToOneByteString, 0x19a1bf41) \
V(_StringBase, _interpolate, StringBaseInterpolate, 0x7da2a580) \
V(_SuspendState, get:_future, SuspendState_getFuture, 0x0e2a7e73) \
V(_SuspendState, set:_future, SuspendState_setFuture, 0x179923b0) \
V(_SuspendState, get:_thenCallback, SuspendState_getThenCallback, \
0xff1dccec) \
V(_SuspendState, set:_thenCallback, SuspendState_setThenCallback, \
0x6446bde9) \
V(_SuspendState, get:_errorCallback, SuspendState_getErrorCallback, \
0x8a6eb3cf) \
V(_SuspendState, set:_errorCallback, SuspendState_setErrorCallback, \
0x4935f88c) \
V(_SuspendState, _createAsyncCallbacks, SuspendState_createAsyncCallbacks, \
0x4add6c13) \
V(_SuspendState, _resume, SuspendState_resume, 0x93d8c5e8) \
V(_IntegerImplementation, toDouble, IntegerToDouble, 0x97728b46) \
V(_Double, _add, DoubleAdd, 0xea666327) \
V(_Double, _sub, DoubleSub, 0x28474c2e) \

View file

@ -213,9 +213,10 @@ const Class& ClosureClass() {
return Class::Handle(object_store->closure_class());
}
const Array& OneArgArgumentsDescriptor() {
const Array& ArgumentsDescriptorBoxed(intptr_t type_args_len,
intptr_t num_arguments) {
return Array::ZoneHandle(
ArgumentsDescriptor::NewBoxed(/*type_args_len=*/0, /*num_arguments=*/1));
ArgumentsDescriptor::NewBoxed(type_args_len, num_arguments));
}
bool IsOriginalObject(const Object& object) {

View file

@ -120,7 +120,8 @@ const Class& Float32x4Class();
const Class& Float64x2Class();
const Class& Int32x4Class();
const Class& ClosureClass();
const Array& OneArgArgumentsDescriptor();
const Array& ArgumentsDescriptorBoxed(intptr_t type_args_len,
intptr_t num_arguments);
template <typename To, typename From>
const To& CastHandle(const From& from) {
@ -987,6 +988,21 @@ class StackTrace : public AllStatic {
FINAL_CLASS();
};
class SuspendState : public AllStatic {
public:
static word frame_size_offset();
static word pc_offset();
static word future_offset();
static word then_callback_offset();
static word error_callback_offset();
static word payload_offset();
static word HeaderSize();
static word InstanceSize();
static word InstanceSize(word payload_size);
FINAL_CLASS();
};
class Integer : public AllStatic {
public:
static word InstanceSize();
@ -1222,6 +1238,12 @@ class Thread : public AllStatic {
static word random_offset();
static word suspend_state_init_async_entry_point_offset();
static word suspend_state_await_async_entry_point_offset();
static word suspend_state_return_async_entry_point_offset();
static word suspend_state_return_async_not_future_entry_point_offset();
static word suspend_state_handle_exception_entry_point_offset();
static word OffsetFromThread(const dart::Object& object);
static intptr_t OffsetFromThread(const dart::RuntimeEntry* runtime_entry);
};

File diff suppressed because it is too large

View file

@ -190,6 +190,12 @@
FIELD(String, hash_offset) \
FIELD(String, length_offset) \
FIELD(SubtypeTestCache, cache_offset) \
FIELD(SuspendState, error_callback_offset) \
FIELD(SuspendState, frame_size_offset) \
FIELD(SuspendState, future_offset) \
FIELD(SuspendState, payload_offset) \
FIELD(SuspendState, pc_offset) \
FIELD(SuspendState, then_callback_offset) \
FIELD(Thread, AllocateArray_entry_point_offset) \
FIELD(Thread, active_exception_offset) \
FIELD(Thread, active_stacktrace_offset) \
@ -275,6 +281,11 @@
\
FIELD(Thread, stack_overflow_shared_without_fpu_regs_stub_offset) \
FIELD(Thread, store_buffer_block_offset) \
FIELD(Thread, suspend_state_await_async_entry_point_offset) \
FIELD(Thread, suspend_state_init_async_entry_point_offset) \
FIELD(Thread, suspend_state_return_async_entry_point_offset) \
FIELD(Thread, suspend_state_return_async_not_future_entry_point_offset) \
FIELD(Thread, suspend_state_handle_exception_entry_point_offset) \
FIELD(Thread, top_exit_frame_info_offset) \
FIELD(Thread, top_offset) \
FIELD(Thread, top_resource_offset) \
@ -416,6 +427,7 @@
SIZEOF(Sentinel, InstanceSize, UntaggedSentinel) \
SIZEOF(SingleTargetCache, InstanceSize, UntaggedSingleTargetCache) \
SIZEOF(StackTrace, InstanceSize, UntaggedStackTrace) \
SIZEOF(SuspendState, HeaderSize, UntaggedSuspendState) \
SIZEOF(String, InstanceSize, UntaggedString) \
SIZEOF(SubtypeTestCache, InstanceSize, UntaggedSubtypeTestCache) \
SIZEOF(LoadingUnit, InstanceSize, UntaggedLoadingUnit) \
@ -439,6 +451,7 @@
PAYLOAD_SIZEOF(CompressedStackMaps, InstanceSize, HeaderSize) \
PAYLOAD_SIZEOF(InstructionsSection, InstanceSize, HeaderSize) \
PAYLOAD_SIZEOF(PcDescriptors, InstanceSize, HeaderSize) \
PAYLOAD_SIZEOF(SuspendState, InstanceSize, HeaderSize) \
PAYLOAD_SIZEOF(TypedData, InstanceSize, HeaderSize)
#define JIT_OFFSETS_LIST(FIELD, ARRAY, SIZEOF, ARRAY_SIZEOF, PAYLOAD_SIZEOF, \

View file

@ -15,6 +15,7 @@
#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/stack_frame.h"
#define __ assembler->
@ -1273,6 +1274,434 @@ void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
__ Ret();
}
static intptr_t SuspendStateFpOffset() {
return compiler::target::frame_layout.FrameSlotForVariableIndex(
SuspendState::kSuspendStateVarIndex) *
compiler::target::kWordSize;
}
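For illustration, a standalone model of this FP-relative addressing with assumed layout constants (the real values come from frame_layout and SuspendState::kSuspendStateVarIndex):

#include <cassert>
#include <cstdint>

constexpr intptr_t kWordSize = 8;
// Assumed value for this sketch only; the real constant is defined on
// SuspendState and asserted against in the scope builder.
constexpr intptr_t kSuspendStateVarIndex = -1;

// Model of FrameSlotForVariableIndex for a locals-below-FP layout:
// local variable i lives at FP + i words, with i negative for locals.
intptr_t FrameSlotForVariableIndex(intptr_t index) { return index; }

intptr_t SuspendStateFpOffsetBytes() {
  return FrameSlotForVariableIndex(kSuspendStateVarIndex) * kWordSize;
}

int main() {
  assert(SuspendStateFpOffsetBytes() == -8);  // one word below FP in this model
  return 0;
}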
void StubCodeCompiler::GenerateSuspendStub(
Assembler* assembler,
intptr_t suspend_entry_point_offset) {
const Register kArgument = SuspendStubABI::kArgumentReg;
const Register kTemp = SuspendStubABI::kTempReg;
const Register kFrameSize = SuspendStubABI::kFrameSizeReg;
const Register kSuspendState = SuspendStubABI::kSuspendStateReg;
const Register kFuture = SuspendStubABI::kFutureReg;
const Register kSrcFrame = SuspendStubABI::kSrcFrameReg;
const Register kDstFrame = SuspendStubABI::kDstFrameReg;
Label alloc_slow_case, alloc_done, init_done, old_gen_object, call_await;
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
#endif
__ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
__ AddImmediate(
kFrameSize, FPREG,
-target::frame_layout.last_param_from_entry_sp * target::kWordSize);
__ SubRegisters(kFrameSize, SPREG);
__ EnterStubFrame();
__ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
__ BranchIf(EQUAL, &init_done);
__ MoveRegister(kFuture, kSuspendState);
__ Comment("Allocate SuspendState");
// Check for allocation tracing.
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kSuspendStateCid, &alloc_slow_case, kTemp));
// Compute the rounded instance size.
const intptr_t fixed_size_plus_alignment_padding =
(target::SuspendState::HeaderSize() +
target::ObjectAlignment::kObjectAlignment - 1);
__ AddImmediate(kTemp, kFrameSize, fixed_size_plus_alignment_padding);
__ AndImmediate(kTemp, -target::ObjectAlignment::kObjectAlignment);
// Now allocate the object.
__ LoadFromOffset(kSuspendState, Address(THR, target::Thread::top_offset()));
__ AddRegisters(kTemp, kSuspendState);
// Check if the allocation fits into the remaining space.
__ CompareWithMemoryValue(kTemp, Address(THR, target::Thread::end_offset()));
__ BranchIf(UNSIGNED_GREATER_EQUAL, &alloc_slow_case);
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
__ StoreToOffset(kTemp, Address(THR, target::Thread::top_offset()));
__ SubRegisters(kTemp, kSuspendState);
__ AddImmediate(kSuspendState, kHeapObjectTag);
// Calculate the size tag.
{
Label size_tag_overflow, done;
__ CompareImmediate(kTemp, target::UntaggedObject::kSizeTagMaxSizeTag);
__ BranchIf(UNSIGNED_GREATER, &size_tag_overflow, Assembler::kNearJump);
__ LslImmediate(kTemp, target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2);
__ Jump(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
// Set overflow size tag value.
__ LoadImmediate(kTemp, 0);
__ Bind(&done);
uword tags = target::MakeTagWordForNewSpaceObject(kSuspendStateCid, 0);
__ OrImmediate(kTemp, tags);
__ StoreToOffset(
kTemp,
FieldAddress(kSuspendState, target::Object::tags_offset())); // Tags.
}
__ StoreToOffset(
kFrameSize,
FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
__ StoreCompressedIntoObjectNoBarrier(
kSuspendState,
FieldAddress(kSuspendState, target::SuspendState::future_offset()),
kFuture);
{
#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
defined(TARGET_ARCH_RISCV64)
const Register kNullReg = NULL_REG;
#else
const Register kNullReg = kTemp;
__ LoadObject(kNullReg, NullObject());
#endif
__ StoreCompressedIntoObjectNoBarrier(
kSuspendState,
FieldAddress(kSuspendState,
target::SuspendState::then_callback_offset()),
kNullReg);
__ StoreCompressedIntoObjectNoBarrier(
kSuspendState,
FieldAddress(kSuspendState,
target::SuspendState::error_callback_offset()),
kNullReg);
}
__ Bind(&alloc_done);
__ Comment("Save SuspendState to frame");
__ LoadFromOffset(kTemp, Address(FPREG, kSavedCallerFpSlotFromFp *
compiler::target::kWordSize));
__ StoreToOffset(kSuspendState, Address(kTemp, SuspendStateFpOffset()));
__ Bind(&init_done);
__ Comment("Copy frame to SuspendState");
__ LoadFromOffset(
kTemp, Address(FPREG, kSavedCallerPcSlotFromFp * target::kWordSize));
__ StoreToOffset(
kTemp, FieldAddress(kSuspendState, target::SuspendState::pc_offset()));
if (kSrcFrame == THR) {
__ PushRegister(THR);
}
__ AddImmediate(kSrcFrame, FPREG, kCallerSpSlotFromFp * target::kWordSize);
__ AddImmediate(kDstFrame, kSuspendState,
target::SuspendState::payload_offset() - kHeapObjectTag);
__ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
if (kSrcFrame == THR) {
__ PopRegister(THR);
}
#ifdef DEBUG
{
Label okay;
__ LoadFromOffset(
kTemp,
FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
__ AddRegisters(kTemp, kSuspendState);
__ LoadFromOffset(
kTemp, FieldAddress(kTemp, target::SuspendState::payload_offset() +
SuspendStateFpOffset()));
__ CompareRegisters(kTemp, kSuspendState);
__ BranchIf(EQUAL, &okay);
__ Breakpoint();
__ Bind(&okay);
}
#endif
// Push arguments for _SuspendState._await* method.
__ PushRegister(kSuspendState);
__ PushRegister(kArgument);
// Write barrier.
__ BranchIfBit(kSuspendState, target::ObjectAlignment::kNewObjectBitPosition,
ZERO, &old_gen_object);
__ Bind(&call_await);
__ Comment("Call _SuspendState._await method");
__ Call(Address(THR, suspend_entry_point_offset));
__ LeaveStubFrame();
#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_IA32)
// Drop the caller frame on all architectures except x86, which needs to
// maintain call/return balance to avoid performance regressions.
__ LeaveDartFrame();
#endif
__ Ret();
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
// Slow path is executed with Dart and stub frames still on the stack.
SPILLS_LR_TO_FRAME({});
SPILLS_LR_TO_FRAME({});
#endif
__ Bind(&alloc_slow_case);
__ Comment("SuspendState Allocation slow case");
__ PushRegister(kArgument); // Save argument.
__ PushRegister(kFrameSize); // Save frame size.
__ PushObject(NullObject()); // Make space on stack for the return value.
__ SmiTag(kFrameSize);
__ PushRegister(kFrameSize); // Pass frame size to runtime entry.
__ PushRegister(kFuture); // Pass future.
__ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
__ Drop(2); // Drop arguments
__ PopRegister(kSuspendState); // Get result.
__ PopRegister(kFrameSize); // Restore frame size.
__ PopRegister(kArgument); // Restore argument.
__ Jump(&alloc_done);
__ Bind(&old_gen_object);
__ Comment("Old gen SuspendState slow case");
{
#if defined(TARGET_ARCH_IA32)
LeafRuntimeScope rt(assembler, /*frame_size=*/2 * target::kWordSize,
/*preserve_registers=*/false);
__ movl(Address(ESP, 1 * target::kWordSize), THR);
__ movl(Address(ESP, 0 * target::kWordSize), kSuspendState);
#else
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/false);
__ MoveRegister(CallingConventions::ArgumentRegisters[0], kSuspendState);
__ MoveRegister(CallingConventions::ArgumentRegisters[1], THR);
#endif
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
}
__ Jump(&call_await);
}
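The allocation-size arithmetic in the stub above boils down to header plus copied frame size, rounded up to the object alignment. A back-of-the-envelope model with assumed constants:

#include <cassert>
#include <cstdint>

constexpr intptr_t kWordSize = 8;            // assumed 64-bit target
constexpr intptr_t kObjectAlignment = 16;    // assumed alignment
constexpr intptr_t kSuspendStateHeaderSize = 5 * kWordSize;  // illustrative

// Rounded allocation size for a SuspendState able to hold `frame_size`
// bytes of copied frame payload.
intptr_t SuspendStateInstanceSize(intptr_t frame_size) {
  const intptr_t unrounded = kSuspendStateHeaderSize + frame_size;
  return (unrounded + kObjectAlignment - 1) & -kObjectAlignment;
}

int main() {
  // A frame of 7 words is padded up to the next alignment boundary.
  const intptr_t size = SuspendStateInstanceSize(7 * kWordSize);
  assert(size % kObjectAlignment == 0);
  assert(size >= kSuspendStateHeaderSize + 7 * kWordSize);
  return 0;
}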
void StubCodeCompiler::GenerateAwaitAsyncStub(Assembler* assembler) {
GenerateSuspendStub(
assembler,
target::Thread::suspend_state_await_async_entry_point_offset());
}
void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset) {
const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
__ EnterStubFrame();
__ LoadObject(ARGS_DESC_REG, ArgumentsDescriptorBoxed(/*type_args_len=*/1,
/*num_arguments=*/0));
__ PushRegister(kTypeArgs);
__ Call(Address(THR, init_entry_point_offset));
__ LeaveStubFrame();
// Set :suspend_state in the caller frame.
__ StoreToOffset(CallingConventions::kReturnReg,
Address(FPREG, SuspendStateFpOffset()));
__ Ret();
}
void StubCodeCompiler::GenerateInitAsyncStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
assembler, target::Thread::suspend_state_init_async_entry_point_offset());
}
void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
const Register kTemp = ResumeStubABI::kTempReg;
const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
const Register kSrcFrame = ResumeStubABI::kSrcFrameReg;
const Register kDstFrame = ResumeStubABI::kDstFrameReg;
const Register kResumePc = ResumeStubABI::kResumePcReg;
const Register kException = ResumeStubABI::kExceptionReg;
const Register kStackTrace = ResumeStubABI::kStackTraceReg;
Label rethrow_exception;
// Top of the stack on entry:
// ... [SuspendState] [value] [exception] [stackTrace] [ReturnAddress]
__ EnterDartFrame(0);
const intptr_t param_offset =
target::frame_layout.param_end_from_fp * target::kWordSize;
__ LoadFromOffset(kSuspendState,
Address(FPREG, param_offset + 4 * target::kWordSize));
#ifdef DEBUG
{
Label okay;
__ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
__ BranchIf(EQUAL, &okay);
__ Breakpoint();
__ Bind(&okay);
}
#endif
__ LoadFromOffset(
kFrameSize,
FieldAddress(kSuspendState, target::SuspendState::frame_size_offset()));
#ifdef DEBUG
{
Label okay;
__ MoveRegister(kTemp, kFrameSize);
__ AddRegisters(kTemp, kSuspendState);
__ LoadFromOffset(
kTemp, FieldAddress(kTemp, target::SuspendState::payload_offset() +
SuspendStateFpOffset()));
__ CompareRegisters(kTemp, kSuspendState);
__ BranchIf(EQUAL, &okay);
__ Breakpoint();
__ Bind(&okay);
}
#endif
// Do not copy fixed frame between the first local and FP.
__ AddImmediate(kFrameSize, (target::frame_layout.first_local_from_fp + 1) *
target::kWordSize);
__ SubRegisters(SPREG, kFrameSize);
__ Comment("Copy frame from SuspendState");
__ AddImmediate(kSrcFrame, kSuspendState,
target::SuspendState::payload_offset() - kHeapObjectTag);
__ MoveRegister(kDstFrame, SPREG);
__ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
__ Comment("Transfer control");
__ LoadFromOffset(kResumePc, FieldAddress(kSuspendState,
target::SuspendState::pc_offset()));
__ StoreZero(FieldAddress(kSuspendState, target::SuspendState::pc_offset()),
kTemp);
__ LoadFromOffset(kException,
Address(FPREG, param_offset + 2 * target::kWordSize));
__ CompareObject(kException, NullObject());
__ BranchIf(NOT_EQUAL, &rethrow_exception);
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
// Adjust the resume PC to skip the extra epilogue generated on x86
// right after the call to the suspend stub in order to maintain
// call/return balance.
__ AddImmediate(kResumePc, SuspendStubABI::kResumePcDistance);
#endif
__ LoadFromOffset(CallingConventions::kReturnReg,
Address(FPREG, param_offset + 3 * target::kWordSize));
__ Jump(kResumePc);
__ Comment("Rethrow exception");
__ Bind(&rethrow_exception);
__ LoadFromOffset(kStackTrace,
Address(FPREG, param_offset + 1 * target::kWordSize));
// Adjust stack/LR/RA as if the suspended Dart function had called
// the stub with kResumePc as the return address.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
__ PushRegister(kResumePc);
#elif defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(__ MoveRegister(LR, kResumePc));
#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
__ MoveRegister(RA, kResumePc);
#else
#error Unknown target
#endif
#if !defined(TARGET_ARCH_IA32)
__ set_constant_pool_allowed(false);
#endif
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(kException);
__ PushRegister(kStackTrace);
__ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/2);
__ Breakpoint();
}
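A host-side model (plain C++, not assembler) of the frame restoration the resume stub performs: the saved payload is copied back word-for-word onto the stack; the pc handling and exception path are omitted here.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct SuspendStateModel {
  intptr_t frame_size = 0;       // payload size in bytes
  uintptr_t pc = 0;              // saved resume pc (unused in this sketch)
  std::vector<uint8_t> payload;  // copied frame contents
};

void RestoreFrame(const SuspendStateModel& state, std::vector<uint8_t>* stack) {
  // Make room below the new frame and copy the suspended frame back.
  stack->resize(state.frame_size);
  std::memcpy(stack->data(), state.payload.data(), state.frame_size);
}

int main() {
  SuspendStateModel s;
  s.frame_size = 16;
  s.payload.assign(16, 0xAB);
  std::vector<uint8_t> stack;
  RestoreFrame(s, &stack);
  assert(stack.size() == 16 && stack[0] == 0xAB);
  return 0;
}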
void StubCodeCompiler::GenerateReturnStub(Assembler* assembler,
intptr_t return_entry_point_offset) {
const Register kSuspendState = ReturnStubABI::kSuspendStateReg;
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
#endif
__ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
__ LeaveDartFrame();
__ EnterStubFrame();
__ PushRegister(kSuspendState);
__ PushRegister(CallingConventions::kReturnReg);
__ Call(Address(THR, return_entry_point_offset));
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateReturnAsyncStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_entry_point_offset());
}
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::
suspend_state_return_async_not_future_entry_point_offset());
}
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
const Register kSuspendState = AsyncExceptionHandlerStubABI::kSuspendStateReg;
ASSERT(kSuspendState != kExceptionObjectReg);
ASSERT(kSuspendState != kStackTraceObjectReg);
Label rethrow_exception;
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
#endif
__ LoadFromOffset(kSuspendState, Address(FPREG, SuspendStateFpOffset()));
// Check if :suspend_state is initialized. If it is not, the exception
// was thrown from the prologue code and should be propagated
// synchronously.
__ CompareObject(kSuspendState, NullObject());
__ BranchIf(EQUAL, &rethrow_exception);
__ LeaveDartFrame();
__ EnterStubFrame();
__ PushRegister(kSuspendState);
__ PushRegister(kExceptionObjectReg);
__ PushRegister(kStackTraceObjectReg);
__ Call(Address(
THR,
target::Thread::suspend_state_handle_exception_entry_point_offset()));
__ LeaveStubFrame();
__ Ret();
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
// Rethrow case is used when Dart frame is still on the stack.
SPILLS_LR_TO_FRAME({});
#endif
__ Comment("Rethrow exception");
__ Bind(&rethrow_exception);
__ LeaveDartFrame();
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(kExceptionObjectReg);
__ PushRegister(kStackTraceObjectReg);
__ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/2);
__ Breakpoint();
}
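The control flow of this stub reduces to a single check, sketched below with stand-in names: an uninitialized :suspend_state means the prologue has not run InitAsync yet, so the exception is rethrown synchronously; otherwise it is routed into the suspended function's asynchronous error handling.

#include <cassert>

enum class Handling { kRethrowSynchronously, kCompleteFutureWithError };

Handling HandleAsyncException(bool suspend_state_initialized) {
  return suspend_state_initialized ? Handling::kCompleteFutureWithError
                                   : Handling::kRethrowSynchronously;
}

int main() {
  assert(HandleAsyncException(false) == Handling::kRethrowSynchronously);
  assert(HandleAsyncException(true) == Handling::kCompleteFutureWithError);
  return 0;
}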
} // namespace compiler
} // namespace dart

View file

@ -198,6 +198,14 @@ class StubCodeCompiler : public AllStatic {
bool with_fpu_regs);
static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs);
static void GenerateSuspendStub(Assembler* assembler,
intptr_t suspend_entry_point_offset);
static void GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset);
static void GenerateReturnStub(Assembler* assembler,
intptr_t return_entry_point_offset);
};
} // namespace compiler

View file

@ -1040,8 +1040,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ b(&slow_case, HI);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R4, &slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &slow_case, R4));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
@ -1346,8 +1345,7 @@ static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
ASSERT(kSmiTagShift == 1);
__ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R8, kContextCid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R8, slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, R8));
// Now allocate the object.
// R1: number of context variables.
// R2: object size.
@ -3432,8 +3430,7 @@ void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label call_runtime;
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R2, cid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R2, &call_runtime));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, R2));
__ mov(R2, Operand(AllocateTypedDataArrayABI::kLengthReg));
/* Check that length is a positive Smi. */
/* R2: requested array length argument. */

View file

@ -1285,7 +1285,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ b(&slow_case, HI);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case, R4));
// Calculate and align allocation size.
// Load new object start and calculate next object start.
@ -1630,7 +1630,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
ASSERT(kSmiTagShift == 1);
__ andi(R2, R2, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R4, slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, R4));
// Now allocate the object.
// R1: number of context variables.
// R2: object size.
@ -3784,7 +3784,7 @@ void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label call_runtime;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R2, &call_runtime));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, R2));
__ mov(R2, AllocateTypedDataArrayABI::kLengthReg);
/* Check that length is a positive Smi. */
/* R2: requested array length argument. */

View file

@ -673,7 +673,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
Address(EBP, saved_stacktrace_slot_from_fp * target::kWordSize));
}
__ LeaveFrame();
__ LeaveDartFrame();
__ popl(EDX); // Preserve return address.
__ movl(ESP, EBP); // Discard optimized frame.
__ subl(ESP, EAX); // Reserve space for deoptimized frame.
@ -706,7 +706,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
target::kWordSize));
}
// Code above cannot cause GC.
__ LeaveFrame();
__ LeaveDartFrame();
// Frame is fully rewritten at this point and it is safe to perform a GC.
// Materialize any objects that were deferred by FillFrame because they
@ -730,7 +730,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ popl(EDX); // Restore exception.
__ popl(EAX); // Restore stacktrace.
}
__ LeaveFrame();
__ LeaveStubFrame();
__ popl(ECX); // Pop return address.
__ addl(ESP, EBX); // Remove materialization arguments.
@ -838,9 +838,8 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ cmpl(AllocateArrayABI::kLengthReg, max_len);
__ j(ABOVE, &slow_case);
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid,
AllocateArrayABI::kResultReg,
&slow_case, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case,
AllocateArrayABI::kResultReg));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
@ -1116,8 +1115,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
__ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, EAX, slow_case,
Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, EAX));
// Now allocate the object.
// EDX: number of context variables.
@ -2988,8 +2986,7 @@ void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
Label call_runtime;
__ pushl(AllocateTypedDataArrayABI::kLengthReg);
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(cid, ECX, &call_runtime, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, ECX));
__ movl(EDI, AllocateTypedDataArrayABI::kLengthReg);
/* Check that length is a positive Smi. */
/* EDI: requested array length argument. */

View file

@ -1103,7 +1103,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ BranchIf(HI, &slow_case);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, T4, &slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case, T4));
// Calculate and align allocation size.
// Load new object start and calculate next object start.
@ -1431,7 +1431,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
__ AddImmediate(T2, fixed_size_plus_alignment_padding);
__ andi(T2, T2, ~(target::ObjectAlignment::kObjectAlignment - 1));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, slow_case));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, T4));
// Now allocate the object.
// T1: number of context variables.
// T2: object size.
@ -3562,7 +3562,7 @@ void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label call_runtime;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, T3, &call_runtime));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, T3));
__ mv(T3, AllocateTypedDataArrayABI::kLengthReg);
/* Check that length is a positive Smi. */
/* T3: requested array length argument. */

View file

@ -1207,8 +1207,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ j(ABOVE, &slow_case);
// Check for allocation tracing.
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kArrayCid, &slow_case, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
@ -1542,8 +1541,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
__ andq(R13, Immediate(-target::ObjectAlignment::kObjectAlignment));
// Check for allocation tracing.
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kContextCid, slow_case, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case));
// Now allocate the object.
// R10: number of context variables.
@ -3691,8 +3689,7 @@ void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
Label call_runtime;
__ pushq(AllocateTypedDataArrayABI::kLengthReg);
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(cid, &call_runtime, Assembler::kFarJump));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime));
__ movq(RDI, AllocateTypedDataArrayABI::kLengthReg);
/* Check that length is a positive Smi. */
/* RDI: requested array length argument. */

View file

@ -532,6 +532,47 @@ struct DoubleToIntegerStubABI {
static const Register kResultReg = R0;
};
// ABI for SuspendStub (AwaitAsyncStub).
struct SuspendStubABI {
static const Register kArgumentReg = R0;
static const Register kTempReg = R1;
static const Register kFrameSizeReg = R2;
static const Register kSuspendStateReg = R3;
static const Register kFutureReg = R4;
static const Register kSrcFrameReg = R8;
static const Register kDstFrameReg = R9;
};
// ABI for InitSuspendableFunctionStub (InitAsyncStub).
struct InitSuspendableFunctionStubABI {
static const Register kTypeArgsReg = R0;
};
// ABI for ResumeStub
struct ResumeStubABI {
static const Register kSuspendStateReg = R2;
static const Register kTempReg = R0;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = R1;
static const Register kSrcFrameReg = R3;
static const Register kDstFrameReg = R4;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = R1;
static const Register kExceptionReg = R3;
static const Register kStackTraceReg = R4;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub).
struct ReturnStubABI {
static const Register kSuspendStateReg = R2;
};
// ABI for AsyncExceptionHandlerStub.
struct AsyncExceptionHandlerStubABI {
static const Register kSuspendStateReg = R2;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
// table calls (though normal functions will not expect or use this
// register). This ABI is added to distinguish memory corruption errors from

View file

@ -366,6 +366,47 @@ struct DoubleToIntegerStubABI {
static const Register kResultReg = R0;
};
// ABI for SuspendStub (AwaitAsyncStub).
struct SuspendStubABI {
static const Register kArgumentReg = R0;
static const Register kTempReg = R1;
static const Register kFrameSizeReg = R2;
static const Register kSuspendStateReg = R3;
static const Register kFutureReg = R4;
static const Register kSrcFrameReg = R5;
static const Register kDstFrameReg = R6;
};
// ABI for InitSuspendableFunctionStub (InitAsyncStub).
struct InitSuspendableFunctionStubABI {
static const Register kTypeArgsReg = R0;
};
// ABI for ResumeStub
struct ResumeStubABI {
static const Register kSuspendStateReg = R2;
static const Register kTempReg = R0;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = R1;
static const Register kSrcFrameReg = R3;
static const Register kDstFrameReg = R4;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = R1;
static const Register kExceptionReg = R3;
static const Register kStackTraceReg = R4;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub).
struct ReturnStubABI {
static const Register kSuspendStateReg = R2;
};
// ABI for AsyncExceptionHandlerStub.
struct AsyncExceptionHandlerStubABI {
static const Register kSuspendStateReg = R2;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
// table calls (though normal functions will not expect or use this
// register). This ABI is added to distinguish memory corruption errors from

View file

@ -260,6 +260,54 @@ struct DoubleToIntegerStubABI {
static const Register kResultReg = EAX;
};
// ABI for SuspendStub (AwaitAsyncStub).
struct SuspendStubABI {
static const Register kArgumentReg = EAX;
static const Register kTempReg = EDX;
static const Register kFrameSizeReg = ECX;
static const Register kSuspendStateReg = EBX;
static const Register kFutureReg = EDI;
// Can reuse THR.
static const Register kSrcFrameReg = ESI;
// Can reuse kFutureReg.
static const Register kDstFrameReg = EDI;
// Number of bytes to skip after
// suspend stub return address in order to resume.
// IA32: mov esp, ebp; pop ebp; ret
static const intptr_t kResumePcDistance = 5;
};
// ABI for InitSuspendableFunctionStub (InitAsyncStub).
struct InitSuspendableFunctionStubABI {
static const Register kTypeArgsReg = EAX;
};
// ABI for ResumeStub
struct ResumeStubABI {
static const Register kSuspendStateReg = EBX;
static const Register kTempReg = EDX;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = ECX;
static const Register kSrcFrameReg = ESI;
static const Register kDstFrameReg = EDI;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = ECX;
static const Register kExceptionReg = ESI;
static const Register kStackTraceReg = EDI;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub).
struct ReturnStubABI {
static const Register kSuspendStateReg = EBX;
};
// ABI for AsyncExceptionHandlerStub.
struct AsyncExceptionHandlerStubABI {
static const Register kSuspendStateReg = EBX;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
// table calls (though normal functions will not expect or use this
// register). This ABI is added to distinguish memory corruption errors from

View file

@ -378,6 +378,47 @@ struct DoubleToIntegerStubABI {
static constexpr Register kResultReg = A0;
};
// ABI for SuspendStub (AwaitAsyncStub).
struct SuspendStubABI {
static const Register kArgumentReg = A0;
static const Register kTempReg = T0;
static const Register kFrameSizeReg = T1;
static const Register kSuspendStateReg = T2;
static const Register kFutureReg = T3;
static const Register kSrcFrameReg = T4;
static const Register kDstFrameReg = T5;
};
// ABI for InitSuspendableFunctionStub (InitAsyncStub).
struct InitSuspendableFunctionStubABI {
static const Register kTypeArgsReg = A0;
};
// ABI for ResumeStub
struct ResumeStubABI {
static const Register kSuspendStateReg = T1;
static const Register kTempReg = T0;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = T2;
static const Register kSrcFrameReg = T3;
static const Register kDstFrameReg = T4;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = T2;
static const Register kExceptionReg = T3;
static const Register kStackTraceReg = T4;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub).
struct ReturnStubABI {
static const Register kSuspendStateReg = T1;
};
// ABI for AsyncExceptionHandlerStub.
struct AsyncExceptionHandlerStubABI {
static const Register kSuspendStateReg = T1;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
// table calls (though normal functions will not expect or use this
// register). This ABI is added to distinguish memory corruption errors from

View file

@ -335,6 +335,52 @@ struct DoubleToIntegerStubABI {
static const Register kResultReg = RAX;
};
// ABI for SuspendStub (AwaitAsyncStub).
struct SuspendStubABI {
static const Register kArgumentReg = RAX;
static const Register kTempReg = RDX;
static const Register kFrameSizeReg = RCX;
static const Register kSuspendStateReg = RBX;
static const Register kFutureReg = R8;
static const Register kSrcFrameReg = RSI;
static const Register kDstFrameReg = RDI;
// Number of bytes to skip after
// suspend stub return address in order to resume.
// X64: mov rsp, rbp; pop rbp; ret
static const intptr_t kResumePcDistance = 5;
};
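A quick cross-check of kResumePcDistance above (a standalone sketch, not part of this change; the per-instruction byte counts are assumptions about the exact encodings the assembler emits, though any REX.W register-to-register mov takes 3 bytes):

// Standalone cross-check, not VM code. Assumed encoding sizes:
//   mov rsp, rbp  -> REX.W prefix + opcode + ModRM = 3 bytes
//   pop rbp       -> 0x5D                          = 1 byte
//   ret           -> 0xC3                          = 1 byte
constexpr int kMovRspRbpBytes = 3;
constexpr int kPopRbpBytes = 1;
constexpr int kRetBytes = 1;
static_assert(kMovRspRbpBytes + kPopRbpBytes + kRetBytes == 5,
              "sums to SuspendStubABI::kResumePcDistance on X64");
int main() { return 0; }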
// ABI for InitSuspendableFunctionStub (InitAsyncStub).
struct InitSuspendableFunctionStubABI {
static const Register kTypeArgsReg = RAX;
};
// ABI for ResumeStub
struct ResumeStubABI {
static const Register kSuspendStateReg = RBX;
static const Register kTempReg = RDX;
// Registers for the frame copying (the 1st part).
static const Register kFrameSizeReg = RCX;
static const Register kSrcFrameReg = RSI;
static const Register kDstFrameReg = RDI;
// Registers for control transfer.
// (the 2nd part, can reuse registers from the 1st part)
static const Register kResumePcReg = RCX;
static const Register kExceptionReg = RSI;
static const Register kStackTraceReg = RDI;
};
// ABI for ReturnStub (ReturnAsyncStub, ReturnAsyncNotFutureStub).
struct ReturnStubABI {
static const Register kSuspendStateReg = RBX;
};
// ABI for AsyncExceptionHandlerStub.
struct AsyncExceptionHandlerStubABI {
static const Register kSuspendStateReg = RBX;
};
// ABI for DispatchTableNullErrorStub and consequently for all dispatch
// table calls (though normal functions will not expect or use this
// register). This ABI is added to distinguish memory corruption errors from

View file

@ -71,12 +71,12 @@ DeoptContext::DeoptContext(const StackFrame* frame,
// Do not include incoming arguments if there are optional arguments
// (they are copied into local space at method entry).
num_args_ =
function.HasOptionalParameters() ? 0 : function.num_fixed_parameters();
function.MakesCopyOfParameters() ? 0 : function.num_fixed_parameters();
// The fixed size section of the (fake) Dart frame called via a stub by the
// optimized function contains FP, PP (ARM only), PC-marker and
// return-address. This section is copied as well, so that its contained
// values can be updated before returning to the deoptimized function.
ASSERT(frame->fp() >= frame->sp());
const intptr_t frame_size = (frame->fp() - frame->sp()) / kWordSize;
@ -326,7 +326,7 @@ const CatchEntryMoves* DeoptContext::ToCatchEntryMoves(intptr_t num_vars) {
Function& function = Function::Handle(zone(), code.function());
intptr_t params =
function.HasOptionalParameters() ? 0 : function.num_fixed_parameters();
function.MakesCopyOfParameters() ? 0 : function.num_fixed_parameters();
for (intptr_t i = 0; i < num_vars; i++) {
const intptr_t len = deopt_instructions.length();
intptr_t slot = i < params ? i

View file

@ -149,7 +149,9 @@ class ExceptionHandlerFinder : public StackResource {
handler_pc = temp_handler_pc;
handler_sp = frame->sp();
handler_fp = frame->fp();
if (is_optimized) {
if (is_optimized &&
(handler_pc !=
StubCode::AsyncExceptionHandler().EntryPoint())) {
pc_ = frame->pc();
code_ = &Code::Handle(frame->LookupDartCode());
CatchEntryMovesRefPtr* cached_catch_entry_moves =

View file

@ -313,6 +313,19 @@ void GCCompactor::Compact(OldPage* pages,
ForwardStackPointers();
}
{
TIMELINE_FUNCTION_GC_DURATION(thread(),
"ForwardPostponedSuspendStatePointers");
// After heap sliding is complete and ObjectStore pointers are forwarded
// it is finally safe to visit SuspendState objects with copied frames.
can_visit_stack_frames_ = true;
const intptr_t length = postponed_suspend_states_.length();
for (intptr_t i = 0; i < length; ++i) {
auto suspend_state = postponed_suspend_states_[i];
suspend_state->untag()->VisitPointers(this);
}
}
heap_->old_space()->VisitRoots(this);
{
@ -741,6 +754,21 @@ void GCCompactor::VisitCompressedPointers(uword heap_base,
}
}
bool GCCompactor::CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) {
if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
// Visiting pointers of SuspendState objects with a copied stack frame
// needs to query the stack map, which can touch other Dart objects
// (such as the GrowableObjectArray of InstructionsTable).
// Those objects may be in an inconsistent state during compaction,
// so processing of SuspendState objects is postponed to a later
// stage of compaction.
MutexLocker ml(&postponed_suspend_states_mutex_);
postponed_suspend_states_.Add(suspend_state);
return false;
}
return true;
}
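The postpone-until-safe pattern used above can be sketched standalone as follows (a simplified model, not VM code; the names, types, and values are invented for illustration):

// Objects whose visit requires global state are queued and revisited
// once the flag is flipped in a later compaction phase.
#include <cstdio>
#include <vector>

struct Visitor {
  bool can_visit_stack_frames = false;
  std::vector<int> postponed;  // stand-in for postponed_suspend_states_

  bool TryVisit(int id, bool has_copied_frame) {
    if (has_copied_frame && !can_visit_stack_frames) {
      postponed.push_back(id);  // revisit later
      return false;
    }
    printf("visited %d\n", id);
    return true;
  }

  void VisitPostponed() {
    can_visit_stack_frames = true;
    for (int id : postponed) TryVisit(id, /*has_copied_frame=*/true);
  }
};

int main() {
  Visitor v;
  v.TryVisit(1, /*has_copied_frame=*/true);   // postponed
  v.TryVisit(2, /*has_copied_frame=*/false);  // visited immediately
  v.VisitPostponed();                         // now visits 1
  return 0;
}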
void GCCompactor::VisitHandle(uword addr) {
FinalizablePersistentHandle* handle =
reinterpret_cast<FinalizablePersistentHandle*>(addr);

View file

@ -41,12 +41,13 @@ class GCCompactor : public ValueObject,
void ForwardCompressedPointer(uword heap_base, CompressedObjectPtr* ptr);
void VisitTypedDataViewPointers(TypedDataViewPtr view,
CompressedObjectPtr* first,
CompressedObjectPtr* last);
void VisitPointers(ObjectPtr* first, ObjectPtr* last);
CompressedObjectPtr* last) override;
void VisitPointers(ObjectPtr* first, ObjectPtr* last) override;
void VisitCompressedPointers(uword heap_base,
CompressedObjectPtr* first,
CompressedObjectPtr* last);
void VisitHandle(uword addr);
CompressedObjectPtr* last) override;
bool CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) override;
void VisitHandle(uword addr) override;
Heap* heap_;
@ -71,6 +72,12 @@ class GCCompactor : public ValueObject,
// complete.
Mutex typed_data_view_mutex_;
MallocGrowableArray<TypedDataViewPtr> typed_data_views_;
// SuspendState objects with a copied frame must be updated after sliding
// is complete.
bool can_visit_stack_frames_ = false;
Mutex postponed_suspend_states_mutex_;
MallocGrowableArray<SuspendStatePtr> postponed_suspend_states_;
};
} // namespace dart

View file

@ -48,6 +48,7 @@
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/tags.h"
#include "vm/thread.h"
#include "vm/thread_interrupter.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
@ -613,6 +614,12 @@ Thread* IsolateGroup::ScheduleThreadLocked(MonitorLocker* ml,
thread->isolate_ = nullptr;
thread->isolate_group_ = this;
thread->field_table_values_ = nullptr;
if (object_store() != nullptr) {
#define INIT_ENTRY_POINT(name) \
thread->name##_entry_point_ = Function::EntryPointOf(object_store()->name());
CACHED_FUNCTION_ENTRY_POINTS_LIST(INIT_ENTRY_POINT)
#undef INIT_ENTRY_POINT
}
ASSERT(heap() != nullptr);
thread->heap_ = heap();
thread->set_os_thread(os_thread);
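The INIT_ENTRY_POINT expansion above follows the usual X-macro pattern; here is a standalone illustration (not VM code; the two list entries and the 0x1000 value are made up, and the real list lives in CACHED_FUNCTION_ENTRY_POINTS_LIST):

#include <cstdio>

// Hypothetical stand-in for the VM's CACHED_FUNCTION_ENTRY_POINTS_LIST.
#define FAKE_ENTRY_POINTS_LIST(V) \
  V(suspend_state_await_async)    \
  V(suspend_state_return_async)

struct FakeThread {
#define DECLARE_FIELD(name) unsigned long name##_entry_point_ = 0;
  FAKE_ENTRY_POINTS_LIST(DECLARE_FIELD)
#undef DECLARE_FIELD
};

int main() {
  FakeThread thread;
  // Each list entry expands to one cached entry-point assignment,
  // mirroring INIT_ENTRY_POINT in the diff above.
#define INIT_ENTRY_POINT(name) thread.name##_entry_point_ = 0x1000ul;
  FAKE_ENTRY_POINTS_LIST(INIT_ENTRY_POINT)
#undef INIT_ENTRY_POINT
  printf("%lx\n", thread.suspend_state_await_async_entry_point_);
  return 0;
}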

View file

@ -2030,28 +2030,40 @@ void KernelLoader::LoadProcedure(const Library& library,
FunctionNodeHelper function_node_helper(&helper_);
function_node_helper.ReadUntilIncluding(FunctionNodeHelper::kDartAsyncMarker);
function.set_is_debuggable(function_node_helper.dart_async_marker_ ==
FunctionNodeHelper::kSync);
switch (function_node_helper.dart_async_marker_) {
case FunctionNodeHelper::kSyncStar:
function.set_modifier(UntaggedFunction::kSyncGen);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
case FunctionNodeHelper::kAsync:
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_inlinable(!FLAG_lazy_async_stacks);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
case FunctionNodeHelper::kAsyncStar:
function.set_modifier(UntaggedFunction::kAsyncGen);
function.set_is_inlinable(!FLAG_lazy_async_stacks);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
default:
// no special modifier
break;
if (function_node_helper.async_marker_ == FunctionNodeHelper::kAsync) {
if (!FLAG_precompiled_mode) {
FATAL("Compact async functions are only supported in AOT mode.");
}
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_debuggable(true);
function.set_is_inlinable(false);
function.set_is_visible(true);
ASSERT(function.IsCompactAsyncFunction());
} else {
ASSERT(function_node_helper.async_marker_ == FunctionNodeHelper::kSync);
function.set_is_debuggable(function_node_helper.dart_async_marker_ ==
FunctionNodeHelper::kSync);
switch (function_node_helper.dart_async_marker_) {
case FunctionNodeHelper::kSyncStar:
function.set_modifier(UntaggedFunction::kSyncGen);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
case FunctionNodeHelper::kAsync:
function.set_modifier(UntaggedFunction::kAsync);
function.set_is_inlinable(!FLAG_lazy_async_stacks);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
case FunctionNodeHelper::kAsyncStar:
function.set_modifier(UntaggedFunction::kAsyncGen);
function.set_is_inlinable(!FLAG_lazy_async_stacks);
function.set_is_visible(!FLAG_lazy_async_stacks);
break;
default:
// no special modifier
break;
}
ASSERT(!function.IsCompactAsyncFunction());
}
ASSERT(function_node_helper.async_marker_ == FunctionNodeHelper::kSync);
if (!native_name.IsNull()) {
function.set_native_name(native_name);

View file

@ -3241,6 +3241,7 @@ void MessageSerializer::Trace(Object* object) {
ILLEGAL(MirrorReference)
ILLEGAL(ReceivePort)
ILLEGAL(StackTrace)
ILLEGAL(SuspendState)
ILLEGAL(UserTag)
// From "dart:ffi" we handle only Pointer/DynamicLibrary specially, since

View file

@ -55,6 +55,7 @@
#include "vm/regexp.h"
#include "vm/resolver.h"
#include "vm/reusable_handles.h"
#include "vm/reverse_pc_lookup_cache.h"
#include "vm/runtime_entry.h"
#include "vm/scopes.h"
#include "vm/stack_frame.h"
@ -949,6 +950,8 @@ void Object::Init(IsolateGroup* isolate_group) {
cls.set_type_arguments_field_offset(Array::type_arguments_offset(),
RTN::Array::type_arguments_offset());
cls.set_num_type_arguments_unsafe(1);
// In order to be able to canonicalize arguments descriptors early.
cls.set_is_prefinalized();
cls =
Class::New<GrowableObjectArray, RTN::GrowableObjectArray>(isolate_group);
isolate_group->object_store()->set_growable_object_array_class(cls);
@ -1090,10 +1093,26 @@ void Object::Init(IsolateGroup* isolate_group) {
empty_exception_handlers_,
static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag));
empty_exception_handlers_->StoreNonPointer(
&empty_exception_handlers_->untag()->num_entries_, 0);
&empty_exception_handlers_->untag()->packed_fields_, 0);
empty_exception_handlers_->SetCanonical();
}
// Empty exception handlers for async/async* functions.
{
uword address =
heap->Allocate(thread, ExceptionHandlers::InstanceSize(0), Heap::kOld);
InitializeObject(address, kExceptionHandlersCid,
ExceptionHandlers::InstanceSize(0),
ExceptionHandlers::ContainsCompressedPointers());
ExceptionHandlers::initializeHandle(
empty_async_exception_handlers_,
static_cast<ExceptionHandlersPtr>(address + kHeapObjectTag));
empty_async_exception_handlers_->StoreNonPointer(
&empty_async_exception_handlers_->untag()->packed_fields_,
UntaggedExceptionHandlers::AsyncHandlerBit::update(true, 0));
empty_async_exception_handlers_->SetCanonical();
}
// Allocate and initialize the canonical empty type arguments object.
{
uword address =
@ -1239,6 +1258,8 @@ void Object::Init(IsolateGroup* isolate_group) {
ASSERT(empty_var_descriptors_->IsLocalVarDescriptors());
ASSERT(!empty_exception_handlers_->IsSmi());
ASSERT(empty_exception_handlers_->IsExceptionHandlers());
ASSERT(!empty_async_exception_handlers_->IsSmi());
ASSERT(empty_async_exception_handlers_->IsExceptionHandlers());
ASSERT(!sentinel_->IsSmi());
ASSERT(sentinel_->IsSentinel());
ASSERT(!transition_sentinel_->IsSmi());
@ -2039,6 +2060,10 @@ ErrorPtr Object::Init(IsolateGroup* isolate_group,
RegisterClass(cls, Symbols::FutureOr(), lib);
pending_classes.Add(cls);
cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group);
RegisterPrivateClass(cls, Symbols::_SuspendState(), lib);
pending_classes.Add(cls);
// Pre-register the developer library so we can place the vm class
// UserTag there rather than the core library.
lib = Library::LookupLibrary(thread, Symbols::DartDeveloper());
@ -2542,6 +2567,7 @@ ErrorPtr Object::Init(IsolateGroup* isolate_group,
cls = Class::New<ReceivePort, RTN::ReceivePort>(isolate_group);
cls = Class::New<SendPort, RTN::SendPort>(isolate_group);
cls = Class::New<StackTrace, RTN::StackTrace>(isolate_group);
cls = Class::New<SuspendState, RTN::SuspendState>(isolate_group);
cls = Class::New<RegExp, RTN::RegExp>(isolate_group);
cls = Class::New<Number, RTN::Number>(isolate_group);
@ -15358,7 +15384,18 @@ intptr_t LocalVarDescriptors::Length() const {
}
intptr_t ExceptionHandlers::num_entries() const {
return untag()->num_entries_;
return untag()->num_entries();
}
bool ExceptionHandlers::has_async_handler() const {
return UntaggedExceptionHandlers::AsyncHandlerBit::decode(
untag()->packed_fields_);
}
void ExceptionHandlers::set_has_async_handler(bool value) const {
StoreNonPointer(&untag()->packed_fields_,
UntaggedExceptionHandlers::AsyncHandlerBit::update(
value, untag()->packed_fields_));
}
void ExceptionHandlers::SetHandlerInfo(intptr_t try_index,
@ -15450,7 +15487,9 @@ ExceptionHandlersPtr ExceptionHandlers::New(intptr_t num_handlers) {
ExceptionHandlers::ContainsCompressedPointers());
NoSafepointScope no_safepoint;
result ^= raw;
result.StoreNonPointer(&result.untag()->num_entries_, num_handlers);
result.StoreNonPointer(
&result.untag()->packed_fields_,
UntaggedExceptionHandlers::NumEntriesBits::update(num_handlers, 0));
}
const Array& handled_types_data =
(num_handlers == 0) ? Object::empty_array()
@ -15476,7 +15515,9 @@ ExceptionHandlersPtr ExceptionHandlers::New(const Array& handled_types_data) {
ExceptionHandlers::ContainsCompressedPointers());
NoSafepointScope no_safepoint;
result ^= raw;
result.StoreNonPointer(&result.untag()->num_entries_, num_handlers);
result.StoreNonPointer(
&result.untag()->packed_fields_,
UntaggedExceptionHandlers::NumEntriesBits::update(num_handlers, 0));
}
result.set_handled_types_data(handled_types_data);
return result.ptr();
@ -15485,8 +15526,11 @@ ExceptionHandlersPtr ExceptionHandlers::New(const Array& handled_types_data) {
const char* ExceptionHandlers::ToCString() const {
#define FORMAT1 "%" Pd " => %#x (%" Pd " types) (outer %d)%s%s\n"
#define FORMAT2 " %d. %s\n"
#define FORMAT3 "<async handler>\n"
if (num_entries() == 0) {
return "empty ExceptionHandlers\n";
return has_async_handler()
? "empty ExceptionHandlers (with <async handler>)\n"
: "empty ExceptionHandlers\n";
}
auto& handled_types = Array::Handle();
auto& type = AbstractType::Handle();
@ -15509,6 +15553,9 @@ const char* ExceptionHandlers::ToCString() const {
len += Utils::SNPrint(NULL, 0, FORMAT2, k, type.ToCString());
}
}
if (has_async_handler()) {
len += Utils::SNPrint(NULL, 0, FORMAT3);
}
// Allocate the buffer.
char* buffer = Thread::Current()->zone()->Alloc<char>(len);
// Layout the fields in the buffer.
@ -15529,9 +15576,14 @@ const char* ExceptionHandlers::ToCString() const {
FORMAT2, k, type.ToCString());
}
}
if (has_async_handler()) {
num_chars +=
Utils::SNPrint((buffer + num_chars), (len - num_chars), FORMAT3);
}
return buffer;
#undef FORMAT1
#undef FORMAT2
#undef FORMAT3
}
void SingleTargetCache::set_target(const Code& value) const {
@ -25983,6 +26035,54 @@ DEFINE_FLAG_HANDLER(DwarfStackTracesHandler,
"Omit CodeSourceMaps in precompiled snapshots and don't "
"symbolize stack traces in the precompiled runtime.");
SuspendStatePtr SuspendState::New(intptr_t frame_size,
const Instance& future,
Heap::Space space) {
SuspendState& result = SuspendState::Handle();
{
ObjectPtr raw = Object::Allocate(
SuspendState::kClassId, SuspendState::InstanceSize(frame_size), space,
SuspendState::ContainsCompressedPointers());
NoSafepointScope no_safepoint;
result ^= raw;
result.set_frame_size(frame_size);
result.set_pc(0);
result.set_future(future);
}
return result.ptr();
}
void SuspendState::set_frame_size(intptr_t frame_size) const {
ASSERT(frame_size >= 0);
StoreNonPointer(&untag()->frame_size_, frame_size);
}
void SuspendState::set_pc(uword pc) const {
StoreNonPointer(&untag()->pc_, pc);
}
void SuspendState::set_future(const Instance& future) const {
untag()->set_future(future.ptr());
}
const char* SuspendState::ToCString() const {
return "SuspendState";
}
CodePtr SuspendState::GetCodeObject() const {
ASSERT(pc() != 0);
#if defined(DART_PRECOMPILED_RUNTIME)
NoSafepointScope no_safepoint;
CodePtr code = ReversePc::Lookup(IsolateGroup::Current(), pc(),
/*is_return_address=*/true);
ASSERT(code != Code::null());
return code;
#else
UNIMPLEMENTED();
return Code::null();
#endif // defined(DART_PRECOMPILED_RUNTIME)
}
void RegExp::set_pattern(const String& pattern) const {
untag()->set_pattern(pattern.ptr());
}

View file

@ -447,6 +447,7 @@ class Object {
V(PcDescriptors, empty_descriptors) \
V(LocalVarDescriptors, empty_var_descriptors) \
V(ExceptionHandlers, empty_exception_handlers) \
V(ExceptionHandlers, empty_async_exception_handlers) \
V(Array, extractor_parameter_types) \
V(Array, extractor_parameter_names) \
V(Sentinel, sentinel) \
@ -2891,7 +2892,12 @@ class Function : public Object {
static intptr_t code_offset() { return OFFSET_OF(UntaggedFunction, code_); }
uword entry_point() const { return untag()->entry_point_; }
uword entry_point() const {
return EntryPointOf(ptr());
}
static uword EntryPointOf(const FunctionPtr function) {
return function->untag()->entry_point_;
}
static intptr_t entry_point_offset(
CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
@ -3162,6 +3168,12 @@ class Function : public Object {
// Returns the number of implicit parameters, e.g., this for instance methods.
intptr_t NumImplicitParameters() const;
// Returns true if parameters of this function are copied into the frame
// in the function prologue.
bool MakesCopyOfParameters() const {
return HasOptionalParameters() || IsCompactAsyncFunction();
}
#if defined(DART_PRECOMPILED_RUNTIME)
#define DEFINE_GETTERS_AND_SETTERS(return_type, type, name) \
static intptr_t name##_offset() { \
@ -3558,6 +3570,12 @@ class Function : public Object {
return modifier() == UntaggedFunction::kAsync;
}
// TODO(alexmarkov): replace this predicate with IsAsyncFunction() after
// old async functions are removed.
bool IsCompactAsyncFunction() const {
return IsAsyncFunction() && is_debuggable();
}
// Recognise synthetic sync-yielding functions like the inner-most:
// user_func /* was async */ {
// :async_op(..) yielding {
@ -6220,6 +6238,9 @@ class ExceptionHandlers : public Object {
intptr_t num_entries() const;
bool has_async_handler() const;
void set_has_async_handler(bool value) const;
void GetHandlerInfo(intptr_t try_index, ExceptionHandlerInfo* info) const;
uword HandlerPCOffset(intptr_t try_index) const;
@ -11763,6 +11784,63 @@ class StackTrace : public Instance {
friend class DebuggerStackTrace;
};
class SuspendState : public Instance {
public:
// :suspend_state local variable index
static constexpr intptr_t kSuspendStateVarIndex = 0;
static intptr_t HeaderSize() { return sizeof(UntaggedSuspendState); }
static intptr_t UnroundedSize(SuspendStatePtr ptr) {
return UnroundedSize(ptr->untag()->frame_size_);
}
static intptr_t UnroundedSize(intptr_t frame_size) {
return HeaderSize() + frame_size;
}
static intptr_t InstanceSize() {
ASSERT_EQUAL(sizeof(UntaggedSuspendState),
OFFSET_OF_RETURNED_VALUE(UntaggedSuspendState, payload));
return 0;
}
static intptr_t InstanceSize(intptr_t frame_size) {
return RoundedAllocationSize(UnroundedSize(frame_size));
}
static intptr_t frame_size_offset() {
return OFFSET_OF(UntaggedSuspendState, frame_size_);
}
static intptr_t pc_offset() { return OFFSET_OF(UntaggedSuspendState, pc_); }
static intptr_t future_offset() {
return OFFSET_OF(UntaggedSuspendState, future_);
}
static intptr_t then_callback_offset() {
return OFFSET_OF(UntaggedSuspendState, then_callback_);
}
static intptr_t error_callback_offset() {
return OFFSET_OF(UntaggedSuspendState, error_callback_);
}
static intptr_t payload_offset() {
return UntaggedSuspendState::payload_offset();
}
static SuspendStatePtr New(intptr_t frame_size,
const Instance& future,
Heap::Space space = Heap::kNew);
InstancePtr future() const { return untag()->future(); }
uword pc() const { return untag()->pc_; }
// Returns Code object corresponding to the suspended function.
CodePtr GetCodeObject() const;
private:
void set_frame_size(intptr_t frame_size) const;
void set_pc(uword pc) const;
void set_future(const Instance& future) const;
FINAL_HEAP_OBJECT_IMPLEMENTATION(SuspendState, Instance);
friend class Class;
};
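As a worked illustration of UnroundedSize/InstanceSize above (a standalone model, not VM code; the 48-byte header size and 16-byte object alignment are assumptions chosen only to make the arithmetic concrete):

#include <cstddef>
#include <cstdio>

constexpr size_t kObjectAlignment = 16;         // assumed allocation alignment
constexpr size_t kSuspendStateHeaderSize = 48;  // stand-in for sizeof(UntaggedSuspendState)

constexpr size_t RoundedAllocationSize(size_t size) {
  return (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
}

constexpr size_t SuspendStateInstanceSize(size_t frame_size) {
  // Header followed by the copied frame payload, rounded up for the allocator.
  return RoundedAllocationSize(kSuspendStateHeaderSize + frame_size);
}

int main() {
  // A 72-byte suspended frame: 48 + 72 = 120, rounded up to 128.
  printf("%zu\n", SuspendStateInstanceSize(72));
  return 0;
}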
class RegExpFlags {
public:
// Flags are passed to a regex object as follows:

View file

@ -74,6 +74,7 @@
V(Smi) \
V(StackTrace) \
V(SubtypeTestCache) \
V(SuspendState) \
V(Type) \
V(TypeArguments) \
V(TypeParameter) \
@ -619,6 +620,7 @@ class ObjectCopyBase {
HANDLE_ILLEGAL_CASE(MirrorReference)
HANDLE_ILLEGAL_CASE(Pointer)
HANDLE_ILLEGAL_CASE(ReceivePort)
HANDLE_ILLEGAL_CASE(SuspendState)
HANDLE_ILLEGAL_CASE(UserTag)
default:
return true;

View file

@ -1760,6 +1760,10 @@ void FutureOr::PrintJSONImpl(JSONStream* stream, bool ref) const {
Instance::PrintJSONImpl(stream, ref);
}
void SuspendState::PrintJSONImpl(JSONStream* stream, bool ref) const {
Instance::PrintJSONImpl(stream, ref);
}
#endif
} // namespace dart

View file

@ -301,6 +301,31 @@ void ObjectStore::InitKnownObjects() {
}
}
cls = async_lib.LookupClassAllowPrivate(Symbols::_SuspendState());
ASSERT(!cls.IsNull());
const auto& error = cls.EnsureIsFinalized(thread);
ASSERT(error == Error::null());
function = cls.LookupFunctionAllowPrivate(Symbols::_initAsync());
ASSERT(!function.IsNull());
set_suspend_state_init_async(function);
function = cls.LookupFunctionAllowPrivate(Symbols::_awaitAsync());
ASSERT(!function.IsNull());
set_suspend_state_await_async(function);
function = cls.LookupFunctionAllowPrivate(Symbols::_returnAsync());
ASSERT(!function.IsNull());
set_suspend_state_return_async(function);
function = cls.LookupFunctionAllowPrivate(Symbols::_returnAsyncNotFuture());
ASSERT(!function.IsNull());
set_suspend_state_return_async_not_future(function);
function = cls.LookupFunctionAllowPrivate(Symbols::_handleException());
ASSERT(!function.IsNull());
set_suspend_state_handle_exception(function);
const Library& core_lib = Library::Handle(zone, core_library());
cls = core_lib.LookupClassAllowPrivate(Symbols::_CompileTimeError());
ASSERT(!cls.IsNull());

View file

@ -168,6 +168,11 @@ class ObjectPointerVisitor;
RW(Function, complete_on_async_return) \
RW(Function, complete_with_no_future_on_async_return) \
RW(Function, complete_on_async_error) \
RW(Function, suspend_state_init_async) \
RW(Function, suspend_state_await_async) \
RW(Function, suspend_state_return_async) \
RW(Function, suspend_state_return_async_not_future) \
RW(Function, suspend_state_handle_exception) \
RW(Class, async_star_stream_controller) \
ARW_RELAXED(Smi, future_timeout_future_index) \
ARW_RELAXED(Smi, future_wait_future_index) \
@ -239,6 +244,11 @@ class ObjectPointerVisitor;
RW(Code, type_parameter_tts_stub) \
RW(Code, unreachable_tts_stub) \
RW(Code, slow_tts_stub) \
RW(Code, await_async_stub) \
RW(Code, init_async_stub) \
RW(Code, resume_stub) \
RW(Code, return_async_stub) \
RW(Code, return_async_not_future_stub) \
RW(Array, dispatch_table_code_entries) \
RW(GrowableObjectArray, instructions_tables) \
RW(Array, obfuscation_map) \
@ -314,6 +324,11 @@ class ObjectPointerVisitor;
DO(init_instance_field_stub, InitInstanceField) \
DO(init_late_instance_field_stub, InitLateInstanceField) \
DO(init_late_final_instance_field_stub, InitLateFinalInstanceField) \
DO(await_async_stub, AwaitAsync) \
DO(init_async_stub, InitAsync) \
DO(resume_stub, Resume) \
DO(return_async_stub, ReturnAsync) \
DO(return_async_not_future_stub, ReturnAsyncNotFuture) \
DO(instance_of_stub, InstanceOf)
#define ISOLATE_OBJECT_STORE_FIELD_LIST(R_, RW) \

View file

@ -187,6 +187,7 @@ void ParsedFunction::AllocateVariables() {
const intptr_t num_fixed_params = function().num_fixed_parameters();
const intptr_t num_opt_params = function().NumOptionalParameters();
const intptr_t num_params = num_fixed_params + num_opt_params;
const bool copy_parameters = function().MakesCopyOfParameters();
// Before we start allocating indices to variables, we'll setup the
// parameters array, which can be used to access the raw parameters (i.e. not
@ -212,7 +213,7 @@ void ParsedFunction::AllocateVariables() {
raw_parameter->set_needs_covariant_check_in_method();
}
raw_parameter->set_type_check_mode(variable->type_check_mode());
if (function().HasOptionalParameters()) {
if (copy_parameters) {
bool ok = scope->AddVariable(raw_parameter);
ASSERT(ok);
@ -261,33 +262,30 @@ void ParsedFunction::AllocateVariables() {
// The copy parameters implementation will still write to local variables
// which we assign indices as with the old CopyParams implementation.
VariableIndex parameter_index_start;
VariableIndex reamining_local_variables_start;
VariableIndex first_local_index;
{
// Compute start indices to parameters and locals, and the number of
// parameters to copy.
if (num_opt_params == 0) {
parameter_index_start = first_parameter_index_ =
VariableIndex(num_params);
reamining_local_variables_start = VariableIndex(0);
if (!copy_parameters) {
ASSERT(suspend_state_var() == nullptr);
first_parameter_index_ = VariableIndex(num_params);
first_local_index = VariableIndex(0);
} else {
parameter_index_start = first_parameter_index_ = VariableIndex(0);
reamining_local_variables_start = VariableIndex(-num_params);
// :suspend_state variable is inserted at the fixed slot
// before the copied parameters.
const intptr_t reserved_var_slot_count =
(suspend_state_var() != nullptr) ? 1 : 0;
first_parameter_index_ = VariableIndex(-reserved_var_slot_count);
first_local_index =
VariableIndex(first_parameter_index_.value() - num_params);
}
}
if (function_type_arguments_ != NULL && num_opt_params > 0) {
reamining_local_variables_start =
VariableIndex(reamining_local_variables_start.value() - 1);
}
// Allocate parameters and local variables, either in the local frame or
// in the context(s).
bool found_captured_variables = false;
VariableIndex first_local_index =
VariableIndex(parameter_index_start.value() > 0 ? 0 : -num_params);
VariableIndex next_free_index = scope->AllocateVariables(
function(), parameter_index_start, num_params, first_local_index, NULL,
function(), first_parameter_index_, num_params, first_local_index, NULL,
&found_captured_variables);
num_stack_locals_ = -next_free_index.value();
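A worked sketch of the index computation above (not VM code; the parameter count is an example, and the indices are VariableIndex values rather than FP-relative offsets):

#include <cstdio>

int main() {
  const int num_params = 2;               // example: two fixed parameters
  const int reserved_var_slot_count = 1;  // :suspend_state variable is present
  const int first_parameter_index = -reserved_var_slot_count;        // -1
  const int first_local_index = first_parameter_index - num_params;  // -3
  // :suspend_state itself is pinned to SuspendState::kSuspendStateVarIndex (0),
  // the fixed slot just before the copied parameters.
  printf("first parameter index %d, first local index %d\n",
         first_parameter_index, first_local_index);
  return 0;
}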

View file

@ -74,6 +74,12 @@ class ParsedFunction : public ZoneAllocated {
parent_type_arguments_ = parent_type_arguments;
}
LocalVariable* suspend_state_var() const { return suspend_state_var_; }
void set_suspend_state_var(LocalVariable* suspend_state_var) {
ASSERT(suspend_state_var != nullptr);
suspend_state_var_ = suspend_state_var;
}
void set_default_parameter_values(ZoneGrowableArray<const Instance*>* list) {
default_parameter_values_ = list;
#if defined(DEBUG)
@ -260,6 +266,7 @@ class ParsedFunction : public ZoneAllocated {
RegExpCompileData* regexp_compile_data_;
LocalVariable* function_type_arguments_;
LocalVariable* parent_type_arguments_;
LocalVariable* suspend_state_var_ = nullptr;
LocalVariable* current_context_var_;
LocalVariable* arg_desc_var_;
LocalVariable* receiver_var_ = nullptr;

View file

@ -12,6 +12,7 @@
#include "vm/isolate_reload.h"
#include "vm/object.h"
#include "vm/runtime_entry.h"
#include "vm/stack_frame.h"
#include "vm/visitor.h"
namespace dart {
@ -170,6 +171,13 @@ intptr_t UntaggedObject::HeapSizeFromClass(uword tags) const {
case kPointerCid:
instance_size = Pointer::InstanceSize();
break;
case kSuspendStateCid: {
const SuspendStatePtr raw_suspend_state =
static_cast<const SuspendStatePtr>(this);
intptr_t frame_size = raw_suspend_state->untag()->frame_size_;
instance_size = SuspendState::InstanceSize(frame_size);
break;
}
case kTypeArgumentsCid: {
const TypeArgumentsPtr raw_array =
static_cast<const TypeArgumentsPtr>(this);
@ -208,7 +216,7 @@ intptr_t UntaggedObject::HeapSizeFromClass(uword tags) const {
case kExceptionHandlersCid: {
const ExceptionHandlersPtr raw_handlers =
static_cast<const ExceptionHandlersPtr>(this);
intptr_t num_handlers = raw_handlers->untag()->num_entries_;
intptr_t num_handlers = raw_handlers->untag()->num_entries();
instance_size = ExceptionHandlers::InstanceSize(num_handlers);
break;
}
@ -563,7 +571,7 @@ COMPRESSED_VISITOR(TypeParameters)
VARIABLE_COMPRESSED_VISITOR(TypeArguments,
Smi::Value(raw_obj->untag()->length()))
VARIABLE_COMPRESSED_VISITOR(LocalVarDescriptors, raw_obj->untag()->num_entries_)
VARIABLE_COMPRESSED_VISITOR(ExceptionHandlers, raw_obj->untag()->num_entries_)
VARIABLE_COMPRESSED_VISITOR(ExceptionHandlers, raw_obj->untag()->num_entries())
VARIABLE_COMPRESSED_VISITOR(Context, raw_obj->untag()->num_variables_)
VARIABLE_COMPRESSED_VISITOR(Array, Smi::Value(raw_obj->untag()->length()))
VARIABLE_COMPRESSED_VISITOR(
@ -626,6 +634,33 @@ intptr_t UntaggedField::VisitFieldPointers(FieldPtr raw_obj,
return Field::InstanceSize();
}
intptr_t UntaggedSuspendState::VisitSuspendStatePointers(
SuspendStatePtr raw_obj,
ObjectPointerVisitor* visitor) {
ASSERT(raw_obj->IsHeapObject());
ASSERT_COMPRESSED(SuspendState);
if (visitor->CanVisitSuspendStatePointers(raw_obj)) {
visitor->VisitCompressedPointers(
raw_obj->heap_base(), raw_obj->untag()->from(), raw_obj->untag()->to());
const uword pc = raw_obj->untag()->pc_;
if (pc != 0) {
Thread* thread = Thread::Current();
ASSERT(thread != nullptr);
ASSERT(thread->isolate_group() == visitor->isolate_group());
const uword sp = reinterpret_cast<uword>(raw_obj->untag()->payload());
StackFrame frame(thread);
frame.pc_ = pc;
frame.sp_ = sp;
frame.fp_ = sp + raw_obj->untag()->frame_size_;
frame.VisitObjectPointers(visitor);
}
}
return SuspendState::InstanceSize(raw_obj->untag()->frame_size_);
}
bool UntaggedCode::ContainsPC(const ObjectPtr raw_obj, uword pc) {
if (!raw_obj->IsCode()) return false;
auto const raw_code = static_cast<const CodePtr>(raw_obj);

View file

@ -2249,16 +2249,30 @@ class UntaggedExceptionHandlers : public UntaggedObject {
private:
RAW_HEAP_OBJECT_IMPLEMENTATION(ExceptionHandlers);
// Number of exception handler entries.
int32_t num_entries_;
// Number of exception handler entries and
// async handler.
uint32_t packed_fields_;
// Array with [num_entries_] entries. Each entry is an array of all handled
// The async handler is used in async/async* functions.
// It is an implicit exception handler (stub) which runs when
// an exception is not handled within the function.
using AsyncHandlerBit = BitField<decltype(packed_fields_), bool, 0, 1>;
using NumEntriesBits = BitField<decltype(packed_fields_),
uint32_t,
AsyncHandlerBit::kNextBit,
31>;
intptr_t num_entries() const {
return NumEntriesBits::decode(packed_fields_);
}
// Array with [num_entries] entries. Each entry is an array of all handled
// exception types.
COMPRESSED_POINTER_FIELD(ArrayPtr, handled_types_data)
VISIT_FROM(handled_types_data)
VISIT_TO(handled_types_data)
// Exception handler info of length [num_entries_].
// Exception handler info of length [num_entries].
const ExceptionHandlerInfo* data() const {
OPEN_ARRAY_START(ExceptionHandlerInfo, intptr_t);
}
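A standalone model of the packed_fields_ layout above (not VM code; it simply mirrors AsyncHandlerBit at bit 0 and NumEntriesBits in the remaining 31 bits):

#include <cassert>
#include <cstdint>

// Bit 0: async-handler flag; bits 1..31: number of handler entries.
static uint32_t Encode(uint32_t num_entries, bool has_async_handler) {
  return (num_entries << 1) | (has_async_handler ? 1u : 0u);
}
static uint32_t NumEntries(uint32_t packed) { return packed >> 1; }
static bool HasAsyncHandler(uint32_t packed) { return (packed & 1u) != 0; }

int main() {
  const uint32_t packed = Encode(/*num_entries=*/3, /*has_async_handler=*/true);
  assert(packed == 7u);  // (3 << 1) | 1
  assert(NumEntries(packed) == 3u);
  assert(HasAsyncHandler(packed));
  // The canonical empty_async_exception_handlers object in the diff uses
  // AsyncHandlerBit::update(true, 0), i.e. packed value 1 with zero entries.
  return 0;
}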
@ -3272,6 +3286,30 @@ class UntaggedStackTrace : public UntaggedInstance {
bool skip_sync_start_in_parent_stack;
};
class UntaggedSuspendState : public UntaggedInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(SuspendState);
intptr_t frame_size_;
uword pc_;
COMPRESSED_POINTER_FIELD(InstancePtr, future)
COMPRESSED_POINTER_FIELD(ClosurePtr, then_callback)
COMPRESSED_POINTER_FIELD(ClosurePtr, error_callback)
VISIT_FROM(future)
VISIT_TO(error_callback)
public:
uword pc() const { return pc_; }
static intptr_t payload_offset() {
return OFFSET_OF_RETURNED_VALUE(UntaggedSuspendState, payload);
}
// Variable length payload follows here.
uint8_t* payload() { OPEN_ARRAY_START(uint8_t, uint8_t); }
const uint8_t* payload() const { OPEN_ARRAY_START(uint8_t, uint8_t); }
};
// VM type for capturing JS regular expressions.
class UntaggedRegExp : public UntaggedInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(RegExp);

View file

@ -191,6 +191,9 @@ namespace dart {
F(RegExp, two_byte_sticky_) \
F(RegExp, external_one_byte_sticky_) \
F(RegExp, external_two_byte_sticky_) \
F(SuspendState, future_) \
F(SuspendState, then_callback_) \
F(SuspendState, error_callback_) \
F(WeakProperty, key_) \
F(WeakProperty, value_) \
F(WeakReference, target_) \

View file

@ -709,6 +709,19 @@ DEFINE_RUNTIME_ENTRY(CloneContext, 1) {
arguments.SetReturn(cloned_ctx);
}
// Allocate a SuspendState object.
// Arg0: frame size.
// Arg1: future.
// Return value: newly allocated object.
DEFINE_RUNTIME_ENTRY(AllocateSuspendState, 2) {
const Smi& frame_size = Smi::CheckedHandle(zone, arguments.ArgAt(0));
const Instance& future = Instance::CheckedHandle(zone, arguments.ArgAt(1));
const SuspendState& result = SuspendState::Handle(
zone, SuspendState::New(frame_size.Value(), future,
SpaceForRuntimeAllocation()));
arguments.SetReturn(result);
}
// Helper routine for tracing a type check.
static void PrintTypeCheck(const char* message,
const Instance& instance,

View file

@ -18,6 +18,7 @@ namespace dart {
V(AllocateClosure) \
V(AllocateContext) \
V(AllocateObject) \
V(AllocateSuspendState) \
V(BoxDouble) \
V(BreakpointRuntimeHandler) \
V(SingleStepHandler) \

View file

@ -149,6 +149,7 @@ VariableIndex LocalScope::AllocateVariables(const Function& function,
LocalVariable* controller = nullptr;
LocalVariable* chained_future = nullptr;
LocalVariable* is_sync = nullptr;
LocalVariable* suspend_state_var = nullptr;
for (intptr_t i = 0; i < num_variables(); i++) {
LocalVariable* variable = VariableAt(i);
if (variable->owner() == this) {
@ -164,6 +165,10 @@ VariableIndex LocalScope::AllocateVariables(const Function& function,
} else if (variable->name().Equals(Symbols::is_sync())) {
is_sync = variable;
}
} else {
if (variable->name().Equals(Symbols::SuspendStateVar())) {
suspend_state_var = variable;
}
}
}
}
@ -218,6 +223,12 @@ VariableIndex LocalScope::AllocateVariables(const Function& function,
ASSERT(is_sync->index().value() == Context::kIsSyncIndex);
}
if (suspend_state_var != nullptr) {
suspend_state_var->set_index(
VariableIndex(SuspendState::kSuspendStateVarIndex));
ASSERT(next_index.value() == SuspendState::kSuspendStateVarIndex - 1);
}
while (pos < num_parameters) {
LocalVariable* parameter = VariableAt(pos);
pos++;
@ -253,8 +264,10 @@ VariableIndex LocalScope::AllocateVariables(const Function& function,
*found_captured_variables = true;
}
} else {
variable->set_index(next_index);
next_index = VariableIndex(next_index.value() - 1);
if (variable != suspend_state_var) {
variable->set_index(next_index);
next_index = VariableIndex(next_index.value() - 1);
}
}
}
pos++;

View file

@ -394,21 +394,25 @@ bool StackFrame::FindExceptionHandler(Thread* thread,
return true;
}
if (handlers.num_entries() == 0) {
return false;
}
intptr_t try_index = -1;
uword pc_offset = pc() - code.PayloadStart();
PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kAnyKind);
while (iter.MoveNext()) {
const intptr_t current_try_index = iter.TryIndex();
if ((iter.PcOffset() == pc_offset) && (current_try_index != -1)) {
try_index = current_try_index;
break;
if (handlers.num_entries() != 0) {
uword pc_offset = pc() - code.PayloadStart();
PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kAnyKind);
while (iter.MoveNext()) {
const intptr_t current_try_index = iter.TryIndex();
if ((iter.PcOffset() == pc_offset) && (current_try_index != -1)) {
try_index = current_try_index;
break;
}
}
}
if (try_index == -1) {
if (handlers.has_async_handler()) {
*handler_pc = StubCode::AsyncExceptionHandler().EntryPoint();
*needs_stacktrace = true;
*has_catch_all = true;
return true;
}
return false;
}
ExceptionHandlerInfo handler_info;

View file

@ -159,6 +159,9 @@ class StackFrame : public ValueObject {
// fields fp_ and sp_ when they return the respective frame objects.
friend class FrameSetIterator;
friend class StackFrameIterator;
// UntaggedSuspendState::VisitSuspendStatePointers creates temporary
// StackFrame objects for the copied frames of suspended functions.
friend class UntaggedSuspendState;
friend class ProfilerDartStackWalker;
DISALLOW_COPY_AND_ASSIGN(StackFrame);
};

View file

@ -59,6 +59,7 @@ CallerClosureFinder::CallerClosureFinder(Zone* zone)
receiver_context_(Context::Handle(zone)),
receiver_function_(Function::Handle(zone)),
parent_function_(Function::Handle(zone)),
suspend_state_(SuspendState::Handle(zone)),
context_entry_(Object::Handle(zone)),
future_(Object::Handle(zone)),
listener_(Object::Handle(zone)),
@ -266,6 +267,12 @@ ClosurePtr CallerClosureFinder::FindCaller(const Closure& receiver_closure) {
return UnwrapAsyncThen(closure_);
}
ClosurePtr CallerClosureFinder::FindCallerFromSuspendState(
const SuspendState& suspend_state) {
future_ = suspend_state.future();
return GetCallerInFutureImpl(future_);
}
ClosurePtr CallerClosureFinder::UnwrapAsyncThen(const Closure& closure) {
if (closure.IsNull()) return closure.ptr();
@ -280,11 +287,31 @@ ClosurePtr CallerClosureFinder::UnwrapAsyncThen(const Closure& closure) {
return closure.ptr();
}
bool CallerClosureFinder::IsCompactAsyncCallback(const Function& function) {
parent_function_ = function.parent_function();
return parent_function_.recognized_kind() ==
MethodRecognizer::kSuspendState_createAsyncCallbacks;
}
SuspendStatePtr CallerClosureFinder::GetSuspendStateFromAsyncCallback(
const Closure& closure) {
ASSERT(IsCompactAsyncCallback(Function::Handle(closure.function())));
// Async handler only captures the receiver (SuspendState).
receiver_context_ = closure.context();
RELEASE_ASSERT(receiver_context_.num_variables() == 1);
return SuspendState::RawCast(receiver_context_.At(0));
}
ClosurePtr CallerClosureFinder::FindCallerInternal(
const Closure& receiver_closure) {
receiver_function_ = receiver_closure.function();
receiver_context_ = receiver_closure.context();
if (IsCompactAsyncCallback(receiver_function_)) {
suspend_state_ = GetSuspendStateFromAsyncCallback(receiver_closure);
return FindCallerFromSuspendState(suspend_state_);
}
if (receiver_function_.IsAsyncGenClosure()) {
return FindCallerInAsyncGenClosure(receiver_context_);
}
@ -442,6 +469,21 @@ ClosurePtr StackTraceUtils::ClosureFromFrameFunction(
return Closure::null();
}
if (function.IsCompactAsyncFunction()) {
auto& suspend_state = Object::Handle(
zone, *reinterpret_cast<ObjectPtr*>(LocalVarAddress(
frame->fp(), runtime_frame_layout.FrameSlotForVariableIndex(
SuspendState::kSuspendStateVarIndex))));
if (suspend_state.IsSuspendState()) {
*is_async = true;
return caller_closure_finder->FindCallerFromSuspendState(
SuspendState::Cast(suspend_state));
}
// Still running the sync part before the first await.
return Closure::null();
}
if (function.IsAsyncClosure() || function.IsAsyncGenClosure()) {
// Next, look up caller's closure on the stack and walk backwards
// through the yields.
@ -506,6 +548,7 @@ void StackTraceUtils::UnwindAwaiterChain(
auto& function = Function::Handle(zone);
auto& closure = Closure::Handle(zone, leaf_closure.ptr());
auto& pc_descs = PcDescriptors::Handle(zone);
auto& suspend_state = SuspendState::Handle(zone);
// Inject async suspension marker.
code_array.Add(StubCode::AsynchronousGapMarker());
@ -518,16 +561,31 @@ void StackTraceUtils::UnwindAwaiterChain(
if (function.IsNull()) {
continue;
}
// In hot-reload-test-mode we sometimes have to do this:
code = function.EnsureHasCode();
RELEASE_ASSERT(!code.IsNull());
code_array.Add(code);
pc_descs = code.pc_descriptors();
const intptr_t pc_offset = FindPcOffset(pc_descs, GetYieldIndex(closure));
// Unlike other sources of PC offsets, the offset may be 0 here if we
// reach a non-async closure receiving the yielded value.
ASSERT(pc_offset >= 0);
pc_offset_array->Add(pc_offset);
if (caller_closure_finder->IsCompactAsyncCallback(function)) {
suspend_state =
caller_closure_finder->GetSuspendStateFromAsyncCallback(closure);
const uword pc = suspend_state.pc();
if (pc == 0) {
// Async function is already resumed.
continue;
}
code = suspend_state.GetCodeObject();
code_array.Add(code);
const uword pc_offset = pc - code.PayloadStart();
ASSERT(pc_offset > 0 && pc_offset <= code.Size());
pc_offset_array->Add(pc_offset);
} else {
// In hot-reload-test-mode we sometimes have to do this:
code = function.EnsureHasCode();
RELEASE_ASSERT(!code.IsNull());
code_array.Add(code);
pc_descs = code.pc_descriptors();
const intptr_t pc_offset = FindPcOffset(pc_descs, GetYieldIndex(closure));
// Unlike other sources of PC offsets, the offset may be 0 here if we
// reach a non-async closure receiving the yielded value.
ASSERT(pc_offset >= 0);
pc_offset_array->Add(pc_offset);
}
// Inject async suspension marker.
code_array.Add(StubCode::AsynchronousGapMarker());

View file

@ -36,6 +36,17 @@ class CallerClosureFinder {
// we can do this by finding and following their awaited Futures.
ClosurePtr FindCaller(const Closure& receiver_closure);
// Find caller closure from a SuspendState of a resumed async function.
ClosurePtr FindCallerFromSuspendState(const SuspendState& suspend_state);
// Returns true if the given closure function is a Future callback
// corresponding to an async function.
bool IsCompactAsyncCallback(const Function& function);
// Returns SuspendState from the given Future callback which corresponds
// to an async function.
SuspendStatePtr GetSuspendStateFromAsyncCallback(const Closure& closure);
// Finds the awaited Future from an async function receiver closure.
ObjectPtr GetAsyncFuture(const Closure& receiver_closure);
@ -64,6 +75,7 @@ class CallerClosureFinder {
Context& receiver_context_;
Function& receiver_function_;
Function& parent_function_;
SuspendState& suspend_state_;
Object& context_entry_;
Object& future_;

Some files were not shown because too many files have changed in this diff.