[vm] Refactor compilation of runtime calls.

- Fixes SIMD registers clobbered by the write barrier on ARM64.
- Moves code generation out of the non-compiler directory.
- Removes unnecessary building of Dart frames on leaf runtime calls.
- Removes unnecessary Thread slots for write-barrier Code objects.
- Removes duplicate saves of SP in leaf runtime calls on ARM64 and RISC-V.
- Avoids some redundant SP updates on RISC-V.

TEST=ci
Change-Id: Idb92127658edc90b320923ef3d882a7219a450ae
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/236842
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Authored by Ryan Macnak on 2022-03-22 19:45:03 +00:00; committed by Commit Bot
parent 4abac88110
commit 67f93d3840
45 changed files with 3522 additions and 3540 deletions
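
At its core, the refactor replaces the per-backend EnterCallRuntimeFrame /
LeaveCallRuntimeFrame helpers and the CallRuntimeScope class with a single
LeafRuntimeScope shared by all architectures, while Assembler::CallRuntime
keeps handling non-leaf calls. A rough before/after sketch of a leaf call
site (illustrative only: kExampleLeafRuntimeEntry is a stand-in name and
__ is the usual assembler macro):

// Before: explicit enter/call/leave, implemented separately per backend.
__ EnterCallRuntimeFrame(/*frame_space=*/0);
__ CallRuntime(kExampleLeafRuntimeEntry, /*argument_count=*/1);
__ LeaveCallRuntimeFrame();

// After: an RAII scope owns the frame; Call() also switches the VM tag to
// the entry's tag for the duration of the call and restores kDartTagId.
{
  compiler::LeafRuntimeScope rt(assembler,
                                /*frame_size=*/0,
                                /*preserve_registers=*/true);
  rt.Call(kExampleLeafRuntimeEntry, /*argument_count=*/1);
}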


@@ -0,0 +1,81 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// This test attempts to verify that the write-barrier slow path does not
// clobber any live values.
import 'dart:_internal' show VMInternalsForTesting;
import 'package:expect/expect.dart';
class Old {
var f;
Old(this.f);
}
@pragma('vm:never-inline')
double crashy(double v, List<Old> oldies) {
// This test attempts to create a lot of live values that are live across
// the write-barrier invocation, so that if the write barrier calls into the
// runtime and clobbers a register, the clobbering is detected.
var young = Object();
var len = oldies.length;
var i = 0;
var v00 = v + 0.0;
var v01 = v + 1.0;
var v02 = v + 2.0;
var v03 = v + 3.0;
var v04 = v + 4.0;
var v05 = v + 5.0;
var v06 = v + 6.0;
var v07 = v + 7.0;
var v08 = v + 8.0;
var v09 = v + 9.0;
var v10 = v + 10.0;
var v11 = v + 11.0;
var v12 = v + 12.0;
var v13 = v + 13.0;
var v14 = v + 14.0;
var v15 = v + 15.0;
var v16 = v + 16.0;
var v17 = v + 17.0;
var v18 = v + 18.0;
var v19 = v + 19.0;
while (i < len) {
// Eventually this will overflow the store buffer and call into the runtime
// to acquire a new block.
oldies[i++].f = young;
}
return v00 +
v01 +
v02 +
v03 +
v04 +
v05 +
v06 +
v07 +
v08 +
v09 +
v10 +
v11 +
v12 +
v13 +
v14 +
v15 +
v16 +
v17 +
v18 +
v19;
}
void main(List<String> args) {
final init = args.contains('impossible') ? 1.0 : 0.0;
final oldies = List<Old>.generate(100000, (i) => Old(""));
VMInternalsForTesting.collectAllGarbage();
VMInternalsForTesting.collectAllGarbage();
Expect.equals(crashy(init, oldies), 190.0);
for (var o in oldies) {
Expect.isTrue(o.f is! String);
}
}


@@ -0,0 +1,85 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// This test attempts to verify that the write-barrier slow path does not
// clobber any live values.
import 'dart:_internal' show VMInternalsForTesting;
import 'dart:typed_data';
import 'package:expect/expect.dart';
class Old {
var f;
Old(this.f);
}
@pragma('vm:never-inline')
Float64x2 crashy(Float64x2 v, List<Old> oldies) {
// This test attempts to create a lot of live values that are live across
// the write-barrier invocation, so that if the write barrier calls into the
// runtime and clobbers a register, the clobbering is detected.
var young = Object();
var len = oldies.length;
var i = 0;
var v00 = v + Float64x2(0.0, 0.0);
var v01 = v + Float64x2(1.0, 1.0);
var v02 = v + Float64x2(2.0, 2.0);
var v03 = v + Float64x2(3.0, 3.0);
var v04 = v + Float64x2(4.0, 4.0);
var v05 = v + Float64x2(5.0, 5.0);
var v06 = v + Float64x2(6.0, 6.0);
var v07 = v + Float64x2(7.0, 7.0);
var v08 = v + Float64x2(8.0, 8.0);
var v09 = v + Float64x2(9.0, 9.0);
var v10 = v + Float64x2(10.0, 10.0);
var v11 = v + Float64x2(11.0, 11.0);
var v12 = v + Float64x2(12.0, 12.0);
var v13 = v + Float64x2(13.0, 13.0);
var v14 = v + Float64x2(14.0, 14.0);
var v15 = v + Float64x2(15.0, 15.0);
var v16 = v + Float64x2(16.0, 16.0);
var v17 = v + Float64x2(17.0, 17.0);
var v18 = v + Float64x2(18.0, 18.0);
var v19 = v + Float64x2(19.0, 19.0);
while (i < len) {
// Eventually this will overflow the store buffer and call into the runtime
// to acquire a new block.
oldies[i++].f = young;
}
return v00 +
v01 +
v02 +
v03 +
v04 +
v05 +
v06 +
v07 +
v08 +
v09 +
v10 +
v11 +
v12 +
v13 +
v14 +
v15 +
v16 +
v17 +
v18 +
v19;
}
void main(List<String> args) {
final init =
args.contains('impossible') ? Float64x2(1.0, 1.0) : Float64x2(0.0, 0.0);
final oldies = List<Old>.generate(100000, (i) => Old(""));
VMInternalsForTesting.collectAllGarbage();
VMInternalsForTesting.collectAllGarbage();
var r = crashy(init, oldies);
Expect.equals(r.x, 190.0);
Expect.equals(r.y, 190.0);
for (var o in oldies) {
Expect.isTrue(o.f is! String);
}
}


@@ -0,0 +1,83 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// @dart = 2.9
// This test attempts to verify that the write-barrier slow path does not
// clobber any live values.
import 'dart:_internal' show VMInternalsForTesting;
import 'package:expect/expect.dart';
class Old {
var f;
Old(this.f);
}
@pragma('vm:never-inline')
double crashy(double v, List<Old> oldies) {
// This test attempts to create a lot of live values that are live across
// the write-barrier invocation, so that if the write barrier calls into the
// runtime and clobbers a register, the clobbering is detected.
var young = Object();
var len = oldies.length;
var i = 0;
var v00 = v + 0.0;
var v01 = v + 1.0;
var v02 = v + 2.0;
var v03 = v + 3.0;
var v04 = v + 4.0;
var v05 = v + 5.0;
var v06 = v + 6.0;
var v07 = v + 7.0;
var v08 = v + 8.0;
var v09 = v + 9.0;
var v10 = v + 10.0;
var v11 = v + 11.0;
var v12 = v + 12.0;
var v13 = v + 13.0;
var v14 = v + 14.0;
var v15 = v + 15.0;
var v16 = v + 16.0;
var v17 = v + 17.0;
var v18 = v + 18.0;
var v19 = v + 19.0;
while (i < len) {
// Eventually this will overflow the store buffer and call into the runtime
// to acquire a new block.
oldies[i++].f = young;
}
return v00 +
v01 +
v02 +
v03 +
v04 +
v05 +
v06 +
v07 +
v08 +
v09 +
v10 +
v11 +
v12 +
v13 +
v14 +
v15 +
v16 +
v17 +
v18 +
v19;
}
void main(List<String> args) {
final init = args.contains('impossible') ? 1.0 : 0.0;
final oldies = List<Old>.generate(100000, (i) => Old(""));
VMInternalsForTesting.collectAllGarbage();
VMInternalsForTesting.collectAllGarbage();
Expect.equals(crashy(init, oldies), 190.0);
for (var o in oldies) {
Expect.isTrue(o.f is! String);
}
}


@@ -0,0 +1,87 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// @dart = 2.9
// This test attempts to verify that the write-barrier slow path does not
// clobber any live values.
import 'dart:_internal' show VMInternalsForTesting;
import 'dart:typed_data';
import 'package:expect/expect.dart';
class Old {
var f;
Old(this.f);
}
@pragma('vm:never-inline')
Float64x2 crashy(Float64x2 v, List<Old> oldies) {
// This test attempts to create a lot of live values that are live across
// the write-barrier invocation, so that if the write barrier calls into the
// runtime and clobbers a register, the clobbering is detected.
var young = Object();
var len = oldies.length;
var i = 0;
var v00 = v + Float64x2(0.0, 0.0);
var v01 = v + Float64x2(1.0, 1.0);
var v02 = v + Float64x2(2.0, 2.0);
var v03 = v + Float64x2(3.0, 3.0);
var v04 = v + Float64x2(4.0, 4.0);
var v05 = v + Float64x2(5.0, 5.0);
var v06 = v + Float64x2(6.0, 6.0);
var v07 = v + Float64x2(7.0, 7.0);
var v08 = v + Float64x2(8.0, 8.0);
var v09 = v + Float64x2(9.0, 9.0);
var v10 = v + Float64x2(10.0, 10.0);
var v11 = v + Float64x2(11.0, 11.0);
var v12 = v + Float64x2(12.0, 12.0);
var v13 = v + Float64x2(13.0, 13.0);
var v14 = v + Float64x2(14.0, 14.0);
var v15 = v + Float64x2(15.0, 15.0);
var v16 = v + Float64x2(16.0, 16.0);
var v17 = v + Float64x2(17.0, 17.0);
var v18 = v + Float64x2(18.0, 18.0);
var v19 = v + Float64x2(19.0, 19.0);
while (i < len) {
// Eventually this will overflow the store buffer and call into the runtime
// to acquire a new block.
oldies[i++].f = young;
}
return v00 +
v01 +
v02 +
v03 +
v04 +
v05 +
v06 +
v07 +
v08 +
v09 +
v10 +
v11 +
v12 +
v13 +
v14 +
v15 +
v16 +
v17 +
v18 +
v19;
}
void main(List<String> args) {
final init =
args.contains('impossible') ? Float64x2(1.0, 1.0) : Float64x2(0.0, 0.0);
final oldies = List<Old>.generate(100000, (i) => Old(""));
VMInternalsForTesting.collectAllGarbage();
VMInternalsForTesting.collectAllGarbage();
var r = crashy(init, oldies);
Expect.equals(r.x, 190.0);
Expect.equals(r.y, 190.0);
for (var o in oldies) {
Expect.isTrue(o.f is! String);
}
}


@@ -12,6 +12,7 @@
#include "vm/compiler/backend/locations.h"
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/tags.h"
// An extra check since we are assuming the existence of /proc/cpuinfo below.
#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \
@@ -2727,11 +2728,6 @@ void Assembler::BranchLinkPatchable(const Code& target,
BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind);
}
void Assembler::BranchLinkToRuntime() {
ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
blx(IP);
}
void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence,
CodeEntryKind entry_kind) {
@@ -3269,63 +3265,104 @@ void Assembler::EmitEntryFrameVerification(Register scratch) {
#endif
}
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
Comment("EnterCallRuntimeFrame");
// Preserve volatile CPU registers and PP.
SPILLS_LR_TO_FRAME(
EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR), 0));
COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
// Preserve all volatile FPU registers.
DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
if ((lastv - firstv + 1) >= 16) {
DRegister mid = static_cast<DRegister>(firstv + 16);
vstmd(DB_W, SP, mid, lastv - mid + 1);
vstmd(DB_W, SP, firstv, 16);
} else {
vstmd(DB_W, SP, firstv, lastv - firstv + 1);
}
ReserveAlignedFrameSpace(frame_space);
}
void Assembler::LeaveCallRuntimeFrame() {
// SP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedFpuRegisterSize =
kDartVolatileFpuRegCount * kFpuRegisterSize;
COMPILE_ASSERT(PP < FP);
COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
// kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
// it is pushed ahead of FP.
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
AddImmediate(SP, FP, -kPushedRegistersSize);
// Restore all volatile FPU registers.
DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
if ((lastv - firstv + 1) >= 16) {
DRegister mid = static_cast<DRegister>(firstv + 16);
vldmd(IA_W, SP, firstv, 16);
vldmd(IA_W, SP, mid, lastv - mid + 1);
} else {
vldmd(IA_W, SP, firstv, lastv - firstv + 1);
}
// Restore volatile CPU registers.
RESTORES_LR_FROM_FRAME(
LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR)));
}
void Assembler::CallRuntime(const RuntimeEntry& entry,
intptr_t argument_count) {
entry.Call(this, argument_count);
ASSERT(!entry.is_leaf());
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
LoadFromOffset(R9, THR, entry.OffsetFromThread());
LoadImmediate(R4, argument_count);
ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
blx(IP);
}
// For use by LR related macros (e.g. CLOBBERS_LR).
#undef __
#define __ assembler_->
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers)
: assembler_(assembler), preserve_registers_(preserve_registers) {
__ Comment("EnterCallRuntimeFrame");
if (preserve_registers) {
// Preserve volatile CPU registers and PP.
SPILLS_LR_TO_FRAME(__ EnterFrame(
kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 << LR), 0));
COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
// Preserve all volatile FPU registers.
DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
if ((lastv - firstv + 1) >= 16) {
DRegister mid = static_cast<DRegister>(firstv + 16);
__ vstmd(DB_W, SP, mid, lastv - mid + 1);
__ vstmd(DB_W, SP, firstv, 16);
} else {
__ vstmd(DB_W, SP, firstv, lastv - firstv + 1);
}
} else {
SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
// These registers must always be preserved.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(PP));
COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
}
__ ReserveAlignedFrameSpace(frame_size);
}
void LeafRuntimeScope::Call(const RuntimeEntry& entry,
intptr_t argument_count) {
ASSERT(argument_count == entry.argument_count());
__ LoadFromOffset(TMP, THR, entry.OffsetFromThread());
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
__ blx(TMP);
__ LoadImmediate(TMP, VMTag::kDartTagId);
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
}
LeafRuntimeScope::~LeafRuntimeScope() {
if (preserve_registers_) {
// SP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedFpuRegisterSize =
kDartVolatileFpuRegCount * kFpuRegisterSize;
COMPILE_ASSERT(PP < FP);
COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
// kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
// it is pushed ahead of FP.
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
__ AddImmediate(SP, FP, -kPushedRegistersSize);
// Restore all volatile FPU registers.
DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
if ((lastv - firstv + 1) >= 16) {
DRegister mid = static_cast<DRegister>(firstv + 16);
__ vldmd(IA_W, SP, firstv, 16);
__ vldmd(IA_W, SP, mid, lastv - mid + 1);
} else {
__ vldmd(IA_W, SP, firstv, lastv - firstv + 1);
}
// Restore volatile CPU registers.
RESTORES_LR_FROM_FRAME(__ LeaveFrame(kDartVolatileCpuRegs | (1 << PP) |
(1 << FP) | (1 << LR)));
} else {
RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
}
}
// For use by LR related macros (e.g. CLOBBERS_LR).
#undef __
#define __ this->
void Assembler::EnterDartFrame(intptr_t frame_size, bool load_pool_pointer) {
ASSERT(!constant_pool_allowed());


@@ -776,7 +776,6 @@ class Assembler : public AssemblerBase {
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
void BranchLinkToRuntime();
// Branch and link to an entry address. Call sequence can be patched.
void BranchLinkPatchable(const Code& code,
@@ -1259,12 +1258,7 @@ class Assembler : public AssemblerBase {
// Requires a scratch register in addition to the assembler temporary.
void EmitEntryFrameVerification(Register scratch);
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's SP is guaranteed to be correctly aligned and
// frame_space bytes are reserved under it.
void EnterCallRuntimeFrame(intptr_t frame_space);
void LeaveCallRuntimeFrame();
// For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
// Set up a Dart frame on entry with a frame pointer and PC information to


@@ -12,6 +12,7 @@
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/simulator.h"
#include "vm/tags.h"
namespace dart {
@@ -255,17 +256,15 @@ void Assembler::Bind(Label* label) {
#if defined(USING_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Register addr) {
EnterCallRuntimeFrame(/*frame_size=*/0, /*is_leaf=*/true);
ASSERT(kTsanLoadAcquireRuntimeEntry.is_leaf());
CallRuntime(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
LeaveCallRuntimeFrame(/*is_leaf=*/true);
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(R0, addr);
rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
}
void Assembler::TsanStoreRelease(Register addr) {
EnterCallRuntimeFrame(/*frame_size=*/0, /*is_leaf=*/true);
ASSERT(kTsanStoreReleaseRuntimeEntry.is_leaf());
CallRuntime(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
LeaveCallRuntimeFrame(/*is_leaf=*/true);
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(R0, addr);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
#endif
@@ -706,10 +705,6 @@ void Assembler::BranchLink(const Code& target,
Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}
void Assembler::BranchLinkToRuntime() {
Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence,
CodeEntryKind entry_kind) {
@@ -1820,110 +1815,83 @@ void Assembler::TransitionNativeToGenerated(Register state,
StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
}
void Assembler::EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf) {
Comment("EnterCallRuntimeFrame");
EnterFrame(0);
if (!FLAG_precompiled_mode) {
TagAndPushPPAndPcMarker(); // Save PP and PC marker.
}
// Store fpu registers with the lowest register number at the lowest
// address.
for (int i = kNumberOfVRegisters - 1; i >= 0; i--) {
if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) {
// TODO(zra): When SIMD is added, we must also preserve the top
// 64-bits of the callee-saved registers.
continue;
}
// TODO(zra): Save the whole V register.
VRegister reg = static_cast<VRegister>(i);
PushDouble(reg);
}
for (int i = kDartFirstVolatileCpuReg; i <= kDartLastVolatileCpuReg; i++) {
const Register reg = static_cast<Register>(i);
Push(reg);
}
if (!is_leaf) { // Leaf calling sequence aligns the stack itself.
ReserveAlignedFrameSpace(frame_size);
} else {
PushPair(kCallLeafRuntimeCalleeSaveScratch1,
kCallLeafRuntimeCalleeSaveScratch2);
}
}
void Assembler::LeaveCallRuntimeFrame(bool is_leaf) {
// SP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t fixed_frame_words_without_pc_and_fp =
target::frame_layout.dart_fixed_frame_size - 2;
const intptr_t kPushedRegistersSize =
kDartVolatileFpuRegCount * sizeof(double) +
(kDartVolatileCpuRegCount + (is_leaf ? 2 : 0) +
fixed_frame_words_without_pc_and_fp) *
target::kWordSize;
AddImmediate(SP, FP, -kPushedRegistersSize);
if (is_leaf) {
PopPair(kCallLeafRuntimeCalleeSaveScratch1,
kCallLeafRuntimeCalleeSaveScratch2);
}
for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
const Register reg = static_cast<Register>(i);
Pop(reg);
}
for (int i = 0; i < kNumberOfVRegisters; i++) {
if ((i >= kAbiFirstPreservedFpuReg) && (i <= kAbiLastPreservedFpuReg)) {
// TODO(zra): When SIMD is added, we must also restore the top
// 64-bits of the callee-saved registers.
continue;
}
// TODO(zra): Restore the whole V register.
VRegister reg = static_cast<VRegister>(i);
PopDouble(reg);
}
LeaveStubFrame();
}
void Assembler::CallRuntime(const RuntimeEntry& entry,
intptr_t argument_count) {
entry.Call(this, argument_count);
ASSERT(!entry.is_leaf());
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
ldr(R5, compiler::Address(THR, entry.OffsetFromThread()));
LoadImmediate(R4, argument_count);
Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::CallRuntimeScope::Call(intptr_t argument_count) {
assembler_->CallRuntime(entry_, argument_count);
}
// FPU: Only the bottom 64-bits of v8-v15 are preserved by the caller. The upper
// bits might be in use by Dart, so we save the whole register.
static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
kAllFpuRegistersList);
Assembler::CallRuntimeScope::~CallRuntimeScope() {
if (preserve_registers_) {
assembler_->LeaveCallRuntimeFrame(entry_.is_leaf());
if (restore_code_reg_) {
assembler_->Pop(CODE_REG);
}
#undef __
#define __ assembler_->
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers)
: assembler_(assembler), preserve_registers_(preserve_registers) {
__ Comment("EnterCallRuntimeFrame");
__ EnterFrame(0);
if (preserve_registers) {
__ PushRegisters(kRuntimeCallSavedRegisters);
} else {
// These registers must always be preserved.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(PP));
COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
}
__ ReserveAlignedFrameSpace(frame_size);
}
Assembler::CallRuntimeScope::CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers,
const Address* caller)
: assembler_(assembler),
entry_(entry),
preserve_registers_(preserve_registers),
restore_code_reg_(caller != nullptr) {
if (preserve_registers_) {
if (caller != nullptr) {
assembler_->Push(CODE_REG);
assembler_->ldr(CODE_REG, *caller);
}
assembler_->EnterCallRuntimeFrame(frame_size, entry.is_leaf());
}
void LeafRuntimeScope::Call(const RuntimeEntry& entry,
intptr_t argument_count) {
ASSERT(argument_count == entry.argument_count());
// Since we are entering C++ code, we must restore the C stack pointer from
// the stack limit to an aligned value nearer to the top of the stack.
// We cache the stack limit in callee-saved registers, then align and call,
// restoring CSP and SP on return from the call.
// This sequence may occur in an intrinsic, so don't use registers an
// intrinsic must preserve.
__ mov(CSP, SP);
__ ldr(TMP, compiler::Address(THR, entry.OffsetFromThread()));
__ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
__ blr(TMP);
__ LoadImmediate(TMP, VMTag::kDartTagId);
__ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
__ SetupCSPFromThread(THR);
}
LeafRuntimeScope::~LeafRuntimeScope() {
if (preserve_registers_) {
// SP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedRegistersSize =
kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize;
__ AddImmediate(SP, FP, -kPushedRegistersSize);
__ PopRegisters(kRuntimeCallSavedRegisters);
}
__ LeaveFrame();
}
// For use by LR related macros (e.g. CLOBBERS_LR).
#undef __
#define __ this->
void Assembler::EnterStubFrame() {
EnterDartFrame(0);
}


@@ -1752,7 +1752,6 @@ class Assembler : public AssemblerBase {
CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
BranchLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind);
}
void BranchLinkToRuntime();
// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
@@ -2101,51 +2100,9 @@ class Assembler : public AssemblerBase {
void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
// For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
// Helper class for performing runtime calls from callers where manual
// register preservation is required (e.g. outside IL instructions marked
// as calling).
class CallRuntimeScope : public ValueObject {
public:
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers = true)
: CallRuntimeScope(assembler,
entry,
frame_size,
preserve_registers,
/*caller=*/nullptr) {}
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
Address caller,
bool preserve_registers = true)
: CallRuntimeScope(assembler,
entry,
frame_size,
preserve_registers,
&caller) {}
void Call(intptr_t argument_count);
~CallRuntimeScope();
private:
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers,
const Address* caller);
Assembler* const assembler_;
const RuntimeEntry& entry_;
const bool preserve_registers_;
const bool restore_code_reg_;
};
// Set up a stub frame so that the stack traversal code can easily identify
// a stub frame.
void EnterStubFrame();
@@ -2956,11 +2913,6 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
// Note: leaf call sequence uses some abi callee save registers as scratch
// so they should be manually preserved.
void EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf);
void LeaveCallRuntimeFrame(bool is_leaf);
friend class dart::FlowGraphCompiler;
std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
std::function<void()> generate_invoke_array_write_barrier_;


@@ -866,6 +866,35 @@ class AssemblerBase : public StackResource {
ObjectPoolBuilder* object_pool_builder_;
};
// For leaf runtime calls. For non-leaf runtime calls, use
// Assembler::CallRuntime.
class LeafRuntimeScope : public ValueObject {
public:
// Enters a frame, saves registers, and aligns the stack according to the C
// ABI.
//
// If [preserve_registers] is false, only registers normally preserved at a
// Dart call will be preserved (SP, FP, THR, PP, CODE_REG, RA). Suitable for
// use in IL instructions marked with LocationSummary::kCall.
// If [preserve_registers] is true, all registers allocatable by Dart (roughly
// everything but TMP, TMP2) will be preserved. Suitable for non-call IL
// instructions like the write barrier.
LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers);
// Restores registers and leaves the frame.
~LeafRuntimeScope();
// Sets the current tag, calls the runtime function, and restores the current
// tag.
void Call(const RuntimeEntry& entry, intptr_t argument_count);
private:
Assembler* const assembler_;
const bool preserve_registers_;
};
} // namespace compiler
} // namespace dart
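
As a usage sketch of the two modes described above (hypothetical call sites;
the runtime entry name is a stand-in):

// Inside an IL instruction marked with LocationSummary::kCall: live values
// have already been spilled by the register allocator, so only the
// always-preserved registers matter.
{
  LeafRuntimeScope rt(assembler, /*frame_size=*/0,
                      /*preserve_registers=*/false);
  // ...move arguments into ABI registers here...
  rt.Call(kExampleLeafRuntimeEntry, /*argument_count=*/1);
}

// Inside a non-call slow path such as the write barrier: everything Dart
// code may be holding live, including the full SIMD registers, is saved
// and restored around the call.
{
  LeafRuntimeScope rt(assembler, /*frame_size=*/0,
                      /*preserve_registers=*/true);
  // ...move arguments into ABI registers here...
  rt.Call(kExampleLeafRuntimeEntry, /*argument_count=*/1);
}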


@@ -11,6 +11,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/tags.h"
namespace dart {
@@ -2446,59 +2447,86 @@ static const Register volatile_cpu_registers[kNumberOfVolatileCpuRegisters] = {
// save it.
static const intptr_t kNumberOfVolatileXmmRegisters = kNumberOfXmmRegisters - 1;
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
Comment("EnterCallRuntimeFrame");
EnterFrame(0);
// Preserve volatile CPU registers.
for (intptr_t i = 0; i < kNumberOfVolatileCpuRegisters; i++) {
pushl(volatile_cpu_registers[i]);
}
// Preserve all XMM registers except XMM0
subl(ESP, Immediate((kNumberOfXmmRegisters - 1) * kFpuRegisterSize));
// Store XMM registers with the lowest register number at the lowest
// address.
intptr_t offset = 0;
for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
movups(Address(ESP, offset), xmm_reg);
offset += kFpuRegisterSize;
}
ReserveAlignedFrameSpace(frame_space);
}
void Assembler::LeaveCallRuntimeFrame() {
// ESP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedRegistersSize =
kNumberOfVolatileCpuRegisters * target::kWordSize +
kNumberOfVolatileXmmRegisters * kFpuRegisterSize;
leal(ESP, Address(EBP, -kPushedRegistersSize));
// Restore all XMM registers except XMM0
// XMM registers have the lowest register number at the lowest address.
intptr_t offset = 0;
for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
movups(xmm_reg, Address(ESP, offset));
offset += kFpuRegisterSize;
}
addl(ESP, Immediate(offset));
// Restore volatile CPU registers.
for (intptr_t i = kNumberOfVolatileCpuRegisters - 1; i >= 0; i--) {
popl(volatile_cpu_registers[i]);
}
leave();
}
void Assembler::CallRuntime(const RuntimeEntry& entry,
intptr_t argument_count) {
entry.Call(this, argument_count);
ASSERT(!entry.is_leaf());
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
movl(ECX, compiler::Address(THR, entry.OffsetFromThread()));
movl(EDX, compiler::Immediate(argument_count));
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
#define __ assembler_->
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers)
: assembler_(assembler), preserve_registers_(preserve_registers) {
__ Comment("EnterCallRuntimeFrame");
__ EnterFrame(0);
if (preserve_registers_) {
// Preserve volatile CPU registers.
for (intptr_t i = 0; i < kNumberOfVolatileCpuRegisters; i++) {
__ pushl(volatile_cpu_registers[i]);
}
// Preserve all XMM registers except XMM0
__ subl(ESP, Immediate((kNumberOfXmmRegisters - 1) * kFpuRegisterSize));
// Store XMM registers with the lowest register number at the lowest
// address.
intptr_t offset = 0;
for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
__ movups(Address(ESP, offset), xmm_reg);
offset += kFpuRegisterSize;
}
} else {
// These registers must always be preserved.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
}
__ ReserveAlignedFrameSpace(frame_size);
}
void LeafRuntimeScope::Call(const RuntimeEntry& entry,
intptr_t argument_count) {
ASSERT(argument_count == entry.argument_count());
__ movl(EAX, compiler::Address(THR, entry.OffsetFromThread()));
__ movl(compiler::Assembler::VMTagAddress(), EAX);
__ call(EAX);
__ movl(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartTagId));
}
LeafRuntimeScope::~LeafRuntimeScope() {
if (preserve_registers_) {
// ESP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedRegistersSize =
kNumberOfVolatileCpuRegisters * target::kWordSize +
kNumberOfVolatileXmmRegisters * kFpuRegisterSize;
__ leal(ESP, Address(EBP, -kPushedRegistersSize));
// Restore all XMM registers except XMM0
// XMM registers have the lowest register number at the lowest address.
intptr_t offset = 0;
for (intptr_t reg_idx = 1; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
__ movups(xmm_reg, Address(ESP, offset));
offset += kFpuRegisterSize;
}
__ addl(ESP, Immediate(offset));
// Restore volatile CPU registers.
for (intptr_t i = kNumberOfVolatileCpuRegisters - 1; i >= 0; i--) {
__ popl(volatile_cpu_registers[i]);
}
}
__ leave();
}
void Assembler::Call(const Code& target,
@@ -2517,10 +2545,6 @@ void Assembler::CallVmStub(const Code& target) {
kHeapObjectTag));
}
void Assembler::CallToRuntime() {
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::Jmp(const Code& target) {
const ExternalLabel label(target::Code::EntryPointOf(target));
jmp(&label);


@@ -825,18 +825,12 @@ class Assembler : public AssemblerBase {
void EnterFullSafepoint(Register scratch);
void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's ESP is guaranteed to be correctly aligned and
// frame_space bytes are reserved under it.
void EnterCallRuntimeFrame(intptr_t frame_space);
void LeaveCallRuntimeFrame();
// For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
void Call(const Code& code,
bool movable_target = false,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
void CallToRuntime();
// Will not clobber any registers and can therefore be called with 5 live
// registers.
void CallVmStub(const Code& code);


@@ -12,6 +12,7 @@
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/simulator.h"
#include "vm/tags.h"
namespace dart {
@@ -2323,10 +2324,14 @@ void Assembler::LoadField(Register dst, const FieldAddress& address) {
#if defined(USING_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Register addr) {
UNIMPLEMENTED();
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(A0, addr);
rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
}
void Assembler::TsanStoreRelease(Register addr) {
UNIMPLEMENTED();
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
MoveRegister(A0, addr);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
#endif
@@ -2674,10 +2679,6 @@ void Assembler::JumpAndLink(const Code& target,
Call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}
void Assembler::JumpAndLinkToRuntime() {
Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::JumpAndLinkWithEquivalence(const Code& target,
const Object& equivalence,
CodeEntryKind entry_kind) {
@@ -3721,9 +3722,80 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
void Assembler::CallRuntime(const RuntimeEntry& entry,
intptr_t argument_count) {
entry.Call(this, argument_count);
ASSERT(!entry.is_leaf());
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
lx(T5, compiler::Address(THR, entry.OffsetFromThread()));
li(T4, argument_count);
Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
kAbiVolatileFpuRegs);
#define __ assembler_->
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers)
: assembler_(assembler), preserve_registers_(preserve_registers) {
// N.B. The ordering here is important. We must never write beyond SP or
// it can be clobbered by a signal handler.
__ subi(SP, SP, 4 * target::kWordSize);
__ sx(RA, Address(SP, 3 * target::kWordSize));
__ sx(FP, Address(SP, 2 * target::kWordSize));
__ sx(CODE_REG, Address(SP, 1 * target::kWordSize));
__ sx(PP, Address(SP, 0 * target::kWordSize));
__ addi(FP, SP, 4 * target::kWordSize);
if (preserve_registers) {
__ PushRegisters(kRuntimeCallSavedRegisters);
} else {
// Or no reason to save above.
COMPILE_ASSERT(!IsAbiPreservedRegister(CODE_REG));
COMPILE_ASSERT(!IsAbiPreservedRegister(PP));
// Or would need to save above.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_MASK));
COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
}
__ ReserveAlignedFrameSpace(frame_size);
}
void LeafRuntimeScope::Call(const RuntimeEntry& entry,
intptr_t argument_count) {
ASSERT(argument_count == entry.argument_count());
__ lx(TMP2, compiler::Address(THR, entry.OffsetFromThread()));
__ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
__ jalr(TMP2);
__ LoadImmediate(TMP2, VMTag::kDartTagId);
__ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
}
LeafRuntimeScope::~LeafRuntimeScope() {
if (preserve_registers_) {
const intptr_t kSavedRegistersSize =
kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize +
4 * target::kWordSize;
__ subi(SP, FP, kSavedRegistersSize);
__ PopRegisters(kRuntimeCallSavedRegisters);
}
__ subi(SP, FP, 4 * target::kWordSize);
__ lx(PP, Address(SP, 0 * target::kWordSize));
__ lx(CODE_REG, Address(SP, 1 * target::kWordSize));
__ lx(FP, Address(SP, 2 * target::kWordSize));
__ lx(RA, Address(SP, 3 * target::kWordSize));
__ addi(SP, SP, 4 * target::kWordSize);
}
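
For reference, a sketch of the frame the constructor above builds, assuming
RV64 (8-byte words; RV32 uses 4-byte slots):

//   old SP  -->  +--------------------+  <- FP
//   FP -  8:     | RA                 |
//   FP - 16:     | caller FP          |
//   FP - 24:     | CODE_REG           |
//   FP - 32:     | PP                 |  <- SP after the four stores
//                | saved registers    |  (only if preserve_registers)
//                | frame_size + align |  (ReserveAlignedFrameSpace)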
#undef __
void Assembler::EnterCFrame(intptr_t frame_space) {
// N.B. The ordering here is important. We must never read beyond SP or
// it may have already been clobbered by a signal handler.
@@ -4166,81 +4238,6 @@ void Assembler::LoadObjectHelper(Register dst,
LoadImmediate(dst, target::ToRawSmi(object));
}
static const RegisterSet kRuntimeCallSavedRegisters(
kAbiVolatileCpuRegs | (1 << CALLEE_SAVED_TEMP) | (1 << CALLEE_SAVED_TEMP2),
kAbiVolatileFpuRegs);
// Note: leaf call sequence uses some abi callee save registers as scratch
// so they should be manually preserved.
void Assembler::EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf) {
// N.B. The ordering here is important. We must never write beyond SP or
// it can be clobbered by a signal handler.
if (FLAG_precompiled_mode) {
subi(SP, SP, 2 * target::kWordSize + frame_size);
sx(RA, Address(SP, 1 * target::kWordSize + frame_size));
sx(FP, Address(SP, 0 * target::kWordSize + frame_size));
addi(FP, SP, 2 * target::kWordSize + frame_size);
} else {
subi(SP, SP, 4 * target::kWordSize + frame_size);
sx(RA, Address(SP, 3 * target::kWordSize + frame_size));
sx(FP, Address(SP, 2 * target::kWordSize + frame_size));
sx(CODE_REG, Address(SP, 1 * target::kWordSize + frame_size));
addi(PP, PP, kHeapObjectTag);
sx(PP, Address(SP, 0 * target::kWordSize + frame_size));
addi(FP, SP, 4 * target::kWordSize + frame_size);
}
PushRegisters(kRuntimeCallSavedRegisters);
if (!is_leaf) { // Leaf calling sequence aligns the stack itself.
ReserveAlignedFrameSpace(0);
}
}
void Assembler::LeaveCallRuntimeFrame(bool is_leaf) {
const intptr_t kPushedRegistersSize =
kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize +
(target::frame_layout.dart_fixed_frame_size * target::kWordSize);
subi(SP, FP, kPushedRegistersSize);
PopRegisters(kRuntimeCallSavedRegisters);
LeaveStubFrame();
}
void Assembler::CallRuntimeScope::Call(intptr_t argument_count) {
assembler_->CallRuntime(entry_, argument_count);
}
Assembler::CallRuntimeScope::~CallRuntimeScope() {
if (preserve_registers_) {
assembler_->LeaveCallRuntimeFrame(entry_.is_leaf());
if (restore_code_reg_) {
assembler_->PopRegister(CODE_REG);
}
}
}
Assembler::CallRuntimeScope::CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers,
const Address* caller)
: assembler_(assembler),
entry_(entry),
preserve_registers_(preserve_registers),
restore_code_reg_(caller != nullptr) {
if (preserve_registers_) {
if (caller != nullptr) {
assembler_->PushRegister(CODE_REG);
assembler_->lx(CODE_REG, *caller);
}
assembler_->EnterCallRuntimeFrame(frame_size, entry.is_leaf());
}
}
void Assembler::AddImmediateBranchOverflow(Register rd,
Register rs1,
intx_t imm,

View file

@@ -916,7 +916,6 @@ class Assembler : public MicroAssembler {
CodeEntryKind entry_kind = CodeEntryKind::kNormal) {
JumpAndLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind);
}
void JumpAndLinkToRuntime();
// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
@@ -1218,51 +1217,9 @@ class Assembler : public MicroAssembler {
void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);
// For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
// Helper class for performing runtime calls from callers where manual
// register preservation is required (e.g. outside IL instructions marked
// as calling).
class CallRuntimeScope : public ValueObject {
public:
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers = true)
: CallRuntimeScope(assembler,
entry,
frame_size,
preserve_registers,
/*caller=*/nullptr) {}
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
Address caller,
bool preserve_registers = true)
: CallRuntimeScope(assembler,
entry,
frame_size,
preserve_registers,
&caller) {}
void Call(intptr_t argument_count);
~CallRuntimeScope();
private:
CallRuntimeScope(Assembler* assembler,
const RuntimeEntry& entry,
intptr_t frame_size,
bool preserve_registers,
const Address* caller);
Assembler* const assembler_;
const RuntimeEntry& entry_;
const bool preserve_registers_;
const bool restore_code_reg_;
};
// Set up a stub frame so that the stack traversal code can easily identify
// a stub frame.
void EnterStubFrame() { EnterDartFrame(0); }
@@ -1438,11 +1395,6 @@ class Assembler : public MicroAssembler {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
// Note: leaf call sequence uses some abi callee save registers as scratch
// so they should be manually preserved.
void EnterCallRuntimeFrame(intptr_t frame_size, bool is_leaf);
void LeaveCallRuntimeFrame(bool is_leaf);
friend class dart::FlowGraphCompiler;
std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
std::function<void()> generate_invoke_array_write_barrier_;


@@ -11,6 +11,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/instructions.h"
#include "vm/tags.h"
namespace dart {
@@ -87,10 +88,6 @@ void Assembler::Call(const Code& target) {
call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
void Assembler::CallToRuntime() {
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::pushq(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRegisterREX(reg, REX_NONE);
@@ -1882,42 +1879,6 @@ static const RegisterSet kVolatileRegisterSet(
CallingConventions::kVolatileCpuRegisters,
CallingConventions::kVolatileXmmRegisters);
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
Comment("EnterCallRuntimeFrame");
EnterFrame(0);
if (!FLAG_precompiled_mode) {
pushq(CODE_REG);
pushq(PP);
}
// TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
PushRegisters(kVolatileRegisterSet);
ReserveAlignedFrameSpace(frame_space);
}
void Assembler::LeaveCallRuntimeFrame() {
// RSP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedCpuRegistersCount =
RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
const intptr_t kPushedXmmRegistersCount =
RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
const intptr_t kPushedRegistersSize =
kPushedCpuRegistersCount * target::kWordSize +
kPushedXmmRegistersCount * kFpuRegisterSize +
(target::frame_layout.dart_fixed_frame_size - 2) *
target::kWordSize; // From EnterStubFrame (excluding PC / FP)
leaq(RSP, Address(RBP, -kPushedRegistersSize));
// TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
PopRegisters(kVolatileRegisterSet);
LeaveStubFrame();
}
void Assembler::CallCFunction(Register reg, bool restore_rsp) {
// Reserve shadow space for outgoing arguments.
if (CallingConventions::kShadowSpaceBytes != 0) {
@@ -1943,24 +1904,86 @@ void Assembler::CallCFunction(Address address, bool restore_rsp) {
void Assembler::CallRuntime(const RuntimeEntry& entry,
intptr_t argument_count) {
entry.Call(this, argument_count);
ASSERT(!entry.is_leaf());
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
movq(RBX, compiler::Address(THR, entry.OffsetFromThread()));
LoadImmediate(R10, compiler::Immediate(argument_count));
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
#define __ assembler_->
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
intptr_t frame_size,
bool preserve_registers)
: assembler_(assembler), preserve_registers_(preserve_registers) {
__ Comment("EnterCallRuntimeFrame");
__ EnterFrame(0);
if (preserve_registers_) {
// TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
__ PushRegisters(kVolatileRegisterSet);
} else {
// These registers must always be preserved.
ASSERT(IsCalleeSavedRegister(THR));
ASSERT(IsCalleeSavedRegister(PP));
ASSERT(IsCalleeSavedRegister(CODE_REG));
}
__ ReserveAlignedFrameSpace(frame_size);
}
void LeafRuntimeScope::Call(const RuntimeEntry& entry,
intptr_t argument_count) {
ASSERT(entry.is_leaf());
ASSERT(entry.argument_count() == argument_count);
COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
__ movq(RAX, compiler::Address(THR, entry.OffsetFromThread()));
__ movq(compiler::Assembler::VMTagAddress(), RAX);
__ CallCFunction(RAX);
__ movq(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartTagId));
}
LeafRuntimeScope::~LeafRuntimeScope() {
if (preserve_registers_) {
// RSP might have been modified to reserve space for arguments
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedCpuRegistersCount =
RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
const intptr_t kPushedXmmRegistersCount =
RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
const intptr_t kPushedRegistersSize =
kPushedCpuRegistersCount * target::kWordSize +
kPushedXmmRegistersCount * kFpuRegisterSize;
__ leaq(RSP, Address(RBP, -kPushedRegistersSize));
// TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
__ PopRegisters(kVolatileRegisterSet);
} else {
const intptr_t kPushedRegistersSize =
(target::frame_layout.dart_fixed_frame_size - 2) *
target::kWordSize; // From EnterStubFrame (excluding PC / FP)
__ leaq(RSP, Address(RBP, -kPushedRegistersSize));
}
__ LeaveFrame();
}
#if defined(USING_THREAD_SANITIZER)
void Assembler::TsanLoadAcquire(Address addr) {
PushRegisters(kVolatileRegisterSet);
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
leaq(CallingConventions::kArg1Reg, addr);
ASSERT(kTsanLoadAcquireRuntimeEntry.is_leaf());
CallRuntime(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
PopRegisters(kVolatileRegisterSet);
rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
}
void Assembler::TsanStoreRelease(Address addr) {
PushRegisters(kVolatileRegisterSet);
LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
leaq(CallingConventions::kArg1Reg, addr);
ASSERT(kTsanStoreReleaseRuntimeEntry.is_leaf());
CallRuntime(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
PopRegisters(kVolatileRegisterSet);
rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
}
#endif

View file

@@ -772,7 +772,6 @@ class Assembler : public AssemblerBase {
void CallPatchable(const Code& code,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
void Call(const Code& stub_entry);
void CallToRuntime();
// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
@@ -880,12 +879,7 @@ class Assembler : public AssemblerBase {
// Clobbers RAX.
void EmitEntryFrameVerification();
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's RSP is guaranteed to be correctly aligned and
// frame_space bytes are reserved under it.
void EnterCallRuntimeFrame(intptr_t frame_space);
void LeaveCallRuntimeFrame();
// For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
// Call runtime function. Reserves shadow space on the stack before calling


@@ -5664,9 +5664,11 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
}
void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Call the function.
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
// Call the function. Parameters are already in their correct spots.
rt.Call(TargetFunction(), TargetFunction().argument_count());
}
LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
@@ -6116,15 +6118,20 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
__ vmovd(D1, D2);
if (TargetCPUFeatures::hardfp_supported()) {
ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(instr->TargetFunction(), kInputCount);
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(instr->TargetFunction(), kInputCount);
} else {
// If the ABI is not "hardfp", then we have to move the double arguments
// to the integer registers, and take the results from the integer
// registers.
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
__ vmovrrd(R0, R1, D0);
__ vmovrrd(R2, R3, D1);
ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(instr->TargetFunction(), kInputCount);
rt.Call(instr->TargetFunction(), kInputCount);
__ vmovdrr(D0, R0, R1);
__ vmovdrr(D1, R2, R3);
}
@@ -6142,16 +6149,20 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ vmovd(D1, D2);
}
if (TargetCPUFeatures::hardfp_supported()) {
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(TargetFunction(), TargetFunction().argument_count());
} else {
// If the ABI is not "hardfp", then we have to move the double arguments
// to the integer registers, and take the results from the integer
// registers.
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
__ vmovrrd(R0, R1, D0);
__ vmovrrd(R2, R3, D1);
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
rt.Call(TargetFunction(), TargetFunction().argument_count());
__ vmovdrr(D0, R0, R1);
__ vmovdrr(D1, R2, R3);
}


@@ -4708,9 +4708,11 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
}
void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Call the function.
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
// Call the function. Parameters are already in their correct spots.
rt.Call(TargetFunction(), TargetFunction().argument_count());
}
LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
@@ -5163,9 +5165,15 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
__ Bind(&do_pow);
__ fmovdd(base, saved_base); // Restore base.
ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(instr->TargetFunction(), kInputCount);
{
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
ASSERT(base == V0);
ASSERT(exp == V1);
rt.Call(instr->TargetFunction(), kInputCount);
ASSERT(result == V0);
}
__ Bind(&skip_call);
}
@@ -5174,8 +5182,16 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
InvokeDoublePow(compiler, this);
return;
}
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
ASSERT(locs()->in(0).fpu_reg() == V0);
if (InputCount() == 2) {
ASSERT(locs()->in(1).fpu_reg() == V1);
}
rt.Call(TargetFunction(), InputCount());
ASSERT(locs()->out(0).fpu_reg() == V0);
}
LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,


@@ -4804,23 +4804,14 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
}
void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Save ESP. EDI is chosen because it is callee saved so we do not need to
// back it up before calling into the runtime.
static const Register kSavedSPReg = EDI;
__ movl(kSavedSPReg, ESP);
__ ReserveAlignedFrameSpace(kWordSize * TargetFunction().argument_count());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/4 * compiler::target::kWordSize,
/*preserve_registers=*/false);
__ movl(compiler::Address(ESP, +0 * kWordSize), locs()->in(0).reg());
__ movl(compiler::Address(ESP, +1 * kWordSize), locs()->in(1).reg());
__ movl(compiler::Address(ESP, +2 * kWordSize), locs()->in(2).reg());
__ movl(compiler::Address(ESP, +3 * kWordSize), locs()->in(3).reg());
// Call the function.
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
// Restore ESP and pop the old value off the stack.
__ movl(ESP, kSavedSPReg);
rt.Call(TargetFunction(), 4);
}
LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
@@ -5281,18 +5272,17 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
__ jmp(&skip_call);
__ Bind(&do_pow);
// Save ESP.
__ movl(locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg(), ESP);
__ ReserveAlignedFrameSpace(kDoubleSize * kInputCount);
for (intptr_t i = 0; i < kInputCount; i++) {
__ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg());
{
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/kDoubleSize * kInputCount,
/*preserve_registers=*/false);
for (intptr_t i = 0; i < kInputCount; i++) {
__ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg());
}
rt.Call(instr->TargetFunction(), kInputCount);
__ fstpl(compiler::Address(ESP, 0));
__ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0));
}
ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(instr->TargetFunction(), kInputCount);
__ fstpl(compiler::Address(ESP, 0));
__ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0));
// Restore ESP.
__ movl(ESP, locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg());
__ Bind(&skip_call);
}
@@ -5301,19 +5291,19 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
InvokeDoublePow(compiler, this);
return;
}
// Save ESP.
__ movl(locs()->temp(kSavedSpTempIndex).reg(), ESP);
__ ReserveAlignedFrameSpace(kDoubleSize * InputCount());
for (intptr_t i = 0; i < InputCount(); i++) {
__ movsd(compiler::Address(ESP, kDoubleSize * i), locs()->in(i).fpu_reg());
}
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
__ fstpl(compiler::Address(ESP, 0));
__ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0));
// Restore ESP.
__ movl(ESP, locs()->temp(kSavedSpTempIndex).reg());
{
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/kDoubleSize * InputCount(),
/*preserve_registers=*/false);
for (intptr_t i = 0; i < InputCount(); i++) {
__ movsd(compiler::Address(ESP, kDoubleSize * i),
locs()->in(i).fpu_reg());
}
rt.Call(TargetFunction(), InputCount());
__ fstpl(compiler::Address(ESP, 0));
__ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0));
}
}
LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,


@@ -4903,6 +4903,9 @@ void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
if (locs()->in(3).IsRegister()) {
__ mv(A3, locs()->in(3).reg());
} else if (locs()->in(3).IsStackSlot()) {
@@ -4910,10 +4913,7 @@ void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
UNIMPLEMENTED();
}
// Call the function.
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
rt.Call(TargetFunction(), TargetFunction().argument_count());
}
LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
@@ -5235,8 +5235,15 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
ASSERT(locs()->in(0).fpu_reg() == FA0);
if (InputCount() == 2) {
ASSERT(locs()->in(1).fpu_reg() == FA1);
}
rt.Call(TargetFunction(), InputCount());
ASSERT(locs()->out(0).fpu_reg() == FA0);
// TODO(riscv): Special case pow?
}


@@ -5031,18 +5031,11 @@ LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
}
void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Save RSP. R13 is chosen because it is callee saved so we do not need to
// back it up before calling into the runtime.
static const Register kSavedSPReg = R13;
__ movq(kSavedSPReg, RSP);
__ ReserveAlignedFrameSpace(0);
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
// Call the function. Parameters are already in their correct spots.
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), TargetFunction().argument_count());
// Restore RSP.
__ movq(RSP, kSavedSPReg);
rt.Call(TargetFunction(), TargetFunction().argument_count());
}
LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
@@ -5524,18 +5517,15 @@ static void InvokeDoublePow(FlowGraphCompiler* compiler,
__ jmp(&skip_call);
__ Bind(&do_pow);
// Save RSP.
__ movq(locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg(), RSP);
__ ReserveAlignedFrameSpace(0);
__ movaps(XMM0, locs->in(0).fpu_reg());
ASSERT(locs->in(1).fpu_reg() == XMM1);
ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(instr->TargetFunction(), kInputCount);
__ movaps(locs->out(0).fpu_reg(), XMM0);
// Restore RSP.
__ movq(RSP, locs->temp(InvokeMathCFunctionInstr::kSavedSpTempIndex).reg());
{
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
__ movaps(XMM0, locs->in(0).fpu_reg());
ASSERT(locs->in(1).fpu_reg() == XMM1);
rt.Call(instr->TargetFunction(), kInputCount);
__ movaps(locs->out(0).fpu_reg(), XMM0);
}
__ Bind(&skip_call);
}
@@ -5545,21 +5535,15 @@ void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
return;
}
compiler::LeafRuntimeScope rt(compiler->assembler(),
/*frame_size=*/0,
/*preserve_registers=*/false);
ASSERT(locs()->in(0).fpu_reg() == XMM0);
if (InputCount() == 2) {
ASSERT(locs()->in(1).fpu_reg() == XMM1);
}
// Save RSP.
__ movq(locs()->temp(kSavedSpTempIndex).reg(), RSP);
__ ReserveAlignedFrameSpace(0);
ASSERT(TargetFunction().is_leaf()); // No deopt info needed.
__ CallRuntime(TargetFunction(), InputCount());
rt.Call(TargetFunction(), InputCount());
ASSERT(locs()->out(0).fpu_reg() == XMM0);
// Restore RSP.
__ movq(RSP, locs()->temp(kSavedSpTempIndex).reg());
}
LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,


@@ -320,6 +320,10 @@ bool RuntimeEntry::is_leaf() const {
return runtime_entry_->is_leaf();
}
intptr_t RuntimeEntry::argument_count() const {
return runtime_entry_->argument_count();
}
namespace target {
const word kOldPageSize = dart::kOldPageSize;


@@ -211,10 +211,6 @@ word LookupFieldOffsetInBytes(const Field& field);
uword SymbolsPredefinedAddress();
#endif
typedef void (*RuntimeEntryCallInternal)(const dart::RuntimeEntry*,
Assembler*,
intptr_t);
const Code& StubCodeAllocateArray();
const Code& StubCodeSubtype3TestCache();
const Code& StubCodeSubtype7TestCache();
@@ -223,43 +219,17 @@ class RuntimeEntry : public ValueObject {
public:
virtual ~RuntimeEntry() {}
void Call(Assembler* assembler, intptr_t argument_count) const {
ASSERT(call_ != NULL);
ASSERT(runtime_entry_ != NULL);
// We call a manually set function pointer which points to the
// implementation of call for the subclass. We do this instead of just
// defining Call in this class as a pure virtual method and providing an
// implementation in the subclass as RuntimeEntry objects are declared as
// globals which causes problems on Windows.
//
// When exit() is called on Windows, global objects start to be destroyed.
// As part of an object's destruction, the vtable is reset to that of the
// base class. Since some threads may still be running and accessing these
// now destroyed globals, an invocation to dart::RuntimeEntry::Call would
// instead invoke dart::compiler::RuntimeEntry::Call. If
// dart::compiler::RuntimeEntry::Call were a pure virtual method, _purecall
// would be invoked to handle the invalid call and attempt to call exit(),
// causing the process to hang on a lock.
//
// By removing the need to rely on a potentially invalid vtable at exit,
// we should be able to avoid hanging or crashing the process at shutdown,
// even as global objects start to be destroyed. See issue #35855.
call_(runtime_entry_, assembler, argument_count);
}
word OffsetFromThread() const;
bool is_leaf() const;
intptr_t argument_count() const;
protected:
RuntimeEntry(const dart::RuntimeEntry* runtime_entry,
RuntimeEntryCallInternal call)
: runtime_entry_(runtime_entry), call_(call) {}
explicit RuntimeEntry(const dart::RuntimeEntry* runtime_entry)
: runtime_entry_(runtime_entry) {}
private:
const dart::RuntimeEntry* runtime_entry_;
RuntimeEntryCallInternal call_;
};
#define DECLARE_RUNTIME_ENTRY(name) \
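The comment deleted above is the heart of this hunk: the manual call_ function pointer existed only to avoid virtual dispatch on global RuntimeEntry objects during Windows shutdown. The underlying language rule is easy to demonstrate in self-contained C++: while an object is being destroyed, its dynamic type reverts to the base class, so virtual calls stop reaching the derived override (and would reach _purecall if the base method were pure virtual). This single-threaded toy only approximates the VM's failure mode, where other threads called into half-destroyed globals:

#include <cstdio>

struct Base {
  virtual void Call() { std::puts("Base::Call (would be _purecall if pure)"); }
  virtual ~Base() {
    // By this point the Derived part is gone, so dispatch resolves to
    // Base::Call -- the effect behind issue #35855.
    Call();
  }
};

struct Derived : Base {
  void Call() override { std::puts("Derived::Call"); }
};

int main() {
  Base* entry = new Derived;
  entry->Call();  // Prints Derived::Call.
  delete entry;   // The destructor's own call prints Base::Call.
  return 0;
}

With Call gone from the compiler-side class entirely, the vtable workaround and its comment can go too.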

File diff suppressed because it is too large.


@@ -191,7 +191,6 @@
FIELD(Thread, AllocateArray_entry_point_offset) \
FIELD(Thread, active_exception_offset) \
FIELD(Thread, active_stacktrace_offset) \
FIELD(Thread, array_write_barrier_code_offset) \
FIELD(Thread, array_write_barrier_entry_point_offset) \
FIELD(Thread, allocate_mint_with_fpu_regs_entry_point_offset) \
FIELD(Thread, allocate_mint_with_fpu_regs_stub_offset) \
@@ -279,7 +278,6 @@
FIELD(Thread, unboxed_int64_runtime_arg_offset) \
FIELD(Thread, unboxed_double_runtime_arg_offset) \
FIELD(Thread, vm_tag_offset) \
FIELD(Thread, write_barrier_code_offset) \
FIELD(Thread, write_barrier_entry_point_offset) \
FIELD(Thread, write_barrier_mask_offset) \
FIELD(Thread, heap_base_offset) \


@@ -43,16 +43,13 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
__ tst(R0, Operand(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ BranchIf(NOT_ZERO, &done);
if (preserve_registers) {
__ EnterCallRuntimeFrame(0);
} else {
__ ReserveAlignedFrameSpace(0);
}
// [R0] already contains first argument.
__ mov(R1, Operand(THR));
__ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
if (preserve_registers) {
__ LeaveCallRuntimeFrame();
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
// [R0] already contains first argument.
__ mov(R1, Operand(THR));
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
}
__ Bind(&done);
@@ -841,13 +838,17 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
}
__ mov(R0, Operand(SP)); // Pass address of saved registers block.
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ mov(R1, Operand(is_lazy ? 1 : 0));
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
{
__ mov(R0, Operand(SP)); // Pass address of saved registers block.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ mov(R1, Operand(is_lazy ? 1 : 0));
rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
@@ -865,15 +866,19 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
// is no need to set the correct PC marker or load PP, since they get patched.
__ EnterStubFrame();
__ mov(R0, Operand(FP)); // Get last FP address.
if (kind == kLazyDeoptFromReturn) {
__ Push(R1); // Preserve result as first local.
} else if (kind == kLazyDeoptFromThrow) {
__ Push(R1); // Preserve exception as first local.
__ Push(R2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
{
__ mov(R0, Operand(FP)); // Get last FP address.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
@@ -1559,7 +1564,6 @@ COMPILE_ASSERT(kWriteBarrierObjectReg == R1);
COMPILE_ASSERT(kWriteBarrierValueReg == R0);
COMPILE_ASSERT(kWriteBarrierSlotReg == R9);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ tst(R0, Operand(1 << target::ObjectAlignment::kNewObjectBitPosition));
@@ -1618,16 +1622,12 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Handle overflow: Call the runtime leaf function.
__ Bind(&overflow);
// Setup frame, push callee-saved registers.
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
__ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, Operand(THR));
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
// Restore callee-saved registers, tear down frame.
__ LeaveCallRuntimeFrame();
__ Pop(CODE_REG);
{
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, Operand(THR));
rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
}
__ Ret();
__ Bind(&add_to_mark_stack);
@@ -1659,13 +1659,12 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
__ Ret();
__ Bind(&marking_overflow);
__ Push(CODE_REG);
__ ldr(CODE_REG, stub_code);
__ EnterCallRuntimeFrame(0 * target::kWordSize);
__ mov(R0, Operand(THR));
__ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
__ Pop(CODE_REG);
{
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, Operand(THR));
rt.Call(kMarkingStackBlockProcessRuntimeEntry, 1);
}
__ Ret();
__ Bind(&lost_race);
@@ -1698,32 +1697,23 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Card table not yet allocated.
__ Bind(&remember_card_slow);
__ Push(CODE_REG);
__ Push(R0);
__ Push(R1);
__ ldr(CODE_REG, stub_code);
__ mov(R0, Operand(R1)); // Arg0 = Object
__ mov(R1, Operand(R9)); // Arg1 = Slot
__ EnterCallRuntimeFrame(0);
__ CallRuntime(kRememberCardRuntimeEntry, 2);
__ LeaveCallRuntimeFrame();
__ Pop(R1);
__ Pop(R0);
__ Pop(CODE_REG);
{
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, Operand(R1)); // Arg0 = Object
__ mov(R1, Operand(R9)); // Arg1 = Slot
rt.Call(kRememberCardRuntimeEntry, 2);
}
__ Ret();
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler, Address(THR, target::Thread::write_barrier_code_offset()),
false);
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler,
Address(THR, target::Thread::array_write_barrier_code_offset()), true);
GenerateWriteBarrierStubHelper(assembler, true);
}
static void GenerateAllocateObjectHelper(Assembler* assembler,


@@ -42,11 +42,11 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
__ tbnz(&done, R0, target::ObjectAlignment::kNewObjectBitPosition);
{
Assembler::CallRuntimeScope scope(
assembler, kEnsureRememberedAndMarkingDeferredRuntimeEntry,
/*frame_size=*/0, /*preserve_registers=*/preserve_registers);
LeafRuntimeScope rt(assembler, /*frame_size=*/0, preserve_registers);
// R0 already loaded.
__ mov(R1, THR);
scope.Call(/*argument_count=*/2);
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry,
/*argument_count=*/2);
}
__ Bind(&done);
@@ -977,13 +977,17 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ PushQuad(vreg);
}
__ mov(R0, SP); // Pass address of saved registers block.
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ LoadImmediate(R1, is_lazy ? 1 : 0);
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
{
__ mov(R0, SP); // Pass address of saved registers block.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ LoadImmediate(R1, is_lazy ? 1 : 0);
rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (R0) is stack-size (FP - SP) in bytes.
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1 temporarily.
@@ -1010,9 +1014,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ Push(R1); // Preserve exception as first local.
__ Push(R2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
__ mov(R0, FP); // Pass last FP as parameter in R0.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
{
__ mov(R0, FP); // Pass last FP as parameter in R0.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ LoadFromOffset(
@@ -1737,7 +1745,6 @@ COMPILE_ASSERT(kWriteBarrierObjectReg == R1);
COMPILE_ASSERT(kWriteBarrierValueReg == R0);
COMPILE_ASSERT(kWriteBarrierSlotReg == R25);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ tbz(&add_to_mark_stack, R0,
@@ -1800,11 +1807,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Handle overflow: Call the runtime leaf function.
__ Bind(&overflow);
{
Assembler::CallRuntimeScope scope(assembler,
kStoreBufferBlockProcessRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, THR);
scope.Call(/*argument_count=*/1);
rt.Call(kStoreBufferBlockProcessRuntimeEntry, /*argument_count=*/1);
}
__ ret();
@@ -1842,11 +1849,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
__ Bind(&marking_overflow);
{
Assembler::CallRuntimeScope scope(assembler,
kMarkingStackBlockProcessRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, THR);
scope.Call(/*argument_count=*/1);
rt.Call(kMarkingStackBlockProcessRuntimeEntry, /*argument_count=*/1);
}
__ ret();
@@ -1882,26 +1889,23 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Card table not yet allocated.
__ Bind(&remember_card_slow);
{
Assembler::CallRuntimeScope scope(assembler, kRememberCardRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, R1); // Arg0 = Object
__ mov(R1, R25); // Arg1 = Slot
scope.Call(/*argument_count=*/2);
rt.Call(kRememberCardRuntimeEntry, /*argument_count=*/2);
}
__ ret();
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler, Address(THR, target::Thread::write_barrier_code_offset()),
false);
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler,
Address(THR, target::Thread::array_write_barrier_code_offset()), true);
GenerateWriteBarrierStubHelper(assembler, true);
}
static void GenerateAllocateObjectHelper(Assembler* assembler,


@@ -41,16 +41,13 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
__ testl(EAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ BranchIf(NOT_ZERO, &done);
if (preserve_registers) {
__ EnterCallRuntimeFrame(2 * target::kWordSize);
} else {
__ ReserveAlignedFrameSpace(2 * target::kWordSize);
}
__ movl(Address(ESP, 1 * target::kWordSize), THR);
__ movl(Address(ESP, 0 * target::kWordSize), EAX);
__ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
if (preserve_registers) {
__ LeaveCallRuntimeFrame();
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/2 * target::kWordSize,
preserve_registers);
__ movl(Address(ESP, 1 * target::kWordSize), THR);
__ movl(Address(ESP, 0 * target::kWordSize), EAX);
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
}
__ Bind(&done);
@@ -651,15 +648,19 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
offset += kFpuRegisterSize;
}
__ movl(ECX, ESP); // Preserve saved registers block.
__ ReserveAlignedFrameSpace(2 * target::kWordSize);
__ movl(Address(ESP, 0 * target::kWordSize),
ECX); // Start of register block.
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (EAX) is stack-size (FP - SP) in bytes.
{
__ movl(ECX, ESP); // Preserve saved registers block.
LeafRuntimeScope rt(assembler,
/*frame_size=*/2 * target::kWordSize,
/*preserve_registers=*/false);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ movl(Address(ESP, 0 * target::kWordSize),
ECX); // Start of register block.
__ movl(Address(ESP, 1 * target::kWordSize), Immediate(is_lazy ? 1 : 0));
rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (EAX) is stack-size (FP - SP) in bytes.
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX temporarily.
@@ -686,9 +687,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ pushl(EBX); // Preserve exception as first local.
__ pushl(ECX); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(1 * target::kWordSize);
__ movl(Address(ESP, 0), EBP); // Pass last FP as parameter on stack.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/1 * target::kWordSize,
/*preserve_registers=*/false);
__ movl(Address(ESP, 0), EBP); // Pass last FP as parameter on stack.
rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX.
__ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
@@ -1324,7 +1329,6 @@ COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
COMPILE_ASSERT(kWriteBarrierValueReg == kNoRegister);
COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
Address stub_code,
bool cards) {
Label remember_card;
@@ -1406,13 +1410,13 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Handle overflow: Call the runtime leaf function.
__ Bind(&overflow);
// Setup frame, push callee-saved registers.
__ EnterCallRuntimeFrame(1 * target::kWordSize);
__ movl(Address(ESP, 0), THR); // Push the thread as the only argument.
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
// Restore callee-saved registers, tear down frame.
__ LeaveCallRuntimeFrame();
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/1 * target::kWordSize,
/*preserve_registers=*/true);
__ movl(Address(ESP, 0), THR); // Push the thread as the only argument.
rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
}
__ ret();
__ Bind(&lost_race);
@@ -1444,11 +1448,15 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Card table not yet allocated.
__ Bind(&remember_card_slow);
__ EnterCallRuntimeFrame(2 * target::kWordSize);
__ movl(Address(ESP, 0 * target::kWordSize), EDX); // Object
__ movl(Address(ESP, 1 * target::kWordSize), EDI); // Slot
__ CallRuntime(kRememberCardRuntimeEntry, 2);
__ LeaveCallRuntimeFrame();
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/2 * target::kWordSize,
/*preserve_registers=*/true);
__ movl(Address(ESP, 0 * target::kWordSize), EDX); // Object
__ movl(Address(ESP, 1 * target::kWordSize), EDI); // Slot
rt.Call(kRememberCardRuntimeEntry, 2);
}
__ popl(ECX);
__ popl(EAX);
__ ret();
@@ -1456,15 +1464,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler, Address(THR, target::Thread::write_barrier_code_offset()),
false);
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler,
Address(THR, target::Thread::array_write_barrier_code_offset()), true);
GenerateWriteBarrierStubHelper(assembler, true);
}
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {


@@ -43,11 +43,11 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
__ bnez(TMP2, &done);
{
Assembler::CallRuntimeScope scope(
assembler, kEnsureRememberedAndMarkingDeferredRuntimeEntry,
/*frame_size=*/0, /*preserve_registers=*/preserve_registers);
LeafRuntimeScope rt(assembler, /*frame_size=*/0, preserve_registers);
// A0 already loaded.
__ mv(A1, THR);
scope.Call(/*argument_count=*/2);
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry,
/*argument_count=*/2);
}
__ Bind(&done);
@@ -894,13 +894,17 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ fsd(freg, Address(SP, i * kFpuRegisterSize));
}
__ mv(A0, SP); // Pass address of saved registers block.
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ li(A1, is_lazy ? 1 : 0);
__ ReserveAlignedFrameSpace(0);
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (A0) is stack-size (FP - SP) in bytes.
{
__ mv(A0, SP); // Pass address of saved registers block.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ li(A1, is_lazy ? 1 : 0);
rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (A0) is stack-size (FP - SP) in bytes.
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into T1 temporarily.
@@ -927,9 +931,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ PushRegister(T1); // Preserve exception as first local.
__ PushRegister(T2); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
__ mv(A0, FP); // Pass last FP as parameter in A0.
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
{
__ mv(A0, FP); // Pass last FP as parameter in A0.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into T1.
__ LoadFromOffset(
@@ -1640,7 +1648,6 @@ COMPILE_ASSERT(kWriteBarrierObjectReg == A0);
COMPILE_ASSERT(kWriteBarrierValueReg == A1);
COMPILE_ASSERT(kWriteBarrierSlotReg == A6);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ andi(TMP2, A1, 1 << target::ObjectAlignment::kNewObjectBitPosition);
@@ -1716,11 +1723,10 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
__ lx(T2, Address(SP, 2 * target::kWordSize));
__ addi(SP, SP, 3 * target::kWordSize);
{
Assembler::CallRuntimeScope scope(assembler,
kStoreBufferBlockProcessRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mv(A0, THR);
scope.Call(/*argument_count=*/1);
rt.Call(kStoreBufferBlockProcessRuntimeEntry, /*argument_count=*/1);
}
__ ret();
@@ -1770,11 +1776,10 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
__ lx(T2, Address(SP, 2 * target::kWordSize));
__ addi(SP, SP, 3 * target::kWordSize);
{
Assembler::CallRuntimeScope scope(assembler,
kMarkingStackBlockProcessRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mv(A0, THR);
scope.Call(/*argument_count=*/1);
rt.Call(kMarkingStackBlockProcessRuntimeEntry, /*argument_count=*/1);
}
__ ret();
@@ -1809,26 +1814,22 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Card table not yet allocated.
__ Bind(&remember_card_slow);
{
Assembler::CallRuntimeScope scope(assembler, kRememberCardRuntimeEntry,
/*frame_size=*/0, stub_code);
LeafRuntimeScope rt(assembler, /*frame_size=*/0,
/*preserve_registers=*/true);
__ mv(A0, A0); // Arg0 = Object
__ mv(A1, A6); // Arg1 = Slot
scope.Call(/*argument_count=*/2);
rt.Call(kRememberCardRuntimeEntry, /*argument_count=*/2);
}
__ ret();
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler, Address(THR, target::Thread::write_barrier_code_offset()),
false);
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler,
Address(THR, target::Thread::array_write_barrier_code_offset()), true);
GenerateWriteBarrierStubHelper(assembler, true);
}
static void GenerateAllocateObjectHelper(Assembler* assembler,


@@ -45,16 +45,11 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
__ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ BranchIf(NOT_ZERO, &done);
if (preserve_registers) {
__ EnterCallRuntimeFrame(0);
} else {
__ ReserveAlignedFrameSpace(0);
}
__ movq(CallingConventions::kArg1Reg, RAX);
__ movq(CallingConventions::kArg2Reg, THR);
__ CallRuntime(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
if (preserve_registers) {
__ LeaveCallRuntimeFrame();
{
LeafRuntimeScope rt(assembler, /*frame_size=*/0, preserve_registers);
__ movq(CallingConventions::kArg1Reg, RAX);
__ movq(CallingConventions::kArg2Reg, THR);
rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
}
__ Bind(&done);
@@ -982,14 +977,18 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
offset += kFpuRegisterSize;
}
// Pass address of saved registers block.
__ movq(CallingConventions::kArg1Reg, RSP);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ movq(CallingConventions::kArg2Reg, Immediate(is_lazy ? 1 : 0));
__ ReserveAlignedFrameSpace(0); // Ensure stack is aligned before the call.
__ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (RAX) is stack-size (FP - SP) in bytes.
{
// Pass address of saved registers block.
__ movq(CallingConventions::kArg1Reg, RSP);
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
bool is_lazy =
(kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
__ movq(CallingConventions::kArg2Reg, Immediate(is_lazy ? 1 : 0));
rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
// Result (RAX) is stack-size (FP - SP) in bytes.
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX temporarily.
@@ -1021,10 +1020,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ pushq(RBX); // Preserve exception as first local.
__ pushq(RDX); // Preserve stacktrace as second local.
}
__ ReserveAlignedFrameSpace(0);
// Pass last FP as a parameter.
__ movq(CallingConventions::kArg1Reg, RBP);
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
{
__ movq(CallingConventions::kArg1Reg, RBP); // Pass last FP as a parameter.
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
}
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX.
__ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
@@ -1771,7 +1773,6 @@ COMPILE_ASSERT(kWriteBarrierObjectReg == RDX);
COMPILE_ASSERT(kWriteBarrierValueReg == RAX);
COMPILE_ASSERT(kWriteBarrierSlotReg == R13);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
Address stub_code,
bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
@@ -1833,14 +1834,13 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Handle overflow: Call the runtime leaf function.
__ Bind(&overflow);
// Setup frame, push callee-saved registers.
__ pushq(CODE_REG);
__ movq(CODE_REG, stub_code);
__ EnterCallRuntimeFrame(0);
__ movq(CallingConventions::kArg1Reg, THR);
__ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
__ popq(CODE_REG);
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ movq(CallingConventions::kArg1Reg, THR);
rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
}
__ ret();
__ Bind(&add_to_mark_stack);
@@ -1875,13 +1875,13 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
__ ret();
__ Bind(&marking_overflow);
__ pushq(CODE_REG);
__ movq(CODE_REG, stub_code);
__ EnterCallRuntimeFrame(0);
__ movq(CallingConventions::kArg1Reg, THR);
__ CallRuntime(kMarkingStackBlockProcessRuntimeEntry, 1);
__ LeaveCallRuntimeFrame();
__ popq(CODE_REG);
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ movq(CallingConventions::kArg1Reg, THR);
rt.Call(kMarkingStackBlockProcessRuntimeEntry, 1);
}
__ ret();
__ Bind(&lost_race);
@@ -1911,28 +1911,24 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
// Card table not yet allocated.
__ Bind(&remember_card_slow);
__ pushq(CODE_REG);
__ movq(CODE_REG, stub_code);
__ EnterCallRuntimeFrame(0);
__ movq(CallingConventions::kArg1Reg, RDX);
__ movq(CallingConventions::kArg2Reg, R13);
__ CallRuntime(kRememberCardRuntimeEntry, 2);
__ LeaveCallRuntimeFrame();
__ popq(CODE_REG);
{
LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/true);
__ movq(CallingConventions::kArg1Reg, RDX);
__ movq(CallingConventions::kArg2Reg, R13);
rt.Call(kRememberCardRuntimeEntry, 2);
}
__ ret();
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler, Address(THR, target::Thread::write_barrier_code_offset()),
false);
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(
assembler,
Address(THR, target::Thread::array_write_barrier_code_offset()), true);
GenerateWriteBarrierStubHelper(assembler, true);
}
static void GenerateAllocateObjectHelper(Assembler* assembler,


@@ -380,6 +380,7 @@ const int64_t kWRegMask = 0x00000000ffffffffL;
// List of registers used in load/store multiple.
typedef uint32_t RegList;
const RegList kAllCpuRegistersList = 0xFFFFFFFF;
const RegList kAllFpuRegistersList = 0xFFFFFFFF;
// See "Procedure Call Standard for the ARM 64-bit Architecture", document
// number "ARM IHI 0055B", May 22 2013.
@@ -429,16 +430,6 @@ const Register kDartLastVolatileCpuReg = R14;
const int kDartVolatileCpuRegCount = 15;
const int kDartVolatileFpuRegCount = 24;
// Two callee save scratch registers used by leaf runtime call sequence.
const Register kCallLeafRuntimeCalleeSaveScratch1 = R20;
const Register kCallLeafRuntimeCalleeSaveScratch2 = R25;
static_assert((R(kCallLeafRuntimeCalleeSaveScratch1) & kAbiPreservedCpuRegs) !=
0,
"Need callee save scratch register for leaf runtime calls.");
static_assert((R(kCallLeafRuntimeCalleeSaveScratch2) & kAbiPreservedCpuRegs) !=
0,
"Need callee save scratch register for leaf runtime calls.");
constexpr int kStoreBufferWrapperSize = 32;
class CallingConventions {

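Two changes in this constants file fit together: kAllFpuRegistersList is new, while the dedicated leaf-runtime scratch registers disappear (their role presumably absorbed by LeafRuntimeScope). A plausible reading, sketched below as an assumption rather than the VM's emitter, is that a preserve_registers=true scope walks RegList bitmasks like these to spill and reload everything, the SIMD/FPU registers included:

#include <cstdint>
#include <cstdio>

typedef uint32_t RegList;
const RegList kAllCpuRegistersList = 0xFFFFFFFF;
const RegList kAllFpuRegistersList = 0xFFFFFFFF;  // The constant added above.

// Emit (here: print) a save for every register selected by the mask.
void SaveAll(const char* prefix, RegList mask) {
  for (int i = 0; i < 32; i++) {
    if ((mask & (1u << i)) != 0) {
      std::printf("push %s%d\n", prefix, i);
    }
  }
}

int main() {
  SaveAll("R", kAllCpuRegistersList);  // General-purpose registers.
  SaveAll("V", kAllFpuRegistersList);  // SIMD/FPU registers as well.
  return 0;
}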

@@ -425,16 +425,6 @@ constexpr int kAbiPreservedFpuRegCount = 12;
constexpr intptr_t kReservedFpuRegisters = 0;
constexpr intptr_t kNumberOfReservedFpuRegisters = 0;
// Two callee save scratch registers used by leaf runtime call sequence.
constexpr Register kCallLeafRuntimeCalleeSaveScratch1 = CALLEE_SAVED_TEMP;
constexpr Register kCallLeafRuntimeCalleeSaveScratch2 = CALLEE_SAVED_TEMP2;
static_assert((R(kCallLeafRuntimeCalleeSaveScratch1) & kAbiPreservedCpuRegs) !=
0,
"Need callee save scratch register for leaf runtime calls.");
static_assert((R(kCallLeafRuntimeCalleeSaveScratch2) & kAbiPreservedCpuRegs) !=
0,
"Need callee save scratch register for leaf runtime calls.");
constexpr int kStoreBufferWrapperSize = 26;
class CallingConventions {


@@ -35,7 +35,6 @@
#include "vm/raw_object.h"
#include "vm/report.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
#include "vm/thread.h"
#include "vm/token_position.h"


@@ -38,7 +38,7 @@ class RuntimeEntry : public BaseRuntimeEntry {
bool can_lazy_deopt)
:
#if !defined(DART_PRECOMPILED_RUNTIME)
compiler::RuntimeEntry(this, &CallInternal),
compiler::RuntimeEntry(this),
#endif
name_(name),
function_(function),
@@ -56,15 +56,6 @@ bool can_lazy_deopt() const { return can_lazy_deopt_; }
bool can_lazy_deopt() const { return can_lazy_deopt_; }
uword GetEntryPoint() const;
// Generate code to call the runtime entry.
NOT_IN_PRECOMPILED(void Call(compiler::Assembler* assembler,
intptr_t argument_count) const);
protected:
NOT_IN_PRECOMPILED(static void CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count));
private:
const char* const name_;
const RuntimeFunction function_;


@@ -39,41 +39,6 @@ uword RuntimeEntry::GetEntryPoint() const {
return entry;
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// Generate code to call into the stub which will call the runtime
// function. Input for the stub is as follows:
// SP : points to the arguments and return value array.
// R9 : address of the runtime function to call.
// R4 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
__ LoadFromOffset(
TMP, THR, compiler::target::Thread::OffsetFromThread(runtime_entry));
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
__ blx(TMP);
__ LoadImmediate(TMP, VMTag::kDartTagId);
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
// These registers must be preserved by runtime functions, otherwise
// we'd need to restore them here.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(PP));
COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ LoadFromOffset(
R9, THR, compiler::target::Thread::OffsetFromThread(runtime_entry));
__ LoadImmediate(R4, argument_count);
__ BranchLinkToRuntime();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // defined TARGET_ARCH_ARM


@@ -39,58 +39,6 @@ uword RuntimeEntry::GetEntryPoint() const {
return entry;
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// Generate code to call into the stub which will call the runtime
// function. Input for the stub is as follows:
// SP : points to the arguments and return value array.
// R5 : address of the runtime function to call.
// R4 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
// Since we are entering C++ code, we must restore the C stack pointer from
// the stack limit to an aligned value nearer to the top of the stack.
// We cache the Dart stack pointer and the stack limit in callee-saved
// registers, then align and call, restoring CSP and SP on return from the
// call.
// This sequence may occur in an intrinsic, so don't use registers an
// intrinsic must preserve.
COMPILE_ASSERT(kCallLeafRuntimeCalleeSaveScratch1 != CODE_REG);
COMPILE_ASSERT(kCallLeafRuntimeCalleeSaveScratch2 != CODE_REG);
COMPILE_ASSERT(kCallLeafRuntimeCalleeSaveScratch1 != ARGS_DESC_REG);
COMPILE_ASSERT(kCallLeafRuntimeCalleeSaveScratch2 != ARGS_DESC_REG);
__ mov(kCallLeafRuntimeCalleeSaveScratch1, CSP);
__ mov(kCallLeafRuntimeCalleeSaveScratch2, SP);
__ ReserveAlignedFrameSpace(0);
__ mov(CSP, SP);
__ ldr(TMP,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ str(TMP, compiler::Address(THR, Thread::vm_tag_offset()));
__ blr(TMP);
__ LoadImmediate(TMP, VMTag::kDartTagId);
__ str(TMP, compiler::Address(THR, Thread::vm_tag_offset()));
__ mov(SP, kCallLeafRuntimeCalleeSaveScratch2);
__ mov(CSP, kCallLeafRuntimeCalleeSaveScratch1);
// These registers must be preserved by runtime functions, otherwise
// we'd need to restore them here.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(PP));
COMPILE_ASSERT(IsCalleeSavedRegister(CODE_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(HEAP_BITS));
COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ ldr(R5, compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ LoadImmediate(R4, argument_count);
__ BranchLinkToRuntime();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // defined TARGET_ARCH_ARM64


@@ -18,38 +18,6 @@ uword RuntimeEntry::GetEntryPoint() const {
return reinterpret_cast<uword>(function());
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// Generate code to call into the stub which will call the runtime
// function. Input for the stub is as follows:
// For regular runtime calls -
// ESP : points to the arguments and return value array.
// ECX : address of the runtime function to call.
// EDX : number of arguments to the call as Smi.
// For leaf calls the caller is responsible to setup the arguments
// and look for return values based on the C calling convention.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
__ movl(EAX, compiler::Immediate(runtime_entry->GetEntryPoint()));
__ movl(compiler::Assembler::VMTagAddress(), EAX);
__ call(EAX);
__ movl(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartTagId));
// These registers must be preserved by runtime functions, otherwise
// we'd need to restore them here.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ movl(ECX, compiler::Immediate(runtime_entry->GetEntryPoint()));
__ movl(EDX, compiler::Immediate(argument_count));
__ CallToRuntime();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // defined TARGET_ARCH_IA32


@@ -39,47 +39,6 @@ uword RuntimeEntry::GetEntryPoint() const {
return entry;
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// Generate code to call into the stub which will call the runtime
// function. Input for the stub is as follows:
// SP : points to the arguments and return value array.
// T5 : address of the runtime function to call.
// T4 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
COMPILE_ASSERT(!IsAbiPreservedRegister(PP));
// PP is a C volatile register.
// SP will be aligned to the C stack alignment.
__ mv(CALLEE_SAVED_TEMP, PP);
__ mv(CALLEE_SAVED_TEMP2, SP);
__ lx(TMP2,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ sx(TMP2, compiler::Address(THR, Thread::vm_tag_offset()));
__ ReserveAlignedFrameSpace(0);
__ jalr(TMP2);
__ LoadImmediate(TMP2, VMTag::kDartTagId);
__ sx(TMP2, compiler::Address(THR, Thread::vm_tag_offset()));
__ mv(PP, CALLEE_SAVED_TEMP);
__ mv(SP, CALLEE_SAVED_TEMP2);
// These registers must be preserved by runtime functions, otherwise
// we'd need to restore them here.
COMPILE_ASSERT(IsCalleeSavedRegister(THR));
COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_MASK));
COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ lx(T5, compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ li(T4, argument_count);
__ JumpAndLinkToRuntime();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // defined TARGET_ARCH_RISCV


@@ -20,40 +20,6 @@ uword RuntimeEntry::GetEntryPoint() const {
return reinterpret_cast<uword>(function());
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// Generate code to call into the stub which will call the runtime
// function. Input for the stub is as follows:
// RSP : points to the arguments and return value array.
// RBX : address of the runtime function to call.
// R10 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
__ movq(RAX,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ movq(compiler::Assembler::VMTagAddress(), RAX);
__ CallCFunction(RAX);
__ movq(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartTagId));
// These registers must be preserved by runtime functions, otherwise
// we'd need to restore them here.
ASSERT(IsCalleeSavedRegister(THR));
ASSERT(IsCalleeSavedRegister(PP));
ASSERT(IsCalleeSavedRegister(CODE_REG));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ movq(RBX,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ LoadImmediate(R10, compiler::Immediate(argument_count));
__ CallToRuntime();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // defined TARGET_ARCH_X64
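Despite the different registers, every per-architecture CallInternal deleted above shares one skeleton: load the entry point out of the Thread, write it into vm_tag, make the C call, then reset vm_tag to VMTag::kDartTagId. A small runnable model of that tag bracketing (types and names simplified; the tag presumably lets profiling and stack-walking code classify the thread as being in a native leaf call rather than Dart code):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kDartTagId = 1;  // Stand-in for VMTag::kDartTagId.

struct Thread {
  uintptr_t vm_tag = kDartTagId;
};

// The common skeleton: tag with the target's address around the C call,
// restore the Dart tag afterwards.
void CallLeafRuntime(Thread* thread, void (*entry)()) {
  thread->vm_tag = reinterpret_cast<uintptr_t>(entry);
  entry();
  thread->vm_tag = kDartTagId;
}

void StoreBufferBlockProcess() { std::puts("leaf runtime entry"); }

int main() {
  Thread thread;
  CallLeafRuntime(&thread, &StoreBufferBlockProcess);
  std::printf("tag restored: %s\n", thread.vm_tag == kDartTagId ? "yes" : "no");
  return 0;
}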


@@ -966,6 +966,19 @@ void Simulator::set_register(Instr* instr,
if ((instr != NULL) && (reg == R31) && !Utils::IsAligned(value, 16)) {
UnalignedAccess("CSP set", value, instr);
}
#if defined(DEBUG)
if (reg == SP) {
// Memory below CSP can be written to at any instruction boundary by a
// signal handler. Simulate this to ensure we're keeping CSP far enough
// ahead of SP to prevent Dart frames from being trashed.
uword csp = registers_[R31];
WriteX(csp - 1 * kWordSize, icount_, NULL);
WriteX(csp - 2 * kWordSize, icount_, NULL);
WriteX(csp - 3 * kWordSize, icount_, NULL);
WriteX(csp - 4 * kWordSize, icount_, NULL);
}
#endif
}
}
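The DEBUG block added to set_register makes the simulator as hostile as real hardware: a signal may arrive between any two instructions and its handler frame is written below CSP, so the simulator now dirties the four words under CSP on every SP update. The invariant this enforces is that CSP stays at or below the Dart SP, keeping the dirtied words outside any live frame. A self-contained model of the check (word size and addresses illustrative):

#include <cstdint>
#include <cstdio>

constexpr int kWordSize = 8;

// The simulator dirties the four words below CSP whenever SP changes; a live
// Dart frame [sp, fp) survives only if none of those words fall inside it,
// i.e. only if CSP is kept at or below SP.
bool FrameSurvives(uintptr_t csp, uintptr_t sp, uintptr_t fp) {
  for (int i = 1; i <= 4; i++) {
    uintptr_t dirtied = csp - i * kWordSize;
    if (dirtied >= sp && dirtied < fp) return false;
  }
  return true;
}

int main() {
  // CSP at SP: the dirtied words lie below the frame. Survives.
  std::printf("csp == sp: %d\n", FrameSurvives(0x1F00, 0x1F00, 0x2000));
  // CSP left above SP: the dirtied words land inside the frame. Trashed.
  std::printf("csp >  sp: %d\n", FrameSurvives(0x1F40, 0x1F00, 0x2000));
  return 0;
}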


@@ -81,12 +81,16 @@ static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ EnterDartFrame(0);
__ ReserveAlignedFrameSpace(0);
__ LoadObject(R0, str);
__ LoadObject(R1, lhs_index);
__ LoadObject(R2, rhs_index);
__ LoadObject(R3, length);
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
{
compiler::LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
__ LoadObject(R0, str);
__ LoadObject(R1, lhs_index);
__ LoadObject(R2, rhs_index);
__ LoadObject(R3, length);
rt.Call(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
}
__ LeaveDartFrame();
__ ret(); // Return value is in R0.
}
@@ -107,6 +111,10 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
Code::PoolAttachment::kAttachPool));
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
}
const Function& function = RegisterFakeFunction(kName, code);
Instance& result = Instance::Handle();
result ^= DartEntry::InvokeFunction(function, Object::empty_array());


@@ -80,12 +80,16 @@ static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ EnterDartFrame(0);
__ ReserveAlignedFrameSpace(0);
__ LoadObject(R0, str);
__ LoadObject(R1, lhs_index);
__ LoadObject(R2, rhs_index);
__ LoadObject(R3, length);
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
{
compiler::LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
__ LoadObject(R0, str);
__ LoadObject(R1, lhs_index);
__ LoadObject(R2, rhs_index);
__ LoadObject(R3, length);
rt.Call(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
}
__ LeaveDartFrameAndReturn(); // Return value is in R0.
}
@@ -105,6 +109,10 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
Code::PoolAttachment::kAttachPool));
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
}
const Function& function = RegisterFakeFunction(kName, code);
Instance& result = Instance::Handle();
result ^= DartEntry::InvokeFunction(function, Object::empty_array());


@@ -80,16 +80,20 @@ static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ enter(compiler::Immediate(0));
__ ReserveAlignedFrameSpace(4 * kWordSize);
__ LoadObject(EAX, str);
__ movl(compiler::Address(ESP, 0), EAX); // Push argument 1.
__ LoadObject(EAX, lhs_index);
__ movl(compiler::Address(ESP, kWordSize), EAX); // Push argument 2.
__ LoadObject(EAX, rhs_index);
__ movl(compiler::Address(ESP, 2 * kWordSize), EAX); // Push argument 3.
__ LoadObject(EAX, length);
__ movl(compiler::Address(ESP, 3 * kWordSize), EAX); // Push argument 4.
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
{
compiler::LeafRuntimeScope rt(assembler,
/*frame_size=*/4 * kWordSize,
/*preserve_registers=*/false);
__ LoadObject(EAX, str);
__ movl(compiler::Address(ESP, 0), EAX); // Push argument 1.
__ LoadObject(EAX, lhs_index);
__ movl(compiler::Address(ESP, kWordSize), EAX); // Push argument 2.
__ LoadObject(EAX, rhs_index);
__ movl(compiler::Address(ESP, 2 * kWordSize), EAX); // Push argument 3.
__ LoadObject(EAX, length);
__ movl(compiler::Address(ESP, 3 * kWordSize), EAX); // Push argument 4.
rt.Call(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
}
__ leave();
__ ret(); // Return value is in EAX.
}
@@ -109,6 +113,10 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
Code::PoolAttachment::kAttachPool));
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
}
const Function& function = RegisterFakeFunction(kName, code);
Instance& result = Instance::Handle();
result ^= DartEntry::InvokeFunction(function, Object::empty_array());


@@ -81,12 +81,16 @@ static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ EnterDartFrame(0);
__ ReserveAlignedFrameSpace(0);
__ LoadObject(A0, str);
__ LoadObject(A1, lhs_index);
__ LoadObject(A2, rhs_index);
__ LoadObject(A3, length);
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
{
compiler::LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
__ LoadObject(A0, str);
__ LoadObject(A1, lhs_index);
__ LoadObject(A2, rhs_index);
__ LoadObject(A3, length);
rt.Call(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
}
__ LeaveDartFrame();
__ ret(); // Return value is in A0.
}
@@ -107,6 +111,10 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
Code::PoolAttachment::kAttachPool));
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
}
const Function& function = RegisterFakeFunction(kName, code);
Instance& result = Instance::Handle();
result ^= DartEntry::InvokeFunction(function, Object::empty_array());


@@ -81,12 +81,16 @@ static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ EnterStubFrame();
__ ReserveAlignedFrameSpace(0);
__ LoadObject(CallingConventions::kArg1Reg, str);
__ LoadObject(CallingConventions::kArg2Reg, lhs_index);
__ LoadObject(CallingConventions::kArg3Reg, rhs_index);
__ LoadObject(CallingConventions::kArg4Reg, length);
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
{
compiler::LeafRuntimeScope rt(assembler,
/*frame_size=*/0,
/*preserve_registers=*/false);
__ LoadObject(CallingConventions::kArg1Reg, str);
__ LoadObject(CallingConventions::kArg2Reg, lhs_index);
__ LoadObject(CallingConventions::kArg3Reg, rhs_index);
__ LoadObject(CallingConventions::kArg4Reg, length);
rt.Call(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
}
__ LeaveStubFrame();
__ ret(); // Return value is in RAX.
}
@@ -107,6 +111,10 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallLeafRuntimeStubCode"), nullptr, &assembler,
Code::PoolAttachment::kAttachPool));
if (FLAG_disassemble) {
OS::PrintErr("Disassemble:\n");
code.Disassemble();
}
const Function& function = RegisterFakeFunction(kName, code);
Instance& result = Instance::Handle();
result ^= DartEntry::InvokeFunction(function, Object::empty_array());


@@ -92,9 +92,6 @@ class Thread;
V(TypeParameter)
#define CACHED_VM_STUBS_LIST(V) \
V(CodePtr, write_barrier_code_, StubCode::WriteBarrier().ptr(), nullptr) \
V(CodePtr, array_write_barrier_code_, StubCode::ArrayWriteBarrier().ptr(), \
nullptr) \
V(CodePtr, fix_callers_target_code_, StubCode::FixCallersTarget().ptr(), \
nullptr) \
V(CodePtr, fix_allocation_stub_code_, \