[vm] Faster double.toInt() in AOT mode

double.toInt() micro-benchmark on AOT/x64:
Before: BenchToInt(RunTime): 438.67258771929824 us.
After:  BenchToInt(RunTime): 118.8603434955726 us.

double.floor() micro-benchmark on AOT/x64:
Before: BenchFloor(RunTime): 537.2132688691916 us.
After:  BenchFloor(RunTime): 321.2052352657781 us.

TEST=ci
Issue https://github.com/dart-lang/sdk/issues/46876
Issue https://github.com/dart-lang/sdk/issues/46650

Change-Id: Id37c827bceb7f374ae5b91b36871ccf0d9e92441
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/211620
Reviewed-by: Slava Egorov <vegorov@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Author: Alexander Markov <alexmarkov@google.com>
Committer: commit-bot@chromium.org
Date: 2021-09-01 16:57:59 +00:00
Commit: 307bc3ef2c (parent 4ff04f641b)

38 changed files with 559 additions and 582 deletions
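
Note: the BenchToInt/BenchFloor sources are not included in this change, so the following is only an assumed sketch of a micro-benchmark of that shape (the data, loop count, and reporting format are illustrative):

// Hypothetical micro-benchmark in Dart; the real harness is not part of this CL.
int benchToInt(List<double> values) {
  var sum = 0;
  for (final v in values) {
    sum += v.toInt(); // Operation under test.
  }
  return sum;
}

void main() {
  final values = List<double>.generate(1000, (i) => i * 1.5);
  const rounds = 10000;
  final sw = Stopwatch()..start();
  var sink = 0;
  for (var i = 0; i < rounds; i++) {
    sink += benchToInt(values);
  }
  sw.stop();
  print('BenchToInt(RunTime): ${sw.elapsedMicroseconds / rounds} us. ($sink)');
}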

@@ -67,23 +67,6 @@ DEFINE_NATIVE_ENTRY(Double_div, 0, 2) {
   return Double::New(left / right);
 }
 
-static IntegerPtr DoubleToInteger(double val, const char* error_msg) {
-  if (isinf(val) || isnan(val)) {
-    const Array& args = Array::Handle(Array::New(1));
-    args.SetAt(0, String::Handle(String::New(error_msg)));
-    Exceptions::ThrowByType(Exceptions::kUnsupported, args);
-  }
-  int64_t ival = 0;
-  if (val <= static_cast<double>(kMinInt64)) {
-    ival = kMinInt64;
-  } else if (val >= static_cast<double>(kMaxInt64)) {
-    ival = kMaxInt64;
-  } else {  // Representable in int64_t.
-    ival = static_cast<int64_t>(val);
-  }
-  return Integer::New(ival);
-}
-
 DEFINE_NATIVE_ENTRY(Double_hashCode, 0, 1) {
   double val = Double::CheckedHandle(zone, arguments->NativeArgAt(0)).value();
   if (FLAG_trace_intrinsified_natives) {
@@ -176,7 +159,7 @@ DEFINE_NATIVE_ENTRY(Double_truncate, 0, 1) {
 
 DEFINE_NATIVE_ENTRY(Double_toInt, 0, 1) {
   const Double& arg = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
-  return DoubleToInteger(arg.value(), "Infinity or NaN toInt");
+  return DoubleToInteger(zone, arg.value());
 }
 
 DEFINE_NATIVE_ENTRY(Double_parse, 0, 3) {

@@ -992,31 +992,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
   }
 }
 
-void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
-                                      Label* normal_ir_body) {
-  if (TargetCPUFeatures::vfp_supported()) {
-    Label fall_through;
-
-    __ ldr(R0, Address(SP, 0 * target::kWordSize));
-    __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
-
-    // Explicit NaN check, since ARM gives an FPU exception if you try to
-    // convert NaN to an int.
-    __ vcmpd(D0, D0);
-    __ vmstat();
-    __ b(normal_ir_body, VS);
-
-    __ vcvtid(S0, D0);
-    __ vmovrs(R0, S0);
-    // Overflow is signaled with minint.
-    // Check for overflow and that it fits into Smi.
-    __ CompareImmediate(R0, 0xC0000000);
-    __ SmiTag(R0, PL);
-    READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, PL));
-    __ Bind(normal_ir_body);
-  }
-}
-
 void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
   // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

@@ -1133,35 +1133,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
   __ ret();
 }
 
-void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
-                                      Label* normal_ir_body) {
-  __ ldr(R0, Address(SP, 0 * target::kWordSize));
-  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
-
-  // Explicit NaN check, since ARM gives an FPU exception if you try to
-  // convert NaN to an int.
-  __ fcmpd(V0, V0);
-  __ b(normal_ir_body, VS);
-
-  __ fcvtzdsx(R0, V0);
-#if !defined(DART_COMPRESSED_POINTERS)
-  // Overflow is signaled with minint.
-  // Check for overflow and that it fits into Smi.
-  __ CompareImmediate(R0, 0xC000000000000000);
-  __ b(normal_ir_body, MI);
-#else
-  // Overflow is signaled with minint.
-  // Check for overflow and that it fits into Smi.
-  __ AsrImmediate(TMP, R0, 30);
-  __ cmp(TMP, Operand(R0, ASR, 63));
-  __ b(normal_ir_body, NE);
-#endif
-  __ SmiTag(R0);
-  __ ret();
-
-  __ Bind(normal_ir_body);
-}
-
 void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
   // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

@@ -1109,20 +1109,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
   __ jmp(&is_false, Assembler::kNearJump);
 }
 
-void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
-                                      Label* normal_ir_body) {
-  __ movl(EAX, Address(ESP, +1 * target::kWordSize));
-  __ movsd(XMM0, FieldAddress(EAX, target::Double::value_offset()));
-  __ cvttsd2si(EAX, XMM0);
-  // Overflow is signalled with minint.
-  // Check for overflow and that it fits into Smi.
-  __ cmpl(EAX, Immediate(0xC0000000));
-  __ j(NEGATIVE, normal_ir_body, Assembler::kNearJump);
-  __ SmiTag(EAX);
-  __ ret();
-  __ Bind(normal_ir_body);
-}
-
 void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
   // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

@@ -1006,21 +1006,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
   __ jmp(&is_false, Assembler::kNearJump);
 }
 
-void AsmIntrinsifier::DoubleToInteger(Assembler* assembler,
-                                      Label* normal_ir_body) {
-  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
-  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
-  __ OBJ(cvttsd2si)(RAX, XMM0);
-  // Overflow is signalled with minint.
-  // Check for overflow and that it fits into Smi.
-  __ movq(RCX, RAX);
-  __ OBJ(shl)(RCX, Immediate(1));
-  __ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
-  __ SmiTag(RAX);
-  __ ret();
-  __ Bind(normal_ir_body);
-}
-
 void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
                                       Label* normal_ir_body) {
   // TODO(dartbug.com/31174): Convert this to a graph intrinsic.

@@ -1040,12 +1040,18 @@ class Assembler : public AssemblerBase {
                        Register base,
                        int32_t offset,
                        Condition cond = AL);
 
   void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
     LoadDFromOffset(EvenDRegisterOf(dst), base, offset);
   }
   void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
     StoreDToOffset(EvenDRegisterOf(src), base, offset);
   }
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+    if (src != dst) {
+      vmovd(EvenDRegisterOf(dst), EvenDRegisterOf(src));
+    }
+  }
+
   void LoadMultipleDFromOffset(DRegister first,
                                intptr_t count,

@@ -1814,6 +1814,11 @@ class Assembler : public AssemblerBase {
   void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
     StoreDToOffset(src, base, offset);
   }
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+    if (src != dst) {
+      fmovdd(dst, src);
+    }
+  }
 
   void LoadCompressed(Register dest, const Address& slot);
   void LoadCompressedFromOffset(Register dest, Register base, int32_t offset);

@@ -654,12 +654,19 @@ class Assembler : public AssemblerBase {
   void StoreMemoryValue(Register src, Register base, int32_t offset) {
     movl(Address(base, offset), src);
   }
 
   void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
     movsd(dst, Address(base, offset));
   }
   void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
     movsd(Address(base, offset), src);
   }
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+    if (src != dst) {
+      movaps(dst, src);
+    }
+  }
 
   void LoadAcquire(Register dst, Register address, int32_t offset = 0) {
     // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
     // with other loads).

@@ -1027,12 +1027,18 @@ class Assembler : public AssemblerBase {
   void StoreMemoryValue(Register src, Register base, int32_t offset) {
     movq(Address(base, offset), src);
   }
 
   void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
     movsd(dst, Address(base, offset));
   }
   void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
     movsd(Address(base, offset), src);
   }
+  void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
+    if (src != dst) {
+      movaps(dst, src);
+    }
+  }
 
 #if defined(USING_THREAD_SANITIZER)
   void TsanLoadAcquire(Address addr);

@@ -771,6 +771,7 @@ void FlowGraphCompiler::GenerateDeferredCode() {
     assembler()->set_lr_state(lr_state);
 #endif  // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
     set_current_instruction(slow_path->instruction());
+    set_current_block(current_instruction_->GetBlock());
     SpecialStatsBegin(stats_tag);
     BeginCodeSourceRange(slow_path->instruction()->source());
     DEBUG_ONLY(current_instruction_ = slow_path->instruction());
@@ -779,6 +780,7 @@ void FlowGraphCompiler::GenerateDeferredCode() {
     EndCodeSourceRange(slow_path->instruction()->source());
     SpecialStatsEnd(stats_tag);
     set_current_instruction(nullptr);
+    set_current_block(nullptr);
   }
 
   // All code generated by deferred deopt info is treated as in the root
   // function.

@@ -298,6 +298,17 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
   const Register result_;
 };
 
+class DoubleToIntegerSlowPath : public TemplateSlowPathCode<Instruction> {
+ public:
+  DoubleToIntegerSlowPath(Instruction* instruction, FpuRegister value_reg)
+      : TemplateSlowPathCode(instruction), value_reg_(value_reg) {}
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler);
+
+ private:
+  FpuRegister value_reg_;
+};
+
 // Slow path code which calls runtime entry to throw an exception.
 class ThrowErrorSlowPathCode : public TemplateSlowPathCode<Instruction> {
  public:

@@ -249,10 +249,7 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
     // In unoptimized code at instruction epilogue the only
     // live register is an output register.
     instr->locs()->live_registers()->Clear();
-    if (value.fpu_reg() != BoxDoubleStubABI::kValueReg) {
-      __ vmovd(EvenDRegisterOf(BoxDoubleStubABI::kValueReg),
-               EvenDRegisterOf(value.fpu_reg()));
-    }
+    __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
     GenerateNonLazyDeoptableStubCall(
         InstructionSource(),  // No token position.
         StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());

@@ -240,9 +240,7 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
     // In unoptimized code at instruction epilogue the only
     // live register is an output register.
    instr->locs()->live_registers()->Clear();
-    if (value.fpu_reg() != BoxDoubleStubABI::kValueReg) {
-      __ fmovdd(BoxDoubleStubABI::kValueReg, value.fpu_reg());
-    }
+    __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
     GenerateNonLazyDeoptableStubCall(
         InstructionSource(),  // No token position.
         StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());

@@ -369,9 +369,7 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
     // In unoptimized code at instruction epilogue the only
     // live register is an output register.
     instr->locs()->live_registers()->Clear();
-    if (value.fpu_reg() != BoxDoubleStubABI::kValueReg) {
-      __ movaps(BoxDoubleStubABI::kValueReg, value.fpu_reg());
-    }
+    __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
     GenerateNonLazyDeoptableStubCall(
         InstructionSource(),  // No token position.
         StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());

@@ -241,9 +241,7 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
     // In unoptimized code at instruction epilogue the only
     // live register is an output register.
     instr->locs()->live_registers()->Clear();
-    if (value.fpu_reg() != BoxDoubleStubABI::kValueReg) {
-      __ movaps(BoxDoubleStubABI::kValueReg, value.fpu_reg());
-    }
+    __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
     GenerateNonLazyDeoptableStubCall(
         InstructionSource(),  // No token position.
         StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());

@@ -5603,6 +5603,29 @@ void BoxAllocationSlowPath::Allocate(FlowGraphCompiler* compiler,
   }
 }
 
+void DoubleToIntegerSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
+  __ Comment("DoubleToIntegerSlowPath");
+  __ Bind(entry_label());
+
+  LocationSummary* locs = instruction()->locs();
+  locs->live_registers()->Remove(locs->out(0));
+
+  compiler->SaveLiveRegisters(locs);
+
+  auto slow_path_env =
+      compiler->SlowPathEnvironmentFor(instruction(), /*num_slow_path_args=*/0);
+
+  __ MoveUnboxedDouble(DoubleToIntegerStubABI::kInputReg, value_reg_);
+  compiler->GenerateStubCall(instruction()->source(),
+                             StubCode::DoubleToInteger(),
+                             UntaggedPcDescriptors::kOther, locs,
+                             instruction()->deopt_id(), slow_path_env);
+  __ MoveRegister(instruction()->locs()->out(0).reg(),
+                  DoubleToIntegerStubABI::kResultReg);
+  compiler->RestoreLiveRegisters(instruction()->locs());
+  __ Jump(exit_label());
+}
+
 void RangeErrorSlowPath::EmitSharedStubCall(FlowGraphCompiler* compiler,
                                             bool save_fpu_registers) {
 #if defined(TARGET_ARCH_IA32)

@@ -8317,31 +8317,39 @@ class Int64ToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
   DISALLOW_COPY_AND_ASSIGN(Int64ToDoubleInstr);
 };
 
-class DoubleToIntegerInstr : public TemplateDefinition<1, Throws> {
+class DoubleToIntegerInstr : public TemplateDefinition<1, Throws, Pure> {
  public:
-  DoubleToIntegerInstr(Value* value, InstanceCallInstr* instance_call)
-      : TemplateDefinition(instance_call->deopt_id()),
-        instance_call_(instance_call) {
+  DoubleToIntegerInstr(Value* value, intptr_t deopt_id)
+      : TemplateDefinition(deopt_id) {
     SetInputAt(0, value);
   }
 
   Value* value() const { return inputs_[0]; }
-  InstanceCallInstr* instance_call() const { return instance_call_; }
 
   DECLARE_INSTRUCTION(DoubleToInteger)
   virtual CompileType ComputeType() const;
 
+  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
+    ASSERT(idx == 0);
+    return kUnboxedDouble;
+  }
+
+  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
+    ASSERT(idx == 0);
+    return kNotSpeculative;
+  }
+
   virtual bool ComputeCanDeoptimize() const {
     return !CompilerState::Current().is_aot();
   }
 
   virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
 
   virtual bool HasUnknownSideEffects() const { return false; }
   virtual bool CanCallDart() const { return true; }
 
+  virtual bool AttributesEqual(const Instruction& other) const { return true; }
+
  private:
-  InstanceCallInstr* instance_call_;
-
   DISALLOW_COPY_AND_ASSIGN(DoubleToIntegerInstr);
 };

@@ -5838,53 +5838,35 @@ LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
-  LocationSummary* result = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
-  result->set_in(0, Location::RegisterLocation(R1));
-  result->set_out(0, Location::RegisterLocation(R0));
+  LocationSummary* result = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
   return result;
 }
 
 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
-  const Register value_obj = locs()->in(0).reg();
-  ASSERT(result == R0);
-  ASSERT(result != value_obj);
-  __ LoadDFromOffset(DTMP, value_obj,
-                     compiler::target::Double::value_offset() - kHeapObjectTag);
+  const DRegister value_double = EvenDRegisterOf(locs()->in(0).fpu_reg());
+  DoubleToIntegerSlowPath* slow_path =
+      new DoubleToIntegerSlowPath(this, locs()->in(0).fpu_reg());
+  compiler->AddSlowPathCode(slow_path);
 
-  compiler::Label done, do_call;
 
   // First check for NaN. Checking for minint after the conversion doesn't work
   // on ARM because vcvtid gives 0 for NaN.
-  __ vcmpd(DTMP, DTMP);
+  __ vcmpd(value_double, value_double);
   __ vmstat();
-  __ b(&do_call, VS);
+  __ b(slow_path->entry_label(), VS);
 
-  __ vcvtid(STMP, DTMP);
+  __ vcvtid(STMP, value_double);
   __ vmovrs(result, STMP);
 
   // Overflow is signaled with minint.
   // Check for overflow and that it fits into Smi.
   __ CompareImmediate(result, 0xC0000000);
-  __ SmiTag(result, PL);
-  __ b(&done, PL);
-
-  __ Bind(&do_call);
-  __ Push(value_obj);
-  ASSERT(instance_call()->HasICData());
-  const ICData& ic_data = *instance_call()->ic_data();
-  ASSERT(ic_data.NumberOfChecksIs(1));
-  const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0));
-  const int kTypeArgsLen = 0;
-  const int kNumberOfArguments = 1;
-  constexpr int kSizeOfArguments = 1;
-  const Array& kNoArgumentNames = Object::null_array();
-  ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
-                          kNoArgumentNames);
-  compiler->GenerateStaticCall(deopt_id(), instance_call()->source(), target,
-                               args_info, locs(), ICData::Handle(),
-                               ICData::kStatic);
-  __ Bind(&done);
+  __ b(slow_path->entry_label(), MI);
+  __ SmiTag(result);
+  __ Bind(slow_path->exit_label());
 }
 
 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,

@@ -4894,57 +4894,41 @@ LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
-  LocationSummary* result = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
-  result->set_in(0, Location::RegisterLocation(R1));
-  result->set_out(0, Location::RegisterLocation(R0));
+  LocationSummary* result = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
   return result;
 }
 
 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
-  const Register value_obj = locs()->in(0).reg();
-  ASSERT(result == R0);
-  ASSERT(result != value_obj);
-  __ LoadDFieldFromOffset(VTMP, value_obj, Double::value_offset());
+  const VRegister value_double = locs()->in(0).fpu_reg();
+
+  DoubleToIntegerSlowPath* slow_path =
+      new DoubleToIntegerSlowPath(this, value_double);
+  compiler->AddSlowPathCode(slow_path);
 
-  compiler::Label do_call, done;
-
   // First check for NaN. Checking for minint after the conversion doesn't work
   // on ARM64 because fcvtzds gives 0 for NaN.
-  __ fcmpd(VTMP, VTMP);
-  __ b(&do_call, VS);
+  __ fcmpd(value_double, value_double);
+  __ b(slow_path->entry_label(), VS);
 
-  __ fcvtzdsx(result, VTMP);
+  __ fcvtzdsx(result, value_double);
+
   // Overflow is signaled with minint.
 #if !defined(DART_COMPRESSED_POINTERS)
   // Check for overflow and that it fits into Smi.
   __ CompareImmediate(result, 0xC000000000000000);
-  __ b(&do_call, MI);
+  __ b(slow_path->entry_label(), MI);
 #else
   // Check for overflow and that it fits into Smi.
   __ AsrImmediate(TMP, result, 30);
   __ cmp(TMP, compiler::Operand(result, ASR, 63));
-  __ b(&do_call, NE);
+  __ b(slow_path->entry_label(), NE);
 #endif
   __ SmiTag(result);
-  __ b(&done);
-
-  __ Bind(&do_call);
-  __ Push(value_obj);
-  ASSERT(instance_call()->HasICData());
-  const ICData& ic_data = *instance_call()->ic_data();
-  ASSERT(ic_data.NumberOfChecksIs(1));
-  const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0));
-  const int kTypeArgsLen = 0;
-  const int kNumberOfArguments = 1;
-  constexpr int kSizeOfArguments = 1;
-  const Array& kNoArgumentNames = Object::null_array();
-  ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
-                          kNoArgumentNames);
-  compiler->GenerateStaticCall(deopt_id(), instance_call()->source(), target,
-                               args_info, locs(), ICData::Handle(),
-                               ICData::kStatic);
-  __ Bind(&done);
+  __ Bind(slow_path->exit_label());
 }
 
 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,

@@ -5010,45 +5010,28 @@ LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 0;
-  LocationSummary* result = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
-  result->set_in(0, Location::RegisterLocation(ECX));
-  result->set_out(0, Location::RegisterLocation(EAX));
+  LocationSummary* result = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
   return result;
 }
 
 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  Register result = locs()->out(0).reg();
-  Register value_obj = locs()->in(0).reg();
-  XmmRegister value_double = FpuTMP;
-  ASSERT(result == EAX);
-  ASSERT(result != value_obj);
-  __ movsd(value_double,
-           compiler::FieldAddress(value_obj, Double::value_offset()));
+  const Register result = locs()->out(0).reg();
+  const XmmRegister value_double = locs()->in(0).fpu_reg();
+
+  DoubleToIntegerSlowPath* slow_path =
+      new DoubleToIntegerSlowPath(this, value_double);
+  compiler->AddSlowPathCode(slow_path);
 
   __ cvttsd2si(result, value_double);
   // Overflow is signalled with minint.
 
-  compiler::Label do_call, done;
   // Check for overflow and that it fits into Smi.
   __ cmpl(result, compiler::Immediate(0xC0000000));
-  __ j(NEGATIVE, &do_call, compiler::Assembler::kNearJump);
+  __ j(NEGATIVE, slow_path->entry_label());
   __ SmiTag(result);
-  __ jmp(&done);
-
-  __ Bind(&do_call);
-  __ pushl(value_obj);
-  ASSERT(instance_call()->HasICData());
-  const ICData& ic_data = *instance_call()->ic_data();
-  ASSERT(ic_data.NumberOfChecksIs(1));
-  const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0));
-  const int kTypeArgsLen = 0;
-  const int kNumberOfArguments = 1;
-  constexpr int kSizeOfArguments = 1;
-  const Array& kNoArgumentNames = Object::null_array();
-  ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
-                          kNoArgumentNames);
-  compiler->GenerateStaticCall(deopt_id(), instance_call()->source(), target,
-                               args_info, locs(), ICData::Handle(),
-                               ICData::kStatic);
-  __ Bind(&done);
+  __ Bind(slow_path->exit_label());
 }
 
 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,

@@ -5202,49 +5202,32 @@ LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
   const intptr_t kNumInputs = 1;
   const intptr_t kNumTemps = 1;
-  LocationSummary* result = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
-  result->set_in(0, Location::RegisterLocation(RCX));
-  result->set_out(0, Location::RegisterLocation(RAX));
-  result->set_temp(0, Location::RegisterLocation(RBX));
+  LocationSummary* result = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
+  result->set_in(0, Location::RequiresFpuRegister());
+  result->set_out(0, Location::RequiresRegister());
+  result->set_temp(0, Location::RequiresRegister());
   return result;
 }
 
 void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  Register result = locs()->out(0).reg();
-  Register value_obj = locs()->in(0).reg();
-  Register temp = locs()->temp(0).reg();
-  XmmRegister value_double = FpuTMP;
-  ASSERT(result == RAX);
-  ASSERT(result != value_obj);
+  const Register result = locs()->out(0).reg();
+  const Register temp = locs()->temp(0).reg();
+  const XmmRegister value_double = locs()->in(0).fpu_reg();
   ASSERT(result != temp);
-  __ movsd(value_double,
-           compiler::FieldAddress(value_obj, Double::value_offset()));
+
+  DoubleToIntegerSlowPath* slow_path =
+      new DoubleToIntegerSlowPath(this, value_double);
+  compiler->AddSlowPathCode(slow_path);
 
   __ OBJ(cvttsd2si)(result, value_double);
   // Overflow is signalled with minint.
-
-  compiler::Label do_call, done;
   // Check for overflow and that it fits into Smi.
   __ movq(temp, result);
   __ OBJ(shl)(temp, compiler::Immediate(1));
-  __ j(OVERFLOW, &do_call, compiler::Assembler::kNearJump);
+  __ j(OVERFLOW, slow_path->entry_label());
   __ SmiTag(result);
-  __ jmp(&done);
-
-  __ Bind(&do_call);
-  __ pushq(value_obj);
-  ASSERT(instance_call()->HasICData());
-  const ICData& ic_data = *instance_call()->ic_data();
-  ASSERT(ic_data.NumberOfChecksIs(1));
-  const Function& target = Function::ZoneHandle(ic_data.GetTargetAt(0));
-  const int kTypeArgsLen = 0;
-  const int kNumberOfArguments = 1;
-  constexpr int kSizeOfArguments = 1;
-  const Array& kNoArgumentNames = Object::null_array();
-  ArgumentsInfo args_info(kTypeArgsLen, kNumberOfArguments, kSizeOfArguments,
-                          kNoArgumentNames);
-  compiler->GenerateStaticCall(deopt_id(), instance_call()->source(), target,
-                               args_info, locs(), ICData::Handle(),
-                               ICData::kStatic);
-  __ Bind(&done);
+  __ Bind(slow_path->exit_label());
 }
 
 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
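
Note on the fast-path check above: cvttsd2si reports overflow (and NaN) by producing the minimum integer, and the shl-by-1/OVERFLOW test then verifies that Smi-tagging the result (one left shift) loses no bits, i.e. that bits 63 and 62 agree. A rough Dart illustration of that predicate, assuming the VM's wrapping 64-bit integer semantics (the helper name is made up):

// Illustration only, not VM code: a value can be tagged as a 63-bit Smi
// iff shifting left by one and arithmetically shifting back round-trips.
bool fitsInSmi(int v) => (v << 1) >> 1 == v;

void main() {
  print(fitsInSmi(0x3FFFFFFFFFFFFFFF)); // true: largest Smi payload
  print(fitsInSmi(-0x4000000000000000)); // true: smallest Smi payload
  print(fitsInSmi(0x4000000000000000)); // false: would take the slow path
}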

@@ -1010,7 +1010,8 @@ bool CallSpecializer::TryInlineInstanceMethod(InstanceCallInstr* call) {
     Definition* d2i_instr = NULL;
     if (ic_data.HasDeoptReason(ICData::kDeoptDoubleToSmi)) {
       // Do not repeatedly deoptimize because result didn't fit into Smi.
-      d2i_instr = new (Z) DoubleToIntegerInstr(new (Z) Value(input), call);
+      d2i_instr = new (Z)
+          DoubleToIntegerInstr(new (Z) Value(input), call->deopt_id());
     } else {
       // Optimistically assume result fits into Smi.
       d2i_instr =

@@ -1213,6 +1213,13 @@ Fragment BaseFlowGraphBuilder::DoubleToDouble(
   return Fragment(instr);
 }
 
+Fragment BaseFlowGraphBuilder::DoubleToInteger() {
+  Value* value = Pop();
+  auto* instr = new (Z) DoubleToIntegerInstr(value, GetNextDeoptId());
+  Push(instr);
+  return Fragment(instr);
+}
+
 Fragment BaseFlowGraphBuilder::MathUnary(MathUnaryInstr::MathUnaryKind kind) {
   Value* value = Pop();
   auto* instr = new (Z) MathUnaryInstr(kind, value, GetNextDeoptId());

@@ -444,6 +444,9 @@ class BaseFlowGraphBuilder {
   // kDoubleFloor or kDoubleCeil).
   Fragment DoubleToDouble(MethodRecognizer::Kind recognized_kind);
 
+  // Pops double value and converts it to int.
+  Fragment DoubleToInteger();
+
   // Pops double value and applies unary math operation.
   Fragment MathUnary(MathUnaryInstr::MathUnaryKind kind);

@@ -892,6 +892,7 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
     case MethodRecognizer::kUtf8DecoderScan:
     case MethodRecognizer::kHas63BitSmis:
       return true;
+    case MethodRecognizer::kDoubleToInteger:
     case MethodRecognizer::kDoubleMod:
     case MethodRecognizer::kDoubleRound:
     case MethodRecognizer::kDoubleTruncate:
@@ -1524,6 +1525,10 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
       body += LoadIndexed(kIntPtrCid);
      body += Box(kUnboxedIntPtr);
    } break;
+    case MethodRecognizer::kDoubleToInteger: {
+      body += LoadLocal(parsed_function_->RawParameterVariable(0));
+      body += DoubleToInteger();
+    } break;
    case MethodRecognizer::kDoubleMod:
    case MethodRecognizer::kDoubleRound:
    case MethodRecognizer::kDoubleTruncate:

@@ -91,6 +91,7 @@ namespace dart {
   V(_Double, ceilToDouble, DoubleCeil, 0x5f1bced9)                           \
   V(_Double, floorToDouble, DoubleFloor, 0x54b4cb48)                         \
   V(_Double, roundToDouble, DoubleRound, 0x5649ca00)                         \
+  V(_Double, toInt, DoubleToInteger, 0x676f20a9)                             \
   V(_Double, truncateToDouble, DoubleTruncate, 0x62d48659)                   \
   V(::, min, MathMin, 0x504a28df)                                            \
   V(::, max, MathMax, 0xead7161a)                                            \
@@ -282,7 +283,6 @@ namespace dart {
   V(_IntegerImplementation, <=, Integer_lessEqualThan, 0xb6764495)           \
   V(_IntegerImplementation, >=, Integer_greaterEqualThan, 0xfecba6b3)        \
   V(_IntegerImplementation, <<, Integer_shl, 0x2d855b02)                     \
-  V(_Double, toInt, DoubleToInteger, 0x676f1ce8)                             \
 
 #define MATH_LIB_INTRINSIC_LIST(V)                                           \
   V(_Random, _nextState, Random_nextState, 0x7207677d)                       \

File diff suppressed because it is too large.

@@ -1043,13 +1043,30 @@ void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
   __ EnterStubFrame();
   __ PushObject(NullObject()); /* Make room for result. */
   __ StoreUnboxedDouble(BoxDoubleStubABI::kValueReg, THR,
-                        Thread::unboxed_double_runtime_arg_offset());
+                        target::Thread::unboxed_double_runtime_arg_offset());
   __ CallRuntime(kBoxDoubleRuntimeEntry, 0);
   __ PopRegister(BoxDoubleStubABI::kResultReg);
   __ LeaveStubFrame();
   __ Ret();
 }
 
+void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
+#if defined(TARGET_ARCH_ARM)
+  if (!TargetCPUFeatures::vfp_supported()) {
+    __ Breakpoint();
+    return;
+  }
+#endif  // defined(TARGET_ARCH_ARM)
+  __ EnterStubFrame();
+  __ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
+                        target::Thread::unboxed_double_runtime_arg_offset());
+  __ PushObject(NullObject()); /* Make room for result. */
+  __ CallRuntime(kDoubleToIntegerRuntimeEntry, 0);
+  __ PopRegister(DoubleToIntegerStubABI::kResultReg);
+  __ LeaveStubFrame();
+  __ Ret();
+}
+
 }  // namespace compiler
 
 }  // namespace dart

@@ -494,6 +494,12 @@ struct BoxDoubleStubABI {
   static const Register kResultReg = R0;
 };
 
+// ABI for DoubleToIntegerStub.
+struct DoubleToIntegerStubABI {
+  static const FpuRegister kInputReg = Q0;
+  static const Register kResultReg = R0;
+};
+
 // ABI for DispatchTableNullErrorStub and consequently for all dispatch
 // table calls (though normal functions will not expect or use this
 // register). This ABI is added to distinguish memory corruption errors from

@@ -334,6 +334,12 @@ struct BoxDoubleStubABI {
   static const Register kResultReg = R0;
 };
 
+// ABI for DoubleToIntegerStub.
+struct DoubleToIntegerStubABI {
+  static const FpuRegister kInputReg = V0;
+  static const Register kResultReg = R0;
+};
+
 // ABI for DispatchTableNullErrorStub and consequently for all dispatch
 // table calls (though normal functions will not expect or use this
 // register). This ABI is added to distinguish memory corruption errors from

@@ -233,6 +233,12 @@ struct BoxDoubleStubABI {
   static const Register kResultReg = EAX;
 };
 
+// ABI for DoubleToIntegerStub.
+struct DoubleToIntegerStubABI {
+  static const FpuRegister kInputReg = XMM0;
+  static const Register kResultReg = EAX;
+};
+
 // ABI for DispatchTableNullErrorStub and consequently for all dispatch
 // table calls (though normal functions will not expect or use this
 // register). This ABI is added to distinguish memory corruption errors from

@@ -306,6 +306,12 @@ struct BoxDoubleStubABI {
   static const Register kResultReg = RAX;
 };
 
+// ABI for DoubleToIntegerStub.
+struct DoubleToIntegerStubABI {
+  static const FpuRegister kInputReg = XMM0;
+  static const Register kResultReg = RAX;
+};
+
 // ABI for DispatchTableNullErrorStub and consequently for all dispatch
 // table calls (though normal functions will not expect or use this
 // register). This ABI is added to distinguish memory corruption errors from

@@ -175,4 +175,21 @@ bool CStringToDouble(const char* str, intptr_t length, double* result) {
   return (parsed_count == length);
 }
 
+IntegerPtr DoubleToInteger(Zone* zone, double val) {
+  if (isinf(val) || isnan(val)) {
+    const Array& args = Array::Handle(zone, Array::New(1));
+    args.SetAt(0, String::Handle(zone, String::New("Infinity or NaN toInt")));
+    Exceptions::ThrowByType(Exceptions::kUnsupported, args);
+  }
+  int64_t ival = 0;
+  if (val <= static_cast<double>(kMinInt64)) {
+    ival = kMinInt64;
+  } else if (val >= static_cast<double>(kMaxInt64)) {
+    ival = kMaxInt64;
+  } else {  // Representable in int64_t.
+    ival = static_cast<int64_t>(val);
+  }
+  return Integer::New(ival);
+}
+
 }  // namespace dart
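
Note: the helper moves here essentially verbatim from runtime/lib/double.cc, so the user-visible VM semantics of double.toInt() are unchanged: truncation toward zero, saturation at the int64 bounds, and an UnsupportedError for NaN and infinities. A quick Dart illustration of those semantics (VM behavior; toInt() on the web platform differs):

void main() {
  print(1.7.toInt()); // 1 (truncates toward zero)
  print((-1.7).toInt()); // -1
  print(1e300.toInt()); // 9223372036854775807 (saturates at kMaxInt64)
  print((-1e300).toInt()); // -9223372036854775808 (saturates at kMinInt64)
  try {
    double.nan.toInt();
  } on UnsupportedError catch (e) {
    print(e); // Unsupported operation: Infinity or NaN toInt
  }
}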

@@ -24,6 +24,8 @@ StringPtr DoubleToStringAsPrecision(double d, int precision);
 
 bool CStringToDouble(const char* str, intptr_t length, double* result);
 
+IntegerPtr DoubleToInteger(Zone* zone, double val);
+
 }  // namespace dart
 
 #endif  // RUNTIME_VM_DOUBLE_CONVERSION_H_

@@ -14,6 +14,7 @@
 #include "vm/dart_api_state.h"
 #include "vm/dart_entry.h"
 #include "vm/debugger.h"
+#include "vm/double_conversion.h"
 #include "vm/exceptions.h"
 #include "vm/flags.h"
 #include "vm/heap/verifier.h"
@@ -286,6 +287,12 @@ DEFINE_RUNTIME_ENTRY(ArgumentErrorUnboxedInt64, 0) {
   Exceptions::ThrowArgumentError(value);
 }
 
+DEFINE_RUNTIME_ENTRY(DoubleToInteger, 0) {
+  // Unboxed value is passed through a dedicated slot in Thread.
+  const double val = arguments.thread()->unboxed_double_runtime_arg();
+  arguments.SetReturn(Integer::Handle(zone, DoubleToInteger(zone, val)));
+}
+
 DEFINE_RUNTIME_ENTRY(IntegerDivisionByZeroException, 0) {
   const Array& args = Array::Handle(zone, Array::New(0));
   Exceptions::ThrowByType(Exceptions::kIntegerDivisionByZeroException, args);

@@ -22,6 +22,7 @@ namespace dart {
   V(BreakpointRuntimeHandler)                                                \
   V(SingleStepHandler)                                                       \
   V(CloneContext)                                                            \
+  V(DoubleToInteger)                                                         \
   V(FixCallersTarget)                                                        \
   V(FixCallersTargetMonomorphic)                                             \
   V(FixAllocationStubTarget)                                                 \

@@ -120,6 +120,7 @@ namespace dart {
   V(RangeErrorSharedWithoutFPURegs)                                          \
   V(StackOverflowSharedWithFPURegs)                                          \
   V(StackOverflowSharedWithoutFPURegs)                                       \
+  V(DoubleToInteger)                                                         \
   V(OneArgCheckInlineCacheWithExactnessCheck)                                \
   V(OneArgOptimizedCheckInlineCacheWithExactnessCheck)                       \
   V(EnterSafepoint)                                                          \

@@ -216,7 +216,8 @@ class _Double implements double {
     return this;
   }
 
-  @pragma("vm:recognized", "asm-intrinsic")
+  @pragma("vm:recognized", "other")
+  @pragma("vm:prefer-inline")
   @pragma("vm:non-nullable-result-type")
   int toInt() native "Double_toInt";
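
Note: this last hunk is also where the BenchFloor win comes from. The VM core library implements the rounding methods by rounding to a double and then deferring to toInt(), roughly along these lines (a paraphrase, not part of this diff; the extension and method names are hypothetical stand-ins):

// Paraphrased pattern only: each rounding operation rounds to a double
// and then calls toInt(), so a faster toInt() also speeds up floor(),
// ceil(), round(), and truncate().
extension RoundingViaToInt on double {
  int floorViaToInt() => floorToDouble().toInt();
  int ceilViaToInt() => ceilToDouble().toInt();
  int truncateViaToInt() => toInt();
}

void main() {
  print(2.7.floorViaToInt()); // 2
  print(2.1.ceilViaToInt()); // 3
  print((-2.7).truncateViaToInt()); // -2
}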