[vm] Replace Double_hashCode native method with graph intrinsic implementation.

Bug: https://github.com/dart-lang/sdk/issues/50265
TEST=ci
Change-Id: Icae87ce3871bb44599e0f1fa19d8becb3a6fcdec
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/264240
Commit-Queue: Alexander Aprelev <aam@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Authored by Alexander Aprelev on 2022-11-17 02:28:37 +00:00, committed by Commit Queue
parent 9b4be3b388
commit a83cfa0990
28 changed files with 1562 additions and 1359 deletions
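
The deleted Double_hashCode native in the first hunk below defines the semantics that the new HashDoubleOp graph intrinsic reproduces: a double that round-trips through int64 hashes like the corresponding integer (via Multiply64Hash), while everything else (including NaN and the infinities) hashes the raw IEEE-754 bits folded into Smi range. A minimal C++ sketch of those semantics — not SDK source; the range literals stand in for kMin/kMaxInt64RepresentableAsDouble and the helper name is hypothetical:

#include <cstdint>
#include <cstring>

uint32_t Multiply64HashSketch(int64_t ivalue);  // sketched after the integers.cc hunk below

int64_t DoubleHashSketch(double val, uint64_t smi_max) {
  // Any double in [-2^63, 2^63) converts to int64 without overflow.
  if (val >= -9223372036854775808.0 && val < 9223372036854775808.0) {
    int64_t ival = static_cast<int64_t>(val);
    if (static_cast<double>(ival) == val) {
      return Multiply64HashSketch(ival);  // integral doubles hash like integers
    }
  }
  // Non-integral doubles, NaN and infinities: fold the IEEE-754 bit pattern.
  uint64_t bits;
  std::memcpy(&bits, &val, sizeof(bits));  // bit_cast<uint64_t>(val)
  return ((bits >> 32) ^ bits) & smi_max;  // smi_max is the platform's kSmiMax
}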

View file

@ -69,23 +69,6 @@ DEFINE_NATIVE_ENTRY(Double_div, 0, 2) {
return Double::New(left / right);
}
DEFINE_NATIVE_ENTRY(Double_hashCode, 0, 1) {
double val = Double::CheckedHandle(zone, arguments->NativeArgAt(0)).value();
if (FLAG_trace_intrinsified_natives) {
OS::PrintErr("Double_hashCode %f\n", val);
}
if ((val >= kMinInt64RepresentableAsDouble) &&
(val <= kMaxInt64RepresentableAsDouble)) {
int64_t ival = static_cast<int64_t>(val);
if (static_cast<double>(ival) == val) {
return Integer::New(Multiply64Hash(ival));
}
}
uint64_t uval = bit_cast<uint64_t>(val);
return Smi::New(((uval >> 32) ^ (uval)) & kSmiMax);
}
DEFINE_NATIVE_ENTRY(Double_modulo, 0, 2) {
double left = Double::CheckedHandle(zone, arguments->NativeArgAt(0)).value();
GET_NON_NULL_NATIVE_ARGUMENT(Double, right_object, arguments->NativeArgAt(1));

View file

@ -272,9 +272,7 @@ DEFINE_NATIVE_ENTRY(Smi_bitLength, 0, 1) {
return Smi::New(result);
}
// Should be kept in sync with
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// Should be kept in sync with il_*.cc EmitHashIntegerCodeSequence
uint32_t Multiply64Hash(int64_t ivalue) {
const uint64_t magic_constant = /*0x1b873593cc9e*/ 0x2d51;
uint64_t value = static_cast<uint64_t>(ivalue);
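
The body of Multiply64Hash is truncated in this hunk. For reference, a hedged C++ sketch of the multiply-and-fold hash that the EmitHashIntegerCodeSequence variants added below compute; the function name is a stand-in, and unsigned __int128 (a GCC/Clang extension) is used only to keep the 128-bit product short:

#include <cstdint>

uint32_t Multiply64HashSketch(int64_t ivalue) {
  const uint64_t magic_constant = 0x2d51;  // truncated from 0x1b873593cc9e2d51
  // 128-bit product of the value and the magic constant.
  unsigned __int128 product =
      static_cast<unsigned __int128>(static_cast<uint64_t>(ivalue)) * magic_constant;
  uint64_t hash = static_cast<uint64_t>(product) ^
                  static_cast<uint64_t>(product >> 64);  // fold hi64 ^ lo64
  hash ^= hash >> 32;                                    // fold down to 32 bits
  return static_cast<uint32_t>(hash & 0x3fffffff);       // fits in a Smi on all targets
}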

View file

@ -0,0 +1,28 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// This complements corelib/double_hash_code_test.dart and verifies hash code
// values of doubles that are not representable as integers.
import 'package:expect/expect.dart';
import 'isolates/fast_object_copy_timeline_test.dart' show slotSize;
void main() {
// In 32-bit and 64-bit compressed-pointer modes, double.hashCode values for
// non-integer doubles differ from those in uncompressed 64-bit mode.
if (slotSize == 4) {
Expect.equals(1072693248, double.infinity.hashCode);
Expect.equals(1048576, double.maxFinite.hashCode);
Expect.equals(1, double.minPositive.hashCode);
Expect.equals(1073217536, double.nan.hashCode);
Expect.equals(1072693248, double.negativeInfinity.hashCode);
} else {
Expect.equals(4607182420946452480, double.infinity.hashCode);
Expect.equals(4607182416653582336, double.maxFinite.hashCode);
Expect.equals(1, double.minPositive.hashCode);
Expect.equals(4609434222908145664, double.nan.hashCode);
Expect.equals(4607182423093936128, double.negativeInfinity.hashCode);
}
}
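
As a sanity check (not part of the test), the 64-bit expectation for double.infinity above follows from the bit-fold used for non-integral doubles, ((bits >> 32) ^ bits) & kSmiMax. A hypothetical standalone C++ check, assuming uncompressed 64-bit Smis (kSmiMax = 0x3FFFFFFFFFFFFFFF):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  const uint64_t kSmiMax64 = 0x3FFFFFFFFFFFFFFFull;
  double inf = std::numeric_limits<double>::infinity();
  uint64_t bits;
  std::memcpy(&bits, &inf, sizeof(bits));             // 0x7FF0000000000000
  uint64_t hash = ((bits >> 32) ^ bits) & kSmiMax64;
  assert(hash == 4607182420946452480ull);             // matches Expect.equals above
  return 0;
}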

View file

@ -0,0 +1,30 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// @dart = 2.9
//
// This complements corelib/double_hash_code_test.dart and verifies hash code
// values of doubles that are not representable as integers.
import 'package:expect/expect.dart';
import 'isolates/fast_object_copy_timeline_test.dart' show slotSize;
void main() {
// In 32-bit and 64-bit compressed-pointer modes, double.hashCode values for
// non-integer doubles differ from those in uncompressed 64-bit mode.
if (slotSize == 4) {
Expect.equals(1072693248, double.infinity.hashCode);
Expect.equals(1048576, double.maxFinite.hashCode);
Expect.equals(1, double.minPositive.hashCode);
Expect.equals(1073217536, double.nan.hashCode);
Expect.equals(1072693248, double.negativeInfinity.hashCode);
} else {
Expect.equals(4607182420946452480, double.infinity.hashCode);
Expect.equals(4607182416653582336, double.maxFinite.hashCode);
Expect.equals(1, double.minPositive.hashCode);
Expect.equals(4609434222908145664, double.nan.hashCode);
Expect.equals(4607182423093936128, double.negativeInfinity.hashCode);
}
}

View file

@ -81,7 +81,6 @@ namespace dart {
V(Developer_postEvent, 2) \
V(Developer_webServerControl, 3) \
V(Developer_reachability_barrier, 0) \
V(Double_hashCode, 1) \
V(Double_getIsNegative, 1) \
V(Double_getIsInfinite, 1) \
V(Double_getIsNaN, 1) \

View file

@ -14,11 +14,6 @@ void AsmIntrinsifier::String_identityHash(Assembler* assembler,
String_getHashCode(assembler, normal_ir_body);
}
void AsmIntrinsifier::Double_identityHash(Assembler* assembler,
Label* normal_ir_body) {
Double_hashCode(assembler, normal_ir_body);
}
void AsmIntrinsifier::RegExp_ExecuteMatch(Assembler* assembler,
Label* normal_ir_body) {
AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(assembler, normal_ir_body,

View file

@ -945,70 +945,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
__ b(&is_false);
}
// Input: tagged integer in R0
// Output: tagged hash code value in R0
// Should be kept in sync with
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// - integers.cc Multiply64Hash
static void Multiply64Hash(Assembler* assembler) {
__ SmiUntag(R0);
__ SignFill(R1, R0); // sign extend R0 to R1
__ LoadImmediate(TMP, compiler::Immediate(0x2d51));
__ umull(TMP, R0, R0, TMP); // (R0:TMP) = R0 * 0x2d51
// (0:0:R0:TMP) is 128-bit product
__ eor(R0, TMP, compiler::Operand(R0));
__ eor(R0, R1, compiler::Operand(R0));
__ AndImmediate(R0, R0, 0x3fffffff);
__ SmiTag(R0);
}
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Load double value and check that it isn't NaN, since ARM gives an
// FPU exception if you try to convert NaN to an int.
Label double_hash;
__ ldr(R1, Address(SP, 0 * target::kWordSize));
__ LoadDFromOffset(D0, R1, target::Double::value_offset() - kHeapObjectTag);
__ vcmpd(D0, D0);
__ vmstat();
__ b(&double_hash, VS);
// Convert double value to signed 32-bit int in R0.
__ vcvtid(S2, D0);
__ vmovrs(R0, S2);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow in the conversion from double to int. Conversion
// overflow is signalled by vcvt through clamping R0 to either
// INT32_MAX or INT32_MIN (saturation).
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ adds(R0, R0, Operand(R0));
__ b(normal_ir_body, VS);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
__ vcvtdi(D1, S2);
__ vcmpd(D0, D1);
__ vmstat();
__ b(&double_hash, NE);
Multiply64Hash(assembler);
__ Ret();
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
__ ldr(R0, FieldAddress(R1, target::Double::value_offset()));
__ ldr(R1, FieldAddress(R1, target::Double::value_offset() + 4));
__ eor(R0, R0, Operand(R1));
__ AndImmediate(R0, R0, target::kSmiMax);
__ SmiTag(R0);
__ Ret();
// Fall into the native C++ implementation.
__ Bind(normal_ir_body);
}
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
Label* normal_ir_body) {
__ ldr(R0, Address(SP, 0 * target::kWordSize));

View file

@ -1100,75 +1100,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
__ ret();
}
// Input: tagged integer in R0
// Output: tagged hash code value in R0
// Should be kept in sync with
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// - integers.cc Multiply64Hash
static void Multiply64Hash(Assembler* assembler) {
__ SmiUntag(R0);
__ LoadImmediate(TMP, compiler::Immediate(0x2d51));
__ mul(R1, TMP, R0);
__ umulh(TMP, TMP, R0);
__ eor(R0, R1, compiler::Operand(TMP));
__ eor(R0, R0, compiler::Operand(R0, LSR, 32));
__ AndImmediate(R0, R0, 0x3fffffff);
__ SmiTag(R0);
}
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Load double value and check that it isn't NaN, since ARM gives an
// FPU exception if you try to convert NaN to an int.
Label double_hash;
__ ldr(R1, Address(SP, 0 * target::kWordSize));
__ LoadDFieldFromOffset(V0, R1, target::Double::value_offset());
__ fcmpd(V0, V0);
__ b(&double_hash, VS);
#if !defined(DART_COMPRESSED_POINTERS)
// Convert double value to signed 64-bit int in R0 and back to a
// double value in V1.
__ fcvtzsxd(R0, V0);
__ scvtfdx(V1, R0);
#else
// Convert double value to signed 32-bit int in R0 and back to a
// double value in V1.
__ fcvtzswd(R0, V0);
__ scvtfdw(V1, R0);
#endif
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow in the conversion from double to int. Conversion
// overflow is signalled by fcvt through clamping R0 to either
// INT64_MAX or INT64_MIN (saturation).
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ adds(R0, R0, Operand(R0), kObjectBytes);
__ b(normal_ir_body, VS);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
__ fcmpd(V0, V1);
__ b(&double_hash, NE);
Multiply64Hash(assembler);
__ ret();
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
__ fmovrd(R0, V0);
__ eor(R0, R0, Operand(R0, LSR, 32));
__ AndImmediate(R0, R0, target::kSmiMax);
__ SmiTag(R0);
__ ret();
// Fall into the native C++ implementation.
__ Bind(normal_ir_body);
}
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
Label* normal_ir_body) {
__ ldr(R0, Address(SP, 0 * target::kWordSize));

View file

@ -1073,70 +1073,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
__ jmp(&is_false, Assembler::kNearJump);
}
// Input: tagged integer in EAX
// Output: tagged hash code value in EAX
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// - integers.cc Multiply64Hash
static void Multiply64Hash(Assembler* assembler) {
__ SmiUntag(EAX);
__ cdq(); // sign-extend EAX to EDX
__ movl(ECX, EDX); // save "value_hi" in ECX
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(EDX); // (EDX:EAX) = value_lo * 0x2d51
__ movl(EBX, EAX); // save lo32 in EBX
__ movl(EAX, ECX); // get saved value_hi
__ movl(ECX, EDX); // save hi32 in ECX
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(EDX); // (EDX:EAX) = value_hi * 0x2d51
__ addl(EAX, ECX); // EAX has prod_hi32, EDX has prod_hi64_lo32
__ xorl(EAX, EDX); // EAX = prod_hi32 ^ prod_hi64_lo32
__ xorl(EAX, EBX); // result = prod_hi32 ^ prod_hi64_lo32 ^ prod_lo32
__ andl(EAX, compiler::Immediate(0x3fffffff));
__ SmiTag(EAX);
}
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Convert double value to signed 32-bit int in EAX and
// back to a double in XMM1.
__ movl(ECX, Address(ESP, +1 * target::kWordSize));
__ movsd(XMM0, FieldAddress(ECX, target::Double::value_offset()));
__ cvttsd2si(EAX, XMM0);
__ cvtsi2sd(XMM1, EAX);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow and NaN in the conversion from double to int. Conversion
// overflow from cvttsd2si is signalled with an INT32_MIN value.
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ addl(EAX, EAX);
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
Label double_hash;
__ comisd(XMM0, XMM1);
__ j(NOT_EQUAL, &double_hash, Assembler::kNearJump);
Multiply64Hash(assembler);
__ ret();
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
__ movl(EAX, FieldAddress(ECX, target::Double::value_offset()));
__ movl(ECX, FieldAddress(ECX, target::Double::value_offset() + 4));
__ xorl(EAX, ECX);
__ andl(EAX, Immediate(target::kSmiMax));
__ SmiTag(EAX);
__ ret();
// Fall into the native C++ implementation.
__ Bind(normal_ir_body);
}
// Identity comparison.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
Label* normal_ir_body) {

View file

@ -1128,90 +1128,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
kFClassNegSubnormal | kFClassNegZero);
}
// Input: untagged integer in A1
// Output: tagged hash code value in A0
// Should be kept in sync with
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// - integers.cc Multiply64Hash
static void Multiply64Hash(Assembler* assembler) {
#if XLEN == 32
__ srai(A0, A1, 31); // sign extend A1 to A0
__ LoadImmediate(TMP, 0x2d51);
__ mulhu(A2, A1, TMP);
__ mul(A1, A1, TMP); // (A2:A1) = lo32 * 0x2d51
__ mulhu(TMP2, A0, TMP);
__ mul(A0, A0, TMP); // (TMP2:A0) = hi32 * 0x2d51
__ add(A0, A0, A2); // (0: TMP2: A0: A1)
__ xor_(TMP2, TMP2, A1);
__ xor_(A0, A0, TMP2);
#else
__ LoadImmediate(TMP, 0x2d51);
__ mul(A0, TMP, A1);
__ mulhu(TMP, TMP, A1);
__ xor_(A0, A0, TMP);
__ srai(A1, A0, 32);
__ xor_(A0, A0, A1);
#endif
__ AndImmediate(A0, A0, 0x3fffffff);
__ SmiTag(A0);
}
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
Label* normal_ir_body) {
Label double_hash;
__ lx(A0, Address(SP, 0 * target::kWordSize));
__ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
#if XLEN == 32
__ fcvtwd(A1, FA0);
__ fcvtdw(FA1, A1);
// Ensure value in Smi range
__ SmiTag(TMP, A1);
__ SmiUntag(TMP2, TMP);
__ bne(TMP2, A1, normal_ir_body, Assembler::kNearJump);
__ feqd(TMP, FA0, FA1);
__ beqz(TMP, &double_hash, Assembler::kNearJump); // Not integer.
#else
__ fcvtld(A1, FA0);
__ fcvtdl(FA1, A1);
__ feqd(TMP, FA0, FA1);
__ beqz(TMP, &double_hash, Assembler::kNearJump); // Not integer.
// Ensure value in Smi range
__ SmiTag(TMP, A1);
__ SmiUntag(TMP2, TMP);
__ bne(TMP2, A1, normal_ir_body, Assembler::kNearJump);
#endif
Multiply64Hash(assembler);
__ ret();
__ Bind(&double_hash);
#if XLEN == 32
__ lx(A0, Address(SP, 0 * target::kWordSize));
__ lw(A1, Address(A0, target::Double::value_offset() + 4));
__ lw(A0, Address(A0, target::Double::value_offset() + 0));
#else
__ fmvxd(A0, FA0);
__ srli(A1, A0, 32);
#endif
__ xor_(A0, A0, A1);
__ AndImmediate(A0, A0, target::kSmiMax);
__ SmiTag(A0);
__ ret();
__ Bind(normal_ir_body);
}
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
Label* normal_ir_body) {
Label true_label;

View file

@ -972,65 +972,6 @@ void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
__ jmp(&is_false, Assembler::kNearJump);
}
// Input: tagged integer in RAX
// Output: tagged hash code value in RAX
// Should be kept in sync with
// - il_(x64/arm64/...).cc HashIntegerOpInstr,
// - asm_intrinsifier(...).cc Multiply64Hash
// - integers.cc Multiply64Hash
static void Multiply64Hash(Assembler* assembler) {
__ SmiUntagAndSignExtend(RAX);
__ movq(RDX, Immediate(0x2d51));
__ mulq(RDX);
__ xorq(RAX, RDX);
__ movq(RDX, RAX);
__ shrq(RDX, Immediate(32));
__ xorq(RAX, RDX);
__ andq(RAX, Immediate(0x3fffffff));
__ SmiTag(RAX);
}
void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
Label* normal_ir_body) {
// TODO(dartbug.com/31174): Convert this to a graph intrinsic.
// Convert double value to signed 64-bit int in RAX and
// back to a double in XMM1.
__ movq(RCX, Address(RSP, +1 * target::kWordSize));
__ movsd(XMM0, FieldAddress(RCX, target::Double::value_offset()));
__ OBJ(cvttsd2si)(RAX, XMM0);
__ OBJ(cvtsi2sd)(XMM1, RAX);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow and NaN in the conversion from double to int. Conversion
// overflow from cvttsd2si is signalled with an INT64_MIN value.
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ OBJ(add)(RAX, RAX);
__ j(OVERFLOW, normal_ir_body, Assembler::kNearJump);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
Label double_hash;
__ comisd(XMM0, XMM1);
__ j(NOT_EQUAL, &double_hash, Assembler::kNearJump);
Multiply64Hash(assembler);
__ ret();
// Convert the double bits to a hash code that fits in a Smi.
__ Bind(&double_hash);
__ movq(RAX, FieldAddress(RCX, target::Double::value_offset()));
__ movq(RCX, RAX);
__ shrq(RCX, Immediate(32));
__ xorq(RAX, RCX);
__ andq(RAX, Immediate(target::kSmiMax));
__ SmiTag(RAX);
__ ret();
// Fall into the native C++ implementation.
__ Bind(normal_ir_body);
}
// Identity comparison.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
Label* normal_ir_body) {

View file

@ -1209,6 +1209,17 @@ void ConstantPropagator::VisitUnboxInt64(UnboxInt64Instr* instr) {
VisitUnbox(instr);
}
void ConstantPropagator::VisitHashDoubleOp(HashDoubleOpInstr* instr) {
const Object& value = instr->value()->definition()->constant_value();
if (IsUnknown(value)) {
return;
}
if (value.IsDouble()) {
// TODO(aam): Add constant hash evaluation
}
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitHashIntegerOp(HashIntegerOpInstr* instr) {
const Object& value = instr->value()->definition()->constant_value();
if (IsUnknown(value)) {

View file

@ -470,6 +470,7 @@ struct InstrAttrs {
M(CloneContext, _) \
M(BinarySmiOp, kNoGC) \
M(BinaryInt32Op, kNoGC) \
M(HashDoubleOp, kNoGC) \
M(HashIntegerOp, kNoGC) \
M(UnarySmiOp, kNoGC) \
M(UnaryDoubleOp, kNoGC) \
@ -8446,6 +8447,46 @@ class DoubleTestOpInstr : public TemplateComparison<1, NoThrow, Pure> {
DISALLOW_COPY_AND_ASSIGN(DoubleTestOpInstr);
};
class HashDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
HashDoubleOpInstr(Value* value, intptr_t deopt_id)
: TemplateDefinition(deopt_id) {
SetInputAt(0, value);
}
static HashDoubleOpInstr* Create(Value* value, intptr_t deopt_id) {
return new HashDoubleOpInstr(value, deopt_id);
}
Value* value() const { return inputs_[0]; }
virtual intptr_t DeoptimizationTarget() const {
// Direct access since this instruction cannot deoptimize, and the deopt-id
// was inherited from another instruction that could deoptimize.
return GetDeoptId();
}
virtual Representation representation() const { return kUnboxedInt64; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return kUnboxedDouble;
}
DECLARE_INSTRUCTION(HashDoubleOp)
virtual bool ComputeCanDeoptimize() const { return false; }
virtual CompileType ComputeType() const { return CompileType::Smi(); }
virtual bool AttributesEqual(const Instruction& other) const { return true; }
DECLARE_EMPTY_SERIALIZATION(HashDoubleOpInstr, TemplateDefinition)
private:
DISALLOW_COPY_AND_ASSIGN(HashDoubleOpInstr);
};
class HashIntegerOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
HashIntegerOpInstr(Value* value, bool smi, intptr_t deopt_id)

View file

@ -5862,6 +5862,118 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
// Should be kept in sync with integers.cc Multiply64Hash
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
const Register result,
const Register value_lo,
const Register value_hi) {
__ LoadImmediate(TMP, compiler::Immediate(0x2d51));
__ umull(result, value_lo, value_lo, TMP); // (lo:result) = lo32 * 0x2d51
__ umull(TMP, value_hi, value_hi, TMP); // (hi:TMP) = hi32 * 0x2d51
__ add(TMP, TMP, compiler::Operand(value_lo));
// (0:hi:TMP:result) is 128-bit product
__ eor(result, value_hi, compiler::Operand(result));
__ eor(result, TMP, compiler::Operand(result));
__ AndImmediate(result, result, 0x3fffffff);
}
LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 4;
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RegisterLocation(R1));
summary->set_temp(2, Location::RequiresFpuRegister());
summary->set_temp(3, Location::RegisterLocation(R4));
summary->set_out(0, Location::Pair(Location::RegisterLocation(R0),
Location::RegisterLocation(R1)));
return summary;
}
void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
const Register temp = locs()->temp(0).reg();
const Register temp1 = locs()->temp(1).reg();
ASSERT(temp1 == R1);
const DRegister temp_double = EvenDRegisterOf(locs()->temp(2).fpu_reg());
ASSERT(locs()->temp(3).reg() == R4);
const PairLocation* out_pair = locs()->out(0).AsPairLocation();
Register result = out_pair->At(0).reg();
ASSERT(result == R0);
ASSERT(out_pair->At(1).reg() == R1);
compiler::Label hash_double, hash_double_value, try_convert;
__ vmovrrd(TMP, temp, value);
__ AndImmediate(temp, temp, 0x7FF00000);
__ CompareImmediate(temp, 0x7FF00000);
__ b(&hash_double_value, EQ); // is_infinity or nan
compiler::Label slow_path;
__ Bind(&try_convert);
// value -> temp1 -> temp_double
__ vcvtid(STMP, value);
__ vmovrs(temp1, STMP);
// Check whether temp1 is INT32_MAX or INT32_MIN, which indicates a failed vcvt
__ CompareImmediate(temp1, 0xC0000000);
__ b(&slow_path, MI);
__ vmovdr(DTMP, 0, temp1);
__ vcvtdi(temp_double, STMP);
// If value != temp_double, then go to hash_double_value
__ vcmpd(value, temp_double);
__ vmstat();
__ b(&hash_double_value, NE);
// Sign-extend 32-bit [temp1] value to 64-bit pair of (temp:temp1), which
// is used by integer hash code sequence.
__ SignFill(temp, temp1);
compiler::Label hash_integer, done;
{
__ Bind(&hash_integer);
// integer hash of (temp:temp1)
EmitHashIntegerCodeSequence(compiler, result, temp1, temp);
__ b(&done);
}
__ Bind(&slow_path);
// The double value potentially doesn't fit into the Smi range, so do the
// double->int64->double conversion via a runtime call.
__ StoreDToOffset(value, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
{
compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
/*preserve_registers=*/true);
__ mov(R0, compiler::Operand(THR));
// Check if the double can be represented as an int64; if it can, the runtime
// call stores it in the thread's unboxed runtime arg slot, which is read back
// into (temp:temp1) below.
rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
__ mov(R4, compiler::Operand(R0));
}
__ LoadFromOffset(temp1, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ LoadFromOffset(temp, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
__ cmp(R4, compiler::Operand(0));
__ b(&hash_integer, NE);
__ b(&hash_double);
__ Bind(&hash_double_value);
__ vmovrrd(temp, temp1, value);
__ Bind(&hash_double);
// Convert the double bits (temp:temp1) to a hash code that fits in a Smi.
__ eor(result, temp1, compiler::Operand(temp));
__ AndImmediate(result, result, compiler::target::kSmiMax);
__ Bind(&done);
__ mov(R1, compiler::Operand(0));
}
LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5874,10 +5986,6 @@ LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
return summary;
}
// Should be kept in sync with
// - asm_intrinsifier_x64.cc Multiply64Hash
// - integers.cc Multiply64Hash
// - integers.dart computeHashCode
void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
@ -5891,18 +5999,7 @@ void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Mint::value_offset() + compiler::target::kWordSize);
__ LoadFieldFromOffset(value, value, Mint::value_offset());
}
Register value_lo = value;
Register value_hi = temp;
__ LoadImmediate(TMP, compiler::Immediate(0x2d51));
__ umull(result, value_lo, value_lo, TMP); // (lo:result) = lo32 * 0x2d51
__ umull(TMP, value_hi, value_hi, TMP); // (hi:TMP) = hi32 * 0x2d51
__ add(TMP, TMP, compiler::Operand(value_lo));
// (0:hi:TMP:result) is 128-bit product
__ eor(result, value_hi, compiler::Operand(result));
__ eor(result, TMP, compiler::Operand(result));
__ AndImmediate(result, result, 0x3fffffff);
EmitHashIntegerCodeSequence(compiler, result, value, temp);
__ SmiTag(result);
}

View file

@ -4976,6 +4976,60 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
// Should be kept in sync with integers.cc Multiply64Hash
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
const Register value,
const Register result) {
ASSERT(value != TMP2);
ASSERT(result != TMP2);
ASSERT(value != result);
__ LoadImmediate(TMP2, compiler::Immediate(0x2d51));
__ mul(result, value, TMP2);
__ umulh(value, value, TMP2);
__ eor(result, result, compiler::Operand(value));
__ eor(result, result, compiler::Operand(result, LSR, 32));
}
LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const VRegister value = locs()->in(0).fpu_reg();
const VRegister temp_double = locs()->temp(0).fpu_reg();
const Register result = locs()->out(0).reg();
compiler::Label done, hash_double;
__ vmovrd(TMP, value, 0);
__ AndImmediate(TMP, TMP, 0x7FF0000000000000LL);
__ CompareImmediate(TMP, 0x7FF0000000000000LL);
__ b(&hash_double, EQ); // is_infinity or nan
__ fcvtzsxd(TMP, value);
__ scvtfdx(temp_double, TMP);
__ fcmpd(temp_double, value);
__ b(&hash_double, NE);
EmitHashIntegerCodeSequence(compiler, TMP, result);
__ AndImmediate(result, result, 0x3fffffff);
__ b(&done);
__ Bind(&hash_double);
__ fmovrd(result, value);
__ eor(result, result, compiler::Operand(result, LSR, 32));
__ AndImmediate(result, result, compiler::target::kSmiMax);
__ Bind(&done);
}
LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -4987,25 +5041,17 @@ LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
return summary;
}
// Should be kept in sync with
// - asm_intrinsifier_x64.cc Multiply64Hash
// - integers.cc Multiply64Hash
// - integers.dart computeHashCode
void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (smi_) {
__ SmiUntag(TMP2, value);
__ SmiUntag(TMP, value);
} else {
__ LoadFieldFromOffset(TMP2, value, Mint::value_offset());
__ LoadFieldFromOffset(TMP, value, Mint::value_offset());
}
__ LoadImmediate(TMP, compiler::Immediate(0x2d51));
__ mul(result, TMP, TMP2);
__ umulh(TMP, TMP, TMP2);
__ eor(result, result, compiler::Operand(TMP));
__ eor(result, result, compiler::Operand(result, LSR, 32));
EmitHashIntegerCodeSequence(compiler, TMP, result);
__ ubfm(result, result, 63, 29); // SmiTag(result & 0x3fffffff)
}

View file

@ -5065,6 +5065,115 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiTag(EDX);
}
// Should be kept in sync with integers.cc Multiply64Hash
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
const Register value_lo,
const Register value_hi,
const Register temp) {
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(EDX); // EAX = lo32(value_lo*0x2d51), EDX = carry(value_lo * 0x2d51)
__ movl(temp, EAX); // save prod_lo32
__ movl(EAX, value_hi); // get saved value_hi
__ movl(value_hi, EDX); // save carry
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(EDX); // EAX = lo32(value_hi * 0x2d51), EDX = carry(value_hi * 0x2d51)
__ addl(EAX, value_hi); // EAX has prod_hi32, EDX has prod_hi64_lo32
__ xorl(EAX, EDX); // EAX = prod_hi32 ^ prod_hi64_lo32
__ xorl(EAX, temp); // result = prod_hi32 ^ prod_hi64_lo32 ^ prod_lo32
__ andl(EAX, compiler::Immediate(0x3fffffff));
}
LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 4;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RegisterLocation(EBX));
summary->set_temp(2, Location::RegisterLocation(EDX));
summary->set_temp(3, Location::RequiresFpuRegister());
summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
Location::RegisterLocation(EDX)));
return summary;
}
void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const XmmRegister value = locs()->in(0).fpu_reg();
const Register temp = locs()->temp(0).reg();
ASSERT(locs()->temp(1).reg() == EBX);
ASSERT(locs()->temp(2).reg() == EDX);
const XmmRegister temp_double = locs()->temp(3).fpu_reg();
PairLocation* result_pair = locs()->out(0).AsPairLocation();
ASSERT(result_pair->At(0).reg() == EAX);
ASSERT(result_pair->At(1).reg() == EDX);
// If either nan or infinity, do hash double
compiler::Label hash_double, try_convert;
// extract high 32-bits out of double value.
__ pextrd(temp, value, compiler::Immediate(1));
__ andl(temp, compiler::Immediate(0x7FF00000));
__ cmpl(temp, compiler::Immediate(0x7FF00000));
__ j(EQUAL, &hash_double); // is infinity or nan
compiler::Label slow_path;
__ Bind(&try_convert);
__ cvttsd2si(EAX, value);
// Overflow is signaled with minint.
__ cmpl(EAX, compiler::Immediate(0x80000000));
__ j(EQUAL, &slow_path);
__ cvtsi2sd(temp_double, EAX);
__ comisd(value, temp_double);
__ j(NOT_EQUAL, &hash_double);
__ cdq(); // sign-extend EAX to EDX
__ movl(temp, EDX);
compiler::Label hash_integer, done;
// integer hash for (temp:EAX)
__ Bind(&hash_integer);
EmitHashIntegerCodeSequence(compiler, EAX, temp, EBX);
__ jmp(&done);
__ Bind(&slow_path);
// The double value potentially doesn't fit into the Smi range, so do the
// double->int64->double conversion via a runtime call.
__ StoreUnboxedDouble(value, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
{
compiler::LeafRuntimeScope rt(
compiler->assembler(),
/*frame_size=*/1 * compiler::target::kWordSize,
/*preserve_registers=*/true);
__ movl(compiler::Address(ESP, 0 * compiler::target::kWordSize), THR);
// Check if the double can be represented as an int64; if it can, the runtime
// call stores it in the thread's unboxed runtime arg slot, which is read back
// into (temp:EAX) below.
rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
__ movl(EBX, EAX); // use non-volatile register to carry value out.
}
__ orl(EBX, EBX);
__ j(ZERO, &hash_double);
__ movl(EAX,
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
__ movl(temp,
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
kWordSize));
__ jmp(&hash_integer);
__ Bind(&hash_double);
__ pextrd(EAX, value, compiler::Immediate(0));
__ pextrd(temp, value, compiler::Immediate(1));
__ xorl(EAX, temp);
__ andl(EAX, compiler::Immediate(compiler::target::kSmiMax));
__ Bind(&done);
__ xorl(EDX, EDX);
}
LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5108,19 +5217,7 @@ void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// return result & 0x3fffffff
// EAX has value_lo
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(
EDX); // EAX = lo32(value_lo * 0x2d51), EDX = carry(value_lo * 0x2d51)
__ movl(temp1, EAX); // save prod_lo32
__ movl(EAX, temp); // get saved value_hi
__ movl(temp, EDX); // save carry
__ movl(EDX, compiler::Immediate(0x2d51));
__ mull(EDX); // EAX = lo32(value_hi * 0x2d51), EDX = carry(value_hi * 0x2d51)
__ addl(EAX, temp); // EAX has prod_hi32, EDX has prod_hi64_lo32
__ xorl(EAX, EDX); // EAX = prod_hi32 ^ prod_hi64_lo32
__ xorl(EAX, temp1); // result = prod_hi32 ^ prod_hi64_lo32 ^ prod_lo32
__ andl(EAX, compiler::Immediate(0x3fffffff));
EmitHashIntegerCodeSequence(compiler, EAX, temp, temp1);
__ SmiTag(EAX);
}

View file

@ -5144,6 +5144,169 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// Should be kept in sync with integers.cc Multiply64Hash
#if XLEN == 32
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
const Register value_lo,
const Register value_hi,
const Register result) {
ASSERT(value_lo != TMP);
ASSERT(value_lo != TMP2);
ASSERT(value_hi != TMP);
ASSERT(value_hi != TMP2);
ASSERT(result != TMP);
ASSERT(result != TMP2);
__ LoadImmediate(TMP, 0x2d51);
// (value_hi:value_lo) * (0:TMP) =
// value_lo * TMP + (value_hi * TMP) * 2^32 =
// lo32(value_lo * TMP) +
// (hi32(value_lo * TMP) + lo32(value_hi * TMP)) * 2^32 +
// hi32(value_hi * TMP) * 2^64
__ mulhu(TMP2, value_lo, TMP);
__ mul(result, value_lo, TMP); // (TMP2:result) = lo32 * 0x2d51
__ mulhu(value_lo, value_hi, TMP);
__ mul(TMP, value_hi, TMP); // (value_lo:TMP) = hi32 * 0x2d51
__ add(TMP, TMP, TMP2);
// (0:value_lo:TMP:result) is 128-bit product
__ xor_(result, value_lo, result);
__ xor_(result, TMP, result);
__ AndImmediate(result, result, 0x3fffffff);
}
#else
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
const Register value,
const Register result) {
ASSERT(value != TMP);
ASSERT(result != TMP);
__ LoadImmediate(TMP, 0x2d51);
__ mul(result, TMP, value);
__ mulhu(TMP, TMP, value);
__ xor_(result, result, TMP);
__ srai(TMP, result, 32);
__ xor_(result, result, TMP);
__ AndImmediate(result, result, 0x3fffffff);
}
#endif
LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 3;
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, Location::RequiresFpuRegister());
#if XLEN == 32
summary->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
#else
summary->set_out(0, Location::RequiresRegister());
#endif
return summary;
}
void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const FpuRegister value = locs()->in(0).fpu_reg();
#if XLEN == 32
const PairLocation* out_pair = locs()->out(0).AsPairLocation();
const Register result = out_pair->At(0).reg();
const Register result_hi = out_pair->At(1).reg();
#else
const Register result = locs()->out(0).reg();
#endif
const Register temp = locs()->temp(0).reg();
const Register temp1 = locs()->temp(1).reg();
const FpuRegister temp_double = locs()->temp(2).fpu_reg();
compiler::Label hash_double, hash_double_value, hash_integer;
compiler::Label slow_path, done;
__ fclassd(temp, value);
__ TestImmediate(temp, kFClassSignallingNan | kFClassQuietNan |
kFClassNegInfinity | kFClassPosInfinity);
__ BranchIf(NOT_ZERO, &hash_double_value);
#if XLEN == 32
__ fcvtwd(temp1, value, RTZ);
__ fcvtdw(temp_double, temp1);
#else
__ fcvtld(temp1, value, RTZ);
__ fcvtdl(temp_double, temp1);
#endif
__ feqd(temp, value, temp_double);
__ CompareImmediate(temp, 1);
__ BranchIf(NE, &hash_double_value);
#if XLEN == 32
// integer hash of (0:temp1)
__ srai(temp, temp1, XLEN - 1); // SignFill
__ Bind(&hash_integer);
// integer hash of (temp, temp1)
EmitHashIntegerCodeSequence(compiler, temp1, temp, result);
#else
// integer hash of temp1
__ Bind(&hash_integer);
EmitHashIntegerCodeSequence(compiler, temp1, result);
#endif
__ j(&done);
__ Bind(&slow_path);
// The double value potentially doesn't fit into the Smi range, so do the
// double->int64->double conversion via a runtime call.
__ StoreDToOffset(value, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
{
compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
/*preserve_registers=*/true);
__ mv(A0, THR);
// Check if the double can be represented as an int64; if it can, the runtime
// call stores it in the thread's unboxed runtime arg slot.
rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
__ mv(TMP, A0);
}
#if XLEN == 32
__ LoadFromOffset(temp1, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ LoadFromOffset(temp, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
#else
__ fmvxd(temp1, value);
__ srli(temp, temp1, 32);
#endif
__ CompareImmediate(TMP, 0);
__ BranchIf(NE, &hash_integer);
__ j(&hash_double);
#if XLEN == 32
__ Bind(&hash_double_value);
__ StoreDToOffset(value, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ LoadFromOffset(temp1, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ LoadFromOffset(temp, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
#else
__ Bind(&hash_double_value);
__ fmvxd(temp1, value);
__ srli(temp, temp1, 32);
#endif
// double hi/lo words are in (temp:temp1)
__ Bind(&hash_double);
__ xor_(result, temp1, temp);
__ AndImmediate(result, result, compiler::target::kSmiMax);
__ Bind(&done);
#if XLEN == 32
__ xor_(result_hi, result_hi, result_hi);
#endif
}
LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5162,10 +5325,6 @@ LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
return summary;
}
// Should be kept in sync with
// - asm_intrinsifier_x64.cc Multiply64Hash
// - integers.cc Multiply64Hash
// - integers.dart computeHashCode
void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register result = locs()->out(0).reg();
Register value = locs()->in(0).reg();
@ -5181,38 +5340,15 @@ void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Mint::value_offset() + compiler::target::kWordSize);
__ LoadFieldFromOffset(value, value, Mint::value_offset());
}
Register value_lo = value;
__ LoadImmediate(TMP, 0x2d51);
// (value_hi:value_lo) * (0:TMP) =
// value_lo * TMP + (value_hi * TMP) * 2^32 =
// lo32(value_lo * TMP) +
// (hi32(value_lo * TMP) + lo32(value_hi * TMP)) * 2^32 +
// hi32(value_hi * TMP) * 2^64
__ mulhu(TMP2, value_lo, TMP);
__ mul(result, value_lo, TMP); // (TMP2:result) = lo32 * 0x2d51
__ mulhu(value_lo, value_hi, TMP);
__ mul(TMP, value_hi, TMP); // (value_lo:TMP) = hi32 * 0x2d51
__ add(TMP, TMP, TMP2);
// (0:value_lo:TMP:result) is 128-bit product
__ xor_(result, value_lo, result);
__ xor_(result, TMP, result);
EmitHashIntegerCodeSequence(compiler, value, value_hi, result);
#else
if (smi_) {
__ SmiUntag(value);
} else {
__ LoadFieldFromOffset(value, value, Mint::value_offset());
}
__ LoadImmediate(TMP, 0x2d51);
__ mul(result, TMP, value);
__ mulhu(TMP, TMP, value);
__ xor_(result, result, TMP);
__ srai(TMP, result, 32);
__ xor_(result, result, TMP);
EmitHashIntegerCodeSequence(compiler, value, result);
#endif
__ AndImmediate(result, result, 0x3fffffff);
__ SmiTag(result);
}

View file

@ -5317,6 +5317,61 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// in-range arguments, cannot create out-of-range result.
}
// Should be kept in sync with integers.cc Multiply64Hash
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler) {
__ movq(RDX, compiler::Immediate(0x2d51));
__ mulq(RDX);
__ xorq(RAX, RDX); // RAX = xor(hi64, lo64)
__ movq(RDX, RAX);
__ shrq(RDX, compiler::Immediate(32));
__ xorq(RAX, RDX);
__ andq(RAX, compiler::Immediate(0x3fffffff));
}
LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 2;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RegisterLocation(RDX));
summary->set_temp(1, Location::RequiresFpuRegister());
summary->set_out(0, Location::RegisterLocation(RAX));
return summary;
}
void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const XmmRegister value = locs()->in(0).fpu_reg();
ASSERT(locs()->out(0).reg() == RAX);
ASSERT(locs()->temp(0).reg() == RDX);
const FpuRegister temp_fpu_reg = locs()->temp(1).fpu_reg();
compiler::Label hash_double;
__ cvttsd2siq(RAX, value);
__ cvtsi2sdq(temp_fpu_reg, RAX);
__ comisd(value, temp_fpu_reg);
__ j(PARITY_EVEN, &hash_double); // one of the arguments is NaN
__ j(NOT_EQUAL, &hash_double);
// RAX has int64 value
EmitHashIntegerCodeSequence(compiler);
compiler::Label done;
__ jmp(&done);
__ Bind(&hash_double);
// Convert the double bits to a hash code that fits in a Smi.
__ movq(RAX, value);
__ movq(RDX, RAX);
__ shrq(RDX, compiler::Immediate(32));
__ xorq(RAX, RDX);
__ andq(RAX, compiler::Immediate(compiler::target::kSmiMax));
__ Bind(&done);
}
LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5329,10 +5384,6 @@ LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
return summary;
}
// Should be kept in sync with
// - asm_intrinsifier_x64.cc Multiply64Hash
// - integers.cc Multiply64Hash
// - integers.dart computeHashCode
void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
@ -5347,13 +5398,7 @@ void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadFieldFromOffset(RAX, RAX, Mint::value_offset());
}
__ movq(RDX, compiler::Immediate(0x2d51)); // truncated from 0x1b873593cc9e2d51
__ mulq(RDX);
__ xorq(RAX, RDX); // RAX = xor(hi64, lo64)
__ movq(RDX, RAX);
__ shrq(RDX, compiler::Immediate(32));
__ xorq(RAX, RDX);
__ andq(RAX, compiler::Immediate(0x3fffffff));
EmitHashIntegerCodeSequence(compiler);
__ SmiTag(RAX);
}

View file

@ -1022,6 +1022,7 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
case MethodRecognizer::kExtensionStreamHasListener:
case MethodRecognizer::kSmi_hashCode:
case MethodRecognizer::kMint_hashCode:
case MethodRecognizer::kDouble_hashCode:
#define CASE(method, slot) case MethodRecognizer::k##method:
LOAD_NATIVE_FIELD(CASE)
STORE_NATIVE_FIELD(CASE)
@ -1490,16 +1491,27 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
#endif // PRODUCT
} break;
case MethodRecognizer::kSmi_hashCode: {
// TODO(dartbug.com/38985): We should make this LoadLocal+Unbox+
// IntegerHash+Box. Though this would make use of unboxed values on stack
// which isn't allowed in unoptimized mode.
// Once force-optimized functions can be inlined, we should change this
// code to the above.
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += BuildHashCode(/*smi=*/true);
body += BuildIntegerHashCode(/*smi=*/true);
} break;
case MethodRecognizer::kMint_hashCode: {
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += BuildHashCode(/*smi=*/false);
body += BuildIntegerHashCode(/*smi=*/false);
} break;
case MethodRecognizer::kDouble_hashCode: {
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += UnboxTruncate(kUnboxedDouble);
body += BuildDoubleHashCode();
body += Box(kUnboxedInt64);
} break;
// case MethodRecognizer::kDouble_hashCode:
case MethodRecognizer::kFfiAsExternalTypedDataInt8:
case MethodRecognizer::kFfiAsExternalTypedDataInt16:
case MethodRecognizer::kFfiAsExternalTypedDataInt32:
@ -5095,7 +5107,7 @@ const Function& FlowGraphBuilder::PrependTypeArgumentsFunction() {
return prepend_type_arguments_;
}
Fragment FlowGraphBuilder::BuildHashCode(bool smi) {
Fragment FlowGraphBuilder::BuildIntegerHashCode(bool smi) {
Fragment body;
Value* unboxed_value = Pop();
HashIntegerOpInstr* hash =
@ -5105,6 +5117,16 @@ Fragment FlowGraphBuilder::BuildHashCode(bool smi) {
return body;
}
Fragment FlowGraphBuilder::BuildDoubleHashCode() {
Fragment body;
Value* double_value = Pop();
HashDoubleOpInstr* hash = new HashDoubleOpInstr(double_value, DeoptId::kNone);
Push(hash);
body <<= hash;
body += Box(kUnboxedInt64);
return body;
}
int64_t SwitchHelper::ExpressionRange() const {
const int64_t min = expression_min().AsInt64Value();
const int64_t max = expression_max().AsInt64Value();

View file

@ -572,7 +572,8 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
return instructions;
}
Fragment BuildHashCode(bool smi);
Fragment BuildDoubleHashCode();
Fragment BuildIntegerHashCode(bool smi);
TranslationHelper translation_helper_;
Thread* thread_;

View file

@ -322,6 +322,7 @@ namespace dart {
V(::, get:extensionStreamHasListener, ExtensionStreamHasListener, 0xfab46343)\
V(_Smi, get:hashCode, Smi_hashCode, 0x75e0ccd2) \
V(_Mint, get:hashCode, Mint_hashCode, 0x75e0ccd2) \
V(_Double, get:hashCode, Double_hashCode, 0x75e0ccd2) \
// List of intrinsics:
// (class-name, function-name, intrinsification method, fingerprint).
@ -345,8 +346,6 @@ namespace dart {
V(_Double, -, Double_sub, 0xb8343210) \
V(_Double, *, Double_mul, 0xf9bb3c0d) \
V(_Double, /, Double_div, 0xefe9ca49) \
V(_Double, get:hashCode, Double_hashCode, 0x75e0d093) \
V(_Double, get:_identityHashCode, Double_identityHash, 0x47a56551) \
V(_Double, get:isNaN, Double_getIsNaN, 0xd4890713) \
V(_Double, get:isInfinite, Double_getIsInfinite, 0xc4facbd2) \
V(_Double, get:isNegative, Double_getIsNegative, 0xd4715091) \

File diff suppressed because it is too large.

View file

@ -8381,6 +8381,7 @@ bool Function::RecognizedKindForceOptimize() const {
case MethodRecognizer::kGetNativeField:
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kUtf8DecoderScan:
case MethodRecognizer::kDouble_hashCode:
// Prevent the GC from running so that the operation is atomic from
// a GC point of view. Always double check implementation in
// kernel_to_il.cc that no GC can happen in between the relevant IL

View file

@ -3388,6 +3388,18 @@ static void CopySavedRegisters(uword saved_registers_address,
}
#endif
DEFINE_LEAF_RUNTIME_ENTRY(bool, TryDoubleAsInteger, 1, Thread* thread) {
double value = thread->unboxed_double_runtime_arg();
int64_t int_value = static_cast<int64_t>(value);
double converted_double = static_cast<double>(int_value);
if (converted_double != value) {
return false;
}
thread->set_unboxed_int64_runtime_arg(int_value);
return true;
}
END_LEAF_RUNTIME_ENTRY
// Copies saved registers and caller's frame into temporary buffers.
// Returns the stack size of unoptimized frame.
// The calling code must be optimized, but its function may not have

View file

@ -112,7 +112,8 @@ namespace dart {
V(void, MsanUnpoison, void*, size_t) \
V(void, MsanUnpoisonParam, size_t) \
V(void, TsanLoadAcquire, void*) \
V(void, TsanStoreRelease, void*)
V(void, TsanStoreRelease, void*) \
V(bool, TryDoubleAsInteger, Thread*)
} // namespace dart

View file

@ -11,12 +11,9 @@ class _Double implements double {
@pragma("vm:external-name", "Double_doubleFromInteger")
external factory _Double.fromInteger(int value);
@pragma("vm:recognized", "asm-intrinsic")
@pragma("vm:external-name", "Double_hashCode")
@pragma("vm:recognized", "other")
external int get hashCode;
@pragma("vm:recognized", "asm-intrinsic")
@pragma("vm:external-name", "Double_hashCode")
external int get _identityHashCode;
int get _identityHashCode => hashCode;
@pragma("vm:recognized", "asm-intrinsic")
@pragma("vm:exact-result-type", _Double)

View file

@ -18,6 +18,7 @@ main() {
test(9007199254840856);
test(144115188075954880);
test(936748722493162112);
test(0x8000000000000000);
}
test(int x) {

View file

@ -20,6 +20,7 @@ main() {
test(9007199254840856);
test(144115188075954880);
test(936748722493162112);
test(0x8000000000000000);
}
test(int x) {