Mirror of https://github.com/dart-lang/sdk (synced 2024-11-02 12:20:38 +00:00)
[VM] Reduce Smi size to 32 bit on 64 bit platforms
This reduces small tagged integers on 64 bit platforms from 63 bits to 31 bits
plus one tag bit. This is a step on the way to compile-time-optional compressed
pointers on 64 bit platforms. See more about this at go/dartvmlearnings

This causes a slowdown for some uses of integers that don't fit in 31 signed
bits, but because both x64 and ARM64 have unboxed 64 bit integers now the
performance hit should not be too bad.

This is a reapplication of https://dart-review.googlesource.com/c/sdk/+/46244
It was reverted due to a compilation error on 32 bit ARM with DBC.

R=vegorov@google.com

Change-Id: I943de1768519457f0e5a61ef0b4ef204b6a53281
Reviewed-on: https://dart-review.googlesource.com/51321
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
This commit is contained in:
parent be8637756a
commit cf78da8a48

25 changed files with 931 additions and 529 deletions
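The commit message assumes familiarity with the Smi tagging scheme. Below is a
minimal C++ sketch of the scheme this change moves to, inferred from the
asserts in the diff (kSmiTag == 0, kSmiTagSize == 1) and the sxtw/movsxd
invariant the generated code maintains; the helper names are mine, not the
VM's API.

// Illustrative sketch of 32 bit Smis on a 64 bit word (not VM code).
// A Smi is stored as (value << 1): the low tag bit 0 distinguishes it from a
// heap pointer, and bits 32..63 are sign extension of bit 31.
#include <cassert>
#include <cstdint>

static bool FitsInSmi(int64_t value) {
  // 31 payload bits (including sign) plus one tag bit.
  return value >= -(INT64_C(1) << 30) && value < (INT64_C(1) << 30);
}

static int64_t SmiTag(int64_t value) {
  assert(FitsInSmi(value));
  // Shift into the low 32 bits, then sign extend, matching the invariant the
  // diff maintains with sxtw (ARM64) and movsxd (x64) after arithmetic.
  return static_cast<int32_t>(value << 1);
}

static int64_t SmiUntag(int64_t tagged) { return tagged >> 1; }

int main() {
  int64_t t = SmiTag(12345);
  assert(SmiUntag(t) == 12345);
  assert(!FitsInSmi(INT64_C(1) << 31));  // No longer a Smi after this change.
  return 0;
}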
@@ -78,19 +78,14 @@ abstract class _HashBase implements _HashVMBase {
   static const int _UNUSED_PAIR = 0;
   static const int _DELETED_PAIR = 1;

-  // On 32-bit, the top bits are wasted to avoid Mint allocation.
-  // TODO(koda): Reclaim the bits by making the compiler treat hash patterns
-  // as unsigned words.
+  // The top bits are wasted to avoid Mint allocation.
   static int _indexSizeToHashMask(int indexSize) {
     int indexBits = indexSize.bitLength - 2;
-    return internal.is64Bit
-        ? (1 << (32 - indexBits)) - 1
-        : (1 << (30 - indexBits)) - 1;
+    return (1 << (30 - indexBits)) - 1;
   }

   static int _hashPattern(int fullHash, int hashMask, int size) {
     final int maskedHash = fullHash & hashMask;
     // TODO(koda): Consider keeping bit length and use left shift.
     return (maskedHash == 0) ? (size >> 1) : maskedHash * (size >> 1);
   }
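Why the uniform mask is safe: _hashPattern multiplies the masked hash by half
the index size, and the mask is sized so that product still fits in a 31 bit
Smi. A worked example in C++, under my reading of Dart's bitLength (the
function and constants below are illustrative, not the library code):

// Worked example of the new platform-independent hash mask (illustrative).
#include <cassert>
#include <cstdint>

static int BitLength(int64_t v) {  // Dart's indexSize.bitLength for v > 0.
  int n = 0;
  while (v > 0) { v >>= 1; n++; }
  return n;
}

static int64_t IndexSizeToHashMask(int64_t index_size) {
  int index_bits = BitLength(index_size) - 2;
  return (INT64_C(1) << (30 - index_bits)) - 1;
}

int main() {
  // For a 256-entry index: bitLength(256) == 9, so the mask keeps 23 bits,
  // and maskedHash * (size >> 1) stays below 2^30, inside the 31 bit Smi.
  assert(IndexSizeToHashMask(256) == (1 << 23) - 1);
  int64_t masked_max = (1 << 23) - 1;
  assert(masked_max * (256 >> 1) < (INT64_C(1) << 30));
  return 0;
}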
@@ -32,7 +32,7 @@ class int {
         return null; // Empty.
       }
     }
-    var smiLimit = is64Bit ? 18 : 9;
+    var smiLimit = 9;
     if ((last - ix) >= smiLimit) {
       return null; // May not fit into a Smi.
     }

@@ -133,7 +133,7 @@ class int {

   static int _parseRadix(
       String source, int radix, int start, int end, int sign) {
-    int tableIndex = (radix - 2) * 4 + (is64Bit ? 2 : 0);
+    int tableIndex = (radix - 2) * 2;
     int blockSize = _PARSE_LIMITS[tableIndex];
     int length = end - start;
     if (length <= blockSize) {

@@ -159,7 +159,7 @@ class int {
     int positiveOverflowLimit = 0;
     int negativeOverflowLimit = 0;
     if (_limitIntsTo64Bits) {
-      tableIndex = tableIndex << 1; // pre-multiply by 2 for simpler indexing
+      tableIndex = tableIndex << 1; // Pre-multiply by 2 for simpler indexing.
       positiveOverflowLimit = _int64OverflowLimits[tableIndex];
       if (positiveOverflowLimit == 0) {
         positiveOverflowLimit =

@@ -175,14 +175,10 @@ class int {
       if (result >= positiveOverflowLimit) {
         if ((result > positiveOverflowLimit) ||
             (smi > _int64OverflowLimits[tableIndex + 2])) {
-          // Although the unsigned overflow limits do not depend on the
-          // platform, the multiplier and block size, which are used to
-          // compute it, do.
-          int X = is64Bit ? 1 : 0;
           if (radix == 16 &&
-              !(result >= _int64UnsignedOverflowLimits[X] &&
-                  (result > _int64UnsignedOverflowLimits[X] ||
-                      smi > _int64UnsignedSmiOverflowLimits[X])) &&
+              !(result >= _int64UnsignedOverflowLimit &&
+                  (result > _int64UnsignedOverflowLimit ||
+                      smi > _int64UnsignedSmiOverflowLimit)) &&
               blockEnd + blockSize > end) {
             return (result * multiplier) + smi;
           }

@@ -227,43 +223,42 @@ class int {

   // For each radix, 2-36, how many digits are guaranteed to fit in a smi,
   // and magnitude of such a block (radix ** digit-count).
-  // 32-bit limit/multiplier at (radix - 2)*4, 64-bit limit at (radix-2)*4+2
   static const _PARSE_LIMITS = const [
-    30, 1073741824, 62, 4611686018427387904, // radix: 2
-    18, 387420489, 39, 4052555153018976267,
-    15, 1073741824, 30, 1152921504606846976,
-    12, 244140625, 26, 1490116119384765625, // radix: 5
-    11, 362797056, 23, 789730223053602816,
-    10, 282475249, 22, 3909821048582988049,
-    10, 1073741824, 20, 1152921504606846976,
-    9, 387420489, 19, 1350851717672992089,
-    9, 1000000000, 18, 1000000000000000000, // radix: 10
-    8, 214358881, 17, 505447028499293771,
-    8, 429981696, 17, 2218611106740436992,
-    8, 815730721, 16, 665416609183179841,
-    7, 105413504, 16, 2177953337809371136,
-    7, 170859375, 15, 437893890380859375, // radix: 15
-    7, 268435456, 15, 1152921504606846976,
-    7, 410338673, 15, 2862423051509815793,
-    7, 612220032, 14, 374813367582081024,
-    7, 893871739, 14, 799006685782884121,
-    6, 64000000, 14, 1638400000000000000, // radix: 20
-    6, 85766121, 14, 3243919932521508681,
-    6, 113379904, 13, 282810057883082752,
-    6, 148035889, 13, 504036361936467383,
-    6, 191102976, 13, 876488338465357824,
-    6, 244140625, 13, 1490116119384765625, // radix: 25
-    6, 308915776, 13, 2481152873203736576,
-    6, 387420489, 13, 4052555153018976267,
-    6, 481890304, 12, 232218265089212416,
-    6, 594823321, 12, 353814783205469041,
-    6, 729000000, 12, 531441000000000000, // radix: 30
-    6, 887503681, 12, 787662783788549761,
-    6, 1073741824, 12, 1152921504606846976,
-    5, 39135393, 12, 1667889514952984961,
-    5, 45435424, 12, 2386420683693101056,
-    5, 52521875, 12, 3379220508056640625, // radix: 35
-    5, 60466176, 11, 131621703842267136,
+    30, 1073741824, // radix: 2
+    18, 387420489,
+    15, 1073741824,
+    12, 244140625, // radix: 5
+    11, 362797056,
+    10, 282475249,
+    10, 1073741824,
+    9, 387420489,
+    9, 1000000000, // radix: 10
+    8, 214358881,
+    8, 429981696,
+    8, 815730721,
+    7, 105413504,
+    7, 170859375, // radix: 15
+    7, 268435456,
+    7, 410338673,
+    7, 612220032,
+    7, 893871739,
+    6, 64000000, // radix: 20
+    6, 85766121,
+    6, 113379904,
+    6, 148035889,
+    6, 191102976,
+    6, 244140625, // radix: 25
+    6, 308915776,
+    6, 387420489,
+    6, 481890304,
+    6, 594823321,
+    6, 729000000, // radix: 30
+    6, 887503681,
+    6, 1073741824,
+    5, 39135393,
+    5, 45435424,
+    5, 52521875, // radix: 35
+    5, 60466176,
   ];

   /// Flag indicating if integers are limited by 64 bits

@@ -273,11 +268,8 @@ class int {
   static const _maxInt64 = 0x7fffffffffffffff;
   static const _minInt64 = -_maxInt64 - 1;

-  static const _int64UnsignedOverflowLimits = const [0xfffffffff, 0xf];
-  static const _int64UnsignedSmiOverflowLimits = const [
-    0xfffffff,
-    0xfffffffffffffff
-  ];
+  static const _int64UnsignedOverflowLimit = 0xfffffffff;
+  static const _int64UnsignedSmiOverflowLimit = 0xfffffff;

   /// In the `--limit-ints-to-64-bits` mode calculation of the expression
   ///
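Each surviving _PARSE_LIMITS row is "digits per block" and "radix ** digits":
the largest digit count whose block value is still representable in a 31 bit
Smi. A C++ sketch that recomputes two of the rows above (helper is mine, for
illustration only):

// Recompute a _PARSE_LIMITS row (illustrative): largest d with radix^d <= 2^30.
#include <cassert>
#include <cstdint>

static void SmiBlock(int64_t radix, int64_t* digits, int64_t* multiplier) {
  const int64_t kSmiRange = INT64_C(1) << 30;  // d digits fit if radix^d <= 2^30.
  int64_t d = 0, m = 1;
  while (m <= kSmiRange / radix) {
    m *= radix;
    d++;
  }
  *digits = d;
  *multiplier = m;
}

int main() {
  int64_t d, m;
  SmiBlock(10, &d, &m);
  assert(d == 9 && m == 1000000000);   // Matches the radix-10 row above.
  SmiBlock(2, &d, &m);
  assert(d == 30 && m == 1073741824);  // Matches the radix-2 row above.
  return 0;
}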
@@ -1069,12 +1069,18 @@ class Assembler : public ValueObject {
     const Register crn = ConcreteRegister(rn);
     EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord);
   }
-  void fcvtzds(Register rd, VRegister vn) {
+  void fcvtzdsx(Register rd, VRegister vn) {
     ASSERT(rd != R31);
     ASSERT(rd != CSP);
     const Register crd = ConcreteRegister(rd);
     EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn));
   }
+  void fcvtzdsw(Register rd, VRegister vn) {
+    ASSERT(rd != R31);
+    ASSERT(rd != CSP);
+    const Register crd = ConcreteRegister(rd);
+    EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn), kWord);
+  }
   void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); }
   void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); }
   void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); }

@@ -1365,9 +1371,17 @@ class Assembler : public ValueObject {
     LslImmediate(dst, src, kSmiTagSize);
   }

-  void BranchIfNotSmi(Register reg, Label* label) { tbnz(label, reg, kSmiTag); }
+  void BranchIfNotSmi(Register reg, Label* label) {
+    ASSERT(kSmiTagMask == 1);
+    ASSERT(kSmiTag == 0);
+    tbnz(label, reg, 0);
+  }

-  void BranchIfSmi(Register reg, Label* label) { tbz(label, reg, kSmiTag); }
+  void BranchIfSmi(Register reg, Label* label) {
+    ASSERT(kSmiTagMask == 1);
+    ASSERT(kSmiTag == 0);
+    tbz(label, reg, 0);
+  }

   void Branch(const StubEntry& stub_entry,
               Register pp,

@@ -1451,6 +1465,11 @@ class Assembler : public ValueObject {
     kValueCanBeSmi,
   };

+  enum CanBeHeapPointer {
+    kValueIsNotHeapPointer,
+    kValueCanBeHeapPointer,
+  };
+
   // Storing into an object.
   void StoreIntoObject(Register object,
                        const Address& dest,

@@ -1591,6 +1610,22 @@ class Assembler : public ValueObject {
                             Register tmp,
                             OperandSize sz);

+  void AssertSmiInRange(
+      Register object,
+      CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
+#if defined(DEBUG)
+    Label ok;
+    if (can_be_heap_pointer == kValueCanBeHeapPointer) {
+      BranchIfNotSmi(object, &ok);
+    }
+    cmp(object, Operand(object, SXTW, 0));
+    b(&ok, EQ);
+    Stop("Smi out of range");
+
+    Bind(&ok);
+#endif
+  }
+
  private:
   AssemblerBuffer buffer_;  // Contains position independent code.
   ObjectPoolWrapper object_pool_wrapper_;
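The new AssertSmiInRange boils down to one identity: a tagged 32 bit Smi must
equal the sign extension of its own low 32 bits. That is what
cmp(object, Operand(object, SXTW, 0)) tests here, and what the x64 version
tests with movsxd/cmpq. A minimal sketch of the same predicate in C++
(illustrative, not VM code):

#include <cassert>
#include <cstdint>

// A valid tagged Smi occupies only the low 32 bits, sign extended upward.
static bool InSmiRange(int64_t tagged) {
  return tagged == static_cast<int64_t>(static_cast<int32_t>(tagged));
}

int main() {
  assert(InSmiRange(42 << 1));          // Tagged 42.
  assert(InSmiRange(-84));              // Tagged -42.
  assert(!InSmiRange(INT64_C(1) << 40));  // Garbage in the high word.
  return 0;
}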
@@ -2576,17 +2576,73 @@ ASSEMBLER_TEST_RUN(FldrqFstrqPrePostIndex, test) {
   EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry()));
 }

-ASSEMBLER_TEST_GENERATE(Fcvtzds, assembler) {
+ASSEMBLER_TEST_GENERATE(Fcvtzdsx, assembler) {
   __ LoadDImmediate(V0, 42.0);
-  __ fcvtzds(R0, V0);
+  __ fcvtzdsx(R0, V0);
   __ ret();
 }

-ASSEMBLER_TEST_RUN(Fcvtzds, test) {
+ASSEMBLER_TEST_RUN(Fcvtzdsx, test) {
   typedef int64_t (*Int64Return)() DART_UNUSED;
   EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
 }

+ASSEMBLER_TEST_GENERATE(Fcvtzdsw, assembler) {
+  __ LoadDImmediate(V0, 42.0);
+  __ fcvtzdsw(R0, V0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Fcvtzdsw, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow, assembler) {
+  __ LoadDImmediate(V0, 1e20);
+  __ fcvtzdsx(R0, V0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(kMaxInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow_negative, assembler) {
+  __ LoadDImmediate(V0, -1e20);
+  __ fcvtzdsx(R0, V0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow_negative, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(kMinInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow, assembler) {
+  __ LoadDImmediate(V0, 1e10);
+  __ fcvtzdsw(R0, V0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+}
+
+ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow_negative, assembler) {
+  __ LoadDImmediate(V0, -1e10);
+  __ fcvtzdsw(R0, V0);
+  __ sxtw(R0, R0);
+  __ ret();
+}
+
+ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow_negative, test) {
+  typedef int64_t (*Int64Return)() DART_UNUSED;
+  EXPECT_EQ(kMinInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
+}
+
 ASSEMBLER_TEST_GENERATE(Scvtfdx, assembler) {
   __ LoadImmediate(R0, 42);
   __ scvtfdx(V0, R0);
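The new overflow tests encode a property of ARM64 fcvtzs: out-of-range doubles
saturate to the destination width's min/max rather than trapping. A C++ model
of what the tests expect (illustrative; NaN handling is omitted here because
the generated code checks NaN separately with fcmpd):

#include <cassert>
#include <cstdint>
#include <limits>

// Saturating double -> integer conversion, as fcvtzs behaves on ARM64.
static int64_t FcvtzsX(double v) {
  if (v >= 9223372036854775808.0) return std::numeric_limits<int64_t>::max();
  if (v < -9223372036854775808.0) return std::numeric_limits<int64_t>::min();
  return static_cast<int64_t>(v);
}

static int32_t FcvtzsW(double v) {
  if (v >= 2147483648.0) return std::numeric_limits<int32_t>::max();
  if (v < -2147483648.0) return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(v);
}

int main() {
  assert(FcvtzsX(1e20) == std::numeric_limits<int64_t>::max());
  assert(FcvtzsX(-1e20) == std::numeric_limits<int64_t>::min());
  assert(FcvtzsW(1e10) == std::numeric_limits<int32_t>::max());
  assert(FcvtzsW(-1e10) == std::numeric_limits<int32_t>::min());
  return 0;
}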
@@ -521,7 +521,10 @@ class Assembler : public ValueObject {
     return CompareImmediate(reg, Immediate(immediate));
   }

-  void testl(Register reg, const Immediate& imm) { testq(reg, imm); }
+  void testl(Register reg, const Immediate& imm) {
+    Immediate imm2(imm.value() & 0xffffffffll);
+    testq(reg, imm2);
+  }
   void testb(const Address& address, const Immediate& imm);

   void testq(Register reg, const Immediate& imm);

@@ -710,6 +713,11 @@ class Assembler : public ValueObject {
     kValueCanBeSmi,
   };

+  enum CanBeHeapPointer {
+    kValueIsNotHeapPointer,
+    kValueCanBeHeapPointer,
+  };
+
   // Destroys value.
   void StoreIntoObject(Register object,      // Object we are storing into.
                        const Address& dest,  // Where we are storing into.

@@ -939,6 +947,26 @@ class Assembler : public ValueObject {
                                Register array,
                                Register index);

+  void AssertSmiInRange(
+      Register object,
+      CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
+#if defined(DEBUG)
+    Register tmp = object == TMP ? TMP2 : TMP;
+    Label ok;
+    if (can_be_heap_pointer == kValueCanBeHeapPointer) {
+      testl(object, Immediate(kSmiTagMask));
+      ASSERT(kSmiTag == 0);
+      j(ZERO, &ok);
+    }
+    movsxd(tmp, object);
+    cmpq(tmp, object);
+    j(EQUAL, &ok);
+    Stop("Smi out of range");
+
+    Bind(&ok);
+#endif
+  }
+
   static Address VMTagAddress() {
     return Address(THR, Thread::vm_tag_offset());
   }
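The reworked testl masks its immediate before delegating to testq. The point,
as far as the ZERO flag is concerned: once the immediate's high 32 bits are
cleared, a 64 bit TEST can only see the register's low 32 bits, so a
sign-extending negative immediate no longer drags the high half of the
register into the flag. A small C++ demonstration (illustrative):

#include <cassert>
#include <cstdint>

// ZF of TEST is "is (reg & imm) == 0"; model just that flag.
static bool ZeroFlagTestq(uint64_t reg, uint64_t imm) {
  return (reg & imm) == 0;
}

int main() {
  uint64_t reg = UINT64_C(0xdeadbeef00000000);  // Non-zero only in high half.
  int64_t imm = -1;                             // Would sign extend to 64 bits.
  uint64_t masked = static_cast<uint64_t>(imm) & 0xffffffffull;
  assert(!ZeroFlagTestq(reg, static_cast<uint64_t>(imm)));  // High half leaks.
  assert(ZeroFlagTestq(reg, masked));                       // It cannot.
  return 0;
}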
@@ -1585,10 +1585,10 @@ bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
   return Utils::IsPowerOfTwo(Utils::Abs(int_value));
 }

-static intptr_t RepresentationBits(Representation r) {
+static intptr_t SignificantRepresentationBits(Representation r) {
   switch (r) {
     case kTagged:
-      return kBitsPerWord - 1;
+      return 31;
     case kUnboxedInt32:
     case kUnboxedUint32:
       return 32;

@@ -1602,7 +1602,7 @@ static intptr_t RepresentationBits(Representation r) {

 static int64_t RepresentationMask(Representation r) {
   return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
-                              (64 - RepresentationBits(r)));
+                              (64 - SignificantRepresentationBits(r)));
 }

 static bool ToIntegerConstant(Value* value, int64_t* result) {

@@ -2163,7 +2163,8 @@ Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) {
       break;

     case Token::kSHL: {
-      const intptr_t kMaxShift = RepresentationBits(representation()) - 1;
+      const intptr_t kMaxShift =
+          SignificantRepresentationBits(representation()) - 1;
       if (rhs == 0) {
         return left()->definition();
       } else if ((rhs < 0) || (rhs >= kMaxShift)) {
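With kTagged now fixed at 31 significant bits, RepresentationMask(kTagged) is
0x7fffffff on every platform. A worked check of the same shift arithmetic
(illustrative; takes the bit count directly instead of a Representation):

#include <cassert>
#include <cstdint>

static int64_t MaskForBits(intptr_t significant_bits) {
  return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
                              (64 - significant_bits));
}

int main() {
  assert(MaskForBits(31) == INT64_C(0x7fffffff));  // kTagged after this change.
  assert(MaskForBits(32) == INT64_C(0xffffffff));  // kUnboxedInt32/Uint32.
  return 0;
}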
@@ -997,9 +997,13 @@ CompileType LoadIndexedInstr::ComputeType() const {
     case kTwoByteStringCid:
     case kExternalOneByteStringCid:
     case kExternalTwoByteStringCid:
       return CompileType::FromCid(kSmiCid);

     case kTypedDataInt32ArrayCid:
     case kTypedDataUint32ArrayCid:
-      return CompileType::FromCid(kSmiCid);
+      // TODO(erikcorry): Perhaps this can return a faster type.  See
+      // https://github.com/dart-lang/sdk/issues/32582
+      return CompileType::Int();

     default:
       UNIMPLEMENTED();

@@ -1273,7 +1277,6 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         default:
           UNREACHABLE();
       }
-      __ SmiTag(result);
       break;
     case kTwoByteStringCid:
     case kExternalTwoByteStringCid:

@@ -1287,12 +1290,15 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         default:
           UNREACHABLE();
       }
-      __ SmiTag(result);
       break;
     default:
       UNREACHABLE();
       break;
   }
+  if (representation_ == kTagged) {
+    ASSERT(can_pack_into_smi());
+    __ SmiTag(result);
+  }
 }

 Representation StoreIndexedInstr::RequiredInputRepresentation(
@@ -2770,18 +2776,27 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
-    // Immediate shift operation takes 6 bits for the count.
-    const intptr_t kCountLimit = 0x3F;
+    // Immediate shift operation takes 5 bits for the count.
+    const intptr_t kCountLimit = 0x1F;
+    // These should be around the same size.
+    COMPILE_ASSERT(kCountLimit + 1 == kSmiBits + 2);
     const intptr_t value = Smi::Cast(constant).Value();
     ASSERT((0 < value) && (value < kCountLimit));
     if (shift_left->can_overflow()) {
       // Check for overflow (preserve left).
-      __ LslImmediate(TMP, left, value);
-      __ cmp(left, Operand(TMP, ASR, value));
+      __ LslImmediate(TMP, left, value, kWord);
+      __ cmpw(left, Operand(TMP, ASR, value));
       __ b(deopt, NE);  // Overflow.
     }
-    // Shift for result now we know there is no overflow.
+    // Shift for result now we know there is no overflow. This writes the full
+    // 64 bits of the output register, but unless we are in truncating mode the
+    // top bits will just be sign extension bits.
     __ LslImmediate(result, left, value);
+    if (shift_left->is_truncating()) {
+      // This preserves the invariant that Smis only use the low 32 bits of the
+      // register, the high bits being sign extension bits.
+      __ sxtw(result, result);
+    }
     return;
   }

@@ -2789,28 +2804,33 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
   const Register right = locs.in(1).reg();
   Range* right_range = shift_left->right_range();
   if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
     // TODO(srdjan): Implement code below for is_truncating().
     // If left is constant, we know the maximal allowed size for right.
     const Object& obj = shift_left->left()->BoundConstant();
-    if (obj.IsSmi()) {
-      const intptr_t left_int = Smi::Cast(obj).Value();
-      if (left_int == 0) {
-        __ CompareRegisters(right, ZR);
-        __ b(deopt, MI);
-        __ mov(result, ZR);
-        return;
-      }
-      const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
-      const bool right_needs_check =
-          !RangeUtils::IsWithin(right_range, 0, max_right - 1);
-      if (right_needs_check) {
-        __ CompareImmediate(right,
-                            reinterpret_cast<int64_t>(Smi::New(max_right)));
-        __ b(deopt, CS);
-      }
-      __ SmiUntag(TMP, right);
-      __ lslv(result, left, TMP);
+    // Even though we have a non-Smi constant on the left, we might still emit
+    // a Smi op here. In that case the Smi check above will have deopted, so
+    // we can't reach this point. Emit a breakpoint to be sure.
+    if (!obj.IsSmi()) {
+      __ Breakpoint();
+      return;
+    }
+    const intptr_t left_int = Smi::Cast(obj).Value();
+    if (left_int == 0) {
+      __ CompareRegisters(right, ZR);
+      __ b(deopt, MI);
+      __ mov(result, ZR);
+      return;
+    }
+    const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
+    const bool right_needs_check =
+        !RangeUtils::IsWithin(right_range, 0, max_right - 1);
+    if (right_needs_check) {
+      __ CompareImmediate(right,
+                          reinterpret_cast<int64_t>(Smi::New(max_right)));
+      __ b(deopt, CS);
+    }
+    __ SmiUntag(TMP, right);
+    __ lslv(result, left, TMP);
+    ASSERT(!shift_left->is_truncating());
     return;
   }

@@ -2834,7 +2854,11 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
       __ SmiUntag(TMP, right);
       __ lslv(result, left, TMP);
     }
+    if (shift_left->is_truncating()) {
+      __ sxtw(result, result);
+    }
   } else {
     // If we can overflow.
     if (right_needs_check) {
       ASSERT(shift_left->CanDeoptimize());
       __ CompareImmediate(right,

@@ -2842,15 +2866,16 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
       __ b(deopt, CS);
     }
     // Left is not a constant.
-    // Check if count too large for handling it inlined.
+    // Check if count is too large for handling it inlined.
     __ SmiUntag(TMP, right);
     // Overflow test (preserve left, right, and TMP);
     const Register temp = locs.temp(0).reg();
-    __ lslv(temp, left, TMP);
-    __ asrv(TMP2, temp, TMP);
-    __ CompareRegisters(left, TMP2);
+    __ lslvw(temp, left, TMP);
+    __ asrvw(TMP2, temp, TMP);
+    __ cmpw(left, Operand(TMP2));
     __ b(deopt, NE);  // Overflow.
-    // Shift for result now we know there is no overflow.
+    // Shift for result now we know there is no overflow. This is a 64 bit
+    // operation, so no sign extension is needed.
     __ lslv(result, left, TMP);
   }
 }
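All of the shift-left overflow checks above use the same trick: shift in the
32 bit half of the register, shift arithmetically back, and compare with the
original; the two differ exactly when bits were lost. A C++ model
(illustrative; assumes arithmetic right shift on signed ints, as on the
targets here):

#include <cassert>
#include <cstdint>

// Does (x << s) overflow a 32 bit signed value?
static bool ShlOverflows32(int32_t x, int s) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(x) << s);
  return (shifted >> s) != x;  // Shift back and compare, as cmpw does above.
}

int main() {
  assert(!ShlOverflows32(3, 4));          // 3 << 4 fits.
  assert(ShlOverflows32(1 << 28, 4));     // Top bits lost: overflow.
  assert(ShlOverflows32(-(1 << 28), 5));  // Too negative: overflow.
  return 0;
}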
@@ -2931,18 +2956,20 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

   switch (op_kind()) {
     case Token::kADD:
-      __ adds(result, left, Operand(right));
+      __ addsw(result, left, Operand(right));
       __ b(slow_path->entry_label(), VS);
+      __ sxtw(result, result);
       break;
     case Token::kSUB:
-      __ subs(result, left, Operand(right));
+      __ subsw(result, left, Operand(right));
       __ b(slow_path->entry_label(), VS);
+      __ sxtw(result, result);
       break;
     case Token::kMUL:
       __ SmiUntag(TMP, left);
-      __ mul(result, TMP, right);
-      __ smulh(TMP, TMP, right);
-      // TMP: result bits 64..127.
+      __ smull(result, TMP, right);
+      __ AsrImmediate(TMP, result, 31);
+      // TMP: result bits 31..63.
       __ cmp(TMP, Operand(result, ASR, 63));
       __ b(slow_path->entry_label(), NE);
       break;

@@ -2967,8 +2994,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

       __ SmiUntag(TMP, right);
       __ lslv(result, left, TMP);
-      __ asrv(TMP2, result, TMP);
-      __ CompareRegisters(left, TMP2);
+      __ asrvw(TMP2, result, TMP);
+      __ cmp(left, Operand(TMP2, SXTW, 0));
       __ b(slow_path->entry_label(), NE);  // Overflow.
       break;
     case Token::kSHR:

@@ -2978,6 +3005,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
                           reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
       __ b(slow_path->entry_label(), CS);

+      __ AssertSmiInRange(left);
+      __ AssertSmiInRange(right);
       __ SmiUntag(result, right);
       __ SmiUntag(TMP, left);
       __ asrv(result, TMP, result);

@@ -2987,6 +3016,7 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       UNIMPLEMENTED();
   }
   __ Bind(slow_path->exit_label());
+  __ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
 }

 class CheckedSmiComparisonSlowPath
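The new kMUL sequence replaces a 64x64 multiply plus smulh with a single
32x32 -> 64 bit smull: the product fits back into a 32 bit result exactly when
bits 63..31 are all copies of the sign bit, i.e. (prod >> 31) == (prod >> 63),
which is what the AsrImmediate/cmp pair tests. In C++ (illustrative):

#include <cassert>
#include <cstdint>

static bool MulFits32(int32_t a, int32_t b) {
  int64_t prod = static_cast<int64_t>(a) * b;  // Exact, like smull.
  return (prod >> 31) == (prod >> 63);         // High bits must be sign copies.
}

int main() {
  assert(MulFits32(46341, 46340));    // 2147441940 fits in 32 bits.
  assert(!MulFits32(46341, 46341));   // 2147488281 does not.
  assert(!MulFits32(-46341, 46341));  // -2147488281 is below INT32_MIN.
  return 0;
}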
@@ -3177,20 +3207,28 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     case Token::kADD: {
       if (deopt == NULL) {
         __ AddImmediate(result, left, imm);
+        if (is_truncating()) {
+          __ sxtw(result, result);
+        }
       } else {
-        __ AddImmediateSetFlags(result, left, imm);
+        __ AddImmediateSetFlags(result, left, imm, kWord);
         __ b(deopt, VS);
+        __ sxtw(result, result);
       }
       break;
     }
     case Token::kSUB: {
       if (deopt == NULL) {
         __ AddImmediate(result, left, -imm);
+        if (is_truncating()) {
+          __ sxtw(result, result);
+        }
       } else {
         // Negating imm and using AddImmediateSetFlags would not detect the
-        // overflow when imm == kMinInt64.
-        __ SubImmediateSetFlags(result, left, imm);
+        // overflow when imm == kMinInt32.
+        __ SubImmediateSetFlags(result, left, imm, kWord);
         __ b(deopt, VS);
+        __ sxtw(result, result);
       }
       break;
     }

@@ -3198,12 +3236,14 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       // Keep left value tagged and untag right value.
       const intptr_t value = Smi::Cast(constant).Value();
       __ LoadImmediate(TMP, value);
-      __ mul(result, left, TMP);
+      __ smull(result, left, TMP);
       if (deopt != NULL) {
-        __ smulh(TMP, left, TMP);
-        // TMP: result bits 64..127.
+        __ AsrImmediate(TMP, result, 31);
+        // TMP: result bits 31..63.
         __ cmp(TMP, Operand(result, ASR, 63));
         __ b(deopt, NE);
+      } else if (is_truncating()) {
+        __ sxtw(result, result);
       }
       break;
     }

@@ -3214,9 +3254,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       const intptr_t shift_count =
          Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
       ASSERT(kSmiTagSize == 1);
-      __ AsrImmediate(TMP, left, 63);
+      __ AsrImmediate(TMP, left, 31);  // All 1s or all 0s.
+      ASSERT(shift_count > 1);  // 1, -1 case handled above.
       const Register temp = TMP2;
       // Adjust so that we round to 0 instead of round down.
       __ add(temp, left, Operand(TMP, LSR, 64 - shift_count));
       ASSERT(shift_count > 0);
       __ AsrImmediate(result, temp, shift_count);

@@ -3251,6 +3292,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         UNREACHABLE();
         break;
     }
+    __ AssertSmiInRange(result);
    return;
  }
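The power-of-two TRUNCDIV path above relies on a bias trick: an arithmetic
shift rounds toward negative infinity, so negative inputs get (divisor - 1)
added first. The all-ones/all-zeros word from AsrImmediate(TMP, left, 31),
shifted right logically, supplies exactly that bias (31 suffices rather than
63 because Smis are now sign-extended 32 bit values). A C++ model over plain
int64 (illustrative):

#include <cassert>
#include <cstdint>

static int64_t DivPow2TowardZero(int64_t x, int shift) {
  int64_t sign = x >> 63;  // All 1s for negative x, all 0s otherwise.
  uint64_t bias = static_cast<uint64_t>(sign) >> (64 - shift);  // 2^shift - 1 or 0.
  return (x + static_cast<int64_t>(bias)) >> shift;
}

int main() {
  assert(DivPow2TowardZero(7, 1) == 3);    // 7 / 2, truncated.
  assert(DivPow2TowardZero(-7, 1) == -3);  // Rounds toward zero, not to -4.
  assert(DivPow2TowardZero(-8, 2) == -2);
  return 0;
}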
@@ -3259,18 +3301,26 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     case Token::kADD: {
       if (deopt == NULL) {
         __ add(result, left, Operand(right));
+        if (is_truncating()) {
+          __ sxtw(result, result);
+        }
       } else {
-        __ adds(result, left, Operand(right));
+        __ addsw(result, left, Operand(right));
         __ b(deopt, VS);
+        __ sxtw(result, result);
       }
       break;
     }
     case Token::kSUB: {
       if (deopt == NULL) {
         __ sub(result, left, Operand(right));
+        if (is_truncating()) {
+          __ sxtw(result, result);
+        }
       } else {
-        __ subs(result, left, Operand(right));
+        __ subsw(result, left, Operand(right));
         __ b(deopt, VS);
+        __ sxtw(result, result);
       }
       break;
     }

@@ -3278,10 +3328,13 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ SmiUntag(TMP, left);
       if (deopt == NULL) {
         __ mul(result, TMP, right);
+        if (is_truncating()) {
+          __ sxtw(result, result);
+        }
       } else {
-        __ mul(result, TMP, right);
-        __ smulh(TMP, TMP, right);
-        // TMP: result bits 64..127.
+        __ smull(result, TMP, right);
+        __ AsrImmediate(TMP, result, 31);
+        // TMP: result bits 31..63.
         __ cmp(TMP, Operand(result, ASR, 63));
         __ b(deopt, NE);
       }

@@ -3316,7 +3369,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

       // Check the corner case of dividing the 'MIN_SMI' with -1, in which
       // case we cannot tag the result.
-      __ CompareImmediate(result, 0x4000000000000000LL);
+      __ CompareImmediate(result, 0x40000000LL);
       __ b(deopt, EQ);
       __ SmiTag(result);
       break;

@@ -3361,8 +3414,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         __ b(deopt, LT);
       }
       __ SmiUntag(TMP, right);
-      // sarl operation masks the count to 6 bits.
-      const intptr_t kCountLimit = 0x3F;
+      // The asrv operation masks the count to 6 bits, but any shift between 31
+      // and 63 gives the same result because 32 bit Smis are stored sign
+      // extended in the registers.
+      const intptr_t kCountLimit = 0x1F;
       if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
         __ LoadImmediate(TMP2, kCountLimit);
         __ CompareRegisters(TMP, TMP2);

@@ -3391,6 +3446,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         UNREACHABLE();
         break;
     }
+    __ AssertSmiInRange(result);
   }

 LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
@@ -3543,10 +3599,17 @@ LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
   ASSERT((from_representation() == kUnboxedInt32) ||
          (from_representation() == kUnboxedUint32));
   const intptr_t kNumInputs = 1;
-  const intptr_t kNumTemps = 0;
+  const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
   LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      ValueFitsSmi() ? LocationSummary::kNoCall
+                                     : LocationSummary::kCallOnSlowPath);
+  // Get two distinct registers for input and output, plus a temp
+  // register for testing for overflow and allocating a Mint.
   summary->set_in(0, Location::RequiresRegister());
+  if (!ValueFitsSmi()) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
   summary->set_out(0, Location::RequiresRegister());
   return summary;
 }

@@ -3555,16 +3618,51 @@ void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register value = locs()->in(0).reg();
   Register out = locs()->out(0).reg();
   ASSERT(value != out);
+  Label done;

-  ASSERT(kSmiTagSize == 1);
-  // TODO(vegorov) implement and use UBFM/SBFM for this.
-  __ LslImmediate(out, value, 32);
   if (from_representation() == kUnboxedInt32) {
-    __ AsrImmediate(out, out, 32 - kSmiTagSize);
+    ASSERT(kSmiTag == 0);
+    // Signed Bitfield Insert in Zero instruction extracts the 31 significant
+    // bits from a Smi.
+    __ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
+    if (ValueFitsSmi()) {
+      return;
+    }
+    Register temp = locs()->temp(0).reg();
+    __ cmp(out, Operand(value, LSL, 1));
+    __ b(&done, EQ);  // Jump if the sbfiz instruction didn't lose info.
+    BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                    temp);
+    __ sxtw(temp, value);
   } else {
     ASSERT(from_representation() == kUnboxedUint32);
-    __ LsrImmediate(out, out, 32 - kSmiTagSize);
+    ASSERT(kSmiTag == 0);
+    // A 32 bit positive Smi has one tag bit and one unused sign bit,
+    // leaving only 30 bits for the payload.
+    __ ubfiz(out, value, kSmiTagSize, kSmiBits);
+    if (ValueFitsSmi()) {
+      return;
+    }
+    Register temp = locs()->temp(0).reg();
+    __ TestImmediate(value, 0xc0000000);
+    __ b(&done, EQ);  // Jump if both bits are zero.
+    BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
+                                    temp);
+    __ ubfiz(temp, value, 0, 32);  // Zero extend word.
   }
+
+  __ StoreToOffset(locs()->temp(0).reg(), out,
+                   Mint::value_offset() - kHeapObjectTag);
+
+#if defined(DEBUG)
+  Label skip_smi_test;
+  __ b(&skip_smi_test);
+  __ Bind(&done);
+  __ AssertSmiInRange(out, Assembler::kValueCanBeHeapPointer);
+  __ Bind(&skip_smi_test);
+#else
+  __ Bind(&done);
+#endif
 }

 LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,

@@ -3592,7 +3690,8 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   }

   ASSERT(kSmiTag == 0);
-  __ LslImmediate(out, in, kSmiTagSize);
+  __ LslImmediate(out, in, kSmiTagSize, kWord);
+  __ sxtw(out, out);
   Label done;
   __ cmp(in, Operand(out, ASR, kSmiTagSize));
   __ b(&done, EQ);
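sbfiz(out, value, 1, 31) does the int32 boxing in one instruction: take the 31
low bits of the source, sign extend, and shift up by the tag bit. The
following cmp(out, Operand(value, LSL, 1)) then detects exactly the inputs
where those 31 bits were not enough. A C++ model of the bitfield instruction
and the check (illustrative):

#include <cassert>
#include <cstdint>

// sbfiz: extract <width> low bits of value, sign extend, place at bit <lsb>.
static int64_t Sbfiz(int64_t value, int lsb, int width) {
  int64_t field = static_cast<int64_t>(static_cast<uint64_t>(value)
                                       << (64 - width)) >> (64 - width);
  return static_cast<int64_t>(static_cast<uint64_t>(field) << lsb);
}

int main() {
  // In Smi range: tagging loses nothing, so out == value << 1.
  int64_t v = 123456;
  assert(Sbfiz(v, 1, 31) == (v << 1));
  // Out of Smi range: sbfiz truncates, the cmp differs, and the generated
  // code falls through to allocate a Mint box instead.
  int64_t big = INT64_C(1) << 30;
  assert(Sbfiz(big, 1, 31) != (big << 1));
  return 0;
}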
@@ -4333,8 +4432,9 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   switch (op_kind()) {
     case Token::kNEGATE: {
       Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
-      __ subs(result, ZR, Operand(value));
+      __ subsw(result, ZR, Operand(value));
       __ b(deopt, VS);
+      __ sxtw(result, result);
       break;
     }
     case Token::kBIT_NOT:

@@ -4345,6 +4445,7 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     default:
       UNREACHABLE();
   }
+  __ AssertSmiInRange(result);
 }

 LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,

@@ -4396,7 +4497,7 @@ void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register value = locs()->in(0).reg();
   const VRegister result = locs()->out(0).fpu_reg();
   __ SmiUntag(TMP, value);
-  __ scvtfdx(result, TMP);
+  __ scvtfdw(result, TMP);
 }

 LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,

@@ -4440,12 +4541,13 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   __ fcmpd(VTMP, VTMP);
   __ b(&do_call, VS);

-  __ fcvtzds(result, VTMP);
+  __ fcvtzdsx(result, VTMP);
   // Overflow is signaled with minint.

   // Check for overflow and that it fits into Smi.
-  __ CompareImmediate(result, 0xC000000000000000);
-  __ b(&do_call, MI);
+  __ AsrImmediate(TMP, result, 30);
+  __ cmp(TMP, Operand(result, ASR, 63));
+  __ b(&do_call, NE);
   __ SmiTag(result);
   __ b(&done);
   __ Bind(&do_call);

@@ -4462,6 +4564,7 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
                                args_info, locs(), ICData::Handle(),
                                ICData::kStatic);
   __ Bind(&done);
+  __ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
 }

 LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,

@@ -4485,11 +4588,13 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   __ fcmpd(value, value);
   __ b(deopt, VS);

-  __ fcvtzds(result, value);
+  __ fcvtzdsx(result, value);
   // Check for overflow and that it fits into Smi.
-  __ CompareImmediate(result, 0xC000000000000000);
-  __ b(deopt, MI);
+  __ AsrImmediate(TMP, result, 30);
+  __ cmp(TMP, Operand(result, ASR, 63));
+  __ b(deopt, NE);
   __ SmiTag(result);
+  __ AssertSmiInRange(result);
 }

 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
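Both double-to-Smi paths now use the same fits-in-a-Smi predicate on the
converted integer: a 64 bit value is a valid 31 bit Smi payload exactly when
bits 63..30 are all copies of the sign, i.e. (x >> 30) == (x >> 63), which is
the AsrImmediate(TMP, result, 30) / cmp(TMP, Operand(result, ASR, 63)) pair.
In C++ (illustrative):

#include <cassert>
#include <cstdint>

static bool FitsInSmiPayload(int64_t x) {
  return (x >> 30) == (x >> 63);  // Bits 63..30 must all equal the sign.
}

int main() {
  assert(FitsInSmiPayload((INT64_C(1) << 30) - 1));    // Max Smi.
  assert(FitsInSmiPayload(-(INT64_C(1) << 30)));       // Min Smi.
  assert(!FitsInSmiPayload(INT64_C(1) << 30));         // One past max.
  assert(!FitsInSmiPayload(-(INT64_C(1) << 30) - 1));  // One past min.
  return 0;
}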
@@ -4758,7 +4863,7 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

   // Check the corner case of dividing the 'MIN_SMI' with -1, in which
   // case we cannot tag the result.
-  __ CompareImmediate(result_div, 0x4000000000000000);
+  __ CompareImmediate(result_div, 0x40000000);
   __ b(deopt, EQ);
   // result_mod <- left - right * result_div.
   __ msub(result_mod, TMP, result_div, result_mod);

@@ -5287,6 +5392,10 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register shifter = locs()->in(1).reg();

   // TODO(johnmccutchan): Use range information to avoid these checks.
+  // Assert this is a legitimate Smi in debug mode, but does not assert
+  // anything about the range relative to the bit width.
+  __ AssertSmiInRange(shifter);
+
   __ SmiUntag(TMP, shifter);
   __ CompareImmediate(TMP, 0);
   // If shift value is < 0, deoptimize.

@@ -5306,7 +5415,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

   __ CompareImmediate(TMP, kShifterLimit);
   // If shift value is > 31, return zero.
-  __ csel(out, out, ZR, GT);
+  __ csel(out, ZR, out, GT);
 }

 LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
@@ -1074,11 +1074,13 @@ CompileType LoadIndexedInstr::ComputeType() const {
     case kTwoByteStringCid:
     case kExternalOneByteStringCid:
     case kExternalTwoByteStringCid:
-    case kTypedDataInt32ArrayCid:
-    case kTypedDataUint32ArrayCid:
       return CompileType::FromCid(kSmiCid);

+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid:
     case kTypedDataInt64ArrayCid:
+      // TODO(erikcorry): Perhaps this can return a faster type.  See
+      // https://github.com/dart-lang/sdk/issues/32582
       return CompileType::Int();

     default:

@@ -1261,16 +1263,24 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

 LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
+  const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
   const intptr_t kNumInputs = 2;
-  const intptr_t kNumTemps = 0;
-  LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  const intptr_t kNumTemps = might_box ? 2 : 0;
+  LocationSummary* summary = new (zone) LocationSummary(
+      zone, kNumInputs, kNumTemps,
+      might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
   summary->set_in(0, Location::RequiresRegister());
   // The smi index is either untagged (element size == 1), or it is left smi
   // tagged (for all element sizes > 1).
   summary->set_in(1, index_scale() == 1 ? Location::WritableRegister()
                                         : Location::RequiresRegister());
   summary->set_out(0, Location::RequiresRegister());
+
+  if (might_box) {
+    summary->set_temp(0, Location::RequiresRegister());
+    summary->set_temp(1, Location::RequiresRegister());
+  }
+
   return summary;
 }

@@ -1302,7 +1312,6 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         default:
          UNREACHABLE();
       }
-      __ SmiTag(result);
       break;
     case kTwoByteStringCid:
     case kExternalTwoByteStringCid:

@@ -1316,12 +1325,34 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         default:
           UNREACHABLE();
       }
-      __ SmiTag(result);
       break;
     default:
       UNREACHABLE();
       break;
   }
+  if (representation_ == kTagged) {
+    if (can_pack_into_smi()) {
+      __ SmiTag(result);
+    } else {
+      // If the value cannot fit in a smi then allocate a mint box for it.
+      Register temp = locs()->temp(0).reg();
+      Register temp2 = locs()->temp(1).reg();
+      // Temp register needs to be manually preserved on allocation slow-path.
+      locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
+
+      ASSERT(temp != result);
+      __ MoveRegister(temp, result);
+      __ SmiTag(result);
+
+      Label done;
+      __ TestImmediate(temp, Immediate(0xc0000000ll));
+      __ j(ZERO, &done);
+      BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
+                                      result, temp2);
+      __ movq(FieldAddress(result, Mint::value_offset()), temp);
+      __ Bind(&done);
+    }
+  }
 }

 Representation StoreIndexedInstr::RequiredInputRepresentation(
@@ -2711,27 +2742,32 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
   if (locs.in(1).IsConstant()) {
     const Object& constant = locs.in(1).constant();
     ASSERT(constant.IsSmi());
-    // shlq operation masks the count to 6 bits.
-    const intptr_t kCountLimit = 0x3F;
+    // shll operation masks the count to 5 bits.
+    const intptr_t kCountLimit = 0x1F;
     const intptr_t value = Smi::Cast(constant).Value();
     ASSERT((0 < value) && (value < kCountLimit));
     if (shift_left->can_overflow()) {
       if (value == 1) {
         // Use overflow flag.
-        __ shlq(left, Immediate(1));
+        __ shll(left, Immediate(1));
         __ j(OVERFLOW, deopt);
+        __ movsxd(left, left);
         return;
       }
       // Check for overflow.
       Register temp = locs.temp(0).reg();
       __ movq(temp, left);
-      __ shlq(left, Immediate(value));
-      __ sarq(left, Immediate(value));
+      __ shll(left, Immediate(value));
+      __ sarl(left, Immediate(value));
+      __ movsxd(left, left);
       __ cmpq(left, temp);
       __ j(NOT_EQUAL, deopt);  // Overflow.
     }
     // Shift for result now we know there is no overflow.
     __ shlq(left, Immediate(value));
+    if (shift_left->is_truncating()) {
+      __ movsxd(left, left);
+    }
     return;
   }

@@ -2742,23 +2778,32 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
     // TODO(srdjan): Implement code below for is_truncating().
     // If left is constant, we know the maximal allowed size for right.
     const Object& obj = shift_left->left()->BoundConstant();
-    if (obj.IsSmi()) {
-      const intptr_t left_int = Smi::Cast(obj).Value();
-      if (left_int == 0) {
-        __ CompareImmediate(right, Immediate(0));
-        __ j(NEGATIVE, deopt);
-        return;
-      }
-      const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
-      const bool right_needs_check =
-          !RangeUtils::IsWithin(right_range, 0, max_right - 1);
-      if (right_needs_check) {
-        __ CompareImmediate(
-            right, Immediate(reinterpret_cast<int64_t>(Smi::New(max_right))));
-        __ j(ABOVE_EQUAL, deopt);
-      }
-      __ SmiUntag(right);
-      __ shlq(left, right);
+    // Even though we have a non-Smi constant on the left, we might still emit
+    // a Smi op here. In that case the Smi check above will have deopted, so
+    // we can't reach this point. Emit a breakpoint to be sure.
+    if (!obj.IsSmi()) {
+      __ int3();
+      return;
+    }
+    const intptr_t left_int = Smi::Cast(obj).Value();
+    if (left_int == 0) {
+      __ CompareImmediate(right, Immediate(0));
+      __ j(NEGATIVE, deopt);
+      return;
+    }
+    const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
+    const bool right_needs_check =
+        !RangeUtils::IsWithin(right_range, 0, max_right - 1);
+    if (right_needs_check) {
+      __ CompareImmediate(
+          right, Immediate(reinterpret_cast<int64_t>(Smi::New(max_right))));
+      __ j(ABOVE_EQUAL, deopt);
+    }
+    __ AssertSmiInRange(right);
+    __ SmiUntag(right);
+    __ shlq(left, right);
+    if (shift_left->is_truncating()) {
+      __ movsxd(left, left);
+    }
     return;
   }

@@ -2782,13 +2827,18 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
       __ xorq(left, left);
       __ jmp(&done, Assembler::kNearJump);
       __ Bind(&is_not_zero);
+      __ AssertSmiInRange(right);
       __ SmiUntag(right);
       __ shlq(left, right);
       __ Bind(&done);
     } else {
+      __ AssertSmiInRange(right);
      __ SmiUntag(right);
      __ shlq(left, right);
     }
+    if (shift_left->is_truncating()) {
+      __ movsxd(left, left);
+    }
   } else {
     if (right_needs_check) {
       ASSERT(shift_left->CanDeoptimize());

@@ -2798,16 +2848,18 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
     }
     // Left is not a constant.
     Register temp = locs.temp(0).reg();
-    // Check if count too large for handling it inlined.
-    __ movq(temp, left);
+    // Check if count is too large for handling it inlined.
+    __ movl(temp, left);
+    __ AssertSmiInRange(right);
     __ SmiUntag(right);
     // Overflow test (preserve temp and right);
-    __ shlq(left, right);
-    __ sarq(left, right);
-    __ cmpq(left, temp);
+    __ shll(temp, right);
+    __ sarl(temp, right);
+    __ cmpl(temp, left);
     __ j(NOT_EQUAL, deopt);  // Overflow.
     // Shift for result now we know there is no overflow.
     __ shlq(left, right);
+    ASSERT(!shift_left->is_truncating());
   }
 }
@@ -2907,19 +2959,23 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   switch (op_kind()) {
     case Token::kADD:
       __ movq(result, left);
-      __ addq(result, right);
+      __ addl(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
       break;
     case Token::kSUB:
       __ movq(result, left);
-      __ subq(result, right);
+      __ subl(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
       break;
     case Token::kMUL:
       __ movq(result, left);
+      __ AssertSmiInRange(result);
       __ SmiUntag(result);
-      __ imulq(result, right);
+      __ imull(result, right);
       __ j(OVERFLOW, slow_path->entry_label());
+      __ movsxd(result, result);
       break;
     case Token::kBIT_OR:
       ASSERT(left == result);

@@ -2940,13 +2996,15 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ j(ABOVE_EQUAL, slow_path->entry_label());

       __ movq(RCX, right);
+      __ AssertSmiInRange(RCX);
       __ SmiUntag(RCX);
       __ movq(result, left);
-      __ shlq(result, RCX);
+      __ shll(result, RCX);
       __ movq(TMP, result);
-      __ sarq(TMP, RCX);
-      __ cmpq(TMP, left);
+      __ sarl(TMP, RCX);
+      __ cmpl(TMP, left);
       __ j(NOT_EQUAL, slow_path->entry_label());
+      __ movsxd(result, result);
       break;
     case Token::kSHR: {
       Label shift_count_ok;

@@ -2955,6 +3013,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ cmpq(right, Immediate(Smi::RawValue(Smi::kBits)));
       __ j(ABOVE_EQUAL, slow_path->entry_label());

+      __ AssertSmiInRange(left);
+      __ AssertSmiInRange(right);
       __ movq(RCX, right);
       __ SmiUntag(RCX);
       __ movq(result, left);
@@ -3212,20 +3272,41 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     const int64_t imm = reinterpret_cast<int64_t>(constant.raw());
     switch (op_kind()) {
      case Token::kADD: {
-        __ AddImmediate(left, Immediate(imm));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ AddImmediate(left, Immediate(imm), Assembler::k32Bit);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ AddImmediate(left, Immediate(imm), Assembler::k64Bit);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kSUB: {
-        __ SubImmediate(left, Immediate(imm));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ SubImmediate(left, Immediate(imm), Assembler::k32Bit);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ SubImmediate(left, Immediate(imm), Assembler::k64Bit);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kMUL: {
         // Keep left value tagged and untag right value.
         const intptr_t value = Smi::Cast(constant).Value();
-        __ MulImmediate(left, Immediate(value));
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ MulImmediate(left, Immediate(value), Assembler::k32Bit);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ MulImmediate(left, Immediate(value), Assembler::k64Bit);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kTRUNCDIV: {

@@ -3237,7 +3318,9 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         ASSERT(kSmiTagSize == 1);
         Register temp = locs()->temp(0).reg();
         __ movq(temp, left);
-        __ sarq(temp, Immediate(63));
+        // Since Smis are sign extended this is enough shift to put all-1s or
+        // all-0s in the temp register.
+        __ sarq(temp, Immediate(31));
         ASSERT(shift_count > 1);  // 1, -1 case handled above.
         __ shrq(temp, Immediate(64 - shift_count));
         __ addq(left, temp);

@@ -3266,8 +3349,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       }

       case Token::kSHR: {
-        // sarq operation masks the count to 6 bits.
-        const intptr_t kCountLimit = 0x3F;
+        // The sarq operation masks the count to 6 bits, but any shift between
+        // 31 and 63 gives the same result because 32 bit Smis are stored sign
+        // extended in the registers.
+        const intptr_t kCountLimit = 0x1F;
         const intptr_t value = Smi::Cast(constant).Value();
         __ sarq(left,
                 Immediate(Utils::Minimum(value + kSmiTagSize, kCountLimit)));

@@ -3279,6 +3364,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         UNREACHABLE();
         break;
     }
+    __ AssertSmiInRange(left);
     return;
   }  // locs()->in(1).IsConstant().

@@ -3286,19 +3372,40 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     const Address& right = locs()->in(1).ToStackSlotAddress();
     switch (op_kind()) {
       case Token::kADD: {
-        __ addq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ addl(left, right);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ addq(left, right);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kSUB: {
-        __ subq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ subl(left, right);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ subq(left, right);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kMUL: {
         __ SmiUntag(left);
-        __ imulq(left, right);
-        if (deopt != NULL) __ j(OVERFLOW, deopt);
+        if (deopt != NULL) {
+          __ imull(left, right);
+          __ j(OVERFLOW, deopt);
+        } else {
+          __ imulq(left, right);
+        }
+        if (deopt != NULL || is_truncating()) {
+          __ movsxd(left, left);
+        }
         break;
       }
       case Token::kBIT_AND: {

@@ -3327,19 +3434,40 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   Register right = locs()->in(1).reg();
   switch (op_kind()) {
     case Token::kADD: {
-      __ addq(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != NULL) {
+        __ addl(left, right);
+        __ j(OVERFLOW, deopt);
+      } else {
+        __ addq(left, right);
+      }
+      if (deopt != NULL || is_truncating()) {
+        __ movsxd(left, left);
+      }
       break;
     }
     case Token::kSUB: {
-      __ subq(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != NULL) {
+        __ subl(left, right);
+        __ j(OVERFLOW, deopt);
+      } else {
+        __ subq(left, right);
+      }
+      if (deopt != NULL || is_truncating()) {
+        __ movsxd(left, left);
+      }
       break;
     }
     case Token::kMUL: {
       __ SmiUntag(left);
-      __ imulq(left, right);
-      if (deopt != NULL) __ j(OVERFLOW, deopt);
+      if (deopt != NULL) {
+        __ imull(left, right);
+        __ j(OVERFLOW, deopt);
+      } else {
+        __ imulq(left, right);
+      }
+      if (deopt != NULL || is_truncating()) {
+        __ movsxd(left, left);
+      }
       break;
     }
     case Token::kBIT_AND: {

@@ -3358,8 +3486,6 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       break;
     }
     case Token::kTRUNCDIV: {
-      Label not_32bit, done;
-
       Register temp = locs()->temp(0).reg();
       ASSERT(left == RAX);
       ASSERT((right != RDX) && (right != RAX));

@@ -3370,43 +3496,20 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
        __ testq(right, right);
        __ j(ZERO, deopt);
      }
-      // Check if both operands fit into 32bits as idiv with 64bit operands
-      // requires twice as many cycles and has much higher latency.
-      // We are checking this before untagging them to avoid corner case
-      // dividing INT_MAX by -1 that raises exception because quotient is
-      // too large for 32bit register.
-      __ movsxd(temp, left);
-      __ cmpq(temp, left);
-      __ j(NOT_EQUAL, &not_32bit);
-      __ movsxd(temp, right);
-      __ cmpq(temp, right);
-      __ j(NOT_EQUAL, &not_32bit);
-
       // Both operands are 31bit smis. Divide using 32bit idiv.
       __ SmiUntag(left);
       __ SmiUntag(right);
       __ cdq();
       __ idivl(right);
-      __ movsxd(result, result);
-      __ jmp(&done);
-
-      // Divide using 64bit idiv.
-      __ Bind(&not_32bit);
-      __ SmiUntag(left);
-      __ SmiUntag(right);
-      __ cqo();         // Sign extend RAX -> RDX:RAX.
-      __ idivq(right);  // RAX: quotient, RDX: remainder.
       // Check the corner case of dividing the 'MIN_SMI' with -1, in which
       // case we cannot tag the result.
-      __ CompareImmediate(result, Immediate(0x4000000000000000));
+      __ cmpl(result, Immediate(0x40000000));
       __ j(EQUAL, deopt);
-      __ Bind(&done);
+      __ movsxd(result, result);
       __ SmiTag(result);
       break;
     }
     case Token::kMOD: {
-      Label not_32bit, div_done;
-
       Register temp = locs()->temp(0).reg();
       ASSERT(left == RDX);
       ASSERT((right != RDX) && (right != RAX));

@@ -3417,17 +3520,6 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ testq(right, right);
       __ j(ZERO, deopt);
     }
-      // Check if both operands fit into 32bits as idiv with 64bit operands
-      // requires twice as many cycles and has much higher latency.
-      // We are checking this before untagging them to avoid corner case
-      // dividing INT_MAX by -1 that raises exception because quotient is
-      // too large for 32bit register.
-      __ movsxd(temp, left);
-      __ cmpq(temp, left);
-      __ j(NOT_EQUAL, &not_32bit);
-      __ movsxd(temp, right);
-      __ cmpq(temp, right);
-      __ j(NOT_EQUAL, &not_32bit);
       // Both operands are 31bit smis. Divide using 32bit idiv.
       __ SmiUntag(left);
       __ SmiUntag(right);

@@ -3435,16 +3527,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ cdq();
       __ idivl(right);
       __ movsxd(result, result);
-      __ jmp(&div_done);
-
-      // Divide using 64bit idiv.
-      __ Bind(&not_32bit);
-      __ SmiUntag(left);
-      __ SmiUntag(right);
-      __ movq(RAX, RDX);
-      __ cqo();         // Sign extend RAX -> RDX:RAX.
-      __ idivq(right);  // RAX: quotient, RDX: remainder.
-      __ Bind(&div_done);
       // res = left % right;
       // if (res < 0) {
       //   if (right < 0) {

@@ -3482,7 +3565,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         __ j(LESS, deopt);
       }
       __ SmiUntag(right);
-      // sarq operation masks the count to 6 bits.
+      // The sarq operation masks the count to 6 bits, but any shift between 31
+      // and 63 gives the same result because 32 bit Smis are stored sign
+      // extended in the registers. We check for 63 in order to take the branch
+      // more predictably.
       const intptr_t kCountLimit = 0x3F;
       if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
         __ CompareImmediate(right, Immediate(kCountLimit));
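The TRUNCDIV rewrite above can drop the old "do operands fit in 32 bits"
preflight entirely: 32 bit Smis always fit idivl, and the only remaining
hazard is the classic corner case of dividing the minimum Smi by -1, whose
quotient is one past the maximum Smi and cannot be retagged — hence the
cmpl(result, Immediate(0x40000000)) deopt. Worked out in C++ (illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kMinSmi = -(INT64_C(1) << 30);
  const int64_t kMaxSmi = (INT64_C(1) << 30) - 1;
  int64_t quotient = kMinSmi / -1;
  assert(quotient == INT64_C(0x40000000));  // The value the deopt check matches.
  assert(quotient > kMaxSmi);               // Out of Smi range: must deoptimize.
  return 0;
}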
@@ -3695,41 +3781,52 @@ void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
                  GetDeoptId(), ICData::kDeoptUnboxInteger)
            : NULL;
-  ASSERT(value == locs()->out(0).reg());
+  Label done_and_no_need_to_check_range;
+
+  ASSERT(locs()->out(0).reg() == value);

   if (value_cid == kSmiCid) {
+    __ AssertSmiInRange(value);
     __ SmiUntag(value);
+    return;
   } else if (value_cid == kMintCid) {
     __ movq(value, FieldAddress(value, Mint::value_offset()));
   } else if (!CanDeoptimize()) {
     // Type information is not conclusive, but range analysis found
     // the value to be in int64 range. Therefore it must be a smi
     // or mint value.
     ASSERT(is_truncating());
     Label done;
     __ SmiUntag(value);
     __ j(NOT_CARRY, &done, Assembler::kNearJump);
-    __ movq(value, Address(value, TIMES_2, Mint::value_offset()));
+    // Multiply by two in addressing mode because we erroneously
+    // untagged a pointer by dividing it by two.
+    Address value_field(value, TIMES_2, Mint::value_offset());
+    if (is_truncating()) {
+      __ movl(value, value_field);
+      __ movsxd(value, value);
+    } else {
+      __ movq(value, value_field);
+    }
     __ Bind(&done);
     return;
   } else {
-    Label done;
     // Optimistically untag value.
-    __ SmiUntagOrCheckClass(value, kMintCid, &done);
+    __ SmiUntagOrCheckClass(value, kMintCid, &done_and_no_need_to_check_range);
     __ j(NOT_EQUAL, deopt);
-    // Undo untagging by multiplying value with 2.
+    // Multiply by two in addressing mode because we erroneously
+    // untagged a pointer by dividing it by two.
     __ movq(value, Address(value, TIMES_2, Mint::value_offset()));
-    __ Bind(&done);
   }

-  // TODO(vegorov): as it is implemented right now truncating unboxing would
-  // leave "garbage" in the higher word.
+  // We get here for the Mint cases, which might be out of range for an
+  // unboxed int32 output.
+
+  // TODO(vegorov): Truncating unboxing leaves garbage in the higher word.
+  // Is this the best semantics?
   if (!is_truncating() && (deopt != NULL)) {
     ASSERT(representation() == kUnboxedInt32);
-    Register temp = locs()->temp(0).reg();
+    const Register temp = locs()->temp(0).reg();
     __ movsxd(temp, value);
     __ cmpq(temp, value);
     __ j(NOT_EQUAL, deopt);
   }
+  __ Bind(&done_and_no_need_to_check_range);
 }

 LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,

@@ -3737,27 +3834,61 @@ LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
   ASSERT((from_representation() == kUnboxedInt32) ||
          (from_representation() == kUnboxedUint32));
   const intptr_t kNumInputs = 1;
-  const intptr_t kNumTemps = 0;
+  const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
   LocationSummary* summary = new (zone)
-      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-  summary->set_in(0, Location::RequiresRegister());
-  summary->set_out(0, Location::RequiresRegister());
+      LocationSummary(zone, kNumInputs, kNumTemps,
+                      ValueFitsSmi() ? LocationSummary::kNoCall
+                                     : LocationSummary::kCallOnSlowPath);
+  const bool needs_writable_input =
+      ValueFitsSmi() || (from_representation() == kUnboxedUint32);
+  summary->set_in(0, needs_writable_input ? Location::RequiresRegister()
+                                          : Location::WritableRegister());
+  if (!ValueFitsSmi()) {
+    summary->set_temp(0, Location::RequiresRegister());
+  }
+  summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
+                                     : Location::RequiresRegister());
   return summary;
 }

 void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register value = locs()->in(0).reg();
   const Register out = locs()->out(0).reg();
   ASSERT(value != out);
+  Label done;

   ASSERT(kSmiTagSize == 1);
   if (from_representation() == kUnboxedInt32) {
     __ movsxd(out, value);
|
||||
__ MoveRegister(out, value);
|
||||
ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
|
||||
__ addl(out, out);
|
||||
__ movsxd(out, out); // Does not affect flags.
|
||||
} else {
|
||||
ASSERT(from_representation() == kUnboxedUint32);
|
||||
// Unsigned.
|
||||
__ movl(out, value);
|
||||
__ SmiTag(out);
|
||||
}
|
||||
|
||||
if (!ValueFitsSmi()) {
|
||||
if (from_representation() == kUnboxedInt32) {
|
||||
__ j(NO_OVERFLOW, &done);
|
||||
} else {
|
||||
ASSERT(value != out);
|
||||
__ TestImmediate(value, Immediate(0xc0000000ll));
|
||||
__ j(ZERO, &done);
|
||||
}
|
||||
// Allocate a mint.
|
||||
// Value input is a writable register and we have to inform the compiler of
|
||||
// the type so it can be preserved untagged on the slow path
|
||||
locs()->live_registers()->Add(locs()->in(0), kUnboxedInt32);
|
||||
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
|
||||
locs()->temp(0).reg());
|
||||
if (from_representation() == kUnboxedInt32) {
|
||||
__ movsxd(value, value);
|
||||
} else {
|
||||
__ movl(value, value);
|
||||
}
|
||||
__ movq(FieldAddress(out, Mint::value_offset()), value);
|
||||
__ Bind(&done);
|
||||
}
|
||||
__ SmiTag(out);
|
||||
}
|
||||
|
||||
LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
|
||||
|
@ -3779,15 +3910,20 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
|
|||
void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
|
||||
const Register out = locs()->out(0).reg();
|
||||
const Register value = locs()->in(0).reg();
|
||||
__ MoveRegister(out, value);
|
||||
__ SmiTag(out);
|
||||
__ leaq(out, Address(value, value, TIMES_1, 0));
|
||||
if (!ValueFitsSmi()) {
|
||||
const Register temp = locs()->temp(0).reg();
|
||||
Label done;
|
||||
__ j(NO_OVERFLOW, &done);
|
||||
__ movq(temp, value);
|
||||
__ sarq(temp, Immediate(30));
|
||||
__ addq(temp, Immediate(1));
|
||||
__ cmpq(temp, Immediate(2));
|
||||
__ j(BELOW, &done);
|
||||
|
||||
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
|
||||
temp);
|
||||
__ movq(FieldAddress(out, Mint::value_offset()), value);
|
||||
|
||||
__ Bind(&done);
|
||||
}
|
||||
}
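
// The sarq/addq/cmpq sequence above is a branch-free range check: a value
// fits in a 31-bit Smi iff bits 30..63 all equal the sign bit. The same
// test in C++, as a sketch (assuming kSmiBits == 30, not VM code):

#include <cstdint>

bool FitsInSmi(int64_t value) {
  // (value >> 30) is -1 or 0 exactly when value is in [-2^30, 2^30 - 1];
  // adding 1 maps that to 0 or 1, i.e. unsigned-below 2.
  return static_cast<uint64_t>((value >> 30) + 1) < 2;
}
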
@@ -4349,8 +4485,9 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
__ cmpq(value, Immediate(-0x80000000ll));
__ j(EQUAL, deopt);
__ negq(value);
__ j(OVERFLOW, deopt);
break;
}
case Token::kBIT_NOT:

@@ -4499,6 +4636,7 @@ LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
FpuRegister result = locs()->out(0).fpu_reg();
__ AssertSmiInRange(value);
__ SmiUntag(value);
__ cvtsi2sdq(result, value);
}

@@ -4528,14 +4666,15 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result != value_obj);
ASSERT(result != temp);
__ movsd(value_double, FieldAddress(value_obj, Double::value_offset()));
__ cvttsd2siq(result, value_double);
__ cvttsd2sil(result, value_double);
// Overflow is signalled with minint.
Label do_call, done;
// Check for overflow and that it fits into Smi.
__ movq(temp, result);
__ shlq(temp, Immediate(1));
__ movl(temp, result);
__ shll(temp, Immediate(1));
__ j(OVERFLOW, &do_call, Assembler::kNearJump);
__ SmiTag(result);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(result, temp);
__ jmp(&done);
__ Bind(&do_call);
__ pushq(value_obj);

@@ -4571,14 +4710,15 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
XmmRegister value = locs()->in(0).fpu_reg();
Register temp = locs()->temp(0).reg();

__ cvttsd2siq(result, value);
__ cvttsd2sil(result, value);
// Overflow is signalled with minint.
Label do_call, done;
// Check for overflow and that it fits into Smi.
__ movq(temp, result);
__ shlq(temp, Immediate(1));
__ movl(temp, result);
__ shll(temp, Immediate(1));
__ j(OVERFLOW, deopt);
__ SmiTag(result);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(result, temp);
}

LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,

@@ -4877,6 +5017,7 @@ LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
// Both inputs must be writable because they will be untagged.
summary->set_in(0, Location::RegisterLocation(RAX));
summary->set_in(1, Location::WritableRegister());
// Output is a pair of registers.
summary->set_out(0, Location::Pair(Location::RegisterLocation(RAX),
Location::RegisterLocation(RDX)));
return summary;

@@ -4891,50 +5032,26 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
PairLocation* pair = locs()->out(0).AsPairLocation();
Register result1 = pair->At(0).reg();
Register result2 = pair->At(1).reg();
Label not_32bit, done;
Register temp = RDX;
ASSERT(left == RAX);
ASSERT((right != RDX) && (right != RAX));
ASSERT(result1 == RAX);
ASSERT(result2 == RDX);
if (RangeUtils::CanBeZero(divisor_range())) {
// Handle divide by zero in runtime.
__ testq(right, right);
__ j(ZERO, deopt);
}
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency.
// We are checking this before untagging them to avoid corner case
// dividing INT_MAX by -1 that raises exception because quotient is
// too large for 32bit register.
__ movsxd(temp, left);
__ cmpq(temp, left);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(temp, right);
__ cmpq(temp, right);
__ j(NOT_EQUAL, &not_32bit);

ASSERT(left == RAX);
ASSERT((right != RDX) && (right != RAX));
ASSERT(result1 == RAX);
ASSERT(result2 == RDX);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(left);
__ SmiUntag(right);
__ cdq();
__ idivl(right);
__ movsxd(RAX, RAX);
__ movsxd(RDX, RDX);
__ jmp(&done);

// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(left);
__ SmiUntag(right);
__ cqo(); // Sign extend RAX -> RDX:RAX.
__ idivq(right); // RAX: quotient, RDX: remainder.
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(RAX, Immediate(0x4000000000000000));
__ cmpl(RAX, Immediate(0x40000000));
__ j(EQUAL, deopt);
__ Bind(&done);

__ movsxd(RAX, RAX);
__ movsxd(RDX, RDX);
// Modulo correction (RDX).
// res = left % right;
// if (res < 0) {

@@ -4944,16 +5061,16 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// res = res + right;
// }
// }
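
// The pseudocode comment above (split across the two hunks) is the standard
// adjustment that makes Dart's % result non-negative. A short C++ sketch of
// that correction (illustration only; right must be non-zero):

#include <cstdint>

int64_t DartModulo(int64_t left, int64_t right) {
  int64_t res = left % right;  // C++ remainder can be negative
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;  // always in [0, |right|)
}
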
Label all_done;
Label done;
__ cmpq(RDX, Immediate(0));
__ j(GREATER_EQUAL, &all_done, Assembler::kNearJump);
__ j(GREATER_EQUAL, &done, Assembler::kNearJump);
// Result is negative, adjust it.
if ((divisor_range() == NULL) || divisor_range()->Overlaps(-1, 1)) {
Label subtract;
__ cmpq(right, Immediate(0));
__ j(LESS, &subtract, Assembler::kNearJump);
__ addq(RDX, right);
__ jmp(&all_done, Assembler::kNearJump);
__ jmp(&done, Assembler::kNearJump);
__ Bind(&subtract);
__ subq(RDX, right);
} else if (divisor_range()->IsPositive()) {

@@ -4963,7 +5080,7 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Right is negative.
__ subq(RDX, right);
}
__ Bind(&all_done);
__ Bind(&done);

__ SmiTag(RAX);
__ SmiTag(RDX);

@@ -5305,6 +5422,7 @@ void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Code for a variable shift amount.
// Deoptimize if shift count is > 63 or negative.
// Sarq and shlq instructions mask the count to 6 bits.
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
if (!IsShiftCountInRange()) {
__ cmpq(RCX, Immediate(kMintShiftCountLimit));

@@ -5337,15 +5455,15 @@ void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}

CompileType BinaryUint32OpInstr::ComputeType() const {
return CompileType::FromCid(kSmiCid);
return CompileType::Int();
}

CompileType ShiftUint32OpInstr::ComputeType() const {
return CompileType::FromCid(kSmiCid);
return CompileType::Int();
}

CompileType UnaryUint32OpInstr::ComputeType() const {
return CompileType::FromCid(kSmiCid);
return CompileType::Int();
}

LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,

@@ -5460,6 +5578,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label zero;

// TODO(johnmccutchan): Use range information to avoid these checks.
__ AssertSmiInRange(shifter);
__ SmiUntag(shifter);
__ cmpq(shifter, Immediate(0));
// If shift value is < 0, deoptimize.

@@ -5484,7 +5603,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {

__ Bind(&zero);
// Shift was greater than 31 bits, just return zero.
__ xorq(left, left);
__ xorl(left, left);

// Exit path.
__ Bind(&done);

@@ -5525,8 +5644,8 @@ void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent but we want to normalize
// upperbits for safety reasons.
// TODO(vegorov) if we ensure that we never use upperbits we could
// avoid this.
// TODO(vegorov) if we ensure that we never leave garbage in the upper bits
// we could avoid this.
__ movl(out, value);
} else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
// Representations are bitwise equivalent.

@@ -66,17 +66,10 @@ TEST_CASE(RangeTests) {
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -1, 1, 63, 63, RangeBoundary(kMinInt64),
RangeBoundary::PositiveInfinity());
if (kBitsPerWord == 64) {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(-(1 << 30)),
RangeBoundary(1 << 30));
} else {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
}
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -100, 0, 0, 64, RangeBoundary::NegativeInfinity(),

@@ -1558,10 +1558,10 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
}

void CallSpecializer::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) {
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
// Note that on ARM64 the result can always be packed into a Smi, so this
// is never triggered.
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
if (!instr->can_pack_into_smi()) instr->set_representation(kUnboxedInt64);
#endif
}

static bool CidTestResultsContains(const ZoneGrowableArray<intptr_t>& results,

@@ -265,8 +265,9 @@ static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
__ adds(R0, R0, Operand(R1)); // Adds.
__ addsw(R0, R0, Operand(R1)); // Adds.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}

@@ -278,8 +279,9 @@ void Intrinsifier::Integer_add(Assembler* assembler) {
void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subs(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ subsw(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}

@@ -287,8 +289,9 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subs(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}

@@ -299,9 +302,9 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through); // checks two smis
__ SmiUntag(R0); // Untags R6. We only want result shifted by one.

__ mul(TMP, R0, R1);
__ smulh(TMP2, R0, R1);
// TMP: result bits 64..127.
__ smull(TMP, R0, R1);
__ AsrImmediate(TMP2, TMP, 31);
// TMP: result bits 31..63.
__ cmp(TMP2, Operand(TMP, ASR, 63));
__ b(&fall_through, NE);
__ mov(R0, TMP);

@@ -417,7 +420,7 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {

// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ CompareImmediate(R0, 0x4000000000000000);
__ CompareImmediate(R0, 0x40000000);
__ b(&fall_through, EQ);
__ SmiTag(R0); // Not equal. Okay to tag and return.
__ ret(); // Return.

@@ -428,8 +431,9 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
Label fall_through;
__ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument.
__ BranchIfNotSmi(R0, &fall_through);
__ negs(R0, R0);
__ negsw(R0, R0);
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}

@@ -488,9 +492,9 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
// Check if count too large for handling it inlined.
__ SmiUntag(TMP, right); // SmiUntag right into TMP.
// Overflow test (preserve left, right, and TMP);
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
__ lslvw(temp, left, TMP);
__ asrvw(TMP2, temp, TMP);
__ cmpw(left, Operand(TMP2));
__ b(&fall_through, NE); // Overflow.
// Shift for result now we know there is no overflow.
__ lslv(result, left, TMP);

@@ -563,6 +567,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {

__ CompareClassId(R0, kDoubleCid);
__ b(&fall_through, EQ);
__ AssertSmiInRange(R1);
__ LoadObject(R0, Bool::False()); // Smi == Mint -> false.
__ ret();

@@ -573,6 +578,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ b(&fall_through, NE);
// Receiver is Mint, return false if right is Smi.
__ BranchIfNotSmi(R0, &fall_through);
__ AssertSmiInRange(R0);
__ LoadObject(R0, Bool::False());
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.

@@ -1495,11 +1501,12 @@ void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&fall_through, VS);

__ fcvtzds(R0, V0);
__ fcvtzdsx(R0, V0);
// Overflow is signaled with minint.
// Check for overflow and that it fits into Smi.
__ CompareImmediate(R0, 0xC000000000000000);
__ b(&fall_through, MI);
__ AsrImmediate(TMP, R0, 30);
__ cmp(TMP, Operand(R0, ASR, 63));
__ b(&fall_through, NE);
__ SmiTag(R0);
__ ret();
__ Bind(&fall_through);

@@ -1516,10 +1523,10 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&double_hash, VS);

// Convert double value to signed 64-bit int in R0 and back to a
// Convert double value to signed 32-bit int in R0 and back to a
// double value in V1.
__ fcvtzds(R0, V0);
__ scvtfdx(V1, R0);
__ fcvtzdsw(R0, V0);
__ scvtfdw(V1, R0);

// Tag the int as a Smi, making sure that it fits; this checks for
// overflow in the conversion from double to int. Conversion

@@ -1527,8 +1534,9 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// INT64_MAX or INT64_MIN (saturation).
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ adds(R0, R0, Operand(R0));
__ addsw(R0, R0, Operand(R0));
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.

// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
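
// The same round-trip (double -> int -> double, then compare) drives the
// hash-code fast path on x64 below as well. A hedged C++ sketch of the
// idea; FallbackDoubleHash is a hypothetical slow path, not the VM's:

#include <cstdint>
#include <cstring>

static int64_t FallbackDoubleHash(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // fold the raw bit pattern
  return static_cast<int64_t>(bits ^ (bits >> 32)) & 0x3FFFFFFF;
}

int64_t DoubleHashCode(double value) {
  if (value >= -2147483648.0 && value < 2147483648.0) {  // int32 range
    const int32_t as_int = static_cast<int32_t>(value);  // truncate
    if (static_cast<double>(as_int) == value) {
      return as_int;  // integral double: hash to the integer itself
    }
  }
  return FallbackDoubleHash(value);  // NaN and non-integral values
}
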

@@ -269,8 +269,10 @@ void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument.
__ addq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
__ addl(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -284,8 +286,10 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual minuend of subtraction.
__ subq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
__ subl(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -295,10 +299,13 @@ void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual subtrahend of subtraction.
__ AssertSmiInRange(RAX);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));
__ subq(RAX, RCX);
__ AssertSmiInRange(RAX);
__ subl(RAX, RCX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -308,10 +315,12 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ SmiUntag(RAX);
__ imulq(RAX, Address(RSP, +2 * kWordSize));
__ imull(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -333,7 +342,9 @@ void Intrinsifier::Integer_mul(Assembler* assembler) {
// RAX: Untagged fallthrough result (remainder to be adjusted), or
// RAX: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
Label return_zero, try_modulo, not_32bit, done;
Label return_zero, try_modulo, not_32bit;
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
// Check for quick zero results.
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &return_zero, Assembler::kNearJump);

@@ -355,33 +366,12 @@ static void EmitRemainderOperation(Assembler* assembler) {

__ Bind(&try_modulo);

// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency. We are checking
// this before untagging them to avoid corner case dividing INT_MAX by -1 that
// raises exception because quotient is too large for 32bit register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);

// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cdq();
__ idivl(RCX);
__ movsxd(RAX, RDX);
__ jmp(&done, Assembler::kNearJump);

// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cqo();
__ idivq(RCX);
__ movq(RAX, RDX);
__ Bind(&done);
}

// Implementation:

@@ -396,7 +386,9 @@ static void EmitRemainderOperation(Assembler* assembler) {
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
Label fall_through, negative_result;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
__ movq(RCX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RCX);
// RAX: Tagged left (dividend).
// RCX: Tagged right (divisor).
__ cmpq(RCX, Immediate(0));

@@ -430,21 +422,17 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
Label fall_through, not_32bit;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX: right argument (divisor)
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument (dividend).
__ AssertSmiInRange(RAX);

// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency. We are checking
// this before untagging them to avoid corner case dividing INT_MAX by -1 that
// raises exception because quotient is too large for 32bit register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit);
// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through);

// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(RAX);

@@ -454,21 +442,6 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
__ movsxd(RAX, RAX);
__ SmiTag(RAX); // Result is guaranteed to fit into a smi.
__ ret();

// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ pushq(RDX); // Preserve RDX in case of 'fall_through'.
__ cqo();
__ idivq(RCX);
__ popq(RDX);
// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(0x4000000000000000));
__ j(EQUAL, &fall_through);
__ SmiTag(RAX);
__ ret();
__ Bind(&fall_through);
}

@@ -477,8 +450,10 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through, Assembler::kNearJump); // Non-smi value.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ negq(RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -487,6 +462,7 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
// RAX is the right argument.
__ andq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.

@@ -502,6 +478,7 @@ void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
__ orq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.
__ ret();

@@ -517,6 +494,7 @@ void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ xorq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);

@@ -532,28 +510,32 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
Label fall_through, overflow;
TestBothArgumentsSmis(assembler, &fall_through);
// Shift value is in RAX. Compare with tagged Smi.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits)));
__ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump);

__ SmiUntag(RAX);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);

// Overflow test - all the shifted-out bits must be same as the sign bit.
__ movq(RDI, RAX);
__ shlq(RAX, RCX);
__ sarq(RAX, RCX);
__ shll(RAX, RCX);
__ sarl(RAX, RCX);
__ movsxd(RAX, RAX);
__ cmpq(RAX, RDI);
__ j(NOT_EQUAL, &overflow, Assembler::kNearJump);

__ shlq(RAX, RCX); // Shift for result now we know there is no overflow.
__ shlq(RDI, RCX); // Shift for result now we know there is no overflow.
__ movq(RAX, RDI);

// RAX is a correctly tagged Smi.
__ ret();

__ Bind(&overflow);
// Mint is rarely used on x64 (only for integers requiring 64 bit instead of
// 63 bits as represented by Smi).
// Mint is used on x64 for integers requiring 64 bit instead of 31 bits as
// represented by Smi.
__ Bind(&fall_through);
}

@@ -561,6 +543,7 @@ static void CompareIntegers(Assembler* assembler, Condition true_condition) {
Label fall_through, true_label;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains the right argument.
__ AssertSmiInRange(RAX);
__ cmpq(Address(RSP, +2 * kWordSize), RAX);
__ j(true_condition, &true_label, Assembler::kNearJump);
__ LoadObject(RAX, Bool::False());

@@ -606,6 +589,9 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ orq(RAX, RCX);
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
// Or-ing them together should still leave them both as compressible smis.
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
// Both arguments are smi, '===' is good enough.
__ LoadObject(RAX, Bool::False());
__ ret();

@@ -623,9 +609,21 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
// Left (receiver) is Smi, return false if right is not Double.
// Note that an instance of Mint or Bigint never contains a value that can be
// represented by Smi.
__ AssertSmiInRange(RAX);
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ CompareClassId(RAX, kDoubleCid);
__ j(EQUAL, &fall_through);
#if defined(DEBUG)
Label ok;
__ CompareClassId(RAX, kMintCid);
__ j(NOT_EQUAL, &ok);
__ movq(RAX, FieldAddress(RAX, Mint::value_offset()));
__ sarq(RCX, Immediate(1));
__ cmpq(RAX, RCX);
__ j(NOT_EQUAL, &ok);
__ Stop("Smi wrapped in a Mint");
__ Bind(&ok);
#endif
__ LoadObject(RAX, Bool::False());
__ ret();

@@ -637,6 +635,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
__ AssertSmiInRange(RAX);
// Smi == Mint -> false.
__ LoadObject(RAX, Bool::False());
__ ret();

@@ -666,6 +665,7 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
__ Bind(&shift_count_ok);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX); // Value.
__ sarq(RAX, RCX);
__ SmiTag(RAX);

@@ -676,6 +676,7 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
// Argument is Smi (receiver).
void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
__ notq(RAX);
__ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();

@@ -684,6 +685,7 @@ void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
void Intrinsifier::Smi_bitLength(Assembler* assembler) {
ASSERT(kSmiTagShift == 1);
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
// XOR with sign bit to complement bits if value is negative.
__ movq(RCX, RAX);
__ sarq(RCX, Immediate(63)); // All 0 or all 1.

@@ -709,6 +711,7 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up.
__ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read.
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RSI, RCX);

@@ -744,6 +747,7 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {

__ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RDX, RCX);

@@ -1231,6 +1235,7 @@ static void CompareDoubles(Assembler* assembler, Condition true_condition) {
__ LoadObject(RAX, Bool::True());
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);

@@ -1291,6 +1296,7 @@ static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);

@@ -1320,6 +1326,7 @@ void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));

@@ -1342,6 +1349,7 @@ void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM0, RAX);
const Class& double_class =

@@ -1412,14 +1420,15 @@ void Intrinsifier::Double_getIsNegative(Assembler* assembler) {
void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
__ cvttsd2sil(RAX, XMM0);
// Overflow is signalled with minint.
Label fall_through;
// Check for overflow and that it fits into Smi.
__ movq(RCX, RAX);
__ shlq(RCX, Immediate(1));
__ shll(RCX, Immediate(1));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ SmiTag(RAX);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(RAX, RCX);
__ ret();
__ Bind(&fall_through);
}
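
// Two checks are folded together above: cvttsd2sil signals conversion
// overflow by producing INT32_MIN, and the 32-bit shift-by-one then
// overflows for anything outside the 31-bit Smi range, INT32_MIN included.
// A hedged C++ sketch of the combined test (hypothetical helper):

#include <cstdint>

bool DoubleToSmi(double input, int32_t* out) {
  // Reject anything outside [-2^30, 2^30); NaN also fails this test.
  if (!(input >= -1073741824.0 && input < 1073741824.0)) {
    return false;  // caller takes the slow path
  }
  *out = static_cast<int32_t>(input);  // truncates toward zero
  return true;
}
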
@@ -1431,16 +1440,17 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// back to a double in XMM1.
__ movq(RCX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RCX, Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
__ cvtsi2sdq(XMM1, RAX);
__ cvttsd2sil(RAX, XMM0);
__ cvtsi2sdl(XMM1, RAX);

// Tag the int as a Smi, making sure that it fits; this checks for
// overflow and NaN in the conversion from double to int. Conversion
// overflow from cvttsd2si is signalled with an INT64_MIN value.
// overflow from cvttsd2sil is signalled with an INT32_MIN value.
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ addq(RAX, RAX);
__ addl(RAX, RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);

// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.

@@ -1478,6 +1488,7 @@ void Intrinsifier::MathSqrt(Assembler* assembler) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);

@@ -708,14 +708,14 @@ enum Shift {

enum Extend {
kNoExtend = -1,
UXTB = 0,
UXTH = 1,
UXTW = 2,
UXTX = 3,
SXTB = 4,
SXTH = 5,
SXTW = 6,
SXTX = 7,
UXTB = 0, // Zero extend byte.
UXTH = 1, // Zero extend halfword (16 bits).
UXTW = 2, // Zero extend word (32 bits).
UXTX = 3, // Zero extend doubleword (64 bits).
SXTB = 4, // Sign extend byte.
SXTH = 5, // Sign extend halfword (16 bits).
SXTW = 6, // Sign extend word (32 bits).
SXTX = 7, // Sign extend doubleword (64 bits).
kMaxExtend = 8,
};

@@ -21,9 +21,8 @@
#undef OVERFLOW // From math.h conflicts in constants_ia32.h

namespace dart {
// Smi value range is from -(2^N) to (2^N)-1.
// N=30 (32-bit build) or N=62 (64-bit build).
const intptr_t kSmiBits = kBitsPerWord - 2;
// Smi value range is from -(2^N) to (2^N)-1. N=30
const intptr_t kSmiBits = 30;
const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;
const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);
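
// With kSmiBits fixed at 30 the constants are now identical on 32- and
// 64-bit builds. A small self-contained sketch of the resulting range and
// the 1-bit tag round-trip (illustration, not VM code):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kBits = 30;
  const int64_t smi_max = (INT64_C(1) << kBits) - 1;  //  1073741823
  const int64_t smi_min = -(INT64_C(1) << kBits);     // -1073741824
  const int64_t tagged = smi_max << 1;  // tag: shift in a zero bit
  assert((tagged >> 1) == smi_max);     // untag: arithmetic shift right
  assert(smi_min == -smi_max - 1);
  return 0;
}
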

@@ -7907,7 +7907,16 @@ class Array : public Instance {
virtual uword ComputeCanonicalTableHash() const;

static const intptr_t kBytesPerElement = kWordSize;
static const intptr_t kMaxElements = kSmiMax / kBytesPerElement;
// The length field is a Smi so that sets one limit on the max Array length.
// But we also need to be able to represent the length in bytes in an
// intptr_t, which is a different limit. Either may be smaller. We can't
// use Utils::Minimum here because it is not a const expression.
static const intptr_t kElementLimitDueToIntptrMax = static_cast<intptr_t>(
(kIntptrMax - sizeof(RawArray) - kObjectAlignment + kBytesPerElement) /
kBytesPerElement);
static const intptr_t kMaxElements = kSmiMax < kElementLimitDueToIntptrMax
? kSmiMax
: kElementLimitDueToIntptrMax;
static const intptr_t kMaxNewSpaceElements =
(Heap::kNewAllocatableSize - sizeof(RawArray)) / kBytesPerElement;
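
// The element limit is now the smaller of two constraints: the Smi length
// field and the intptr_t byte count. A sketch with assumed sizes (16-byte
// header, 8-byte elements, 16-byte alignment), not the VM's real layout:

#include <algorithm>
#include <cstdint>

int64_t MaxArrayElements() {
  const int64_t smi_max = (INT64_C(1) << 30) - 1;
  const int64_t header = 16, align = 16, elem = 8;  // assumed layout
  const int64_t byte_limit = (INT64_MAX - header - align + elem) / elem;
  return std::min(smi_max, byte_limit);  // the Smi limit wins on 64-bit
}
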

@@ -308,16 +308,10 @@ ISOLATE_UNIT_TEST_CASE(Smi) {
EXPECT(Smi::IsValid(-15));
EXPECT(Smi::IsValid(0xFFu));
// Upper two bits must be either 00 or 11.
#if defined(ARCH_IS_64_BIT)
EXPECT(!Smi::IsValid(kMaxInt64));
EXPECT(Smi::IsValid(0x3FFFFFFFFFFFFFFF));
EXPECT(Smi::IsValid(-1));
#else
EXPECT(!Smi::IsValid(kMaxInt32));
EXPECT(Smi::IsValid(0x3FFFFFFF));
EXPECT(Smi::IsValid(-1));
EXPECT(!Smi::IsValid(0xFFFFFFFFu));
#endif

EXPECT_EQ(5, smi.AsInt64Value());
EXPECT_EQ(5.0, smi.AsDoubleValue());
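
// "Upper two bits must be either 00 or 11" is a sign-extension test. A
// hedged sketch of a 31-bit IsValid (kSmiBits == 30), separate from the
// VM's actual implementation:

#include <cstdint>

bool SmiIsValid(int64_t value) {
  // Keep the low 31 bits, sign-extend back, and require a round trip.
  const int64_t low31 =
      static_cast<int64_t>(static_cast<uint64_t>(value) << 33) >> 33;
  return low31 == value;  // true for 0x3FFFFFFF, false for 0x40000000
}
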
@@ -445,9 +439,6 @@ ISOLATE_UNIT_TEST_CASE(StringIRITwoByte) {
}

ISOLATE_UNIT_TEST_CASE(Mint) {
// On 64-bit architectures a Smi is stored in a 64 bit word. A Mint cannot
// be allocated if it does fit into a Smi.
#if !defined(ARCH_IS_64_BIT)
{
Mint& med = Mint::Handle();
EXPECT(med.IsNull());

@@ -517,7 +508,6 @@ ISOLATE_UNIT_TEST_CASE(Mint) {
EXPECT_EQ(mint1.value(), mint_value);
EXPECT_EQ(mint2.value(), mint_value);
EXPECT_EQ(mint1.raw(), mint2.raw());
#endif
}

ISOLATE_UNIT_TEST_CASE(Double) {

@@ -2747,22 +2737,6 @@ ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}

#if defined(ARCH_IS_64_BIT)
// Test for Embedded Smi object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
Assembler _assembler_;
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
const Code& code = Code::Handle(Code::FinalizeCode(function, &_assembler_));
function.AttachCode(code);
const Object& result =
Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}
#endif // ARCH_IS_64_BIT

ISOLATE_UNIT_TEST_CASE(ExceptionHandlers) {
const int kNumEntries = 4;

@@ -3023,10 +3023,19 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
Trace* current_trace,
PreloadState* state) {
if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
// Save some time by looking at most one machine word ahead.
state->eats_at_least_ =
EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
current_trace->at_start() == Trace::FALSE_VALUE);
// On ARM64, only read 16 bits ahead for now. This ensures that boxing is
// trivial even with the new smaller Smis. See
// https://github.com/dart-lang/sdk/issues/29951 and
// LoadCodeUnitsInstr::EmitNativeCode.
#if defined(TARGET_ARCH_ARM64)
const int kMaxBytesLoaded = 2;
#else
const int kMaxBytesLoaded = 4;
#endif
const int kMaxTwoByteCharactersLoaded = kMaxBytesLoaded / 2;
state->eats_at_least_ = EatsAtLeast(
compiler->one_byte() ? kMaxBytesLoaded : kMaxTwoByteCharactersLoaded,
kRecursionBudget, current_trace->at_start() == Trace::FALSE_VALUE);
}
state->preload_characters_ =
CalculatePreloadCharacters(compiler, state->eats_at_least_);

@@ -3199,13 +3199,21 @@ void Simulator::DecodeFPIntCvt(Instr* instr) {
set_vregisterd(vd, 1, 0);
} else if (instr->Bits(16, 5) == 24) {
// Format(instr, "fcvtzds'sf 'rd, 'vn");
const intptr_t max = instr->Bit(31) == 1 ? INT64_MAX : INT32_MAX;
const intptr_t min = instr->Bit(31) == 1 ? INT64_MIN : INT32_MIN;
const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
if (vn_val >= static_cast<double>(INT64_MAX)) {
set_register(instr, rd, INT64_MAX, instr->RdMode());
} else if (vn_val <= static_cast<double>(INT64_MIN)) {
set_register(instr, rd, INT64_MIN, instr->RdMode());
int64_t result;
if (vn_val >= static_cast<double>(max)) {
result = max;
} else if (vn_val <= static_cast<double>(min)) {
result = min;
} else {
set_register(instr, rd, static_cast<int64_t>(vn_val), instr->RdMode());
result = static_cast<int64_t>(vn_val);
}
if (instr->Bit(31) == 1) {
set_register(instr, rd, result, instr->RdMode());
} else {
set_register(instr, rd, result & 0xffffffffll, instr->RdMode());
}
} else {
UnimplementedInstruction(instr);
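
// fcvtzds saturates rather than trapping, which is what the widened
// simulator path above models for both destination sizes. A stand-alone
// C++ sketch of the same saturating conversion (illustration only):

#include <cstdint>
#include <limits>

int64_t SaturatingConvert(double value, bool is_64bit) {
  if (value != value) return 0;  // NaN converts to zero
  const int64_t max = is_64bit ? std::numeric_limits<int64_t>::max()
                               : std::numeric_limits<int32_t>::max();
  const int64_t min = is_64bit ? std::numeric_limits<int64_t>::min()
                               : std::numeric_limits<int32_t>::min();
  if (value >= static_cast<double>(max)) return max;  // clamp high
  if (value <= static_cast<double>(min)) return min;  // clamp low
  return static_cast<int64_t>(value);                 // plain truncation
}
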

@@ -636,11 +636,11 @@ void Simulator::Exit(Thread* thread,
// __builtin_s{add,sub,mul}_overflow() intrinsics here and below.
// Note that they may clobber the output location even when there is overflow:
// https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html
DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
intptr_t rhs,
DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#if defined(HOST_ARCH_IA32)
asm volatile(
"add %2, %1\n"
"jo 1f;\n"

@@ -650,7 +650,19 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"addl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
asm volatile(
"adds %1, %1, %2;\n"
"bvs 1f;\n"

@@ -660,32 +672,13 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#else
#error "Unsupported platform"
#endif
return (res != 0);
}
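
// A portable equivalent of the assembly above, using the overflow builtins
// that the comment at the top of this block references; a sketch, not the
// simulator's code (the clobber caveat does not apply to this version):

#include <cstdint>

static bool SignedAddWithOverflowPortable(int32_t lhs, int32_t rhs,
                                          intptr_t* out) {
  int32_t sum;
  if (__builtin_add_overflow(lhs, rhs, &sum)) {
    return true;  // overflowed the 32-bit Smi payload
  }
  *out = static_cast<intptr_t>(sum);  // sign-extend, like movslq/sxtw
  return false;
}
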

DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
intptr_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#elif defined(HOST_ARCH_ARM64)
asm volatile(
"sub %2, %1\n"
"jo 1f;\n"
"xor %0, %0\n"
"mov %1, 0(%3)\n"
"1: "
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
asm volatile(
"subs %1, %1, %2;\n"
"adds %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %1, [%3, #0]\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)

@@ -696,11 +689,64 @@ DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
return (res != 0);
}

DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
intptr_t rhs,
DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#if defined(HOST_ARCH_IA32)
asm volatile(
"sub %2, %1\n"
"jo 1f;\n"
"xor %0, %0\n"
"mov %1, 0(%3)\n"
"1: "
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"subl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
asm volatile(
"subs %1, %1, %2;\n"
"bvs 1f;\n"
"mov %0, #0;\n"
"str %1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM64)
asm volatile(
"subs %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#else
#error "Unsupported platform"
#endif
return (res != 0);
}

DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32)
asm volatile(
"imul %2, %1\n"
"jo 1f;\n"

@@ -710,6 +756,18 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"imull %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
asm volatile(
"smull %1, ip, %1, %2;\n"

@@ -724,12 +782,12 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
#elif defined(HOST_ARCH_ARM64)
int64_t prod_lo = 0;
asm volatile(
"mul %1, %2, %3\n"
"smulh %2, %2, %3\n"
"cmp %2, %1, ASR #63;\n"
"smull %x1, %w2, %w3\n"
"asr %x2, %x1, #63\n"
"cmp %x2, %x1, ASR #31;\n"
"bne 1f;\n"
"mov %0, #0;\n"
"str %1, [%4, #0]\n"
"str %x1, [%4, #0]\n"
"1:"
: "=r"(res), "+r"(prod_lo), "+r"(lhs)
: "r"(rhs), "r"(out)

@@ -1971,11 +2029,7 @@ RawObject* Simulator::Call(const Code& code,
if (rhs != 0) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = (lhs >> kSmiTagSize) / (rhs >> kSmiTagSize);
#if defined(ARCH_IS_64_BIT)
const intptr_t untaggable = 0x4000000000000000LL;
#else
const intptr_t untaggable = 0x40000000L;
#endif // defined(ARCH_IS_64_BIT)
if (res != untaggable) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = res << kSmiTagSize;
pc++;

@@ -2001,11 +2055,12 @@ RawObject* Simulator::Call(const Code& code,
{
BYTECODE(Shl, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
if (static_cast<uintptr_t>(rhs) < kBitsPerWord) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = lhs << rhs;
const int kBitsPerInt32 = 32;
if (static_cast<uintptr_t>(rhs) < kBitsPerInt32) {
const int32_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const int32_t res = lhs << rhs;
if (lhs == (res >> rhs)) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = res;
*reinterpret_cast<intptr_t*>(&FP[rA]) = static_cast<intptr_t>(res);
pc++;
}
}
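
// The DBC Shl guard above is the classic shift-back-and-compare overflow
// test, now applied to the 32-bit payload. A hedged sketch of the check in
// isolation (hypothetical helper):

#include <cstdint>

bool SmiShlChecked(int32_t lhs, int32_t shift, int32_t* out) {
  if (shift < 0 || shift >= 32) return false;  // count out of range
  const int32_t res = static_cast<int32_t>(
      static_cast<uint32_t>(lhs) << shift);    // shift without signed UB
  if ((res >> shift) != lhs) return false;     // sign or value bits lost
  *out = res;
  return true;
}
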

@@ -2016,8 +2071,7 @@ RawObject* Simulator::Call(const Code& code,
BYTECODE(Shr, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
if (rhs >= 0) {
const intptr_t shift_amount =
(rhs >= kBitsPerWord) ? (kBitsPerWord - 1) : rhs;
const intptr_t shift_amount = (rhs >= 32) ? (32 - 1) : rhs;
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]) >> kSmiTagSize;
*reinterpret_cast<intptr_t*>(&FP[rA]) = (lhs >> shift_amount)
<< kSmiTagSize;

@@ -253,10 +253,6 @@ void CheckMint(int64_t value) {
// here covers most of the 64-bit range. On 32-bit platforms the smi
// range covers most of the 32-bit range and values outside that
// range are also represented as mints.
#if defined(ARCH_IS_64_BIT)
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
#else
if (kMinInt32 < value && value < kMaxInt32) {
EXPECT_EQ(Dart_CObject_kInt32, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int32);

@@ -264,7 +260,6 @@ void CheckMint(int64_t value) {
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
}
#endif
}

TEST_CASE(SerializeMints) {

@@ -1334,14 +1334,18 @@ static void EmitFastSmiOp(Assembler* assembler,
__ ldr(R1, Address(SP, +1 * kWordSize)); // Left.
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, not_smi_or_overflow);
__ AssertSmiInRange(R0);
__ AssertSmiInRange(R1);
switch (kind) {
case Token::kADD: {
__ adds(R0, R1, Operand(R0)); // Adds.
__ addsw(R0, R1, Operand(R0)); // Adds.
__ sxtw(R0, R0);
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}
case Token::kSUB: {
__ subs(R0, R1, Operand(R0)); // Subtract.
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ sxtw(R0, R0);
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}

@@ -1383,6 +1387,8 @@ static void EmitFastSmiOp(Assembler* assembler,
__ StoreToOffset(R1, R6, count_offset);
}

__ AssertSmiInRange(R0, Assembler::kValueCanBeHeapPointer);

__ ret();
}

@@ -1274,13 +1274,15 @@ static void EmitFastSmiOp(Assembler* assembler,
__ j(NOT_ZERO, not_smi_or_overflow);
switch (kind) {
case Token::kADD: {
__ addq(RAX, RCX);
__ addl(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kSUB: {
__ subq(RAX, RCX);
__ subl(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kEQ: {

@@ -18,7 +18,7 @@ namespace dart {
// ClassifyingTokenPositions N -> -1 - N
//
// Synthetically created AstNodes are given real source positions but encoded
// as negative numbers from [kSmiMin32, -1 - N]. For example:
// as negative numbers from [kSmiMin, -1 - N]. For example:
//
// A source position of 0 in a synthetic AstNode would be encoded as -2 - N.
// A source position of 1 in a synthetic AstNode would be encoded as -3 - N.

@@ -86,7 +86,7 @@ class TokenPosition {
#undef DECLARE_VALUES
static const intptr_t kMinSourcePos = 0;
static const TokenPosition kMinSource;
static const intptr_t kMaxSourcePos = kSmiMax32 - kMaxSentinelDescriptors - 2;
static const intptr_t kMaxSourcePos = kSmiMax - kMaxSentinelDescriptors - 2;
static const TokenPosition kMaxSource;

// Decode from a snapshot.

@@ -188,9 +188,6 @@ LibTest/collection/ListBase/ListBase_class_A01_t02: Skip # co19 issue 673, These
LibTest/collection/ListMixin/ListMixin_class_A01_t02: Skip # co19 issue 673, These tests take too much memory (300 MB) for our 1 GB test machine co19 issue 673. http://code.google.com/p/co19/issues/detail?id=673
LibTest/core/List/List_class_A01_t02: Skip # co19 issue 673, These tests take too much memory (300 MB) for our 1 GB test machine co19 issue 673. http://code.google.com/p/co19/issues/detail?id=673

[ $arch != arm64 && $arch != simarm64 && $arch != simdbc && $arch != simdbc64 && $arch != x64 && ($runtime == dart_precompiled || $runtime == vm) ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # co19 issue 129

[ $arch == ia32 && $mode == release && $runtime == vm && $system == linux ]
service/dev_fs_spawn_test: Pass, Fail # Issue 28411

@@ -382,6 +379,9 @@ LibTest/typed_data/Uint64List/Uint64List.view_A01_t01: CompileTimeError # Large
LibTest/typed_data/Uint64List/Uint64List.view_A01_t02: CompileTimeError # Large integer literal
WebPlatformTest/*: SkipByDesign # dart:html not supported on VM.

[ $runtime == dart_precompiled || $runtime == vm ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # Can't represent 1 << 2147483647 without running out of memory.

[ $runtime == flutter || $hot_reload || $hot_reload_rollback ]
Language/Expressions/Assignment/prefix_object_t02: Skip # Requires deferred libraries
Language/Expressions/Constants/constant_constructor_t03: Skip # Requires deferred libraries

@@ -613,5 +613,4 @@ regexp/UC16_test: RuntimeError

[ $hot_reload || $hot_reload_rollback ]
bigint_parse_radix_test: Pass, Timeout # Issue 31659
bigint_test: Pass, Crash # Issue 31660
integer_parsed_mul_div_vm_test: Pass, Slow # Slow