Revert "[VM] Reduce Smi size to 32 bit on 64 bit platforms"

This reverts commit cf78da8a48.

Reason for revert: introduces a significant performance regression (~30%) on analyzer benchmarks (warm-analysis) without a clearly visible hot spot.

Original change's description:
> [VM] Reduce Smi size to 32 bit on 64 bit platforms
> 
> This reduces small tagged integers (Smis) on 64 bit platforms from 63
> bits to 31 bits, plus one tag bit in each case.
> This is a step on the way to compile-time-optional compressed pointers
> on 64 bit platforms.  See more about this at go/dartvmlearnings
> This causes a slowdown for some uses of integers that don't fit in 31
> signed bits, but because both x64 and ARM64 now have unboxed 64 bit
> integers, the performance hit should not be too bad.
> 
> This is a reapplication of
> https://dart-review.googlesource.com/c/sdk/+/46244
> It was reverted due to a compilation error on 32 bit
> ARM with DBC.
> 
> R=vegorov@google.com
> 
> Change-Id: I943de1768519457f0e5a61ef0b4ef204b6a53281
> Reviewed-on: https://dart-review.googlesource.com/51321
> Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
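
For context on the change being reverted: a Smi stores its payload shifted left past a single zero tag bit, and the CL narrowed the 64-bit payload from 63 to 31 significant bits so that a tagged Smi always sign-extends from the low 32 bits of a register. A minimal sketch of the two layouts (illustrative constants only, not the VM's actual Smi declarations):

    #include <cstdint>

    // Illustrative sketch of Smi tagging; not the VM's real code.
    // A Smi is stored as (value << 1) with the low tag bit == 0.
    constexpr int kSmiTagSize = 1;
    constexpr int64_t kSmiMax63 = (INT64_C(1) << 62) - 1;  // before the CL
    constexpr int64_t kSmiMin63 = -(INT64_C(1) << 62);
    constexpr int64_t kSmiMax31 = (INT64_C(1) << 30) - 1;  // after the CL
    constexpr int64_t kSmiMin31 = -(INT64_C(1) << 30);

    inline bool IsSmi(int64_t raw) { return (raw & 1) == 0; }
    inline int64_t SmiTag(int64_t value) {
      return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiTagSize);
    }
    inline int64_t SmiUntag(int64_t raw) { return raw >> kSmiTagSize; }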

TBR=vegorov@google.com,erikcorry@google.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Change-Id: I8c5b909ec38663b5f5b05f69ef488c97341f8f3d
Reviewed-on: https://dart-review.googlesource.com/54000
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
Commit: a06b1d96cb (parent: 89e43b4141)
Author: Vyacheslav Egorov, 2018-05-07 13:56:10 +00:00, committed by commit-bot@chromium.org
25 changed files with 511 additions and 913 deletions

@ -78,14 +78,19 @@ abstract class _HashBase implements _HashVMBase {
static const int _UNUSED_PAIR = 0;
static const int _DELETED_PAIR = 1;
// The top bits are wasted to avoid Mint allocation.
// On 32-bit, the top bits are wasted to avoid Mint allocation.
// TODO(koda): Reclaim the bits by making the compiler treat hash patterns
// as unsigned words.
static int _indexSizeToHashMask(int indexSize) {
int indexBits = indexSize.bitLength - 2;
return (1 << (30 - indexBits)) - 1;
return internal.is64Bit
? (1 << (32 - indexBits)) - 1
: (1 << (30 - indexBits)) - 1;
}
static int _hashPattern(int fullHash, int hashMask, int size) {
final int maskedHash = fullHash & hashMask;
// TODO(koda): Consider keeping bit length and use left shift.
return (maskedHash == 0) ? (size >> 1) : maskedHash * (size >> 1);
}

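A worked example of the restored 64-bit branch of _indexSizeToHashMask above, transcribed to C++ (the helper name is mine, and Dart's bitLength is emulated with a GCC/Clang builtin): for indexSize == 16, bitLength is 5, so indexBits == 3 and the mask keeps 29 hash bits on 64-bit versus 27 on 32-bit.

    #include <cassert>
    #include <cstdint>

    // C++ transcription of the Dart _indexSizeToHashMask above (sketch).
    int64_t IndexSizeToHashMask(uint64_t index_size, bool is_64bit) {
      int index_bits = 64 - __builtin_clzll(index_size) - 2;  // bitLength - 2
      return is_64bit ? (INT64_C(1) << (32 - index_bits)) - 1
                      : (INT64_C(1) << (30 - index_bits)) - 1;
    }

    int main() {
      assert(IndexSizeToHashMask(16, true) == (INT64_C(1) << 29) - 1);   // 64-bit
      assert(IndexSizeToHashMask(16, false) == (INT64_C(1) << 27) - 1);  // 32-bit
    }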

@ -32,7 +32,7 @@ class int {
return null; // Empty.
}
}
var smiLimit = 9;
var smiLimit = is64Bit ? 18 : 9;
if ((last - ix) >= smiLimit) {
return null; // May not fit into a Smi.
}
@ -133,7 +133,7 @@ class int {
static int _parseRadix(
String source, int radix, int start, int end, int sign) {
int tableIndex = (radix - 2) * 2;
int tableIndex = (radix - 2) * 4 + (is64Bit ? 2 : 0);
int blockSize = _PARSE_LIMITS[tableIndex];
int length = end - start;
if (length <= blockSize) {
@ -159,7 +159,7 @@ class int {
int positiveOverflowLimit = 0;
int negativeOverflowLimit = 0;
if (_limitIntsTo64Bits) {
tableIndex = tableIndex << 1; // Pre-multiply by 2 for simpler indexing.
tableIndex = tableIndex << 1; // pre-multiply by 2 for simpler indexing
positiveOverflowLimit = _int64OverflowLimits[tableIndex];
if (positiveOverflowLimit == 0) {
positiveOverflowLimit =
@ -175,10 +175,14 @@ class int {
if (result >= positiveOverflowLimit) {
if ((result > positiveOverflowLimit) ||
(smi > _int64OverflowLimits[tableIndex + 2])) {
// Although the unsigned overflow limits do not depend on the
// platform, the multiplier and block size, which are used to
// compute it, do.
int X = is64Bit ? 1 : 0;
if (radix == 16 &&
!(result >= _int64UnsignedOverflowLimit &&
(result > _int64UnsignedOverflowLimit ||
smi > _int64UnsignedSmiOverflowLimit)) &&
!(result >= _int64UnsignedOverflowLimits[X] &&
(result > _int64UnsignedOverflowLimits[X] ||
smi > _int64UnsignedSmiOverflowLimits[X])) &&
blockEnd + blockSize > end) {
return (result * multiplier) + smi;
}
@ -223,42 +227,43 @@ class int {
// For each radix, 2-36, how many digits are guaranteed to fit in a smi,
// and magnitude of such a block (radix ** digit-count).
// 32-bit limit/multiplier at (radix - 2)*4, 64-bit limit at (radix-2)*4+2
static const _PARSE_LIMITS = const [
30, 1073741824, // radix: 2
18, 387420489,
15, 1073741824,
12, 244140625, // radix: 5
11, 362797056,
10, 282475249,
10, 1073741824,
9, 387420489,
9, 1000000000, // radix: 10
8, 214358881,
8, 429981696,
8, 815730721,
7, 105413504,
7, 170859375, // radix: 15
7, 268435456,
7, 410338673,
7, 612220032,
7, 893871739,
6, 64000000, // radix: 20
6, 85766121,
6, 113379904,
6, 148035889,
6, 191102976,
6, 244140625, // radix: 25
6, 308915776,
6, 387420489,
6, 481890304,
6, 594823321,
6, 729000000, // radix: 30
6, 887503681,
6, 1073741824,
5, 39135393,
5, 45435424,
5, 52521875, // radix: 35
5, 60466176,
30, 1073741824, 62, 4611686018427387904, // radix: 2
18, 387420489, 39, 4052555153018976267,
15, 1073741824, 30, 1152921504606846976,
12, 244140625, 26, 1490116119384765625, // radix: 5
11, 362797056, 23, 789730223053602816,
10, 282475249, 22, 3909821048582988049,
10, 1073741824, 20, 1152921504606846976,
9, 387420489, 19, 1350851717672992089,
9, 1000000000, 18, 1000000000000000000, // radix: 10
8, 214358881, 17, 505447028499293771,
8, 429981696, 17, 2218611106740436992,
8, 815730721, 16, 665416609183179841,
7, 105413504, 16, 2177953337809371136,
7, 170859375, 15, 437893890380859375, // radix: 15
7, 268435456, 15, 1152921504606846976,
7, 410338673, 15, 2862423051509815793,
7, 612220032, 14, 374813367582081024,
7, 893871739, 14, 799006685782884121,
6, 64000000, 14, 1638400000000000000, // radix: 20
6, 85766121, 14, 3243919932521508681,
6, 113379904, 13, 282810057883082752,
6, 148035889, 13, 504036361936467383,
6, 191102976, 13, 876488338465357824,
6, 244140625, 13, 1490116119384765625, // radix: 25
6, 308915776, 13, 2481152873203736576,
6, 387420489, 13, 4052555153018976267,
6, 481890304, 12, 232218265089212416,
6, 594823321, 12, 353814783205469041,
6, 729000000, 12, 531441000000000000, // radix: 30
6, 887503681, 12, 787662783788549761,
6, 1073741824, 12, 1152921504606846976,
5, 39135393, 12, 1667889514952984961,
5, 45435424, 12, 2386420683693101056,
5, 52521875, 12, 3379220508056640625, // radix: 35
5, 60466176, 11, 131621703842267136,
];
/// Flag indicating if integers are limited by 64 bits
@ -268,8 +273,11 @@ class int {
static const _maxInt64 = 0x7fffffffffffffff;
static const _minInt64 = -_maxInt64 - 1;
static const _int64UnsignedOverflowLimit = 0xfffffffff;
static const _int64UnsignedSmiOverflowLimit = 0xfffffff;
static const _int64UnsignedOverflowLimits = const [0xfffffffff, 0xf];
static const _int64UnsignedSmiOverflowLimits = const [
0xfffffff,
0xfffffffffffffff
];
/// In the `--limit-ints-to-64-bits` mode calculation of the expression
///

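The restored _PARSE_LIMITS table packs four entries per radix (the 32-bit digit count and block multiplier, then the 64-bit pair), which is what the index expression (radix - 2) * 4 + (is64Bit ? 2 : 0) above selects. A quick consistency check in C++ on the radix-10 row copied from the table (the array name is mine):

    #include <cassert>
    #include <cstdint>

    // Radix-10 row of _PARSE_LIMITS: {32-bit digits, 32-bit multiplier,
    //                                 64-bit digits, 64-bit multiplier}.
    const int64_t kRadix10Row[] = {9, 1000000000, 18,
                                   INT64_C(1000000000000000000)};

    int main() {
      const bool is_64bit = true;
      const int offset = is_64bit ? 2 : 0;  // (radix - 2) * 4 == 32 folded away
      const int64_t block_size = kRadix10Row[offset];      // Smi-safe digits
      const int64_t multiplier = kRadix10Row[offset + 1];  // radix ** block_size
      int64_t p = 1;
      for (int64_t i = 0; i < block_size; i++) p *= 10;
      assert(block_size == 18 && p == multiplier);  // 10**18
    }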

@ -1069,18 +1069,12 @@ class Assembler : public ValueObject {
const Register crn = ConcreteRegister(rn);
EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord);
}
void fcvtzdsx(Register rd, VRegister vn) {
void fcvtzds(Register rd, VRegister vn) {
ASSERT(rd != R31);
ASSERT(rd != CSP);
const Register crd = ConcreteRegister(rd);
EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn));
}
void fcvtzdsw(Register rd, VRegister vn) {
ASSERT(rd != R31);
ASSERT(rd != CSP);
const Register crd = ConcreteRegister(rd);
EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn), kWord);
}
void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); }
void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); }
void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); }
@ -1371,17 +1365,9 @@ class Assembler : public ValueObject {
LslImmediate(dst, src, kSmiTagSize);
}
void BranchIfNotSmi(Register reg, Label* label) {
ASSERT(kSmiTagMask == 1);
ASSERT(kSmiTag == 0);
tbnz(label, reg, 0);
}
void BranchIfNotSmi(Register reg, Label* label) { tbnz(label, reg, kSmiTag); }
void BranchIfSmi(Register reg, Label* label) {
ASSERT(kSmiTagMask == 1);
ASSERT(kSmiTag == 0);
tbz(label, reg, 0);
}
void BranchIfSmi(Register reg, Label* label) { tbz(label, reg, kSmiTag); }
void Branch(const StubEntry& stub_entry,
Register pp,
@ -1465,11 +1451,6 @@ class Assembler : public ValueObject {
kValueCanBeSmi,
};
enum CanBeHeapPointer {
kValueIsNotHeapPointer,
kValueCanBeHeapPointer,
};
// Storing into an object.
void StoreIntoObject(Register object,
const Address& dest,
@ -1611,22 +1592,6 @@ class Assembler : public ValueObject {
Register tmp,
OperandSize sz);
void AssertSmiInRange(
Register object,
CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
#if defined(DEBUG)
Label ok;
if (can_be_heap_pointer == kValueCanBeHeapPointer) {
BranchIfNotSmi(object, &ok);
}
cmp(object, Operand(object, SXTW, 0));
b(&ok, EQ);
Stop("Smi out of range");
Bind(&ok);
#endif
}
private:
AssemblerBuffer buffer_; // Contains position independent code.
ObjectPoolWrapper object_pool_wrapper_;

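The AssertSmiInRange helper deleted above checked the CL's invariant that a tagged Smi equals its own low word sign-extended (cmp x, w via SXTW on ARM64; movsxd plus cmpq on x64). The predicate it encodes, as a C sketch:

    #include <cstdint>

    // Invariant checked by the removed AssertSmiInRange: the tagged Smi
    // occupies only the low 32 bits, the high 32 being sign extension.
    inline bool IsCanonical32BitSmi(int64_t tagged) {
      return tagged == static_cast<int32_t>(tagged);
    }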

@ -2576,73 +2576,17 @@ ASSEMBLER_TEST_RUN(FldrqFstrqPrePostIndex, test) {
EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsx, assembler) {
ASSEMBLER_TEST_GENERATE(Fcvtzds, assembler) {
__ LoadDImmediate(V0, 42.0);
__ fcvtzdsx(R0, V0);
__ fcvtzds(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsx, test) {
ASSEMBLER_TEST_RUN(Fcvtzds, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw, assembler) {
__ LoadDImmediate(V0, 42.0);
__ fcvtzdsw(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow, assembler) {
__ LoadDImmediate(V0, 1e20);
__ fcvtzdsx(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMaxInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow_negative, assembler) {
__ LoadDImmediate(V0, -1e20);
__ fcvtzdsx(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow_negative, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMinInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow, assembler) {
__ LoadDImmediate(V0, 1e10);
__ fcvtzdsw(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow_negative, assembler) {
__ LoadDImmediate(V0, -1e10);
__ fcvtzdsw(R0, V0);
__ sxtw(R0, R0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow_negative, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMinInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Scvtfdx, assembler) {
__ LoadImmediate(R0, 42);
__ scvtfdx(V0, R0);

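The deleted overflow tests above relied on ARM64 FCVTZS saturating: out-of-range doubles convert to the largest or smallest representable integer and NaN converts to zero, rather than trapping. A portable sketch of that behaviour (a plain C++ cast of an out-of-range double is undefined, hence the explicit clamps):

    #include <cstdint>
    #include <limits>

    // Emulates ARM64 FCVTZS: double -> int64, round toward zero, saturating.
    int64_t SaturatingFcvtzs(double v) {
      if (v != v) return 0;  // NaN converts to 0
      if (v >= 9223372036854775808.0)  // 2^63
        return std::numeric_limits<int64_t>::max();
      if (v < -9223372036854775808.0)  // -2^63
        return std::numeric_limits<int64_t>::min();
      return static_cast<int64_t>(v);  // truncates toward zero
    }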

@ -521,10 +521,7 @@ class Assembler : public ValueObject {
return CompareImmediate(reg, Immediate(immediate));
}
void testl(Register reg, const Immediate& imm) {
Immediate imm2(imm.value() & 0xffffffffll);
testq(reg, imm2);
}
void testl(Register reg, const Immediate& imm) { testq(reg, imm); }
void testb(const Address& address, const Immediate& imm);
void testq(Register reg, const Immediate& imm);
@ -713,11 +710,6 @@ class Assembler : public ValueObject {
kValueCanBeSmi,
};
enum CanBeHeapPointer {
kValueIsNotHeapPointer,
kValueCanBeHeapPointer,
};
// Destroys value.
void StoreIntoObject(Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
@ -948,26 +940,6 @@ class Assembler : public ValueObject {
Register array,
Register index);
void AssertSmiInRange(
Register object,
CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
#if defined(DEBUG)
Register tmp = object == TMP ? TMP2 : TMP;
Label ok;
if (can_be_heap_pointer == kValueCanBeHeapPointer) {
testl(object, Immediate(kSmiTagMask));
ASSERT(kSmiTag == 0);
j(ZERO, &ok);
}
movsxd(tmp, object);
cmpq(tmp, object);
j(EQUAL, &ok);
Stop("Smi out of range");
Bind(&ok);
#endif
}
static Address VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
}

@ -1585,10 +1585,10 @@ bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
return Utils::IsPowerOfTwo(Utils::Abs(int_value));
}
static intptr_t SignificantRepresentationBits(Representation r) {
static intptr_t RepresentationBits(Representation r) {
switch (r) {
case kTagged:
return 31;
return kBitsPerWord - 1;
case kUnboxedInt32:
case kUnboxedUint32:
return 32;
@ -1602,7 +1602,7 @@ static intptr_t SignificantRepresentationBits(Representation r) {
static int64_t RepresentationMask(Representation r) {
return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
(64 - SignificantRepresentationBits(r)));
(64 - RepresentationBits(r)));
}
static bool ToIntegerConstant(Value* value, int64_t* result) {
@ -2163,8 +2163,7 @@ Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) {
break;
case Token::kSHL: {
const intptr_t kMaxShift =
SignificantRepresentationBits(representation()) - 1;
const intptr_t kMaxShift = RepresentationBits(representation()) - 1;
if (rhs == 0) {
return left()->definition();
} else if ((rhs < 0) || (rhs >= kMaxShift)) {

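With the revert, kTagged is worth kBitsPerWord - 1 significant bits again (63 on 64-bit targets, 31 on 32-bit) instead of a fixed 31, and RepresentationMask above turns that count into a bit mask. The resulting values, as compile-time checks:

    #include <cstdint>

    // RepresentationMask(r) == uint64_t(-1) >> (64 - bits):
    static_assert((~UINT64_C(0) >> (64 - 63)) == UINT64_C(0x7FFFFFFFFFFFFFFF),
                  "kTagged on a 64-bit word");
    static_assert((~UINT64_C(0) >> (64 - 31)) == UINT64_C(0x7FFFFFFF),
                  "kTagged on a 32-bit word");
    static_assert((~UINT64_C(0) >> (64 - 32)) == UINT64_C(0xFFFFFFFF),
                  "kUnboxedInt32 / kUnboxedUint32");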

@ -997,13 +997,9 @@ CompileType LoadIndexedInstr::ComputeType() const {
case kTwoByteStringCid:
case kExternalOneByteStringCid:
case kExternalTwoByteStringCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
// TODO(erikcorry): Perhaps this can return a faster type. See
// https://github.com/dart-lang/sdk/issues/32582
return CompileType::Int();
return CompileType::FromCid(kSmiCid);
default:
UNIMPLEMENTED();
@ -1277,6 +1273,7 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
@ -1290,15 +1287,12 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
default:
UNREACHABLE();
break;
}
if (representation_ == kTagged) {
ASSERT(can_pack_into_smi());
__ SmiTag(result);
}
}
Representation StoreIndexedInstr::RequiredInputRepresentation(
@ -2776,27 +2770,18 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
if (locs.in(1).IsConstant()) {
const Object& constant = locs.in(1).constant();
ASSERT(constant.IsSmi());
// Immediate shift operation takes 5 bits for the count.
const intptr_t kCountLimit = 0x1F;
// These should be around the same size.
COMPILE_ASSERT(kCountLimit + 1 == kSmiBits + 2);
// Immediate shift operation takes 6 bits for the count.
const intptr_t kCountLimit = 0x3F;
const intptr_t value = Smi::Cast(constant).Value();
ASSERT((0 < value) && (value < kCountLimit));
if (shift_left->can_overflow()) {
// Check for overflow (preserve left).
__ LslImmediate(TMP, left, value, kWord);
__ cmpw(left, Operand(TMP, ASR, value));
__ LslImmediate(TMP, left, value);
__ cmp(left, Operand(TMP, ASR, value));
__ b(deopt, NE); // Overflow.
}
// Shift for result now we know there is no overflow. This writes the full
// 64 bits of the output register, but unless we are in truncating mode the
// top bits will just be sign extension bits.
// Shift for result now we know there is no overflow.
__ LslImmediate(result, left, value);
if (shift_left->is_truncating()) {
// This preserves the invariant that Smis only use the low 32 bits of the
// register, the high bits being sign extension bits.
__ sxtw(result, result);
}
return;
}
@ -2804,33 +2789,28 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
const Register right = locs.in(1).reg();
Range* right_range = shift_left->right_range();
if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
// TODO(srdjan): Implement code below for is_truncating().
// If left is constant, we know the maximal allowed size for right.
const Object& obj = shift_left->left()->BoundConstant();
// Even though we have a non-Smi constant on the left, we might still emit
// a Smi op here. In that case the Smi check above will have deopted, so
// we can't reach this point. Emit a breakpoint to be sure.
if (!obj.IsSmi()) {
__ Breakpoint();
return;
if (obj.IsSmi()) {
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareRegisters(right, ZR);
__ b(deopt, MI);
__ mov(result, ZR);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
}
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareRegisters(right, ZR);
__ b(deopt, MI);
__ mov(result, ZR);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
ASSERT(!shift_left->is_truncating());
return;
}
@ -2854,11 +2834,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
}
if (shift_left->is_truncating()) {
__ sxtw(result, result);
}
} else {
// If we can overflow.
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
__ CompareImmediate(right,
@ -2866,16 +2842,15 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ b(deopt, CS);
}
// Left is not a constant.
// Check if count is too large for handling it inlined.
// Check if count too large for handling it inlined.
__ SmiUntag(TMP, right);
// Overflow test (preserve left, right, and TMP);
const Register temp = locs.temp(0).reg();
__ lslvw(temp, left, TMP);
__ asrvw(TMP2, temp, TMP);
__ cmpw(left, Operand(TMP2));
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
__ b(deopt, NE); // Overflow.
// Shift for result now we know there is no overflow. This is a 64 bit
// operation, so no sign extension is needed.
// Shift for result now we know there is no overflow.
__ lslv(result, left, TMP);
}
}
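
The shift-left paths above all use the same overflow test: shift left, arithmetic-shift back by the same amount, and compare with the original; the round trip differs exactly when significant bits were shifted out. As a C predicate (sketch):

    #include <cstdint>

    // The lsl/asr round-trip overflow test used above.
    bool ShlOverflows(int64_t value, int shift) {
      // Shift in the unsigned domain to avoid C++ signed-overflow UB.
      int64_t shifted =
          static_cast<int64_t>(static_cast<uint64_t>(value) << shift);
      return (shifted >> shift) != value;
    }
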
@ -2956,20 +2931,18 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kADD:
__ addsw(result, left, Operand(right));
__ adds(result, left, Operand(right));
__ b(slow_path->entry_label(), VS);
__ sxtw(result, result);
break;
case Token::kSUB:
__ subsw(result, left, Operand(right));
__ subs(result, left, Operand(right));
__ b(slow_path->entry_label(), VS);
__ sxtw(result, result);
break;
case Token::kMUL:
__ SmiUntag(TMP, left);
__ smull(result, TMP, right);
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31-63
__ mul(result, TMP, right);
__ smulh(TMP, TMP, right);
// TMP: result bits 64..127.
__ cmp(TMP, Operand(result, ASR, 63));
__ b(slow_path->entry_label(), NE);
break;
@ -2994,8 +2967,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
__ asrvw(TMP2, result, TMP);
__ cmp(left, Operand(TMP2, SXTW, 0));
__ asrv(TMP2, result, TMP);
__ CompareRegisters(left, TMP2);
__ b(slow_path->entry_label(), NE); // Overflow.
break;
case Token::kSHR:
@ -3005,8 +2978,6 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ b(slow_path->entry_label(), CS);
__ AssertSmiInRange(left);
__ AssertSmiInRange(right);
__ SmiUntag(result, right);
__ SmiUntag(TMP, left);
__ asrv(result, TMP, result);
@ -3016,7 +2987,6 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
__ Bind(slow_path->exit_label());
__ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
}
class CheckedSmiComparisonSlowPath
@ -3207,28 +3177,20 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kADD: {
if (deopt == NULL) {
__ AddImmediate(result, left, imm);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ AddImmediateSetFlags(result, left, imm, kWord);
__ AddImmediateSetFlags(result, left, imm);
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
case Token::kSUB: {
if (deopt == NULL) {
__ AddImmediate(result, left, -imm);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
// Negating imm and using AddImmediateSetFlags would not detect the
// overflow when imm == kMinInt32.
__ SubImmediateSetFlags(result, left, imm, kWord);
// overflow when imm == kMinInt64.
__ SubImmediateSetFlags(result, left, imm);
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
@ -3236,14 +3198,12 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Keep left value tagged and untag right value.
const intptr_t value = Smi::Cast(constant).Value();
__ LoadImmediate(TMP, value);
__ smull(result, left, TMP);
__ mul(result, left, TMP);
if (deopt != NULL) {
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31..63.
__ smulh(TMP, left, TMP);
// TMP: result bits 64..127.
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
} else if (is_truncating()) {
__ sxtw(result, result);
}
break;
}
@ -3254,10 +3214,9 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t shift_count =
Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
ASSERT(kSmiTagSize == 1);
__ AsrImmediate(TMP, left, 31); // All 1s or all 0s.
__ AsrImmediate(TMP, left, 63);
ASSERT(shift_count > 1); // 1, -1 case handled above.
const Register temp = TMP2;
// Adjust so that we round to 0 instead of round down.
__ add(temp, left, Operand(TMP, LSR, 64 - shift_count));
ASSERT(shift_count > 0);
__ AsrImmediate(result, temp, shift_count);
@ -3292,7 +3251,6 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
break;
}
__ AssertSmiInRange(result);
return;
}
@ -3301,26 +3259,18 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kADD: {
if (deopt == NULL) {
__ add(result, left, Operand(right));
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ addsw(result, left, Operand(right));
__ adds(result, left, Operand(right));
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
case Token::kSUB: {
if (deopt == NULL) {
__ sub(result, left, Operand(right));
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ subsw(result, left, Operand(right));
__ subs(result, left, Operand(right));
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
@ -3328,13 +3278,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiUntag(TMP, left);
if (deopt == NULL) {
__ mul(result, TMP, right);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ smull(result, TMP, right);
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31..63.
__ mul(result, TMP, right);
__ smulh(TMP, TMP, right);
// TMP: result bits 64..127.
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
}
@ -3369,7 +3316,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(result, 0x40000000LL);
__ CompareImmediate(result, 0x4000000000000000LL);
__ b(deopt, EQ);
__ SmiTag(result);
break;
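
The restored multiply paths above detect overflow by forming the full 128-bit product (mul gives the low 64 bits, smulh the high 64) and deopting unless the high half is just the sign extension of the low half. The same test written with the __int128 extension (a GCC/Clang assumption):

    #include <cstdint>

    // Equivalent of the mul/smulh overflow check above.
    bool MulOverflows(int64_t a, int64_t b) {
      __int128 product = static_cast<__int128>(a) * b;
      int64_t lo = static_cast<int64_t>(product);        // mul
      int64_t hi = static_cast<int64_t>(product >> 64);  // smulh
      return hi != (lo >> 63);  // high half must be sign extension of low
    }
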
@ -3414,10 +3361,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ b(deopt, LT);
}
__ SmiUntag(TMP, right);
// The asrv operation masks the count to 6 bits, but any shift between 31
// and 63 gives the same result because 32 bit Smis are stored sign
// extended in the registers.
const intptr_t kCountLimit = 0x1F;
// asrv operation masks the count to 6 bits.
const intptr_t kCountLimit = 0x3F;
if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
__ LoadImmediate(TMP2, kCountLimit);
__ CompareRegisters(TMP, TMP2);
@ -3446,7 +3391,6 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
break;
}
__ AssertSmiInRange(result);
}
LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
@ -3599,17 +3543,10 @@ LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps,
ValueFitsSmi() ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
// Get two distinct registers for input and output, plus a temp
// register for testing for overflow and allocating a Mint.
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (!ValueFitsSmi()) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, Location::RequiresRegister());
return summary;
}
@ -3618,51 +3555,16 @@ void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
ASSERT(value != out);
Label done;
ASSERT(kSmiTagSize == 1);
// TODO(vegorov) implement and use UBFM/SBFM for this.
__ LslImmediate(out, value, 32);
if (from_representation() == kUnboxedInt32) {
ASSERT(kSmiTag == 0);
// Signed Bitfield Insert in Zero instruction extracts the 31 significant
// bits from a Smi.
__ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
if (ValueFitsSmi()) {
return;
}
Register temp = locs()->temp(0).reg();
__ cmp(out, Operand(value, LSL, 1));
__ b(&done, EQ); // Jump if the sbfiz instruction didn't lose info.
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
__ sxtw(temp, value);
__ AsrImmediate(out, out, 32 - kSmiTagSize);
} else {
ASSERT(from_representation() == kUnboxedUint32);
ASSERT(kSmiTag == 0);
// A 32 bit positive Smi has one tag bit and one unused sign bit,
// leaving only 30 bits for the payload.
__ ubfiz(out, value, kSmiTagSize, kSmiBits);
if (ValueFitsSmi()) {
return;
}
Register temp = locs()->temp(0).reg();
__ TestImmediate(value, 0xc0000000);
__ b(&done, EQ); // Jump if both bits are zero.
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
__ ubfiz(temp, value, 0, 32); // Zero extend word.
__ LsrImmediate(out, out, 32 - kSmiTagSize);
}
__ StoreToOffset(locs()->temp(0).reg(), out,
Mint::value_offset() - kHeapObjectTag);
#if defined(DEBUG)
Label skip_smi_test;
__ b(&skip_smi_test);
__ Bind(&done);
__ AssertSmiInRange(out, Assembler::kValueCanBeHeapPointer);
__ Bind(&skip_smi_test);
#else
__ Bind(&done);
#endif
}
LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
@ -3690,8 +3592,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
ASSERT(kSmiTag == 0);
__ LslImmediate(out, in, kSmiTagSize, kWord);
__ sxtw(out, out);
__ LslImmediate(out, in, kSmiTagSize);
Label done;
__ cmp(in, Operand(out, ASR, kSmiTagSize));
__ b(&done, EQ);
@ -4432,9 +4333,8 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
__ subsw(result, ZR, Operand(value));
__ subs(result, ZR, Operand(value));
__ b(deopt, VS);
__ sxtw(result, result);
break;
}
case Token::kBIT_NOT:
@ -4445,7 +4345,6 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ AssertSmiInRange(result);
}
LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
@ -4497,7 +4396,7 @@ void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const VRegister result = locs()->out(0).fpu_reg();
__ SmiUntag(TMP, value);
__ scvtfdw(result, TMP);
__ scvtfdx(result, TMP);
}
LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -4541,13 +4440,12 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcmpd(VTMP, VTMP);
__ b(&do_call, VS);
__ fcvtzdsx(result, VTMP);
__ fcvtzds(result, VTMP);
// Overflow is signaled with minint.
// Check for overflow and that it fits into Smi.
__ AsrImmediate(TMP, result, 30);
__ cmp(TMP, Operand(result, ASR, 63));
__ b(&do_call, NE);
__ CompareImmediate(result, 0xC000000000000000);
__ b(&do_call, MI);
__ SmiTag(result);
__ b(&done);
__ Bind(&do_call);
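
The hunk above shows both generations of the fits-into-Smi test: the CL's 31-bit form compares result >> 30 against result >> 63 (every bit above the payload must equal the sign bit), while the restored 63-bit form needs a single compare because result + 2^62, computed by CompareImmediate against 0xC000000000000000, is negative exactly when the value lies outside [-2^62, 2^62). Both as C predicates (sketch):

    #include <cstdint>

    // Restored check: CompareImmediate(result, 0xC000000000000000) + b(MI).
    bool FitsInSmi63(int64_t v) {
      // v + 2^62 is non-negative exactly for v in [-2^62, 2^62).
      return static_cast<int64_t>(static_cast<uint64_t>(v) +
                                  (UINT64_C(1) << 62)) >= 0;
    }

    // The CL's 31-bit variant: bits 30..62 must all equal the sign bit.
    bool FitsInSmi31(int64_t v) { return (v >> 30) == (v >> 63); }
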
@ -4564,7 +4462,6 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
args_info, locs(), ICData::Handle(),
ICData::kStatic);
__ Bind(&done);
__ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
}
LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
@ -4588,13 +4485,11 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcmpd(value, value);
__ b(deopt, VS);
__ fcvtzdsx(result, value);
__ fcvtzds(result, value);
// Check for overflow and that it fits into Smi.
__ AsrImmediate(TMP, result, 30);
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
__ CompareImmediate(result, 0xC000000000000000);
__ b(deopt, MI);
__ SmiTag(result);
__ AssertSmiInRange(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -4863,7 +4758,7 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(result_div, 0x40000000);
__ CompareImmediate(result_div, 0x4000000000000000);
__ b(deopt, EQ);
// result_mod <- left - right * result_div.
__ msub(result_mod, TMP, result_div, result_mod);
@ -5392,10 +5287,6 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register shifter = locs()->in(1).reg();
// TODO(johnmccutchan): Use range information to avoid these checks.
// Assert this is a legitimate Smi in debug mode, but does not assert
// anything about the range relative to the bit width.
__ AssertSmiInRange(shifter);
__ SmiUntag(TMP, shifter);
__ CompareImmediate(TMP, 0);
// If shift value is < 0, deoptimize.
@ -5415,7 +5306,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CompareImmediate(TMP, kShifterLimit);
// If shift value is > 31, return zero.
__ csel(out, ZR, out, GT);
__ csel(out, out, ZR, GT);
}
LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,

@ -1074,13 +1074,11 @@ CompileType LoadIndexedInstr::ComputeType() const {
case kTwoByteStringCid:
case kExternalOneByteStringCid:
case kExternalTwoByteStringCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt64ArrayCid:
// TODO(erikcorry): Perhaps this can return a faster type. See
// https://github.com/dart-lang/sdk/issues/32582
return CompileType::Int();
default:
@ -1263,24 +1261,16 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = might_box ? 2 : 0;
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
// The smi index is either untagged (element size == 1), or it is left smi
// tagged (for all element sizes > 1).
summary->set_in(1, index_scale() == 1 ? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
if (might_box) {
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
return summary;
}
@ -1312,6 +1302,7 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
@ -1325,34 +1316,12 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
default:
UNREACHABLE();
break;
}
if (representation_ == kTagged) {
if (can_pack_into_smi()) {
__ SmiTag(result);
} else {
// If the value cannot fit in a smi then allocate a mint box for it.
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
// Temp register needs to be manually preserved on allocation slow-path.
locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
ASSERT(temp != result);
__ MoveRegister(temp, result);
__ SmiTag(result);
Label done;
__ TestImmediate(temp, Immediate(0xc0000000ll));
__ j(ZERO, &done);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
result, temp2);
__ movq(FieldAddress(result, Mint::value_offset()), temp);
__ Bind(&done);
}
}
}
Representation StoreIndexedInstr::RequiredInputRepresentation(
@ -2742,32 +2711,27 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
if (locs.in(1).IsConstant()) {
const Object& constant = locs.in(1).constant();
ASSERT(constant.IsSmi());
// shll operation masks the count to 5 bits.
const intptr_t kCountLimit = 0x1F;
// shlq operation masks the count to 6 bits.
const intptr_t kCountLimit = 0x3F;
const intptr_t value = Smi::Cast(constant).Value();
ASSERT((0 < value) && (value < kCountLimit));
if (shift_left->can_overflow()) {
if (value == 1) {
// Use overflow flag.
__ shll(left, Immediate(1));
__ shlq(left, Immediate(1));
__ j(OVERFLOW, deopt);
__ movsxd(left, left);
return;
}
// Check for overflow.
Register temp = locs.temp(0).reg();
__ movq(temp, left);
__ shll(left, Immediate(value));
__ sarl(left, Immediate(value));
__ movsxd(left, left);
__ shlq(left, Immediate(value));
__ sarq(left, Immediate(value));
__ cmpq(left, temp);
__ j(NOT_EQUAL, deopt); // Overflow.
}
// Shift for result now we know there is no overflow.
__ shlq(left, Immediate(value));
if (shift_left->is_truncating()) {
__ movsxd(left, left);
}
return;
}
@ -2778,32 +2742,23 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
// TODO(srdjan): Implement code below for is_truncating().
// If left is constant, we know the maximal allowed size for right.
const Object& obj = shift_left->left()->BoundConstant();
// Even though we have a non-Smi constant on the left, we might still emit
// a Smi op here. In that case the Smi check above will have deopted, so
// we can't reach this point. Emit a breakpoint to be sure.
if (!obj.IsSmi()) {
__ int3();
return;
}
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareImmediate(right, Immediate(0));
__ j(NEGATIVE, deopt);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(
right, Immediate(reinterpret_cast<int64_t>(Smi::New(max_right))));
__ j(ABOVE_EQUAL, deopt);
}
__ AssertSmiInRange(right);
__ SmiUntag(right);
__ shlq(left, right);
if (shift_left->is_truncating()) {
__ movsxd(left, left);
if (obj.IsSmi()) {
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareImmediate(right, Immediate(0));
__ j(NEGATIVE, deopt);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(
right, Immediate(reinterpret_cast<int64_t>(Smi::New(max_right))));
__ j(ABOVE_EQUAL, deopt);
}
__ SmiUntag(right);
__ shlq(left, right);
}
return;
}
@ -2827,18 +2782,13 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ xorq(left, left);
__ jmp(&done, Assembler::kNearJump);
__ Bind(&is_not_zero);
__ AssertSmiInRange(right);
__ SmiUntag(right);
__ shlq(left, right);
__ Bind(&done);
} else {
__ AssertSmiInRange(right);
__ SmiUntag(right);
__ shlq(left, right);
}
if (shift_left->is_truncating()) {
__ movsxd(left, left);
}
} else {
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
@ -2848,18 +2798,16 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
}
// Left is not a constant.
Register temp = locs.temp(0).reg();
// Check if count is too large for handling it inlined.
__ movl(temp, left);
__ AssertSmiInRange(right);
// Check if count too large for handling it inlined.
__ movq(temp, left);
__ SmiUntag(right);
// Overflow test (preserve temp and right);
__ shll(temp, right);
__ sarl(temp, right);
__ cmpl(temp, left);
__ shlq(left, right);
__ sarq(left, right);
__ cmpq(left, temp);
__ j(NOT_EQUAL, deopt); // Overflow.
// Shift for result now we know there is no overflow.
__ shlq(left, right);
ASSERT(!shift_left->is_truncating());
}
}
@ -2959,23 +2907,19 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kADD:
__ movq(result, left);
__ addl(result, right);
__ addq(result, right);
__ j(OVERFLOW, slow_path->entry_label());
__ movsxd(result, result);
break;
case Token::kSUB:
__ movq(result, left);
__ subl(result, right);
__ subq(result, right);
__ j(OVERFLOW, slow_path->entry_label());
__ movsxd(result, result);
break;
case Token::kMUL:
__ movq(result, left);
__ AssertSmiInRange(result);
__ SmiUntag(result);
__ imull(result, right);
__ imulq(result, right);
__ j(OVERFLOW, slow_path->entry_label());
__ movsxd(result, result);
break;
case Token::kBIT_OR:
ASSERT(left == result);
@ -2996,15 +2940,13 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ j(ABOVE_EQUAL, slow_path->entry_label());
__ movq(RCX, right);
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(result, left);
__ shll(result, RCX);
__ shlq(result, RCX);
__ movq(TMP, result);
__ sarl(TMP, RCX);
__ cmpl(TMP, left);
__ sarq(TMP, RCX);
__ cmpq(TMP, left);
__ j(NOT_EQUAL, slow_path->entry_label());
__ movsxd(result, result);
break;
case Token::kSHR: {
Label shift_count_ok;
@ -3013,8 +2955,6 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cmpq(right, Immediate(Smi::RawValue(Smi::kBits)));
__ j(ABOVE_EQUAL, slow_path->entry_label());
__ AssertSmiInRange(left);
__ AssertSmiInRange(right);
__ movq(RCX, right);
__ SmiUntag(RCX);
__ movq(result, left);
@ -3272,41 +3212,20 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const int64_t imm = reinterpret_cast<int64_t>(constant.raw());
switch (op_kind()) {
case Token::kADD: {
if (deopt != NULL) {
__ AddImmediate(left, Immediate(imm), Assembler::k32Bit);
__ j(OVERFLOW, deopt);
} else {
__ AddImmediate(left, Immediate(imm), Assembler::k64Bit);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ AddImmediate(left, Immediate(imm));
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kSUB: {
if (deopt != NULL) {
__ SubImmediate(left, Immediate(imm), Assembler::k32Bit);
__ j(OVERFLOW, deopt);
} else {
__ SubImmediate(left, Immediate(imm), Assembler::k64Bit);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ SubImmediate(left, Immediate(imm));
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kMUL: {
// Keep left value tagged and untag right value.
const intptr_t value = Smi::Cast(constant).Value();
if (deopt != NULL) {
__ MulImmediate(left, Immediate(value), Assembler::k32Bit);
__ j(OVERFLOW, deopt);
} else {
__ MulImmediate(left, Immediate(value), Assembler::k64Bit);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ MulImmediate(left, Immediate(value));
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kTRUNCDIV: {
@ -3318,9 +3237,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(kSmiTagSize == 1);
Register temp = locs()->temp(0).reg();
__ movq(temp, left);
// Since Smis are sign extended this is enough shift to put all-1s or
// all-0s in the temp register.
__ sarq(temp, Immediate(31));
__ sarq(temp, Immediate(63));
ASSERT(shift_count > 1); // 1, -1 case handled above.
__ shrq(temp, Immediate(64 - shift_count));
__ addq(left, temp);
@ -3349,10 +3266,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
case Token::kSHR: {
// The sarq operation masks the count to 6 bits, but any shift between
// 31 and 63 gives the same result because 32 bit Smis are stored sign
// extended in the registers.
const intptr_t kCountLimit = 0x1F;
// sarq operation masks the count to 6 bits.
const intptr_t kCountLimit = 0x3F;
const intptr_t value = Smi::Cast(constant).Value();
__ sarq(left,
Immediate(Utils::Minimum(value + kSmiTagSize, kCountLimit)));
@ -3364,7 +3279,6 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
break;
}
__ AssertSmiInRange(left);
return;
} // locs()->in(1).IsConstant().
@ -3372,40 +3286,19 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Address& right = locs()->in(1).ToStackSlotAddress();
switch (op_kind()) {
case Token::kADD: {
if (deopt != NULL) {
__ addl(left, right);
__ j(OVERFLOW, deopt);
} else {
__ addq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ addq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kSUB: {
if (deopt != NULL) {
__ subl(left, right);
__ j(OVERFLOW, deopt);
} else {
__ subq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ subq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kMUL: {
__ SmiUntag(left);
if (deopt != NULL) {
__ imull(left, right);
__ j(OVERFLOW, deopt);
} else {
__ imulq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ imulq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kBIT_AND: {
@ -3434,40 +3327,19 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register right = locs()->in(1).reg();
switch (op_kind()) {
case Token::kADD: {
if (deopt != NULL) {
__ addl(left, right);
__ j(OVERFLOW, deopt);
} else {
__ addq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ addq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kSUB: {
if (deopt != NULL) {
__ subl(left, right);
__ j(OVERFLOW, deopt);
} else {
__ subq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ subq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kMUL: {
__ SmiUntag(left);
if (deopt != NULL) {
__ imull(left, right);
__ j(OVERFLOW, deopt);
} else {
__ imulq(left, right);
}
if (deopt != NULL || is_truncating()) {
__ movsxd(left, left);
}
__ imulq(left, right);
if (deopt != NULL) __ j(OVERFLOW, deopt);
break;
}
case Token::kBIT_AND: {
@ -3486,6 +3358,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
break;
}
case Token::kTRUNCDIV: {
Label not_32bit, done;
Register temp = locs()->temp(0).reg();
ASSERT(left == RAX);
ASSERT((right != RDX) && (right != RAX));
@ -3496,20 +3370,43 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ testq(right, right);
__ j(ZERO, deopt);
}
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency.
// We are checking this before untagging them to avoid corner case
// dividing INT_MIN by -1 that raises exception because quotient is
// too large for 32bit register.
__ movsxd(temp, left);
__ cmpq(temp, left);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(temp, right);
__ cmpq(temp, right);
__ j(NOT_EQUAL, &not_32bit);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(left);
__ SmiUntag(right);
__ cdq();
__ idivl(right);
__ movsxd(result, result);
__ jmp(&done);
// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(left);
__ SmiUntag(right);
__ cqo(); // Sign extend RAX -> RDX:RAX.
__ idivq(right); // RAX: quotient, RDX: remainder.
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ cmpl(result, Immediate(0x40000000));
__ CompareImmediate(result, Immediate(0x4000000000000000));
__ j(EQUAL, deopt);
__ movsxd(result, result);
__ Bind(&done);
__ SmiTag(result);
break;
}
case Token::kMOD: {
Label not_32bit, div_done;
Register temp = locs()->temp(0).reg();
ASSERT(left == RDX);
ASSERT((right != RDX) && (right != RAX));
@ -3520,6 +3417,17 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ testq(right, right);
__ j(ZERO, deopt);
}
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency.
// We are checking this before untagging them to avoid corner case
// dividing INT_MIN by -1 that raises exception because quotient is
// too large for 32bit register.
__ movsxd(temp, left);
__ cmpq(temp, left);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(temp, right);
__ cmpq(temp, right);
__ j(NOT_EQUAL, &not_32bit);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(left);
__ SmiUntag(right);
@ -3527,7 +3435,16 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cdq();
__ idivl(right);
__ movsxd(result, result);
__ jmp(&div_done);
// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(left);
__ SmiUntag(right);
__ movq(RAX, RDX);
__ cqo(); // Sign extend RAX -> RDX:RAX.
__ idivq(right); // RAX: quotient, RDX: remainder.
__ Bind(&div_done);
// res = left % right;
// if (res < 0) {
// if (right < 0) {
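
The restored TRUNCDIV and MOD sequences above first test whether both operands sign-extend from 32 bits and, if so, divide with the cheaper 32-bit idivl; because the test runs on the still-tagged operands, an untagged INT32_MIN / -1 (the one division whose quotient overflows a 32-bit register) can never reach the fast path. A sketch of the dispatch, assuming divide-by-zero was already deoptimized:

    #include <cstdint>

    inline bool Fits32(int64_t v) { return v == static_cast<int32_t>(v); }

    // Sketch of the restored divide dispatch above.
    int64_t TruncDiv(int64_t tagged_left, int64_t tagged_right) {
      // Test the tagged values: then the fast-path operands untag to at
      // most 31 bits, so their quotient always fits a 32-bit register.
      bool use_32bit = Fits32(tagged_left) && Fits32(tagged_right);
      int64_t left = tagged_left >> 1;    // SmiUntag
      int64_t right = tagged_right >> 1;  // SmiUntag
      if (use_32bit) {
        return static_cast<int32_t>(left) / static_cast<int32_t>(right);
      }
      return left / right;  // 64-bit idivq path
    }
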
@ -3565,10 +3482,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ j(LESS, deopt);
}
__ SmiUntag(right);
// The sarq operation masks the count to 6 bits, but any shift between 31
// and 63 gives the same result because 32 bit Smis are stored sign
// extended in the registers. We check for 63 in order to take the branch
// more predictably.
// sarq operation masks the count to 6 bits.
const intptr_t kCountLimit = 0x3F;
if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
__ CompareImmediate(right, Immediate(kCountLimit));
@ -3781,52 +3695,41 @@ void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
GetDeoptId(), ICData::kDeoptUnboxInteger)
: NULL;
ASSERT(value == locs()->out(0).reg());
Label done_and_no_need_to_check_range;
ASSERT(locs()->out(0).reg() == value);
if (value_cid == kSmiCid) {
__ AssertSmiInRange(value);
__ SmiUntag(value);
return;
} else if (value_cid == kMintCid) {
__ movq(value, FieldAddress(value, Mint::value_offset()));
} else if (!CanDeoptimize()) {
// Type information is not conclusive, but range analysis found
// the value to be in int64 range. Therefore it must be a smi
// or mint value.
ASSERT(is_truncating());
Label done;
__ SmiUntag(value);
__ j(NOT_CARRY, &done, Assembler::kNearJump);
// Multiply by two in addressing mode because we erroneously
// untagged a pointer by dividing it by two.
Address value_field(value, TIMES_2, Mint::value_offset());
if (is_truncating()) {
__ movl(value, value_field);
__ movsxd(value, value);
} else {
__ movq(value, value_field);
}
__ movq(value, Address(value, TIMES_2, Mint::value_offset()));
__ Bind(&done);
return;
} else {
__ SmiUntagOrCheckClass(value, kMintCid, &done_and_no_need_to_check_range);
Label done;
// Optimistically untag value.
__ SmiUntagOrCheckClass(value, kMintCid, &done);
__ j(NOT_EQUAL, deopt);
// Multiply by two in addressing mode because we erroneously
// untagged a pointer by dividing it by two.
// Undo untagging by multiplying value with 2.
__ movq(value, Address(value, TIMES_2, Mint::value_offset()));
__ Bind(&done);
}
// We get here for the Mint cases, which might be out of range for an
// unboxed int32 output.
// TODO(vegorov): Truncating unboxing leaves garbage in the higher word.
// Is this the best semantics?
// TODO(vegorov): as it is implemented right now truncating unboxing would
// leave "garbage" in the higher word.
if (!is_truncating() && (deopt != NULL)) {
ASSERT(representation() == kUnboxedInt32);
const Register temp = locs()->temp(0).reg();
Register temp = locs()->temp(0).reg();
__ movsxd(temp, value);
__ cmpq(temp, value);
__ j(NOT_EQUAL, deopt);
}
__ Bind(&done_and_no_need_to_check_range);
}
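
SmiUntagOrCheckClass above untags optimistically: it shifts the value right by one before knowing whether it was a Smi. When the value turns out to be a heap pointer, the shift has halved it (and dropped the heap tag bit), so the subsequent load rebuilds the field address with a TIMES_2 scaled operand. Schematically (the field offset is a placeholder, not the real Mint layout):

    #include <cstdint>

    // Sketch of the optimistic-untag trick above. `tagged` is either a
    // Smi (low bit 0) or a tagged Mint pointer (low bit 1).
    int64_t UnboxInt64(int64_t tagged) {
      int64_t half = tagged >> 1;          // optimistic SmiUntag
      if ((tagged & 1) == 0) return half;  // it really was a Smi
      // half * 2 == tagged - 1: doubling undoes the halving and has
      // already stripped the heap tag, which is exactly what
      // Address(value, TIMES_2, Mint::value_offset()) computes.
      const intptr_t kMintValueOffset = 8;  // placeholder field offset
      return *reinterpret_cast<const int64_t*>(half * 2 + kMintValueOffset);
    }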
LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
@ -3834,61 +3737,27 @@ LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps,
ValueFitsSmi() ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
const bool needs_writable_input =
ValueFitsSmi() || (from_representation() == kUnboxedUint32);
summary->set_in(0, needs_writable_input ? Location::RequiresRegister()
: Location::WritableRegister());
if (!ValueFitsSmi()) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
: Location::RequiresRegister());
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
Label done;
ASSERT(value != out);
ASSERT(kSmiTagSize == 1);
if (from_representation() == kUnboxedInt32) {
__ MoveRegister(out, value);
ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
__ addl(out, out);
__ movsxd(out, out); // Does not affect flags.
__ movsxd(out, value);
} else {
// Unsigned.
ASSERT(from_representation() == kUnboxedUint32);
__ movl(out, value);
__ SmiTag(out);
}
if (!ValueFitsSmi()) {
if (from_representation() == kUnboxedInt32) {
__ j(NO_OVERFLOW, &done);
} else {
ASSERT(value != out);
__ TestImmediate(value, Immediate(0xc0000000ll));
__ j(ZERO, &done);
}
// Allocate a mint.
// Value input is a writable register and we have to inform the compiler of
// the type so it can be preserved untagged on the slow path
locs()->live_registers()->Add(locs()->in(0), kUnboxedInt32);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
locs()->temp(0).reg());
if (from_representation() == kUnboxedInt32) {
__ movsxd(value, value);
} else {
__ movl(value, value);
}
__ movq(FieldAddress(out, Mint::value_offset()), value);
__ Bind(&done);
}
__ SmiTag(out);
}
LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
@ -3910,20 +3779,15 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register out = locs()->out(0).reg();
const Register value = locs()->in(0).reg();
__ leaq(out, Address(value, value, TIMES_1, 0));
__ MoveRegister(out, value);
__ SmiTag(out);
if (!ValueFitsSmi()) {
const Register temp = locs()->temp(0).reg();
Label done;
__ movq(temp, value);
__ sarq(temp, Immediate(30));
__ addq(temp, Immediate(1));
__ cmpq(temp, Immediate(2));
__ j(BELOW, &done);
__ j(NO_OVERFLOW, &done);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
__ movq(FieldAddress(out, Mint::value_offset()), value);
__ Bind(&done);
}
}
@ -4485,9 +4349,8 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
__ cmpq(value, Immediate(-0x80000000ll));
__ j(EQUAL, deopt);
__ negq(value);
__ j(OVERFLOW, deopt);
break;
}
case Token::kBIT_NOT:
@ -4636,7 +4499,6 @@ LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
FpuRegister result = locs()->out(0).fpu_reg();
__ AssertSmiInRange(value);
__ SmiUntag(value);
__ cvtsi2sdq(result, value);
}
@ -4666,15 +4528,14 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(result != value_obj);
ASSERT(result != temp);
__ movsd(value_double, FieldAddress(value_obj, Double::value_offset()));
__ cvttsd2sil(result, value_double);
__ cvttsd2siq(result, value_double);
// Overflow is signalled with minint.
Label do_call, done;
// Check for overflow and that it fits into Smi.
__ movl(temp, result);
__ shll(temp, Immediate(1));
__ movq(temp, result);
__ shlq(temp, Immediate(1));
__ j(OVERFLOW, &do_call, Assembler::kNearJump);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(result, temp);
__ SmiTag(result);
__ jmp(&done);
__ Bind(&do_call);
__ pushq(value_obj);
@ -4710,15 +4571,14 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
XmmRegister value = locs()->in(0).fpu_reg();
Register temp = locs()->temp(0).reg();
__ cvttsd2sil(result, value);
__ cvttsd2siq(result, value);
// Overflow is signalled with minint.
Label do_call, done;
// Check for overflow and that it fits into Smi.
__ movl(temp, result);
__ shll(temp, Immediate(1));
__ movq(temp, result);
__ shlq(temp, Immediate(1));
__ j(OVERFLOW, deopt);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(result, temp);
__ SmiTag(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -5017,7 +4877,6 @@ LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
// Both inputs must be writable because they will be untagged.
summary->set_in(0, Location::RegisterLocation(RAX));
summary->set_in(1, Location::WritableRegister());
// Output is a pair of registers.
summary->set_out(0, Location::Pair(Location::RegisterLocation(RAX),
Location::RegisterLocation(RDX)));
return summary;
@ -5032,26 +4891,50 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
PairLocation* pair = locs()->out(0).AsPairLocation();
Register result1 = pair->At(0).reg();
Register result2 = pair->At(1).reg();
Label not_32bit, done;
Register temp = RDX;
ASSERT(left == RAX);
ASSERT((right != RDX) && (right != RAX));
ASSERT(result1 == RAX);
ASSERT(result2 == RDX);
if (RangeUtils::CanBeZero(divisor_range())) {
// Handle divide by zero in runtime.
__ testq(right, right);
__ j(ZERO, deopt);
}
ASSERT(left == RAX);
ASSERT((right != RDX) && (right != RAX));
ASSERT(result1 == RAX);
ASSERT(result2 == RDX);
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency.
// We are checking this before untagging them to avoid corner case
// dividing INT_MIN by -1 that raises exception because quotient is
// too large for 32bit register.
__ movsxd(temp, left);
__ cmpq(temp, left);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(temp, right);
__ cmpq(temp, right);
__ j(NOT_EQUAL, &not_32bit);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(left);
__ SmiUntag(right);
__ cdq();
__ idivl(right);
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ cmpl(RAX, Immediate(0x40000000));
__ j(EQUAL, deopt);
__ movsxd(RAX, RAX);
__ movsxd(RDX, RDX);
__ jmp(&done);
// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(left);
__ SmiUntag(right);
__ cqo(); // Sign extend RAX -> RDX:RAX.
__ idivq(right); // RAX: quotient, RDX: remainder.
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(RAX, Immediate(0x4000000000000000));
__ j(EQUAL, deopt);
__ Bind(&done);
// Modulo correction (RDX).
// res = left % right;
// if (res < 0) {
@ -5061,16 +4944,16 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// res = res + right;
// }
// }
Label done;
Label all_done;
__ cmpq(RDX, Immediate(0));
__ j(GREATER_EQUAL, &done, Assembler::kNearJump);
__ j(GREATER_EQUAL, &all_done, Assembler::kNearJump);
// Result is negative, adjust it.
if ((divisor_range() == NULL) || divisor_range()->Overlaps(-1, 1)) {
Label subtract;
__ cmpq(right, Immediate(0));
__ j(LESS, &subtract, Assembler::kNearJump);
__ addq(RDX, right);
__ jmp(&done, Assembler::kNearJump);
__ jmp(&all_done, Assembler::kNearJump);
__ Bind(&subtract);
__ subq(RDX, right);
} else if (divisor_range()->IsPositive()) {
@ -5080,7 +4963,7 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Right is negative.
__ subq(RDX, right);
}
__ Bind(&done);
__ Bind(&all_done);
__ SmiTag(RAX);
__ SmiTag(RDX);
@ -5422,7 +5305,6 @@ void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Code for a variable shift amount.
// Deoptimize if shift count is > 63 or negative.
// Sarq and shlq instructions mask the count to 6 bits.
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
if (!IsShiftCountInRange()) {
__ cmpq(RCX, Immediate(kMintShiftCountLimit));
@ -5455,15 +5337,15 @@ void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
CompileType BinaryUint32OpInstr::ComputeType() const {
return CompileType::Int();
return CompileType::FromCid(kSmiCid);
}
CompileType ShiftUint32OpInstr::ComputeType() const {
return CompileType::Int();
return CompileType::FromCid(kSmiCid);
}
CompileType UnaryUint32OpInstr::ComputeType() const {
return CompileType::Int();
return CompileType::FromCid(kSmiCid);
}
LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
@ -5578,7 +5460,6 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label zero;
// TODO(johnmccutchan): Use range information to avoid these checks.
__ AssertSmiInRange(shifter);
__ SmiUntag(shifter);
__ cmpq(shifter, Immediate(0));
// If shift value is < 0, deoptimize.
@ -5603,7 +5484,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&zero);
// Shift was greater than 31 bits, just return zero.
__ xorl(left, left);
__ xorq(left, left);
// Exit path.
__ Bind(&done);
@ -5644,8 +5525,8 @@ void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent but we want to normalize the
// upper bits for safety reasons.
// TODO(vegorov) if we ensure that we never leave garbage in the upper bits
// we could avoid this.
// TODO(vegorov) if we ensure that we never use the upper bits we could
// avoid this.
__ movl(out, value);
} else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
// Representations are bitwise equivalent.

View file

@ -66,10 +66,17 @@ TEST_CASE(RangeTests) {
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -1, 1, 63, 63, RangeBoundary(kMinInt64),
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
if (kBitsPerWord == 64) {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(-(1 << 30)),
RangeBoundary(1 << 30));
} else {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
}
TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -100, 0, 0, 64, RangeBoundary::NegativeInfinity(),

View file

@ -1560,10 +1560,10 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
}
void CallSpecializer::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) {
// Note that on ARM64 the result can always be packed into a Smi, so this
// is never triggered.
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
if (!instr->can_pack_into_smi()) instr->set_representation(kUnboxedInt64);
#endif
}
static bool CidTestResultsContains(const ZoneGrowableArray<intptr_t>& results,

View file

@ -265,9 +265,8 @@ static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
__ addsw(R0, R0, Operand(R1)); // Adds.
__ adds(R0, R0, Operand(R1)); // Adds.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
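Because kSmiTag == 0, tagged Smis can be added as plain machine words and the result is already correctly tagged; the adds/b(VS) pair above is the hardware form of that check. A sketch under the same assumption (hypothetical helper, GCC/Clang builtin):

#include <cstdint>

// Adds two tagged Smis; returns false on signed overflow, which is exactly
// the case the intrinsic falls through on.
bool TaggedSmiAdd(intptr_t left, intptr_t right, intptr_t* result) {
  return !__builtin_add_overflow(left, right, result);
}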
@ -279,9 +278,8 @@ void Intrinsifier::Integer_add(Assembler* assembler) {
void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subsw(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ subs(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ ret();
__ Bind(&fall_through);
}
@ -289,9 +287,8 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ subs(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ ret();
__ Bind(&fall_through);
}
@ -302,9 +299,9 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through); // checks two smis
__ SmiUntag(R0); // Untag R0. We only want the result shifted by one.
__ smull(TMP, R0, R1);
__ AsrImmediate(TMP2, TMP, 31);
// TMP2: result bits 31..63.
__ mul(TMP, R0, R1);
__ smulh(TMP2, R0, R1);
// TMP2: result bits 64..127.
__ cmp(TMP2, Operand(TMP, ASR, 63));
__ b(&fall_through, NE);
__ mov(R0, TMP);
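The mul/smulh pair computes the full 128-bit product and accepts it only if the high half is the sign-extension of the low half. A C++ sketch of the same test (hypothetical helper, using the GCC/Clang __int128 extension in place of smulh):

#include <cstdint>

// The 128-bit product fits in 64 bits iff its high 64 bits equal the
// sign-extension of its low 64 bits, which is what the cmp with ASR 63
// checks above.
bool Mul64Overflows(int64_t a, int64_t b) {
  __int128 product = static_cast<__int128>(a) * b;
  int64_t lo = static_cast<int64_t>(product);
  int64_t hi = static_cast<int64_t>(product >> 64);
  return hi != (lo >> 63);
}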
@ -420,7 +417,7 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
// Check the corner case of dividing 'MIN_SMI' by -1, in which case we
// cannot tag the result.
__ CompareImmediate(R0, 0x40000000);
__ CompareImmediate(R0, 0x4000000000000000);
__ b(&fall_through, EQ);
__ SmiTag(R0); // Not equal. Okay to tag and return.
__ ret(); // Return.
@ -431,9 +428,8 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
Label fall_through;
__ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument.
__ BranchIfNotSmi(R0, &fall_through);
__ negsw(R0, R0);
__ negs(R0, R0);
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
@ -492,9 +488,9 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
// Check if count too large for handling it inlined.
__ SmiUntag(TMP, right); // SmiUntag right into TMP.
// Overflow test (preserves left, right, and TMP).
__ lslvw(temp, left, TMP);
__ asrvw(TMP2, temp, TMP);
__ cmpw(left, Operand(TMP2));
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
__ b(&fall_through, NE); // Overflow.
// Shift for the result now that we know there is no overflow.
__ lslv(result, left, TMP);
@ -567,7 +563,6 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ CompareClassId(R0, kDoubleCid);
__ b(&fall_through, EQ);
__ AssertSmiInRange(R1);
__ LoadObject(R0, Bool::False()); // Smi == Mint -> false.
__ ret();
@ -578,7 +573,6 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ b(&fall_through, NE);
// Receiver is Mint, return false if right is Smi.
__ BranchIfNotSmi(R0, &fall_through);
__ AssertSmiInRange(R0);
__ LoadObject(R0, Bool::False());
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.
@ -1501,12 +1495,11 @@ void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&fall_through, VS);
__ fcvtzdsx(R0, V0);
__ fcvtzds(R0, V0);
// Overflow is signaled with minint.
// Check for overflow and that it fits into Smi.
__ AsrImmediate(TMP, R0, 30);
__ cmp(TMP, Operand(R0, ASR, 63));
__ b(&fall_through, NE);
__ CompareImmediate(R0, 0xC000000000000000);
__ b(&fall_through, MI);
__ SmiTag(R0);
__ ret();
__ Bind(&fall_through);
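Both variants of the range check above test whether the converted integer is representable as a Smi. A sketch of the 64-bit form (hypothetical helper):

#include <cstdint>

// An int64 fits in a 63-bit Smi payload iff its top two bits agree, i.e.
// sign-extending from bit 62 reproduces the value.
bool FitsInSmi64(int64_t value) {
  return (value >> 62) == (value >> 63);
}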
@ -1523,10 +1516,10 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&double_hash, VS);
// Convert double value to signed 32-bit int in R0 and back to a
// Convert double value to signed 64-bit int in R0 and back to a
// double value in V1.
__ fcvtzdsw(R0, V0);
__ scvtfdw(V1, R0);
__ fcvtzds(R0, V0);
__ scvtfdx(V1, R0);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow in the conversion from double to int. Conversion
@ -1534,9 +1527,8 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// INT64_MAX or INT64_MIN (saturation).
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ addsw(R0, R0, Operand(R0));
__ adds(R0, R0, Operand(R0));
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
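A plain C++ sketch of this hashCode fast path (hypothetical helper; the range guard also keeps the double-to-int cast well defined, where fcvtzds would saturate instead):

#include <cstdint>

// Returns true and sets *hash when the double round-trips through int64 and
// the integer fits in a Smi; otherwise the caller takes the double-hash path.
bool SmiHashOfDouble(double value, int64_t* hash) {
  if (value != value) return false;  // NaN goes to the slow path
  if (value >= 4611686018427387904.0 ||  // 2^62
      value < -4611686018427387904.0) {
    return false;
  }
  int64_t as_int = static_cast<int64_t>(value);
  if (static_cast<double>(as_int) != value) return false;  // not integral
  *hash = as_int;
  return true;
}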

View file

@ -269,10 +269,8 @@ void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument.
__ AssertSmiInRange(RAX);
__ addl(RAX, Address(RSP, +2 * kWordSize));
__ addq(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -286,10 +284,8 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual minuend of subtraction.
__ AssertSmiInRange(RAX);
__ subl(RAX, Address(RSP, +2 * kWordSize));
__ subq(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -299,13 +295,10 @@ void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual subtrahend of subtraction.
__ AssertSmiInRange(RAX);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
__ subl(RAX, RCX);
__ subq(RAX, RCX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -315,12 +308,10 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ SmiUntag(RAX);
__ imull(RAX, Address(RSP, +2 * kWordSize));
__ imulq(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -342,9 +333,7 @@ void Intrinsifier::Integer_mul(Assembler* assembler) {
// RAX: Untagged fallthrough result (remainder to be adjusted), or
// RAX: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
Label return_zero, try_modulo, not_32bit;
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
Label return_zero, try_modulo, not_32bit, done;
// Check for quick zero results.
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &return_zero, Assembler::kNearJump);
@ -366,12 +355,33 @@ static void EmitRemainderOperation(Assembler* assembler) {
__ Bind(&try_modulo);
// Check if both operands fit into 32 bits, as idiv with 64-bit operands
// requires twice as many cycles and has much higher latency. We check this
// before untagging them to avoid the corner case of dividing INT_MIN by -1,
// which raises an exception because the quotient does not fit in a 32-bit
// register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
// Both operands are 31-bit Smis. Divide using 32-bit idiv.
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cdq();
__ idivl(RCX);
__ movsxd(RAX, RDX);
__ jmp(&done, Assembler::kNearJump);
// Divide using 64-bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cqo();
__ idivq(RCX);
__ movq(RAX, RDX);
__ Bind(&done);
}
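The movsxd/cmpq pairs above implement a cheap "fits in 32 bits" test on the still-tagged operands; a C++ sketch (hypothetical helper):

#include <cstdint>

// True iff sign-extending the low 32 bits reproduces the full value, in
// which case the cheaper 32-bit idiv path can be taken.
bool FitsInInt32(int64_t value) {
  return static_cast<int64_t>(static_cast<int32_t>(value)) == value;
}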
// Implementation:
@ -386,9 +396,7 @@ static void EmitRemainderOperation(Assembler* assembler) {
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
Label fall_through, negative_result;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
__ movq(RCX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RCX);
// RAX: Tagged left (dividend).
// RCX: Tagged right (divisor).
__ cmpq(RCX, Immediate(0));
@ -422,17 +430,21 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
Label fall_through, not_32bit;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX: right argument (divisor)
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument (dividend).
__ AssertSmiInRange(RAX);
// Check the corner case of dividing 'MIN_SMI' by -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through);
// Check if both operands fit into 32 bits, as idiv with 64-bit operands
// requires twice as many cycles and has much higher latency. We check this
// before untagging them to avoid the corner case of dividing INT_MIN by -1,
// which raises an exception because the quotient does not fit in a 32-bit
// register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit);
// Both operands are 31-bit Smis. Divide using 32-bit idiv.
__ SmiUntag(RAX);
@ -442,6 +454,21 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
__ movsxd(RAX, RAX);
__ SmiTag(RAX); // Result is guaranteed to fit into a smi.
__ ret();
// Divide using 64-bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ pushq(RDX); // Preserve RDX in case of 'fall_through'.
__ cqo();
__ idivq(RCX);
__ popq(RDX);
// Check the corner case of dividing 'MIN_SMI' by -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(0x4000000000000000));
__ j(EQUAL, &fall_through);
__ SmiTag(RAX);
__ ret();
__ Bind(&fall_through);
}
@ -450,10 +477,8 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through, Assembler::kNearJump); // Non-smi value.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ negq(RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -462,7 +487,6 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
// RAX is the right argument.
__ andq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.
@ -478,7 +502,6 @@ void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
__ orq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.
__ ret();
@ -494,7 +517,6 @@ void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ xorq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -510,32 +532,28 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
Label fall_through, overflow;
TestBothArgumentsSmis(assembler, &fall_through);
// Shift value is in RAX. Compare with tagged Smi.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits)));
__ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump);
__ SmiUntag(RAX);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);
// Overflow test - all the shifted-out bits must be the same as the sign bit.
__ movq(RDI, RAX);
__ shll(RAX, RCX);
__ sarl(RAX, RCX);
__ movsxd(RAX, RAX);
__ shlq(RAX, RCX);
__ sarq(RAX, RCX);
__ cmpq(RAX, RDI);
__ j(NOT_EQUAL, &overflow, Assembler::kNearJump);
__ shlq(RDI, RCX); // Shift for the result now that we know there is no overflow.
__ movq(RAX, RDI);
__ shlq(RAX, RCX); // Shift for the result now that we know there is no overflow.
// RAX is a correctly tagged Smi.
__ ret();
__ Bind(&overflow);
// Mint is used on x64 for integers requiring 64 bits instead of the 31 bits
// representable by a Smi.
// Mint is rarely used on x64 (only for integers requiring 64 bits instead of
// the 63 bits representable by a Smi).
__ Bind(&fall_through);
}
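The overflow test above shifts left and then arithmetically shifts back, accepting the shift only if every shifted-out bit matched the sign bit. A C++ sketch (hypothetical helper; the unsigned cast sidesteps signed-shift undefined behavior):

#include <cstdint>

// Assumes 0 <= shift < 64; returns true iff (value << shift) loses bits.
bool ShlOverflows(int64_t value, int shift) {
  int64_t shifted =
      static_cast<int64_t>(static_cast<uint64_t>(value) << shift);
  return (shifted >> shift) != value;
}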
@ -543,7 +561,6 @@ static void CompareIntegers(Assembler* assembler, Condition true_condition) {
Label fall_through, true_label;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains the right argument.
__ AssertSmiInRange(RAX);
__ cmpq(Address(RSP, +2 * kWordSize), RAX);
__ j(true_condition, &true_label, Assembler::kNearJump);
__ LoadObject(RAX, Bool::False());
@ -589,9 +606,6 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ orq(RAX, RCX);
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
// Or-ing them together should still leave them both as compressible smis.
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
// Both arguments are smi, '===' is good enough.
__ LoadObject(RAX, Bool::False());
__ ret();
@ -609,21 +623,9 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
// Left (receiver) is Smi, return false if right is not Double.
// Note that an instance of Mint or Bigint never contains a value that can be
// represented by Smi.
__ AssertSmiInRange(RAX);
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ CompareClassId(RAX, kDoubleCid);
__ j(EQUAL, &fall_through);
#if defined(DEBUG)
Label ok;
__ CompareClassId(RAX, kMintCid);
__ j(NOT_EQUAL, &ok);
__ movq(RAX, FieldAddress(RAX, Mint::value_offset()));
__ sarq(RCX, Immediate(1));
__ cmpq(RAX, RCX);
__ j(NOT_EQUAL, &ok);
__ Stop("Smi wrapped in a Mint");
__ Bind(&ok);
#endif
__ LoadObject(RAX, Bool::False());
__ ret();
@ -635,7 +637,6 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
__ AssertSmiInRange(RAX);
// Smi == Mint -> false.
__ LoadObject(RAX, Bool::False());
__ ret();
@ -665,7 +666,6 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
__ Bind(&shift_count_ok);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX); // Value.
__ sarq(RAX, RCX);
__ SmiTag(RAX);
@ -676,7 +676,6 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
// Argument is Smi (receiver).
void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
__ notq(RAX);
__ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();
@ -685,7 +684,6 @@ void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
void Intrinsifier::Smi_bitLength(Assembler* assembler) {
ASSERT(kSmiTagShift == 1);
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
// XOR with sign bit to complement bits if value is negative.
__ movq(RCX, RAX);
__ sarq(RCX, Immediate(63)); // All 0 or all 1.
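The sign-mask trick above folds negative values onto their complement so one code path handles both signs; a C++ sketch of the resulting bitLength (hypothetical helper):

#include <cstdint>

// XOR with the sign mask (all zeros or all ones) maps x to x for x >= 0 and
// to ~x for x < 0, matching Dart's bitLength semantics; then count the bits.
int BitLength(int64_t value) {
  int64_t sign_mask = value >> 63;
  uint64_t magnitude = static_cast<uint64_t>(value ^ sign_mask);
  int length = 0;
  while (magnitude != 0) {
    magnitude >>= 1;
    ++length;
  }
  return length;
}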
@ -711,7 +709,6 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up.
__ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read.
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RSI, RCX);
@ -747,7 +744,6 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
__ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RDX, RCX);
@ -1235,7 +1231,6 @@ static void CompareDoubles(Assembler* assembler, Condition true_condition) {
__ LoadObject(RAX, Bool::True());
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);
@ -1296,7 +1291,6 @@ static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);
@ -1326,7 +1320,6 @@ void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));
@ -1349,7 +1342,6 @@ void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM0, RAX);
const Class& double_class =
@ -1420,15 +1412,14 @@ void Intrinsifier::Double_getIsNegative(Assembler* assembler) {
void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
__ cvttsd2sil(RAX, XMM0);
__ cvttsd2siq(RAX, XMM0);
// Overflow is signalled with minint.
Label fall_through;
// Check for overflow and that it fits into Smi.
__ movq(RCX, RAX);
__ shll(RCX, Immediate(1));
__ shlq(RCX, Immediate(1));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(RAX, RCX);
__ SmiTag(RAX);
__ ret();
__ Bind(&fall_through);
}
@ -1440,17 +1431,16 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// back to a double in XMM1.
__ movq(RCX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RCX, Double::value_offset()));
__ cvttsd2sil(RAX, XMM0);
__ cvtsi2sdl(XMM1, RAX);
__ cvttsd2siq(RAX, XMM0);
__ cvtsi2sdq(XMM1, RAX);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow and NaN in the conversion from double to int. Conversion
// overflow from cvttsd2sil is signalled with an INT32_MIN value.
// overflow from cvttsd2si is signalled with an INT64_MIN value.
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ addl(RAX, RAX);
__ addq(RAX, RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
@ -1488,7 +1478,6 @@ void Intrinsifier::MathSqrt(Assembler* assembler) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);

View file

@ -708,14 +708,14 @@ enum Shift {
enum Extend {
kNoExtend = -1,
UXTB = 0, // Zero extend byte.
UXTH = 1, // Zero extend halfword (16 bits).
UXTW = 2, // Zero extend word (32 bits).
UXTX = 3, // Zero extend doubleword (64 bits).
SXTB = 4, // Sign extend byte.
SXTH = 5, // Sign extend halfword (16 bits).
SXTW = 6, // Sign extend word (32 bits).
SXTX = 7, // Sign extend doubleword (64 bits).
UXTB = 0,
UXTH = 1,
UXTW = 2,
UXTX = 3,
SXTB = 4,
SXTH = 5,
SXTW = 6,
SXTX = 7,
kMaxExtend = 8,
};

View file

@ -21,8 +21,9 @@
#undef OVERFLOW // From math.h conflicts in constants_ia32.h
namespace dart {
// Smi value range is from -(2^N) to (2^N)-1. N=30
const intptr_t kSmiBits = 30;
// Smi value range is from -(2^N) to (2^N)-1.
// N=30 (32-bit build) or N=62 (64-bit build).
const intptr_t kSmiBits = kBitsPerWord - 2;
const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;
const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);
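A couple of hypothetical compile-time checks (not in the original source) that spell the definition out:

// kSmiBits reserves one tag bit and one sign bit out of the machine word,
// so a 64-bit build covers [-2^62, 2^62 - 1] and a 32-bit build
// [-2^30, 2^30 - 1].
static_assert(kSmiBits == kBitsPerWord - 2,
              "one tag bit and one sign bit are reserved");
static_assert(kSmiMin == -kSmiMax - 1,
              "the Smi range is a two's-complement interval");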

View file

@ -7873,16 +7873,7 @@ class Array : public Instance {
virtual uword ComputeCanonicalTableHash() const;
static const intptr_t kBytesPerElement = kWordSize;
// The length field is a Smi, so that sets one limit on the max Array length.
// But we also need to be able to represent the length in bytes in an
// intptr_t, which is a different limit. Either may be smaller. We can't
// use Utils::Minimum here because it is not a const expression.
static const intptr_t kElementLimitDueToIntptrMax = static_cast<intptr_t>(
(kIntptrMax - sizeof(RawArray) - kObjectAlignment + kBytesPerElement) /
kBytesPerElement);
static const intptr_t kMaxElements = kSmiMax < kElementLimitDueToIntptrMax
? kSmiMax
: kElementLimitDueToIntptrMax;
static const intptr_t kMaxElements = kSmiMax / kBytesPerElement;
static const intptr_t kMaxNewSpaceElements =
(Heap::kNewAllocatableSize - sizeof(RawArray)) / kBytesPerElement;

View file

@ -308,10 +308,16 @@ ISOLATE_UNIT_TEST_CASE(Smi) {
EXPECT(Smi::IsValid(-15));
EXPECT(Smi::IsValid(0xFFu));
// Upper two bits must be either 00 or 11.
#if defined(ARCH_IS_64_BIT)
EXPECT(!Smi::IsValid(kMaxInt64));
EXPECT(Smi::IsValid(0x3FFFFFFFFFFFFFFF));
EXPECT(Smi::IsValid(-1));
#else
EXPECT(!Smi::IsValid(kMaxInt32));
EXPECT(Smi::IsValid(0x3FFFFFFF));
EXPECT(Smi::IsValid(-1));
EXPECT(!Smi::IsValid(0xFFFFFFFFu));
#endif
EXPECT_EQ(5, smi.AsInt64Value());
EXPECT_EQ(5.0, smi.AsDoubleValue());
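The "upper two bits" rule being tested corresponds to this C++ sketch (hypothetical helper):

#include <cstdint>

// A word is a valid Smi payload iff its two most significant bits agree,
// i.e. the value survives losing one bit to the tag.
bool IsValidSmi(intptr_t value) {
  const int kBits = sizeof(intptr_t) * 8;
  return (value >> (kBits - 1)) == (value >> (kBits - 2));
}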
@ -439,6 +445,9 @@ ISOLATE_UNIT_TEST_CASE(StringIRITwoByte) {
}
ISOLATE_UNIT_TEST_CASE(Mint) {
// On 64-bit architectures a Smi is stored in a 64-bit word. A Mint cannot
// be allocated if the value fits into a Smi.
#if !defined(ARCH_IS_64_BIT)
{
Mint& med = Mint::Handle();
EXPECT(med.IsNull());
@ -508,6 +517,7 @@ ISOLATE_UNIT_TEST_CASE(Mint) {
EXPECT_EQ(mint1.value(), mint_value);
EXPECT_EQ(mint2.value(), mint_value);
EXPECT_EQ(mint1.raw(), mint2.raw());
#endif
}
ISOLATE_UNIT_TEST_CASE(Double) {
@ -2737,6 +2747,22 @@ ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}
#if defined(ARCH_IS_64_BIT)
// Test for Embedded Smi object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
Assembler _assembler_;
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
const Code& code = Code::Handle(Code::FinalizeCode(function, &_assembler_));
function.AttachCode(code);
const Object& result =
Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}
#endif // ARCH_IS_64_BIT
ISOLATE_UNIT_TEST_CASE(ExceptionHandlers) {
const int kNumEntries = 4;

View file

@ -3023,19 +3023,10 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
Trace* current_trace,
PreloadState* state) {
if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
// On ARM64, only read 16 bits ahead for now. This ensures that boxing is
// trivial even with the new smaller Smis. See
// https://github.com/dart-lang/sdk/issues/29951 and
// LoadCodeUnitsInstr::EmitNativeCode.
#if defined(TARGET_ARCH_ARM64)
const int kMaxBytesLoaded = 2;
#else
const int kMaxBytesLoaded = 4;
#endif
const int kMaxTwoByteCharactersLoaded = kMaxBytesLoaded / 2;
state->eats_at_least_ = EatsAtLeast(
compiler->one_byte() ? kMaxBytesLoaded : kMaxTwoByteCharactersLoaded,
kRecursionBudget, current_trace->at_start() == Trace::FALSE_VALUE);
// Save some time by looking at most one machine word ahead.
state->eats_at_least_ =
EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
current_trace->at_start() == Trace::FALSE_VALUE);
}
state->preload_characters_ =
CalculatePreloadCharacters(compiler, state->eats_at_least_);

View file

@ -3199,21 +3199,13 @@ void Simulator::DecodeFPIntCvt(Instr* instr) {
set_vregisterd(vd, 1, 0);
} else if (instr->Bits(16, 5) == 24) {
// Format(instr, "fcvtzds'sf 'rd, 'vn");
const intptr_t max = instr->Bit(31) == 1 ? INT64_MAX : INT32_MAX;
const intptr_t min = instr->Bit(31) == 1 ? INT64_MIN : INT32_MIN;
const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
int64_t result;
if (vn_val >= static_cast<double>(max)) {
result = max;
} else if (vn_val <= static_cast<double>(min)) {
result = min;
if (vn_val >= static_cast<double>(INT64_MAX)) {
set_register(instr, rd, INT64_MAX, instr->RdMode());
} else if (vn_val <= static_cast<double>(INT64_MIN)) {
set_register(instr, rd, INT64_MIN, instr->RdMode());
} else {
result = static_cast<int64_t>(vn_val);
}
if (instr->Bit(31) == 1) {
set_register(instr, rd, result, instr->RdMode());
} else {
set_register(instr, rd, result & 0xffffffffll, instr->RdMode());
set_register(instr, rd, static_cast<int64_t>(vn_val), instr->RdMode());
}
} else {
UnimplementedInstruction(instr);

View file

@ -636,11 +636,11 @@ void Simulator::Exit(Thread* thread,
// __builtin_s{add,sub,mul}_overflow() intrinsics here and below.
// Note that they may clobber the output location even when there is overflow:
// https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html
DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
int32_t rhs,
DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
intptr_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32)
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
asm volatile(
"add %2, %1\n"
"jo 1f;\n"
@ -650,19 +650,7 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"addl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
asm volatile(
"adds %1, %1, %2;\n"
"bvs 1f;\n"
@ -672,28 +660,17 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM64)
asm volatile(
"adds %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#else
#error "Unsupported platform"
#endif
return (res != 0);
}
DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
int32_t rhs,
DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
intptr_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32)
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
asm volatile(
"sub %2, %1\n"
"jo 1f;\n"
@ -703,19 +680,7 @@ DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"subl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
asm volatile(
"subs %1, %1, %2;\n"
"bvs 1f;\n"
@ -725,28 +690,17 @@ DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM64)
asm volatile(
"subs %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#else
#error "Unsupported platform"
#endif
return (res != 0);
}
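For comparison, the __builtin_s{add,sub,mul}_overflow intrinsics mentioned at the top of this block express the same contract portably; a sketch (helper name is hypothetical):

#include <cstdint>

// Like the asm versions, the builtin may clobber *out even when it reports
// overflow, so callers must not rely on *out in that case.
static inline bool SignedSubWithOverflowPortable(intptr_t lhs, intptr_t rhs,
                                                 intptr_t* out) {
  return __builtin_sub_overflow(lhs, rhs, out);
}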
DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
int32_t rhs,
DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
intptr_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32)
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
asm volatile(
"imul %2, %1\n"
"jo 1f;\n"
@ -756,18 +710,6 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"imull %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
asm volatile(
"smull %1, ip, %1, %2;\n"
@ -782,12 +724,12 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
#elif defined(HOST_ARCH_ARM64)
int64_t prod_lo = 0;
asm volatile(
"smull %x1, %w2, %w3\n"
"asr %x2, %x1, #63\n"
"cmp %x2, %x1, ASR #31;\n"
"mul %1, %2, %3\n"
"smulh %2, %2, %3\n"
"cmp %2, %1, ASR #63;\n"
"bne 1f;\n"
"mov %0, #0;\n"
"str %x1, [%4, #0]\n"
"str %1, [%4, #0]\n"
"1:"
: "=r"(res), "+r"(prod_lo), "+r"(lhs)
: "r"(rhs), "r"(out)
@ -2029,7 +1971,11 @@ RawObject* Simulator::Call(const Code& code,
if (rhs != 0) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = (lhs >> kSmiTagSize) / (rhs >> kSmiTagSize);
#if defined(ARCH_IS_64_BIT)
const intptr_t untaggable = 0x4000000000000000LL;
#else
const intptr_t untaggable = 0x40000000L;
#endif // defined(ARCH_IS_64_BIT)
if (res != untaggable) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = res << kSmiTagSize;
pc++;
@ -2055,12 +2001,11 @@ RawObject* Simulator::Call(const Code& code,
{
BYTECODE(Shl, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
const int kBitsPerInt32 = 32;
if (static_cast<uintptr_t>(rhs) < kBitsPerInt32) {
const int32_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const int32_t res = lhs << rhs;
if (static_cast<uintptr_t>(rhs) < kBitsPerWord) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = lhs << rhs;
if (lhs == (res >> rhs)) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = static_cast<intptr_t>(res);
*reinterpret_cast<intptr_t*>(&FP[rA]) = res;
pc++;
}
}
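The Shl bytecode above validates the shift by undoing it; a C++ sketch of the guard (hypothetical helper; operands stay tagged throughout, so the result is tagged too):

#include <cstdint>

// Returns false (leaving *out untouched) when the shift count is out of
// range or bits, including the sign, would be lost.
bool CheckedTaggedShl(intptr_t lhs, intptr_t rhs, intptr_t* out) {
  if (static_cast<uintptr_t>(rhs) >= sizeof(intptr_t) * 8) return false;
  intptr_t res = static_cast<intptr_t>(static_cast<uintptr_t>(lhs) << rhs);
  if (lhs != (res >> rhs)) return false;  // overflow
  *out = res;
  return true;
}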
@ -2071,7 +2016,8 @@ RawObject* Simulator::Call(const Code& code,
BYTECODE(Shr, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
if (rhs >= 0) {
const intptr_t shift_amount = (rhs >= 32) ? (32 - 1) : rhs;
const intptr_t shift_amount =
(rhs >= kBitsPerWord) ? (kBitsPerWord - 1) : rhs;
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]) >> kSmiTagSize;
*reinterpret_cast<intptr_t*>(&FP[rA]) = (lhs >> shift_amount)
<< kSmiTagSize;

View file

@ -253,6 +253,10 @@ void CheckMint(int64_t value) {
// here covers most of the 64-bit range. On 32-bit platforms the smi
// range covers most of the 32-bit range and values outside that
// range are also represented as mints.
#if defined(ARCH_IS_64_BIT)
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
#else
if (kMinInt32 < value && value < kMaxInt32) {
EXPECT_EQ(Dart_CObject_kInt32, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int32);
@ -260,6 +264,7 @@ void CheckMint(int64_t value) {
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
}
#endif
}
TEST_CASE(SerializeMints) {

View file

@ -1334,18 +1334,14 @@ static void EmitFastSmiOp(Assembler* assembler,
__ ldr(R1, Address(SP, +1 * kWordSize)); // Left.
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, not_smi_or_overflow);
__ AssertSmiInRange(R0);
__ AssertSmiInRange(R1);
switch (kind) {
case Token::kADD: {
__ addsw(R0, R1, Operand(R0)); // Adds.
__ sxtw(R0, R0);
__ adds(R0, R1, Operand(R0)); // Adds.
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}
case Token::kSUB: {
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ sxtw(R0, R0);
__ subs(R0, R1, Operand(R0)); // Subtract.
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}
@ -1387,8 +1383,6 @@ static void EmitFastSmiOp(Assembler* assembler,
__ StoreToOffset(R1, R6, count_offset);
}
__ AssertSmiInRange(R0, Assembler::kValueCanBeHeapPointer);
__ ret();
}

View file

@ -1274,15 +1274,13 @@ static void EmitFastSmiOp(Assembler* assembler,
__ j(NOT_ZERO, not_smi_or_overflow);
switch (kind) {
case Token::kADD: {
__ addl(RAX, RCX);
__ addq(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kSUB: {
__ subl(RAX, RCX);
__ subq(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kEQ: {

View file

@ -18,7 +18,7 @@ namespace dart {
// ClassifyingTokenPositions N -> -1 - N
//
// Synthetically created AstNodes are given real source positions but encoded
// as negative numbers from [kSmiMin, -1 - N]. For example:
// as negative numbers from [kSmiMin32, -1 - N]. For example:
//
// A source position of 0 in a synthetic AstNode would be encoded as -2 - N.
// A source position of 1 in a synthetic AstNode would be encoded as -3 - N.
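Hypothetical helpers (not in the original source) illustrating that mapping, with n standing for the N above:

#include <cstdint>

// A synthetic source position p is stored as -2 - n - p, so decoding is the
// same affine map applied again.
intptr_t EncodeSynthetic(intptr_t source_pos, intptr_t n) {
  return -2 - n - source_pos;  // 0 -> -2 - n, 1 -> -3 - n, ...
}

intptr_t DecodeSynthetic(intptr_t encoded, intptr_t n) {
  return -2 - n - encoded;
}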
@ -86,7 +86,7 @@ class TokenPosition {
#undef DECLARE_VALUES
static const intptr_t kMinSourcePos = 0;
static const TokenPosition kMinSource;
static const intptr_t kMaxSourcePos = kSmiMax - kMaxSentinelDescriptors - 2;
static const intptr_t kMaxSourcePos = kSmiMax32 - kMaxSentinelDescriptors - 2;
static const TokenPosition kMaxSource;
// Decode from a snapshot.

View file

@ -188,6 +188,9 @@ LibTest/collection/ListBase/ListBase_class_A01_t02: Skip # co19 issue 673, These
LibTest/collection/ListMixin/ListMixin_class_A01_t02: Skip # co19 issue 673, These tests take too much memory (300 MB) for our 1 GB test machine co19 issue 673. http://code.google.com/p/co19/issues/detail?id=673
LibTest/core/List/List_class_A01_t02: Skip # co19 issue 673, These tests take too much memory (300 MB) for our 1 GB test machine co19 issue 673. http://code.google.com/p/co19/issues/detail?id=673
[ $arch != arm64 && $arch != simarm64 && $arch != simdbc && $arch != simdbc64 && $arch != x64 && ($runtime == dart_precompiled || $runtime == vm) ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # co19 issue 129
[ $arch == ia32 && $mode == release && $runtime == vm && $system == linux ]
service/dev_fs_spawn_test: Pass, Fail # Issue 28411
@ -379,9 +382,6 @@ LibTest/typed_data/Uint64List/Uint64List.view_A01_t01: CompileTimeError # Large
LibTest/typed_data/Uint64List/Uint64List.view_A01_t02: CompileTimeError # Large integer literal
WebPlatformTest/*: SkipByDesign # dart:html not supported on VM.
[ $runtime == dart_precompiled || $runtime == vm ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # Can't represent 1 << 2147483647 without running out of memory.
[ $runtime == flutter || $hot_reload || $hot_reload_rollback ]
Language/Expressions/Assignment/prefix_object_t02: Skip # Requires deferred libraries
Language/Expressions/Constants/constant_constructor_t03: Skip # Requires deferred libraries

View file

@ -614,4 +614,5 @@ regexp/UC16_test: RuntimeError
[ $hot_reload || $hot_reload_rollback ]
bigint_parse_radix_test: Pass, Timeout # Issue 31659
bigint_test: Pass, Crash # Issue 31660
integer_parsed_mul_div_vm_test: Pass, Slow # Slow