[VM] Reduce Smi size to 32 bit on 64 bit platforms.

This reduces small tagged integers on 64 bit platforms from 63 bits to 31 bits
plus one tag bit.
This is a step on the way to compile-time-optional compressed pointers on 64
bit platforms.  See more about this at go/dartvmlearnings.
This causes a slowdown for some uses of integers that don't fit in 31 signed
bits, but because both x64 and ARM64 now have unboxed 64 bit integers, the
performance hit should not be too bad.
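For orientation, here is the new Smi layout as a small illustrative sketch (mine, not code from this CL); the constants mirror the kSmiBits / kSmiMax / kSmiMin definitions updated further down in this diff, and FitsInSmi / TagSmi are hypothetical helpers:

#include <cstdint>

// Sketch only: after this CL the Smi payload is 31 signed bits (kSmiBits magnitude
// bits plus the sign bit) on every platform, stored shifted left by the one tag bit.
const intptr_t kSmiBits = 30;
const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;  //  1073741823
const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);     // -1073741824

// Hypothetical helpers for illustration (kSmiTag == 0, kSmiTagSize == 1).
inline bool FitsInSmi(int64_t value) { return value >= kSmiMin && value <= kSmiMax; }
inline intptr_t TagSmi(intptr_t value) { return value << 1; }  // mirrors SmiTag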

R=kustermann@google.com

Change-Id: I035ed84c29b64f0432cd2d24193eb1c6303c14b0
Reviewed-on: https://dart-review.googlesource.com/46244
Commit-Queue: Erik Corry <erikcorry@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Erik Corry 2018-04-06 10:07:53 +00:00 committed by commit-bot@chromium.org
parent 4995978b84
commit 19b4349487
24 changed files with 945 additions and 565 deletions


@ -78,19 +78,14 @@ abstract class _HashBase implements _HashVMBase {
static const int _UNUSED_PAIR = 0;
static const int _DELETED_PAIR = 1;
// On 32-bit, the top bits are wasted to avoid Mint allocation.
// TODO(koda): Reclaim the bits by making the compiler treat hash patterns
// as unsigned words.
// The top bits are wasted to avoid Mint allocation.
static int _indexSizeToHashMask(int indexSize) {
int indexBits = indexSize.bitLength - 2;
return internal.is64Bit
? (1 << (32 - indexBits)) - 1
: (1 << (30 - indexBits)) - 1;
return (1 << (30 - indexBits)) - 1;
}
static int _hashPattern(int fullHash, int hashMask, int size) {
final int maskedHash = fullHash & hashMask;
// TODO(koda): Consider keeping bit length and use left shift.
return (maskedHash == 0) ? (size >> 1) : maskedHash * (size >> 1);
}
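To make the new, platform-independent mask computation concrete, here is the same arithmetic as a C++ sketch with an assumed indexSize (my illustration, not part of the CL): for an index of 16 entries, bitLength is 5, indexBits is 3, and the mask is (1 << 27) - 1.

#include <cstdint>

// Assumes index_size is a positive power of two, as in the hash table code above.
static int32_t IndexSizeToHashMask(uint32_t index_size) {
  int index_bits = (32 - __builtin_clz(index_size)) - 2;  // Dart's bitLength - 2
  return (1 << (30 - index_bits)) - 1;                    // index_size 16 -> 0x07ffffff
}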


@ -32,7 +32,7 @@ class int {
return null; // Empty.
}
}
var smiLimit = is64Bit ? 18 : 9;
var smiLimit = 9;
if ((last - ix) >= smiLimit) {
return null; // May not fit into a Smi.
}
@ -117,7 +117,7 @@ class int {
static int _parseRadix(
String source, int radix, int start, int end, int sign) {
int tableIndex = (radix - 2) * 4 + (is64Bit ? 2 : 0);
int tableIndex = (radix - 2) * 2;
int blockSize = _PARSE_LIMITS[tableIndex];
int length = end - start;
if (length <= blockSize) {
@ -143,7 +143,7 @@ class int {
int positiveOverflowLimit = 0;
int negativeOverflowLimit = 0;
if (_limitIntsTo64Bits) {
tableIndex = tableIndex << 1; // pre-multiply by 2 for simpler indexing
tableIndex = tableIndex << 1; // Pre-multiply by 2 for simpler indexing.
positiveOverflowLimit = _int64OverflowLimits[tableIndex];
if (positiveOverflowLimit == 0) {
positiveOverflowLimit =
@ -159,14 +159,10 @@ class int {
if (result >= positiveOverflowLimit) {
if ((result > positiveOverflowLimit) ||
(smi > _int64OverflowLimits[tableIndex + 2])) {
// Although the unsigned overflow limits do not depend on the
// platform, the multiplier and block size, which are used to
// compute it, do.
int X = is64Bit ? 1 : 0;
if (radix == 16 &&
!(result >= _int64UnsignedOverflowLimits[X] &&
(result > _int64UnsignedOverflowLimits[X] ||
smi > _int64UnsignedSmiOverflowLimits[X])) &&
!(result >= _int64UnsignedOverflowLimit &&
(result > _int64UnsignedOverflowLimit ||
smi > _int64UnsignedSmiOverflowLimit)) &&
blockEnd + blockSize > end) {
return (result * multiplier) + smi;
}
@ -211,43 +207,42 @@ class int {
// For each radix, 2-36, how many digits are guaranteed to fit in a smi,
// and magnitude of such a block (radix ** digit-count).
// 32-bit limit/multiplier at (radix - 2)*4, 64-bit limit at (radix-2)*4+2
static const _PARSE_LIMITS = const [
30, 1073741824, 62, 4611686018427387904, // radix: 2
18, 387420489, 39, 4052555153018976267,
15, 1073741824, 30, 1152921504606846976,
12, 244140625, 26, 1490116119384765625, // radix: 5
11, 362797056, 23, 789730223053602816,
10, 282475249, 22, 3909821048582988049,
10, 1073741824, 20, 1152921504606846976,
9, 387420489, 19, 1350851717672992089,
9, 1000000000, 18, 1000000000000000000, // radix: 10
8, 214358881, 17, 505447028499293771,
8, 429981696, 17, 2218611106740436992,
8, 815730721, 16, 665416609183179841,
7, 105413504, 16, 2177953337809371136,
7, 170859375, 15, 437893890380859375, // radix: 15
7, 268435456, 15, 1152921504606846976,
7, 410338673, 15, 2862423051509815793,
7, 612220032, 14, 374813367582081024,
7, 893871739, 14, 799006685782884121,
6, 64000000, 14, 1638400000000000000, // radix: 20
6, 85766121, 14, 3243919932521508681,
6, 113379904, 13, 282810057883082752,
6, 148035889, 13, 504036361936467383,
6, 191102976, 13, 876488338465357824,
6, 244140625, 13, 1490116119384765625, // radix: 25
6, 308915776, 13, 2481152873203736576,
6, 387420489, 13, 4052555153018976267,
6, 481890304, 12, 232218265089212416,
6, 594823321, 12, 353814783205469041,
6, 729000000, 12, 531441000000000000, // radix: 30
6, 887503681, 12, 787662783788549761,
6, 1073741824, 12, 1152921504606846976,
5, 39135393, 12, 1667889514952984961,
5, 45435424, 12, 2386420683693101056,
5, 52521875, 12, 3379220508056640625, // radix: 35
5, 60466176, 11, 131621703842267136,
30, 1073741824, // radix: 2
18, 387420489,
15, 1073741824,
12, 244140625, // radix: 5
11, 362797056,
10, 282475249,
10, 1073741824,
9, 387420489,
9, 1000000000, // radix: 10
8, 214358881,
8, 429981696,
8, 815730721,
7, 105413504,
7, 170859375, // radix: 15
7, 268435456,
7, 410338673,
7, 612220032,
7, 893871739,
6, 64000000, // radix: 20
6, 85766121,
6, 113379904,
6, 148035889,
6, 191102976,
6, 244140625, // radix: 25
6, 308915776,
6, 387420489,
6, 481890304,
6, 594823321,
6, 729000000, // radix: 30
6, 887503681,
6, 1073741824,
5, 39135393,
5, 45435424,
5, 52521875, // radix: 35
5, 60466176,
];
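As a quick sanity check of the new single-column table (an illustration added here, not part of the CL): the radix-10 row is (9, 1000000000) because every 9-digit decimal block fits in the 31-bit Smi payload, while a 10-digit block may not.

static_assert(999999999LL <= (1LL << 30) - 1,
              "every 9-digit decimal block fits in a Smi");
static_assert(9999999999LL > (1LL << 30) - 1,
              "a 10-digit block can exceed the Smi range");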
/// Flag indicating if integers are limited by 64 bits
@ -257,11 +252,8 @@ class int {
static const _maxInt64 = 0x7fffffffffffffff;
static const _minInt64 = -_maxInt64 - 1;
static const _int64UnsignedOverflowLimits = const [0xfffffffff, 0xf];
static const _int64UnsignedSmiOverflowLimits = const [
0xfffffff,
0xfffffffffffffff
];
static const _int64UnsignedOverflowLimit = 0xfffffffff;
static const _int64UnsignedSmiOverflowLimit = 0xfffffff;
/// In the `--limit-ints-to-64-bits` mode calculation of the expression
///


@ -1062,12 +1062,18 @@ class Assembler : public ValueObject {
const Register crn = ConcreteRegister(rn);
EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kWord);
}
void fcvtzds(Register rd, VRegister vn) {
void fcvtzdsx(Register rd, VRegister vn) {
ASSERT(rd != R31);
ASSERT(rd != CSP);
const Register crd = ConcreteRegister(rd);
EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn));
}
void fcvtzdsw(Register rd, VRegister vn) {
ASSERT(rd != R31);
ASSERT(rd != CSP);
const Register crd = ConcreteRegister(rd);
EmitFPIntCvtOp(FCVTZDS, crd, static_cast<Register>(vn), kWord);
}
void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); }
void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); }
void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); }
@ -1358,9 +1364,17 @@ class Assembler : public ValueObject {
LslImmediate(dst, src, kSmiTagSize);
}
void BranchIfNotSmi(Register reg, Label* label) { tbnz(label, reg, kSmiTag); }
void BranchIfNotSmi(Register reg, Label* label) {
ASSERT(kSmiTagMask == 1);
ASSERT(kSmiTag == 0);
tbnz(label, reg, 0);
}
void BranchIfSmi(Register reg, Label* label) { tbz(label, reg, kSmiTag); }
void BranchIfSmi(Register reg, Label* label) {
ASSERT(kSmiTagMask == 1);
ASSERT(kSmiTag == 0);
tbz(label, reg, 0);
}
void Branch(const StubEntry& stub_entry,
Register pp,
@ -1444,6 +1458,11 @@ class Assembler : public ValueObject {
kValueCanBeSmi,
};
enum CanBeHeapPointer {
kValueIsNotHeapPointer,
kValueCanBeHeapPointer,
};
// Storing into an object.
void StoreIntoObject(Register object,
const Address& dest,
@ -1583,6 +1602,22 @@ class Assembler : public ValueObject {
Register tmp,
OperandSize sz);
void AssertSmiInRange(
Register object,
CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
#if defined(DEBUG)
Label ok;
if (can_be_heap_pointer == kValueCanBeHeapPointer) {
BranchIfNotSmi(object, &ok);
}
cmp(object, Operand(object, SXTW, 0));
b(&ok, EQ);
Stop("Smi out of range");
Bind(&ok);
#endif
}
private:
AssemblerBuffer buffer_; // Contains position independent code.
ObjectPoolWrapper object_pool_wrapper_;
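The DEBUG-only check that AssertSmiInRange emits can be restated in plain C++ as follows (my paraphrase for readers skimming the diff, not code from the CL): a tagged value is in range exactly when sign-extending its low 32 bits reproduces it, which is what cmp against Operand(object, SXTW, 0) here, and movsxd/cmpq on x64, test.

#include <cstdint>

// Equivalent predicate to the assembler check above (sketch).
static inline bool IsCanonicalSmi(int64_t tagged) {
  return tagged == static_cast<int64_t>(static_cast<int32_t>(tagged));
}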


@ -2576,17 +2576,73 @@ ASSEMBLER_TEST_RUN(FldrqFstrqPrePostIndex, test) {
EXPECT_EQ(42.0, EXECUTE_TEST_CODE_DOUBLE(DoubleReturn, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzds, assembler) {
ASSEMBLER_TEST_GENERATE(Fcvtzdsx, assembler) {
__ LoadDImmediate(V0, 42.0);
__ fcvtzds(R0, V0);
__ fcvtzdsx(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzds, test) {
ASSEMBLER_TEST_RUN(Fcvtzdsx, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw, assembler) {
__ LoadDImmediate(V0, 42.0);
__ fcvtzdsw(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow, assembler) {
__ LoadDImmediate(V0, 1e20);
__ fcvtzdsx(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMaxInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsx_overflow_negative, assembler) {
__ LoadDImmediate(V0, -1e20);
__ fcvtzdsx(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsx_overflow_negative, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMinInt64, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow, assembler) {
__ LoadDImmediate(V0, 1e10);
__ fcvtzdsw(R0, V0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMaxInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Fcvtzdsw_overflow_negative, assembler) {
__ LoadDImmediate(V0, -1e10);
__ fcvtzdsw(R0, V0);
__ sxtw(R0, R0);
__ ret();
}
ASSEMBLER_TEST_RUN(Fcvtzdsw_overflow_negative, test) {
typedef int64_t (*Int64Return)() DART_UNUSED;
EXPECT_EQ(kMinInt32, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}
ASSEMBLER_TEST_GENERATE(Scvtfdx, assembler) {
__ LoadImmediate(R0, 42);
__ scvtfdx(V0, R0);


@ -709,6 +709,11 @@ class Assembler : public ValueObject {
kValueCanBeSmi,
};
enum CanBeHeapPointer {
kValueIsNotHeapPointer,
kValueCanBeHeapPointer,
};
// Destroys value.
void StoreIntoObject(Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
@ -932,6 +937,26 @@ class Assembler : public ValueObject {
Register array,
Register index);
void AssertSmiInRange(
Register object,
CanBeHeapPointer can_be_heap_pointer = kValueIsNotHeapPointer) {
#if defined(DEBUG)
Register tmp = object == TMP ? TMP2 : TMP;
Label ok;
if (can_be_heap_pointer == kValueCanBeHeapPointer) {
testl(object, Immediate(kSmiTagMask));
ASSERT(kSmiTag == 0);
j(ZERO, &ok);
}
movsxd(tmp, object);
cmpq(tmp, object);
j(EQUAL, &ok);
Stop("Smi out of range");
Bind(&ok);
#endif
}
static Address VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
}


@ -1552,10 +1552,10 @@ bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
return Utils::IsPowerOfTwo(Utils::Abs(int_value));
}
static intptr_t RepresentationBits(Representation r) {
static intptr_t SignificantRepresentationBits(Representation r) {
switch (r) {
case kTagged:
return kBitsPerWord - 1;
return 31;
case kUnboxedInt32:
case kUnboxedUint32:
return 32;
@ -1569,7 +1569,7 @@ static intptr_t RepresentationBits(Representation r) {
static int64_t RepresentationMask(Representation r) {
return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
(64 - RepresentationBits(r)));
(64 - SignificantRepresentationBits(r)));
}
static bool ToIntegerConstant(Value* value, int64_t* result) {
@ -2130,7 +2130,8 @@ Definition* BinaryIntegerOpInstr::Canonicalize(FlowGraph* flow_graph) {
break;
case Token::kSHL: {
const intptr_t kMaxShift = RepresentationBits(representation()) - 1;
const intptr_t kMaxShift =
SignificantRepresentationBits(representation()) - 1;
if (rhs == 0) {
return left()->definition();
} else if ((rhs < 0) || (rhs >= kMaxShift)) {


@ -955,9 +955,13 @@ CompileType LoadIndexedInstr::ComputeType() const {
case kTwoByteStringCid:
case kExternalOneByteStringCid:
case kExternalTwoByteStringCid:
return CompileType::FromCid(kSmiCid);
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return CompileType::FromCid(kSmiCid);
// TODO(erikcorry): Perhaps this can return a faster type. See
// https://github.com/dart-lang/sdk/issues/32582
return CompileType::Int();
default:
UNIMPLEMENTED();
@ -1231,7 +1235,6 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
case kTwoByteStringCid:
case kExternalTwoByteStringCid:
@ -1245,12 +1248,15 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ SmiTag(result);
break;
default:
UNREACHABLE();
break;
}
if (representation_ == kTagged) {
ASSERT(can_pack_into_smi());
__ SmiTag(result);
}
}
Representation StoreIndexedInstr::RequiredInputRepresentation(
@ -2723,18 +2729,27 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
if (locs.in(1).IsConstant()) {
const Object& constant = locs.in(1).constant();
ASSERT(constant.IsSmi());
// Immediate shift operation takes 6 bits for the count.
const intptr_t kCountLimit = 0x3F;
// Immediate shift operation takes 5 bits for the count.
const intptr_t kCountLimit = 0x1F;
// These should be around the same size.
COMPILE_ASSERT(kCountLimit + 1 == kSmiBits + 2);
const intptr_t value = Smi::Cast(constant).Value();
ASSERT((0 < value) && (value < kCountLimit));
if (shift_left->can_overflow()) {
// Check for overflow (preserve left).
__ LslImmediate(TMP, left, value);
__ cmp(left, Operand(TMP, ASR, value));
__ LslImmediate(TMP, left, value, kWord);
__ cmpw(left, Operand(TMP, ASR, value));
__ b(deopt, NE); // Overflow.
}
// Shift for result now we know there is no overflow.
// Shift for result now we know there is no overflow. This writes the full
// 64 bits of the output register, but unless we are in truncating mode the
// top bits will just be sign extension bits.
__ LslImmediate(result, left, value);
if (shift_left->is_truncating()) {
// This preserves the invariant that Smis only use the low 32 bits of the
// register, the high bits being sign extension bits.
__ sxtw(result, result);
}
return;
}
@ -2742,28 +2757,33 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
const Register right = locs.in(1).reg();
Range* right_range = shift_left->right_range();
if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
// TODO(srdjan): Implement code below for is_truncating().
// If left is constant, we know the maximal allowed size for right.
const Object& obj = shift_left->left()->BoundConstant();
if (obj.IsSmi()) {
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareRegisters(right, ZR);
__ b(deopt, MI);
__ mov(result, ZR);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
// Even though we have a non-Smi constant on the left, we might still emit
// a Smi op here. In that case the Smi check above will have deopted, so
// we can't reach this point. Emit a breakpoint to be sure.
if (!obj.IsSmi()) {
__ Breakpoint();
return;
}
const intptr_t left_int = Smi::Cast(obj).Value();
if (left_int == 0) {
__ CompareRegisters(right, ZR);
__ b(deopt, MI);
__ mov(result, ZR);
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
ASSERT(!shift_left->is_truncating());
return;
}
@ -2787,7 +2807,11 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
}
if (shift_left->is_truncating()) {
__ sxtw(result, result);
}
} else {
// If we can overflow.
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
__ CompareImmediate(right,
@ -2795,15 +2819,16 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ b(deopt, CS);
}
// Left is not a constant.
// Check if count too large for handling it inlined.
// Check if count is too large for handling it inlined.
__ SmiUntag(TMP, right);
// Overflow test (preserve left, right, and TMP);
const Register temp = locs.temp(0).reg();
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
__ lslvw(temp, left, TMP);
__ asrvw(TMP2, temp, TMP);
__ cmpw(left, Operand(TMP2));
__ b(deopt, NE); // Overflow.
// Shift for result now we know there is no overflow.
// Shift for result now we know there is no overflow. This is a 64 bit
// operation, so no sign extension is needed.
__ lslv(result, left, TMP);
}
}
@ -2885,18 +2910,20 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kADD:
__ adds(result, left, Operand(right));
__ addsw(result, left, Operand(right));
__ b(slow_path->entry_label(), VS);
__ sxtw(result, result);
break;
case Token::kSUB:
__ subs(result, left, Operand(right));
__ subsw(result, left, Operand(right));
__ b(slow_path->entry_label(), VS);
__ sxtw(result, result);
break;
case Token::kMUL:
__ SmiUntag(TMP, left);
__ mul(result, TMP, right);
__ smulh(TMP, TMP, right);
// TMP: result bits 64..127.
__ smull(result, TMP, right);
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31-63
__ cmp(TMP, Operand(result, ASR, 63));
__ b(slow_path->entry_label(), NE);
break;
@ -2921,8 +2948,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiUntag(TMP, right);
__ lslv(result, left, TMP);
__ asrv(TMP2, result, TMP);
__ CompareRegisters(left, TMP2);
__ asrvw(TMP2, result, TMP);
__ cmp(left, Operand(TMP2, SXTW, 0));
__ b(slow_path->entry_label(), NE); // Overflow.
break;
case Token::kSHR:
@ -2932,6 +2959,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ b(slow_path->entry_label(), CS);
__ AssertSmiInRange(left);
__ AssertSmiInRange(right);
__ SmiUntag(result, right);
__ SmiUntag(TMP, left);
__ asrv(result, TMP, result);
@ -2941,6 +2970,7 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
__ Bind(slow_path->exit_label());
__ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
}
class CheckedSmiComparisonSlowPath : public SlowPathCode {
@ -3131,20 +3161,28 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kADD: {
if (deopt == NULL) {
__ AddImmediate(result, left, imm);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ AddImmediateSetFlags(result, left, imm);
__ AddImmediateSetFlags(result, left, imm, kWord);
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
case Token::kSUB: {
if (deopt == NULL) {
__ AddImmediate(result, left, -imm);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
// Negating imm and using AddImmediateSetFlags would not detect the
// overflow when imm == kMinInt64.
__ SubImmediateSetFlags(result, left, imm);
// overflow when imm == kMinInt32.
__ SubImmediateSetFlags(result, left, imm, kWord);
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
@ -3152,12 +3190,14 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Keep left value tagged and untag right value.
const intptr_t value = Smi::Cast(constant).Value();
__ LoadImmediate(TMP, value);
__ mul(result, left, TMP);
__ smull(result, left, TMP);
if (deopt != NULL) {
__ smulh(TMP, left, TMP);
// TMP: result bits 64..127.
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31..63.
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
} else if (is_truncating()) {
__ sxtw(result, result);
}
break;
}
@ -3168,9 +3208,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t shift_count =
Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
ASSERT(kSmiTagSize == 1);
__ AsrImmediate(TMP, left, 63);
__ AsrImmediate(TMP, left, 31); // All 1s or all 0s.
ASSERT(shift_count > 1); // 1, -1 case handled above.
const Register temp = TMP2;
// Adjust so that we round to 0 instead of round down.
__ add(temp, left, Operand(TMP, LSR, 64 - shift_count));
ASSERT(shift_count > 0);
__ AsrImmediate(result, temp, shift_count);
@ -3205,6 +3246,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
break;
}
__ AssertSmiInRange(result);
return;
}
@ -3213,18 +3255,26 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kADD: {
if (deopt == NULL) {
__ add(result, left, Operand(right));
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ adds(result, left, Operand(right));
__ addsw(result, left, Operand(right));
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
case Token::kSUB: {
if (deopt == NULL) {
__ sub(result, left, Operand(right));
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ subs(result, left, Operand(right));
__ subsw(result, left, Operand(right));
__ b(deopt, VS);
__ sxtw(result, result);
}
break;
}
@ -3232,10 +3282,13 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiUntag(TMP, left);
if (deopt == NULL) {
__ mul(result, TMP, right);
if (is_truncating()) {
__ sxtw(result, result);
}
} else {
__ mul(result, TMP, right);
__ smulh(TMP, TMP, right);
// TMP: result bits 64..127.
__ smull(result, TMP, right);
__ AsrImmediate(TMP, result, 31);
// TMP: result bits 31..63.
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
}
@ -3270,7 +3323,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(result, 0x4000000000000000LL);
__ CompareImmediate(result, 0x40000000LL);
__ b(deopt, EQ);
__ SmiTag(result);
break;
@ -3315,8 +3368,10 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ b(deopt, LT);
}
__ SmiUntag(TMP, right);
// sarl operation masks the count to 6 bits.
const intptr_t kCountLimit = 0x3F;
// The asrv operation masks the count to 6 bits, but any shift between 31
// and 63 gives the same result because 32 bit Smis are stored sign
// extended in the registers.
const intptr_t kCountLimit = 0x1F;
if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
__ LoadImmediate(TMP2, kCountLimit);
__ CompareRegisters(TMP, TMP2);
@ -3345,6 +3400,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
break;
}
__ AssertSmiInRange(result);
}
LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
@ -3497,10 +3553,17 @@ LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
LocationSummary(zone, kNumInputs, kNumTemps,
ValueFitsSmi() ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
// Get two distinct registers for input and output, plus a temp
// register for testing for overflow and allocating a Mint.
summary->set_in(0, Location::RequiresRegister());
if (!ValueFitsSmi()) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, Location::RequiresRegister());
return summary;
}
@ -3509,16 +3572,51 @@ void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
ASSERT(value != out);
Label done;
ASSERT(kSmiTagSize == 1);
// TODO(vegorov) implement and use UBFM/SBFM for this.
__ LslImmediate(out, value, 32);
if (from_representation() == kUnboxedInt32) {
__ AsrImmediate(out, out, 32 - kSmiTagSize);
ASSERT(kSmiTag == 0);
// Signed Bitfield Insert in Zero instruction extracts the 31 significant
// bits from a Smi.
__ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
if (ValueFitsSmi()) {
return;
}
Register temp = locs()->temp(0).reg();
__ cmp(out, Operand(value, LSL, 1));
__ b(&done, EQ); // Jump if the sbfiz instruction didn't lose info.
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
__ sxtw(temp, value);
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ LsrImmediate(out, out, 32 - kSmiTagSize);
ASSERT(kSmiTag == 0);
// A 32 bit positive Smi has one tag bit and one unused sign bit,
// leaving only 30 bits for the payload.
__ ubfiz(out, value, kSmiTagSize, kSmiBits);
if (ValueFitsSmi()) {
return;
}
Register temp = locs()->temp(0).reg();
__ TestImmediate(value, 0xc0000000);
__ b(&done, EQ); // Jump if both bits are zero.
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
__ ubfiz(temp, value, 0, 32); // Zero extend word.
}
__ StoreToOffset(locs()->temp(0).reg(), out,
Mint::value_offset() - kHeapObjectTag);
#if defined(DEBUG)
Label skip_smi_test;
__ b(&skip_smi_test);
__ Bind(&done);
__ AssertSmiInRange(out, Assembler::kValueCanBeHeapPointer);
__ Bind(&skip_smi_test);
#else
__ Bind(&done);
#endif
}
LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
@ -3546,7 +3644,8 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
ASSERT(kSmiTag == 0);
__ LslImmediate(out, in, kSmiTagSize);
__ LslImmediate(out, in, kSmiTagSize, kWord);
__ sxtw(out, out);
Label done;
__ cmp(in, Operand(out, ASR, kSmiTagSize));
__ b(&done, EQ);
@ -4287,8 +4386,9 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (op_kind()) {
case Token::kNEGATE: {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
__ subs(result, ZR, Operand(value));
__ subsw(result, ZR, Operand(value));
__ b(deopt, VS);
__ sxtw(result, result);
break;
}
case Token::kBIT_NOT:
@ -4299,6 +4399,7 @@ void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
default:
UNREACHABLE();
}
__ AssertSmiInRange(result);
}
LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
@ -4350,7 +4451,7 @@ void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const VRegister result = locs()->out(0).fpu_reg();
__ SmiUntag(TMP, value);
__ scvtfdx(result, TMP);
__ scvtfdw(result, TMP);
}
LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -4394,12 +4495,13 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcmpd(VTMP, VTMP);
__ b(&do_call, VS);
__ fcvtzds(result, VTMP);
__ fcvtzdsx(result, VTMP);
// Overflow is signaled with minint.
// Check for overflow and that it fits into Smi.
__ CompareImmediate(result, 0xC000000000000000);
__ b(&do_call, MI);
__ AsrImmediate(TMP, result, 30);
__ cmp(TMP, Operand(result, ASR, 63));
__ b(&do_call, NE);
__ SmiTag(result);
__ b(&done);
__ Bind(&do_call);
@ -4416,6 +4518,7 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
args_info, locs(), ICData::Handle(),
ICData::kStatic);
__ Bind(&done);
__ AssertSmiInRange(result, Assembler::kValueCanBeHeapPointer);
}
LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
@ -4439,11 +4542,13 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcmpd(value, value);
__ b(deopt, VS);
__ fcvtzds(result, value);
__ fcvtzdsx(result, value);
// Check for overflow and that it fits into Smi.
__ CompareImmediate(result, 0xC000000000000000);
__ b(deopt, MI);
__ AsrImmediate(TMP, result, 30);
__ cmp(TMP, Operand(result, ASR, 63));
__ b(deopt, NE);
__ SmiTag(result);
__ AssertSmiInRange(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -4712,7 +4817,7 @@ void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Check the corner case of dividing the 'MIN_SMI' with -1, in which
// case we cannot tag the result.
__ CompareImmediate(result_div, 0x4000000000000000);
__ CompareImmediate(result_div, 0x40000000);
__ b(deopt, EQ);
// result_mod <- left - right * result_div.
__ msub(result_mod, TMP, result_div, result_mod);
@ -5241,6 +5346,10 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register shifter = locs()->in(1).reg();
// TODO(johnmccutchan): Use range information to avoid these checks.
// Assert this is a legitimate Smi in debug mode, but does not assert
// anything about the range relative to the bit width.
__ AssertSmiInRange(shifter);
__ SmiUntag(TMP, shifter);
__ CompareImmediate(TMP, 0);
// If shift value is < 0, deoptimize.
@ -5260,7 +5369,7 @@ void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CompareImmediate(TMP, kShifterLimit);
// If shift value is > 31, return zero.
__ csel(out, out, ZR, GT);
__ csel(out, ZR, out, GT);
}
LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,

File diff suppressed because it is too large.


@ -66,17 +66,10 @@ TEST_CASE(RangeTests) {
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -1, 1, 63, 63, RangeBoundary(kMinInt64),
RangeBoundary::PositiveInfinity());
if (kBitsPerWord == 64) {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(-(1 << 30)),
RangeBoundary(1 << 30));
} else {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
}
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
RangeBoundary::PositiveInfinity());
TEST_RANGE_OP(Range::Shl, -100, 0, 0, 64, RangeBoundary::NegativeInfinity(),


@ -1558,10 +1558,10 @@ void CallSpecializer::VisitStaticCall(StaticCallInstr* call) {
}
void CallSpecializer::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) {
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM)
// Note that on ARM64 the result can always be packed into a Smi, so this
// is never triggered.
// TODO(zerny): Use kUnboxedUint32 once it is fully supported/optimized.
if (!instr->can_pack_into_smi()) instr->set_representation(kUnboxedInt64);
#endif
}
static bool CidTestResultsContains(const ZoneGrowableArray<intptr_t>& results,


@ -265,8 +265,9 @@ static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through); // Checks two smis.
__ adds(R0, R0, Operand(R1)); // Adds.
__ addsw(R0, R0, Operand(R1)); // Adds.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
@ -278,8 +279,9 @@ void Intrinsifier::Integer_add(Assembler* assembler) {
void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subs(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ subsw(R0, R0, Operand(R1)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
@ -287,8 +289,9 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ subs(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ b(&fall_through, VS); // Fall-through on overflow.
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
@ -299,9 +302,9 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through); // checks two smis
__ SmiUntag(R0); // Untags R6. We only want result shifted by one.
__ mul(TMP, R0, R1);
__ smulh(TMP2, R0, R1);
// TMP: result bits 64..127.
__ smull(TMP, R0, R1);
__ AsrImmediate(TMP2, TMP, 31);
// TMP: result bits 31..63.
__ cmp(TMP2, Operand(TMP, ASR, 63));
__ b(&fall_through, NE);
__ mov(R0, TMP);
@ -417,7 +420,7 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ CompareImmediate(R0, 0x4000000000000000);
__ CompareImmediate(R0, 0x40000000);
__ b(&fall_through, EQ);
__ SmiTag(R0); // Not equal. Okay to tag and return.
__ ret(); // Return.
@ -428,8 +431,9 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
Label fall_through;
__ ldr(R0, Address(SP, +0 * kWordSize)); // Grab first argument.
__ BranchIfNotSmi(R0, &fall_through);
__ negs(R0, R0);
__ negsw(R0, R0);
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.
__ ret();
__ Bind(&fall_through);
}
@ -488,9 +492,9 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
// Check if count too large for handling it inlined.
__ SmiUntag(TMP, right); // SmiUntag right into TMP.
// Overflow test (preserve left, right, and TMP);
__ lslv(temp, left, TMP);
__ asrv(TMP2, temp, TMP);
__ CompareRegisters(left, TMP2);
__ lslvw(temp, left, TMP);
__ asrvw(TMP2, temp, TMP);
__ cmpw(left, Operand(TMP2));
__ b(&fall_through, NE); // Overflow.
// Shift for result now we know there is no overflow.
__ lslv(result, left, TMP);
@ -563,6 +567,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ CompareClassId(R0, kDoubleCid);
__ b(&fall_through, EQ);
__ AssertSmiInRange(R1);
__ LoadObject(R0, Bool::False()); // Smi == Mint -> false.
__ ret();
@ -573,6 +578,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ b(&fall_through, NE);
// Receiver is Mint, return false if right is Smi.
__ BranchIfNotSmi(R0, &fall_through);
__ AssertSmiInRange(R0);
__ LoadObject(R0, Bool::False());
__ ret();
// TODO(srdjan): Implement Mint == Mint comparison.
@ -1495,11 +1501,12 @@ void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&fall_through, VS);
__ fcvtzds(R0, V0);
__ fcvtzdsx(R0, V0);
// Overflow is signaled with minint.
// Check for overflow and that it fits into Smi.
__ CompareImmediate(R0, 0xC000000000000000);
__ b(&fall_through, MI);
__ AsrImmediate(TMP, R0, 30);
__ cmp(TMP, Operand(R0, ASR, 63));
__ b(&fall_through, NE);
__ SmiTag(R0);
__ ret();
__ Bind(&fall_through);
@ -1516,10 +1523,10 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
__ fcmpd(V0, V0);
__ b(&double_hash, VS);
// Convert double value to signed 64-bit int in R0 and back to a
// Convert double value to signed 32-bit int in R0 and back to a
// double value in V1.
__ fcvtzds(R0, V0);
__ scvtfdx(V1, R0);
__ fcvtzdsw(R0, V0);
__ scvtfdw(V1, R0);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow in the conversion from double to int. Conversion
@ -1527,8 +1534,9 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// INT64_MAX or INT64_MIN (saturation).
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ adds(R0, R0, Operand(R0));
__ addsw(R0, R0, Operand(R0));
__ b(&fall_through, VS);
__ sxtw(R0, R0); // Sign extend - flags not affected.
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.


@ -269,8 +269,10 @@ void Intrinsifier::Integer_addFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument.
__ addq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
__ addl(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -284,8 +286,10 @@ void Intrinsifier::Integer_subFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual minuend of subtraction.
__ subq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
__ subl(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -295,10 +299,13 @@ void Intrinsifier::Integer_sub(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains right argument, which is the actual subtrahend of subtraction.
__ AssertSmiInRange(RAX);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));
__ subq(RAX, RCX);
__ AssertSmiInRange(RAX);
__ subl(RAX, RCX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -308,10 +315,12 @@ void Intrinsifier::Integer_mulFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ SmiUntag(RAX);
__ imulq(RAX, Address(RSP, +2 * kWordSize));
__ imull(RAX, Address(RSP, +2 * kWordSize));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -333,7 +342,9 @@ void Intrinsifier::Integer_mul(Assembler* assembler) {
// RAX: Untagged fallthrough result (remainder to be adjusted), or
// RAX: Tagged return result (remainder).
static void EmitRemainderOperation(Assembler* assembler) {
Label return_zero, try_modulo, not_32bit, done;
Label return_zero, try_modulo, not_32bit;
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
// Check for quick zero results.
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &return_zero, Assembler::kNearJump);
@ -355,33 +366,12 @@ static void EmitRemainderOperation(Assembler* assembler) {
__ Bind(&try_modulo);
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency. We are checking
// this before untagging them to avoid corner case dividing INT_MAX by -1 that
// raises exception because quotient is too large for 32bit register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit, Assembler::kNearJump);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cdq();
__ idivl(RCX);
__ movsxd(RAX, RDX);
__ jmp(&done, Assembler::kNearJump);
// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ cqo();
__ idivq(RCX);
__ movq(RAX, RDX);
__ Bind(&done);
}
// Implementation:
@ -396,7 +386,9 @@ static void EmitRemainderOperation(Assembler* assembler) {
void Intrinsifier::Integer_moduloFromInteger(Assembler* assembler) {
Label fall_through, negative_result;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
__ movq(RCX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RCX);
// RAX: Tagged left (dividend).
// RCX: Tagged right (divisor).
__ cmpq(RCX, Immediate(0));
@ -430,21 +422,17 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
Label fall_through, not_32bit;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX: right argument (divisor)
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(0));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ movq(RCX, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Left argument (dividend).
__ AssertSmiInRange(RAX);
// Check if both operands fit into 32bits as idiv with 64bit operands
// requires twice as many cycles and has much higher latency. We are checking
// this before untagging them to avoid corner case dividing INT_MAX by -1 that
// raises exception because quotient is too large for 32bit register.
__ movsxd(RBX, RAX);
__ cmpq(RBX, RAX);
__ j(NOT_EQUAL, &not_32bit);
__ movsxd(RBX, RCX);
__ cmpq(RBX, RCX);
__ j(NOT_EQUAL, &not_32bit);
// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through);
// Both operands are 31bit smis. Divide using 32bit idiv.
__ SmiUntag(RAX);
@ -454,21 +442,6 @@ void Intrinsifier::Integer_truncDivide(Assembler* assembler) {
__ movsxd(RAX, RAX);
__ SmiTag(RAX); // Result is guaranteed to fit into a smi.
__ ret();
// Divide using 64bit idiv.
__ Bind(&not_32bit);
__ SmiUntag(RAX);
__ SmiUntag(RCX);
__ pushq(RDX); // Preserve RDX in case of 'fall_through'.
__ cqo();
__ idivq(RCX);
__ popq(RDX);
// Check the corner case of dividing the 'MIN_SMI' with -1, in which case we
// cannot tag the result.
__ cmpq(RAX, Immediate(0x4000000000000000));
__ j(EQUAL, &fall_through);
__ SmiTag(RAX);
__ ret();
__ Bind(&fall_through);
}
@ -477,8 +450,10 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through, Assembler::kNearJump); // Non-smi value.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(-0x80000000ll));
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ negq(RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -487,6 +462,7 @@ void Intrinsifier::Integer_negate(Assembler* assembler) {
void Intrinsifier::Integer_bitAndFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
__ AssertSmiInRange(RAX);
// RAX is the right argument.
__ andq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.
@ -502,6 +478,7 @@ void Intrinsifier::Integer_bitOrFromInteger(Assembler* assembler) {
Label fall_through;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ AssertSmiInRange(RAX);
__ orq(RAX, Address(RSP, +2 * kWordSize));
// Result is in RAX.
__ ret();
@ -517,6 +494,7 @@ void Intrinsifier::Integer_bitXorFromInteger(Assembler* assembler) {
TestBothArgumentsSmis(assembler, &fall_through);
// RAX is the right argument.
__ xorq(RAX, Address(RSP, +2 * kWordSize));
__ AssertSmiInRange(RAX);
// Result is in RAX.
__ ret();
__ Bind(&fall_through);
@ -532,28 +510,32 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
Label fall_through, overflow;
TestBothArgumentsSmis(assembler, &fall_through);
// Shift value is in RAX. Compare with tagged Smi.
__ AssertSmiInRange(RAX);
__ cmpq(RAX, Immediate(Smi::RawValue(Smi::kBits)));
__ j(ABOVE_EQUAL, &fall_through, Assembler::kNearJump);
__ SmiUntag(RAX);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);
// Overflow test - all the shifted-out bits must be same as the sign bit.
__ movq(RDI, RAX);
__ shlq(RAX, RCX);
__ sarq(RAX, RCX);
__ shll(RAX, RCX);
__ sarl(RAX, RCX);
__ movsxd(RAX, RAX);
__ cmpq(RAX, RDI);
__ j(NOT_EQUAL, &overflow, Assembler::kNearJump);
__ shlq(RAX, RCX); // Shift for result now we know there is no overflow.
__ shlq(RDI, RCX); // Shift for result now we know there is no overflow.
__ movq(RAX, RDI);
// RAX is a correctly tagged Smi.
__ ret();
__ Bind(&overflow);
// Mint is rarely used on x64 (only for integers requiring 64 bit instead of
// 63 bits as represented by Smi).
// Mint is used on x64 for integers requiring 64 bit instead of 31 bits as
// represented by Smi.
__ Bind(&fall_through);
}
@ -561,6 +543,7 @@ static void CompareIntegers(Assembler* assembler, Condition true_condition) {
Label fall_through, true_label;
TestBothArgumentsSmis(assembler, &fall_through);
// RAX contains the right argument.
__ AssertSmiInRange(RAX);
__ cmpq(Address(RSP, +2 * kWordSize), RAX);
__ j(true_condition, &true_label, Assembler::kNearJump);
__ LoadObject(RAX, Bool::False());
@ -606,6 +589,9 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ orq(RAX, RCX);
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
// Or-ing them together should still leave them both as compressible smis.
__ AssertSmiInRange(RAX);
__ AssertSmiInRange(RCX);
// Both arguments are smi, '===' is good enough.
__ LoadObject(RAX, Bool::False());
__ ret();
@ -623,9 +609,21 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
// Left (receiver) is Smi, return false if right is not Double.
// Note that an instance of Mint or Bigint never contains a value that can be
// represented by Smi.
__ AssertSmiInRange(RAX);
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ CompareClassId(RAX, kDoubleCid);
__ j(EQUAL, &fall_through);
#if defined(DEBUG)
Label ok;
__ CompareClassId(RAX, kMintCid);
__ j(NOT_EQUAL, &ok);
__ movq(RAX, FieldAddress(RAX, Mint::value_offset()));
__ sarq(RCX, Immediate(1));
__ cmpq(RAX, RCX);
__ j(NOT_EQUAL, &ok);
__ Stop("Smi wrapped in a Mint");
__ Bind(&ok);
#endif
__ LoadObject(RAX, Bool::False());
__ ret();
@ -637,6 +635,7 @@ void Intrinsifier::Integer_equalToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +kArgumentOffset * kWordSize));
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
__ AssertSmiInRange(RAX);
// Smi == Mint -> false.
__ LoadObject(RAX, Bool::False());
__ ret();
@ -666,6 +665,7 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
__ Bind(&shift_count_ok);
__ movq(RCX, RAX); // Shift amount must be in RCX.
__ movq(RAX, Address(RSP, +2 * kWordSize)); // Value.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX); // Value.
__ sarq(RAX, RCX);
__ SmiTag(RAX);
@ -676,6 +676,7 @@ void Intrinsifier::Integer_sar(Assembler* assembler) {
// Argument is Smi (receiver).
void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
__ notq(RAX);
__ andq(RAX, Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
__ ret();
@ -684,6 +685,7 @@ void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
void Intrinsifier::Smi_bitLength(Assembler* assembler) {
ASSERT(kSmiTagShift == 1);
__ movq(RAX, Address(RSP, +1 * kWordSize)); // Index.
__ AssertSmiInRange(RAX);
// XOR with sign bit to complement bits if value is negative.
__ movq(RCX, RAX);
__ sarq(RCX, Immediate(63)); // All 0 or all 1.
@ -709,6 +711,7 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ subq(R8, Immediate(2)); // x_used > 0, Smi. R8 = x_used - 1, round up.
__ sarq(R8, Immediate(2)); // R8 + 1 = number of digit pairs to read.
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RSI, RCX);
@ -744,6 +747,7 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
__ movq(RDI, Address(RSP, 4 * kWordSize)); // x_digits
__ movq(RCX, Address(RSP, 2 * kWordSize)); // n is Smi
__ AssertSmiInRange(RCX);
__ SmiUntag(RCX);
__ movq(RBX, Address(RSP, 1 * kWordSize)); // r_digits
__ movq(RDX, RCX);
@ -1231,6 +1235,7 @@ static void CompareDoubles(Assembler* assembler, Condition true_condition) {
__ LoadObject(RAX, Bool::True());
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);
@ -1291,6 +1296,7 @@ static void DoubleArithmeticOperations(Assembler* assembler, Token::Kind kind) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);
@ -1320,6 +1326,7 @@ void Intrinsifier::Double_mulFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ movq(RAX, Address(RSP, +2 * kWordSize));
@ -1342,6 +1349,7 @@ void Intrinsifier::DoubleFromInteger(Assembler* assembler) {
__ testq(RAX, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &fall_through);
// Is Smi.
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM0, RAX);
const Class& double_class =
@ -1412,14 +1420,15 @@ void Intrinsifier::Double_getIsNegative(Assembler* assembler) {
void Intrinsifier::DoubleToInteger(Assembler* assembler) {
__ movq(RAX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RAX, Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
__ cvttsd2sil(RAX, XMM0);
// Overflow is signalled with minint.
Label fall_through;
// Check for overflow and that it fits into Smi.
__ movq(RCX, RAX);
__ shlq(RCX, Immediate(1));
__ shll(RCX, Immediate(1));
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ SmiTag(RAX);
ASSERT(kSmiTagShift == 1 && kSmiTag == 0);
__ movsxd(RAX, RCX);
__ ret();
__ Bind(&fall_through);
}
@ -1431,16 +1440,17 @@ void Intrinsifier::Double_hashCode(Assembler* assembler) {
// back to a double in XMM1.
__ movq(RCX, Address(RSP, +1 * kWordSize));
__ movsd(XMM0, FieldAddress(RCX, Double::value_offset()));
__ cvttsd2siq(RAX, XMM0);
__ cvtsi2sdq(XMM1, RAX);
__ cvttsd2sil(RAX, XMM0);
__ cvtsi2sdl(XMM1, RAX);
// Tag the int as a Smi, making sure that it fits; this checks for
// overflow and NaN in the conversion from double to int. Conversion
// overflow from cvttsd2si is signalled with an INT64_MIN value.
// overflow from cvttsd2sil is signalled with an INT32_MIN value.
Label fall_through;
ASSERT(kSmiTag == 0 && kSmiTagShift == 1);
__ addq(RAX, RAX);
__ addl(RAX, RAX);
__ j(OVERFLOW, &fall_through, Assembler::kNearJump);
__ movsxd(RAX, RAX);
// Compare the two double values. If they are equal, we return the
// Smi tagged result immediately as the hash code.
@ -1478,6 +1488,7 @@ void Intrinsifier::MathSqrt(Assembler* assembler) {
__ movsd(FieldAddress(RAX, Double::value_offset()), XMM0);
__ ret();
__ Bind(&is_smi);
__ AssertSmiInRange(RAX);
__ SmiUntag(RAX);
__ cvtsi2sdq(XMM1, RAX);
__ jmp(&double_op);


@ -707,14 +707,14 @@ enum Shift {
enum Extend {
kNoExtend = -1,
UXTB = 0,
UXTH = 1,
UXTW = 2,
UXTX = 3,
SXTB = 4,
SXTH = 5,
SXTW = 6,
SXTX = 7,
UXTB = 0, // Zero extend byte.
UXTH = 1, // Zero extend halfword (16 bits).
UXTW = 2, // Zero extend word (32 bits).
UXTX = 3, // Zero extend doubleword (64 bits).
SXTB = 4, // Sign extend byte.
SXTH = 5, // Sign extend halfword (16 bits).
SXTW = 6, // Sign extend word (32 bits).
SXTX = 7, // Sign extend doubleword (64 bits).
kMaxExtend = 8,
};


@ -21,9 +21,8 @@
#undef OVERFLOW // From math.h conflicts in constants_ia32.h
namespace dart {
// Smi value range is from -(2^N) to (2^N)-1.
// N=30 (32-bit build) or N=62 (64-bit build).
const intptr_t kSmiBits = kBitsPerWord - 2;
// Smi value range is from -(2^N) to (2^N)-1. N=30
const intptr_t kSmiBits = 30;
const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;
const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);


@ -7827,7 +7827,16 @@ class Array : public Instance {
virtual uword ComputeCanonicalTableHash() const;
static const intptr_t kBytesPerElement = kWordSize;
static const intptr_t kMaxElements = kSmiMax / kBytesPerElement;
// The length field is a Smi so that sets one limit on the max Array length.
// But we also need to be able to represent the length in bytes in an
// intptr_t, which is a different limit. Either may be smaller. We can't
// use Utils::Minimum here because it is not a const expression.
static const intptr_t kElementLimitDueToIntptrMax = static_cast<intptr_t>(
(kIntptrMax - sizeof(RawArray) - kObjectAlignment + kBytesPerElement) /
kBytesPerElement);
static const intptr_t kMaxElements = kSmiMax < kElementLimitDueToIntptrMax
? kSmiMax
: kElementLimitDueToIntptrMax;
static const intptr_t kMaxNewSpaceElements =
(Heap::kNewAllocatableSize - sizeof(RawArray)) / kBytesPerElement;


@ -308,16 +308,10 @@ ISOLATE_UNIT_TEST_CASE(Smi) {
EXPECT(Smi::IsValid(-15));
EXPECT(Smi::IsValid(0xFFu));
// Upper two bits must be either 00 or 11.
#if defined(ARCH_IS_64_BIT)
EXPECT(!Smi::IsValid(kMaxInt64));
EXPECT(Smi::IsValid(0x3FFFFFFFFFFFFFFF));
EXPECT(Smi::IsValid(-1));
#else
EXPECT(!Smi::IsValid(kMaxInt32));
EXPECT(Smi::IsValid(0x3FFFFFFF));
EXPECT(Smi::IsValid(-1));
EXPECT(!Smi::IsValid(0xFFFFFFFFu));
#endif
EXPECT_EQ(5, smi.AsInt64Value());
EXPECT_EQ(5.0, smi.AsDoubleValue());
@ -445,9 +439,6 @@ ISOLATE_UNIT_TEST_CASE(StringIRITwoByte) {
}
ISOLATE_UNIT_TEST_CASE(Mint) {
// On 64-bit architectures a Smi is stored in a 64 bit word. A Midint cannot
// be allocated if it does fit into a Smi.
#if !defined(ARCH_IS_64_BIT)
{
Mint& med = Mint::Handle();
EXPECT(med.IsNull());
@ -517,7 +508,6 @@ ISOLATE_UNIT_TEST_CASE(Mint) {
EXPECT_EQ(mint1.value(), mint_value);
EXPECT_EQ(mint2.value(), mint_value);
EXPECT_EQ(mint1.raw(), mint2.raw());
#endif
}
ISOLATE_UNIT_TEST_CASE(Double) {
@ -2747,22 +2737,6 @@ ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}
#if defined(ARCH_IS_64_BIT)
// Test for Embedded Smi object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
Assembler _assembler_;
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
const Code& code = Code::Handle(Code::FinalizeCode(function, &_assembler_));
function.AttachCode(code);
const Object& result =
Object::Handle(DartEntry::InvokeFunction(function, Array::empty_array()));
EXPECT(Smi::Cast(result).Value() == kSmiTestValue);
}
#endif // ARCH_IS_64_BIT
ISOLATE_UNIT_TEST_CASE(ExceptionHandlers) {
const int kNumEntries = 4;


@ -3023,10 +3023,19 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
Trace* current_trace,
PreloadState* state) {
if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
// Save some time by looking at most one machine word ahead.
state->eats_at_least_ =
EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
current_trace->at_start() == Trace::FALSE_VALUE);
// On ARM64, only read 16 bits ahead for now. This ensures that boxing is
// trivial even with the new smaller Smis. See
// https://github.com/dart-lang/sdk/issues/29951 and
// LoadCodeUnitsInstr::EmitNativeCode.
#if defined(TARGET_ARCH_ARM64)
const int kMaxBytesLoaded = 2;
#else
const int kMaxBytesLoaded = 4;
#endif
const int kMaxTwoByteCharactersLoaded = kMaxBytesLoaded / 2;
state->eats_at_least_ = EatsAtLeast(
compiler->one_byte() ? kMaxBytesLoaded : kMaxTwoByteCharactersLoaded,
kRecursionBudget, current_trace->at_start() == Trace::FALSE_VALUE);
}
state->preload_characters_ =
CalculatePreloadCharacters(compiler, state->eats_at_least_);


@ -3199,13 +3199,21 @@ void Simulator::DecodeFPIntCvt(Instr* instr) {
set_vregisterd(vd, 1, 0);
} else if (instr->Bits(16, 5) == 24) {
// Format(instr, "fcvtzds'sf 'rd, 'vn");
const intptr_t max = instr->Bit(31) == 1 ? INT64_MAX : INT32_MAX;
const intptr_t min = instr->Bit(31) == 1 ? INT64_MIN : INT32_MIN;
const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
if (vn_val >= static_cast<double>(INT64_MAX)) {
set_register(instr, rd, INT64_MAX, instr->RdMode());
} else if (vn_val <= static_cast<double>(INT64_MIN)) {
set_register(instr, rd, INT64_MIN, instr->RdMode());
int64_t result;
if (vn_val >= static_cast<double>(max)) {
result = max;
} else if (vn_val <= static_cast<double>(min)) {
result = min;
} else {
set_register(instr, rd, static_cast<int64_t>(vn_val), instr->RdMode());
result = static_cast<int64_t>(vn_val);
}
if (instr->Bit(31) == 1) {
set_register(instr, rd, result, instr->RdMode());
} else {
set_register(instr, rd, result & 0xffffffffll, instr->RdMode());
}
} else {
UnimplementedInstruction(instr);


@ -636,11 +636,11 @@ void Simulator::Exit(Thread* thread,
// __builtin_s{add,sub,mul}_overflow() intrinsics here and below.
// Note that they may clobber the output location even when there is overflow:
// https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html
DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
intptr_t rhs,
DART_FORCE_INLINE static bool SignedAddWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#if defined(HOST_ARCH_IA32)
asm volatile(
"add %2, %1\n"
"jo 1f;\n"
@ -650,12 +650,25 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"addl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
asm volatile(
"adds %1, %1, %2;\n"
"adds %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %1, [%3, #0]\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
@ -666,11 +679,11 @@ DART_FORCE_INLINE static bool SignedAddWithOverflow(intptr_t lhs,
return (res != 0);
}
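The same 32-bit fast path can also be expressed with the __builtin_sadd_overflow intrinsic named in the comment above; a portable sketch (GCC/Clang only, an illustration rather than the code the DBC simulator actually uses):

#include <cstdint>

static inline bool SignedAddWithOverflowPortable(int32_t lhs,
                                                 int32_t rhs,
                                                 intptr_t* out) {
  int res;
  if (__builtin_sadd_overflow(lhs, rhs, &res)) {
    return true;  // Overflowed 32 bits; caller takes the slow path.
  }
  *out = static_cast<intptr_t>(res);  // Sign-extend, like movslq / sxtw above.
  return false;  // No overflow.
}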
DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
intptr_t rhs,
DART_FORCE_INLINE static bool SignedSubWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#if defined(HOST_ARCH_IA32)
asm volatile(
"sub %2, %1\n"
"jo 1f;\n"
@ -680,12 +693,25 @@ DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"subl %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM) || defined(HOST_ARCH_ARM64)
asm volatile(
"subs %1, %1, %2;\n"
"subs %w1, %w1, %w2;\n"
"bvs 1f;\n"
"sxtw %x1, %w1;\n"
"mov %0, #0;\n"
"str %1, [%3, #0]\n"
"str %x1, [%3, #0]\n"
"1:"
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
@ -696,11 +722,11 @@ DART_FORCE_INLINE static bool SignedSubWithOverflow(intptr_t lhs,
return (res != 0);
}
DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
intptr_t rhs,
DART_FORCE_INLINE static bool SignedMulWithOverflow(int32_t lhs,
int32_t rhs,
intptr_t* out) {
intptr_t res = 1;
#if defined(HOST_ARCH_IA32) || defined(HOST_ARCH_X64)
#if defined(HOST_ARCH_IA32)
asm volatile(
"imul %2, %1\n"
"jo 1f;\n"
@ -710,6 +736,18 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
: "+r"(res), "+r"(lhs)
: "r"(rhs), "r"(out)
: "cc");
#elif defined(HOST_ARCH_X64)
int64_t tmp;
asm volatile(
"imull %[rhs], %[lhs]\n"
"jo 1f;\n"
"xor %[res], %[res]\n"
"movslq %[lhs], %[tmp]\n"
"mov %[tmp], 0(%[out])\n"
"1: "
: [res] "+r"(res), [lhs] "+r"(lhs), [tmp] "=&r"(tmp)
: [rhs] "r"(rhs), [out] "r"(out)
: "cc");
#elif defined(HOST_ARCH_ARM)
asm volatile(
"smull %1, ip, %1, %2;\n"
@ -724,12 +762,12 @@ DART_FORCE_INLINE static bool SignedMulWithOverflow(intptr_t lhs,
#elif defined(HOST_ARCH_ARM64)
int64_t prod_lo = 0;
asm volatile(
"mul %1, %2, %3\n"
"smulh %2, %2, %3\n"
"cmp %2, %1, ASR #63;\n"
"smull %x1, %w2, %w3\n"
"asr %x2, %x1, #63\n"
"cmp %x2, %x1, ASR #31;\n"
"bne 1f;\n"
"mov %0, #0;\n"
"str %1, [%4, #0]\n"
"str %x1, [%4, #0]\n"
"1:"
: "=r"(res), "+r"(prod_lo), "+r"(lhs)
: "r"(rhs), "r"(out)
@ -1971,11 +2009,7 @@ RawObject* Simulator::Call(const Code& code,
if (rhs != 0) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = (lhs >> kSmiTagSize) / (rhs >> kSmiTagSize);
#if defined(ARCH_IS_64_BIT)
const intptr_t untaggable = 0x4000000000000000LL;
#else
const intptr_t untaggable = 0x40000000L;
#endif // defined(ARCH_IS_64_BIT)
if (res != untaggable) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = res << kSmiTagSize;
pc++;
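With 31-bit Smis the only quotient that cannot be re-tagged is 2^30 (0x40000000), which arises solely from dividing the most negative Smi by -1; a small standalone sanity check of that claim (illustrative, not interpreter code):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kSmiMin = -(INT64_C(1) << 30);      // Most negative 31-bit Smi.
  const int64_t untaggable = INT64_C(1) << 30;      // 0x40000000
  assert(kSmiMin / -1 == untaggable);
  // For any other Smi operands |lhs / rhs| <= |lhs| <= 2^30, and 2^30 itself is
  // only reached by kSmiMin / -1, so every other quotient re-tags without overflow.
  return 0;
}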
@ -2001,11 +2035,12 @@ RawObject* Simulator::Call(const Code& code,
{
BYTECODE(Shl, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
if (static_cast<uintptr_t>(rhs) < kBitsPerWord) {
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const intptr_t res = lhs << rhs;
const int kBitsPerInt32 = 32;
if (static_cast<uintptr_t>(rhs) < kBitsPerInt32) {
const int32_t lhs = reinterpret_cast<intptr_t>(FP[rB]);
const int32_t res = lhs << rhs;
if (lhs == (res >> rhs)) {
*reinterpret_cast<intptr_t*>(&FP[rA]) = res;
*reinterpret_cast<intptr_t*>(&FP[rA]) = static_cast<intptr_t>(res);
pc++;
}
}
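The Shl fast path above accepts a 32-bit shift result only if shifting it back reproduces the operand (tag bit included); sketched as a standalone helper (names here are illustrative, not interpreter code):

#include <cstdint>

static bool ShlWithCheck(int32_t lhs_tagged, intptr_t rhs, intptr_t* out) {
  if (static_cast<uintptr_t>(rhs) >= 32) return false;   // Shift count too large.
  const int32_t res = static_cast<int32_t>(
      static_cast<uint32_t>(lhs_tagged) << rhs);          // 32-bit shift.
  if (lhs_tagged != (res >> rhs)) return false;           // Lost bits: overflow.
  *out = static_cast<intptr_t>(res);                      // Sign-extend the result.
  return true;
}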
@ -2016,8 +2051,7 @@ RawObject* Simulator::Call(const Code& code,
BYTECODE(Shr, A_B_C);
const intptr_t rhs = reinterpret_cast<intptr_t>(FP[rC]) >> kSmiTagSize;
if (rhs >= 0) {
const intptr_t shift_amount =
(rhs >= kBitsPerWord) ? (kBitsPerWord - 1) : rhs;
const intptr_t shift_amount = (rhs >= 32) ? (32 - 1) : rhs;
const intptr_t lhs = reinterpret_cast<intptr_t>(FP[rB]) >> kSmiTagSize;
*reinterpret_cast<intptr_t*>(&FP[rA]) = (lhs >> shift_amount)
<< kSmiTagSize;

View file

@ -253,10 +253,6 @@ void CheckMint(int64_t value) {
// here covers most of the 64-bit range. On 32-bit platforms the smi
// range covers most of the 32-bit range and values outside that
// range are also represented as mints.
#if defined(ARCH_IS_64_BIT)
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
#else
if (kMinInt32 < value && value < kMaxInt32) {
EXPECT_EQ(Dart_CObject_kInt32, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int32);
@ -264,7 +260,6 @@ void CheckMint(int64_t value) {
EXPECT_EQ(Dart_CObject_kInt64, mint_cobject->type);
EXPECT_EQ(value, mint_cobject->value.as_int64);
}
#endif
}
TEST_CASE(SerializeMints) {

View file

@ -1333,14 +1333,18 @@ static void EmitFastSmiOp(Assembler* assembler,
__ ldr(R1, Address(SP, +1 * kWordSize)); // Left.
__ orr(TMP, R0, Operand(R1));
__ BranchIfNotSmi(TMP, not_smi_or_overflow);
__ AssertSmiInRange(R0);
__ AssertSmiInRange(R1);
switch (kind) {
case Token::kADD: {
__ adds(R0, R1, Operand(R0)); // Adds.
__ addsw(R0, R1, Operand(R0)); // Adds.
__ sxtw(R0, R0);
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}
case Token::kSUB: {
__ subs(R0, R1, Operand(R0)); // Subtract.
__ subsw(R0, R1, Operand(R0)); // Subtract.
__ sxtw(R0, R0);
__ b(not_smi_or_overflow, VS); // Branch if overflow.
break;
}
@ -1382,6 +1386,8 @@ static void EmitFastSmiOp(Assembler* assembler,
__ StoreToOffset(R1, R6, count_offset);
}
__ AssertSmiInRange(R0, Assembler::kValueCanBeHeapPointer);
__ ret();
}
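Both this ARM64 stub (addsw/sxtw) and the x64 variant further down (addl/movsxd) lean on the same fact: adding two tagged 31-bit Smis in 32-bit registers overflows exactly when the untagged result leaves the new Smi range. A hedged C++ sketch of that equivalence (GCC/Clang intrinsic, illustrative names):

#include <cstdint>

// Tagged Smi = value << 1, so tagged operands span the full int32 range.
static bool TaggedSmiAddFastPath(int32_t left_tagged, int32_t right_tagged,
                                 int64_t* result_tagged) {
  int sum;
  if (__builtin_sadd_overflow(left_tagged, right_tagged, &sum)) {
    return false;  // The stub branches to not_smi_or_overflow here.
  }
  // The 32-bit overflow flag fires iff the untagged sum is outside
  // [-2^30, 2^30 - 1], i.e. iff the result is not a valid Smi.
  *result_tagged = static_cast<int64_t>(sum);  // sxtw / movsxd in the stubs.
  return true;
}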

View file

@ -1270,13 +1270,15 @@ static void EmitFastSmiOp(Assembler* assembler,
__ j(NOT_ZERO, not_smi_or_overflow);
switch (kind) {
case Token::kADD: {
__ addq(RAX, RCX);
__ addl(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kSUB: {
__ subq(RAX, RCX);
__ subl(RAX, RCX);
__ j(OVERFLOW, not_smi_or_overflow);
__ movsxd(RAX, RAX);
break;
}
case Token::kEQ: {

View file

@ -18,7 +18,7 @@ namespace dart {
// ClassifyingTokenPositions N -> -1 - N
//
// Synthetically created AstNodes are given real source positions but encoded
// as negative numbers from [kSmiMin32, -1 - N]. For example:
// as negative numbers from [kSmiMin, -1 - N]. For example:
//
// A source position of 0 in a synthetic AstNode would be encoded as -2 - N.
// A source position of 1 in a synthetic AstNode would be encoded as -3 - N.
@ -86,7 +86,7 @@ class TokenPosition {
#undef DECLARE_VALUES
static const intptr_t kMinSourcePos = 0;
static const TokenPosition kMinSource;
static const intptr_t kMaxSourcePos = kSmiMax32 - kMaxSentinelDescriptors - 2;
static const intptr_t kMaxSourcePos = kSmiMax - kMaxSentinelDescriptors - 2;
static const TokenPosition kMaxSource;
// Decode from a snapshot.

View file

@ -188,9 +188,6 @@ LibTest/collection/ListBase/ListBase_class_A01_t02: Skip # co19 issue 673, These
LibTest/collection/ListMixin/ListMixin_class_A01_t02: Skip # co19 issue 673: these tests take too much memory (300 MB) for our 1 GB test machine. http://code.google.com/p/co19/issues/detail?id=673
LibTest/core/List/List_class_A01_t02: Skip # co19 issue 673: these tests take too much memory (300 MB) for our 1 GB test machine. http://code.google.com/p/co19/issues/detail?id=673
[ $arch != arm64 && $arch != simarm64 && $arch != simdbc && $arch != simdbc64 && $arch != x64 && ($runtime == dart_precompiled || $runtime == vm) ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # co19 issue 129
[ $arch == ia32 && $mode == release && $runtime == vm && $system == linux ]
service/dev_fs_spawn_test: Pass, Fail # Issue 28411
@ -382,6 +379,9 @@ LibTest/typed_data/Uint64List/Uint64List.view_A01_t01: CompileTimeError # Large
LibTest/typed_data/Uint64List/Uint64List.view_A01_t02: CompileTimeError # Large integer literal
WebPlatformTest/*: SkipByDesign # dart:html not supported on VM.
[ $runtime == dart_precompiled || $runtime == vm ]
LibTest/core/int/operator_left_shift_A01_t02: Fail # Can't represent 1 << 2147483647 without running out of memory.
[ $runtime == flutter || $hot_reload || $hot_reload_rollback ]
Language/Expressions/Assignment/prefix_object_t02: Skip # Requires deferred libraries
Language/Expressions/Constants/constant_constructor_t03: Skip # Requires deferred libraries