Option to truncate integers to 64 bits, part 1 (core VM changes)

This changeset revises the --limit-ints-to-64-bits option so that it
changes the range of integers from unlimited to int64.
On overflow, integer arithmetic operations now silently wrap around and
discard the extra bits; no Bigints are allocated on overflow.

This changeset is the first part in a series of changes: it revises the implementation of the integer arithmetic operations. More changes will follow.

R=regis@google.com, zra@google.com

Issue: https://github.com/dart-lang/sdk/issues/30103
Review-Url: https://codereview.chromium.org/2974633003 .
This commit is contained in:
Alexander Markov 2017-07-10 15:16:34 -07:00
parent c9c5767630
commit 7a20b6b8d8
12 changed files with 376 additions and 64 deletions

View file

@ -79,6 +79,19 @@ static RawInteger* DoubleToInteger(double val, const char* error_msg) {
args.SetAt(0, String::Handle(String::New(error_msg)));
Exceptions::ThrowByType(Exceptions::kUnsupported, args);
}
if (FLAG_limit_ints_to_64_bits) {
// TODO(alexmarkov): decide on the double-to-integer conversion semantics
// in truncating mode.
int64_t ival = 0;
if (val <= static_cast<double>(kMinInt64)) {
ival = kMinInt64;
} else if (val >= static_cast<double>(kMaxInt64)) {
ival = kMaxInt64;
} else { // Representable in int64_t.
ival = static_cast<int64_t>(val);
}
return Integer::New(ival);
}
if ((-1.0 < val) && (val < 1.0)) {
return Smi::New(0);
}

View file

@ -27,6 +27,7 @@ DEFINE_FLAG(bool,
// when it could have been a Smi.
static bool CheckInteger(const Integer& i) {
if (i.IsBigint()) {
ASSERT(!FLAG_limit_ints_to_64_bits);
const Bigint& bigint = Bigint::Cast(i);
return !bigint.FitsIntoSmi() && !bigint.FitsIntoInt64();
}
@ -260,29 +261,32 @@ static RawInteger* ShiftOperationHelper(Token::Kind kind,
}
if (value.IsMint()) {
const int64_t mint_value = value.AsInt64Value();
const int count = Utils::HighestBit(mint_value);
intptr_t shift_count = amount.Value();
if (kind == Token::kSHR) {
shift_count = -shift_count;
}
if ((count + shift_count) < Mint::kBits) {
switch (kind) {
case Token::kSHL:
return Integer::New(mint_value << shift_count, Heap::kNew);
case Token::kSHR:
shift_count =
(-shift_count > Mint::kBits) ? Mint::kBits : -shift_count;
return Integer::New(mint_value >> shift_count, Heap::kNew);
default:
UNIMPLEMENTED();
}
} else {
// Overflow in shift, use Bigints
return Integer::null();
switch (kind) {
case Token::kSHL:
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(
Utils::ShiftLeftWithTruncation(mint_value, shift_count),
Heap::kNew);
} else {
const int count = Utils::HighestBit(mint_value);
if (shift_count < (Mint::kBits - count)) {
return Integer::New(mint_value << shift_count, Heap::kNew);
} else {
// Overflow in shift, use Bigints
return Integer::null();
}
}
case Token::kSHR:
shift_count = Utils::Minimum(shift_count, Mint::kBits);
return Integer::New(mint_value >> shift_count, Heap::kNew);
default:
UNIMPLEMENTED();
}
} else {
ASSERT(value.IsBigint());
}
ASSERT(!FLAG_limit_ints_to_64_bits);
return Integer::null();
}
@ -409,12 +413,9 @@ DEFINE_NATIVE_ENTRY(Bigint_getDigits, 1) {
DEFINE_NATIVE_ENTRY(Bigint_allocate, 4) {
if (FLAG_limit_ints_to_64_bits) {
// The allocated Bigint value is not necessarily out of range, but it may
// be used as an operand in an operation resulting in a Bigint.
Exceptions::ThrowRangeErrorMsg(
"Integer operand requires conversion to Bigint");
}
// TODO(alexmarkov): Revise this assertion if this native method can be used
// to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
ASSERT(!FLAG_limit_ints_to_64_bits);
// First arg is null type arguments, since class Bigint is not parameterized.
const Bool& neg = Bool::CheckedHandle(arguments->NativeArgAt(1));
const Smi& used = Smi::CheckedHandle(arguments->NativeArgAt(2));

View file

@ -458,6 +458,7 @@ const uword kUwordMax = kMaxUint64;
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerInt32 = kInt32Size * kBitsPerByte;
const int kBitsPerInt64 = kInt64Size * kBitsPerByte;
const int kBitsPerWord = kWordSize * kBitsPerByte;
const int kBitsPerWordLog2 = kWordSizeLog2 + kBitsPerByteLog2;

View file

@ -184,6 +184,45 @@ class Utils {
}
// Adds two int64_t values with wrapping around
// (two's complement arithmetic).
static inline int64_t AddWithWrapAround(int64_t a, int64_t b) {
  // Signed overflow is undefined behavior in C++, so perform the addition
  // in the unsigned domain (where wrap-around is well defined) and convert
  // the result back to the signed type.
  const uint64_t lhs = static_cast<uint64_t>(a);
  const uint64_t rhs = static_cast<uint64_t>(b);
  return static_cast<int64_t>(lhs + rhs);
}
// Subtracts two int64_t values with wrapping around
// (two's complement arithmetic).
static inline int64_t SubWithWrapAround(int64_t a, int64_t b) {
  // Signed overflow is undefined behavior in C++; unsigned subtraction wraps
  // modulo 2^64, which is exactly the two's complement result we want.
  const uint64_t lhs = static_cast<uint64_t>(a);
  const uint64_t rhs = static_cast<uint64_t>(b);
  return static_cast<int64_t>(lhs - rhs);
}
// Multiplies two int64_t values with wrapping around
// (two's complement arithmetic).
static inline int64_t MulWithWrapAround(int64_t a, int64_t b) {
  // Signed overflow is undefined behavior in C++; multiply in the unsigned
  // domain, where the product is taken modulo 2^64, and cast back.
  const uint64_t lhs = static_cast<uint64_t>(a);
  const uint64_t rhs = static_cast<uint64_t>(b);
  return static_cast<int64_t>(lhs * rhs);
}
// Shifts int64_t value left. Supports any non-negative number of bits and
// silently discards shifted out bits.
static inline int64_t ShiftLeftWithTruncation(int64_t a, int64_t b) {
  ASSERT(b >= 0);
  // Shifting by the operand width or more is undefined behavior in C++;
  // with truncation every bit is discarded, so the result is simply zero.
  if (b >= kBitsPerInt64) {
    return 0;
  }
  // Left-shifting a negative signed value is undefined behavior, so shift
  // the unsigned representation and convert the result back.
  const uint64_t bits = static_cast<uint64_t>(a);
  return static_cast<int64_t>(bits << b);
}
// Utility functions for converting values from host endianness to
// big or little endian values.
static uint16_t HostToBigEndian16(uint16_t host_value);

View file

@ -0,0 +1,184 @@
// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// VMOptions=--limit-ints-to-64-bits --enable-inlining-annotations --optimization_counter_threshold=10 --no-use-osr --no-background-compilation
// Test for truncating (wrap-around) integer arithmetic in --limit-ints-to-64-bits mode.
import "package:expect/expect.dart";
const alwaysInline = "AlwaysInline";
const neverInline = "NeverInline";
// Adds two values expected to be smis (never inlined so the call site
// compiles a real '+' operation).
@neverInline
add_smi(var a, var b) {
  return a + b;
}

// Adds two values expected to be mints.
@neverInline
add_mint(var a, var b) {
  return a + b;
}

// Adds two mint constants so the compiler can constant-fold the '+'.
@neverInline
add_mint_consts() {
  return 0x5000000000000000 + 0x6000000000000000;
}
// Exercises '+' on smi and mint operands. Each case loops 20 times so the
// optimizing compiler kicks in (--optimization_counter_threshold=10 is set
// in the VMOptions line above) and the optimized code path is covered too.
@neverInline
test_add(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
// smi + smi, no overflow.
for (var i = 0; i < 20; i++) {
Expect.equals(5, add_smi(v2, v3));
}
// Trigger deoptimization and re-compilation
// (2 + max smi overflows the smi range and yields a mint).
for (var i = 0; i < 20; i++) {
Expect.equals(0x4000000000000001, add_smi(v2, v3fxx));
}
// mint + negative mint: result stays within int64.
for (var i = 0; i < 20; i++) {
Expect.equals(-1, add_mint(v5fxx, n60xx));
}
// Wrap-around
// (0x5fff... + 0x7fff... overflows int64 and wraps to a negative value).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x2000000000000002, add_mint(v5fxx, v7fxx));
}
// Constant folding
// (the folded constant sum must also wrap instead of producing a Bigint).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x5000000000000000, add_mint_consts());
}
}
// Subtracts two values expected to be smis (never inlined so the call site
// compiles a real '-' operation).
@neverInline
sub_smi(var a, var b) {
  return a - b;
}

// Subtracts two values expected to be mints.
@neverInline
sub_mint(var a, var b) {
  return a - b;
}

// Subtracts two mint constants so the compiler can constant-fold the '-'.
@neverInline
sub_mint_consts() {
  return (-0x5000000000000000) - 0x6000000000000000;
}
// Exercises '-' on smi and mint operands. Each case loops 20 times so the
// optimizing compiler kicks in (--optimization_counter_threshold=10 is set
// in the VMOptions line above) and the optimized code path is covered too.
@neverInline
test_sub(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
// smi - smi, no overflow.
for (var i = 0; i < 20; i++) {
Expect.equals(1, sub_smi(v3, v2));
}
// Trigger deoptimization and re-compilation
// (-max_smi - max_smi overflows the smi range and yields a mint).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x7ffffffffffffffe, sub_smi(-v3fxx, v3fxx));
}
// mint - mint: result stays within int64.
for (var i = 0; i < 20; i++) {
Expect.equals(0x2000000000000000, sub_mint(v7fxx, v5fxx));
}
// Wrap-around
// (-0x6000... - 0x5fff... underflows int64 and wraps to a positive value).
for (var i = 0; i < 20; i++) {
Expect.equals(0x4000000000000001, sub_mint(n60xx, v5fxx));
}
// Constant folding
// (the folded constant difference must also wrap, not produce a Bigint).
for (var i = 0; i < 20; i++) {
Expect.equals(0x5000000000000000, sub_mint_consts());
}
}
// Multiplies two values expected to be smis (never inlined so the call site
// compiles a real '*' operation).
@neverInline
mul_smi(var a, var b) {
  return a * b;
}

// Multiplies two values expected to be mints.
@neverInline
mul_mint(var a, var b) {
  return a * b;
}

// Multiplies two mint constants so the compiler can constant-fold the '*'.
@neverInline
mul_mint_consts() {
  return 0x5000000000000001 * 0x6000000000000001;
}
// Exercises '*' on smi and mint operands. Each case loops 20 times so the
// optimizing compiler kicks in (--optimization_counter_threshold=10 is set
// in the VMOptions line above) and the optimized code path is covered too.
@neverInline
test_mul(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
// smi * smi, no overflow.
for (var i = 0; i < 20; i++) {
Expect.equals(6, mul_smi(v2, v3));
}
// Trigger deoptimization and re-compilation
// (2 * max smi overflows the smi range and yields a mint).
for (var i = 0; i < 20; i++) {
Expect.equals(0x7ffffffffffffffe, mul_smi(v2, v3fxx));
}
// Wrap around
// (0x5fff... * 3 overflows int64 and the result keeps only the low 64 bits).
for (var i = 0; i < 20; i++) {
Expect.equals(0x1ffffffffffffffd, mul_mint(v5fxx, 3));
}
// Constant folding
// (the folded constant product must also wrap instead of using a Bigint).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x4fffffffffffffff, mul_mint_consts());
}
}
// Shifts a smi left by a variable amount (never inlined so the call site
// compiles a real '<<' operation).
@neverInline
shl_smi(var a, var b) {
  return a << b;
}

// Shifts a mint left by a variable amount.
@neverInline
shl_mint(var a, var b) {
  return a << b;
}

// Shifts a mint left by a small constant (fits into int64).
@neverInline
shl_mint_by_const16(var a) {
  return a << 16;
}

// Shifts a smi left by a constant larger than the int64 width.
@neverInline
shl_smi_by_const96(var a) {
  return a << 96;
}

// Shifts a mint left by a constant larger than the int64 width.
@neverInline
shl_mint_by_const96(var a) {
  return a << 96;
}

// Shifts mint constants so the compiler can constant-fold the '<<'.
@neverInline
shl_mint_consts() {
  return 0x77665544aabbccdd << 48;
}
// Exercises '<<' on smi and mint operands with both variable and constant
// shift counts. Each case loops 20 times so the optimizing compiler kicks in
// (--optimization_counter_threshold=10 is set in the VMOptions line above).
@neverInline
test_shl(var v2, var v3, var v8, var v40) {
// smi << smi, no overflow.
for (var i = 0; i < 20; i++) {
Expect.equals(16, shl_smi(v2, v3));
}
// Trigger deoptimization and re-compilation, wrap-around
// (the high bits shifted past bit 63 are silently discarded).
for (var i = 0; i < 20; i++) {
Expect.equals(0x5566770000000000, shl_smi(0x0011223344556677, v40));
}
// Wrap around
// (shifting a large mint left by 8 drops its top byte and flips the sign).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x554433ffeeddcd00, shl_mint(0x7faabbcc00112233, v8));
}
// Shift mint by small constant
for (var i = 0; i < 20; i++) {
Expect.equals(0x5544332211aa0000, shl_mint_by_const16(0x77665544332211aa));
}
// Shift smi by large constant
// (shifting by >= 64 bits discards every bit, so the result is 0).
for (var i = 0; i < 20; i++) {
Expect.equals(0, shl_smi_by_const96(0x77665544332211));
}
// Shift mint by large constant
for (var i = 0; i < 20; i++) {
Expect.equals(0, shl_mint_by_const96(0x77665544332211aa));
}
// Constant folding
// (the folded constant shift must also truncate to 64 bits).
for (var i = 0; i < 20; i++) {
Expect.equals(-0x3323000000000000, shl_mint_consts());
}
}
main() {
  // Small smi operands.
  final two = 2;
  final three = 3;
  final eight = 8;
  final forty = 40;
  // Values at and around the smi/mint boundaries.
  final maxSmi = 0x3fffffffffffffff; // max smi
  final bigMint = 0x5fffffffffffffff; // mint
  final maxMint = 0x7fffffffffffffff; // max mint
  final negMint = -0x6000000000000000; // negative mint

  test_add(two, three, maxSmi, bigMint, maxMint, negMint);
  test_sub(two, three, maxSmi, bigMint, maxMint, negMint);
  test_mul(two, three, maxSmi, bigMint, maxMint, negMint);
  test_shl(two, three, eight, forty);
}

View file

@ -94,8 +94,7 @@
P(interpret_irregexp, bool, USING_DBC, "Use irregexp bytecode interpreter") \
P(lazy_dispatchers, bool, true, "Generate dispatchers lazily") \
P(link_natives_lazily, bool, false, "Link native calls lazily") \
R(limit_ints_to_64_bits, false, bool, false, \
"Throw a RangeError on 64-bit integer overflow"); \
R(limit_ints_to_64_bits, false, bool, false, "Truncate integers to 64 bits") \
C(load_deferred_eagerly, true, true, bool, false, \
"Load deferred libraries eagerly.") \
R(log_marker_tasks, false, bool, false, \

View file

@ -1818,7 +1818,7 @@ RawInteger* BinaryIntegerOpInstr::Evaluate(const Integer& left,
if (is_truncating()) {
int64_t truncated = result.AsTruncatedInt64Value();
truncated &= RepresentationMask(representation());
result = Integer::New(truncated);
result = Integer::New(truncated, Heap::kOld);
ASSERT(IsRepresentable(result, representation()));
} else if (!IsRepresentable(result, representation())) {
// If this operation is not truncating it would deoptimize on overflow.

View file

@ -7,6 +7,7 @@
#include "vm/allocation.h"
#include "vm/ast.h"
#include "vm/flags.h"
#include "vm/growable_array.h"
#include "vm/locations.h"
#include "vm/method_recognizer.h"
@ -730,7 +731,7 @@ class Instruction : public ZoneAllocated {
}
inline Definition* ArgumentAt(intptr_t index) const;
// Returns true, if this instruction can deoptimize with its current imputs.
// Returns true, if this instruction can deoptimize with its current inputs.
// This property can change if we add or remove redefinitions that constrain
// the type or the range of input operands during compilation.
virtual bool ComputeCanDeoptimize() const = 0;
@ -7184,12 +7185,29 @@ class BinaryMintOpInstr : public BinaryIntegerOpInstr {
Value* left,
Value* right,
intptr_t deopt_id)
: BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {}
: BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
if (FLAG_limit_ints_to_64_bits) {
mark_truncating();
}
}
virtual bool ComputeCanDeoptimize() const {
return (can_overflow() &&
((op_kind() == Token::kADD) || (op_kind() == Token::kSUB))) ||
(op_kind() == Token::kMUL); // Deopt if inputs are not int32.
switch (op_kind()) {
case Token::kADD:
case Token::kSUB:
return can_overflow();
case Token::kMUL:
// Note that ARM64 does not support operations with unboxed mints,
// so it is not handled here.
#if defined(TARGET_ARCH_X64)
return can_overflow(); // Deopt if overflow.
#else
// IA32, ARM
return true; // Deopt if inputs are not int32.
#endif
default:
return false;
}
}
virtual Representation representation() const { return kUnboxedMint; }
@ -7218,6 +7236,9 @@ class ShiftMintOpInstr : public BinaryIntegerOpInstr {
: BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
shift_range_(NULL) {
ASSERT((op_kind == Token::kSHR) || (op_kind == Token::kSHL));
if (FLAG_limit_ints_to_64_bits) {
mark_truncating();
}
}
Range* shift_range() const { return shift_range_; }

View file

@ -6546,6 +6546,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(1).constant().IsSmi());
const int32_t shift =
reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1;
ASSERT(!has_shift_count_check());
ASSERT(shift >= 0);
switch (op_kind()) {
case Token::kSHR: {
if (shift < 32) {

View file

@ -6085,6 +6085,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(1).constant().IsSmi());
const int32_t shift =
reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1;
ASSERT(!has_shift_count_check());
ASSERT(shift >= 0);
switch (op_kind()) {
case Token::kSHR: {
if (shift > 31) {

View file

@ -6072,6 +6072,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(1).constant().IsSmi());
const int64_t shift =
reinterpret_cast<int64_t>(locs()->in(1).constant().raw()) >> 1;
ASSERT(!has_shift_count_check());
ASSERT((0 <= shift) && (shift < 64));
switch (op_kind()) {
case Token::kSHR:
__ sarq(left, Immediate(shift));

View file

@ -18998,7 +18998,11 @@ RawInteger* Integer::New(const String& str, Heap::Space space) {
Bigint::Handle(Bigint::NewFromCString(str.ToCString(), space));
ASSERT(!big.FitsIntoSmi());
ASSERT(!big.FitsIntoInt64());
return big.raw();
if (!FLAG_limit_ints_to_64_bits) {
return big.raw();
}
// TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
value = big.AsTruncatedInt64Value();
}
return Integer::New(value, space);
}
@ -19012,7 +19016,11 @@ RawInteger* Integer::NewCanonical(const String& str) {
const Bigint& big = Bigint::Handle(Bigint::NewCanonical(str));
ASSERT(!big.FitsIntoSmi());
ASSERT(!big.FitsIntoInt64());
return big.raw();
if (!FLAG_limit_ints_to_64_bits) {
return big.raw();
}
// TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
value = big.AsTruncatedInt64Value();
}
if (Smi::IsValid(value)) {
return Smi::New(static_cast<intptr_t>(value));
@ -19032,7 +19040,12 @@ RawInteger* Integer::New(int64_t value, Heap::Space space) {
RawInteger* Integer::NewFromUint64(uint64_t value, Heap::Space space) {
if (value > static_cast<uint64_t>(Mint::kMaxValue)) {
return Bigint::NewFromUint64(value, space);
if (FLAG_limit_ints_to_64_bits) {
// TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
return Integer::New(static_cast<int64_t>(value), space);
} else {
return Bigint::NewFromUint64(value, space);
}
} else {
return Integer::New(value, space);
}
@ -19141,13 +19154,18 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
static_cast<int64_t>(right_value),
space);
} else {
// In 64-bit mode, the product of two signed integers fits in a
// 64-bit result if the sum of the highest bits of their absolute
// values is smaller than 62.
ASSERT(sizeof(intptr_t) == sizeof(int64_t));
if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
62) {
return Integer::New(left_value * right_value, space);
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(
Utils::MulWithWrapAround(left_value, right_value), space);
} else {
// In 64-bit mode, the product of two signed integers fits in a
// 64-bit result if the sum of the highest bits of their absolute
// values is smaller than 62.
if ((Utils::HighestBit(left_value) +
Utils::HighestBit(right_value)) < 62) {
return Integer::New(left_value * right_value, space);
}
}
}
// Perform a Bigint multiplication below.
@ -19175,26 +19193,47 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
const int64_t right_value = other.AsInt64Value();
switch (operation) {
case Token::kADD: {
if (!Utils::WillAddOverflow(left_value, right_value)) {
return Integer::New(left_value + right_value, space);
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(Utils::AddWithWrapAround(left_value, right_value),
space);
} else {
if (!Utils::WillAddOverflow(left_value, right_value)) {
return Integer::New(left_value + right_value, space);
}
}
break;
}
case Token::kSUB: {
if (!Utils::WillSubOverflow(left_value, right_value)) {
return Integer::New(left_value - right_value, space);
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(Utils::SubWithWrapAround(left_value, right_value),
space);
} else {
if (!Utils::WillSubOverflow(left_value, right_value)) {
return Integer::New(left_value - right_value, space);
}
}
break;
}
case Token::kMUL: {
if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
62) {
return Integer::New(left_value * right_value, space);
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(Utils::MulWithWrapAround(left_value, right_value),
space);
} else {
if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
62) {
return Integer::New(left_value * right_value, space);
}
}
break;
}
case Token::kTRUNCDIV: {
if ((left_value != Mint::kMinValue) || (right_value != -1)) {
if ((left_value == Mint::kMinValue) && (right_value == -1)) {
// Division special case: overflow in int64_t.
if (FLAG_limit_ints_to_64_bits) {
// MIN_VALUE / -1 = (MAX_VALUE + 1), which wraps around to MIN_VALUE
return Integer::New(Mint::kMinValue, space);
}
} else {
return Integer::New(left_value / right_value, space);
}
break;
@ -19214,6 +19253,7 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
UNIMPLEMENTED();
}
}
ASSERT(!FLAG_limit_ints_to_64_bits);
return Integer::null(); // Notify caller that a bigint operation is required.
}
@ -19259,6 +19299,7 @@ RawInteger* Integer::BitOp(Token::Kind kind,
UNIMPLEMENTED();
}
}
ASSERT(!FLAG_limit_ints_to_64_bits);
return Integer::null(); // Notify caller that a bigint operation is required.
}
@ -19278,12 +19319,18 @@ RawInteger* Smi::ShiftOp(Token::Kind kind,
}
{ // Check for overflow.
int cnt = Utils::BitLength(left_value);
if ((cnt + right_value) > Smi::kBits) {
if ((cnt + right_value) > Mint::kBits) {
return Bigint::NewFromShiftedInt64(left_value, right_value, space);
if (right_value > (Smi::kBits - cnt)) {
if (FLAG_limit_ints_to_64_bits) {
return Integer::New(
Utils::ShiftLeftWithTruncation(left_value, right_value), space);
} else {
int64_t left_64 = left_value;
return Integer::New(left_64 << right_value, space);
if (right_value > (Mint::kBits - cnt)) {
return Bigint::NewFromShiftedInt64(left_value, right_value,
space);
} else {
int64_t left_64 = left_value;
return Integer::New(left_64 << right_value, space);
}
}
}
}
@ -19705,6 +19752,7 @@ bool Bigint::CheckAndCanonicalizeFields(Thread* thread,
RawBigint* Bigint::New(Heap::Space space) {
// TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
Isolate* isolate = thread->isolate();
@ -19728,6 +19776,7 @@ RawBigint* Bigint::New(bool neg,
intptr_t used,
const TypedData& digits,
Heap::Space space) {
// TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
ASSERT((used == 0) ||
(!digits.IsNull() && (digits.Length() >= (used + (used & 1)))));
Thread* thread = Thread::Current();
@ -19767,6 +19816,7 @@ RawBigint* Bigint::New(bool neg,
RawBigint* Bigint::NewFromInt64(int64_t value, Heap::Space space) {
// Currently only used to convert Smi or Mint to hex String, therefore do
// not throw RangeError if --limit-ints-to-64-bits.
// TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
const TypedData& digits = TypedData::Handle(NewDigits(2, space));
bool neg;
uint64_t abs_value;
@ -19784,10 +19834,9 @@ RawBigint* Bigint::NewFromInt64(int64_t value, Heap::Space space) {
RawBigint* Bigint::NewFromUint64(uint64_t value, Heap::Space space) {
if (FLAG_limit_ints_to_64_bits) {
Exceptions::ThrowRangeErrorMsg(
"Integer operand requires conversion to Bigint");
}
// TODO(alexmarkov): Revise this assertion if this factory method is used
// to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
ASSERT(!FLAG_limit_ints_to_64_bits);
const TypedData& digits = TypedData::Handle(NewDigits(2, space));
SetDigitAt(digits, 0, static_cast<uint32_t>(value));
SetDigitAt(digits, 1, static_cast<uint32_t>(value >> 32));
@ -19798,12 +19847,9 @@ RawBigint* Bigint::NewFromUint64(uint64_t value, Heap::Space space) {
RawBigint* Bigint::NewFromShiftedInt64(int64_t value,
intptr_t shift,
Heap::Space space) {
if (FLAG_limit_ints_to_64_bits) {
// The allocated Bigint value is not necessarily out of range, but it may
// be used as an operand in an operation resulting in a Bigint.
Exceptions::ThrowRangeErrorMsg(
"Integer operand requires conversion to Bigint");
}
// TODO(alexmarkov): Revise this assertion if this factory method is used
// to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
ASSERT(!FLAG_limit_ints_to_64_bits);
ASSERT(kBitsPerDigit == 32);
ASSERT(shift >= 0);
const intptr_t digit_shift = shift / kBitsPerDigit;
@ -19835,6 +19881,7 @@ RawBigint* Bigint::NewFromShiftedInt64(int64_t value,
RawBigint* Bigint::NewFromCString(const char* str, Heap::Space space) {
// Allow parser to scan Bigint literal, even with --limit-ints-to-64-bits.
// TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
ASSERT(str != NULL);
bool neg = false;
TypedData& digits = TypedData::Handle();
@ -19857,6 +19904,7 @@ RawBigint* Bigint::NewFromCString(const char* str, Heap::Space space) {
RawBigint* Bigint::NewCanonical(const String& str) {
// Allow parser to scan Bigint literal, even with --limit-ints-to-64-bits.
// TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
Isolate* isolate = thread->isolate();