diff --git a/runtime/lib/double.cc b/runtime/lib/double.cc
index 813002445b1..1d3927c5db1 100644
--- a/runtime/lib/double.cc
+++ b/runtime/lib/double.cc
@@ -79,6 +79,19 @@ static RawInteger* DoubleToInteger(double val, const char* error_msg) {
     args.SetAt(0, String::Handle(String::New(error_msg)));
     Exceptions::ThrowByType(Exceptions::kUnsupported, args);
   }
+  if (FLAG_limit_ints_to_64_bits) {
+    // TODO(alexmarkov): decide on the double-to-integer conversion semantics
+    // in truncating mode.
+    int64_t ival = 0;
+    if (val <= static_cast<double>(kMinInt64)) {
+      ival = kMinInt64;
+    } else if (val >= static_cast<double>(kMaxInt64)) {
+      ival = kMaxInt64;
+    } else {  // Representable in int64_t.
+      ival = static_cast<int64_t>(val);
+    }
+    return Integer::New(ival);
+  }
   if ((-1.0 < val) && (val < 1.0)) {
     return Smi::New(0);
   }
diff --git a/runtime/lib/integers.cc b/runtime/lib/integers.cc
index a3fdc274f2b..f0a6563374d 100644
--- a/runtime/lib/integers.cc
+++ b/runtime/lib/integers.cc
@@ -27,6 +27,7 @@ DEFINE_FLAG(bool,
 // when it could have been a Smi.
 static bool CheckInteger(const Integer& i) {
   if (i.IsBigint()) {
+    ASSERT(!FLAG_limit_ints_to_64_bits);
     const Bigint& bigint = Bigint::Cast(i);
     return !bigint.FitsIntoSmi() && !bigint.FitsIntoInt64();
   }
@@ -260,29 +261,32 @@ static RawInteger* ShiftOperationHelper(Token::Kind kind,
   }
   if (value.IsMint()) {
     const int64_t mint_value = value.AsInt64Value();
-    const int count = Utils::HighestBit(mint_value);
     intptr_t shift_count = amount.Value();
-    if (kind == Token::kSHR) {
-      shift_count = -shift_count;
-    }
-    if ((count + shift_count) < Mint::kBits) {
-      switch (kind) {
-        case Token::kSHL:
-          return Integer::New(mint_value << shift_count, Heap::kNew);
-        case Token::kSHR:
-          shift_count =
-              (-shift_count > Mint::kBits) ? Mint::kBits : -shift_count;
-          return Integer::New(mint_value >> shift_count, Heap::kNew);
-        default:
-          UNIMPLEMENTED();
-      }
-    } else {
-      // Overflow in shift, use Bigints
-      return Integer::null();
+    switch (kind) {
+      case Token::kSHL:
+        if (FLAG_limit_ints_to_64_bits) {
+          return Integer::New(
+              Utils::ShiftLeftWithTruncation(mint_value, shift_count),
+              Heap::kNew);
+        } else {
+          const int count = Utils::HighestBit(mint_value);
+          if (shift_count < (Mint::kBits - count)) {
+            return Integer::New(mint_value << shift_count, Heap::kNew);
+          } else {
+            // Overflow in shift, use Bigints
+            return Integer::null();
+          }
+        }
+      case Token::kSHR:
+        shift_count = Utils::Minimum(shift_count, Mint::kBits);
+        return Integer::New(mint_value >> shift_count, Heap::kNew);
+      default:
+        UNIMPLEMENTED();
     }
   } else {
     ASSERT(value.IsBigint());
   }
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   return Integer::null();
 }
@@ -409,12 +413,9 @@ DEFINE_NATIVE_ENTRY(Bigint_getDigits, 1) {

 DEFINE_NATIVE_ENTRY(Bigint_allocate, 4) {
-  if (FLAG_limit_ints_to_64_bits) {
-    // The allocated Bigint value is not necessarily out of range, but it may
-    // be used as an operand in an operation resulting in a Bigint.
-    Exceptions::ThrowRangeErrorMsg(
-        "Integer operand requires conversion to Bigint");
-  }
+  // TODO(alexmarkov): Revise this assertion if this native method can be used
+  // to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   // First arg is null type arguments, since class Bigint is not parameterized.
   const Bool& neg = Bool::CheckedHandle(arguments->NativeArgAt(1));
   const Smi& used = Smi::CheckedHandle(arguments->NativeArgAt(2));
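
Note: with --limit-ints-to-64-bits, ShiftOperationHelper gives shifts fixed 64-bit semantics: a left shift silently drops any bits pushed past bit 63, and a right-shift count is clamped, since an arithmetic shift by the full width or more only repeats the sign bit. A minimal standalone sketch of those semantics (hypothetical names, not part of the patch; assumes Mint::kBits == 63, the 64th bit being the sign):

#include <cstdint>

// Sketch only; mirrors the kSHL/kSHR cases of ShiftOperationHelper above.
int64_t TruncatingShl(int64_t value, int64_t count) {
  if (count >= 64) return 0;  // Every bit is shifted out.
  // Unsigned arithmetic avoids undefined behavior on overflow.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << count);
}

int64_t ClampedSar(int64_t value, int64_t count) {
  if (count > 63) count = 63;  // Larger counts only repeat the sign bit.
  return value >> count;       // Arithmetic shift fills with the sign bit.
}
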
diff --git a/runtime/platform/globals.h b/runtime/platform/globals.h
index 06d1086db77..0b8899484a0 100644
--- a/runtime/platform/globals.h
+++ b/runtime/platform/globals.h
@@ -458,6 +458,7 @@ const uword kUwordMax = kMaxUint64;
 const int kBitsPerByte = 8;
 const int kBitsPerByteLog2 = 3;
 const int kBitsPerInt32 = kInt32Size * kBitsPerByte;
+const int kBitsPerInt64 = kInt64Size * kBitsPerByte;
 const int kBitsPerWord = kWordSize * kBitsPerByte;
 const int kBitsPerWordLog2 = kWordSizeLog2 + kBitsPerByteLog2;
diff --git a/runtime/platform/utils.h b/runtime/platform/utils.h
index a1695d4b291..beed22b5a55 100644
--- a/runtime/platform/utils.h
+++ b/runtime/platform/utils.h
@@ -184,6 +184,45 @@ class Utils {
   }
+
+  // Adds two int64_t values with wrapping around
+  // (two's complement arithmetic).
+  static inline int64_t AddWithWrapAround(int64_t a, int64_t b) {
+    // Avoid undefined behavior by doing arithmetic in the unsigned type.
+    return static_cast<int64_t>(static_cast<uint64_t>(a) +
+                                static_cast<uint64_t>(b));
+  }
+
+
+  // Subtracts two int64_t values with wrapping around
+  // (two's complement arithmetic).
+  static inline int64_t SubWithWrapAround(int64_t a, int64_t b) {
+    // Avoid undefined behavior by doing arithmetic in the unsigned type.
+    return static_cast<int64_t>(static_cast<uint64_t>(a) -
+                                static_cast<uint64_t>(b));
+  }
+
+
+  // Multiplies two int64_t values with wrapping around
+  // (two's complement arithmetic).
+  static inline int64_t MulWithWrapAround(int64_t a, int64_t b) {
+    // Avoid undefined behavior by doing arithmetic in the unsigned type.
+    return static_cast<int64_t>(static_cast<uint64_t>(a) *
+                                static_cast<uint64_t>(b));
+  }
+
+
+  // Shifts int64_t value left. Supports any non-negative number of bits and
+  // silently discards shifted out bits.
+  static inline int64_t ShiftLeftWithTruncation(int64_t a, int64_t b) {
+    ASSERT(b >= 0);
+    if (b >= kBitsPerInt64) {
+      return 0;
+    }
+    // Avoid undefined behavior by doing arithmetic in the unsigned type.
+    return static_cast<int64_t>(static_cast<uint64_t>(a) << b);
+  }
+
   // Utility functions for converting values from host endianness to
   // big or little endian values.
   static uint16_t HostToBigEndian16(uint16_t host_value);
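
Note: signed overflow is undefined behavior in C++, which is why the helpers above detour through uint64_t, where overflow is defined to wrap modulo 2^64; on a two's complement target the result is bit-identical to wrapped signed arithmetic. A small self-contained check of that behavior (standalone restatement, not part of the patch):

#include <cassert>
#include <cstdint>
#include <limits>

// Standalone restatement of Utils::AddWithWrapAround for illustration.
int64_t AddWithWrapAround(int64_t a, int64_t b) {
  return static_cast<int64_t>(static_cast<uint64_t>(a) +
                              static_cast<uint64_t>(b));
}

int main() {
  const int64_t kMax = std::numeric_limits<int64_t>::max();
  const int64_t kMin = std::numeric_limits<int64_t>::min();
  assert(AddWithWrapAround(kMax, 1) == kMin);   // wraps past the top
  assert(AddWithWrapAround(kMin, -1) == kMax);  // and past the bottom
  return 0;
}
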
diff --git a/runtime/tests/vm/dart/truncating_ints_test.dart b/runtime/tests/vm/dart/truncating_ints_test.dart
new file mode 100644
index 00000000000..59524a14f71
--- /dev/null
+++ b/runtime/tests/vm/dart/truncating_ints_test.dart
@@ -0,0 +1,184 @@
+// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+// VMOptions=--limit-ints-to-64-bits --enable-inlining-annotations --optimization_counter_threshold=10 --no-use-osr --no-background-compilation
+
+// Test for truncating (wrap-around) integer arithmetic in
+// --limit-ints-to-64-bits mode.
+
+import "package:expect/expect.dart";
+
+const alwaysInline = "AlwaysInline";
+const neverInline = "NeverInline";
+
+@neverInline
+add_smi(var a, var b) => a + b;
+
+@neverInline
+add_mint(var a, var b) => a + b;
+
+@neverInline
+add_mint_consts() => 0x5000000000000000 + 0x6000000000000000;
+
+@neverInline
+test_add(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(5, add_smi(v2, v3));
+  }
+
+  // Trigger deoptimization and re-compilation
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x4000000000000001, add_smi(v2, v3fxx));
+  }
+
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-1, add_mint(v5fxx, n60xx));
+  }
+
+  // Wrap-around
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x2000000000000002, add_mint(v5fxx, v7fxx));
+  }
+
+  // Constant folding
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x5000000000000000, add_mint_consts());
+  }
+}
+
+@neverInline
+sub_smi(var a, var b) => a - b;
+
+@neverInline
+sub_mint(var a, var b) => a - b;
+
+@neverInline
+sub_mint_consts() => (-0x5000000000000000) - 0x6000000000000000;
+
+@neverInline
+test_sub(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(1, sub_smi(v3, v2));
+  }
+
+  // Trigger deoptimization and re-compilation
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x7ffffffffffffffe, sub_smi(-v3fxx, v3fxx));
+  }
+
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x2000000000000000, sub_mint(v7fxx, v5fxx));
+  }
+
+  // Wrap-around
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x4000000000000001, sub_mint(n60xx, v5fxx));
+  }
+
+  // Constant folding
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x5000000000000000, sub_mint_consts());
+  }
+}
+
+@neverInline
+mul_smi(var a, var b) => a * b;
+
+@neverInline
+mul_mint(var a, var b) => a * b;
+
+@neverInline
+mul_mint_consts() => 0x5000000000000001 * 0x6000000000000001;
+
+@neverInline
+test_mul(var v2, var v3, var v3fxx, var v5fxx, var v7fxx, var n60xx) {
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(6, mul_smi(v2, v3));
+  }
+
+  // Trigger deoptimization and re-compilation
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x7ffffffffffffffe, mul_smi(v2, v3fxx));
+  }
+
+  // Wrap around
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x1ffffffffffffffd, mul_mint(v5fxx, 3));
+  }
+
+  // Constant folding
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x4fffffffffffffff, mul_mint_consts());
+  }
+}
+
+@neverInline
+shl_smi(var a, var b) => a << b;
+
+@neverInline
+shl_mint(var a, var b) => a << b;
+
+@neverInline
+shl_mint_by_const16(var a) => a << 16;
+
+@neverInline
+shl_smi_by_const96(var a) => a << 96;
+
+@neverInline
+shl_mint_by_const96(var a) => a << 96;
+
+@neverInline
+shl_mint_consts() => 0x77665544aabbccdd << 48;
+
+@neverInline
+test_shl(var v2, var v3, var v8, var v40) {
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(16, shl_smi(v2, v3));
+  }
+
+  // Trigger deoptimization and re-compilation, wrap-around
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x5566770000000000, shl_smi(0x0011223344556677, v40));
+  }
+
+  // Wrap around
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x554433ffeeddcd00, shl_mint(0x7faabbcc00112233, v8));
+  }
+
+  // Shift mint by small constant
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0x5544332211aa0000, shl_mint_by_const16(0x77665544332211aa));
+  }
+
+  // Shift smi by large constant
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0, shl_smi_by_const96(0x77665544332211));
+  }
+
+  // Shift mint by large constant
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(0, shl_mint_by_const96(0x77665544332211aa));
+  }
+
+  // Constant folding
+  for (var i = 0; i < 20; i++) {
+    Expect.equals(-0x3323000000000000, shl_mint_consts());
+  }
+}
+
+main() {
+  var v2 = 2; // smi
+  var v3 = 3; // smi
+  var v8 = 8; // smi
+  var v40 = 40; // smi
+  var v3fxx = 0x3fffffffffffffff; // max smi
+  var v5fxx = 0x5fffffffffffffff; // mint
+  var v7fxx = 0x7fffffffffffffff; // max mint
+  var n60xx = -0x6000000000000000; // negative mint
+
+  test_add(v2, v3, v3fxx, v5fxx, v7fxx, n60xx);
+  test_sub(v2, v3, v3fxx, v5fxx, v7fxx, n60xx);
+  test_mul(v2, v3, v3fxx, v5fxx, v7fxx, n60xx);
+  test_shl(v2, v3, v8, v40);
+}
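
Note: the expected constants in the test follow directly from keeping the low 64 bits of the mathematical result. For example, in mul_mint the product 0x5fffffffffffffff * 3 equals 0x11ffffffffffffffd, whose low 64 bits are 0x1ffffffffffffffd. A quick standalone check (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // Unsigned multiplication wraps modulo 2^64 by definition.
  const uint64_t product = UINT64_C(0x5fffffffffffffff) * 3u;
  assert(static_cast<int64_t>(product) == INT64_C(0x1ffffffffffffffd));
  return 0;
}
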
diff --git a/runtime/vm/flag_list.h b/runtime/vm/flag_list.h
index 2c327819c2e..5da998c809a 100644
--- a/runtime/vm/flag_list.h
+++ b/runtime/vm/flag_list.h
@@ -94,8 +94,7 @@
   P(interpret_irregexp, bool, USING_DBC, "Use irregexp bytecode interpreter")  \
   P(lazy_dispatchers, bool, true, "Generate dispatchers lazily")               \
   P(link_natives_lazily, bool, false, "Link native calls lazily")              \
-  R(limit_ints_to_64_bits, false, bool, false,                                 \
-    "Throw a RangeError on 64-bit integer overflow");                          \
+  R(limit_ints_to_64_bits, false, bool, false, "Truncate integers to 64 bits") \
   C(load_deferred_eagerly, true, true, bool, false,                            \
     "Load deferred libraries eagerly.")                                        \
   R(log_marker_tasks, false, bool, false,                                      \
diff --git a/runtime/vm/intermediate_language.cc b/runtime/vm/intermediate_language.cc
index d1e8c29d475..557f301003d 100644
--- a/runtime/vm/intermediate_language.cc
+++ b/runtime/vm/intermediate_language.cc
@@ -1818,7 +1818,7 @@ RawInteger* BinaryIntegerOpInstr::Evaluate(const Integer& left,
   if (is_truncating()) {
     int64_t truncated = result.AsTruncatedInt64Value();
     truncated &= RepresentationMask(representation());
-    result = Integer::New(truncated);
+    result = Integer::New(truncated, Heap::kOld);
     ASSERT(IsRepresentable(result, representation()));
   } else if (!IsRepresentable(result, representation())) {
     // If this operation is not truncating it would deoptimize on overflow.
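
Note: under the flag every 64-bit binary op is constructed as truncating (see the constructor change below), so the constant folder takes the is_truncating() path above and masks the folded value down to the operation's representation; Heap::kOld is presumably used because folded constants are created during compilation and may outlive the next scavenge. A hypothetical illustration for a 32-bit representation (the mask value here is assumed, not quoted from RepresentationMask):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kInt32Mask = 0xffffffff;         // assumed 32-bit rep mask
  const int64_t result = INT64_C(0x100000005);    // a 32-bit add that overflowed
  const int64_t truncated = result & kInt32Mask;  // keep only the low 32 bits
  assert(truncated == 5);  // representable again after truncation
  return 0;
}
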
diff --git a/runtime/vm/intermediate_language.h b/runtime/vm/intermediate_language.h
index 00e42f1679d..afa2f8a41a2 100644
--- a/runtime/vm/intermediate_language.h
+++ b/runtime/vm/intermediate_language.h
@@ -7,6 +7,7 @@
 #include "vm/allocation.h"
 #include "vm/ast.h"
+#include "vm/flags.h"
 #include "vm/growable_array.h"
 #include "vm/locations.h"
 #include "vm/method_recognizer.h"
@@ -730,7 +731,7 @@ class Instruction : public ZoneAllocated {
   }
   inline Definition* ArgumentAt(intptr_t index) const;

-  // Returns true, if this instruction can deoptimize with its current imputs.
+  // Returns true, if this instruction can deoptimize with its current inputs.
   // This property can change if we add or remove redefinitions that constrain
   // the type or the range of input operands during compilation.
   virtual bool ComputeCanDeoptimize() const = 0;
@@ -7184,12 +7185,29 @@ class BinaryMintOpInstr : public BinaryIntegerOpInstr {
                    Value* left,
                    Value* right,
                    intptr_t deopt_id)
-      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {}
+      : BinaryIntegerOpInstr(op_kind, left, right, deopt_id) {
+    if (FLAG_limit_ints_to_64_bits) {
+      mark_truncating();
+    }
+  }

   virtual bool ComputeCanDeoptimize() const {
-    return (can_overflow() &&
-            ((op_kind() == Token::kADD) || (op_kind() == Token::kSUB))) ||
-           (op_kind() == Token::kMUL);  // Deopt if inputs are not int32.
+    switch (op_kind()) {
+      case Token::kADD:
+      case Token::kSUB:
+        return can_overflow();
+      case Token::kMUL:
+// Note that ARM64 does not support operations with unboxed mints,
+// so it is not handled here.
+#if defined(TARGET_ARCH_X64)
+        return can_overflow();  // Deopt if overflow.
+#else
+        // IA32, ARM
+        return true;  // Deopt if inputs are not int32.
+#endif
+      default:
+        return false;
+    }
   }

   virtual Representation representation() const { return kUnboxedMint; }
@@ -7218,6 +7236,9 @@ class ShiftMintOpInstr : public BinaryIntegerOpInstr {
       : BinaryIntegerOpInstr(op_kind, left, right, deopt_id),
         shift_range_(NULL) {
     ASSERT((op_kind == Token::kSHR) || (op_kind == Token::kSHL));
+    if (FLAG_limit_ints_to_64_bits) {
+      mark_truncating();
+    }
   }

   Range* shift_range() const { return shift_range_; }
diff --git a/runtime/vm/intermediate_language_arm.cc b/runtime/vm/intermediate_language_arm.cc
index 9515ea3abb2..5ba506f9af0 100644
--- a/runtime/vm/intermediate_language_arm.cc
+++ b/runtime/vm/intermediate_language_arm.cc
@@ -6546,6 +6546,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(locs()->in(1).constant().IsSmi());
     const int32_t shift =
         reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1;
+    ASSERT(!has_shift_count_check());
+    ASSERT(shift >= 0);
     switch (op_kind()) {
       case Token::kSHR: {
         if (shift < 32) {
diff --git a/runtime/vm/intermediate_language_ia32.cc b/runtime/vm/intermediate_language_ia32.cc
index 8484162172d..bcd1f58cab2 100644
--- a/runtime/vm/intermediate_language_ia32.cc
+++ b/runtime/vm/intermediate_language_ia32.cc
@@ -6085,6 +6085,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(locs()->in(1).constant().IsSmi());
     const int32_t shift =
         reinterpret_cast<int32_t>(locs()->in(1).constant().raw()) >> 1;
+    ASSERT(!has_shift_count_check());
+    ASSERT(shift >= 0);
     switch (op_kind()) {
       case Token::kSHR: {
         if (shift > 31) {
diff --git a/runtime/vm/intermediate_language_x64.cc b/runtime/vm/intermediate_language_x64.cc
index 6cff06c7fa2..69d1586610b 100644
--- a/runtime/vm/intermediate_language_x64.cc
+++ b/runtime/vm/intermediate_language_x64.cc
@@ -6072,6 +6072,8 @@ void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(locs()->in(1).constant().IsSmi());
     const int64_t shift =
         reinterpret_cast<int64_t>(locs()->in(1).constant().raw()) >> 1;
+    ASSERT(!has_shift_count_check());
+    ASSERT((0 <= shift) && (shift < 64));
     switch (op_kind()) {
       case Token::kSHR:
         __ sarq(left, Immediate(shift));
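
Note: all three backends above decode the constant shift count directly from the boxed Smi, relying on the VM's pointer tagging: a Smi carries its value shifted left by one with a zero tag bit, so an arithmetic right shift by one recovers it. A simplified sketch of that encoding (the real tagging lives in the VM's object layout; this ignores range checks and overflow):

#include <cassert>
#include <cstdint>

intptr_t EncodeSmi(intptr_t value) { return value << 1; }  // tag bit 0 = Smi
intptr_t DecodeSmi(intptr_t raw) { return raw >> 1; }      // arithmetic shift

int main() {
  assert(DecodeSmi(EncodeSmi(16)) == 16);
  assert((EncodeSmi(16) & 1) == 0);  // clear low bit marks a Smi
  return 0;
}
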
diff --git a/runtime/vm/object.cc b/runtime/vm/object.cc
index bb9334f92b4..0eb69a9d0bb 100644
--- a/runtime/vm/object.cc
+++ b/runtime/vm/object.cc
@@ -18998,7 +18998,11 @@ RawInteger* Integer::New(const String& str, Heap::Space space) {
         Bigint::Handle(Bigint::NewFromCString(str.ToCString(), space));
     ASSERT(!big.FitsIntoSmi());
     ASSERT(!big.FitsIntoInt64());
-    return big.raw();
+    if (!FLAG_limit_ints_to_64_bits) {
+      return big.raw();
+    }
+    // TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
+    value = big.AsTruncatedInt64Value();
   }
   return Integer::New(value, space);
 }
@@ -19012,7 +19016,11 @@ RawInteger* Integer::NewCanonical(const String& str) {
     const Bigint& big = Bigint::Handle(Bigint::NewCanonical(str));
     ASSERT(!big.FitsIntoSmi());
     ASSERT(!big.FitsIntoInt64());
-    return big.raw();
+    if (!FLAG_limit_ints_to_64_bits) {
+      return big.raw();
+    }
+    // TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
+    value = big.AsTruncatedInt64Value();
   }
   if (Smi::IsValid(value)) {
     return Smi::New(static_cast<intptr_t>(value));
   }
@@ -19032,7 +19040,12 @@ RawInteger* Integer::New(int64_t value, Heap::Space space) {

 RawInteger* Integer::NewFromUint64(uint64_t value, Heap::Space space) {
   if (value > static_cast<uint64_t>(Mint::kMaxValue)) {
-    return Bigint::NewFromUint64(value, space);
+    if (FLAG_limit_ints_to_64_bits) {
+      // TODO(alexmarkov): Throw error in FLAG_limit_ints_to_64_bits mode.
+      return Integer::New(static_cast<int64_t>(value), space);
+    } else {
+      return Bigint::NewFromUint64(value, space);
+    }
   } else {
     return Integer::New(value, space);
   }
@@ -19141,13 +19154,18 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
                               static_cast<int64_t>(right_value), space);
         } else {
-          // In 64-bit mode, the product of two signed integers fits in a
-          // 64-bit result if the sum of the highest bits of their absolute
-          // values is smaller than 62.
           ASSERT(sizeof(intptr_t) == sizeof(int64_t));
-          if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
-              62) {
-            return Integer::New(left_value * right_value, space);
+          if (FLAG_limit_ints_to_64_bits) {
+            return Integer::New(
+                Utils::MulWithWrapAround(left_value, right_value), space);
+          } else {
+            // In 64-bit mode, the product of two signed integers fits in a
+            // 64-bit result if the sum of the highest bits of their absolute
+            // values is smaller than 62.
+            if ((Utils::HighestBit(left_value) +
+                 Utils::HighestBit(right_value)) < 62) {
+              return Integer::New(left_value * right_value, space);
+            }
           }
         }
         // Perform a Bigint multiplication below.
@@ -19175,26 +19193,47 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
   const int64_t right_value = other.AsInt64Value();
   switch (operation) {
     case Token::kADD: {
-      if (!Utils::WillAddOverflow(left_value, right_value)) {
-        return Integer::New(left_value + right_value, space);
+      if (FLAG_limit_ints_to_64_bits) {
+        return Integer::New(Utils::AddWithWrapAround(left_value, right_value),
+                            space);
+      } else {
+        if (!Utils::WillAddOverflow(left_value, right_value)) {
+          return Integer::New(left_value + right_value, space);
+        }
       }
       break;
     }
     case Token::kSUB: {
-      if (!Utils::WillSubOverflow(left_value, right_value)) {
-        return Integer::New(left_value - right_value, space);
+      if (FLAG_limit_ints_to_64_bits) {
+        return Integer::New(Utils::SubWithWrapAround(left_value, right_value),
+                            space);
+      } else {
+        if (!Utils::WillSubOverflow(left_value, right_value)) {
+          return Integer::New(left_value - right_value, space);
+        }
       }
       break;
     }
     case Token::kMUL: {
-      if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
-          62) {
-        return Integer::New(left_value * right_value, space);
+      if (FLAG_limit_ints_to_64_bits) {
+        return Integer::New(Utils::MulWithWrapAround(left_value, right_value),
+                            space);
+      } else {
+        if ((Utils::HighestBit(left_value) + Utils::HighestBit(right_value)) <
+            62) {
+          return Integer::New(left_value * right_value, space);
+        }
       }
       break;
     }
     case Token::kTRUNCDIV: {
-      if ((left_value != Mint::kMinValue) || (right_value != -1)) {
+      if ((left_value == Mint::kMinValue) && (right_value == -1)) {
+        // Division special case: overflow in int64_t.
+        if (FLAG_limit_ints_to_64_bits) {
+          // MIN_VALUE / -1 = (MAX_VALUE + 1), which wraps around to MIN_VALUE
+          return Integer::New(Mint::kMinValue, space);
+        }
+      } else {
         return Integer::New(left_value / right_value, space);
       }
       break;
@@ -19214,6 +19253,7 @@ RawInteger* Integer::ArithmeticOp(Token::Kind operation,
     default:
       UNIMPLEMENTED();
   }
 }
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   return Integer::null();  // Notify caller that a bigint operation is required.
 }
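
Note: the rewritten kTRUNCDIV case exists because kMinInt64 / -1 is the one int64_t division that overflows: the mathematical result is kMaxInt64 + 1, the operation is undefined behavior in C++, and it raises #DE on x86. Under the flag the result simply wraps back to kMinInt64. A standalone sketch of the guard (hypothetical name, not part of the patch):

#include <cstdint>
#include <limits>

// Dividing kMin by -1 directly would be undefined behavior (and traps on
// x86), so the wrapped value is returned explicitly.
int64_t TruncDivWithWrapAround(int64_t a, int64_t b) {
  const int64_t kMin = std::numeric_limits<int64_t>::min();
  if ((a == kMin) && (b == -1)) {
    return kMin;  // (kMax + 1) wraps around to kMin
  }
  return a / b;
}
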
@@ -19259,6 +19299,7 @@ RawInteger* Integer::BitOp(Token::Kind kind,
     default:
       UNIMPLEMENTED();
   }
 }
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   return Integer::null();  // Notify caller that a bigint operation is required.
 }
@@ -19278,12 +19319,18 @@ RawInteger* Smi::ShiftOp(Token::Kind kind,
       }
       {  // Check for overflow.
         int cnt = Utils::BitLength(left_value);
-        if ((cnt + right_value) > Smi::kBits) {
-          if ((cnt + right_value) > Mint::kBits) {
-            return Bigint::NewFromShiftedInt64(left_value, right_value, space);
+        if (right_value > (Smi::kBits - cnt)) {
+          if (FLAG_limit_ints_to_64_bits) {
+            return Integer::New(
+                Utils::ShiftLeftWithTruncation(left_value, right_value), space);
           } else {
-            int64_t left_64 = left_value;
-            return Integer::New(left_64 << right_value, space);
+            if (right_value > (Mint::kBits - cnt)) {
+              return Bigint::NewFromShiftedInt64(left_value, right_value,
+                                                 space);
+            } else {
+              int64_t left_64 = left_value;
+              return Integer::New(left_64 << right_value, space);
+            }
           }
         }
       }
@@ -19705,6 +19752,7 @@ bool Bigint::CheckAndCanonicalizeFields(Thread* thread,

 RawBigint* Bigint::New(Heap::Space space) {
+  // TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
   Thread* thread = Thread::Current();
   Zone* zone = thread->zone();
   Isolate* isolate = thread->isolate();
@@ -19728,6 +19776,7 @@ RawBigint* Bigint::New(bool neg,
                        intptr_t used,
                        const TypedData& digits,
                        Heap::Space space) {
+  // TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
   ASSERT((used == 0) ||
          (!digits.IsNull() && (digits.Length() >= (used + (used & 1)))));
   Thread* thread = Thread::Current();
@@ -19767,6 +19816,7 @@ RawBigint* Bigint::New(bool neg,
 RawBigint* Bigint::NewFromInt64(int64_t value, Heap::Space space) {
   // Currently only used to convert Smi or Mint to hex String, therefore do
   // not throw RangeError if --limit-ints-to-64-bits.
+  // TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
   const TypedData& digits = TypedData::Handle(NewDigits(2, space));
   bool neg;
   uint64_t abs_value;
@@ -19784,10 +19834,9 @@ RawBigint* Bigint::NewFromInt64(int64_t value, Heap::Space space) {

 RawBigint* Bigint::NewFromUint64(uint64_t value, Heap::Space space) {
-  if (FLAG_limit_ints_to_64_bits) {
-    Exceptions::ThrowRangeErrorMsg(
-        "Integer operand requires conversion to Bigint");
-  }
+  // TODO(alexmarkov): Revise this assertion if this factory method is used
+  // to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   const TypedData& digits = TypedData::Handle(NewDigits(2, space));
   SetDigitAt(digits, 0, static_cast<uint32_t>(value));
   SetDigitAt(digits, 1, static_cast<uint32_t>(value >> 32));
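
Note: Bigint digits are 32-bit chunks stored little-endian, so NewFromUint64 above only has to split the value in two. A standalone check of that split (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t value = UINT64_C(0x1122334455667788);
  const uint32_t digit0 = static_cast<uint32_t>(value);        // low 32 bits
  const uint32_t digit1 = static_cast<uint32_t>(value >> 32);  // high 32 bits
  assert(digit0 == 0x55667788u);
  assert(digit1 == 0x11223344u);
  return 0;
}
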
@@ -19798,12 +19847,9 @@ RawBigint* Bigint::NewFromShiftedInt64(int64_t value,
                                        intptr_t shift,
                                        Heap::Space space) {
-  if (FLAG_limit_ints_to_64_bits) {
-    // The allocated Bigint value is not necessarily out of range, but it may
-    // be used as an operand in an operation resulting in a Bigint.
-    Exceptions::ThrowRangeErrorMsg(
-        "Integer operand requires conversion to Bigint");
-  }
+  // TODO(alexmarkov): Revise this assertion if this factory method is used
+  // to explicitly allocate Bigint objects in --limit-ints-to-64-bits mode.
+  ASSERT(!FLAG_limit_ints_to_64_bits);
   ASSERT(kBitsPerDigit == 32);
   ASSERT(shift >= 0);
   const intptr_t digit_shift = shift / kBitsPerDigit;
@@ -19835,6 +19881,7 @@ RawBigint* Bigint::NewFromShiftedInt64(int64_t value,

 RawBigint* Bigint::NewFromCString(const char* str, Heap::Space space) {
   // Allow parser to scan Bigint literal, even with --limit-ints-to-64-bits.
+  // TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
   ASSERT(str != NULL);
   bool neg = false;
   TypedData& digits = TypedData::Handle();
@@ -19857,6 +19904,7 @@ RawBigint* Bigint::NewFromCString(const char* str, Heap::Space space) {

 RawBigint* Bigint::NewCanonical(const String& str) {
   // Allow parser to scan Bigint literal, even with --limit-ints-to-64-bits.
+  // TODO(alexmarkov): Throw error or assert in --limit-ints-to-64-bits mode.
   Thread* thread = Thread::Current();
   Zone* zone = thread->zone();
   Isolate* isolate = thread->isolate();
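
Note: NewFromShiftedInt64 above decomposes a shift into whole 32-bit digits plus a remaining in-digit shift; only digit_shift is visible in the hunk, so the remainder name below is assumed. A standalone sketch of the decomposition:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kBitsPerDigit = 32;
  const intptr_t shift = 70;
  const intptr_t digit_shift = shift / kBitsPerDigit;  // 2 zero digits prepended
  const intptr_t bit_shift = shift % kBitsPerDigit;    // 6 bits shifted in-digit
  assert((digit_shift == 2) && (bit_shift == 6));
  return 0;
}
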