Fix StoreIndexedInstr input representation requirements for Int32/Uint32 arrays.

The previous implementation changed the input representation depending on the propagated type of the value, which violated assumptions made by the SelectRepresentations phase.

Instead of using a tagged/mint input, require an unboxed Int32/Uint32 input and insert an explicit truncating unbox when building the StoreIndexed operation in the optimizer. This also leads to strictly better code and opens up opportunities for further optimizations.
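
For illustration, here is the core of the new approach, condensed from the flow graph optimizer change below (Int32 case; the Uint32 case is analogous):

  // Unbox the stored value explicitly and mark the unbox as truncating, so
  // it keeps only the low 32 bits and never deoptimizes on wider values.
  stored_value = new(I) UnboxInt32Instr(new(I) Value(stored_value),
                                        call->deopt_id());
  stored_value->AsUnboxIntN()->mark_truncating();
  cursor = flow_graph()->AppendTo(cursor, stored_value, call->env(),
                                  FlowGraph::kValue);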

Implement Int32/Uint32 representation support on all platforms. This includes boxing, unboxing and unboxed converter operations.
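
The value semantics of these operations are plain two's-complement 32-bit conversions. A standalone sketch (ordinary C++ for illustration, not VM code):

  #include <cassert>
  #include <cstdint>

  int main() {
    // A truncating unbox keeps only the low 32 bits of a wider integer.
    const int64_t wide = 0x1FFFFFFFFLL;              // 2^33 - 1
    const uint32_t u = static_cast<uint32_t>(wide);  // 0xFFFFFFFF
    // Int32 <-> Uint32 conversion reinterprets the same 32 bits.
    const int32_t s = static_cast<int32_t>(u);       // -1
    assert(u == 0xFFFFFFFFu && s == -1);
    return 0;
  }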

Merge BoxInt32/BoxUint32 and UnboxInt32/UnboxUint32 instruction sequences to minimize duplication.
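
After the merge the hierarchy looks roughly like this (condensed from the intermediate_language.h change below): the base class declares the shared backend once, and the concrete subclasses only declare their tags.

  class UnboxIntNInstr : public TemplateDefinition<1> {
    ...
    // Shared MakeLocationSummary/EmitNativeCode for Int32 and Uint32.
    DECLARE_INSTRUCTION_BACKEND(UnboxIntNInstr);
  };

  class UnboxUint32Instr : public UnboxIntNInstr {
    ...
    DECLARE_INSTRUCTION_NO_BACKEND(UnboxUint32)  // Tag, visitor and name only.
  };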

Improve instruction sequences by utilizing the CARRY flag set by smi untagging where possible (ARM, ia32, x64).
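
On ia32 and x64, SmiUntag is a right shift by one, which moves the smi tag bit into the CARRY flag, so the untag itself doubles as the smi check (condensed from the assembler changes below):

  // Untag optimistically; the tag bit is shifted into CARRY.
  __ SmiUntag(object);
  __ j(NOT_CARRY, is_smi);  // Tag bit was 0, so the value was a smi.
  // Otherwise fall through and check the class id of the boxed value.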

Enable all tests that were disabled by r40078 and r40079.

BUG=http://dartbug.com/20875
R=fschneider@google.com, johnmccutchan@google.com, srdjan@google.com, zra@google.com

Review URL: https://codereview.chromium.org//552303005

git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@40143 260f80e4-7a28-3924-810f-c04153c831b5
Author: vegorov@google.com
Date:   2014-09-11 12:32:54 +00:00
Commit: 183abe7207 (parent bf8dc4d549)
18 changed files with 833 additions and 479 deletions


@@ -20,11 +20,6 @@ scheduled_test/test/scheduled_process_test: Pass, Slow # Issue 9231
scheduled_test/test/scheduled_stream/stream_matcher_test: Pass, Slow
polymer/test/build/script_compactor_test: Pass, Slow
# Failures when running with --optimization-counter-threshold=5
[ $runtime == vm && $mode == debug && $checked ]
intl/test/number_format_test: Skip # Dart Issue 20875
[ $compiler == none && ($runtime == drt || $runtime == dartium || $runtime == ContentShellOnAndroid) ]
third_party/angular_tests/browser_test/*: Skip # github perf_api.dart issue 5
third_party/angular_tests/browser_test/core_dom/shadow_root_options: Fail # Issue 19329


@@ -2257,11 +2257,23 @@ void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
void Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
Condition cond) {
ASSERT(shift_imm != 0); // Do not use Asr if no shift is wanted.
if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
if (shift_imm == 32) {
shift_imm = 0; // Comply to UAL syntax.
}
mov(rd, Operand(rm, ASR, shift_imm), cond);
}
void Assembler::Asrs(Register rd, Register rm, uint32_t shift_imm,
Condition cond) {
ASSERT(shift_imm != 0); // Do not use Asr if no shift is wanted.
if (shift_imm == 32) {
shift_imm = 0; // Comply to UAL syntax.
}
movs(rd, Operand(rm, ASR, shift_imm), cond);
}
void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
mov(rd, Operand(rm, ASR, rs), cond);
}


@@ -719,6 +719,7 @@ class Assembler : public ValueObject {
void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
void Asrs(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
void Rrx(Register rd, Register rm, Condition cond = AL);
@@ -736,6 +737,10 @@
Lsl(reg, reg, kSmiTagSize, cond);
}
void SmiTag(Register dst, Register src, Condition cond = AL) {
Lsl(dst, src, kSmiTagSize, cond);
}
void SmiUntag(Register reg, Condition cond = AL) {
Asr(reg, reg, kSmiTagSize, cond);
}
@@ -744,6 +749,16 @@
Asr(dst, src, kSmiTagSize, cond);
}
// Untag the value in the register assuming it is a smi.
// Untagging shifts the tag bit into the carry flag: if the carry is clear,
// the assumption was correct. In that case jump to the is_smi label;
// otherwise fall through.
void SmiUntag(Register dst, Register src, Label* is_smi) {
ASSERT(kSmiTagSize == 1);
Asrs(dst, src, kSmiTagSize);
b(is_smi, CC);
}
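// Example use in code generation (a condensed sketch; see
// UnboxIntNInstr::EmitNativeCode for ARM below):
//   __ SmiUntag(out, value, &done);          // Untag and branch if smi.
//   __ CompareClassId(value, kMintCid, temp);
//   __ b(deopt, NE);                         // Neither smi nor mint.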
// Function frame setup and tear down.
void EnterFrame(RegList regs, intptr_t frame_space);
void LeaveFrame(RegList regs);


@@ -2761,6 +2761,26 @@ void Assembler::CompareClassId(Register object,
}
void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Register scratch,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset = Object::tags_offset() +
RawObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
j(NOT_CARRY, is_smi, kNearJump);
// Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale
// factor in the addressing mode to compensate for this.
movzxw(scratch, Address(object, TIMES_2, class_id_offset));
cmpl(scratch, Immediate(class_id));
}
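// Intended use (a condensed sketch; see UnboxIntNInstr::EmitNativeCode in
// the ia32 instruction code below):
//   __ SmiUntagOrCheckClass(value, kMintCid, temp, &done);
//   __ j(NOT_EQUAL, deopt);  // Neither a smi nor a mint: deoptimize.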
void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
ASSERT(result != object);
static const intptr_t kSmiCidSource = kSmiCid << RawObject::kClassIdTagPos;


@@ -697,6 +697,11 @@ class Assembler : public ValueObject {
void LoadTaggedClassIdMayBeSmi(Register result,
Register object);
void SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Register scratch,
Label* is_smi);
static Address ElementAddressForIntIndex(bool is_external,
intptr_t cid,
intptr_t index_scale,


@@ -857,7 +857,9 @@ class Assembler : public ValueObject {
const uint16_t low = Utils::Low16Bits(value);
const uint16_t high = Utils::High16Bits(value);
lui(rd, Immediate(high));
ori(rd, rd, Immediate(low));
if (low != 0) {
ori(rd, rd, Immediate(low));
}
}
}
@@ -1126,6 +1128,10 @@
sll(reg, reg, kSmiTagSize);
}
void SmiTag(Register dst, Register src) {
sll(dst, src, kSmiTagSize);
}
void SmiUntag(Register reg) {
sra(reg, reg, kSmiTagSize);
}


@@ -3448,6 +3448,25 @@ void Assembler::CompareClassId(Register object, intptr_t class_id) {
}
void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset = Object::tags_offset() +
RawObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
j(NOT_CARRY, is_smi, kNearJump);
// Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale
// factor in the addressing mode to compensate for this.
movzxw(TMP, Address(object, TIMES_2, class_id_offset));
cmpl(TMP, Immediate(class_id));
}
void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
ASSERT(result != object);


@@ -761,6 +761,10 @@ class Assembler : public ValueObject {
void LoadTaggedClassIdMayBeSmi(Register result, Register object);
// CheckClassIs fused with optimistic SmiUntag.
// Value in the register object is untagged optimistically.
void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label* smi);
/*
* Misc. functionality.
*/


@@ -1364,6 +1364,24 @@ bool FlowGraphOptimizer::InlineSetIndexed(
stored_value,
NULL,
FlowGraph::kValue);
} else if (array_cid == kTypedDataInt32ArrayCid) {
stored_value = new(I) UnboxInt32Instr(
new(I) Value(stored_value),
call->deopt_id());
stored_value->AsUnboxIntN()->mark_truncating();
cursor = flow_graph()->AppendTo(cursor,
stored_value,
call->env(),
FlowGraph::kValue);
} else if (array_cid == kTypedDataUint32ArrayCid) {
stored_value = new(I) UnboxUint32Instr(
new(I) Value(stored_value),
call->deopt_id());
ASSERT(stored_value->AsUnboxIntN()->is_truncating());
cursor = flow_graph()->AppendTo(cursor,
stored_value,
call->env(),
FlowGraph::kValue);
}
const intptr_t index_scale = Instance::ElementSizeFor(array_cid);
@@ -1446,17 +1464,10 @@ bool FlowGraphOptimizer::TryInlineRecognizedMethod(intptr_t receiver_cid,
&ic_data, value_check, entry, last);
case MethodRecognizer::kInt32ArraySetIndexed:
case MethodRecognizer::kUint32ArraySetIndexed:
if (!CanUnboxInt32()) {
return false;
}
// Check that value is always smi or mint, if the platform has unboxed
// mints (ia32 with at least SSE 4.1).
// Check that value is always smi or mint. We use Int32/Uint32 unboxing
// which can only unbox these values.
value_check = ic_data.AsUnaryClassChecksForArgNr(2);
if (FlowGraphCompiler::SupportsUnboxedMints()) {
if (!HasOnlySmiOrMint(value_check)) {
return false;
}
} else if (!HasOnlyOneSmi(value_check)) {
if (!HasOnlySmiOrMint(value_check)) {
return false;
}
return InlineSetIndexed(kind, target, call, receiver, token_pos,
@@ -1570,16 +1581,10 @@ bool FlowGraphOptimizer::TryInlineRecognizedMethod(intptr_t receiver_cid,
kTypedDataUint16ArrayCid,
ic_data, entry, last);
case MethodRecognizer::kByteArrayBaseSetInt32:
if (!CanUnboxInt32()) {
return false;
}
return InlineByteArrayViewStore(target, call, receiver, receiver_cid,
kTypedDataInt32ArrayCid,
ic_data, entry, last);
case MethodRecognizer::kByteArrayBaseSetUint32:
if (!CanUnboxInt32()) {
return false;
}
return InlineByteArrayViewStore(target, call, receiver, receiver_cid,
kTypedDataUint32ArrayCid,
ic_data, entry, last);
@@ -3720,9 +3725,8 @@ bool FlowGraphOptimizer::InlineByteArrayViewStore(const Function& target,
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
// Prevent excessive deoptimization, assume full 32 bits used, and therefore
// generate Mint on 32-bit architectures.
if (kSmiBits >= 32) {
// On 64-bit platforms assume that stored value is always a smi.
if (kSmiBits >= 32) {
value_check = ICData::New(flow_graph_->parsed_function().function(),
i_call->function_name(),
Object::empty_array(), // Dummy args. descr.
@@ -3780,6 +3784,24 @@ bool FlowGraphOptimizer::InlineByteArrayViewStore(const Function& target,
stored_value,
NULL,
FlowGraph::kValue);
} else if (view_cid == kTypedDataInt32ArrayCid) {
stored_value = new(I) UnboxInt32Instr(
new(I) Value(stored_value),
call->deopt_id());
stored_value->AsUnboxIntN()->mark_truncating();
cursor = flow_graph()->AppendTo(cursor,
stored_value,
call->env(),
FlowGraph::kValue);
} else if (view_cid == kTypedDataUint32ArrayCid) {
stored_value = new(I) UnboxUint32Instr(
new(I) Value(stored_value),
call->deopt_id());
ASSERT(stored_value->AsUnboxIntN()->is_truncating());
cursor = flow_graph()->AppendTo(cursor,
stored_value,
call->env(),
FlowGraph::kValue);
}
StoreBarrierType needs_store_barrier = kNoStoreBarrier;
@@ -8283,15 +8305,11 @@ void ConstantPropagator::HandleBinaryOp(Definition* instr,
}
case Token::kSHL:
case Token::kSHR:
if (left.IsSmi() && right.IsSmi()) {
if (left.IsSmi() &&
right.IsSmi() &&
(Smi::Cast(right).Value() >= 0)) {
Instance& result = Integer::ZoneHandle(I,
Smi::Cast(left_int).ShiftOp(op_kind, Smi::Cast(right_int)));
if (result.IsNull()) {
// TODO(regis): A bigint operation is required. Invoke dart?
// Punt for now.
SetValue(instr, non_constant_);
break;
}
result = result.CheckAndCanonicalize(NULL);
ASSERT(!result.IsNull());
SetValue(instr, result);


@@ -1043,10 +1043,19 @@ void PhiInstr::PrintTo(BufferFormatter* f) const {
}
void UnboxIntNInstr::PrintOperandsTo(BufferFormatter* f) const {
if (is_truncating()) {
f->Print("[tr], ");
}
Definition::PrintOperandsTo(f);
}
void UnboxedIntConverterInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s->%s, ",
f->Print("%s->%s%s, ",
RepresentationToCString(from()),
RepresentationToCString(to()));
RepresentationToCString(to()),
is_truncating() ? "[tr]" : "");
Definition::PrintOperandsTo(f);
}
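// Sample printouts with the new flag (hypothetical values and names):
//   v3 <- UnboxInt32:14([tr], v2)
//   v5 <- UnboxedIntConverter(int32->uint32[tr], v3)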


@@ -1168,6 +1168,7 @@ void Instruction::Goto(JoinEntryInstr* entry) {
bool UnboxedIntConverterInstr::CanDeoptimize() const {
return (to() == kUnboxedInt32) &&
!is_truncating() &&
!RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
}
@@ -1176,10 +1177,14 @@ bool UnboxedIntConverterInstr::CanDeoptimize() const {
bool UnboxInt32Instr::CanDeoptimize() const {
const intptr_t value_cid = value()->Type()->ToCid();
if (value_cid == kSmiCid) {
return false;
return (kSmiBits > 32) &&
!is_truncating() &&
!RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
} else if (value_cid == kMintCid) {
return !RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
return !is_truncating() &&
!RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
} else {
return true;
}
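// Illustration: a truncating unbox never deoptimizes for smi/mint inputs
// because it keeps only the low 32 bits; a non-truncating unbox deoptimizes
// only when range analysis cannot prove the value fits into int32, e.g. a
// smi holding 2^40 on a 64-bit platform (where kSmiBits > 32).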
@@ -1743,6 +1748,9 @@ Definition* UnboxIntNInstr::Canonicalize(FlowGraph* flow_graph) {
representation(),
box_defn->value()->CopyWithType(),
representation() == kUnboxedInt32 ? deopt_id_ : Isolate::kNoDeoptId);
if ((representation() == kUnboxedInt32) && is_truncating()) {
converter->mark_truncating();
}
flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
return converter;
}
@@ -1767,6 +1775,9 @@ Definition* UnboxedIntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
representation(),
box_defn->value()->CopyWithType(),
to() == kUnboxedInt32 ? deopt_id_ : NULL);
if ((representation() == kUnboxedInt32) && is_truncating()) {
converter->mark_truncating();
}
flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
return converter;
}
@@ -1781,6 +1792,9 @@ Definition* UnboxedIntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
// these instructions close to each other instead of fusing them.
Definition* replacement =
new UnboxInt32Instr(unbox_defn->value()->CopyWithType(), deopt_id_);
if (is_truncating()) {
replacement->AsUnboxInt32()->mark_truncating();
}
flow_graph->InsertBefore(this,
replacement,
env(),
@@ -1800,6 +1814,15 @@ Definition* UnboxInt32Instr::Canonicalize(FlowGraph* flow_graph) {
ConstantInstr* c = value()->definition()->AsConstant();
if ((c != NULL) && c->value().IsSmi()) {
if (!is_truncating() && (kSmiBits > 32)) {
// Check that constant fits into 32-bit integer.
const int64_t value =
static_cast<int64_t>(Smi::Cast(c->value()).Value());
if (!Utils::IsInt(32, value)) {
return this;
}
}
UnboxedConstantInstr* uc =
new UnboxedConstantInstr(c->value(), kUnboxedInt32);
flow_graph->InsertBefore(this, uc, NULL, FlowGraph::kValue);


@@ -564,15 +564,21 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION(type) \
#define DECLARE_INSTRUCTION_NO_BACKEND(type) \
virtual Tag tag() const { return k##type; } \
virtual void Accept(FlowGraphVisitor* visitor); \
virtual type##Instr* As##type() { return this; } \
virtual const char* DebugName() const { return #type; } \
#define DECLARE_INSTRUCTION_BACKEND(type) \
virtual LocationSummary* MakeLocationSummary(Isolate* isolate, \
bool optimizing) const; \
virtual void EmitNativeCode(FlowGraphCompiler* compiler); \
// Functions required in all concrete instruction classes.
#define DECLARE_INSTRUCTION(type) \
DECLARE_INSTRUCTION_NO_BACKEND(type) \
DECLARE_INSTRUCTION_BACKEND(type) \
class Instruction : public ZoneAllocated {
public:
@@ -7966,10 +7972,12 @@ class UnaryUint32OpInstr : public TemplateDefinition<1> {
class BoxIntNInstr : public TemplateDefinition<1> {
public:
BoxIntNInstr(Representation representation, Value* value)
: representation_(representation) {
: from_representation_(representation) {
SetInputAt(0, value);
}
Representation from_representation() const { return from_representation_; }
Value* value() const { return inputs_[0]; }
virtual bool ValueFitsSmi() const;
@@ -7984,14 +7992,14 @@ class BoxIntNInstr : public TemplateDefinition<1> {
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return representation_;
return from_representation_;
}
virtual bool AllowsCSE() const { return true; }
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsBoxIntN()->representation_ == representation_;
return other->AsBoxIntN()->from_representation_ == from_representation_;
}
virtual bool MayThrow() const { return false; }
@@ -8000,8 +8008,10 @@ class BoxIntNInstr : public TemplateDefinition<1> {
virtual BoxIntNInstr* AsBoxIntN() { return this; }
DECLARE_INSTRUCTION_BACKEND(BoxIntN)
private:
const Representation representation_;
const Representation from_representation_;
DISALLOW_COPY_AND_ASSIGN(BoxIntNInstr);
};
@@ -8012,7 +8022,7 @@ class BoxUint32Instr : public BoxIntNInstr {
explicit BoxUint32Instr(Value* value)
: BoxIntNInstr(kUnboxedUint32, value) { }
DECLARE_INSTRUCTION(BoxUint32)
DECLARE_INSTRUCTION_NO_BACKEND(BoxUint32)
private:
DISALLOW_COPY_AND_ASSIGN(BoxUint32Instr);
@@ -8026,7 +8036,7 @@ class BoxInt32Instr : public BoxIntNInstr {
virtual void InferRange(RangeAnalysis* analysis, Range* range);
DECLARE_INSTRUCTION(BoxInt32)
DECLARE_INSTRUCTION_NO_BACKEND(BoxInt32)
private:
DISALLOW_COPY_AND_ASSIGN(BoxInt32Instr);
@@ -8038,13 +8048,17 @@ class UnboxIntNInstr : public TemplateDefinition<1> {
UnboxIntNInstr(Representation representation,
Value* value,
intptr_t deopt_id)
: representation_(representation) {
: representation_(representation),
is_truncating_(representation == kUnboxedUint32) {
SetInputAt(0, value);
deopt_id_ = deopt_id;
}
Value* value() const { return inputs_[0]; }
bool is_truncating() const { return is_truncating_; }
void mark_truncating() { is_truncating_ = true; }
virtual Representation representation() const {
return representation_;
}
@@ -8055,7 +8069,9 @@ class UnboxIntNInstr : public TemplateDefinition<1> {
virtual EffectSet Effects() const { return EffectSet::None(); }
virtual EffectSet Dependencies() const { return EffectSet::None(); }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsUnboxIntN()->representation_ == representation_;
UnboxIntNInstr* other_unbox = other->AsUnboxIntN();
return (other_unbox->representation_ == representation_) &&
(other_unbox->is_truncating_ == is_truncating_);
}
virtual bool MayThrow() const { return false; }
@@ -8064,8 +8080,13 @@ class UnboxIntNInstr : public TemplateDefinition<1> {
virtual UnboxIntNInstr* AsUnboxIntN() { return this; }
virtual void PrintOperandsTo(BufferFormatter* f) const;
DECLARE_INSTRUCTION_BACKEND(UnboxIntNInstr);
private:
const Representation representation_;
bool is_truncating_;
DISALLOW_COPY_AND_ASSIGN(UnboxIntNInstr);
};
@@ -8075,6 +8096,7 @@ class UnboxUint32Instr : public UnboxIntNInstr {
public:
UnboxUint32Instr(Value* value, intptr_t deopt_id)
: UnboxIntNInstr(kUnboxedUint32, value, deopt_id) {
ASSERT(is_truncating());
}
virtual bool CanDeoptimize() const {
@@ -8082,7 +8104,7 @@ class UnboxUint32Instr : public UnboxIntNInstr {
&& (value()->Type()->ToCid() != kMintCid);
}
DECLARE_INSTRUCTION(UnboxUint32)
DECLARE_INSTRUCTION_NO_BACKEND(UnboxUint32)
private:
DISALLOW_COPY_AND_ASSIGN(UnboxUint32Instr);
@@ -8101,7 +8123,7 @@ class UnboxInt32Instr : public UnboxIntNInstr {
virtual Definition* Canonicalize(FlowGraph* flow_graph);
DECLARE_INSTRUCTION(UnboxInt32)
DECLARE_INSTRUCTION_NO_BACKEND(UnboxInt32)
private:
DISALLOW_COPY_AND_ASSIGN(UnboxInt32Instr);
@@ -8115,7 +8137,8 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1> {
Value* value,
intptr_t deopt_id)
: from_representation_(from),
to_representation_(to) {
to_representation_(to),
is_truncating_(to == kUnboxedUint32) {
ASSERT(from != to);
ASSERT((from == kUnboxedMint) ||
(from == kUnboxedUint32) ||
@@ -8132,6 +8155,9 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1> {
Representation from() const { return from_representation_; }
Representation to() const { return to_representation_; }
bool is_truncating() const { return is_truncating_; }
void mark_truncating() { is_truncating_ = true; }
Definition* Canonicalize(FlowGraph* flow_graph);
@@ -8151,7 +8177,9 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1> {
virtual bool AttributesEqual(Instruction* other) const {
ASSERT(other->IsUnboxedIntConverter());
UnboxedIntConverterInstr* converter = other->AsUnboxedIntConverter();
return (converter->from() == from()) && (converter->to() == to());
return (converter->from() == from()) &&
(converter->to() == to()) &&
(converter->is_truncating() == is_truncating());
}
virtual bool MayThrow() const { return false; }
@@ -8165,6 +8193,8 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1> {
private:
const Representation from_representation_;
const Representation to_representation_;
bool is_truncating_;
DISALLOW_COPY_AND_ASSIGN(UnboxedIntConverterInstr);
};


@@ -303,8 +303,13 @@ LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1;
LocationSummary* locs = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_out(0, Location::RequiresFpuRegister());
if (representation_ != kUnboxedInt32) {
if (representation_ == kUnboxedInt32) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(representation_ == kUnboxedDouble);
locs->set_out(0, Location::RequiresFpuRegister());
}
if (kNumTemps > 0) {
locs->set_temp(0, Location::RequiresRegister());
}
return locs;
@@ -1374,8 +1379,9 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
case kTypedDataUint16ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return value()->IsSmiValue() ? kTagged : kUnboxedMint;
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
@@ -1433,19 +1439,9 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Isolate* isolate,
case kOneByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
// Smis are untagged in TMP register. Mints are stored in register pairs.
if (value()->IsSmiValue()) {
locs->set_in(2, Location::RequiresRegister());
} else {
// We only move the lower 32-bits so we don't care where the high bits
// are located.
locs->set_in(2, Location::Pair(Location::RequiresRegister(),
Location::Any()));
}
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataFloat32ArrayCid:
// Need low register (<= Q7).
@@ -1544,17 +1540,8 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
if (value()->IsSmiValue()) {
ASSERT(RequiredInputRepresentation(2) == kTagged);
const Register value = locs()->in(2).reg();
__ SmiUntag(IP, value);
__ str(IP, element_address);
} else {
ASSERT(RequiredInputRepresentation(2) == kUnboxedMint);
PairLocation* value_pair = locs()->in(2).AsPairLocation();
Register value1 = value_pair->At(0).reg();
__ str(value1, element_address);
}
const Register value = locs()->in(2).reg();
__ str(value, element_address);
break;
}
case kTypedDataFloat32ArrayCid: {
@@ -6703,110 +6690,10 @@ void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
LocationSummary* BoxUint32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
summary->set_in(0, Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void BoxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
Register temp = locs()->temp(0).reg();
ASSERT(value != out);
Label not_smi, done;
// TODO(johnmccutchan): Use range information to fast path smi / mint boxing.
// Test if this value is <= kSmiMax.
__ CompareImmediate(value, kSmiMax);
__ b(&not_smi, HI);
// Smi.
__ mov(out, Operand(value));
__ SmiTag(out);
__ b(&done);
__ Bind(&not_smi);
// Allocate a mint.
BoxAllocationSlowPath::Allocate(
compiler,
this,
compiler->mint_class(),
out,
temp);
// Copy low word into mint.
__ StoreToOffset(kWord,
value,
out,
Mint::value_offset() - kHeapObjectTag);
// Zero high word.
__ eor(temp, temp, Operand(temp));
__ StoreToOffset(kWord,
temp,
out,
Mint::value_offset() - kHeapObjectTag + kWordSize);
__ Bind(&done);
}
LocationSummary* UnboxUint32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps =
((value_cid == kMintCid) || (value_cid == kSmiCid)) ? 0 : 1;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void UnboxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
ASSERT(value != out);
// TODO(johnmccutchan): Emit better code for constant inputs.
if (value_cid == kMintCid) {
__ LoadFromOffset(kWord, out, value, Mint::value_offset() - kHeapObjectTag);
} else if (value_cid == kSmiCid) {
__ mov(out, Operand(value));
__ SmiUntag(out);
} else {
Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label done;
__ tst(value, Operand(kSmiTagMask));
// Smi case.
__ mov(out, Operand(value), EQ);
__ SmiUntag(out, EQ);
__ b(&done, EQ);
// Mint case.
__ CompareClassId(value, kMintCid, temp);
__ b(deopt, NE);
__ LoadFromOffset(kWord, out, value, Mint::value_offset() - kHeapObjectTag);
__ Bind(&done);
}
}
LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
LocationSummary* BoxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
LocationSummary* summary = new(isolate) LocationSummary(
@@ -6824,16 +6711,23 @@ LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
}
void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
void BoxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
ASSERT(value != out);
__ Lsl(out, value, 1);
__ SmiTag(out, value);
if (!ValueFitsSmi()) {
Register temp = locs()->temp(0).reg();
Label done;
__ cmp(value, Operand(out, ASR, 1));
if (from_representation() == kUnboxedInt32) {
__ cmp(value, Operand(out, ASR, 1));
} else {
ASSERT(from_representation() == kUnboxedUint32);
// Note: better to test upper bits instead of comparing with
// kSmiMax as kSmiMax does not fit into immediate operand.
__ TestImmediate(value, 0xC0000000);
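// A uint32 value fits a smi on 32-bit platforms exactly when it is below
// 2^30, i.e. when bits 31 and 30 are both zero, which is what the
// 0xC0000000 mask tests: EQ here means the value fits.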
}
__ b(&done, EQ);
BoxAllocationSlowPath::Allocate(
compiler,
@@ -6841,7 +6735,12 @@ void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->mint_class(),
out,
temp);
__ Asr(temp, value, kBitsPerWord - 1);
if (from_representation() == kUnboxedInt32) {
__ Asr(temp, value, kBitsPerWord - 1);
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ eor(temp, temp, Operand(temp));
}
__ StoreToOffset(kWord,
value,
out,
@@ -6855,24 +6754,6 @@ void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
LocationSummary* UnboxInt32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps =
((value_cid == kMintCid) || (value_cid == kSmiCid)) ? 0 : 1;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, Location::RequiresRegister());
return summary;
}
static void LoadInt32FromMint(FlowGraphCompiler* compiler,
Register mint,
Register result,
@@ -6893,41 +6774,44 @@ static void LoadInt32FromMint(FlowGraphCompiler* compiler,
}
void UnboxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* UnboxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((representation() == kUnboxedInt32) ||
(representation() == kUnboxedUint32));
ASSERT((representation() != kUnboxedUint32) || is_truncating());
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void UnboxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
Label* out_of_range = !is_truncating() ? deopt : NULL;
ASSERT(value != out);
if (value_cid == kMintCid) {
Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
LoadInt32FromMint(compiler,
value,
out,
temp,
deopt);
} else if (value_cid == kSmiCid) {
if (value_cid == kSmiCid) {
__ SmiUntag(out, value);
} else if (value_cid == kMintCid) {
LoadInt32FromMint(compiler, value, out, temp, out_of_range);
} else {
Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label done;
__ tst(value, Operand(kSmiTagMask));
// Smi case.
__ mov(out, Operand(value), EQ);
__ SmiUntag(out, EQ);
__ b(&done, EQ);
// Mint case.
__ SmiUntag(out, value, &done);
__ CompareClassId(value, kMintCid, temp);
__ b(deopt, NE);
LoadInt32FromMint(compiler,
value,
out,
temp,
deopt);
LoadInt32FromMint(compiler, value, out, temp, out_of_range);
__ Bind(&done);
}
}


@@ -295,22 +295,35 @@ void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 0;
const Location out = (representation_ == kUnboxedInt32) ?
Location::RequiresRegister() : Location::RequiresFpuRegister();
return LocationSummary::Make(isolate,
kNumInputs,
Location::RequiresFpuRegister(),
out,
LocationSummary::kNoCall);
}
void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation_ == kUnboxedDouble);
if (!locs()->out(0).IsInvalid()) {
if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
const VRegister dst = locs()->out(0).fpu_reg();
__ veor(dst, dst, dst);
} else {
const VRegister dst = locs()->out(0).fpu_reg();
__ LoadDImmediate(dst, Double::Cast(value()).value(), PP);
switch (representation_) {
case kUnboxedDouble:
if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
const VRegister dst = locs()->out(0).fpu_reg();
__ veor(dst, dst, dst);
} else {
const VRegister dst = locs()->out(0).fpu_reg();
__ LoadDImmediate(dst, Double::Cast(value()).value(), PP);
}
break;
case kUnboxedInt32:
__ LoadImmediate(locs()->out(0).reg(),
static_cast<int32_t>(Smi::Cast(value()).Value()),
PP);
break;
default:
UNREACHABLE();
break;
}
}
}
@@ -1137,9 +1150,11 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
@@ -1278,8 +1293,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
const Register value = locs()->in(2).reg();
__ SmiUntag(TMP, value);
__ str(TMP, element_address, kUnsignedWord);
__ str(value, element_address, kUnsignedWord);
break;
}
case kTypedDataFloat32ArrayCid: {
@@ -5170,38 +5184,140 @@ CompileType UnaryUint32OpInstr::ComputeType() const {
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(ShiftUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxUint32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxedIntConverterInstr)
LocationSummary* UnboxUint32Instr::MakeLocationSummary(Isolate* isolate,
LocationSummary* UnboxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void UnboxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
void UnboxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register out = locs()->out(0).reg();
const Register value = locs()->in(0).reg();
ASSERT(value == locs()->out(0).reg());
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
if (value_cid == kSmiCid) {
__ SmiUntag(value);
__ SmiUntag(out, value);
} else if (value_cid == kMintCid) {
__ LoadFieldFromOffset(out, value, Mint::value_offset(), PP);
} else {
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
__ tsti(value, kSmiTagMask);
Label done;
__ SmiUntag(out, value);
__ TestImmediate(value, kSmiTagMask, PP);
__ b(&done, EQ);
__ CompareClassId(value, kMintCid, PP);
__ b(deopt, NE);
__ SmiUntag(value);
__ LoadFieldFromOffset(out, value, Mint::value_offset(), PP);
__ Bind(&done);
}
// TODO(vegorov): as it is implemented right now truncating unboxing would
// leave "garbage" in the higher word.
if (!is_truncating() && (deopt != NULL)) {
ASSERT(representation() == kUnboxedInt32);
__ cmp(out, Operand(out, SXTW, 0));
__ b(deopt, NE);
}
}
LocationSummary* BoxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate,
kNumInputs,
kNumTemps,
LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void BoxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
ASSERT(value != out);
ASSERT(kSmiTagSize == 1);
// TODO(vegorov) implement and use UBFM/SBFM for this.
__ Lsl(out, value, 32);
if (from_representation() == kUnboxedInt32) {
__ Asr(out, out, 32 - kSmiTagSize);
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ Lsr(out, out, 32 - kSmiTagSize);
}
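// Net effect: out = value << kSmiTagSize with bit 31 sign-extended (Int32)
// or zero-extended (Uint32) through the upper word; every 32-bit payload
// fits into a smi on 64-bit platforms, so no slow path is needed.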
}
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
UNREACHABLE();
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
}
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent but we want to normalize
// upper bits for safety reasons.
// TODO(vegorov) if we ensure that we never use kDoubleWord size
// with it then we could avoid this.
// TODO(vegorov) implement and use UBFM for zero extension.
__ Lsl(out, value, 32);
__ Lsr(out, out, 32);
} else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
// Representations are bitwise equivalent.
// TODO(vegorov) if we ensure that we never use kDoubleWord size
// with it then we could avoid this.
// TODO(vegorov) implement and use SBFM for sign extension.
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
__ Lsl(out, value, 32);
__ Asr(out, out, 32);
if (CanDeoptimize()) {
Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
__ cmp(out, Operand(value, UXTW, 0));
__ b(deopt, NE);
}
} else if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
UNREACHABLE();
} else {
UNREACHABLE();
}
}


@@ -168,10 +168,16 @@ void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = (constant_address() == 0) ? 1 : 0;
const intptr_t kNumTemps =
(constant_address() == 0) && (representation() != kUnboxedInt32) ? 1 : 0;
LocationSummary* locs = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_out(0, Location::RequiresFpuRegister());
if (representation() == kUnboxedDouble) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
ASSERT(representation() == kUnboxedInt32);
locs->set_out(0, Location::RequiresRegister());
}
if (kNumTemps == 1) {
locs->set_temp(0, Location::RequiresRegister());
}
@@ -1228,8 +1234,9 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
case kTypedDataUint16ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return value()->IsSmiValue() ? kTagged : kUnboxedMint;
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
@@ -1286,16 +1293,7 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Isolate* isolate,
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
// For smis, use a writable register because the value must be untagged
// before storing. Mints are stored in register pairs.
if (value()->IsSmiValue()) {
locs->set_in(2, Location::WritableRegister());
} else {
// We only move the lower 32-bits so we don't care where the high bits
// are located.
locs->set_in(2, Location::Pair(Location::RequiresRegister(),
Location::Any()));
}
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
@@ -1396,17 +1394,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
if (value()->IsSmiValue()) {
ASSERT(RequiredInputRepresentation(2) == kTagged);
Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ movl(element_address, value);
} else {
ASSERT(RequiredInputRepresentation(2) == kUnboxedMint);
PairLocation* value_pair = locs()->in(2).AsPairLocation();
Register value1 = value_pair->At(0).reg();
__ movl(element_address, value1);
}
__ movl(element_address, locs()->in(2).reg());
break;
case kTypedDataFloat32ArrayCid:
__ movss(element_address, locs()->in(2).fpu_reg());
@@ -6408,54 +6396,7 @@ void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
LocationSummary* BoxUint32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate,
kNumInputs,
kNumTemps,
ValueFitsSmi() ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
: Location::RequiresRegister());
return summary;
}
void BoxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
Label not_smi, done;
if (ValueFitsSmi()) {
ASSERT(value == out);
__ SmiTag(value);
} else {
ASSERT(value != out);
__ cmpl(value, Immediate(kSmiMax));
__ j(ABOVE, &not_smi);
// Smi.
__ movl(out, value);
__ SmiTag(out);
__ jmp(&done);
__ Bind(&not_smi);
// Allocate a mint.
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->mint_class(), out, kNoRegister);
// Copy low word into mint.
__ movl(FieldAddress(out, Mint::value_offset()), value);
// Zero high word.
__ movl(FieldAddress(out, Mint::value_offset() + kWordSize), Immediate(0));
__ Bind(&done);
}
}
LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
LocationSummary* BoxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
@@ -6463,25 +6404,32 @@ LocationSummary* BoxInt32Instr::MakeLocationSummary(Isolate* isolate,
isolate, kNumInputs, kNumTemps,
ValueFitsSmi() ? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath);
summary->set_in(0, ValueFitsSmi() ? Location::RequiresRegister()
: Location::WritableRegister());
const bool needs_writable_input = ValueFitsSmi() ||
(from_representation() == kUnboxedUint32);
summary->set_in(0, needs_writable_input ? Location::RequiresRegister()
: Location::WritableRegister());
summary->set_out(0, ValueFitsSmi() ? Location::SameAsFirstInput()
: Location::RequiresRegister());
return summary;
}
void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
void BoxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
if (out != value) {
__ movl(out, value);
}
__ shll(out, Immediate(1));
__ MoveRegister(out, value);
__ shll(out, Immediate(kSmiTagSize));
if (!ValueFitsSmi()) {
Label done;
__ j(NO_OVERFLOW, &done);
ASSERT(value != out);
if (from_representation() == kUnboxedInt32) {
__ j(NO_OVERFLOW, &done);
} else {
__ testl(value, Immediate(0xC0000000));
__ j(ZERO, &done);
}
// Allocate a mint.
// Value input is writable register and has to be manually preserved
// on the slow path.
@@ -6489,126 +6437,102 @@ void BoxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->mint_class(), out, kNoRegister);
__ movl(FieldAddress(out, Mint::value_offset()), value);
__ sarl(value, Immediate(31)); // Sign extend.
__ movl(FieldAddress(out, Mint::value_offset() + kWordSize), value);
if (from_representation() == kUnboxedInt32) {
__ sarl(value, Immediate(31)); // Sign extend.
__ movl(FieldAddress(out, Mint::value_offset() + kWordSize), value);
} else {
__ movl(FieldAddress(out, Mint::value_offset() + kWordSize),
Immediate(0));
}
__ Bind(&done);
}
}
LocationSummary* UnboxUint32Instr::MakeLocationSummary(Isolate* isolate,
LocationSummary* UnboxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps =
((value_cid == kMintCid) || (value_cid == kSmiCid)) ? 0 : 1;
intptr_t kNumTemps = 0;
if (CanDeoptimize()) {
if ((value_cid != kSmiCid) &&
(value_cid != kMintCid) &&
!is_truncating()) {
kNumTemps = 2;
} else {
kNumTemps = 1;
}
}
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
for (int i = 0; i < kNumTemps; i++) {
summary->set_temp(i, Location::RequiresRegister());
}
summary->set_out(0, Location::SameAsFirstInput());
return summary;
}
void UnboxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
ASSERT(value == locs()->out(0).reg());
// TODO(johnmccutchan): Emit better code for constant inputs.
if (value_cid == kMintCid) {
__ movl(value, FieldAddress(value, Mint::value_offset()));
} else if (value_cid == kSmiCid) {
__ SmiUntag(value);
} else {
Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label is_smi, done;
__ testl(value, Immediate(kSmiTagMask));
__ j(ZERO, &is_smi);
__ CompareClassId(value, kMintCid, temp);
__ j(NOT_EQUAL, deopt);
__ movl(value, FieldAddress(value, Mint::value_offset()));
__ jmp(&done);
__ Bind(&is_smi);
__ SmiUntag(value);
__ Bind(&done);
}
}
LocationSummary* UnboxInt32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
}
summary->set_out(0, (value_cid == kSmiCid) ? Location::SameAsFirstInput()
: Location::RequiresRegister());
summary->set_out(0, ((value_cid == kSmiCid) || (value_cid != kMintCid)) ?
Location::SameAsFirstInput() : Location::RequiresRegister());
return summary;
}
static void LoadInt32FromMint(FlowGraphCompiler* compiler,
Register mint,
Register result,
const Address& lo,
const Address& hi,
Register temp,
Label* deopt) {
__ movl(result, FieldAddress(mint, Mint::value_offset()));
__ movl(result, lo);
if (deopt != NULL) {
ASSERT(temp != result);
__ movl(temp, result);
__ sarl(temp, Immediate(31));
__ cmpl(temp, FieldAddress(mint, Mint::value_offset() + kWordSize));
__ cmpl(temp, hi);
__ j(NOT_EQUAL, deopt);
}
}
void UnboxInt32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
void UnboxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
Label* out_of_range = !is_truncating() ? deopt : NULL;
// TODO(johnmccutchan): Emit better code for constant inputs.
if (value_cid == kMintCid) {
Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
LoadInt32FromMint(compiler,
value,
result,
temp,
deopt);
} else if (value_cid == kSmiCid) {
const intptr_t lo_offset = Mint::value_offset();
const intptr_t hi_offset = Mint::value_offset() + kWordSize;
if (value_cid == kSmiCid) {
ASSERT(value == result);
__ SmiUntag(value);
} else {
Register temp = locs()->temp(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
Label is_smi, done;
__ testl(value, Immediate(kSmiTagMask));
__ j(ZERO, &is_smi);
__ CompareClassId(value, kMintCid, temp);
__ j(NOT_EQUAL, deopt);
} else if (value_cid == kMintCid) {
ASSERT((value != result) || (out_of_range == NULL));
LoadInt32FromMint(compiler,
value,
result,
FieldAddress(value, lo_offset),
FieldAddress(value, hi_offset),
temp,
deopt);
__ movl(value, FieldAddress(value, Mint::value_offset()));
__ jmp(&done);
__ Bind(&is_smi);
__ SmiUntag(value);
out_of_range);
} else {
ASSERT(value == result);
Label done;
__ SmiUntagOrCheckClass(value, kMintCid, temp, &done);
__ j(NOT_EQUAL, deopt);
if (out_of_range != NULL) {
Register value_temp = locs()->temp(1).reg();
__ movl(value_temp, value);
value = value_temp;
}
LoadInt32FromMint(compiler,
result,
Address(value, TIMES_2, lo_offset),
Address(value, TIMES_2, hi_offset),
temp,
out_of_range);
__ Bind(&done);
}
}


@@ -323,25 +323,44 @@ void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = 1;
const intptr_t kNumTemps = (representation_ == kUnboxedInt32) ? 0 : 1;
LocationSummary* locs = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_out(0, Location::RequiresFpuRegister());
locs->set_temp(0, Location::RequiresRegister());
if (representation_ == kUnboxedInt32) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(representation_ == kUnboxedDouble);
locs->set_out(0, Location::RequiresFpuRegister());
}
if (kNumTemps > 0) {
locs->set_temp(0, Location::RequiresRegister());
}
return locs;
}
void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation_ == kUnboxedDouble);
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
ASSERT(value().IsDouble());
const Register const_value = locs()->temp(0).reg();
const DRegister result = locs()->out(0).fpu_reg();
__ LoadObject(const_value, value());
__ LoadDFromOffset(result, const_value,
Double::value_offset() - kHeapObjectTag);
switch (representation_) {
case kUnboxedDouble: {
ASSERT(value().IsDouble());
const Register const_value = locs()->temp(0).reg();
const DRegister result = locs()->out(0).fpu_reg();
__ LoadObject(const_value, value());
__ LoadDFromOffset(result, const_value,
Double::value_offset() - kHeapObjectTag);
break;
}
case kUnboxedInt32:
__ LoadImmediate(locs()->out(0).reg(),
Smi::Cast(value()).Value());
break;
default:
UNREACHABLE();
}
}
}
@@ -1259,8 +1278,9 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
case kTypedDataUint16ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return value()->IsSmiValue() ? kTagged : kUnboxedMint;
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
@@ -1396,14 +1416,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
if (value()->IsSmiValue()) {
ASSERT(RequiredInputRepresentation(2) == kTagged);
Register value = locs()->in(2).reg();
__ SmiUntag(TMP, value);
__ sw(TMP, element_address);
} else {
UNIMPLEMENTED();
}
__ sw(locs()->in(2).reg(), element_address);
break;
}
case kTypedDataFloat32ArrayCid: {
@@ -4582,12 +4595,166 @@ CompileType UnaryUint32OpInstr::ComputeType() const {
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(ShiftUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxUint32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxUint32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxedIntConverterInstr)
LocationSummary* BoxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 1;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
summary->set_in(0, Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void BoxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register out = locs()->out(0).reg();
ASSERT(value != out);
Label done;
__ SmiTag(out, value);
if (!ValueFitsSmi()) {
Register temp = locs()->temp(0).reg();
if (from_representation() == kUnboxedInt32) {
__ SmiUntag(CMPRES1, out);
__ BranchEqual(CMPRES1, value, &done);
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ AndImmediate(CMPRES1, value, 0xC0000000);
__ BranchEqual(CMPRES1, ZR, &done);
}
BoxAllocationSlowPath::Allocate(
compiler,
this,
compiler->mint_class(),
out,
temp);
Register hi;
if (from_representation() == kUnboxedInt32) {
hi = temp;
__ sra(hi, value, kBitsPerWord - 1);
} else {
ASSERT(from_representation() == kUnboxedUint32);
hi = ZR;
}
__ StoreToOffset(value,
out,
Mint::value_offset() - kHeapObjectTag);
__ StoreToOffset(hi,
out,
Mint::value_offset() - kHeapObjectTag + kWordSize);
__ Bind(&done);
}
}
LocationSummary* UnboxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((representation() == kUnboxedInt32) ||
(representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
static void LoadInt32FromMint(FlowGraphCompiler* compiler,
Register mint,
Register result,
Label* deopt) {
__ LoadFromOffset(result,
mint,
Mint::value_offset() - kHeapObjectTag);
if (deopt != NULL) {
__ LoadFromOffset(CMPRES1,
mint,
Mint::value_offset() - kHeapObjectTag + kWordSize);
__ sra(CMPRES2, result, kBitsPerWord - 1);
__ BranchNotEqual(CMPRES1, CMPRES2, deopt);
}
}
void UnboxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
Label* out_of_range = !is_truncating() ? deopt : NULL;
ASSERT(value != out);
if (value_cid == kSmiCid) {
__ SmiUntag(out, value);
} else if (value_cid == kMintCid) {
LoadInt32FromMint(compiler, value, out, out_of_range);
} else {
Label done;
__ SmiUntag(out, value);
__ andi(CMPRES1, value, Immediate(kSmiTagMask));
__ beq(CMPRES1, ZR, &done);
__ LoadClassId(CMPRES1, value);
__ BranchNotEqual(CMPRES1, kMintCid, deopt);
LoadInt32FromMint(compiler, value, out, out_of_range);
__ Bind(&done);
}
}
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
UNREACHABLE();
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
}
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent.
ASSERT(out == locs()->in(0).reg());
} else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent.
ASSERT(out == locs()->in(0).reg());
if (CanDeoptimize()) {
Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
__ BranchSignedLess(out, 0, deopt);
}
} else if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
UNREACHABLE();
} else {
UNREACHABLE();
}
}
LocationSummary* ThrowInstr::MakeLocationSummary(Isolate* isolate,

View file

@@ -254,21 +254,41 @@ LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Isolate* isolate,
const intptr_t kNumTemps = 0;
LocationSummary* locs = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_out(0, Location::RequiresFpuRegister());
switch (representation()) {
case kUnboxedDouble:
locs->set_out(0, Location::RequiresFpuRegister());
break;
case kUnboxedInt32:
locs->set_out(0, Location::RequiresRegister());
break;
default:
UNREACHABLE();
break;
}
return locs;
}
void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation_ == kUnboxedDouble);
// The register allocator drops constant definitions that have no uses.
if (!locs()->out(0).IsInvalid()) {
XmmRegister result = locs()->out(0).fpu_reg();
if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
__ xorps(result, result);
} else {
__ LoadObject(TMP, value(), PP);
__ movsd(result, FieldAddress(TMP, Double::value_offset()));
switch (representation()) {
case kUnboxedDouble: {
XmmRegister result = locs()->out(0).fpu_reg();
if (Utils::DoublesBitEqual(Double::Cast(value()).value(), 0.0)) {
__ xorps(result, result);
} else {
__ LoadObject(TMP, value(), PP);
__ movsd(result, FieldAddress(TMP, Double::value_offset()));
}
break;
}
case kUnboxedInt32:
__ movl(locs()->out(0).reg(),
Immediate(static_cast<int32_t>(Smi::Cast(value()).Value())));
break;
default:
UNREACHABLE();
}
}
}
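
// Illustrative sketch (not VM code): a kUnboxedInt32 constant is backed by a
// smi in the constant pool, so materializing it is a single movl of the raw
// value; no heap object is ever touched. Hypothetical helper mirroring the
// Immediate(...) conversion above:
#include <cstdint>

int32_t MaterializeUnboxedInt32(int64_t smi_value) {
  // The optimizer only assigns kUnboxedInt32 to constants that fit in
  // 32 bits, so this narrowing cast loses nothing.
  return static_cast<int32_t>(smi_value);
}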
@@ -1089,9 +1109,11 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
return kTagged;
case kTypedDataInt32ArrayCid:
return kUnboxedInt32;
case kTypedDataUint32ArrayCid:
return kUnboxedUint32;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
return kUnboxedDouble;
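
// Illustrative sketch (not VM code): requiring kUnboxedInt32/kUnboxedUint32
// here means the value reaches the store as a raw 32-bit integer. The
// truncating unbox the optimizer inserts in front of the store behaves like
// a C++ narrowing cast:
#include <cstdint>

void StoreToInt32List(int32_t* backing_store, intptr_t index, int64_t value) {
  backing_store[index] = static_cast<int32_t>(value);  // keep low 32 bits
}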
@@ -1249,7 +1271,6 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
Register value = locs()->in(2).reg();
__ SmiUntag(value);
__ movl(element_address, value);
break;
}
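
// Illustrative sketch (not VM code): the SmiUntag removed above is no longer
// needed because the input is already unboxed. Before, a tagged smi had to
// be shifted right by one; now the register holds the raw value:
#include <cstdint>

int32_t OldStorePath(intptr_t tagged_smi) {
  return static_cast<int32_t>(tagged_smi >> 1);  // SmiUntag, then movl
}

int32_t NewStorePath(int32_t unboxed_value) {
  return unboxed_value;  // movl stores the register directly
}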
@@ -5534,38 +5555,137 @@ CompileType UnaryUint32OpInstr::ComputeType() const {
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(ShiftUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnaryUint32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxInt32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(BoxUint32Instr)
DEFINE_UNIMPLEMENTED_INSTRUCTION(UnboxedIntConverterInstr)
LocationSummary* UnboxUint32Instr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
LocationSummary* UnboxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
const intptr_t kNumTemps = (!is_truncating() && CanDeoptimize()) ? 1 : 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
if (kNumTemps > 0) {
summary->set_temp(0, Location::RequiresRegister());
}
return summary;
}
void UnboxUint32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
void UnboxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
Label* deopt = CanDeoptimize() ?
compiler->AddDeoptStub(deopt_id_, ICData::kDeoptUnboxInteger) : NULL;
ASSERT(value == locs()->out(0).reg());
if (value_cid == kSmiCid) {
__ SmiUntag(value);
} else if (value_cid == kMintCid) {
__ movq(value, FieldAddress(value, Mint::value_offset()));
} else {
Label* deopt = compiler->AddDeoptStub(deopt_id_,
ICData::kDeoptUnboxInteger);
__ testq(value, Immediate(kSmiTagMask));
__ j(NOT_ZERO, deopt);
__ SmiUntag(value);
Label done;
// Optimistically untag value.
__ SmiUntagOrCheckClass(value, kMintCid, &done);
__ j(NOT_EQUAL, deopt);
// Undo untagging by multiplying the value by 2 via the scaled address.
__ movq(value, Address(value, TIMES_2, Mint::value_offset()));
__ Bind(&done);
}
// TODO(vegorov): as currently implemented, truncating unboxing leaves
// "garbage" in the higher word.
if (!is_truncating() && (deopt != NULL)) {
ASSERT(representation() == kUnboxedInt32);
Register temp = locs()->temp(0).reg();
__ movsxd(temp, value);
__ cmpq(temp, value);
__ j(NOT_EQUAL, deopt);
}
}
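
// Illustrative sketch (not VM code): SmiUntagOrCheckClass relies on the smi
// tag living in bit 0. One arithmetic shift right both untags the value and
// moves the tag bit into the CARRY flag; when CARRY is set the register now
// holds the original pointer divided by 2, which the mint load above undoes
// by addressing with a TIMES_2 scale. C++ model of the flag behaviour:
#include <cstdint>

bool SarSetsCarry(uint64_t word, int64_t* shifted) {
  bool carry = (word & 1) != 0;                // bit shifted out into CF
  *shifted = static_cast<int64_t>(word) >> 1;  // untagged smi, or ptr >> 1
  // carry == false: word was a smi; *shifted is its value.
  // carry == true:  word was a heap pointer; Address(reg, TIMES_2, offset)
  //                 rescales *shifted back for the class id check.
  return carry;
}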
LocationSummary* BoxIntNInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
ASSERT((from_representation() == kUnboxedInt32) ||
(from_representation() == kUnboxedUint32));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate,
kNumInputs,
kNumTemps,
LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::RequiresRegister());
return summary;
}
void BoxIntNInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
ASSERT(value != out);
ASSERT(kSmiTagSize == 1);
if (from_representation() == kUnboxedInt32) {
__ movsxd(out, value);
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ movl(out, value);
}
__ SmiTag(out);
}
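
// Illustrative sketch (not VM code): on x64 a smi has 63 payload bits, so
// every 32-bit value boxes into a smi without overflow. Boxing is just an
// extension to 64 bits (signed for int32, unsigned for uint32) followed by
// the smi tag, i.e. a left shift by one:
#include <cstdint>

int64_t BoxInt32AsSmi(int32_t value) {
  return static_cast<int64_t>(value) << 1;  // movsxd, then SmiTag
}

int64_t BoxUint32AsSmi(uint32_t value) {
  return static_cast<int64_t>(value) << 1;  // movl zero-extends, then SmiTag
}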
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Isolate* isolate,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new(isolate) LocationSummary(
isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
UNREACHABLE();
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
}
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent, but we normalize the upper
// bits for safety.
// TODO(vegorov): if we can guarantee that the upper bits are never
// used, this move could be avoided.
__ movl(out, value);
} else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
// Representations are bitwise equivalent.
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
__ movsxd(out, value);
if (CanDeoptimize()) {
Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
__ testl(out, out);
__ j(NEGATIVE, deopt);
}
} else if (from() == kUnboxedMint) {
UNREACHABLE();
} else if (to() == kUnboxedMint) {
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
UNREACHABLE();
} else {
UNREACHABLE();
}
}
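
// Illustrative sketch (not VM code): inside a 64-bit register the two
// conversions differ only in how the upper 32 bits get normalized: movl
// zero-extends for int32 -> uint32, movsxd sign-extends for uint32 -> int32,
// and the latter optionally deopts when bit 31 of the input is set:
#include <cstdint>

uint64_t Int32ToUint32Reg(int64_t reg) {
  return static_cast<uint32_t>(reg);  // movl: clears the upper 32 bits
}

int64_t Uint32ToInt32Reg(uint64_t reg, bool* deopt) {
  int64_t result = static_cast<int32_t>(static_cast<uint32_t>(reg));  // movsxd
  *deopt = result < 0;  // testl + j(NEGATIVE): value did not fit in int32
  return result;
}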


@@ -2,19 +2,6 @@
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Failing on vm-linux-release-optcounter-threshold-be only. There seems to
# be no way to disable only that configuration though.
[ $runtime == vm && $system == linux && $mode == release ]
LibTest/core/Duration/operator_lt_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_lte_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_gte_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_mult_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_gt_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_plus_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_eq_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_minus_A01_t01: Skip # Issue 20875
LibTest/core/Duration/operator_div_A01_t01: Skip # Issue 20875
[ $compiler == none && ($runtime == vm || $runtime == dartium || $runtime == ContentShellOnAndroid) ]