[vm, compiler] Implement unboxed SIMD for RISC-V via lowering.

TEST=ci
Change-Id: Ice2ec0847ee43ff9b8c5859ba15dbbeee48ba36e
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/250943
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
This commit is contained in:
Ryan Macnak 2022-08-02 20:24:54 +00:00 committed by Commit Bot
parent 2bedf1e55d
commit 460bd7a03a
35 changed files with 1941 additions and 747 deletions

View file

@ -662,6 +662,20 @@ void Assembler::LoadImmediate(Register reg, int64_t imm) {
}
}
// Loads the single-precision float |imms| into FPU register |vd|.
void Assembler::LoadSImmediate(VRegister vd, float imms) {
  int32_t imm32 = bit_cast<int32_t, float>(imms);
  if (imm32 == 0) {
    // All-zero bit pattern (+0.0f): clear the register, no memory load.
    veor(vd, vd, vd);
  } else if (constant_pool_allowed()) {
    // Load the 32-bit pattern from the object pool through PP.
    intptr_t index = object_pool_builder().FindImmediate(imm32);
    intptr_t offset = target::ObjectPool::element_offset(index);
    LoadSFromOffset(vd, PP, offset);
  } else {
    // No pool available: materialize the bits in TMP, then move to the
    // FPU register.
    LoadImmediate(TMP, imm32);
    fmovsr(vd, TMP);
  }
}
void Assembler::LoadDImmediate(VRegister vd, double immd) {
if (fmovdi(vd, immd)) return;

View file

@ -2066,6 +2066,7 @@ class Assembler : public AssemblerBase {
LoadImmediate(reg, imm.value());
}
void LoadSImmediate(VRegister reg, float immd);
void LoadDImmediate(VRegister reg, double immd);
void LoadQImmediate(VRegister reg, simd128_value_t immq);

View file

@ -2199,6 +2199,13 @@ void Assembler::IncrementSmiField(const Address& dest, int32_t increment) {
addl(dest, inc_imm);
}
// Loads the single-precision float |value| into XMM register |dst| by
// pushing its bit pattern on the stack, loading it, and popping it again.
void Assembler::LoadSImmediate(XmmRegister dst, float value) {
  int32_t constant = bit_cast<int32_t, float>(value);
  pushl(Immediate(constant));
  movss(dst, Address(ESP, 0));
  // Pop the temporary slot.
  addl(ESP, Immediate(target::kWordSize));
}
void Assembler::LoadDImmediate(XmmRegister dst, double value) {
// TODO(5410843): Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);

View file

@ -759,6 +759,7 @@ class Assembler : public AssemblerBase {
LoadImmediate(reg, immediate.value());
}
void LoadSImmediate(XmmRegister dst, float value);
void LoadDImmediate(XmmRegister dst, double value);
void Drop(intptr_t stack_elements);

View file

@ -3351,6 +3351,18 @@ void Assembler::LoadImmediate(Register reg, intx_t imm) {
}
}
// Loads the single-precision float |imms| into FPU register |reg|.
void Assembler::LoadSImmediate(FRegister reg, float imms) {
  int32_t imm = bit_cast<int32_t, float>(imms);
  if (imm == 0) {
    fmvwx(reg, ZR);  // bit_cast uint32_t -> float
  } else {
    // Non-zero values are loaded from the object pool through PP.
    ASSERT(constant_pool_allowed());
    intptr_t index = object_pool_builder().FindImmediate(imm);
    intptr_t offset = target::ObjectPool::element_offset(index);
    LoadSFromOffset(reg, PP, offset);
  }
}
void Assembler::LoadDImmediate(FRegister reg, double immd) {
int64_t imm = bit_cast<int64_t, double>(immd);
if (imm == 0) {

View file

@ -432,6 +432,8 @@ class MicroAssembler : public AssemblerBase {
void feqs(Register rd, FRegister rs1, FRegister rs2);
void flts(Register rd, FRegister rs1, FRegister rs2);
void fles(Register rd, FRegister rs1, FRegister rs2);
  // Flipped-operand forms: a > b  <=>  b < a, and a >= b  <=>  b <= a.
  void fgts(Register rd, FRegister rs1, FRegister rs2) { flts(rd, rs2, rs1); }
  void fges(Register rd, FRegister rs1, FRegister rs2) { fles(rd, rs2, rs1); }
void fclasss(Register rd, FRegister rs1);
// int32_t <- float
void fcvtws(Register rd, FRegister rs1, RoundingMode rounding = RNE);
@ -517,6 +519,8 @@ class MicroAssembler : public AssemblerBase {
void feqd(Register rd, FRegister rs1, FRegister rs2);
void fltd(Register rd, FRegister rs1, FRegister rs2);
void fled(Register rd, FRegister rs1, FRegister rs2);
  // Flipped-operand forms: a > b  <=>  b < a, and a >= b  <=>  b <= a.
  void fgtd(Register rd, FRegister rs1, FRegister rs2) { fltd(rd, rs2, rs1); }
  void fged(Register rd, FRegister rs1, FRegister rs2) { fled(rd, rs2, rs1); }
void fclassd(Register rd, FRegister rs1);
// int32_t <- double
void fcvtwd(Register rd, FRegister rs1, RoundingMode rounding = RNE);
@ -1018,6 +1022,9 @@ class Assembler : public MicroAssembler {
Register index);
void LoadSFromOffset(FRegister dest, Register base, int32_t offset);
void LoadDFromOffset(FRegister dest, Register base, int32_t offset);
  // Like LoadSFromOffset, but |offset| is relative to a tagged object
  // pointer, so the heap-object tag is subtracted first.
  void LoadSFieldFromOffset(FRegister dest, Register base, int32_t offset) {
    LoadSFromOffset(dest, base, offset - kHeapObjectTag);
  }
  // Like LoadDFromOffset, but |offset| is relative to a tagged object
  // pointer, so the heap-object tag is subtracted first.
  void LoadDFieldFromOffset(FRegister dest, Register base, int32_t offset) {
    LoadDFromOffset(dest, base, offset - kHeapObjectTag);
  }
@ -1043,6 +1050,9 @@ class Assembler : public MicroAssembler {
sx(ZR, address);
}
void StoreSToOffset(FRegister src, Register base, int32_t offset);
  // Like StoreSToOffset, but |offset| is relative to a tagged object
  // pointer, so the heap-object tag is subtracted first.
  void StoreSFieldToOffset(FRegister src, Register base, int32_t offset) {
    StoreSToOffset(src, base, offset - kHeapObjectTag);
  }
void StoreDToOffset(FRegister src, Register base, int32_t offset);
void StoreDFieldToOffset(FRegister src, Register base, int32_t offset) {
StoreDToOffset(src, base, offset - kHeapObjectTag);
@ -1182,6 +1192,7 @@ class Assembler : public MicroAssembler {
// Note: the function never clobbers TMP, TMP2 scratch registers.
void LoadImmediate(Register reg, intx_t imm);
void LoadSImmediate(FRegister reg, float imms);
void LoadDImmediate(FRegister reg, double immd);
void LoadQImmediate(FRegister reg, simd128_value_t immq);

View file

@ -1394,6 +1394,17 @@ void Assembler::MoveImmediate(const Address& dst, const Immediate& imm) {
}
}
// Loads the single-precision float |immediate| into XMM register |dst|.
void Assembler::LoadSImmediate(FpuRegister dst, float immediate) {
  int32_t bits = bit_cast<int32_t>(immediate);
  if (bits == 0) {
    // All-zero bit pattern (+0.0f): clear the register, no memory load.
    xorps(dst, dst);
  } else {
    // Load the 32-bit pattern from the object pool through PP.
    intptr_t index = object_pool_builder().FindImmediate(bits);
    LoadUnboxedSingle(
        dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
  }
}
void Assembler::LoadDImmediate(FpuRegister dst, double immediate) {
int64_t bits = bit_cast<int64_t>(immediate);
if (bits == 0) {

View file

@ -787,6 +787,7 @@ class Assembler : public AssemblerBase {
void LoadImmediate(Register reg, int32_t immediate) {
LoadImmediate(reg, Immediate(immediate));
}
void LoadSImmediate(FpuRegister dst, float immediate);
void LoadDImmediate(FpuRegister dst, double immediate);
void LoadQImmediate(FpuRegister dst, simd128_value_t immediate);
@ -1082,6 +1083,9 @@ class Assembler : public AssemblerBase {
movq(Address(base, offset), src);
}
  // Loads a 32-bit float from [base + offset] into |dst| (MOVSS).
  void LoadUnboxedSingle(FpuRegister dst, Register base, int32_t offset) {
    movss(dst, Address(base, offset));
  }
  // Loads a 64-bit double from [base + offset] into |dst| (MOVSD).
  void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
    movsd(dst, Address(base, offset));
  }

View file

@ -136,6 +136,8 @@ class BlockBuilder : public ValueObject {
entry_->AsJoinEntry()->InsertPhi(phi);
}
Instruction* last() const { return current_; }
private:
static CompileType* TypeForRepresentation(Representation rep) {
switch (rep) {

View file

@ -872,6 +872,16 @@ void ConstantPropagator::VisitBooleanNegate(BooleanNegateInstr* instr) {
}
}
// Constant folding for BoolToInt is not implemented yet; conservatively
// treat the result as non-constant.
void ConstantPropagator::VisitBoolToInt(BoolToIntInstr* instr) {
  // TODO(riscv)
  SetValue(instr, non_constant_);
}
// Constant folding for IntToBool is not implemented yet; conservatively
// treat the result as non-constant.
void ConstantPropagator::VisitIntToBool(IntToBoolInstr* instr) {
  // TODO(riscv)
  SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitInstanceOf(InstanceOfInstr* instr) {
Definition* def = instr->value()->definition();
const Object& value = def->constant_value();
@ -1216,8 +1226,27 @@ void ConstantPropagator::VisitUnarySmiOp(UnarySmiOpInstr* instr) {
VisitUnaryIntegerOp(instr);
}
// Returns true if |value| is a numeric constant (Integer or Double).
static bool IsIntegerOrDouble(const Object& value) {
  return value.IsInteger() || value.IsDouble();
}
// Returns the double value of a numeric constant; Integers are converted
// through AsDoubleValue().
static double ToDouble(const Object& value) {
  return value.IsInteger() ? Integer::Cast(value).AsDoubleValue()
                           : Double::Cast(value).value();
}
// Folds a unary floating-point op when its input is a known Double
// constant; anything else lattice-joins to non-constant.
void ConstantPropagator::VisitUnaryDoubleOp(UnaryDoubleOpInstr* instr) {
  const Object& value = instr->value()->definition()->constant_value();
  if (IsUnknown(value)) {
    // Input has no propagated value yet; revisit later.
    return;
  }
  if (value.IsDouble()) {
    // The instruction's representation selects float32 vs float64 folding.
    const double result_val = Evaluator::EvaluateUnaryDoubleOp(
        ToDouble(value), instr->op_kind(), instr->representation());
    const Double& result = Double::ZoneHandle(Double::NewCanonical(result_val));
    SetValue(instr, result);
    return;
  }
  SetValue(instr, non_constant_);
}
@ -1273,11 +1302,6 @@ void ConstantPropagator::VisitDoubleToSmi(DoubleToSmiInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitDoubleToDouble(DoubleToDoubleInstr* instr) {
// TODO(kmillikin): Handle conversion.
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitDoubleToFloat(DoubleToFloatInstr* instr) {
// TODO(kmillikin): Handle conversion.
SetValue(instr, non_constant_);
@ -1288,6 +1312,11 @@ void ConstantPropagator::VisitFloatToDouble(FloatToDoubleInstr* instr) {
SetValue(instr, non_constant_);
}
// Constant folding for FloatCompare is not implemented yet; conservatively
// treat the result as non-constant.
void ConstantPropagator::VisitFloatCompare(FloatCompareInstr* instr) {
  // TODO(riscv)
  SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitInvokeMathCFunction(
InvokeMathCFunctionInstr* instr) {
// TODO(kmillikin): Handle conversion.
@ -1303,6 +1332,25 @@ void ConstantPropagator::VisitExtractNthOutput(ExtractNthOutputInstr* instr) {
SetValue(instr, non_constant_);
}
// UnboxLane(BoxLanes(x0..xn), i) folds to the constant feeding lane i,
// when that lane's input has a known constant value.
void ConstantPropagator::VisitUnboxLane(UnboxLaneInstr* instr) {
  if (BoxLanesInstr* box = instr->value()->definition()->AsBoxLanes()) {
    const Object& value =
        box->InputAt(instr->lane())->definition()->constant_value();
    if (IsUnknown(value)) {
      // Lane input has no propagated value yet; revisit later.
      return;
    }
    SetValue(instr, value);
    return;
  }
  SetValue(instr, non_constant_);
}
// Constant folding for BoxLanes is not implemented yet; conservatively
// treat the result as non-constant.
void ConstantPropagator::VisitBoxLanes(BoxLanesInstr* instr) {
  // TODO(riscv)
  SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitConstant(ConstantInstr* instr) {
SetValue(instr, instr->value());
}
@ -1321,15 +1369,6 @@ void ConstantPropagator::VisitMaterializeObject(MaterializeObjectInstr* instr) {
UNREACHABLE();
}
static bool IsIntegerOrDouble(const Object& value) {
return value.IsInteger() || value.IsDouble();
}
static double ToDouble(const Object& value) {
return value.IsInteger() ? Integer::Cast(value).AsDoubleValue()
: Double::Cast(value).value();
}
void ConstantPropagator::VisitBinaryDoubleOp(BinaryDoubleOpInstr* instr) {
const Object& left = instr->left()->definition()->constant_value();
const Object& right = instr->right()->definition()->constant_value();
@ -1343,8 +1382,9 @@ void ConstantPropagator::VisitBinaryDoubleOp(BinaryDoubleOpInstr* instr) {
const bool both_are_integers = left.IsInteger() && right.IsInteger();
if (IsIntegerOrDouble(left) && IsIntegerOrDouble(right) &&
!both_are_integers) {
const double result_val = Evaluator::EvaluateDoubleOp(
ToDouble(left), ToDouble(right), instr->op_kind());
const double result_val = Evaluator::EvaluateBinaryDoubleOp(
ToDouble(left), ToDouble(right), instr->op_kind(),
instr->representation());
const Double& result = Double::ZoneHandle(Double::NewCanonical(result_val));
SetValue(instr, result);
return;
@ -1384,11 +1424,6 @@ void ConstantPropagator::VisitSimdOp(SimdOpInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitMathUnary(MathUnaryInstr* instr) {
// TODO(kmillikin): Handle Math's unary operations (sqrt, cos, sin).
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitMathMinMax(MathMinMaxInstr* instr) {
// TODO(srdjan): Handle min and max.
SetValue(instr, non_constant_);

View file

@ -189,20 +189,91 @@ IntegerPtr Evaluator::BitLengthEvaluate(const Object& value,
return result.ptr();
}
double Evaluator::EvaluateDoubleOp(const double left,
const double right,
Token::Kind token_kind) {
switch (token_kind) {
case Token::kADD:
return left + right;
case Token::kSUB:
return left - right;
case Token::kMUL:
return left * right;
case Token::kDIV:
return left / right;
default:
UNREACHABLE();
// Constant-folds a unary floating point operation. For kUnboxedFloat the
// operand is first narrowed to float so the folded value matches the
// single-precision arithmetic the generated code performs at runtime.
double Evaluator::EvaluateUnaryDoubleOp(const double value,
                                        Token::Kind token_kind,
                                        Representation representation) {
  // The different set of operations for float32 and float64 is due to the
  // different set of operations made available by dart:core.double and
  // dart:typed_data.Float64x2 versus dart:typed_data.Float32x4.
  if (representation == kUnboxedFloat) {
    const float narrowed = static_cast<float>(value);
    switch (token_kind) {
      case Token::kABS:
        return fabsf(narrowed);
      case Token::kNEGATE:
        return -narrowed;
      case Token::kRECIPROCAL:
        return 1.0f / narrowed;
      case Token::kRECIPROCAL_SQRT:
        return sqrtf(1.0f / narrowed);
      case Token::kSQRT:
        return sqrtf(narrowed);
      case Token::kSQUARE:
        return narrowed * narrowed;
      default:
        UNREACHABLE();
    }
  }
  ASSERT(representation == kUnboxedDouble);
  switch (token_kind) {
    case Token::kABS:
      return fabs(value);
    case Token::kNEGATE:
      return -value;
    case Token::kSQRT:
      return sqrt(value);
    case Token::kSQUARE:
      return value * value;
    case Token::kTRUNCATE:
      return trunc(value);
    case Token::kFLOOR:
      return floor(value);
    case Token::kCEILING:
      return ceil(value);
    default:
      UNREACHABLE();
  }
}
// Constant-folds a binary floating point operation. For kUnboxedFloat both
// operands are first narrowed to float so the folded value matches the
// single-precision arithmetic the generated code performs at runtime.
double Evaluator::EvaluateBinaryDoubleOp(const double left,
                                         const double right,
                                         Token::Kind token_kind,
                                         Representation representation) {
  if (representation == kUnboxedFloat) {
    const float lhs = static_cast<float>(left);
    const float rhs = static_cast<float>(right);
    switch (token_kind) {
      case Token::kADD:
        return lhs + rhs;
      case Token::kSUB:
        return lhs - rhs;
      case Token::kMUL:
        return lhs * rhs;
      case Token::kDIV:
        return lhs / rhs;
      case Token::kMIN:
        return fminf(lhs, rhs);
      case Token::kMAX:
        return fmaxf(lhs, rhs);
      default:
        UNREACHABLE();
    }
  }
  ASSERT(representation == kUnboxedDouble);
  switch (token_kind) {
    case Token::kADD:
      return left + right;
    case Token::kSUB:
      return left - right;
    case Token::kMUL:
      return left * right;
    case Token::kDIV:
      return left / right;
    case Token::kMIN:
      return fmin(left, right);
    case Token::kMAX:
      return fmax(left, right);
    default:
      UNREACHABLE();
  }
}

View file

@ -44,10 +44,16 @@ class Evaluator : public AllStatic {
Representation representation,
Thread* thread);
// Evaluates a unary double operation and returns the result.
static double EvaluateUnaryDoubleOp(const double value,
Token::Kind token_kind,
Representation representation);
// Evaluates a binary double operation and returns the result.
static double EvaluateDoubleOp(const double left,
const double right,
Token::Kind token_kind);
static double EvaluateBinaryDoubleOp(const double left,
const double right,
Token::Kind token_kind,
Representation representation);
// Returns whether the value is an int64, and returns the int64 value
// through the result parameter.

View file

@ -220,6 +220,7 @@ bool FlowGraph::IsConstantRepresentable(const Object& value,
case kUnboxedInt64:
return value.IsInteger();
case kUnboxedFloat:
case kUnboxedDouble:
return value.IsInteger() || value.IsDouble();
@ -238,11 +239,13 @@ Definition* FlowGraph::TryCreateConstantReplacementFor(Definition* op,
return op;
}
if (representation == kUnboxedDouble && value.IsInteger()) {
// Convert the boxed constant from int to double.
if (((representation == kUnboxedFloat) ||
(representation == kUnboxedDouble)) &&
value.IsInteger()) {
// Convert the boxed constant from int to float/double.
return GetConstant(Double::Handle(Double::NewCanonical(
Integer::Cast(value).AsDoubleValue())),
kUnboxedDouble);
representation);
}
return GetConstant(value, representation);

View file

@ -395,6 +395,9 @@ static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
case kUnboxedUint32:
src_kind = CatchEntryMove::SourceKind::kUint32Slot;
break;
case kUnboxedFloat:
src_kind = CatchEntryMove::SourceKind::kFloatSlot;
break;
case kUnboxedDouble:
src_kind = CatchEntryMove::SourceKind::kDoubleSlot;
break;

View file

@ -547,7 +547,8 @@ Definition* Definition::OriginalDefinitionIgnoreBoxingAndConstraints() {
while (true) {
Definition* orig;
if (def->IsConstraint() || def->IsBox() || def->IsUnbox() ||
def->IsIntConverter()) {
def->IsIntConverter() || def->IsFloatToDouble() ||
def->IsDoubleToFloat()) {
orig = def->InputAt(0)->definition();
} else {
orig = def->OriginalDefinition();
@ -2131,32 +2132,26 @@ static Definition* CanonicalizeCommutativeDoubleArithmetic(Token::Kind op,
}
Definition* DoubleToFloatInstr::Canonicalize(FlowGraph* flow_graph) {
#ifdef DEBUG
  // Must only be used in Float32 StoreIndexedInstr, FloatToDoubleInstr,
  // Phis introduce by load forwarding, or MaterializeObject for
  // eliminated Float32 array.
  ASSERT(env_use_list() == NULL);
  for (Value* use = input_use_list(); use != NULL; use = use->next_use()) {
    ASSERT(use->instruction()->IsPhi() ||
           use->instruction()->IsFloatToDouble() ||
           (use->instruction()->IsStoreIndexed() &&
            (use->instruction()->AsStoreIndexed()->class_id() ==
             kTypedDataFloat32ArrayCid)) ||
           (use->instruction()->IsMaterializeObject() &&
            (use->instruction()->AsMaterializeObject()->cls().id() ==
             kTypedDataFloat32ArrayCid)));
  }
#endif
  // Remove dead conversions.
  if (!HasUses()) return NULL;
  if (value()->definition()->IsFloatToDouble()) {
    // D2F(F2D(v)) == v: widening then narrowing returns the original float.
    return value()->definition()->AsFloatToDouble()->value()->definition();
  }
  if (value()->BindsToConstant()) {
    // Fold a constant input: narrow it to float precision at compile time.
    double narrowed_val =
        static_cast<float>(Double::Cast(value()->BoundConstant()).value());
    return flow_graph->GetConstant(
        Double::ZoneHandle(Double::NewCanonical(narrowed_val)), kUnboxedFloat);
  }
  return this;
}
Definition* FloatToDoubleInstr::Canonicalize(FlowGraph* flow_graph) {
  // Remove dead conversions. (The old unconditional `return HasUses() ?
  // this : NULL;` made the folding below unreachable, so it is dropped.)
  if (!HasUses()) return NULL;
  if (value()->BindsToConstant()) {
    // Widening float -> double is exact, so a constant input folds to the
    // same constant at double representation.
    return flow_graph->GetConstant(value()->BoundConstant(), kUnboxedDouble);
  }
  return this;
}
Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) {
@ -2176,11 +2171,11 @@ Definition* BinaryDoubleOpInstr::Canonicalize(FlowGraph* flow_graph) {
if ((op_kind() == Token::kMUL) &&
(left()->definition() == right()->definition())) {
MathUnaryInstr* math_unary = new MathUnaryInstr(
MathUnaryInstr::kDoubleSquare, new Value(left()->definition()),
DeoptimizationTarget());
flow_graph->InsertBefore(this, math_unary, env(), FlowGraph::kValue);
return math_unary;
UnaryDoubleOpInstr* square = new UnaryDoubleOpInstr(
Token::kSQUARE, new Value(left()->definition()), DeoptimizationTarget(),
speculative_mode_, representation());
flow_graph->InsertBefore(this, square, env(), FlowGraph::kValue);
return square;
}
return this;
@ -2621,13 +2616,6 @@ Definition* ConstantInstr::Canonicalize(FlowGraph* flow_graph) {
return HasUses() ? this : NULL;
}
// A math unary instruction has a side effect (exception
// thrown) if the argument is not a number.
// TODO(srdjan): eliminate if has no uses and input is guaranteed to be number.
Definition* MathUnaryInstr::Canonicalize(FlowGraph* flow_graph) {
return this;
}
bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
const Slot& field,
Object* result) {
@ -2992,12 +2980,30 @@ Definition* BoxInstr::Canonicalize(FlowGraph* flow_graph) {
if ((unbox_defn != NULL) &&
(unbox_defn->representation() == from_representation()) &&
(unbox_defn->value()->Type()->ToCid() == Type()->ToCid())) {
if (from_representation() == kUnboxedFloat) {
// This is a narrowing conversion.
return this;
}
return unbox_defn->value()->definition();
}
return this;
}
// BoxLanes is pure construction: remove it entirely when unused.
Definition* BoxLanesInstr::Canonicalize(FlowGraph* flow_graph) {
  return HasUses() ? this : NULL;
}
Definition* UnboxLaneInstr::Canonicalize(FlowGraph* flow_graph) {
  // Remove dead lane extractions.
  if (!HasUses()) return NULL;
  // UnboxLane(BoxLanes(x0..xn), i) == xi.
  if (BoxLanesInstr* box = value()->definition()->AsBoxLanes()) {
    return box->InputAt(lane())->definition();
  }
  return this;
}
bool BoxIntegerInstr::ValueFitsSmi() const {
Range* range = value()->definition()->range();
return RangeUtils::Fits(range, RangeBoundary::kRangeBoundarySmi);
@ -3054,11 +3060,28 @@ Definition* BoxInt64Instr::Canonicalize(FlowGraph* flow_graph) {
Definition* UnboxInstr::Canonicalize(FlowGraph* flow_graph) {
if (!HasUses() && !CanDeoptimize()) return NULL;
// Fold away Unbox<rep>(Box<rep>(v)).
BoxInstr* box_defn = value()->definition()->AsBox();
if ((box_defn != NULL) &&
(box_defn->from_representation() == representation())) {
return box_defn->value()->definition();
if (box_defn != NULL) {
// Fold away Unbox<rep>(Box<rep>(v)).
if (box_defn->from_representation() == representation()) {
return box_defn->value()->definition();
}
if ((box_defn->from_representation() == kUnboxedDouble) &&
(representation() == kUnboxedFloat)) {
Definition* replacement = new DoubleToFloatInstr(
box_defn->value()->CopyWithType(), DeoptId::kNone);
flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue);
return replacement;
}
if ((box_defn->from_representation() == kUnboxedFloat) &&
(representation() == kUnboxedDouble)) {
Definition* replacement = new FloatToDoubleInstr(
box_defn->value()->CopyWithType(), DeoptId::kNone);
flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue);
return replacement;
}
}
if (representation() == kUnboxedDouble && value()->BindsToConstant()) {
@ -3073,6 +3096,22 @@ Definition* UnboxInstr::Canonicalize(FlowGraph* flow_graph) {
}
}
if (representation() == kUnboxedFloat && value()->BindsToConstant()) {
const Object& val = value()->BoundConstant();
if (val.IsInteger()) {
double narrowed_val =
static_cast<float>(Integer::Cast(val).AsDoubleValue());
return flow_graph->GetConstant(
Double::ZoneHandle(Double::NewCanonical(narrowed_val)),
kUnboxedFloat);
} else if (val.IsDouble()) {
double narrowed_val = static_cast<float>(Double::Cast(val).value());
return flow_graph->GetConstant(
Double::ZoneHandle(Double::NewCanonical(narrowed_val)),
kUnboxedFloat);
}
}
return this;
}
@ -6431,19 +6470,6 @@ const RuntimeEntry& InvokeMathCFunctionInstr::TargetFunction() const {
return kLibcPowRuntimeEntry;
}
const char* MathUnaryInstr::KindToCString(MathUnaryKind kind) {
switch (kind) {
case kIllegal:
return "illegal";
case kSqrt:
return "sqrt";
case kDoubleSquare:
return "double-square";
}
UNREACHABLE();
return "";
}
TruncDivModInstr::TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id)
: TemplateDefinition(deopt_id) {
SetInputAt(0, lhs);

View file

@ -451,6 +451,8 @@ struct InstrAttrs {
M(LoadStaticField, _) \
M(StoreStaticField, kNoGC) \
M(BooleanNegate, kNoGC) \
M(BoolToInt, kNoGC) \
M(IntToBool, kNoGC) \
M(InstanceOf, _) \
M(CreateArray, _) \
M(AllocateObject, _) \
@ -474,9 +476,9 @@ struct InstrAttrs {
M(Int64ToDouble, kNoGC) \
M(DoubleToInteger, _) \
M(DoubleToSmi, kNoGC) \
M(DoubleToDouble, kNoGC) \
M(DoubleToFloat, kNoGC) \
M(FloatToDouble, kNoGC) \
M(FloatCompare, kNoGC) \
M(CheckClass, kNoGC) \
M(CheckClassId, kNoGC) \
M(CheckSmi, kNoGC) \
@ -487,7 +489,6 @@ struct InstrAttrs {
M(CheckEitherNonSmi, kNoGC) \
M(BinaryDoubleOp, kNoGC) \
M(DoubleTestOp, kNoGC) \
M(MathUnary, kNoGC) \
M(MathMinMax, kNoGC) \
M(Box, _) \
M(Unbox, kNoGC) \
@ -515,6 +516,8 @@ struct InstrAttrs {
M(TestSmi, kNoGC) \
M(TestCids, kNoGC) \
M(ExtractNthOutput, kNoGC) \
M(UnboxLane, kNoGC) \
M(BoxLanes, _) \
M(BinaryUint32Op, kNoGC) \
M(ShiftUint32Op, kNoGC) \
M(SpeculativeShiftUint32Op, kNoGC) \
@ -6251,6 +6254,58 @@ class BooleanNegateInstr : public TemplateDefinition<1, NoThrow> {
DISALLOW_COPY_AND_ASSIGN(BooleanNegateInstr);
};
// Converts a tagged boolean to an unboxed int32 mask: bool ? -1 : 0.
class BoolToIntInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit BoolToIntInstr(Value* value) {
    // Input must be a tagged (boxed) value.
    ASSERT(value->definition()->representation() == kTagged);
    SetInputAt(0, value);
  }

  DECLARE_INSTRUCTION(BoolToInt)
  virtual CompileType ComputeType() const;

  Value* value() const { return inputs_[0]; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kTagged;
  }
  virtual Representation representation() const { return kUnboxedInt32; }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

 private:
  DISALLOW_COPY_AND_ASSIGN(BoolToIntInstr);
};
// Converts an unboxed int32 to a tagged boolean: int == 0 ? false : true.
class IntToBoolInstr : public TemplateDefinition<1, NoThrow> {
 public:
  explicit IntToBoolInstr(Value* value) {
    // Input must already be unboxed to int32.
    ASSERT(value->definition()->representation() == kUnboxedInt32);
    SetInputAt(0, value);
  }

  DECLARE_INSTRUCTION(IntToBool)
  virtual CompileType ComputeType() const;

  Value* value() const { return inputs_[0]; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kUnboxedInt32;
  }
  virtual Representation representation() const { return kTagged; }

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual bool HasUnknownSideEffects() const { return false; }

 private:
  DISALLOW_COPY_AND_ASSIGN(IntToBoolInstr);
};
class InstanceOfInstr : public TemplateDefinition<3, Throws> {
public:
InstanceOfInstr(const InstructionSource& source,
@ -7490,60 +7545,6 @@ bool Definition::IsInt64Definition() {
IsBoxInt64() || IsUnboxInt64();
}
class MathUnaryInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
enum MathUnaryKind {
kIllegal,
kSqrt,
kDoubleSquare,
};
MathUnaryInstr(MathUnaryKind kind, Value* value, intptr_t deopt_id)
: TemplateDefinition(deopt_id), kind_(kind) {
SetInputAt(0, value);
}
Value* value() const { return inputs_[0]; }
MathUnaryKind kind() const { return kind_; }
virtual bool ComputeCanDeoptimize() const { return false; }
virtual Representation representation() const { return kUnboxedDouble; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return kUnboxedDouble;
}
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
ASSERT(idx == 0);
return kNotSpeculative;
}
virtual intptr_t DeoptimizationTarget() const {
// Direct access since this instruction cannot deoptimize, and the deopt-id
// was inherited from another instruction that could deoptimize.
return GetDeoptId();
}
DECLARE_INSTRUCTION(MathUnary)
virtual CompileType ComputeType() const;
virtual bool AttributesEqual(const Instruction& other) const {
return kind() == other.AsMathUnary()->kind();
}
Definition* Canonicalize(FlowGraph* flow_graph);
static const char* KindToCString(MathUnaryKind kind);
PRINT_OPERANDS_TO_SUPPORT
private:
const MathUnaryKind kind_;
DISALLOW_COPY_AND_ASSIGN(MathUnaryInstr);
};
// Calls into the runtime and performs a case-insensitive comparison of the
// UTF16 strings (i.e. TwoByteString or ExternalTwoByteString) located at
// str[lhs_index:lhs_index + length] and str[rhs_index:rhs_index + length].
@ -7663,11 +7664,15 @@ class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
Value* right,
intptr_t deopt_id,
const InstructionSource& source,
SpeculativeMode speculative_mode = kGuardInputs)
SpeculativeMode speculative_mode = kGuardInputs,
Representation representation = kUnboxedDouble)
: TemplateDefinition(source, deopt_id),
op_kind_(op_kind),
token_pos_(source.token_pos),
speculative_mode_(speculative_mode) {
speculative_mode_(speculative_mode),
representation_(representation) {
ASSERT((representation == kUnboxedFloat) ||
(representation == kUnboxedDouble));
SetInputAt(0, left);
SetInputAt(1, right);
}
@ -7681,11 +7686,11 @@ class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
virtual bool ComputeCanDeoptimize() const { return false; }
virtual Representation representation() const { return kUnboxedDouble; }
virtual Representation representation() const { return representation_; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT((idx == 0) || (idx == 1));
return kUnboxedDouble;
return representation_;
}
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
@ -7708,13 +7713,15 @@ class BinaryDoubleOpInstr : public TemplateDefinition<2, NoThrow, Pure> {
virtual bool AttributesEqual(const Instruction& other) const {
auto const other_bin_op = other.AsBinaryDoubleOp();
return (op_kind() == other_bin_op->op_kind()) &&
(speculative_mode_ == other_bin_op->speculative_mode_);
(speculative_mode_ == other_bin_op->speculative_mode_) &&
(representation_ == other_bin_op->representation_);
}
private:
const Token::Kind op_kind_;
const TokenPosition token_pos_;
const SpeculativeMode speculative_mode_;
const Representation representation_;
DISALLOW_COPY_AND_ASSIGN(BinaryDoubleOpInstr);
};
@ -8297,17 +8304,19 @@ class SpeculativeShiftUint32OpInstr : public ShiftIntegerOpInstr {
DISALLOW_COPY_AND_ASSIGN(SpeculativeShiftUint32OpInstr);
};
// Handles only NEGATE.
class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
UnaryDoubleOpInstr(Token::Kind op_kind,
Value* value,
intptr_t deopt_id,
SpeculativeMode speculative_mode = kGuardInputs)
SpeculativeMode speculative_mode = kGuardInputs,
Representation representation = kUnboxedDouble)
: TemplateDefinition(deopt_id),
op_kind_(op_kind),
speculative_mode_(speculative_mode) {
ASSERT(op_kind == Token::kNEGATE);
speculative_mode_(speculative_mode),
representation_(representation) {
ASSERT((representation == kUnboxedFloat) ||
(representation == kUnboxedDouble));
SetInputAt(0, value);
}
@ -8325,11 +8334,11 @@ class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
return GetDeoptId();
}
virtual Representation representation() const { return kUnboxedDouble; }
virtual Representation representation() const { return representation_; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return kUnboxedDouble;
return representation_;
}
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
@ -8337,7 +8346,8 @@ class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
}
virtual bool AttributesEqual(const Instruction& other) const {
return speculative_mode_ == other.AsUnaryDoubleOp()->speculative_mode_;
return (speculative_mode_ == other.AsUnaryDoubleOp()->speculative_mode_) &&
(representation_ == other.AsUnaryDoubleOp()->representation_);
}
PRINT_OPERANDS_TO_SUPPORT
@ -8345,6 +8355,7 @@ class UnaryDoubleOpInstr : public TemplateDefinition<1, NoThrow, Pure> {
private:
const Token::Kind op_kind_;
const SpeculativeMode speculative_mode_;
const Representation representation_;
DISALLOW_COPY_AND_ASSIGN(UnaryDoubleOpInstr);
};
@ -8574,51 +8585,6 @@ class DoubleToSmiInstr : public TemplateDefinition<1, NoThrow, Pure> {
DISALLOW_COPY_AND_ASSIGN(DoubleToSmiInstr);
};
class DoubleToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
DoubleToDoubleInstr(Value* value,
MethodRecognizer::Kind recognized_kind,
intptr_t deopt_id)
: TemplateDefinition(deopt_id), recognized_kind_(recognized_kind) {
ASSERT((recognized_kind == MethodRecognizer::kDoubleTruncateToDouble) ||
(recognized_kind == MethodRecognizer::kDoubleFloorToDouble) ||
(recognized_kind == MethodRecognizer::kDoubleCeilToDouble));
SetInputAt(0, value);
}
Value* value() const { return inputs_[0]; }
MethodRecognizer::Kind recognized_kind() const { return recognized_kind_; }
DECLARE_INSTRUCTION(DoubleToDouble)
virtual CompileType ComputeType() const;
virtual bool ComputeCanDeoptimize() const { return false; }
virtual Representation representation() const { return kUnboxedDouble; }
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return kUnboxedDouble;
}
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t idx) const {
ASSERT(idx == 0);
return kNotSpeculative;
}
virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
virtual bool AttributesEqual(const Instruction& other) const {
return other.AsDoubleToDouble()->recognized_kind() == recognized_kind();
}
private:
const MethodRecognizer::Kind recognized_kind_;
DISALLOW_COPY_AND_ASSIGN(DoubleToDoubleInstr);
};
class DoubleToFloatInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
DoubleToFloatInstr(Value* value,
@ -8691,6 +8657,42 @@ class FloatToDoubleInstr : public TemplateDefinition<1, NoThrow, Pure> {
DISALLOW_COPY_AND_ASSIGN(FloatToDoubleInstr);
};
// Compares two unboxed floats and produces an unboxed int32 mask:
// left op right ? -1 : 0.
class FloatCompareInstr : public TemplateDefinition<2, NoThrow, Pure> {
 public:
  FloatCompareInstr(Token::Kind op_kind, Value* left, Value* right)
      : op_kind_(op_kind) {
    SetInputAt(0, left);
    SetInputAt(1, right);
  }

  Value* left() const { return inputs_[0]; }
  Value* right() const { return inputs_[1]; }

  Token::Kind op_kind() const { return op_kind_; }

  DECLARE_INSTRUCTION(FloatCompare)

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  virtual Representation representation() const { return kUnboxedInt32; }

  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    return kUnboxedFloat;
  }

  // Pure instruction: equality (for CSE) is determined by the comparison op.
  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsFloatCompare()->op_kind() == op_kind();
  }

 private:
  const Token::Kind op_kind_;

  DISALLOW_COPY_AND_ASSIGN(FloatCompareInstr);
};
// TODO(sjindel): Replace with FFICallInstr.
class InvokeMathCFunctionInstr : public PureDefinition {
public:
@ -8803,6 +8805,136 @@ class ExtractNthOutputInstr : public TemplateDefinition<1, NoThrow, Pure> {
DISALLOW_COPY_AND_ASSIGN(ExtractNthOutputInstr);
};
// Extracts a single lane from a boxed SIMD value as an unboxed scalar.
// The input arrives tagged (the box); the output representation and the
// class id of the box are fixed at construction time.
class UnboxLaneInstr : public TemplateDefinition<1, NoThrow, Pure> {
 public:
  // |n| is the lane index, |definition_rep| the representation of the
  // extracted scalar, and |definition_cid| the class id of the boxed
  // SIMD value the lane is taken from.
  UnboxLaneInstr(Value* value,
                 intptr_t n,
                 Representation definition_rep,
                 intptr_t definition_cid)
      : lane_(n),
        definition_rep_(definition_rep),
        definition_cid_(definition_cid) {
    SetInputAt(0, value);
  }

  Value* value() const { return inputs_[0]; }

  DECLARE_INSTRUCTION(UnboxLane)

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }

  intptr_t lane() const { return lane_; }

  virtual Representation representation() const { return definition_rep_; }

  // The SIMD value is consumed in its boxed (tagged) form.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0);
    return kTagged;
  }

  // Equal when extracting the same lane in the same representation.
  // NOTE(review): definition_cid_ is not compared here — presumably the
  // representation is sufficient to disambiguate; confirm.
  virtual bool AttributesEqual(const Instruction& other) const {
    auto const other_split = other.AsUnboxLane();
    return (other_split->representation() == representation()) &&
           (other_split->lane() == lane());
  }

  Definition* Canonicalize(FlowGraph* flow_graph);

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const intptr_t lane_;
  const Representation definition_rep_;
  const intptr_t definition_cid_;

  DISALLOW_COPY_AND_ASSIGN(UnboxLaneInstr);
};
// Boxes individual unboxed lanes into a SIMD value: two unboxed doubles,
// or four unboxed floats or int32s. The counterpart of UnboxLaneInstr.
class BoxLanesInstr : public TemplateDefinition<4, NoThrow, Pure> {
 public:
  // Two-lane form: both inputs must already be unboxed doubles.
  BoxLanesInstr(Representation from_representation, Value* x, Value* y)
      : from_representation_(from_representation) {
    ASSERT(from_representation == kUnboxedDouble);
    ASSERT(x->definition()->representation() == from_representation);
    ASSERT(y->definition()->representation() == from_representation);
    SetInputAt(0, x);
    SetInputAt(1, y);
  }
  // Four-lane form: all inputs must be unboxed int32s or unboxed floats.
  BoxLanesInstr(Representation from_representation,
                Value* x,
                Value* y,
                Value* z,
                Value* w)
      : from_representation_(from_representation) {
    ASSERT((from_representation == kUnboxedInt32) ||
           (from_representation == kUnboxedFloat));
    ASSERT(x->definition()->representation() == from_representation);
    ASSERT(y->definition()->representation() == from_representation);
    ASSERT(z->definition()->representation() == from_representation);
    ASSERT(w->definition()->representation() == from_representation);
    SetInputAt(0, x);
    SetInputAt(1, y);
    SetInputAt(2, z);
    SetInputAt(3, w);
  }

  // Effective arity depends on the lane representation: 2 for doubles,
  // 4 for floats/int32s (the template parameter reserves the maximum, 4).
  intptr_t InputCount() const {
    switch (from_representation_) {
      case kUnboxedDouble:
        return 2;
      case kUnboxedFloat:
        return 4;
      case kUnboxedInt32:
        return 4;
      default:
        UNREACHABLE();
        return 0;
    }
  }

  Value* x() const { return inputs_[0]; }
  Value* y() const { return inputs_[1]; }
  // z() and w() are only valid for the four-lane forms.
  Value* z() const {
    ASSERT((from_representation() == kUnboxedInt32) ||
           (from_representation() == kUnboxedFloat));
    return inputs_[2];
  }
  Value* w() const {
    ASSERT((from_representation() == kUnboxedInt32) ||
           (from_representation() == kUnboxedFloat));
    return inputs_[3];
  }

  Representation from_representation() const { return from_representation_; }

  DECLARE_INSTRUCTION(BoxLanes)

  virtual CompileType ComputeType() const;

  virtual bool ComputeCanDeoptimize() const { return false; }
  virtual intptr_t DeoptimizationTarget() const { return DeoptId::kNone; }

  // Every lane input carries the same unboxed representation.
  virtual Representation RequiredInputRepresentation(intptr_t idx) const {
    ASSERT(idx == 0 || idx == 1 || idx == 2 || idx == 3);
    return from_representation();
  }

  virtual bool AttributesEqual(const Instruction& other) const {
    return other.AsBoxLanes()->from_representation() == from_representation();
  }

  Definition* Canonicalize(FlowGraph* flow_graph);

  virtual TokenPosition token_pos() const { return TokenPosition::kBox; }

  virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
    return kNotSpeculative;
  }

  PRINT_OPERANDS_TO_SUPPORT

 private:
  const Representation from_representation_;

  DISALLOW_COPY_AND_ASSIGN(BoxLanesInstr);
};
class TruncDivModInstr : public TemplateDefinition<2, NoThrow, Pure> {
public:
TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id);

View file

@ -698,11 +698,10 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
}
} else if (destination.IsFpuRegister()) {
const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0) &&
TargetCPUFeatures::neon_supported()) {
QRegister qdst = destination.fpu_reg();
__ veorq(qdst, qdst, qdst);
if (representation() == kUnboxedFloat) {
__ LoadSImmediate(EvenSRegisterOf(dst), Double::Cast(value_).value());
} else {
ASSERT(representation() == kUnboxedDouble);
ASSERT(tmp != kNoRegister);
__ LoadDImmediate(dst, Double::Cast(value_).value(), tmp);
}
@ -5704,33 +5703,6 @@ void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#undef DEFINE_EMIT
LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((kind() == MathUnaryInstr::kSqrt) ||
(kind() == MathUnaryInstr::kDoubleSquare));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresFpuRegister());
return summary;
}
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ vsqrtd(result, val);
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
__ vmuld(result, val, val);
} else {
UNREACHABLE();
}
}
LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -5883,9 +5855,22 @@ LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
}
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation() == kUnboxedDouble);
const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
__ vnegd(result, value);
switch (op_kind()) {
case Token::kNEGATE:
__ vnegd(result, value);
break;
case Token::kSQRT:
__ vsqrtd(result, value);
break;
case Token::kSQUARE:
__ vmuld(result, value, value);
break;
default:
UNREACHABLE();
}
}
LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -6000,16 +5985,6 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiTag(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNIMPLEMENTED();
return NULL;
}
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -6048,6 +6023,16 @@ void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ vcvtds(result, value);
}
// FloatCompareInstr is not expected to reach this backend (it is only
// implemented where SIMD ops are lowered to scalar compares).
LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  UNREACHABLE();
  return NULL;
}
void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();  // Never emitted for this backend.
}
LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((InputCount() == 1) || (InputCount() == 2));
@ -6298,6 +6283,26 @@ void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
@ -7577,6 +7582,26 @@ void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
}
LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;

View file

@ -625,6 +625,12 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
} else if (destination.IsFpuRegister()) {
const VRegister dst = destination.fpu_reg();
__ LoadDImmediate(dst, Double::Cast(value_).value());
if (representation() == kUnboxedFloat) {
__ LoadSImmediate(dst, Double::Cast(value_).value());
} else {
ASSERT(representation() == kUnboxedDouble);
__ LoadDImmediate(dst, Double::Cast(value_).value());
}
} else if (destination.IsDoubleStackSlot()) {
const intptr_t dest_offset = destination.ToStackSlotOffset();
if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) {
@ -4796,33 +4802,6 @@ void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#undef DEFINE_EMIT
LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((kind() == MathUnaryInstr::kSqrt) ||
(kind() == MathUnaryInstr::kDoubleSquare));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresFpuRegister());
return summary;
}
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
const VRegister val = locs()->in(0).fpu_reg();
const VRegister result = locs()->out(0).fpu_reg();
__ fsqrtd(result, val);
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
const VRegister val = locs()->in(0).fpu_reg();
const VRegister result = locs()->out(0).fpu_reg();
__ fmuld(result, val, val);
} else {
UNREACHABLE();
}
}
LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -4975,9 +4954,22 @@ LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
}
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation() == kUnboxedDouble);
const VRegister result = locs()->out(0).fpu_reg();
const VRegister value = locs()->in(0).fpu_reg();
__ fnegd(result, value);
switch (op_kind()) {
case Token::kNEGATE:
__ fnegd(result, value);
break;
case Token::kSQRT:
__ fsqrtd(result, value);
break;
case Token::kSQUARE:
__ fmuld(result, value, value);
break;
default:
UNREACHABLE();
}
}
LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -5126,16 +5118,6 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiTag(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNIMPLEMENTED();
return NULL;
}
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNIMPLEMENTED();
}
LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5170,6 +5152,16 @@ void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcvtds(result, value);
}
LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((InputCount() == 1) || (InputCount() == 2));
@ -5372,6 +5364,26 @@ void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
@ -6675,6 +6687,26 @@ void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Immediate(compiler::target::ObjectAlignment::kBoolValueMask));
}
LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;

View file

@ -421,18 +421,23 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
__ LoadObjectSafely(destination.reg(), value_);
}
} else if (destination.IsFpuRegister()) {
const double value_as_double = Double::Cast(value_).value();
uword addr = FindDoubleConstant(value_as_double);
if (addr == 0) {
__ pushl(EAX);
__ LoadObject(EAX, value_);
__ movsd(destination.fpu_reg(),
compiler::FieldAddress(EAX, Double::value_offset()));
__ popl(EAX);
} else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
__ xorps(destination.fpu_reg(), destination.fpu_reg());
if (representation() == kUnboxedFloat) {
__ LoadSImmediate(destination.fpu_reg(),
static_cast<float>(Double::Cast(value_).value()));
} else {
__ movsd(destination.fpu_reg(), compiler::Address::Absolute(addr));
const double value_as_double = Double::Cast(value_).value();
uword addr = FindDoubleConstant(value_as_double);
if (addr == 0) {
__ pushl(EAX);
__ LoadObject(EAX, value_);
__ movsd(destination.fpu_reg(),
compiler::FieldAddress(EAX, Double::value_offset()));
__ popl(EAX);
} else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
__ xorps(destination.fpu_reg(), destination.fpu_reg());
} else {
__ movsd(destination.fpu_reg(), compiler::Address::Absolute(addr));
}
}
} else if (destination.IsDoubleStackSlot()) {
const double value_as_double = Double::Cast(value_).value();
@ -4817,35 +4822,6 @@ void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#undef DEFINE_EMIT
LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((kind() == MathUnaryInstr::kSqrt) ||
(kind() == MathUnaryInstr::kDoubleSquare));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
if (kind() == MathUnaryInstr::kDoubleSquare) {
summary->set_out(0, Location::SameAsFirstInput());
} else {
summary->set_out(0, Location::RequiresFpuRegister());
}
return summary;
}
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
__ sqrtsd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
XmmRegister value_reg = locs()->in(0).fpu_reg();
__ mulsd(value_reg, value_reg);
ASSERT(value_reg == locs()->out(0).fpu_reg());
} else {
UNREACHABLE();
}
}
LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -5001,9 +4977,31 @@ LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
}
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(representation() == kUnboxedDouble);
XmmRegister value = locs()->in(0).fpu_reg();
ASSERT(locs()->out(0).fpu_reg() == value);
__ DoubleNegate(value);
switch (op_kind()) {
case Token::kNEGATE:
__ DoubleNegate(value);
break;
case Token::kSQRT:
__ sqrtsd(value, value);
break;
case Token::kSQUARE:
__ mulsd(value, value);
break;
case Token::kTRUNCATE:
__ roundsd(value, value, compiler::Assembler::kRoundToZero);
break;
case Token::kFLOOR:
__ roundsd(value, value, compiler::Assembler::kRoundDown);
break;
case Token::kCEILING:
__ roundsd(value, value, compiler::Assembler::kRoundUp);
break;
default:
UNREACHABLE();
}
}
LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -5126,35 +5124,6 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiTag(result);
}
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* result = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
result->set_in(0, Location::RequiresFpuRegister());
result->set_out(0, Location::RequiresFpuRegister());
return result;
}
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
XmmRegister value = locs()->in(0).fpu_reg();
XmmRegister result = locs()->out(0).fpu_reg();
switch (recognized_kind()) {
case MethodRecognizer::kDoubleTruncateToDouble:
__ roundsd(result, value, compiler::Assembler::kRoundToZero);
break;
case MethodRecognizer::kDoubleFloorToDouble:
__ roundsd(result, value, compiler::Assembler::kRoundDown);
break;
case MethodRecognizer::kDoubleCeilToDouble:
__ roundsd(result, value, compiler::Assembler::kRoundUp);
break;
default:
UNREACHABLE();
}
}
LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5185,6 +5154,16 @@ void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
}
LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((InputCount() == 1) || (InputCount() == 2));
@ -5411,6 +5390,26 @@ void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
@ -6662,6 +6661,26 @@ void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::target::ObjectAlignment::kBoolValueMask));
}
LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
return NULL;
}
void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;

View file

@ -484,7 +484,9 @@ void Definition::PrintTo(BaseTextBuffer* f) const {
range_->PrintTo(f);
}
if (type_ != NULL) {
if (representation() != kNoRepresentation && representation() != kTagged) {
f->Printf(" %s", RepresentationToCString(representation()));
} else if (type_ != NULL) {
f->AddString(" ");
type_->PrintTo(f);
}
@ -553,10 +555,6 @@ void ConstantInstr::PrintOperandsTo(BaseTextBuffer* f) const {
buffer[pos] = '\0';
f->Printf("#%s\\n...", buffer);
}
if (representation() != kNoRepresentation && representation() != kTagged) {
f->Printf(" %s", RepresentationToCString(representation()));
}
}
void ConstraintInstr::PrintOperandsTo(BaseTextBuffer* f) const {
@ -910,11 +908,6 @@ void AllocateUninitializedContextInstr::PrintOperandsTo(
TemplateAllocation::PrintOperandsTo(f);
}
void MathUnaryInstr::PrintOperandsTo(BaseTextBuffer* f) const {
f->Printf("'%s', ", MathUnaryInstr::KindToCString(kind()));
value()->PrintTo(f);
}
void TruncDivModInstr::PrintOperandsTo(BaseTextBuffer* f) const {
Definition::PrintOperandsTo(f);
}
@ -924,6 +917,15 @@ void ExtractNthOutputInstr::PrintOperandsTo(BaseTextBuffer* f) const {
Definition::PrintOperandsTo(f);
}
// Prints the default operand list followed by the lane index extracted.
void UnboxLaneInstr::PrintOperandsTo(BaseTextBuffer* f) const {
  Definition::PrintOperandsTo(f);
  f->Printf(", lane %" Pd, lane());
}
// No extra operands beyond the default (inputs-only) printing.
void BoxLanesInstr::PrintOperandsTo(BaseTextBuffer* f) const {
  Definition::PrintOperandsTo(f);
}
void UnaryIntegerOpInstr::PrintOperandsTo(BaseTextBuffer* f) const {
f->Printf("%s, ", Token::Str(op_kind()));
value()->PrintTo(f);

View file

@ -715,7 +715,12 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
}
} else if (destination.IsFpuRegister()) {
const FRegister dst = destination.fpu_reg();
__ LoadDImmediate(dst, Double::Cast(value_).value());
if (representation() == kUnboxedFloat) {
__ LoadSImmediate(dst, Double::Cast(value_).value());
} else {
ASSERT(representation() == kUnboxedDouble);
__ LoadDImmediate(dst, Double::Cast(value_).value());
}
} else if (destination.IsDoubleStackSlot()) {
const intptr_t dest_offset = destination.ToStackSlotOffset();
#if XLEN == 32
@ -1198,7 +1203,7 @@ static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
__ CompareImmediate(TMP, 0);
return NE;
case Token::kGT:
__ fltd(TMP, right, left);
__ fgtd(TMP, left, right);
__ CompareImmediate(TMP, 0);
return NE;
case Token::kLTE:
@ -1206,7 +1211,7 @@ static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
__ CompareImmediate(TMP, 0);
return NE;
case Token::kGTE:
__ fled(TMP, right, left);
__ fged(TMP, left, right);
__ CompareImmediate(TMP, 0);
return NE;
default:
@ -3363,9 +3368,11 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadDFieldFromOffset(result, temp, Double::value_offset());
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
UNIMPLEMENTED();
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
UNIMPLEMENTED();
break;
default:
@ -4434,6 +4441,16 @@ void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
}
#endif
case kUnboxedFloat: {
const FRegister result = locs()->out(0).fpu_reg();
__ SmiUntag(TMP, box);
#if XLEN == 32
__ fcvtsw(result, TMP);
#elif XLEN == 64
__ fcvtsl(result, TMP);
#endif
break;
}
case kUnboxedDouble: {
const FRegister result = locs()->out(0).fpu_reg();
__ SmiUntag(TMP, box);
@ -4808,21 +4825,53 @@ void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const FRegister left = locs()->in(0).fpu_reg();
const FRegister right = locs()->in(1).fpu_reg();
const FRegister result = locs()->out(0).fpu_reg();
switch (op_kind()) {
case Token::kADD:
__ faddd(result, left, right);
break;
case Token::kSUB:
__ fsubd(result, left, right);
break;
case Token::kMUL:
__ fmuld(result, left, right);
break;
case Token::kDIV:
__ fdivd(result, left, right);
break;
default:
UNREACHABLE();
if (representation() == kUnboxedDouble) {
switch (op_kind()) {
case Token::kADD:
__ faddd(result, left, right);
break;
case Token::kSUB:
__ fsubd(result, left, right);
break;
case Token::kMUL:
__ fmuld(result, left, right);
break;
case Token::kDIV:
__ fdivd(result, left, right);
break;
case Token::kMIN:
__ fmind(result, left, right);
break;
case Token::kMAX:
__ fmaxd(result, left, right);
break;
default:
UNREACHABLE();
}
} else {
ASSERT(representation() == kUnboxedFloat);
switch (op_kind()) {
case Token::kADD:
__ fadds(result, left, right);
break;
case Token::kSUB:
__ fsubs(result, left, right);
break;
case Token::kMUL:
__ fmuls(result, left, right);
break;
case Token::kDIV:
__ fdivs(result, left, right);
break;
case Token::kMIN:
__ fmins(result, left, right);
break;
case Token::kMAX:
__ fmaxs(result, left, right);
break;
default:
UNREACHABLE();
}
}
}
@ -4853,259 +4902,13 @@ Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
return kind() == Token::kEQ ? NOT_ZERO : ZERO;
}
// SIMD
#define DEFINE_EMIT(Name, Args) \
static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
PP_APPLY(PP_UNPACK, Args))
#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
V(Float32x4##Name, op##s) \
V(Float64x2##Name, op##d)
#define SIMD_OP_SIMPLE_BINARY(V) \
SIMD_OP_FLOAT_ARITH(V, Add, vadd) \
SIMD_OP_FLOAT_ARITH(V, Sub, vsub) \
SIMD_OP_FLOAT_ARITH(V, Mul, vmul) \
SIMD_OP_FLOAT_ARITH(V, Div, vdiv) \
SIMD_OP_FLOAT_ARITH(V, Min, vmin) \
SIMD_OP_FLOAT_ARITH(V, Max, vmax) \
V(Int32x4Add, vaddw) \
V(Int32x4Sub, vsubw) \
V(Int32x4BitAnd, vand) \
V(Int32x4BitOr, vorr) \
V(Int32x4BitXor, veor) \
V(Float32x4Equal, vceqs) \
V(Float32x4GreaterThan, vcgts) \
V(Float32x4GreaterThanOrEqual, vcges)
DEFINE_EMIT(SimdBinaryOp, (FRegister result, FRegister left, FRegister right)) {
UNIMPLEMENTED();
}
#define SIMD_OP_SIMPLE_UNARY(V) \
SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt) \
SIMD_OP_FLOAT_ARITH(V, Negate, vneg) \
SIMD_OP_FLOAT_ARITH(V, Abs, vabs) \
V(Float32x4Reciprocal, VRecps) \
V(Float32x4ReciprocalSqrt, VRSqrts)
DEFINE_EMIT(SimdUnaryOp, (FRegister result, FRegister value)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Simd32x4GetSignMask,
(Register out, FRegister value, Temp<Register> temp)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(
Float32x4FromDoubles,
(FRegister r, FRegister v0, FRegister v1, FRegister v2, FRegister v3)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(
Float32x4Clamp,
(FRegister result, FRegister value, FRegister lower, FRegister upper)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(
Float64x2Clamp,
(FRegister result, FRegister value, FRegister lower, FRegister upper)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Float32x4With,
(FRegister result, FRegister replacement, FRegister value)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, FRegister value)) {
// TODO(dartbug.com/30949) these operations are essentially nop and should
// not generate any code. They should be removed from the graph before
// code generation.
}
DEFINE_EMIT(SimdZero, (FRegister v)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Float64x2GetSignMask, (Register out, FRegister value)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Float64x2With,
(SameAsFirstInput, FRegister left, FRegister right)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(
Int32x4FromInts,
(FRegister result, Register v0, Register v1, Register v2, Register v3)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Int32x4FromBools,
(FRegister result,
Register v0,
Register v1,
Register v2,
Register v3,
Temp<Register> temp)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Int32x4GetFlag, (Register result, FRegister value)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Int32x4Select,
(FRegister out,
FRegister mask,
FRegister trueValue,
FRegister falseValue,
Temp<FRegister> temp)) {
UNIMPLEMENTED();
}
DEFINE_EMIT(Int32x4WithFlag,
(SameAsFirstInput, FRegister mask, Register flag)) {
UNIMPLEMENTED();
}
// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
// format:
//
// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
//
#define SIMD_OP_VARIANTS(CASE, ____) \
SIMD_OP_SIMPLE_BINARY(CASE) \
CASE(Float32x4ShuffleMix) \
CASE(Int32x4ShuffleMix) \
CASE(Float32x4NotEqual) \
CASE(Float32x4LessThan) \
CASE(Float32x4LessThanOrEqual) \
CASE(Float32x4Scale) \
CASE(Float64x2FromDoubles) \
CASE(Float64x2Scale) \
____(SimdBinaryOp) \
SIMD_OP_SIMPLE_UNARY(CASE) \
CASE(Float32x4GetX) \
CASE(Float32x4GetY) \
CASE(Float32x4GetZ) \
CASE(Float32x4GetW) \
CASE(Int32x4Shuffle) \
CASE(Float32x4Shuffle) \
CASE(Float32x4Splat) \
CASE(Float64x2GetX) \
CASE(Float64x2GetY) \
CASE(Float64x2Splat) \
CASE(Float64x2ToFloat32x4) \
CASE(Float32x4ToFloat64x2) \
____(SimdUnaryOp) \
CASE(Float32x4GetSignMask) \
CASE(Int32x4GetSignMask) \
____(Simd32x4GetSignMask) \
CASE(Float32x4FromDoubles) \
____(Float32x4FromDoubles) \
CASE(Float32x4Zero) \
CASE(Float64x2Zero) \
____(SimdZero) \
CASE(Float32x4Clamp) \
____(Float32x4Clamp) \
CASE(Float64x2Clamp) \
____(Float64x2Clamp) \
CASE(Float32x4WithX) \
CASE(Float32x4WithY) \
CASE(Float32x4WithZ) \
CASE(Float32x4WithW) \
____(Float32x4With) \
CASE(Float32x4ToInt32x4) \
CASE(Int32x4ToFloat32x4) \
____(Simd32x4ToSimd32x4) \
CASE(Float64x2GetSignMask) \
____(Float64x2GetSignMask) \
CASE(Float64x2WithX) \
CASE(Float64x2WithY) \
____(Float64x2With) \
CASE(Int32x4FromInts) \
____(Int32x4FromInts) \
CASE(Int32x4FromBools) \
____(Int32x4FromBools) \
CASE(Int32x4GetFlagX) \
CASE(Int32x4GetFlagY) \
CASE(Int32x4GetFlagZ) \
CASE(Int32x4GetFlagW) \
____(Int32x4GetFlag) \
CASE(Int32x4Select) \
____(Int32x4Select) \
CASE(Int32x4WithFlagX) \
CASE(Int32x4WithFlagY) \
CASE(Int32x4WithFlagZ) \
CASE(Int32x4WithFlagW) \
____(Int32x4WithFlag)
LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
switch (kind()) {
#define CASE(Name, ...) case k##Name:
#define EMIT(Name) \
return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
SIMD_OP_VARIANTS(CASE, EMIT)
#undef CASE
#undef EMIT
case kIllegalSimdOp:
UNREACHABLE();
break;
}
UNREACHABLE();
return NULL;
}
void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (kind()) {
#define CASE(Name, ...) case k##Name:
#define EMIT(Name) \
InvokeEmitter(compiler, this, &Emit##Name); \
break;
SIMD_OP_VARIANTS(CASE, EMIT)
#undef CASE
#undef EMIT
case kIllegalSimdOp:
UNREACHABLE();
break;
}
}
#undef DEFINE_EMIT
LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((kind() == MathUnaryInstr::kSqrt) ||
(kind() == MathUnaryInstr::kDoubleSquare));
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_out(0, Location::RequiresFpuRegister());
return summary;
}
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (kind() == MathUnaryInstr::kSqrt) {
const FRegister val = locs()->in(0).fpu_reg();
const FRegister result = locs()->out(0).fpu_reg();
__ fsqrtd(result, val);
} else if (kind() == MathUnaryInstr::kDoubleSquare) {
const FRegister val = locs()->in(0).fpu_reg();
const FRegister result = locs()->out(0).fpu_reg();
__ fmuld(result, val, val);
} else {
UNREACHABLE();
}
UNREACHABLE();
}
LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
@ -5249,7 +5052,53 @@ LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const FRegister result = locs()->out(0).fpu_reg();
const FRegister value = locs()->in(0).fpu_reg();
__ fnegd(result, value);
if (representation() == kUnboxedDouble) {
switch (op_kind()) {
case Token::kABS:
__ fabsd(result, value);
break;
case Token::kNEGATE:
__ fnegd(result, value);
break;
case Token::kSQRT:
__ fsqrtd(result, value);
break;
case Token::kSQUARE:
__ fmuld(result, value, value);
break;
default:
UNREACHABLE();
}
} else {
ASSERT(representation() == kUnboxedFloat);
switch (op_kind()) {
case Token::kABS:
__ fabss(result, value);
break;
case Token::kNEGATE:
__ fnegs(result, value);
break;
case Token::kRECIPROCAL:
__ li(TMP, 1);
__ fcvtsw(FTMP, TMP);
__ fdivs(result, FTMP, value);
break;
case Token::kRECIPROCAL_SQRT:
__ li(TMP, 1);
__ fcvtsw(FTMP, TMP);
__ fdivs(result, FTMP, value);
__ fsqrts(result, result);
break;
case Token::kSQRT:
__ fsqrts(result, value);
break;
case Token::kSQUARE:
__ fmuls(result, value, value);
break;
default:
UNREACHABLE();
}
}
}
LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
@ -5397,16 +5246,6 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ bne(TMP, TMP2, deopt);
}
// DoubleToDoubleInstr (truncate/floor/ceil) has no RISC-V implementation;
// these operations are expressed differently on this target.
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
  UNIMPLEMENTED();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// No code is ever generated for DoubleToDoubleInstr on RISC-V; reaching
// here indicates a flow-graph construction bug.
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNIMPLEMENTED();
}
LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5441,6 +5280,45 @@ void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ fcvtds(result, value);
}
// Scalar float comparison (produced by the SIMD lowering pass): two FPU
// inputs, one GPR output holding the 0 / -1 lane mask.
LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  summary->set_in(0, Location::RequiresFpuRegister());
  summary->set_in(1, Location::RequiresFpuRegister());
  summary->set_out(0, Location::RequiresRegister());
  return summary;
}
// Emits a single-precision compare whose integer result is 0 (false) or
// -1 (true), i.e. an all-ones lane mask suitable for Int32x4.
void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const FRegister left = locs()->in(0).fpu_reg();
  const FRegister right = locs()->in(1).fpu_reg();
  const Register result = locs()->out(0).reg();
  // Each RISC-V compare writes 1 when the relation holds, 0 otherwise.
  switch (op_kind()) {
    case Token::kEQ:
      __ feqs(result, left, right);
      break;
    case Token::kLT:
      __ flts(result, left, right);
      break;
    case Token::kLTE:
      __ fles(result, left, right);
      break;
    case Token::kGT:
      __ fgts(result, left, right);
      break;
    case Token::kGTE:
      __ fges(result, left, right);
      break;
    default:
      UNREACHABLE();
  }
  // Turn the 0/1 flag into the 0/-1 mask expected by the lowering.
  __ neg(result, result);
}
LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
ASSERT((InputCount() == 1) || (InputCount() == 2));
@ -5522,6 +5400,119 @@ void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// One boxed SIMD value in; a single lane out, in an FPU register for
// float/double lanes or a GPR for int32 lanes.
LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  const intptr_t kNumInputs = 1;
  LocationSummary* locs =
      new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
  locs->set_in(0, Location::RequiresRegister());
  switch (representation()) {
    case kUnboxedInt32:
      locs->set_out(0, Location::RequiresRegister());
      break;
    case kUnboxedFloat:
    case kUnboxedDouble:
      locs->set_out(0, Location::RequiresFpuRegister());
      break;
    default:
      UNREACHABLE();
  }
  return locs;
}
// Loads lane() of a boxed Float64x2/Float32x4/Int32x4 straight out of the
// heap object, sized according to the lane representation.
void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register box = locs()->in(0).reg();
  switch (representation()) {
    case kUnboxedDouble:
      __ fld(locs()->out(0).fpu_reg(),
             compiler::FieldAddress(
                 box, compiler::target::Float64x2::value_offset() +
                          lane() * sizeof(double)));
      break;
    case kUnboxedFloat:
      __ flw(locs()->out(0).fpu_reg(),
             compiler::FieldAddress(
                 box, compiler::target::Float32x4::value_offset() +
                          lane() * sizeof(float)));
      break;
    case kUnboxedInt32:
      __ lw(locs()->out(0).reg(),
            compiler::FieldAddress(
                box, compiler::target::Int32x4::value_offset() +
                         lane() * sizeof(int32_t)));
      break;
    default:
      UNREACHABLE();
  }
}
// One input per lane (2 doubles or 4 floats/int32s); allocation of the box
// may hit the slow path, hence kCallOnSlowPath.
LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
                                                    bool opt) const {
  const intptr_t kNumInputs = InputCount();
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, 0, LocationSummary::kCallOnSlowPath);
  Location lane_location;
  switch (from_representation()) {
    case kUnboxedInt32:
      lane_location = Location::RequiresRegister();
      break;
    case kUnboxedFloat:
    case kUnboxedDouble:
      lane_location = Location::RequiresFpuRegister();
      break;
    default:
      UNREACHABLE();
  }
  // InputCount() is 2 for double lanes and 4 otherwise, so a single loop
  // covers every representation.
  for (intptr_t i = 0; i < kNumInputs; i++) {
    summary->set_in(i, lane_location);
  }
  summary->set_out(0, Location::RequiresRegister());
  return summary;
}
// Allocates a fresh Float64x2/Float32x4/Int32x4 box (slow path on
// allocation failure) and stores each unboxed lane input into it.
void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  Register result = locs()->out(0).reg();
  switch (from_representation()) {
    case kUnboxedDouble:
      // TMP is used as the allocation temp register.
      BoxAllocationSlowPath::Allocate(compiler, this,
                                      compiler->float64x2_class(), result, TMP);
      for (intptr_t i = 0; i < 2; i++) {
        __ fsd(locs()->in(i).fpu_reg(),
               compiler::FieldAddress(
                   result, compiler::target::Float64x2::value_offset() +
                               i * sizeof(double)));
      }
      break;
    case kUnboxedFloat:
      BoxAllocationSlowPath::Allocate(compiler, this,
                                      compiler->float32x4_class(), result, TMP);
      for (intptr_t i = 0; i < 4; i++) {
        __ fsw(locs()->in(i).fpu_reg(),
               compiler::FieldAddress(
                   result, compiler::target::Float32x4::value_offset() +
                               i * sizeof(float)));
      }
      break;
    case kUnboxedInt32:
      BoxAllocationSlowPath::Allocate(compiler, this, compiler->int32x4_class(),
                                      result, TMP);
      for (intptr_t i = 0; i < 4; i++) {
        __ sw(locs()->in(i).reg(),
              compiler::FieldAddress(result,
                                     compiler::target::Int32x4::value_offset() +
                                         i * sizeof(int32_t)));
      }
      break;
    default:
      UNREACHABLE();
  }
}
LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
@ -7615,6 +7606,36 @@ void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ xori(result, input, compiler::target::ObjectAlignment::kBoolValueMask);
}
// One GPR in (a Bool object), one GPR out (0 / -1 lane mask); no call.
LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}
// Converts a Bool object to an Int32x4 lane mask: true -> -1, false -> 0.
void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register input = locs()->in(0).reg();
  const Register result = locs()->out(0).reg();
  __ LoadObject(TMP, Bool::True());
  __ xor_(TMP, TMP, input);   // TMP == 0 iff input is the true object.
  __ seqz(TMP, TMP);          // TMP = (input == true) ? 1 : 0.
  __ neg(result, TMP);        // result = input ? -1 : 0 (all-ones mask).
}
// One GPR in (0 / -1 lane mask), one GPR out (a Bool object); no call.
LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}
// Converts a lane mask (-1 = true, 0 = false) back into a Bool object.
// Relies on the heap layout where Bool::True() sits at a fixed offset from
// the null object and Bool::False() differs from it only in the bit at
// kBoolValueBitPosition, so the object address can be computed branch-free.
void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register input = locs()->in(0).reg();
  const Register result = locs()->out(0).reg();
  __ seqz(result, input);  // result = (input == 0) ? 1 : 0, i.e. 1 for false.
  __ slli(result, result, kBoolValueBitPosition);
  __ add(result, result, NULL_REG);
  __ addi(result, result, kTrueOffsetFromNull);
}
LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;

View file

@ -567,7 +567,12 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
__ LoadObject(destination.reg(), value_);
}
} else if (destination.IsFpuRegister()) {
__ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
if (representation() == kUnboxedFloat) {
__ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
} else {
ASSERT(representation() == kUnboxedDouble);
__ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
}
} else if (destination.IsDoubleStackSlot()) {
__ LoadDImmediate(FpuTMP, Double::Cast(value_).value());
__ movsd(LocationToStackSlotAddress(destination), FpuTMP);
@ -5043,35 +5048,6 @@ void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#undef DEFINE_EMIT
// Location constraints for sqrt / square of an unboxed double.
// Square is emitted in place (mulsd reg, reg), so its output must reuse
// the input register; sqrt can target any FPU register.
LocationSummary* MathUnaryInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  ASSERT((kind() == MathUnaryInstr::kSqrt) ||
         (kind() == MathUnaryInstr::kDoubleSquare));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  locs->set_in(0, Location::RequiresFpuRegister());
  locs->set_out(0, kind() == MathUnaryInstr::kDoubleSquare
                       ? Location::SameAsFirstInput()
                       : Location::RequiresFpuRegister());
  return locs;
}
// Emits sqrt or in-place square of an unboxed double.
void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  if (kind() == MathUnaryInstr::kSqrt) {
    __ sqrtsd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
  } else if (kind() == MathUnaryInstr::kDoubleSquare) {
    // Square multiplies the input register by itself; the location summary
    // pins the output to the same register.
    XmmRegister value_reg = locs()->in(0).fpu_reg();
    __ mulsd(value_reg, value_reg);
    ASSERT(value_reg == locs()->out(0).fpu_reg());
  } else {
    UNREACHABLE();
  }
}
LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -5129,14 +5105,39 @@ LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresFpuRegister());
summary->set_out(0, Location::SameAsFirstInput());
summary->set_out(0, Location::RequiresFpuRegister());
return summary;
}
// Emits a unary double operation. On this target only the double
// representation is supported (the float variants exist for the RISC-V
// scalar SIMD lowering).
void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  ASSERT(representation() == kUnboxedDouble);
  XmmRegister result = locs()->out(0).fpu_reg();
  XmmRegister value = locs()->in(0).fpu_reg();
  switch (op_kind()) {
    case Token::kNEGATE:
      __ DoubleNegate(result, value);
      break;
    case Token::kSQRT:
      __ sqrtsd(result, value);
      break;
    case Token::kSQUARE:
      // Copy first when result and value differ, then square in place.
      if (result != value) {
        __ movsd(result, value);
      }
      __ mulsd(result, value);
      break;
    // Truncate/floor/ceil map onto SSE4.1 roundsd with the matching
    // immediate rounding mode.
    case Token::kTRUNCATE:
      __ roundsd(result, value, compiler::Assembler::kRoundToZero);
      break;
    case Token::kFLOOR:
      __ roundsd(result, value, compiler::Assembler::kRoundDown);
      break;
    case Token::kCEILING:
      __ roundsd(result, value, compiler::Assembler::kRoundUp);
      break;
    default:
      UNREACHABLE();
  }
}
LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
@ -5361,40 +5362,6 @@ void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ SmiTag(result);
}
// Rounding conversions (truncate/floor/ceil) take one unboxed double and
// produce one; both live in FPU registers, no call involved.
LocationSummary* DoubleToDoubleInstr::MakeLocationSummary(Zone* zone,
                                                          bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  summary->set_in(0, Location::RequiresFpuRegister());
  summary->set_out(0, Location::RequiresFpuRegister());
  return summary;
}
// Emits roundsd with the rounding mode selected by the recognized method
// (truncate / floor / ceil).
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  XmmRegister value = locs()->in(0).fpu_reg();
  XmmRegister result = locs()->out(0).fpu_reg();
  if (value != result) {
    // Clear full register to avoid false dependency due to
    // a partial access to XMM register in roundsd instruction.
    __ xorps(result, result);
  }
  switch (recognized_kind()) {
    case MethodRecognizer::kDoubleTruncateToDouble:
      __ roundsd(result, value, compiler::Assembler::kRoundToZero);
      break;
    case MethodRecognizer::kDoubleFloorToDouble:
      __ roundsd(result, value, compiler::Assembler::kRoundDown);
      break;
    case MethodRecognizer::kDoubleCeilToDouble:
      __ roundsd(result, value, compiler::Assembler::kRoundUp);
      break;
    default:
      UNREACHABLE();
  }
}
LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -5425,6 +5392,16 @@ void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
}
// FloatCompareInstr is only created by the RISC-V SIMD lowering pass;
// it must never reach this backend.
LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  UNREACHABLE();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// Never emitted on this backend; see MakeLocationSummary above.
void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();
}
LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// Calling convention on x64 uses XMM0 and XMM1 to pass the first two
@ -5650,6 +5627,26 @@ void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// UnboxLaneInstr is only created by the RISC-V SIMD lowering pass; this
// backend uses real SIMD registers instead.
LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  UNREACHABLE();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// Never emitted on this backend; see MakeLocationSummary above.
void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();
}
// BoxLanesInstr is only created by the RISC-V SIMD lowering pass; this
// backend uses real SIMD registers instead.
LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
                                                    bool opt) const {
  UNREACHABLE();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// Never emitted on this backend; see MakeLocationSummary above.
void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();
}
LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
@ -7012,6 +7009,26 @@ void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::target::ObjectAlignment::kBoolValueMask));
}
// BoolToIntInstr is only created by the RISC-V SIMD lowering pass.
LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  UNREACHABLE();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// Never emitted on this backend; see MakeLocationSummary above.
void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();
}
// IntToBoolInstr is only created by the RISC-V SIMD lowering pass.
LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  UNREACHABLE();
  return nullptr;  // Prefer nullptr over NULL in modern C++.
}
// Never emitted on this backend; see MakeLocationSummary above.
void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNREACHABLE();
}
LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;

View file

@ -78,6 +78,7 @@ DEFINE_FLAG(int,
"Max. number of inlined calls per depth");
DEFINE_FLAG(bool, print_inlining_tree, false, "Print inlining tree");
DECLARE_FLAG(bool, enable_simd_inline);
DECLARE_FLAG(int, max_deoptimization_counter_threshold);
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
@ -807,7 +808,8 @@ static void ReplaceParameterStubs(Zone* zone,
for (intptr_t i = 0; i < defns->length(); ++i) {
ConstantInstr* constant = (*defns)[i]->AsConstant();
if (constant != nullptr && constant->HasUses()) {
constant->ReplaceUsesWith(caller_graph->GetConstant(constant->value()));
constant->ReplaceUsesWith(caller_graph->GetConstant(
constant->value(), constant->representation()));
}
}
@ -815,7 +817,8 @@ static void ReplaceParameterStubs(Zone* zone,
for (intptr_t i = 0; i < defns->length(); ++i) {
ConstantInstr* constant = (*defns)[i]->AsConstant();
if (constant != nullptr && constant->HasUses()) {
constant->ReplaceUsesWith(caller_graph->GetConstant(constant->value()));
constant->ReplaceUsesWith(caller_graph->GetConstant(
constant->value(), constant->representation()));
}
SpecialParameterInstr* param = (*defns)[i]->AsSpecialParameter();
@ -3537,6 +3540,585 @@ static bool CheckMask(Definition* definition, intptr_t* mask_ptr) {
return true;
}
// Rewrites recognized SIMD intrinsics into scalar per-lane operations for
// targets without unboxed SIMD support (currently RISC-V). The constructor
// creates a fresh FunctionEntryInstr for the inlined body; each helper then
// appends instructions after *last_. TryInline() returns true when the
// recognized method was fully lowered, in which case *entry_, *last_ and
// *result_ describe the replacement graph fragment.
class SimdLowering : public ValueObject {
 public:
  SimdLowering(FlowGraph* flow_graph,
               Instruction* call,
               GraphEntryInstr* graph_entry,
               FunctionEntryInstr** entry,
               Instruction** last,
               Definition** result)
      : flow_graph_(flow_graph),
        call_(call),
        graph_entry_(graph_entry),
        entry_(entry),
        last_(last),
        result_(result) {
    *entry_ = new (zone())
        FunctionEntryInstr(graph_entry_, flow_graph_->allocate_block_id(),
                           call_->GetBlock()->try_index(), call_->deopt_id());
    *last = *entry_;
  }

  // Lowers the given recognized method; returns false when the method is
  // not handled (caller falls back to the normal call).
  bool TryInline(MethodRecognizer::Kind kind) {
    switch (kind) {
      // ==== Int32x4 ====
      case MethodRecognizer::kInt32x4FromInts:
        UnboxScalar(0, kUnboxedInt32, 4);
        UnboxScalar(1, kUnboxedInt32, 4);
        UnboxScalar(2, kUnboxedInt32, 4);
        UnboxScalar(3, kUnboxedInt32, 4);
        Gather(4);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4FromBools:
        UnboxBool(0, 4);
        UnboxBool(1, 4);
        UnboxBool(2, 4);
        UnboxBool(3, 4);
        Gather(4);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4GetFlagX:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        IntToBool();
        Return(0);
        return true;
      case MethodRecognizer::kInt32x4GetFlagY:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        IntToBool();
        Return(1);
        return true;
      case MethodRecognizer::kInt32x4GetFlagZ:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        IntToBool();
        Return(2);
        return true;
      case MethodRecognizer::kInt32x4GetFlagW:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        IntToBool();
        Return(3);
        return true;
      case MethodRecognizer::kInt32x4WithFlagX:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        UnboxBool(1, 4);
        With(0);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4WithFlagY:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        UnboxBool(1, 4);
        With(1);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4WithFlagZ:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        UnboxBool(1, 4);
        With(2);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4WithFlagW:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        UnboxBool(1, 4);
        With(3);
        BoxVector(kUnboxedInt32, 4);
        return true;
      case MethodRecognizer::kInt32x4Shuffle: {
        Definition* mask_definition =
            call_->ArgumentAt(call_->ArgumentCount() - 1);
        intptr_t mask = 0;
        if (!CheckMask(mask_definition, &mask)) {
          return false;
        }
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        Shuffle(mask);
        BoxVector(kUnboxedInt32, 4);
        return true;
      }
      case MethodRecognizer::kInt32x4ShuffleMix: {
        Definition* mask_definition =
            call_->ArgumentAt(call_->ArgumentCount() - 1);
        intptr_t mask = 0;
        if (!CheckMask(mask_definition, &mask)) {
          return false;
        }
        UnboxVector(0, kUnboxedInt32, kMintCid, 4);
        UnboxVector(1, kUnboxedInt32, kMintCid, 4);
        ShuffleMix(mask);
        BoxVector(kUnboxedInt32, 4);
        return true;
      }
      case MethodRecognizer::kInt32x4GetSignMask:
      case MethodRecognizer::kInt32x4Select:
        // TODO(riscv)
        return false;

      // ==== Float32x4 ====
      case MethodRecognizer::kFloat32x4Abs:
        Float32x4Unary(Token::kABS);
        return true;
      case MethodRecognizer::kFloat32x4Negate:
        Float32x4Unary(Token::kNEGATE);
        return true;
      case MethodRecognizer::kFloat32x4Sqrt:
        Float32x4Unary(Token::kSQRT);
        return true;
      case MethodRecognizer::kFloat32x4Reciprocal:
        Float32x4Unary(Token::kRECIPROCAL);
        return true;
      case MethodRecognizer::kFloat32x4ReciprocalSqrt:
        Float32x4Unary(Token::kRECIPROCAL_SQRT);
        return true;
      case MethodRecognizer::kFloat32x4GetSignMask:
        // TODO(riscv)
        return false;
      case MethodRecognizer::kFloat32x4Equal:
        Float32x4Compare(Token::kEQ);
        return true;
      case MethodRecognizer::kFloat32x4GreaterThan:
        Float32x4Compare(Token::kGT);
        return true;
      case MethodRecognizer::kFloat32x4GreaterThanOrEqual:
        Float32x4Compare(Token::kGTE);
        return true;
      case MethodRecognizer::kFloat32x4LessThan:
        Float32x4Compare(Token::kLT);
        return true;
      case MethodRecognizer::kFloat32x4LessThanOrEqual:
        Float32x4Compare(Token::kLTE);
        return true;
      case MethodRecognizer::kFloat32x4Add:
        Float32x4Binary(Token::kADD);
        return true;
      case MethodRecognizer::kFloat32x4Sub:
        Float32x4Binary(Token::kSUB);
        return true;
      case MethodRecognizer::kFloat32x4Mul:
        Float32x4Binary(Token::kMUL);
        return true;
      case MethodRecognizer::kFloat32x4Div:
        Float32x4Binary(Token::kDIV);
        return true;
      case MethodRecognizer::kFloat32x4Min:
        Float32x4Binary(Token::kMIN);
        return true;
      case MethodRecognizer::kFloat32x4Max:
        Float32x4Binary(Token::kMAX);
        return true;
      case MethodRecognizer::kFloat32x4Scale:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        BinaryDoubleOp(Token::kMUL, kUnboxedFloat, 4);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4Splat:
        UnboxScalar(0, kUnboxedFloat, 4);
        Splat(4);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4WithX:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        With(0);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4WithY:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        With(1);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4WithZ:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        With(2);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4WithW:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        With(3);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4Zero:
        UnboxDoubleZero(kUnboxedFloat, 4);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4FromDoubles:
        UnboxScalar(0, kUnboxedFloat, 4);
        UnboxScalar(1, kUnboxedFloat, 4);
        UnboxScalar(2, kUnboxedFloat, 4);
        UnboxScalar(3, kUnboxedFloat, 4);
        Gather(4);
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4GetX:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        BoxScalar(0, kUnboxedFloat);
        return true;
      case MethodRecognizer::kFloat32x4GetY:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        BoxScalar(1, kUnboxedFloat);
        return true;
      case MethodRecognizer::kFloat32x4GetZ:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        BoxScalar(2, kUnboxedFloat);
        return true;
      case MethodRecognizer::kFloat32x4GetW:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        BoxScalar(3, kUnboxedFloat);
        return true;
      case MethodRecognizer::kFloat32x4Shuffle: {
        Definition* mask_definition =
            call_->ArgumentAt(call_->ArgumentCount() - 1);
        intptr_t mask = 0;
        if (!CheckMask(mask_definition, &mask)) {
          return false;
        }
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        Shuffle(mask);
        BoxVector(kUnboxedFloat, 4);
        return true;
      }
      case MethodRecognizer::kFloat32x4ShuffleMix: {
        Definition* mask_definition =
            call_->ArgumentAt(call_->ArgumentCount() - 1);
        intptr_t mask = 0;
        if (!CheckMask(mask_definition, &mask)) {
          return false;
        }
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
        UnboxVector(1, kUnboxedFloat, kDoubleCid, 4);
        ShuffleMix(mask);
        BoxVector(kUnboxedFloat, 4);
        return true;
      }

      // ==== Float64x2 ====
      case MethodRecognizer::kFloat64x2Abs:
        Float64x2Unary(Token::kABS);
        return true;
      case MethodRecognizer::kFloat64x2Negate:
        Float64x2Unary(Token::kNEGATE);
        return true;
      case MethodRecognizer::kFloat64x2Sqrt:
        Float64x2Unary(Token::kSQRT);
        return true;
      case MethodRecognizer::kFloat64x2Add:
        Float64x2Binary(Token::kADD);
        return true;
      case MethodRecognizer::kFloat64x2Sub:
        Float64x2Binary(Token::kSUB);
        return true;
      case MethodRecognizer::kFloat64x2Mul:
        Float64x2Binary(Token::kMUL);
        return true;
      case MethodRecognizer::kFloat64x2Div:
        Float64x2Binary(Token::kDIV);
        return true;
      case MethodRecognizer::kFloat64x2Min:
        Float64x2Binary(Token::kMIN);
        return true;
      case MethodRecognizer::kFloat64x2Max:
        Float64x2Binary(Token::kMAX);
        return true;
      case MethodRecognizer::kFloat64x2Scale:
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
        UnboxScalar(1, kUnboxedDouble, 2);
        BinaryDoubleOp(Token::kMUL, kUnboxedDouble, 2);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2Splat:
        UnboxScalar(0, kUnboxedDouble, 2);
        Splat(2);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2WithX:
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
        UnboxScalar(1, kUnboxedDouble, 2);
        With(0);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2WithY:
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
        UnboxScalar(1, kUnboxedDouble, 2);
        With(1);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2Zero:
        UnboxDoubleZero(kUnboxedDouble, 2);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2FromDoubles:
        UnboxScalar(0, kUnboxedDouble, 2);
        UnboxScalar(1, kUnboxedDouble, 2);
        Gather(2);
        BoxVector(kUnboxedDouble, 2);
        return true;
      case MethodRecognizer::kFloat64x2GetX:
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
        BoxScalar(0, kUnboxedDouble);
        return true;
      case MethodRecognizer::kFloat64x2GetY:
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
        BoxScalar(1, kUnboxedDouble);
        return true;

      // Mixed
      case MethodRecognizer::kFloat32x4ToFloat64x2: {
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4, 1);
        Float32x4ToFloat64x2();
        BoxVector(kUnboxedDouble, 2);
        return true;
      }
      case MethodRecognizer::kFloat64x2ToFloat32x4: {
        UnboxVector(0, kUnboxedDouble, kDoubleCid, 2, 1);
        Float64x2ToFloat32x4();
        BoxVector(kUnboxedFloat, 4);
        return true;
      }
      case MethodRecognizer::kInt32x4ToFloat32x4:
        UnboxVector(0, kUnboxedInt32, kMintCid, 4, 1);
        Int32x4ToFloat32x4();
        BoxVector(kUnboxedFloat, 4);
        return true;
      case MethodRecognizer::kFloat32x4ToInt32x4:
        UnboxVector(0, kUnboxedFloat, kDoubleCid, 4, 1);
        Float32x4ToInt32x4();
        BoxVector(kUnboxedInt32, 4);
        return true;

      default:
        return false;
    }
  }

 private:
  // Unbox arg 0, apply a unary float op lane-wise, re-box.
  void Float32x4Unary(Token::Kind op) {
    UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
    UnaryDoubleOp(op, kUnboxedFloat, 4);
    BoxVector(kUnboxedFloat, 4);
  }
  // Unbox args 0 and 1, apply a binary float op lane-wise, re-box.
  void Float32x4Binary(Token::Kind op) {
    UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
    UnboxVector(1, kUnboxedFloat, kDoubleCid, 4);
    BinaryDoubleOp(op, kUnboxedFloat, 4);
    BoxVector(kUnboxedFloat, 4);
  }
  // Lane-wise float comparison producing an Int32x4 mask.
  void Float32x4Compare(Token::Kind op) {
    UnboxVector(0, kUnboxedFloat, kDoubleCid, 4);
    UnboxVector(1, kUnboxedFloat, kDoubleCid, 4);
    FloatCompare(op);
    BoxVector(kUnboxedInt32, 4);
  }
  void Float64x2Unary(Token::Kind op) {
    UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
    UnaryDoubleOp(op, kUnboxedDouble, 2);
    BoxVector(kUnboxedDouble, 2);
  }
  void Float64x2Binary(Token::Kind op) {
    UnboxVector(0, kUnboxedDouble, kDoubleCid, 2);
    UnboxVector(1, kUnboxedDouble, kDoubleCid, 2);
    BinaryDoubleOp(op, kUnboxedDouble, 2);
    BoxVector(kUnboxedDouble, 2);
  }

  // Extracts the n lanes of argument i into in_[i][0..n-1].
  // `type_args` skips leading type arguments when indexing the call's args.
  void UnboxVector(intptr_t i,
                   Representation rep,
                   intptr_t cid,
                   intptr_t n,
                   intptr_t type_args = 0) {
    Definition* arg = call_->ArgumentAt(i + type_args);
    if (CompilerState::Current().is_aot()) {
      // Add a null check in case the arguments are known to be compatible
      // but possibly nullable. By inserting the null check, the unbox
      // instruction inserted later can be non-speculative.
      // NOTE(review): Symbols::SecondArg() is used for every argument
      // position — confirm the error message is acceptable for arg 0.
      arg = AddDefinition(new (zone()) CheckNullInstr(
          new (zone()) Value(arg), Symbols::SecondArg(), call_->deopt_id(),
          call_->source(), CheckNullInstr::kArgumentError));
    }
    for (intptr_t lane = 0; lane < n; lane++) {
      in_[i][lane] = AddDefinition(
          new (zone()) UnboxLaneInstr(new (zone()) Value(arg), lane, rep, cid));
    }
  }

  // Unboxes scalar argument i and replicates it into all n lanes of in_[i].
  void UnboxScalar(intptr_t i,
                   Representation rep,
                   intptr_t n,
                   intptr_t type_args = 0) {
    Definition* arg = call_->ArgumentAt(i + type_args);
    if (CompilerState::Current().is_aot()) {
      // Add a null check in case the arguments are known to be compatible
      // but possibly nullable. By inserting the null check, the unbox
      // instruction inserted later can be non-speculative.
      arg = AddDefinition(new (zone()) CheckNullInstr(
          new (zone()) Value(arg), Symbols::SecondArg(), call_->deopt_id(),
          call_->source(), CheckNullInstr::kArgumentError));
    }
    Definition* unbox = AddDefinition(
        UnboxInstr::Create(rep, new (zone()) Value(arg), DeoptId::kNone,
                           Instruction::kNotSpeculative));
    for (intptr_t lane = 0; lane < n; lane++) {
      in_[i][lane] = unbox;
    }
  }

  // Converts bool argument i into a 0/-1 mask replicated across n lanes.
  void UnboxBool(intptr_t i, intptr_t n) {
    Definition* unbox = AddDefinition(new (zone()) BoolToIntInstr(
        call_->ArgumentValueAt(i)->CopyWithType(zone())));
    for (intptr_t lane = 0; lane < n; lane++) {
      in_[i][lane] = unbox;
    }
  }

  // Fills all n result lanes with the unboxed constant 0.0.
  void UnboxDoubleZero(Representation rep, intptr_t n) {
    Definition* zero = flow_graph_->GetConstant(
        Double::ZoneHandle(Double::NewCanonical(0.0)), rep);
    for (intptr_t lane = 0; lane < n; lane++) {
      op_[lane] = zero;
    }
  }

  void UnaryDoubleOp(Token::Kind op, Representation rep, intptr_t n) {
    for (intptr_t lane = 0; lane < n; lane++) {
      op_[lane] = AddDefinition(new (zone()) UnaryDoubleOpInstr(
          op, new (zone()) Value(in_[0][lane]), call_->deopt_id(),
          Instruction::kNotSpeculative, rep));
    }
  }

  void BinaryDoubleOp(Token::Kind op, Representation rep, intptr_t n) {
    for (intptr_t lane = 0; lane < n; lane++) {
      op_[lane] = AddDefinition(new (zone()) BinaryDoubleOpInstr(
          op, new (zone()) Value(in_[0][lane]),
          new (zone()) Value(in_[1][lane]), call_->deopt_id(), call_->source(),
          Instruction::kNotSpeculative, rep));
    }
  }

  void FloatCompare(Token::Kind op) {
    for (intptr_t lane = 0; lane < 4; lane++) {
      op_[lane] = AddDefinition(
          new (zone()) FloatCompareInstr(op, new (zone()) Value(in_[0][lane]),
                                         new (zone()) Value(in_[1][lane])));
    }
  }

  // Result = arg 0 with lane i replaced by arg 1's scalar value.
  // NOTE(review): always copies 4 lanes; for 2-lane vectors in_[0][2..3]
  // were never written (the extra op_ entries are unused by BoxVector(,2),
  // but this reads indeterminate members) — confirm intended.
  void With(intptr_t i) {
    for (intptr_t lane = 0; lane < 4; lane++) {
      op_[lane] = in_[0][lane];
    }
    op_[i] = in_[1][0];
  }
  // Broadcasts the scalar in in_[0][0] into all n result lanes.
  void Splat(intptr_t n) {
    for (intptr_t lane = 0; lane < n; lane++) {
      op_[lane] = in_[0][0];
    }
  }
  // Builds the result from one scalar per argument (lane k <- arg k).
  void Gather(intptr_t n) {
    for (intptr_t lane = 0; lane < n; lane++) {
      op_[lane] = in_[lane][0];
    }
  }
  // 8-bit mask: 2 bits per destination lane select the source lane.
  void Shuffle(intptr_t mask) {
    op_[0] = in_[0][(mask >> 0) & 3];
    op_[1] = in_[0][(mask >> 2) & 3];
    op_[2] = in_[0][(mask >> 4) & 3];
    op_[3] = in_[0][(mask >> 6) & 3];
  }
  // Like Shuffle, but lanes 0-1 come from arg 0 and lanes 2-3 from arg 1.
  void ShuffleMix(intptr_t mask) {
    op_[0] = in_[0][(mask >> 0) & 3];
    op_[1] = in_[0][(mask >> 2) & 3];
    op_[2] = in_[1][(mask >> 4) & 3];
    op_[3] = in_[1][(mask >> 6) & 3];
  }

  // Widens lanes 0-1 of a Float32x4 to doubles.
  void Float32x4ToFloat64x2() {
    for (intptr_t lane = 0; lane < 2; lane++) {
      op_[lane] = AddDefinition(new (zone()) FloatToDoubleInstr(
          new (zone()) Value(in_[0][lane]), DeoptId::kNone));
    }
  }
  // Narrows both double lanes to floats; lanes 2-3 become 0.0.
  void Float64x2ToFloat32x4() {
    for (intptr_t lane = 0; lane < 2; lane++) {
      op_[lane] = AddDefinition(new (zone()) DoubleToFloatInstr(
          new (zone()) Value(in_[0][lane]), DeoptId::kNone));
    }
    Definition* zero = flow_graph_->GetConstant(
        Double::ZoneHandle(Double::NewCanonical(0.0)), kUnboxedFloat);
    op_[2] = zero;
    op_[3] = zero;
  }
  // Bit-level reinterpretation (not numeric conversion) of each lane.
  void Int32x4ToFloat32x4() {
    for (intptr_t lane = 0; lane < 4; lane++) {
      op_[lane] = AddDefinition(new (zone()) BitCastInstr(
          kUnboxedInt32, kUnboxedFloat, new (zone()) Value(in_[0][lane])));
    }
  }
  void Float32x4ToInt32x4() {
    for (intptr_t lane = 0; lane < 4; lane++) {
      op_[lane] = AddDefinition(new (zone()) BitCastInstr(
          kUnboxedFloat, kUnboxedInt32, new (zone()) Value(in_[0][lane])));
    }
  }
  // Converts each 0/-1 lane mask back into a Bool object.
  void IntToBool() {
    for (intptr_t lane = 0; lane < 4; lane++) {
      op_[lane] = AddDefinition(
          new (zone()) IntToBoolInstr(new (zone()) Value(in_[0][lane])));
    }
  }

  // Boxes the n computed lanes into a new SIMD value and finishes.
  void BoxVector(Representation rep, intptr_t n) {
    Definition* box;
    if (n == 2) {
      box = new (zone()) BoxLanesInstr(rep, new (zone()) Value(op_[0]),
                                       new (zone()) Value(op_[1]));
    } else {
      ASSERT(n == 4);
      box = new (zone()) BoxLanesInstr(
          rep, new (zone()) Value(op_[0]), new (zone()) Value(op_[1]),
          new (zone()) Value(op_[2]), new (zone()) Value(op_[3]));
    }
    Done(AddDefinition(box));
  }

  // Boxes a single extracted lane of arg 0 as the result and finishes.
  void BoxScalar(intptr_t lane, Representation rep) {
    Definition* box = BoxInstr::Create(rep, new (zone()) Value(in_[0][lane]));
    Done(AddDefinition(box));
  }

  // Finishes with an already-computed lane value as the result.
  void Return(intptr_t lane) { Done(op_[lane]); }

  void Done(Definition* result) {
    // InheritDeoptTarget also inherits environment (which may add 'entry' into
    // env_use_list()), so InheritDeoptTarget should be done only after decided
    // to inline.
    (*entry_)->InheritDeoptTarget(zone(), call_);
    *result_ = result;
  }

  // Appends def after *last_, attaching the call's environment when the
  // call can deoptimize, and advances *last_.
  Definition* AddDefinition(Definition* def) {
    *last_ = flow_graph_->AppendTo(
        *last_, def, call_->deopt_id() != DeoptId::kNone ? call_->env() : NULL,
        FlowGraph::kValue);
    return def;
  }
  Zone* zone() { return flow_graph_->zone(); }

  FlowGraph* flow_graph_;
  Instruction* call_;
  GraphEntryInstr* graph_entry_;
  FunctionEntryInstr** entry_;
  Instruction** last_;
  Definition** result_;

  // First index is the argument number, second index is the lane number.
  Definition* in_[4][4];
  // Index is the lane number.
  Definition* op_[4];
};
static bool InlineSimdOp(FlowGraph* flow_graph,
bool is_dynamic_call,
Instruction* call,
@ -3546,9 +4128,6 @@ static bool InlineSimdOp(FlowGraph* flow_graph,
FunctionEntryInstr** entry,
Instruction** last,
Definition** result) {
if (!ShouldInlineSimd()) {
return false;
}
if (is_dynamic_call && call->ArgumentCount() > 1) {
// Issue(dartbug.com/37737): Dynamic invocation forwarders have the
// same recognized kind as the method they are forwarding to.
@ -3563,6 +4142,19 @@ static bool InlineSimdOp(FlowGraph* flow_graph,
return false;
}
if (!FLAG_enable_simd_inline) {
return false;
}
if (!FlowGraphCompiler::SupportsUnboxedSimd128()) {
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
SimdLowering lowering(flow_graph, call, graph_entry, entry, last, result);
return lowering.TryInline(kind);
#else
UNREACHABLE();
#endif
}
*entry =
new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
call->GetBlock()->try_index(), DeoptId::kNone);

View file

@ -1323,6 +1323,14 @@ CompileType BooleanNegateInstr::ComputeType() const {
return CompileType::Bool();
}
// A bool converted to a lane mask is always an integer.
CompileType BoolToIntInstr::ComputeType() const {
  return CompileType::Int();
}
// A lane mask converted back always yields a Bool object.
CompileType IntToBoolInstr::ComputeType() const {
  return CompileType::Bool();
}
// `is` / `is!` tests always evaluate to a Bool.
CompileType InstanceOfInstr::ComputeType() const {
  return CompileType::Bool();
}
@ -1754,10 +1762,6 @@ CompileType SimdOpInstr::ComputeType() const {
return CompileType::FromCid(simd_op_result_cids[kind()]);
}
// sqrt/square of a double is always a double.
CompileType MathUnaryInstr::ComputeType() const {
  return CompileType::FromCid(kDoubleCid);
}
// min/max returns whichever cid the instruction was specialized for
// (stored in result_cid_ at construction time).
CompileType MathMinMaxInstr::ComputeType() const {
  return CompileType::FromCid(result_cid_);
}
@ -1811,6 +1815,20 @@ CompileType BoxInstr::ComputeType() const {
}
}
// The boxed SIMD type is determined solely by the unboxed lane
// representation this instruction packs: float lanes make a Float32x4,
// double lanes a Float64x2, and int32 lanes an Int32x4.
CompileType BoxLanesInstr::ComputeType() const {
  const auto lane_rep = from_representation();
  if (lane_rep == kUnboxedFloat) {
    return CompileType::FromCid(kFloat32x4Cid);
  }
  if (lane_rep == kUnboxedDouble) {
    return CompileType::FromCid(kFloat64x2Cid);
  }
  if (lane_rep == kUnboxedInt32) {
    return CompileType::FromCid(kInt32x4Cid);
  }
  UNREACHABLE();
  return CompileType::Dynamic();
}
// Widening an int32 to a floating-point value always yields a double.
CompileType Int32ToDoubleInstr::ComputeType() const {
return CompileType::FromCid(kDoubleCid);
}
@ -1823,12 +1841,12 @@ CompileType Int64ToDoubleInstr::ComputeType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType DoubleToDoubleInstr::ComputeType() const {
// Widening a 32-bit float to a 64-bit double always yields a double.
CompileType FloatToDoubleInstr::ComputeType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType FloatToDoubleInstr::ComputeType() const {
return CompileType::FromCid(kDoubleCid);
// The result of a float comparison is represented as an integer, not a
// Dart bool — presumably an all-ones/all-zeros lane mask for the SIMD
// lowering; confirm against the instruction's users.
CompileType FloatCompareInstr::ComputeType() const {
return CompileType::Int();
}
CompileType DoubleToFloatInstr::ComputeType() const {
@ -1848,6 +1866,10 @@ CompileType ExtractNthOutputInstr::ComputeType() const {
return CompileType::FromCid(definition_cid_);
}
// The result class id was fixed when the instruction was constructed
// (definition_cid_), so the type is known statically.
CompileType UnboxLaneInstr::ComputeType() const {
return CompileType::FromCid(definition_cid_);
}
static AbstractTypePtr ExtractElementTypeFromArrayType(
const AbstractType& array_type) {
if (array_type.IsTypeParameter()) {

View file

@ -1244,15 +1244,6 @@ Fragment BaseFlowGraphBuilder::InvokeMathCFunction(
return Fragment(instr);
}
Fragment BaseFlowGraphBuilder::DoubleToDouble(
MethodRecognizer::Kind recognized_kind) {
Value* value = Pop();
auto* instr =
new (Z) DoubleToDoubleInstr(value, recognized_kind, GetNextDeoptId());
Push(instr);
return Fragment(instr);
}
Fragment BaseFlowGraphBuilder::DoubleToInteger(
MethodRecognizer::Kind recognized_kind) {
Value* value = Pop();
@ -1262,9 +1253,10 @@ Fragment BaseFlowGraphBuilder::DoubleToInteger(
return Fragment(instr);
}
Fragment BaseFlowGraphBuilder::MathUnary(MathUnaryInstr::MathUnaryKind kind) {
Fragment BaseFlowGraphBuilder::UnaryDoubleOp(Token::Kind op) {
Value* value = Pop();
auto* instr = new (Z) MathUnaryInstr(kind, value, GetNextDeoptId());
auto* instr = new (Z) UnaryDoubleOpInstr(op, value, GetNextDeoptId(),
Instruction::kNotSpeculative);
Push(instr);
return Fragment(instr);
}

View file

@ -455,18 +455,13 @@ class BaseFlowGraphBuilder {
Fragment InvokeMathCFunction(MethodRecognizer::Kind recognized_kind,
intptr_t num_inputs);
// Pops double value and converts it to double as specified
// by the recognized method (kDoubleTruncateToDouble,
// kDoubleFloorToDouble or kDoubleCeilToDouble).
Fragment DoubleToDouble(MethodRecognizer::Kind recognized_kind);
// Pops double value and converts it to int as specified
// by the recognized method (kDoubleToInteger,
// kDoubleFloorToInt or kDoubleCeilToInt).
Fragment DoubleToInteger(MethodRecognizer::Kind recognized_kind);
// Pops double value and applies unary math operation.
Fragment MathUnary(MathUnaryInstr::MathUnaryKind kind);
Fragment UnaryDoubleOp(Token::Kind op);
// Records coverage for this position, if the current VM mode supports it.
Fragment RecordCoverage(TokenPosition position);

View file

@ -1634,14 +1634,26 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
((kind == MethodRecognizer::kDoubleTruncateToDouble) ||
(kind == MethodRecognizer::kDoubleFloorToDouble) ||
(kind == MethodRecognizer::kDoubleCeilToDouble))) {
body += DoubleToDouble(kind);
switch (kind) {
case MethodRecognizer::kDoubleTruncateToDouble:
body += UnaryDoubleOp(Token::kTRUNCATE);
break;
case MethodRecognizer::kDoubleFloorToDouble:
body += UnaryDoubleOp(Token::kFLOOR);
break;
case MethodRecognizer::kDoubleCeilToDouble:
body += UnaryDoubleOp(Token::kCEILING);
break;
default:
UNREACHABLE();
}
} else {
body += InvokeMathCFunction(kind, function.NumParameters());
}
} break;
case MethodRecognizer::kMathSqrt: {
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += MathUnary(MathUnaryInstr::kSqrt);
body += UnaryDoubleOp(Token::kSQRT);
} break;
case MethodRecognizer::kFinalizerBase_setIsolate:
ASSERT_EQUAL(function.NumParameters(), 1);

View file

@ -395,15 +395,9 @@ void DeferredObject::Fill() {
static_cast<uint64_t>(Integer::Cast(value).AsInt64Value()));
break;
case kTypedDataFloat32ArrayCid:
// Although element of Float32 array is represented with Double,
// it is already converted to 32-bit float via DoubleToFloat
// instruction before it was stored.
// Reinterpret double value as float to get the value back.
typed_data.SetFloat32(
element_offset,
bit_cast<float, uint32_t>(
static_cast<uint32_t>(bit_cast<uint64_t, double>(
Double::Cast(value).value()))));
static_cast<float>(Double::Cast(value).value()));
break;
case kTypedDataFloat64ArrayCid:
typed_data.SetFloat64(element_offset,

View file

@ -232,6 +232,7 @@ static bool IsObjectInstruction(DeoptInstr::Kind kind) {
case DeoptInstr::kInt32x4:
case DeoptInstr::kFloat64x2:
case DeoptInstr::kWord:
case DeoptInstr::kFloat:
case DeoptInstr::kDouble:
case DeoptInstr::kMint:
case DeoptInstr::kMintPair:
@ -668,6 +669,12 @@ class DeoptFpuInstr : public DeoptInstr {
DISALLOW_COPY_AND_ASSIGN(DeoptFpuInstr);
};
typedef DeoptFpuInstr<DeoptInstr::kFloat,
CatchEntryMove::SourceKind::kFloatSlot,
float,
DoublePtr>
DeoptFloatInstr;
typedef DeoptFpuInstr<DeoptInstr::kDouble,
CatchEntryMove::SourceKind::kDoubleSlot,
double,
@ -902,6 +909,8 @@ DeoptInstr* DeoptInstr::Create(intptr_t kind_as_int, intptr_t source_index) {
switch (kind) {
case kWord:
return new DeoptWordInstr(source_index);
case kFloat:
return new DeoptFloatInstr(source_index);
case kDouble:
return new DeoptDoubleInstr(source_index);
case kMint:
@ -945,6 +954,8 @@ const char* DeoptInstr::KindToCString(Kind kind) {
switch (kind) {
case kWord:
return "word";
case kFloat:
return "float";
case kDouble:
return "double";
case kMint:
@ -1127,6 +1138,9 @@ void DeoptInfoBuilder::AddCopy(Value* value,
new (zone()) DeoptUint32Instr(ToCpuRegisterSource(source_loc));
break;
case kUnboxedFloat:
deopt_instr = new (zone()) DeoptFloatInstr(
ToFpuRegisterSource(source_loc, Location::kDoubleStackSlot));
break;
case kUnboxedDouble:
deopt_instr = new (zone()) DeoptDoubleInstr(
ToFpuRegisterSource(source_loc, Location::kDoubleStackSlot));

View file

@ -82,7 +82,15 @@ class DeoptContext : public MallocAllocated {
return cpu_registers_[reg];
}
double FpuRegisterValue(FpuRegister reg) const {
// Reinterprets the first four bytes of the saved FPU register slot as a
// 32-bit float (bit pattern reuse, not a numeric conversion).
float FpuRegisterValueAsFloat(FpuRegister reg) const {
ASSERT(FlowGraphCompiler::SupportsUnboxedDoubles());
ASSERT(fpu_registers_ != NULL);
ASSERT(reg >= 0);
ASSERT(reg < kNumberOfFpuRegisters);
// NOTE(review): pointer-based type punning; a bit_cast of the low 32
// bits would sidestep aliasing concerns — confirm element type of
// fpu_registers_.
return *reinterpret_cast<float*>(&fpu_registers_[reg]);
}
double FpuRegisterValueAsDouble(FpuRegister reg) const {
ASSERT(FlowGraphCompiler::SupportsUnboxedDoubles());
ASSERT(fpu_registers_ != NULL);
ASSERT(reg >= 0);
@ -159,6 +167,11 @@ class DeoptContext : public MallocAllocated {
idx, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
}
// Defers materialization of an unboxed float into a Double object; the
// float is implicitly widened to double, since this shares DeferredDouble
// with the double overload.
void DeferMaterialization(float value, DoublePtr* slot) {
deferred_slots_ = new DeferredDouble(
value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
}
void DeferMaterialization(double value, DoublePtr* slot) {
deferred_slots_ = new DeferredDouble(
value, reinterpret_cast<ObjectPtr*>(slot), deferred_slots_);
@ -267,6 +280,7 @@ class DeoptInstr : public ZoneAllocated {
kRetAddress,
kConstant,
kWord,
kFloat,
kDouble,
kFloat32x4,
kFloat64x2,
@ -357,10 +371,17 @@ struct RegisterReader<Register, T> {
}
};
// Specialization for reading a float-valued FPU register during deopt.
// The value is returned as a double because the deopt machinery
// materializes Double objects (see DeoptFloatInstr, which uses DoublePtr).
template <>
struct RegisterReader<FpuRegister, float> {
static double Read(DeoptContext* context, FpuRegister reg) {
return context->FpuRegisterValueAsFloat(reg);
}
};
template <>
struct RegisterReader<FpuRegister, double> {
static double Read(DeoptContext* context, FpuRegister reg) {
return context->FpuRegisterValue(reg);
return context->FpuRegisterValueAsDouble(reg);
}
};

View file

@ -237,6 +237,10 @@ class ExceptionHandlerFinder : public StackResource {
value = *TaggedSlotAt(fp, move.src_slot());
break;
case CatchEntryMove::SourceKind::kFloatSlot:
value = Double::New(*SlotAt<float>(fp, move.src_slot()));
break;
case CatchEntryMove::SourceKind::kDoubleSlot:
value = Double::New(*SlotAt<double>(fp, move.src_slot()));
break;
@ -369,6 +373,11 @@ const char* CatchEntryMove::ToCString() const {
Utils::SNPrint(from, ARRAY_SIZE(from), "fp[%" Pd "]", src_slot());
break;
case SourceKind::kFloatSlot:
Utils::SNPrint(from, ARRAY_SIZE(from), "f32 [fp + %" Pd "]",
src_slot() * compiler::target::kWordSize);
break;
case SourceKind::kDoubleSlot:
Utils::SNPrint(from, ARRAY_SIZE(from), "f64 [fp + %" Pd "]",
src_slot() * compiler::target::kWordSize);

View file

@ -147,6 +147,7 @@ class CatchEntryMove {
enum class SourceKind {
kConstant,
kTaggedSlot,
kFloatSlot,
kDoubleSlot,
kFloat32x4Slot,
kFloat64x2Slot,

View file

@ -56,6 +56,8 @@ bool Token::IsBinaryArithmeticOperator(Token::Kind token) {
case Token::kSHL:
case Token::kSHR:
case Token::kUSHR:
case Token::kMAX:
case Token::kMIN:
return true;
default:
return false;
@ -63,7 +65,21 @@ bool Token::IsBinaryArithmeticOperator(Token::Kind token) {
}
bool Token::IsUnaryArithmeticOperator(Token::Kind token) {
return (token == kBIT_NOT) || (token == kNEGATE);
switch (token) {
case Token::kBIT_NOT:
case Token::kNEGATE:
case Token::kABS:
case Token::kSQRT:
case Token::kSQUARE:
case Token::kRECIPROCAL:
case Token::kRECIPROCAL_SQRT:
case Token::kTRUNCATE:
case Token::kFLOOR:
case Token::kCEILING:
return true;
default:
return false;
}
}
bool Token::IsBinaryBitwiseOperator(Token::Kind token) {

View file

@ -140,7 +140,17 @@ namespace dart {
TOK(kSCRIPTTAG, "#!", 0, kNoAttribute) \
\
/* Support for optimized code */ \
TOK(kREM, "", 0, kNoAttribute)
TOK(kREM, "rem", 0, kNoAttribute) \
TOK(kABS, "abs", 0, kNoAttribute) \
TOK(kSQRT, "sqrt", 0, kNoAttribute) \
TOK(kMIN, "min", 0, kNoAttribute) \
TOK(kMAX, "max", 0, kNoAttribute) \
TOK(kRECIPROCAL, "reciprocal", 0, kNoAttribute) \
TOK(kRECIPROCAL_SQRT, "reciprocal-sqrt", 0, kNoAttribute) \
TOK(kSQUARE, "square", 0, kNoAttribute) \
TOK(kTRUNCATE, "truncate", 0, kNoAttribute) \
TOK(kFLOOR, "floor", 0, kNoAttribute) \
TOK(kCEILING, "ceiling", 0, kNoAttribute)
// List of keywords. The list must be alphabetically ordered. The
// keyword recognition code depends on the ordering.

View file

@ -2456,21 +2456,75 @@ abstract class Float32x4 {
Float32x4 operator /(Float32x4 other);
/// Relational less than.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x < other.x ? -1 : 0,
/// this.y < other.y ? -1 : 0,
/// this.z < other.z ? -1 : 0,
/// this.w < other.w ? -1 : 0);
/// ```
Int32x4 lessThan(Float32x4 other);
/// Relational less than or equal.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x <= other.x ? -1 : 0,
/// this.y <= other.y ? -1 : 0,
/// this.z <= other.z ? -1 : 0,
/// this.w <= other.w ? -1 : 0);
/// ```
Int32x4 lessThanOrEqual(Float32x4 other);
/// Relational greater than.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x > other.x ? -1 : 0,
/// this.y > other.y ? -1 : 0,
/// this.z > other.z ? -1 : 0,
/// this.w > other.w ? -1 : 0);
/// ```
Int32x4 greaterThan(Float32x4 other);
/// Relational greater than or equal.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x >= other.x ? -1 : 0,
/// this.y >= other.y ? -1 : 0,
/// this.z >= other.z ? -1 : 0,
/// this.w >= other.w ? -1 : 0);
/// ```
Int32x4 greaterThanOrEqual(Float32x4 other);
/// Relational equal.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x == other.x ? -1 : 0,
/// this.y == other.y ? -1 : 0,
/// this.z == other.z ? -1 : 0,
/// this.w == other.w ? -1 : 0);
/// ```
Int32x4 equal(Float32x4 other);
/// Relational not-equal.
///
/// Equivalent to:
///
/// ```
/// Int32x4(this.x != other.x ? -1 : 0,
/// this.y != other.y ? -1 : 0,
/// this.z != other.z ? -1 : 0,
/// this.w != other.w ? -1 : 0);
/// ```
Int32x4 notEqual(Float32x4 other);
/// Returns a copy of [this] each lane being scaled by [s].
@ -2802,7 +2856,14 @@ abstract class Float32x4 {
/// The lanes are "x", "y", "z", and "w" respectively.
abstract class Int32x4 {
external factory Int32x4(int x, int y, int z, int w);
/// Equivalent to:
///
/// ```
/// Int32x4(x ? -1 : 0, y ? -1 : 0, z ? -1 : 0, w ? -1 : 0)
/// ```
external factory Int32x4.bool(bool x, bool y, bool z, bool w);
external factory Int32x4.fromFloat32x4Bits(Float32x4 x);
/// The bit-wise or operator.