From a31bff5042b9e01943ae03ea6043241781a5c75f Mon Sep 17 00:00:00 2001
From: "srdjan@google.com" <srdjan@google.com>
Date: Fri, 23 Jan 2015 22:41:05 +0000
Subject: [PATCH] Implement bitLength intrinsic on x64.

R=johnmccutchan@google.com

Review URL: https://codereview.chromium.org//867383002

git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@43126 260f80e4-7a28-3924-810f-c04153c831b5
---
 runtime/vm/assembler_x64.cc      | 11 +++++++++++
 runtime/vm/assembler_x64.h       |  2 ++
 runtime/vm/assembler_x64_test.cc | 29 ++++++++++++++++++++++++++---
 runtime/vm/disassembler_x64.cc   | 10 ++++++----
 runtime/vm/intrinsifier_x64.cc   | 13 ++++++++++++-
 5 files changed, 57 insertions(+), 8 deletions(-)

diff --git a/runtime/vm/assembler_x64.cc b/runtime/vm/assembler_x64.cc
index 03fa7609d67..8205ce84d82 100644
--- a/runtime/vm/assembler_x64.cc
+++ b/runtime/vm/assembler_x64.cc
@@ -2397,6 +2397,17 @@ void Assembler::notq(Register reg) {
   EmitUint8(0xD0 | (reg & 7));
 }
 
+
+void Assembler::bsrq(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  Operand operand(src);
+  EmitOperandREX(dst, operand, REX_W);
+  EmitUint8(0x0F);
+  EmitUint8(0xBD);
+  EmitOperand(dst & 7, operand);
+}
+
+
 void Assembler::btq(Register base, Register offset) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   Operand operand(base);
diff --git a/runtime/vm/assembler_x64.h b/runtime/vm/assembler_x64.h
index 5a5179ba004..9b1da4dc65a 100644
--- a/runtime/vm/assembler_x64.h
+++ b/runtime/vm/assembler_x64.h
@@ -641,6 +641,8 @@ class Assembler : public ValueObject {
   void notl(Register reg);
   void notq(Register reg);
 
+  void bsrq(Register dst, Register src);
+
   void btq(Register base, Register offset);
 
   void enter(const Immediate& imm);
diff --git a/runtime/vm/assembler_x64_test.cc b/runtime/vm/assembler_x64_test.cc
index 8ad42ffc290..eafb7654f73 100644
--- a/runtime/vm/assembler_x64_test.cc
+++ b/runtime/vm/assembler_x64_test.cc
@@ -620,9 +620,9 @@ ASSEMBLER_TEST_RUN(UnsignedDivideLong, test) {
 
 
 ASSEMBLER_TEST_GENERATE(Negate, assembler) {
-  __ movl(RCX, Immediate(42));
-  __ negl(RCX);
-  __ movl(RAX, RCX);
+  __ movq(RCX, Immediate(42));
+  __ negq(RCX);
+  __ movq(RAX, RCX);
   __ ret();
 }
 
@@ -633,6 +633,29 @@ ASSEMBLER_TEST_RUN(Negate, test) {
 }
 
 
+ASSEMBLER_TEST_GENERATE(BitScanReverse, assembler) {
+  __ pushq(CallingConventions::kArg1Reg);
+  __ movq(RCX, Address(RSP, 0));
+  __ movq(RAX, Immediate(666));  // Marker for conditional write.
+  __ bsrq(RAX, RCX);
+  __ popq(RCX);
+  __ ret();
+}
+
+
+ASSEMBLER_TEST_RUN(BitScanReverse, test) {
+  typedef int (*Bsr)(int input);
+  Bsr call = reinterpret_cast<Bsr>(test->entry());
+  EXPECT_EQ(666, call(0));
+  EXPECT_EQ(0, call(1));
+  EXPECT_EQ(1, call(2));
+  EXPECT_EQ(1, call(3));
+  EXPECT_EQ(2, call(4));
+  EXPECT_EQ(5, call(42));
+  EXPECT_EQ(31, call(-1));
+}
+
+
 ASSEMBLER_TEST_GENERATE(MoveExtend, assembler) {
   __ movq(RDX, Immediate(0xffff));
   __ movzxb(RAX, RDX);  // RAX = 0xff
diff --git a/runtime/vm/disassembler_x64.cc b/runtime/vm/disassembler_x64.cc
index 568cba6b161..859ab9f1c61 100644
--- a/runtime/vm/disassembler_x64.cc
+++ b/runtime/vm/disassembler_x64.cc
@@ -1522,17 +1522,17 @@ int DisassemblerX64::TwoByteOpcodeInstruction(uint8_t* data) {
     // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
     current = data + SetCC(data);
-  } else if ((opcode & 0xFE) == 0xA4 || (opcode & 0xFE) == 0xAC ||
-             opcode == 0xAB || opcode == 0xA3) {
+  } else if (((opcode & 0xFE) == 0xA4) || ((opcode & 0xFE) == 0xAC) ||
+             (opcode == 0xAB) || (opcode == 0xA3) || (opcode == 0xBD)) {
     // SHLD, SHRD (double-prec. shift), BTS (bit test and set), BT (bit test).
     AppendToBuffer("%s%c ", mnemonic, operand_size_code());
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
     current += PrintRightOperand(current);
     AppendToBuffer(",%s", NameOfCPURegister(regop));
-    if (opcode == 0xAB || opcode == 0xA3) {
+    if ((opcode == 0xAB) || (opcode == 0xA3) || (opcode == 0xBD)) {
       // Done.
-    } else if (opcode == 0xA5 || opcode == 0xAD) {
+    } else if ((opcode == 0xA5) || (opcode == 0xAD)) {
       AppendToBuffer(",cl");
     } else {
       AppendToBuffer(",");
@@ -1586,6 +1586,8 @@ const char* DisassemblerX64::TwoByteMnemonic(uint8_t opcode) {
       return "movzxw";
     case 0xBE:
       return "movsxb";
+    case 0xBD:
+      return "bsr";
     case 0xBF:
       return "movsxw";
     case 0x12:
diff --git a/runtime/vm/intrinsifier_x64.cc b/runtime/vm/intrinsifier_x64.cc
index 4f80141be1c..223ea5f5459 100644
--- a/runtime/vm/intrinsifier_x64.cc
+++ b/runtime/vm/intrinsifier_x64.cc
@@ -794,7 +794,18 @@ void Intrinsifier::Smi_bitNegate(Assembler* assembler) {
 
 
 void Intrinsifier::Smi_bitLength(Assembler* assembler) {
-  // TODO(sra): Implement using bsrq.
+  ASSERT(kSmiTagShift == 1);
+  __ movq(RAX, Address(RSP, + 1 * kWordSize));  // Index.
+  // XOR with sign bit to complement bits if value is negative.
+  __ movq(RCX, RAX);
+  __ sarq(RCX, Immediate(63));  // All 0 or all 1.
+  __ xorq(RAX, RCX);
+  // BSR does not write the destination register if source is zero.  Put a 1 in
+  // the Smi tag bit to ensure BSR writes to destination register.
+  __ orq(RAX, Immediate(kSmiTagMask));
+  __ bsrq(RAX, RAX);
+  __ SmiTag(RAX);
+  __ ret();
 }
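
Note on the Smi_bitLength sequence: the argument arrives as a tagged Smi (value << 1, since kSmiTagShift == 1). XOR-ing with the sign fill produced by sarq(RCX, 63) complements the bits of a negative value, and OR-ing in the Smi tag bit guarantees BSR has a set bit to find (BSR leaves its destination untouched for a zero source, which is also what the 666 marker in the BitScanReverse assembler test checks). Because of the one-bit tag shift, the BSR result is already the untagged bitLength, so only SmiTag is needed at the end. The stand-alone C++ sketch below is not part of the patch; it is a hypothetical model of that register sequence, checked against a straightforward reference, using GCC/Clang's __builtin_clzll as a stand-in for bsrq.

// Hypothetical stand-alone model (not VM code) of the register sequence used by
// the intrinsic.  Assumes a one-bit Smi tag (kSmiTagShift == 1, kSmiTagMask == 1)
// and uses __builtin_clzll (GCC/Clang) in place of the bsrq instruction.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Reference semantics of int.bitLength for values that fit in a Smi:
// bits needed for the value, not counting the sign; negatives use their complement.
static int ReferenceBitLength(int64_t v) {
  if (v < 0) v = ~v;
  int n = 0;
  while (v != 0) {
    v >>= 1;
    ++n;
  }
  return n;
}

// Mirrors the emitted code.  RAX holds the tagged Smi argument (value << 1).
static int64_t IntrinsicBitLength(int64_t value) {
  int64_t rax = static_cast<int64_t>(static_cast<uint64_t>(value) << 1);  // Tagged Smi.
  int64_t rcx = (rax < 0) ? -1 : 0;  // sarq(RCX, Immediate(63)): all ones or all zeros.
  rax ^= rcx;                        // Complement the bits of a negative value.
  rax |= 1;                          // Set the Smi tag bit so BSR always writes.
  int bsr = 63 - __builtin_clzll(static_cast<uint64_t>(rax));  // bsrq(RAX, RAX).
  return static_cast<int64_t>(bsr) << 1;                       // SmiTag(RAX).
}

int main() {
  for (int64_t v = -1000; v <= 1000; ++v) {
    assert((IntrinsicBitLength(v) >> 1) == ReferenceBitLength(v));
  }
  std::printf("ok\n");
  return 0;
}

The same observation explains why the intrinsic needs no explicit zero check: OR-ing in the tag bit folds the zero (and -1) cases into the general path, so the only special casing left is in the assembler test, where call(0) verifies that BSR really does leave the 666 marker in place.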