Mirror of https://github.com/golang/go (synced 2024-11-05 18:36:08 +00:00)
cmd/compile: move SSA rotate instruction detection to arch-independent rules
Detect rotate instructions while still in architecture-independent form.
It's easier to do here, and we don't need to repeat it in each
architecture file.

Change-Id: I9396954b3f3b3bfb96c160d064a02002309935bb
Reviewed-on: https://go-review.googlesource.com/c/go/+/421195
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Eric Fang <eric.fang@arm.com>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Joedian Reid <joedian@golang.org>
Reviewed-by: Ruinan Sun <Ruinan.Sun@arm.com>
Run-TryBot: Keith Randall <khr@golang.org>
This commit is contained in:
parent a36a0c440e
commit 60ad3c48f5
23 changed files with 5885 additions and 4531 deletions
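For orientation, a minimal, hedged sketch (not part of the commit; names are illustrative) of the source-level rotate patterns whose detection this change centralizes in the generic rules:

package main

import (
	"fmt"
	"math/bits"
)

// rotl64Manual writes the rotate as shift-or; after this change the
// architecture-independent SSA rules recognize this pattern and emit a
// RotateLeft64 op, which each architecture then lowers to its native
// rotate instruction (if it has one).
func rotl64Manual(x uint64, k uint) uint64 {
	k &= 63 // bounded shift amount, as in the patterns matched by the rules
	return x<<k | x>>(64-k)
}

func main() {
	x := uint64(0x0123456789abcdef)
	fmt.Println(rotl64Manual(x, 8) == bits.RotateLeft64(x, 8)) // true
}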
@@ -148,10 +148,14 @@
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7])

// rotates
(RotateLeft32 ...) => (ROLL ...)
(RotateLeft16 ...) => (ROLW ...)
(RotateLeft8 ...) => (ROLB ...)
// constant rotates
(RotateLeft32 x (MOVLconst [c])) => (ROLLconst [c&31] x)
(RotateLeft16 x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x)
(RotateLeft8 x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x)
(ROLL x (MOVLconst [c])) => (ROLLconst [c&31] x)
(ROLW x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x)
(ROLB x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x)

// Lowering comparisons
(Less32 x y) => (SETL (CMPL x y))
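A hedged illustration (not part of the diff; names are illustrative) of Go code the 386 rules above apply to. A constant-amount rotate should fold into a single rotate-by-immediate, though exact codegen depends on the surrounding code, so treat this as a sketch rather than a guarantee:

package rotdemo

import "math/bits"

// Both forms rotate left by a constant 5. Under the rules above the
// compiler lowers RotateLeft32 to ROLL and then folds the constant
// amount into ROLLconst [5] on 386.
func rotConstShift(x uint32) uint32 { return x<<5 | x>>27 }
func rotConstBits(x uint32) uint32  { return bits.RotateLeft32(x, 5) }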
@@ -425,29 +429,10 @@

// Rotate instructions

(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)

(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
  => (ROLWconst x [int16(c)])
( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
  => (ROLWconst x [int16(c)])
(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
  => (ROLWconst x [int16(c)])

(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
  => (ROLBconst x [int8(c)])
( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
  => (ROLBconst x [int8(c)])
(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
  => (ROLBconst x [int8(c)])

(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)

// Constant shift simplifications
(SHLLconst x [0]) => x
@@ -275,6 +275,9 @@ func init() {
	{name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
	{name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7

	{name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // 32 bits of arg0 rotate left by arg1
	{name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // low 16 bits of arg0 rotate left by arg1
	{name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // low 8 bits of arg0 rotate left by arg1
	{name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
	{name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
	{name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
@@ -853,86 +853,11 @@
((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> y))

// Constant rotate instructions
((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c])
((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c])

((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c])
((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c])

(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x)
(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)

(RotateLeft8 ...) => (ROLB ...)
(RotateLeft16 ...) => (ROLW ...)
(RotateLeft32 ...) => (ROLL ...)
(RotateLeft64 ...) => (ROLQ ...)

// Non-constant rotates.
// We want to issue a rotate when the Go source contains code like
//     y &= 63
//     x << y | x >> (64-y)
// The shift rules above convert << to SHLx and >> to SHRx.
// SHRx converts its shift argument from 64-y to -y.
// A tricky situation occurs when y==0. Then the original code would be:
//     x << 0 | x >> 64
// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
// to force the second term to 0. We don't need that mask, but we must match
// it in order to strip it out.
(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
(ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
(ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)

(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
(ORL (SHLXL x y) (ANDL (SHRXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
(ORL (SHRXL x y) (ANDL (SHLXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)

// Help with rotate detection
(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT)
(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT)

(ORL (SHLL x (AND(Q|L)const y [15]))
     (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
           (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
  && v.Type.Size() == 2
  => (ROLW x y)
(ORL (SHRW x (AND(Q|L)const y [15]))
     (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
  && v.Type.Size() == 2
  => (RORW x y)
(ORL (SHLXL x (AND(Q|L)const y [15]))
     (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
           (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
  && v.Type.Size() == 2
  => (ROLW x y)
(ORL (SHRW x (AND(Q|L)const y [15]))
     (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
  && v.Type.Size() == 2
  => (RORW x y)

(ORL (SHLL x (AND(Q|L)const y [ 7]))
     (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
           (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
  && v.Type.Size() == 1
  => (ROLB x y)
(ORL (SHRB x (AND(Q|L)const y [ 7]))
     (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
  && v.Type.Size() == 1
  => (RORB x y)
(ORL (SHLXL x (AND(Q|L)const y [ 7]))
     (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
           (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
  && v.Type.Size() == 1
  => (ROLB x y)
(ORL (SHRB x (AND(Q|L)const y [ 7]))
     (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
  && v.Type.Size() == 1
  => (RORB x y)

// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
(ROLL x (NEG(Q|L) y)) => (RORL x y)
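A hedged Go-level sketch (not part of the diff; names are illustrative) of the source pattern the non-constant ORQ/RORQ rules above match, and of why the extra AND/SBBQcarrymask subtree appears at all:

package rotdemo

// rotl64 is the shape of source code the non-constant rotate rules target.
// The compiler lowers x>>(64-y) with an extra "is the shift >= 64?" mask,
// because when y == 0 the Go expression x>>64 must evaluate to 0, not x.
// The rules match that mask (the SBBQcarrymask/CMPQconst subtree) only so
// they can strip it once the whole expression becomes a single ROLQ.
func rotl64(x uint64, y uint) uint64 {
	y &= 63
	return x<<y | x>>(64-y) // one ROLQ on amd64 once the rules fire
}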
@@ -1130,14 +1130,6 @@
(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c])
(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c])

// Generate rotates
(ADDshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
( ORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
(XORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
(ADDshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
( ORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
(XORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)

(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
(RotateLeft32 x y) => (SRR x (RSBconst [0] <y.Type> y))
@@ -1820,56 +1820,9 @@
(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])

// Generate rotates with const shift
(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)

(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  => (RORWconst [32-c] x)
( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  => (RORWconst [32-c] x)
(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  => (RORWconst [32-c] x)
(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)

(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)

// Generate rotates with non-const shift.
// These rules match the Go source code like
//     y &= 63
//     x << y | x >> (64-y)
// "|" can also be "^" or "+".
// As arm64 does not have a ROL instruction, so ROL(x, y) is replaced by ROR(x, -y).
((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
    (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
        (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
  => (ROR x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
    (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
        (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
  => (ROR x y)

// These rules match the Go source code like
//     y &= 31
//     x << y | x >> (32-y)
// "|" can also be "^" or "+".
// As arm64 does not have a ROLW instruction, so ROLW(x, y) is replaced by RORW(x, -y).
((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
    (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
        (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
  => (RORW x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
    (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
        (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
  => (RORW x y)

// rev16w | rev16
// ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
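A hedged sketch (not part of the diff; helper names are illustrative) of the identity the arm64 comments above rely on: with no ROL/ROLW instruction, a left rotate is rewritten as a right rotate by the negated amount.

package rotdemo

// arm64 has ROR/RORW but no ROL/ROLW, so the rules above rewrite a left
// rotate by y into a right rotate by -y. The identity they rely on:
// RotateLeft(x, k) == RotateRight(x, -k), with amounts taken mod the width.

func rotr32(x uint32, k uint) uint32 {
	k &= 31
	return x>>k | x<<(32-k)
}

func rotl32(x uint32, k uint) uint32 {
	return rotr32(x, (-k)&31) // left by k == right by -k
}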
@@ -128,39 +128,8 @@
// Rotates
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x)
(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x)

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)

// Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y))))) => (ROTL x y)
(ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y))))) => (ROTL x y)
( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y)))))=> (ROTL x y)
( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y))))) => (ROTL x y)
(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y))))) => (ROTL x y)
(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y))))) => (ROTL x y)

(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)
(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)
( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)
( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)
(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)
(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y))))) => (ROTLW x y)

// Lowering rotates
(RotateLeft32 x y) => (ROTLW x y)
(RotateLeft64 x y) => (ROTL x y)
(RotateLeft32 ...) => (ROTLW ...)
(RotateLeft64 ...) => (ROTL ...)

// Constant rotate generation
(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
@@ -691,10 +691,6 @@
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])

// Match rotate by constant pattern.
((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c])

// Signed 64-bit comparison with immediate.
(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))
@@ -217,6 +217,11 @@
(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))

(RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo)
(RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo)
(RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo)
(RotateLeft8 x (Int64Make hi lo)) => (RotateLeft8 x lo)

// Clean up constants a little
(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
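A hedged sketch (not part of the diff; the helper name is illustrative) of why the dec64 rules above may drop the high word of a 64-bit rotate count on 32-bit targets: the count is used modulo the value width.

package rotdemo

// rotl32by64 rotates x left by a 64-bit count split into (hi, lo). Because
// the count is taken modulo 32, and hi only contributes multiples of 2^32
// (which are 0 mod 32), only lo can affect the result -- which is exactly
// what (RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo) encodes.
func rotl32by64(x uint32, hi, lo uint32) uint32 {
	k := uint(lo) % 32
	return x<<k | x>>((32-k)%32)
}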
@ -2551,3 +2551,83 @@
|
|||
// Elide self-moves. This only happens rarely (e.g test/fixedbugs/bug277.go).
|
||||
// However, this rule is needed to prevent the previous rule from looping forever in such cases.
|
||||
(Move dst src mem) && isSamePtr(dst, src) => mem
|
||||
|
||||
// Constant rotate detection.
|
||||
((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z)
|
||||
((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z)
|
||||
((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z)
|
||||
((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z)
|
||||
|
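For reference, a hedged example (not part of the diff; names are illustrative) of the source shape the constant-rotate rules above detect:

package rotdemo

// rotConst13 is recognized by the generic rules as
// (Or64 (Lsh64x64 x 13) (Rsh64Ux64 x 51)), with 51 == 64-13, and is
// rewritten to RotateLeft64 whenever canRotate reports that the target
// has a 64-bit rotate.
func rotConst13(x uint64) uint64 {
	return x<<13 | x>>51
}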
// Non-constant rotate detection.
// We use shiftIsBounded to make sure that neither of the shifts are >64.
// Note: these rules are subtle when the shift amounts are 0/64, as Go shifts
// are different from most native shifts. But it works out.
((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)

((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)

((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)

((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)

((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)

((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)

((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)

((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
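And a hedged example (not part of the diff; names are illustrative) of the non-constant form: masking the shift amount makes both shifts bounded, which is what lets the Or64 of the two shifts become a RotateLeft64 under the rules above.

package rotdemo

// rotl64Var has shift amounts that are provably < 64 (the & 63 mask), so
// shiftIsBounded holds and the rules above rewrite the Or64 of the two
// shifts to RotateLeft64. Without such a bound the rewrite would not be a
// rotate in general: for y >= 64, Go defines x<<y and x>>(64-y) as 0.
func rotl64Var(x uint64, y uint) uint64 {
	y &= 63
	return x<<y | x>>(64-y)
}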
// Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y.
(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y)
(RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y)
(RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y)
(RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y)

// Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y.
(RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))

// Rotating by y+c, with c a multiple of the value width, is the same as rotating by y.
(RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y)
(RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y)
(RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y)
(RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y)

// Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y.
(RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))

// Ensure we don't do Const64 rotates in a 32-bit system.
(RotateLeft64 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 <t> [int32(c)]))
(RotateLeft32 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 <t> [int32(c)]))
(RotateLeft16 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 <t> [int32(c)]))
(RotateLeft8 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 <t> [int32(c)]))
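A small, hedged check (not part of the diff) of the identities behind the masking and addition rules above; math/bits already takes rotate counts modulo the width:

package rotdemo

import "math/bits"

// sameRotation checks that masking the count with 63 (for 64-bit values)
// cannot change the result, and neither can adding a multiple of 64 --
// the facts the "Rotating by y&c ..." and "Rotating by y+c ..." rules use.
func sameRotation(x uint64, y int) bool {
	a := bits.RotateLeft64(x, y&63)
	b := bits.RotateLeft64(x, y)
	c := bits.RotateLeft64(x, y+64)
	return a == b && b == c
}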
@@ -249,14 +249,19 @@ var genericOps = []opData{
	{name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
	{name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]

	{name: "PopCount8", argLength: 1},  // Count bits in arg[0]
	{name: "PopCount16", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount32", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount64", argLength: 1}, // Count bits in arg[0]
	{name: "RotateLeft8", argLength: 2},  // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft16", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
	{name: "PopCount8", argLength: 1},  // Count bits in arg[0]
	{name: "PopCount16", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount32", argLength: 1}, // Count bits in arg[0]
	{name: "PopCount64", argLength: 1}, // Count bits in arg[0]

	// RotateLeftX instructions rotate the X bits of arg[0] to the left
	// by the low lg_2(X) bits of arg[1], interpreted as an unsigned value.
	// Note that this works out regardless of the bit width or signedness of
	// arg[1]. In particular, RotateLeft by x is the same as RotateRight by -x.
	{name: "RotateLeft64", argLength: 2},
	{name: "RotateLeft32", argLength: 2},
	{name: "RotateLeft16", argLength: 2},
	{name: "RotateLeft8", argLength: 2},
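A hedged check (not part of the diff; helper names are illustrative) of the property stated in the comment above, that RotateLeft by x is the same as RotateRight by -x:

package rotdemo

import "math/bits"

// rotr32 rotates right by k, using only the low 5 bits of the count as an
// unsigned value, matching the RotateLeftX semantics described above.
func rotr32(x uint32, k int) uint32 {
	s := uint(k) & 31
	return x>>s | x<<(32-s)
}

// leftEqualsRightNegated reports whether rotating left by k gives the same
// result as rotating right by -k; it holds for every k.
func leftEqualsRightNegated(x uint32, k int) bool {
	return bits.RotateLeft32(x, k) == rotr32(x, -k)
}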

	// Square root.
	// Special cases:
@@ -434,6 +434,9 @@ const (
	Op386SARLconst
	Op386SARWconst
	Op386SARBconst
	Op386ROLL
	Op386ROLW
	Op386ROLB
	Op386ROLLconst
	Op386ROLWconst
	Op386ROLBconst

@@ -2963,10 +2966,10 @@ const (
	OpPopCount16
	OpPopCount32
	OpPopCount64
	OpRotateLeft8
	OpRotateLeft16
	OpRotateLeft32
	OpRotateLeft64
	OpRotateLeft32
	OpRotateLeft16
	OpRotateLeft8
	OpSqrt
	OpSqrt32
	OpFloor
@ -4636,6 +4639,54 @@ var opcodeTable = [...]opInfo{
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ROLL",
|
||||
argLen: 2,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AROLL,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 2}, // CX
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
outputs: []outputInfo{
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ROLW",
|
||||
argLen: 2,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AROLW,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 2}, // CX
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
outputs: []outputInfo{
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ROLB",
|
||||
argLen: 2,
|
||||
resultInArg0: true,
|
||||
clobberFlags: true,
|
||||
asm: x86.AROLB,
|
||||
reg: regInfo{
|
||||
inputs: []inputInfo{
|
||||
{1, 2}, // CX
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
outputs: []outputInfo{
|
||||
{0, 239}, // AX CX DX BX BP SI DI
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ROLLconst",
|
||||
auxType: auxInt32,
|
||||
|
@@ -38457,12 +38508,7 @@ var opcodeTable = [...]opInfo{
		generic: true,
	},
	{
		name:    "RotateLeft8",
		argLen:  2,
		generic: true,
	},
	{
		name:    "RotateLeft16",
		name:    "RotateLeft64",
		argLen:  2,
		generic: true,
	},

@@ -38472,7 +38518,12 @@ var opcodeTable = [...]opInfo{
		generic: true,
	},
	{
		name:    "RotateLeft64",
		name:    "RotateLeft16",
		argLen:  2,
		generic: true,
	},
	{
		name:    "RotateLeft8",
		argLen:  2,
		generic: true,
	},
@@ -1977,3 +1977,20 @@ func makeJumpTableSym(b *Block) *obj.LSym {
	s.Set(obj.AttrLocal, true)
	return s
}

// canRotate reports whether the architecture supports
// rotates of integer registers with the given number of bits.
func canRotate(c *Config, bits int64) bool {
	if bits > c.PtrSize*8 {
		// Don't rewrite to rotates bigger than the machine word.
		return false
	}
	switch c.arch {
	case "386", "amd64":
		return true
	case "arm", "arm64", "s390x", "ppc64", "ppc64le", "wasm":
		return bits >= 32
	default:
		return false
	}
}
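A hedged, standalone restatement (not part of the diff; the helper name supportsRotate is hypothetical) of the decision canRotate makes, so the per-architecture cases are easy to scan:

package rotdemo

// supportsRotate mirrors the logic of canRotate above without needing a
// *ssa.Config: 386/amd64 rotate at every width, several 64-bit targets
// only have 32- and 64-bit rotates, everything else gets none, and no
// target rotates wider than its machine word.
func supportsRotate(arch string, ptrSizeBytes, bits int64) bool {
	if bits > ptrSizeBytes*8 {
		return false
	}
	switch arch {
	case "386", "amd64":
		return true
	case "arm", "arm64", "s390x", "ppc64", "ppc64le", "wasm":
		return bits >= 32
	default:
		return false
	}
}

// Examples: supportsRotate("amd64", 8, 16) == true,
// supportsRotate("arm64", 8, 16) == false (no 16-bit rotate),
// supportsRotate("arm", 4, 64) == false (wider than the machine word).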
@@ -146,10 +146,16 @@ func rewriteValue386(v *Value) bool {
		return rewriteValue386_Op386ORLload(v)
	case Op386ORLmodify:
		return rewriteValue386_Op386ORLmodify(v)
	case Op386ROLB:
		return rewriteValue386_Op386ROLB(v)
	case Op386ROLBconst:
		return rewriteValue386_Op386ROLBconst(v)
	case Op386ROLL:
		return rewriteValue386_Op386ROLL(v)
	case Op386ROLLconst:
		return rewriteValue386_Op386ROLLconst(v)
	case Op386ROLW:
		return rewriteValue386_Op386ROLW(v)
	case Op386ROLWconst:
		return rewriteValue386_Op386ROLWconst(v)
	case Op386SARB:

@@ -541,11 +547,14 @@ func rewriteValue386(v *Value) bool {
	case OpPanicExtend:
		return rewriteValue386_OpPanicExtend(v)
	case OpRotateLeft16:
		return rewriteValue386_OpRotateLeft16(v)
		v.Op = Op386ROLW
		return true
	case OpRotateLeft32:
		return rewriteValue386_OpRotateLeft32(v)
		v.Op = Op386ROLL
		return true
	case OpRotateLeft8:
		return rewriteValue386_OpRotateLeft8(v)
		v.Op = Op386ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
@ -734,80 +743,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
|
|||
}
|
||||
break
|
||||
}
|
||||
// match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x))
|
||||
// cond: d == 32-c
|
||||
// result: (ROLLconst [c] x)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRLconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt32(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(d == 32-c) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLLconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
|
||||
// cond: c < 16 && d == int16(16-c) && t.Size() == 2
|
||||
// result: (ROLWconst x [int16(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRWconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt16(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLWconst)
|
||||
v.AuxInt = int16ToAuxInt(int16(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
|
||||
// cond: c < 8 && d == int8(8-c) && t.Size() == 1
|
||||
// result: (ROLBconst x [int8(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRBconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt8(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLBconst)
|
||||
v.AuxInt = int8ToAuxInt(int8(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADDL x (SHLLconst [3] y))
|
||||
// result: (LEAL8 x y)
|
||||
for {
|
||||
|
@ -6305,80 +6240,6 @@ func rewriteValue386_Op386ORL(v *Value) bool {
|
|||
}
|
||||
break
|
||||
}
|
||||
// match: ( ORL (SHLLconst [c] x) (SHRLconst [d] x))
|
||||
// cond: d == 32-c
|
||||
// result: (ROLLconst [c] x)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRLconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt32(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(d == 32-c) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLLconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
|
||||
// cond: c < 16 && d == int16(16-c) && t.Size() == 2
|
||||
// result: (ROLWconst x [int16(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRWconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt16(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLWconst)
|
||||
v.AuxInt = int16ToAuxInt(int16(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
|
||||
// cond: c < 8 && d == int8(8-c) && t.Size() == 1
|
||||
// result: (ROLBconst x [int8(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRBconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt8(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLBconst)
|
||||
v.AuxInt = int8ToAuxInt(int8(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
|
||||
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
|
||||
// result: (ORLload x [off] {sym} ptr mem)
|
||||
|
@ -6809,6 +6670,24 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLB(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLB x (MOVLconst [c]))
|
||||
// result: (ROLBconst [int8(c&7)] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLBconst)
|
||||
v.AuxInt = int8ToAuxInt(int8(c & 7))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLBconst(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLBconst [c] (ROLBconst [d] x))
|
||||
|
@ -6837,6 +6716,24 @@ func rewriteValue386_Op386ROLBconst(v *Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLL(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLL x (MOVLconst [c]))
|
||||
// result: (ROLLconst [c&31] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLLconst)
|
||||
v.AuxInt = int32ToAuxInt(c & 31)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLLconst(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLLconst [c] (ROLLconst [d] x))
|
||||
|
@ -6865,6 +6762,24 @@ func rewriteValue386_Op386ROLLconst(v *Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLW(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLW x (MOVLconst [c]))
|
||||
// result: (ROLWconst [int16(c&15)] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLWconst)
|
||||
v.AuxInt = int16ToAuxInt(int16(c & 15))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_Op386ROLWconst(v *Value) bool {
|
||||
v_0 := v.Args[0]
|
||||
// match: (ROLWconst [c] (ROLWconst [d] x))
|
||||
|
@ -8346,80 +8261,6 @@ func rewriteValue386_Op386XORL(v *Value) bool {
|
|||
}
|
||||
break
|
||||
}
|
||||
// match: (XORL (SHLLconst [c] x) (SHRLconst [d] x))
|
||||
// cond: d == 32-c
|
||||
// result: (ROLLconst [c] x)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRLconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt32(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(d == 32-c) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLLconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
|
||||
// cond: c < 16 && d == int16(16-c) && t.Size() == 2
|
||||
// result: (ROLWconst x [int16(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRWconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt16(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLWconst)
|
||||
v.AuxInt = int16ToAuxInt(int16(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
|
||||
// cond: c < 8 && d == int8(8-c) && t.Size() == 1
|
||||
// result: (ROLBconst x [int8(c)])
|
||||
for {
|
||||
t := v.Type
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != Op386SHLLconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt32(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != Op386SHRBconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt8(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
|
||||
continue
|
||||
}
|
||||
v.reset(Op386ROLBconst)
|
||||
v.AuxInt = int8ToAuxInt(int8(c))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
|
||||
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
|
||||
// result: (XORLload x [off] {sym} ptr mem)
|
||||
|
@ -10298,60 +10139,6 @@ func rewriteValue386_OpPanicExtend(v *Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_OpRotateLeft16(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (RotateLeft16 x (MOVLconst [c]))
|
||||
// result: (ROLWconst [int16(c&15)] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLWconst)
|
||||
v.AuxInt = int16ToAuxInt(int16(c & 15))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_OpRotateLeft32(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (RotateLeft32 x (MOVLconst [c]))
|
||||
// result: (ROLLconst [c&31] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLLconst)
|
||||
v.AuxInt = int32ToAuxInt(c & 31)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_OpRotateLeft8(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
// match: (RotateLeft8 x (MOVLconst [c]))
|
||||
// result: (ROLBconst [int8(c&7)] x)
|
||||
for {
|
||||
x := v_0
|
||||
if v_1.Op != Op386MOVLconst {
|
||||
break
|
||||
}
|
||||
c := auxIntToInt32(v_1.AuxInt)
|
||||
v.reset(Op386ROLBconst)
|
||||
v.AuxInt = int8ToAuxInt(int8(c & 7))
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValue386_OpRsh16Ux16(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
|
|
File diff suppressed because it is too large
|
@ -2080,22 +2080,6 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDshiftLL [c] (SRLconst x [32-c]) x)
|
||||
// result: (SRRconst [32-c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(32 - c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
|
||||
// result: (REV16 x)
|
||||
for {
|
||||
|
@ -2285,22 +2269,6 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ADDshiftRL [c] (SLLconst x [32-c]) x)
|
||||
// result: (SRRconst [ c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
|
||||
|
@ -8596,22 +8564,6 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: ( ORshiftLL [c] (SRLconst x [32-c]) x)
|
||||
// result: (SRRconst [32-c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(32 - c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
|
||||
// result: (REV16 x)
|
||||
for {
|
||||
|
@ -8831,22 +8783,6 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: ( ORshiftRL [c] (SLLconst x [32-c]) x)
|
||||
// result: (SRRconst [ c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (ORshiftRL y:(SRLconst x [c]) x [c])
|
||||
// result: y
|
||||
for {
|
||||
|
@ -12755,22 +12691,6 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XORshiftLL [c] (SRLconst x [32-c]) x)
|
||||
// result: (SRRconst [32-c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(32 - c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
|
||||
// result: (REV16 x)
|
||||
for {
|
||||
|
@ -12990,22 +12910,6 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
|
|||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XORshiftRL [c] (SLLconst x [32-c]) x)
|
||||
// result: (SRRconst [ c] x)
|
||||
for {
|
||||
c := auxIntToInt32(v.AuxInt)
|
||||
if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
|
||||
break
|
||||
}
|
||||
x := v_0.Args[0]
|
||||
if x != v_1 {
|
||||
break
|
||||
}
|
||||
v.reset(OpARMSRRconst)
|
||||
v.AuxInt = int32ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
// match: (XORshiftRL (SRLconst x [c]) x [c])
|
||||
// result: (MOVWconst [0])
|
||||
for {
|
||||
|
|
File diff suppressed because it is too large
|
@ -648,9 +648,11 @@ func rewriteValuePPC64(v *Value) bool {
|
|||
case OpRotateLeft16:
|
||||
return rewriteValuePPC64_OpRotateLeft16(v)
|
||||
case OpRotateLeft32:
|
||||
return rewriteValuePPC64_OpRotateLeft32(v)
|
||||
v.Op = OpPPC64ROTLW
|
||||
return true
|
||||
case OpRotateLeft64:
|
||||
return rewriteValuePPC64_OpRotateLeft64(v)
|
||||
v.Op = OpPPC64ROTL
|
||||
return true
|
||||
case OpRotateLeft8:
|
||||
return rewriteValuePPC64_OpRotateLeft8(v)
|
||||
case OpRound:
|
||||
|
@ -3898,8 +3900,6 @@ func rewriteValuePPC64_OpOffPtr(v *Value) bool {
|
|||
func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
|
||||
v_1 := v.Args[1]
|
||||
v_0 := v.Args[0]
|
||||
b := v.Block
|
||||
typ := &b.Func.Config.Types
|
||||
// match: (ADD l:(MULLD x y) z)
|
||||
// cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)
|
||||
// result: (MADDLD x y z)
|
||||
|
@ -3921,236 +3921,6 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
|
|||
}
|
||||
break
|
||||
}
|
||||
// match: (ADD (SLDconst x [c]) (SRDconst x [d]))
|
||||
// cond: d == 64-c
|
||||
// result: (ROTLconst [c] x)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != OpPPC64SLDconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != OpPPC64SRDconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt64(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(d == 64-c) {
|
||||
continue
|
||||
}
|
||||
v.reset(OpPPC64ROTLconst)
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADD (SLWconst x [c]) (SRWconst x [d]))
|
||||
// cond: d == 32-c
|
||||
// result: (ROTLWconst [c] x)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != OpPPC64SLWconst {
|
||||
continue
|
||||
}
|
||||
c := auxIntToInt64(v_0.AuxInt)
|
||||
x := v_0.Args[0]
|
||||
if v_1.Op != OpPPC64SRWconst {
|
||||
continue
|
||||
}
|
||||
d := auxIntToInt64(v_1.AuxInt)
|
||||
if x != v_1.Args[0] || !(d == 32-c) {
|
||||
continue
|
||||
}
|
||||
v.reset(OpPPC64ROTLWconst)
|
||||
v.AuxInt = int64ToAuxInt(c)
|
||||
v.AddArg(x)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y)))))
|
||||
// result: (ROTL x y)
|
||||
for {
|
||||
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
|
||||
if v_0.Op != OpPPC64SLD {
|
||||
continue
|
||||
}
|
||||
_ = v_0.Args[1]
|
||||
x := v_0.Args[0]
|
||||
v_0_1 := v_0.Args[1]
|
||||
if v_0_1.Op != OpSelect0 {
|
||||
continue
|
||||
}
|
||||
v_0_1_0 := v_0_1.Args[0]
|
||||
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
|
||||
continue
|
||||
}
|
||||
y := v_0_1_0.Args[0]
|
||||
if v_1.Op != OpPPC64SRD {
|
||||
continue
|
||||
}
|
||||
_ = v_1.Args[1]
|
||||
if x != v_1.Args[0] {
|
||||
continue
|
||||
}
|
||||
v_1_1 := v_1.Args[1]
|
||||
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
|
||||
continue
|
||||
}
|
||||
_ = v_1_1.Args[1]
|
||||
v_1_1_0 := v_1_1.Args[0]
|
||||
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
|
||||
continue
|
||||
}
|
||||
v_1_1_1 := v_1_1.Args[1]
|
||||
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
|
||||
continue
|
||||
}
|
||||
v_1_1_1_0 := v_1_1_1.Args[0]
|
||||
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] {
|
||||
continue
|
||||
}
|
||||
v.reset(OpPPC64ROTL)
|
||||
v.AddArg2(x, y)
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
// match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y)))))
|
||||
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
continue
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
continue
}
v_1_1_1_0 := v_1_1_1.Args[0]
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (ADD x (MOVDconst [c]))
// cond: is32Bit(c)
// result: (ADDconst [c] x)
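For context (not part of the commit): the ADD, OR and XOR matchers removed above all recognize the same source-level idiom, a pair of shifts whose counts are masked to the word size. A minimal Go sketch of that idiom, with a hypothetical package and function name, is:

package rotatesketch // illustrative only; names are hypothetical

// rotl64ByVar is the shape of code the removed PPC64 matchers fused into a
// single ROTL. With the count pre-masked to 0..63 the shift pair is an exact
// left rotate, and the generic RotateLeft rules now perform this fusion once
// for all architectures.
func rotl64ByVar(x uint64, s uint) uint64 {
	s &= 63
	return x<<s | x>>(64-s)
}

When s is 0 the right shift count is 64, which Go defines as producing 0, so the identity still holds at the boundary.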
@@ -11642,236 +11412,6 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
b := v.Block
config := b.Func.Config
typ := &b.Func.Config.Types
// match: ( OR (SLDconst x [c]) (SRDconst x [d]))
// cond: d == 64-c
// result: (ROTLconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLDconst {
continue
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRDconst {
continue
}
d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 64-c) {
continue
}
v.reset(OpPPC64ROTLconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: ( OR (SLWconst x [c]) (SRWconst x [d]))
// cond: d == 32-c
// result: (ROTLWconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLWconst {
continue
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRWconst {
continue
}
d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(OpPPC64ROTLWconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y)))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
continue
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
continue
}
v_1_1_1_0 := v_1_1_1.Args[0]
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y)))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
continue
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
continue
}
v_1_1_1_0 := v_1_1_1.Args[0]
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (OR x (NOR y y))
// result: (ORN x y)
for {
@@ -13950,238 +13490,6 @@ func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool {
func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
// match: (XOR (SLDconst x [c]) (SRDconst x [d]))
// cond: d == 64-c
// result: (ROTLconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLDconst {
continue
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRDconst {
continue
}
d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 64-c) {
continue
}
v.reset(OpPPC64ROTLconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (XOR (SLWconst x [c]) (SRWconst x [d]))
// cond: d == 32-c
// result: (ROTLWconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLWconst {
continue
}
c := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpPPC64SRWconst {
continue
}
d := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(d == 32-c) {
continue
}
v.reset(OpPPC64ROTLWconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (Select0 <typ.UInt> (ANDCCconst [63] y)))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
continue
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
continue
}
v_1_1_1_0 := v_1_1_1.Args[0]
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst <typ.UInt> [64] (Select0 <typ.UInt> (ANDCCconst [63] y)))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLD {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRD {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst <typ.UInt> [32] (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
continue
}
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt {
continue
}
v_1_1_0_0 := v_1_1_0.Args[0]
if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (Select0 <typ.UInt> (ANDCCconst [31] y)))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpPPC64SLW {
continue
}
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpSelect0 {
continue
}
v_0_1_0 := v_0_1.Args[0]
if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 {
continue
}
y := v_0_1_0.Args[0]
if v_1.Op != OpPPC64SRW {
continue
}
_ = v_1.Args[1]
if x != v_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
continue
}
_ = v_1_1.Args[1]
v_1_1_0 := v_1_1.Args[0]
if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
continue
}
v_1_1_1 := v_1_1.Args[1]
if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt {
continue
}
v_1_1_1_0 := v_1_1_1.Args[0]
if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] {
continue
}
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
break
}
// match: (XOR (MOVDconst [c]) (MOVDconst [d]))
// result: (MOVDconst [c^d])
for {
@@ -14461,58 +13769,6 @@ func rewriteValuePPC64_OpRotateLeft16(v *Value) bool {
}
return false
}
func rewriteValuePPC64_OpRotateLeft32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft32 x (MOVDconst [c]))
// result: (ROTLWconst [c&31] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLWconst)
v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
// match: (RotateLeft32 x y)
// result: (ROTLW x y)
for {
x := v_0
y := v_1
v.reset(OpPPC64ROTLW)
v.AddArg2(x, y)
return true
}
}
func rewriteValuePPC64_OpRotateLeft64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft64 x (MOVDconst [c]))
// result: (ROTLconst [c&63] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64ROTLconst)
v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
// match: (RotateLeft64 x y)
// result: (ROTL x y)
for {
x := v_0
y := v_1
v.reset(OpPPC64ROTL)
v.AddArg2(x, y)
return true
}
}
func rewriteValuePPC64_OpRotateLeft8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
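For context (not part of the commit): the constant-count lowerings above reduce the rotate amount with c&31 or c&63. That matches the math/bits contract, where rotate counts are taken modulo the width, as this hypothetical snippet illustrates:

package rotatesketch // illustrative only

import "math/bits"

// Both calls rotate by the same effective amount, since 74 mod 64 == 10;
// on ppc64 the constant-count rules above lower each to a single ROTLconst.
func constRotates(x uint64) (uint64, uint64) {
	return bits.RotateLeft64(x, 10), bits.RotateLeft64(x, 74)
}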
@@ -5280,25 +5280,6 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool {
}
break
}
// match: (ADD (SLDconst x [c]) (SRDconst x [64-c]))
// result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLDconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRISBGZ)
v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
v.AddArg(x)
return true
}
break
}
// match: (ADD idx (MOVDaddr [c] {s} ptr))
// cond: ptr.Op != OpSB
// result: (MOVDaddridx [c] {s} ptr idx)
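For context (not part of the commit): RISBGZ is s390x's rotate-then-insert-selected-bits instruction with the unselected bits zeroed, and NewRotateParams(0, 63, c) selects the whole word, so here it acts as a plain 64-bit rotate. A rough, hypothetical Go model of that behavior:

package rotatesketch // illustrative only; a model, not the compiler's definition

import "math/bits"

// risbgzModel approximates RISBGZ: rotate left by rot, then keep only the
// big-endian bit range start..end (0 is the most significant bit), zeroing
// the rest. With start=0 and end=63 the mask is all ones, so the result is
// just the rotate.
func risbgzModel(x uint64, start, end, rot uint) uint64 {
	r := bits.RotateLeft64(x, int(rot))
	mask := (^uint64(0) >> start) &^ (^uint64(0) >> (end + 1))
	return r & mask
}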
@@ -5473,25 +5454,6 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool {
}
break
}
// match: (ADDW (SLWconst x [c]) (SRWconst x [32-c]))
// result: (RLLconst x [c])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLWconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRLLconst)
v.AuxInt = uint8ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (ADDW x (NEGW y))
// result: (SUBW x y)
for {
@@ -11689,25 +11651,6 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool {
}
break
}
// match: (OR (SLDconst x [c]) (SRDconst x [64-c]))
// result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLDconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRISBGZ)
v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
v.AddArg(x)
return true
}
break
}
// match: (OR (MOVDconst [-1<<63]) (LGDR <t> x))
// result: (LGDR <t> (LNDFR <x.Type> x))
for {
@@ -12387,25 +12330,6 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool {
}
break
}
// match: (ORW (SLWconst x [c]) (SRWconst x [32-c]))
// result: (RLLconst x [c])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLWconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRLLconst)
v.AuxInt = uint8ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (ORW x x)
// result: x
for {
@@ -14972,25 +14896,6 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool {
}
break
}
// match: (XOR (SLDconst x [c]) (SRDconst x [64-c]))
// result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLDconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRISBGZ)
v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
v.AddArg(x)
return true
}
break
}
// match: (XOR (MOVDconst [c]) (MOVDconst [d]))
// result: (MOVDconst [c^d])
for {
@@ -15068,25 +14973,6 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool {
}
break
}
// match: (XORW (SLWconst x [c]) (SRWconst x [32-c]))
// result: (RLLconst x [c])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpS390XSLWconst {
continue
}
c := auxIntToUint8(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
continue
}
v.reset(OpS390XRLLconst)
v.AuxInt = uint8ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
// match: (XORW x x)
// result: (MOVDconst [0])
for {
@@ -66,6 +66,14 @@ func rewriteValuedec64(v *Value) bool {
return rewriteValuedec64_OpOr32(v)
case OpOr64:
return rewriteValuedec64_OpOr64(v)
case OpRotateLeft16:
return rewriteValuedec64_OpRotateLeft16(v)
case OpRotateLeft32:
return rewriteValuedec64_OpRotateLeft32(v)
case OpRotateLeft64:
return rewriteValuedec64_OpRotateLeft64(v)
case OpRotateLeft8:
return rewriteValuedec64_OpRotateLeft8(v)
case OpRsh16Ux64:
return rewriteValuedec64_OpRsh16Ux64(v)
case OpRsh16x64:
@@ -1266,6 +1274,74 @@ func rewriteValuedec64_OpOr64(v *Value) bool {
return true
}
}
func rewriteValuedec64_OpRotateLeft16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft16 x (Int64Make hi lo))
// result: (RotateLeft16 x lo)
for {
x := v_0
if v_1.Op != OpInt64Make {
break
}
lo := v_1.Args[1]
v.reset(OpRotateLeft16)
v.AddArg2(x, lo)
return true
}
return false
}
func rewriteValuedec64_OpRotateLeft32(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft32 x (Int64Make hi lo))
// result: (RotateLeft32 x lo)
for {
x := v_0
if v_1.Op != OpInt64Make {
break
}
lo := v_1.Args[1]
v.reset(OpRotateLeft32)
v.AddArg2(x, lo)
return true
}
return false
}
func rewriteValuedec64_OpRotateLeft64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft64 x (Int64Make hi lo))
// result: (RotateLeft64 x lo)
for {
x := v_0
if v_1.Op != OpInt64Make {
break
}
lo := v_1.Args[1]
v.reset(OpRotateLeft64)
v.AddArg2(x, lo)
return true
}
return false
}
func rewriteValuedec64_OpRotateLeft8(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (RotateLeft8 x (Int64Make hi lo))
// result: (RotateLeft8 x lo)
for {
x := v_0
if v_1.Op != OpInt64Make {
break
}
lo := v_1.Args[1]
v.reset(OpRotateLeft8)
v.AddArg2(x, lo)
return true
}
return false
}
func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
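For context (not part of the commit): on 32-bit targets a 64-bit rotate count arrives as an Int64Make(hi, lo) pair, and the rules above keep only lo. That is safe because rotate counts are reduced modulo the width, so the high word can never change the result. A hypothetical illustration:

package rotatesketch // illustrative only

import "math/bits"

// Only the low word of a 64-bit count can affect a 32-bit rotate:
// (hi<<32 | lo) and lo agree modulo 32, so both calls return the same value.
func lowWordOnly(x uint32, hi, lo uint32) (uint32, uint32) {
	full := uint64(hi)<<32 | uint64(lo)
	return bits.RotateLeft32(x, int(full&31)), bits.RotateLeft32(x, int(lo&31))
}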
File diff suppressed because it is too large
@@ -1039,3 +1039,25 @@ func BenchmarkShiftArithmeticRight(b *testing.B) {
}
shiftSink64 = x
}

//go:noinline
func incorrectRotate1(x, c uint64) uint64 {
// This should not compile to a rotate instruction.
return x<<c | x>>(64-c)
}

//go:noinline
func incorrectRotate2(x uint64) uint64 {
var c uint64 = 66
// This should not compile to a rotate instruction.
return x<<c | x>>(64-c)
}

func TestIncorrectRotate(t *testing.T) {
if got := incorrectRotate1(1, 66); got != 0 {
t.Errorf("got %x want 0", got)
}
if got := incorrectRotate2(1); got != 0 {
t.Errorf("got %x want 0", got)
}
}
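For context (not part of the commit): the incorrectRotate tests above rely on Go's shift semantics, where a count greater than or equal to the operand width yields 0 rather than wrapping. With c = 66 both halves of the would-be rotate vanish, so the correct result is 0, and a compiler that emitted a real rotate here would miscompile the expression. A hypothetical spelled-out version:

package rotatesketch // illustrative only

// With c = 66: x<<66 is 0 (count >= 64), and 64-66 wraps to a huge uint64,
// so x>>(64-c) is 0 as well. The whole expression is therefore 0.
func wouldBeRotate(x, c uint64) uint64 {
	left := x << c
	right := x >> (64 - c)
	return left | right
}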
@@ -158,6 +158,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssa.Op386SHLL,
ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
ssa.Op386ROLL, ssa.Op386ROLW, ssa.Op386ROLB,
ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
ssa.Op386PXOR,
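For context (not part of the commit): the codegen checks below expect single rotate instructions both for bits.RotateLeft calls and for masked shift pairs, and the variable-count right-rotate cases additionally expect a NEG, since rotating right by z is the same as rotating left by -z. A hypothetical equivalent of that right-rotate form:

package rotatesketch // illustrative only

import "math/bits"

// Rotating right by a masked count is a left rotate by the negated count,
// which is why some targets emit NEG plus one rotate for x>>z | x<<(64-z).
func rotr64(x uint64, z uint) uint64 {
	return bits.RotateLeft64(x, -int(z&63))
}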
@@ -39,8 +39,8 @@ func rot64(x uint64) uint64 {
// s390x:"RISBGZ\t[$]0, [$]63, [$]10, "
// ppc64:"ROTL\t[$]10"
// ppc64le:"ROTL\t[$]10"
// arm64:"ROR\t[$]57" // TODO this is not great line numbering, but then again, the instruction did appear
// s390x:"RISBGZ\t[$]0, [$]63, [$]7, " // TODO ditto
// arm64:"ROR\t[$]54"
// s390x:"RISBGZ\t[$]0, [$]63, [$]10, "
a += bits.RotateLeft64(x, 10)

return a
@@ -77,8 +77,8 @@ func rot32(x uint32) uint32 {
// s390x:"RLL\t[$]10"
// ppc64:"ROTLW\t[$]10"
// ppc64le:"ROTLW\t[$]10"
// arm64:"RORW\t[$]25" // TODO this is not great line numbering, but then again, the instruction did appear
// s390x:"RLL\t[$]7" // TODO ditto
// arm64:"RORW\t[$]22"
// s390x:"RLL\t[$]10"
a += bits.RotateLeft32(x, 10)

return a
@@ -123,12 +123,16 @@ func rot64nc(x uint64, z uint) uint64 {

z &= 63

// amd64:"ROLQ"
// ppc64:"ROTL"
// ppc64le:"ROTL"
// amd64:"ROLQ",-"AND"
// arm64:"ROR","NEG",-"AND"
// ppc64:"ROTL",-"NEG",-"AND"
// ppc64le:"ROTL",-"NEG",-"AND"
a += x<<z | x>>(64-z)

// amd64:"RORQ"
// amd64:"RORQ",-"AND"
// arm64:"ROR",-"NEG",-"AND"
// ppc64:"ROTL","NEG",-"AND"
// ppc64le:"ROTL","NEG",-"AND"
a += x>>z | x<<(64-z)

return a
@@ -139,12 +143,16 @@ func rot32nc(x uint32, z uint) uint32 {

z &= 31

// amd64:"ROLL"
// ppc64:"ROTLW"
// ppc64le:"ROTLW"
// amd64:"ROLL",-"AND"
// arm64:"ROR","NEG",-"AND"
// ppc64:"ROTLW",-"NEG",-"AND"
// ppc64le:"ROTLW",-"NEG",-"AND"
a += x<<z | x>>(32-z)

// amd64:"RORL"
// amd64:"RORL",-"AND"
// arm64:"ROR",-"NEG",-"AND"
// ppc64:"ROTLW","NEG",-"AND"
// ppc64le:"ROTLW","NEG",-"AND"
a += x>>z | x<<(32-z)

return a
@@ -155,10 +163,10 @@ func rot16nc(x uint16, z uint) uint16 {

z &= 15

// amd64:"ROLW"
// amd64:"ROLW",-"ANDQ"
a += x<<z | x>>(16-z)

// amd64:"RORW"
// amd64:"RORW",-"ANDQ"
a += x>>z | x<<(16-z)

return a
@@ -169,10 +177,10 @@ func rot8nc(x uint8, z uint) uint8 {

z &= 7

// amd64:"ROLB"
// amd64:"ROLB",-"ANDQ"
a += x<<z | x>>(8-z)

// amd64:"RORB"
// amd64:"RORB",-"ANDQ"
a += x>>z | x<<(8-z)

return a