cmd/compile: simplify arm64 bitfield optimizations

In some rewrite rules for arm64 bitfield optimizations, the bitfield
lsb value and the bitfield width value are derived from the datasize.
Some rules use the datasize directly to check that the bitfield lsb
value is valid and to obtain the bitfield width value, while others
call the isARM64BFMask() and arm64BFWidth() functions. For
consistency, this patch changes them all to use the datasize directly.
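
For the fixed masks these rules use, the two guards agree. The
following standalone check is a sketch of that equivalence; the
isARM64BFMask body below paraphrases the helper in
cmd/compile/internal/ssa/rewrite.go rather than quoting it:

  package main

  import (
  	"fmt"
  	"math/bits"
  )

  // Paraphrase of the compiler's isARM64BFMask helper: the shifted mask
  // must be a non-empty run of ones starting at bit 0 that still fits
  // below bit 64 once placed at lsb.
  func isARM64BFMask(lsb, mask, rshift int64) bool {
  	shifted := uint64(mask) >> uint(rshift)
  	return shifted != 0 &&
  		bits.OnesCount64(shifted+1) == 1 && // shifted+1 is a power of two
  		int64(bits.TrailingZeros64(^shifted))+lsb < 64
  }

  func main() {
  	// For mask = 1<<size-1, isARM64BFMask(sc, mask, sc) collapses to
  	// the datasize check sc < size that the rules now use directly.
  	for _, size := range []int64{8, 16, 32} {
  		for sc := int64(0); sc < 64; sc++ {
  			if isARM64BFMask(sc, 1<<uint(size)-1, sc) != (sc < size) {
  				fmt.Println("guards disagree:", size, sc)
  			}
  		}
  	}
  	fmt.Println("done")
  }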

This patch also sorts the codegen test cases.

Running the "toolstash-check -all" command reports one inconsistency:

new:    src/math/fma.go:104      BEQ	247
master: src/math/fma.go:104      BEQ	248

The inconsistency occurs because this patch widens the range of the
bitfield lsb value accepted by the "UBFIZ" optimization rules from
"lc+(32|16|8) < 64" to "lc < 64", so that the following code is now
compiled to a "UBFIZ" instruction. The logic of the changed code is
still correct.

The code of src/math/fma.go:160:
  const uvinf = 0x7FF0000000000000
  func FMA(a, b uint32) float64 {
  	ps := a+b
  	return Float64frombits(uint64(ps)<<63 | uvinf)
  }

The new assembly code:
  TEXT    "".FMA(SB), LEAF|NOFRAME|ABIInternal, $0-16
  MOVWU   "".a(FP), R0
  MOVWU   "".b+4(FP), R1
  ADD     R1, R0, R0
  UBFIZ   $63, R0, $1, R0
  ORR     $9218868437227405312, R0, R0
  MOVD    R0, "".~r2+8(FP)
  RET     (R30)

The master assembly code:
  TEXT    "".FMA(SB), LEAF|NOFRAME|ABIInternal, $0-16
  MOVWU   "".a(FP), R0
  MOVWU   "".b+4(FP), R1
  ADD     R1, R0, R0
  MOVWU   R0, R0
  LSL     $63, R0, R0
  ORR     $9218868437227405312, R0, R0
  MOVD    R0, "".~r2+8(FP)
  RET     (R30)
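
Both sequences move bit 0 of the 32-bit sum to bit 63. A minimal model
of the two paths (the ubfiz helper is a simplified rendering of the
instruction's extract-and-insert semantics, not an emulator) shows they
agree on sample inputs:

  package main

  import "fmt"

  // ubfiz models UBFIZ $lsb, Rn, $width: take the low <width> bits of x
  // and place them at bit position <lsb>, zeroing everything else.
  func ubfiz(x uint64, lsb, width uint) uint64 {
  	return (x & (1<<width - 1)) << lsb
  }

  func main() {
  	for _, x := range []uint64{0, 1, 2, 0xffffffff, 0xdeadbeefcafef00d} {
  		master := (x & 0xffffffff) << 63 // MOVWU R0, R0; LSL $63, R0, R0
  		patched := ubfiz(x, 63, 1)      // UBFIZ $63, R0, $1, R0
  		if master != patched {
  			fmt.Printf("mismatch at %#x\n", x)
  		}
  	}
  	fmt.Println("both sequences agree")
  }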

Change-Id: I9061104adfdfd3384d0525327ae1e5c8b0df5c35
Reviewed-on: https://go-review.googlesource.com/c/go/+/265038
Trust: fannie zhang <Fannie.Zhang@arm.com>
Run-TryBot: fannie zhang <Fannie.Zhang@arm.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>

@@ -1824,6 +1824,7 @@
// sbfiz
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// int64(x << lc)
(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
@@ -1835,6 +1836,7 @@
// sbfx
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// int64(x) >> rc
(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
@@ -1851,42 +1853,43 @@
=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
// ubfiz
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// uint64(x) << lc
(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
// uint64(x << lc)
(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
// merge ANDconst into ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// ubfx
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// uint64(x) >> rc
(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
// uint64(x >> rc)
(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
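
The min(size, 64-lc) widths above are what make the wider lc range
safe: for any lc < 64 the resulting lsb+width stays within 64 bits,
satisfying the tightened armBFAuxInt check in the next file. A small
sketch of the arithmetic, with armBFAuxInt and min as local stand-ins
mirroring the diff:

  package main

  import "fmt"

  // Sketch of the checked constructor (the real one returns an
  // arm64BitField; the validity checks match the diff below).
  func armBFAuxInt(lsb, width int64) int64 {
  	if lsb < 0 || lsb > 63 {
  		panic("ARM(64) bit field lsb constant out of range")
  	}
  	if width < 1 || lsb+width > 64 {
  		panic("ARM(64) bit field width constant out of range")
  	}
  	return width | lsb<<8
  }

  func min(a, b int64) int64 {
  	if a < b {
  		return a
  	}
  	return b
  }

  func main() {
  	// (SLLconst [lc] (MOVWUreg x)) => UBFIZ with width = min(32, 64-lc):
  	// for lc >= 32 the width shrinks so lsb+width never exceeds 64.
  	for _, lc := range []int64{3, 31, 32, 63} {
  		width := min(32, 64-lc)
  		fmt.Printf("lc=%2d -> UBFIZ lsb=%d, width=%d (aux=%#x)\n",
  			lc, lc, width, armBFAuxInt(lc, width))
  	}
  }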

@@ -1572,7 +1572,7 @@ func armBFAuxInt(lsb, width int64) arm64BitField {
if lsb < 0 || lsb > 63 {
panic("ARM(64) bit field lsb constant out of range")
}
if width < 1 || width > 64 {
if width < 1 || lsb+width > 64 {
panic("ARM(64) bit field width constant out of range")
}
return arm64BitField(width | lsb<<8)
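
For reference, the bfc.getARM64BFlsb() and bfc.getARM64BFwidth()
getters used by the rules undo this packing. The bodies below are a
reconstruction from the width | lsb<<8 encoding, shown only to make
the aux-int layout concrete:

  package main

  import "fmt"

  type arm64BitField int64

  // Reconstruction of the decoding matching width | lsb<<8 above.
  func (bfc arm64BitField) getARM64BFlsb() int64   { return int64(uint64(bfc) >> 8) }
  func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }

  func main() {
  	bfc := arm64BitField(32 | 3<<8) // lsb=3, width=32
  	fmt.Println(bfc.getARM64BFlsb(), bfc.getARM64BFwidth()) // prints: 3 32
  }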

@@ -7157,37 +7157,37 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (MOVBUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<8-1, sc)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// match: (MOVBUreg (SLLconst [lc] x))
// cond: lc < 8
// result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, sc)) {
if !(lc < 8) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
v.AddArg(x)
return true
}
// match: (MOVBUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<8-1, 0)
// result: (UBFX [armBFAuxInt(sc, 8)] x)
// match: (MOVBUreg (SRLconst [rc] x))
// cond: rc < 8
// result: (UBFX [armBFAuxInt(rc, 8)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, 0)) {
if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8))
v.AddArg(x)
return true
}
@@ -10703,37 +10703,37 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (MOVHUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<16-1, sc)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
// match: (MOVHUreg (SLLconst [lc] x))
// cond: lc < 16
// result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, sc)) {
if !(lc < 16) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
v.AddArg(x)
return true
}
// match: (MOVHUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<16-1, 0)
// result: (UBFX [armBFAuxInt(sc, 16)] x)
// match: (MOVHUreg (SRLconst [rc] x))
// cond: rc < 16
// result: (UBFX [armBFAuxInt(rc, 16)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, 0)) {
if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16))
v.AddArg(x)
return true
}
@@ -12849,37 +12849,37 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (MOVWUreg (SLLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<32-1, sc)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
// match: (MOVWUreg (SLLconst [lc] x))
// cond: lc < 32
// result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
for {
if v_0.Op != OpARM64SLLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, sc)) {
if !(lc < 32) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
v.AddArg(x)
return true
}
// match: (MOVWUreg (SRLconst [sc] x))
// cond: isARM64BFMask(sc, 1<<32-1, 0)
// result: (UBFX [armBFAuxInt(sc, 32)] x)
// match: (MOVWUreg (SRLconst [rc] x))
// cond: rc < 32
// result: (UBFX [armBFAuxInt(rc, 32)] x)
for {
if v_0.Op != OpARM64SRLconst {
break
}
sc := auxIntToInt64(v_0.AuxInt)
rc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, 0)) {
if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
v.AddArg(x)
return true
}
@@ -20130,6 +20130,45 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SLLconst [lc] (MOVWUreg x))
// result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
for {
lc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
v.AddArg(x)
return true
}
// match: (SLLconst [lc] (MOVHUreg x))
// result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
for {
lc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
v.AddArg(x)
return true
}
// match: (SLLconst [lc] (MOVBUreg x))
// result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
for {
lc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (ANDconst [ac] x))
// cond: isARM64BFMask(sc, ac, 0)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
@@ -20148,57 +20187,6 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVWUreg x))
// cond: isARM64BFMask(sc, 1<<32-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 32)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, 0)) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVHUreg x))
// cond: isARM64BFMask(sc, 1<<16-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 16)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, 0)) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (MOVBUreg x))
// cond: isARM64BFMask(sc, 1<<8-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 8)] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, 0)) {
break
}
v.reset(OpARM64UBFIZ)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (UBFIZ [bfc] x))
// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
@@ -20488,75 +20476,6 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (ANDconst [ac] x))
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVWUreg x))
// cond: isARM64BFMask(sc, 1<<32-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, sc)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVHUreg x))
// cond: isARM64BFMask(sc, 1<<16-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, sc)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (MOVBUreg x))
// cond: isARM64BFMask(sc, 1<<8-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, sc)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AddArg(x)
return true
}
// match: (SRLconst [rc] (SLLconst [lc] x))
// cond: lc < rc
// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
@@ -20575,6 +20494,75 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
v.AddArg(x)
return true
}
// match: (SRLconst [rc] (MOVWUreg x))
// cond: rc < 32
// result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
for {
rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
x := v_0.Args[0]
if !(rc < 32) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
v.AddArg(x)
return true
}
// match: (SRLconst [rc] (MOVHUreg x))
// cond: rc < 16
// result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
for {
rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
x := v_0.Args[0]
if !(rc < 16) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
v.AddArg(x)
return true
}
// match: (SRLconst [rc] (MOVBUreg x))
// cond: rc < 8
// result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
for {
rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
x := v_0.Args[0]
if !(rc < 8) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (ANDconst [ac] x))
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFX [bfc] x))
// cond: sc < bfc.getARM64BFwidth()
// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)

@@ -77,11 +77,13 @@ func bfxil2(x, y uint64) uint64 {
}
// sbfiz
// merge shifts into sbfiz: (x << lc) >> rc && lc > rc.
func sbfiz1(x int64) int64 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
return (x << 4) >> 3
}
// merge shift and sign-extension into sbfiz.
func sbfiz2(x int32) int64 {
return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]29",-"LSL"
}
@@ -94,6 +96,8 @@ func sbfiz4(x int8) int64 {
return int64(x << 3) // arm64:"SBFIZ\t[$]3, R[0-9]+, [$]5",-"LSL"
}
// sbfiz combinations.
// merge shift with sbfiz into sbfiz.
func sbfiz5(x int32) int32 {
// arm64:"SBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
return (x << 4) >> 3
@@ -112,6 +116,7 @@ func sbfiz8(x int32) int64 {
}
// sbfx
// merge shifts into sbfx: (x << lc) >> rc && lc <= rc.
func sbfx1(x int64) int64 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]60",-"LSL",-"ASR"
}
@@ -120,6 +125,7 @@ func sbfx2(x int64) int64 {
return (x << 60) >> 60 // arm64:"SBFX\tZR, R[0-9]+, [$]4",-"LSL",-"ASR"
}
// merge shift and sign-extension into sbfx.
func sbfx3(x int32) int64 {
return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]29",-"ASR"
}
@@ -132,131 +138,181 @@ func sbfx5(x int8) int64 {
return int64(x) >> 3 // arm64:"SBFX\t[$]3, R[0-9]+, [$]5",-"ASR"
}
func sbfx6(x int32) int32 {
func sbfx6(x int32) int64 {
return int64(x >> 30) // arm64:"SBFX\t[$]30, R[0-9]+, [$]2"
}
func sbfx7(x int16) int64 {
return int64(x >> 10) // arm64:"SBFX\t[$]10, R[0-9]+, [$]6"
}
func sbfx8(x int8) int64 {
return int64(x >> 5) // arm64:"SBFX\t[$]5, R[0-9]+, [$]3"
}
// sbfx combinations.
// merge shifts with sbfiz into sbfx.
func sbfx9(x int32) int32 {
return (x << 3) >> 4 // arm64:"SBFX\t[$]1, R[0-9]+, [$]28",-"LSL",-"ASR"
}
// merge sbfx and sign-extension into sbfx.
func sbfx7(x int32) int64 {
func sbfx10(x int32) int64 {
c := x + 5
return int64(c >> 20) // arm64:"SBFX\t[$]20, R[0-9]+, [$]12",-"MOVW\tR[0-9]+, R[0-9]+"
}
// ubfiz
// merge shifts into ubfiz: (x<<lc)>>rc && lc>rc
func ubfiz1(x uint64) uint64 {
// arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
return (x & 0xfff) << 3
}
func ubfiz2(x uint64) uint64 {
// arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
return (x << 4) & 0xfff0
}
func ubfiz3(x uint32) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL"
}
func ubfiz4(x uint16) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}
func ubfiz5(x uint8) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL"
}
func ubfiz6(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]60",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]3, [$]62, [$]1, ",-"SLD",-"SRD"
return (x << 4) >> 3
}
func ubfiz7(x uint32) uint32 {
// merge shift and zero-extension into ubfiz.
func ubfiz2(x uint32) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]32",-"LSL"
}
func ubfiz3(x uint16) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]16",-"LSL"
}
func ubfiz4(x uint8) uint64 {
return uint64(x+1) << 3 // arm64:"UBFIZ\t[$]3, R[0-9]+, [$]8",-"LSL"
}
func ubfiz5(x uint8) uint64 {
return uint64(x) << 60 // arm64:"UBFIZ\t[$]60, R[0-9]+, [$]4",-"LSL"
}
func ubfiz6(x uint32) uint64 {
return uint64(x << 30) // arm64:"UBFIZ\t[$]30, R[0-9]+, [$]2",
}
func ubfiz7(x uint16) uint64 {
return uint64(x << 10) // arm64:"UBFIZ\t[$]10, R[0-9]+, [$]6",
}
func ubfiz8(x uint8) uint64 {
return uint64(x << 7) // arm64:"UBFIZ\t[$]7, R[0-9]+, [$]1",
}
// merge ANDconst into ubfiz.
func ubfiz9(x uint64) uint64 {
// arm64:"UBFIZ\t[$]3, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]49, [$]60, [$]3,",-"SLD",-"AND"
return (x & 0xfff) << 3
}
func ubfiz10(x uint64) uint64 {
// arm64:"UBFIZ\t[$]4, R[0-9]+, [$]12",-"LSL",-"AND"
// s390x:"RISBGZ\t[$]48, [$]59, [$]4,",-"SLD",-"AND"
return (x << 4) & 0xfff0
}
// ubfiz combinations
func ubfiz11(x uint32) uint32 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]28",-"LSL",-"LSR"
return (x << 4) >> 3
}
func ubfiz8(x uint64) uint64 {
func ubfiz12(x uint64) uint64 {
// arm64:"UBFIZ\t[$]1, R[0-9]+, [$]20",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]43, [$]62, [$]1, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 4) >> 3
}
func ubfiz9(x uint64) uint64 {
func ubfiz13(x uint64) uint64 {
// arm64:"UBFIZ\t[$]5, R[0-9]+, [$]13",-"LSL",-"LSR",-"AND"
return ((x << 3) & 0xffff) << 2
}
func ubfiz10(x uint64) uint64 {
func ubfiz14(x uint64) uint64 {
// arm64:"UBFIZ\t[$]7, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]45, [$]56, [$]7, ",-"SLD",-"SRD",-"AND"
return ((x << 5) & (0xfff << 5)) << 2
}
// ubfx
// merge shifts into ubfx: (x<<lc)>>rc && lc<rc
func ubfx1(x uint64) uint64 {
// arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
return (x >> 25) & 1023
}
func ubfx2(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
return (x & 0x0ff0) >> 4
}
func ubfx3(x uint32) uint64 {
return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR"
}
func ubfx4(x uint16) uint64 {
return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR"
}
func ubfx5(x uint8) uint64 {
return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR"
}
func ubfx6(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]62",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]2, [$]63, [$]63,",-"SLD",-"SRD"
return (x << 1) >> 2
}
func ubfx7(x uint32) uint32 {
// merge shift and zero-extension into ubfx.
func ubfx2(x uint32) uint64 {
return uint64(x >> 15) // arm64:"UBFX\t[$]15, R[0-9]+, [$]17",-"LSR"
}
func ubfx3(x uint16) uint64 {
return uint64(x >> 9) // arm64:"UBFX\t[$]9, R[0-9]+, [$]7",-"LSR"
}
func ubfx4(x uint8) uint64 {
return uint64(x >> 3) // arm64:"UBFX\t[$]3, R[0-9]+, [$]5",-"LSR"
}
func ubfx5(x uint32) uint64 {
return uint64(x) >> 30 // arm64:"UBFX\t[$]30, R[0-9]+, [$]2"
}
func ubfx6(x uint16) uint64 {
return uint64(x) >> 10 // arm64:"UBFX\t[$]10, R[0-9]+, [$]6"
}
func ubfx7(x uint8) uint64 {
return uint64(x) >> 3 // arm64:"UBFX\t[$]3, R[0-9]+, [$]5"
}
// merge ANDconst into ubfx.
func ubfx8(x uint64) uint64 {
// arm64:"UBFX\t[$]25, R[0-9]+, [$]10",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]54, [$]63, [$]39, ",-"SRD",-"AND"
return (x >> 25) & 1023
}
func ubfx9(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]8",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]56, [$]63, [$]60, ",-"SRD",-"AND"
return (x & 0x0ff0) >> 4
}
// ubfx combinations.
func ubfx10(x uint32) uint32 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]30",-"LSL",-"LSR"
return (x << 1) >> 2
}
func ubfx8(x uint64) uint64 {
func ubfx11(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]12",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]52, [$]63, [$]63,",-"SLD",-"SRD",-"AND"
return ((x << 1) >> 2) & 0xfff
}
func ubfx9(x uint64) uint64 {
func ubfx12(x uint64) uint64 {
// arm64:"UBFX\t[$]4, R[0-9]+, [$]11",-"LSL",-"LSR",-"AND"
// s390x:"RISBGZ\t[$]53, [$]63, [$]60, ",-"SLD",-"SRD",-"AND"
return ((x >> 3) & 0xfff) >> 1
}
func ubfx10(x uint64) uint64 {
func ubfx13(x uint64) uint64 {
// arm64:"UBFX\t[$]5, R[0-9]+, [$]56",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]8, [$]63, [$]59, ",-"SLD",-"SRD"
return ((x >> 2) << 5) >> 8
}
func ubfx11(x uint64) uint64 {
func ubfx14(x uint64) uint64 {
// arm64:"UBFX\t[$]1, R[0-9]+, [$]19",-"LSL",-"LSR"
// s390x:"RISBGZ\t[$]45, [$]63, [$]63, ",-"SLD",-"SRD",-"AND"
return ((x & 0xfffff) << 3) >> 4
}
// merge ubfx and zero-extension into ubfx.
func ubfx12(x uint64) bool {
func ubfx15(x uint64) bool {
midr := x + 10
part_num := uint16((midr >> 4) & 0xfff)
if part_num == 0xd0c { // arm64:"UBFX\t[$]4, R[0-9]+, [$]12",-"MOVHU\tR[0-9]+, R[0-9]+"