cmd/compile: teach prove about and operation

For this code:

	z &= 63
	_ = x<<z | x>>(64-z)

prove can now show that 'x<<z' is in bounds. With that, the ppc64
lowering pass no longer produces an extra '(ANDconst <typ.Int64> [63] z)',
so the rotate rewrite rules, which checked for that type, stopped
matching and codegen/rotate.go failed. As a workaround, remove the type
check from those rules.
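
For reference, a compilable form of the snippet above (the package,
function, and variable names are illustrative, not part of the CL):

	package main

	import "fmt"

	// rotl64 is the manual rotate idiom from the message above. prove now
	// records z&63 <= 63 (unsigned), so the left-shift amount is known to
	// be below 64.
	func rotl64(x uint64, z uint) uint64 {
		z &= 63
		return x<<z | x>>(64-z)
	}

	func main() {
		fmt.Printf("%#x\n", rotl64(1, 60)) // 0x1000000000000000
	}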

Removes 32 bounds checks during make.bat.

Fixes #52563.

Change-Id: I14ed2c093ff5638dfea7de9bc7649c0f756dd71a
Reviewed-on: https://go-review.googlesource.com/c/go/+/404315
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: David Chase <drchase@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Run-TryBot: Wayne Zuo <wdvxdr@golangcn.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Wayne Zuo 2022-05-07 13:58:24 +08:00 committed by Gopher Robot
parent 3391517c0e
commit 1efe38750a
4 changed files with 47 additions and 36 deletions


@@ -127,20 +127,20 @@
// Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
// Lowering rotates
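
For context, these non-const rotate rules are meant to match the masked
shift-and-or shape that math/bits.RotateLeft32/64 (and equivalent
hand-written code) compiles down to; a rough sketch of such a caller
(illustrative only, not from the CL):

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		x := uint64(0x0123456789abcdef)
		// bits.RotateLeft64 is implemented with the same masked
		// shift-and-or idiom that the ROTL rules above recognize.
		fmt.Printf("%#x\n", bits.RotateLeft64(x, 8)) // 0x23456789abcdef01
	}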


@@ -831,6 +831,9 @@ func prove(f *Func) {
case OpCtz64, OpCtz32, OpCtz16, OpCtz8, OpBitLen64, OpBitLen32, OpBitLen16, OpBitLen8:
ft.update(b, v, ft.zero, signed, gt|eq)
// TODO: we could also do <= 64/32/16/8, if that helped.
case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
ft.update(b, v, v.Args[1], unsigned, lt|eq)
ft.update(b, v, v.Args[0], unsigned, lt|eq)
}
}
}
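
The two updates above record that, in the unsigned domain, v = x & y
satisfies v <= y and v <= x. A standalone sketch of the property being
relied on (not compiler code; names are illustrative):

	package main

	import "fmt"

	// For unsigned integers, AND can only clear bits, so the result is
	// never larger than either operand: (x & y) <= x and (x & y) <= y.
	func andIsBounded(x, y uint64) bool {
		v := x & y
		return v <= x && v <= y
	}

	func main() {
		fmt.Println(andIsBounded(^uint64(0), 63)) // true
		fmt.Println(andIsBounded(7, 0))           // true
	}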


@@ -3947,7 +3947,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
}
break
}
// match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3957,7 +3957,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -3987,7 +3987,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
}
break
}
// match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@ -3997,7 +3997,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -4022,7 +4022,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
}
break
}
// match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -4032,7 +4032,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
@@ -4057,7 +4057,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
}
break
}
// match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -4067,7 +4067,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
@@ -11540,7 +11540,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
}
break
}
// match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -11550,7 +11550,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -11580,7 +11580,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
}
break
}
// match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -11590,7 +11590,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -11615,7 +11615,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
}
break
}
// match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -11625,7 +11625,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
@@ -11650,7 +11650,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
}
break
}
// match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -11660,7 +11660,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
@@ -13770,7 +13770,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
}
break
}
// match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13780,7 +13780,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -13810,7 +13810,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
}
break
}
// match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
// result: (ROTL x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13820,7 +13820,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
@@ -13845,7 +13845,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
}
break
}
// match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13855,7 +13855,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
@@ -13880,7 +13880,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
}
break
}
// match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
// result: (ROTLW x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13890,7 +13890,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
_ = v_0.Args[1]
x := v_0.Args[0]
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]


@@ -1036,6 +1036,14 @@ func divShiftClean32(n int32) int32 {
return n / int32(16) // ERROR "Proved Rsh32x64 shifts to zero"
}
func and(p []byte) ([]byte, []byte) { // issue #52563
const blocksize = 16
fullBlocks := len(p) &^ (blocksize - 1)
blk := p[:fullBlocks] // ERROR "Proved IsSliceInBounds$"
rem := p[fullBlocks:] // ERROR "Proved IsSliceInBounds$"
return blk, rem
}
//go:noinline
func useInt(a int) {
}
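
In the new test case above, len(p) &^ (blocksize-1) reduces to an AND
with a constant mask, so the new facts let prove bound fullBlocks by
len(p) and both slice expressions are in bounds. A small usage sketch of
the same idiom (the slice length here is illustrative):

	package main

	import "fmt"

	func main() {
		p := make([]byte, 37)
		const blocksize = 16

		// len(p) &^ (blocksize-1) is an AND with a constant, so prove can
		// now show fullBlocks <= len(p) and drop both bounds checks.
		fullBlocks := len(p) &^ (blocksize - 1)
		blk, rem := p[:fullBlocks], p[fullBlocks:]
		fmt.Println(fullBlocks, len(blk), len(rem)) // 32 32 5
	}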