diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 7dc381bd81..98cd865182 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -624,6 +624,14 @@
 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
 (OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
 (XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
+(ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) x) mem) =>
+	(BTSLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+(ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) x) mem) =>
+	(BTSQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
+(XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) x) mem) =>
+	(BTCLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+(XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) x) mem) =>
+	(BTCQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 
 // Convert ORconst into BTS, if the code gets smaller, with boundary being
 // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
@@ -646,6 +654,10 @@
 	=> (BTRQconst [int8(log64(^c))] x)
 (ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
 	=> (BTRLconst [int8(log32(^c))] x)
+(ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) x)) mem) =>
+	(BTRLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+(ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) x)) mem) =>
+	(BTRQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 
 // Special-case bit patterns on first/last bit.
 // generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
@@ -2064,11 +2076,15 @@
 ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
 ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
 (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
-	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+	((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+	((BTC|BTR|BTS)Lmodify [off] {sym} ptr (ANDLconst [31] x) mem)
 (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
-(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
-	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+	((BTC|BTR|BTS)Qmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 
 // Merge ADDQconst and LEAQ into atomic loads.
 (MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 6c3fe1d192..af53cc4f9d 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -363,6 +363,11 @@ func init() {
 		{name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
 
 		// direct bit operation on memory operand
+		//
+		// Note that these operations do not mask the bit offset (arg1), and will write beyond their expected
+		// bounds if that argument is larger than 64/32 (for BT*Q and BT*L, respectively). If the compiler
+		// cannot prove that arg1 is in range, it must be explicitly masked (see e.g. the patterns that produce
+		// BT*modify from (MOVstore (BT* (MOVLload ptr mem) x) mem)).
 		{name: "BTCQmodify", argLength: 3, reg: gpstore, asm: "BTCQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
 		{name: "BTCLmodify", argLength: 3, reg: gpstore, asm: "BTCL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
 		{name: "BTSQmodify", argLength: 3, reg: gpstore, asm: "BTSQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 36f872d0c4..ce94fdb952 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -3001,6 +3001,36 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) x)) mem)
+	// result: (BTRLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		if v_1.Op != OpAMD64NOTL {
+			break
+		}
+		s := v_1.Args[0]
+		if s.Op != OpAMD64SHLL {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTRLmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (ANDLmodify [off1+off2] {sym} base val mem)
@@ -3380,6 +3410,36 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) x)) mem)
+	// result: (BTRQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		if v_1.Op != OpAMD64NOTQ {
+			break
+		}
+		s := v_1.Args[0]
+		if s.Op != OpAMD64SHLQ {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTRQmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (ANDQmodify [off1+off2] {sym} base val mem)
@@ -12750,9 +12810,9 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		}
 		break
 	}
-	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
+	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTCLmodify [off] {sym} ptr x mem)
+	// result: (BTCLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -12761,6 +12821,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		if y.Op != OpAMD64BTCL {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -12773,12 +12834,15 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		v.reset(OpAMD64BTCLmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
-	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
+	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTRLmodify [off] {sym} ptr x mem)
+	// result: (BTRLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -12787,6 +12851,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		if y.Op != OpAMD64BTRL {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -12799,12 +12864,15 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		v.reset(OpAMD64BTRLmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
-	// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
+	// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTSLmodify [off] {sym} ptr x mem)
+	// result: (BTSLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -12813,6 +12881,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		if y.Op != OpAMD64BTSL {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -12825,7 +12894,10 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
 		v.reset(OpAMD64BTSLmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
 	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
@@ -13566,6 +13638,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
 	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
@@ -13931,9 +14004,9 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		}
 		break
 	}
-	// match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+	// match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTCQmodify [off] {sym} ptr x mem)
+	// result: (BTCQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -13942,6 +14015,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		if y.Op != OpAMD64BTCQ {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -13954,12 +14028,15 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		v.reset(OpAMD64BTCQmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
-	// match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+	// match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTRQmodify [off] {sym} ptr x mem)
+	// result: (BTRQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -13968,6 +14045,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		if y.Op != OpAMD64BTRQ {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -13980,12 +14058,15 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		v.reset(OpAMD64BTRQmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
-	// match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+	// match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
 	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
-	// result: (BTSQmodify [off] {sym} ptr x mem)
+	// result: (BTSQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
 	for {
 		off := auxIntToInt32(v.AuxInt)
 		sym := auxToSym(v.Aux)
@@ -13994,6 +14075,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		if y.Op != OpAMD64BTSQ {
 			break
 		}
+		t := y.Type
 		x := y.Args[1]
 		l := y.Args[0]
 		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
@@ -14006,7 +14088,10 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
 		v.reset(OpAMD64BTSQmodify)
 		v.AuxInt = int32ToAuxInt(off)
 		v.Aux = symToAux(sym)
-		v.AddArg3(ptr, x, mem)
+		v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
 		return true
 	}
 	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
@@ -18391,6 +18476,33 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) x) mem)
+	// result: (BTSLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		s := v_1
+		if s.Op != OpAMD64SHLL {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTSLmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (ORLmodify [off1+off2] {sym} base val mem)
@@ -20066,6 +20178,33 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) x) mem)
+	// result: (BTSQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		s := v_1
+		if s.Op != OpAMD64SHLQ {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTSQmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (ORQmodify [off1+off2] {sym} base val mem)
@@ -28155,6 +28294,33 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) x) mem)
+	// result: (BTCLmodify [off] {sym} ptr (ANDLconst [31] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		s := v_1
+		if s.Op != OpAMD64SHLL {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTCLmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+		v0.AuxInt = int32ToAuxInt(31)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (XORLmodify [off1+off2] {sym} base val mem)
@@ -28523,6 +28689,33 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
+	b := v.Block
+	// match: (XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) x) mem)
+	// result: (BTCQmodify [off] {sym} ptr (ANDQconst [63] x) mem)
+	for {
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
+		ptr := v_0
+		s := v_1
+		if s.Op != OpAMD64SHLQ {
+			break
+		}
+		t := s.Type
+		x := s.Args[1]
+		s_0 := s.Args[0]
+		if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+			break
+		}
+		mem := v_2
+		v.reset(OpAMD64BTCQmodify)
+		v.AuxInt = int32ToAuxInt(off)
+		v.Aux = symToAux(sym)
+		v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+		v0.AuxInt = int32ToAuxInt(63)
+		v0.AddArg(x)
+		v.AddArg3(ptr, v0, mem)
+		return true
+	}
 	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
 	// cond: is32Bit(int64(off1)+int64(off2))
 	// result: (XORQmodify [off1+off2] {sym} base val mem)
diff --git a/test/codegen/bits.go b/test/codegen/bits.go
index 806dad13c8..d41383f42c 100644
--- a/test/codegen/bits.go
+++ b/test/codegen/bits.go
@@ -262,8 +262,8 @@ func bitcompl32(a, b uint32) (n uint32) {
 	return n
 }
 
-// check direct operation on memory with constant source
-func bitOpOnMem(a []uint32) {
+// check direct operation on memory with constant and shifted constant sources
+func bitOpOnMem(a []uint32, b, c, d uint32) {
 	// amd64:`ANDL\s[$]200,\s\([A-Z]+\)`
 	a[0] &= 200
 	// amd64:`ORL\s[$]220,\s4\([A-Z]+\)`
@@ -276,6 +276,12 @@
 	a[4] |= 0x4000
 	// amd64:`BTCL\s[$]13,\s20\([A-Z]+\)`,-`XORL`
 	a[5] ^= 0x2000
+	// amd64:`BTRL\s[A-Z]+,\s24\([A-Z]+\)`
+	a[6] &^= 1 << (b & 31)
+	// amd64:`BTSL\s[A-Z]+,\s28\([A-Z]+\)`
+	a[7] |= 1 << (c & 31)
+	// amd64:`BTCL\s[A-Z]+,\s32\([A-Z]+\)`
+	a[8] ^= 1 << (d & 31)
 }
 
 func bitcheckMostNegative(b uint8) bool {
diff --git a/test/fixedbugs/issue45242.go b/test/fixedbugs/issue45242.go
new file mode 100644
index 0000000000..b99722120d
--- /dev/null
+++ b/test/fixedbugs/issue45242.go
@@ -0,0 +1,28 @@
+// run
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+//go:noinline
+func repro(b []byte, bit int32) {
+	_ = b[3]
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 | 1<<(bit&31)
+	b[0] = byte(v)
+	b[1] = byte(v >> 8)
+	b[2] = byte(v >> 16)
+	b[3] = byte(v >> 24)
+}
+
+func main() {
+	var b [8]byte
+	repro(b[:], 32)
+	want := [8]byte{1, 0, 0, 0, 0, 0, 0, 0}
+	if b != want {
+		panic(fmt.Sprintf("got %v, want %v\n", b, want))
+	}
+}
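As a quick illustration of the semantics the added masks preserve, here is a minimal standalone sketch (not part of the change above; the names setBit and words are hypothetical). Go defines 1 << (bit & 31) to wrap the bit offset within the 32-bit word, but the x86 BTS instruction with a register bit offset and a memory operand addresses an unbounded bit string, so lowering this OR to BTSLmodify without the explicit ANDLconst [31] could flip a bit in an adjacent word, which is the miscompilation reported in issue 45242.

package main

import "fmt"

//go:noinline
func setBit(p *uint32, bit uint32) {
	// With the rules above, this read-modify-write may be lowered to a
	// BTSL on memory; the generated ANDLconst [31] keeps the bit offset
	// in [0, 31], matching Go's masked-shift semantics.
	*p |= 1 << (bit & 31)
}

func main() {
	var words [2]uint32
	setBit(&words[0], 32) // 32 & 31 == 0, so this must set bit 0 of words[0]
	fmt.Println(words)    // must print [1 0]; an unmasked BTSL would give [0 1]
}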