[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: I083d1e4e997b30d9fab10940401eaf160e36f6c1
Dmitri Shuralyov 2020-05-07 23:27:25 -04:00
commit dd98c0ca3f
63 changed files with 1083 additions and 451 deletions


@ -15,6 +15,7 @@ import (
"cmd/internal/src"
"encoding/binary"
"fmt"
"io"
"math/big"
"os"
"strings"
@ -191,7 +192,7 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj2.FingerprintType
}
// Fingerprint
n, err := in.Read(fingerprint[:])
n, err := io.ReadFull(in, fingerprint[:])
if err != nil || n != len(fingerprint) {
yyerror("import %s: error reading fingerprint", pkg.Path)
errorexit()
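For context, the switch from Read to io.ReadFull matters because a single Read may legally return fewer bytes than the buffer holds with a nil error, while io.ReadFull keeps reading until the buffer is full or an error occurs. A minimal standalone sketch (the reader below is a stand-in, not the compiler's bio.Reader):

package main

import (
    "bytes"
    "fmt"
    "io"
)

func main() {
    var fingerprint [8]byte
    // A reader that delivers the data in two chunks, as a buffered stream might.
    r := io.MultiReader(bytes.NewReader([]byte{1, 2, 3}), bytes.NewReader([]byte{4, 5, 6, 7, 8}))
    n, err := io.ReadFull(r, fingerprint[:])
    fmt.Println(n, err, fingerprint) // 8 <nil> [1 2 3 4 5 6 7 8]; a single Read would have stopped at 3 bytes
}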


@ -528,6 +528,9 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
for _, r := range n.List.Slice() {
a, value := splitnode(r)
if a == nblank && candiscard(value) {
continue
}
switch value.Op {
case OSLICELIT:


@ -735,18 +735,33 @@
(ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x)
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(AND(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [c & d] x)
(BTR(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [d &^ (1<<uint32(c))] x)
(AND(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [c &^ (1<<uint32(d))] x)
(BTR(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [^(1<<uint32(c) | 1<<uint32(d))] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ d] x)
(BTC(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [d ^ 1<<uint32(c)] x)
(XOR(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ 1<<uint32(d)] x)
(BTC(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [1<<uint32(c) ^ 1<<uint32(d)] x)
(OR(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [c | d] x)
(OR(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [c | 1<<uint32(d)] x)
(BTS(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [d | 1<<uint32(c)] x)
(BTS(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [1<<uint32(d) | 1<<uint32(c)] x)
(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)
(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
(BTSLconst [c] (ORLconst [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
(ORLconst [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c))) => (ANDQconst [d &^ (1<<uint32(c))] x)
(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d))) => (ANDQconst [c &^ (1<<uint32(d))] x)
(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c)) => (XORQconst [d ^ 1<<uint32(c)] x)
(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d)) => (XORQconst [c ^ 1<<uint32(d)] x)
(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
(BTSQconst [c] (ORQconst [d] x)) && is32Bit(int64(d) | 1<<uint32(c)) => (ORQconst [d | 1<<uint32(c)] x)
(ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d)) => (ORQconst [c | 1<<uint32(d)] x)
(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
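These folding rules rest on plain bit identities; for example, the BTRLconst/ANDLconst pair uses the fact that clearing bit c after masking with d is the same as masking with d with bit c already cleared. A hedged standalone check with arbitrary values:

package main

import "fmt"

func main() {
    x := int32(0b1111_0110)
    c := uint32(2)
    d := int32(0b0011_1111)
    btrAfterAnd := (x & d) &^ (1 << c) // (BTRLconst [c] (ANDLconst [d] x))
    folded := x & (d &^ (1 << c))      // (ANDLconst [d &^ (1<<uint32(c))] x)
    fmt.Println(btrAfterAnd == folded) // true
}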


@ -2797,28 +2797,28 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
// match: (ANDLconst [c] (ANDLconst [d] x))
// result: (ANDLconst [c & d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ANDLconst)
v.AuxInt = c & d
v.AuxInt = int32ToAuxInt(c & d)
v.AddArg(x)
return true
}
// match: (ANDLconst [c] (BTRLconst [d] x))
// result: (ANDLconst [c &^ (1<<uint32(d))] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTRLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ANDLconst)
v.AuxInt = c &^ (1 << uint32(d))
v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
v.AddArg(x)
return true
}
@ -3176,28 +3176,32 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
// match: (ANDQconst [c] (ANDQconst [d] x))
// result: (ANDQconst [c & d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ANDQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ANDQconst)
v.AuxInt = c & d
v.AuxInt = int32ToAuxInt(c & d)
v.AddArg(x)
return true
}
// match: (ANDQconst [c] (BTRQconst [d] x))
// cond: is32Bit(int64(c) &^ (1<<uint32(d)))
// result: (ANDQconst [c &^ (1<<uint32(d))] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTRQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
break
}
v.reset(OpAMD64ANDQconst)
v.AuxInt = c &^ (1 << uint32(d))
v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
v.AddArg(x)
return true
}
@ -3494,28 +3498,28 @@ func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
// match: (BTCLconst [c] (XORLconst [d] x))
// result: (XORLconst [d ^ 1<<uint32(c)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64XORLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64XORLconst)
v.AuxInt = d ^ 1<<uint32(c)
v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
v.AddArg(x)
return true
}
// match: (BTCLconst [c] (BTCLconst [d] x))
// result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
// result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTCLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64XORLconst)
v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
v.AddArg(x)
return true
}
@ -3635,30 +3639,38 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
v_0 := v.Args[0]
// match: (BTCQconst [c] (XORQconst [d] x))
// cond: is32Bit(int64(d) ^ 1<<uint32(c))
// result: (XORQconst [d ^ 1<<uint32(c)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64XORQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
break
}
v.reset(OpAMD64XORQconst)
v.AuxInt = d ^ 1<<uint32(c)
v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
v.AddArg(x)
return true
}
// match: (BTCQconst [c] (BTCQconst [d] x))
// cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTCQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
break
}
v.reset(OpAMD64XORQconst)
v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
v.AddArg(x)
return true
}
@ -3970,28 +3982,28 @@ func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
// match: (BTRLconst [c] (ANDLconst [d] x))
// result: (ANDLconst [d &^ (1<<uint32(c))] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64ANDLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ANDLconst)
v.AuxInt = d &^ (1 << uint32(c))
v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
v.AddArg(x)
return true
}
// match: (BTRLconst [c] (BTRLconst [d] x))
// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTRLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ANDLconst)
v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
v.AddArg(x)
return true
}
@ -4137,30 +4149,38 @@ func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
return true
}
// match: (BTRQconst [c] (ANDQconst [d] x))
// cond: is32Bit(int64(d) &^ (1<<uint32(c)))
// result: (ANDQconst [d &^ (1<<uint32(c))] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64ANDQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
break
}
v.reset(OpAMD64ANDQconst)
v.AuxInt = d &^ (1 << uint32(c))
v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
v.AddArg(x)
return true
}
// match: (BTRQconst [c] (BTRQconst [d] x))
// cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTRQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
break
}
v.reset(OpAMD64ANDQconst)
v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
v.AddArg(x)
return true
}
@ -4308,28 +4328,28 @@ func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
// match: (BTSLconst [c] (ORLconst [d] x))
// result: (ORLconst [d | 1<<uint32(c)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64ORLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ORLconst)
v.AuxInt = d | 1<<uint32(c)
v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
v.AddArg(x)
return true
}
// match: (BTSLconst [c] (BTSLconst [d] x))
// result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
// result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTSLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ORLconst)
v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
v.AddArg(x)
return true
}
@ -4475,30 +4495,38 @@ func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
return true
}
// match: (BTSQconst [c] (ORQconst [d] x))
// cond: is32Bit(int64(d) | 1<<uint32(c))
// result: (ORQconst [d | 1<<uint32(c)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64ORQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(d) | 1<<uint32(c))) {
break
}
v.reset(OpAMD64ORQconst)
v.AuxInt = d | 1<<uint32(c)
v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
v.AddArg(x)
return true
}
// match: (BTSQconst [c] (BTSQconst [d] x))
// result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
// cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
// result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt8(v.AuxInt)
if v_0.Op != OpAMD64BTSQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
break
}
v.reset(OpAMD64ORQconst)
v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
v.AddArg(x)
return true
}
@ -18127,28 +18155,28 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
// match: (ORLconst [c] (ORLconst [d] x))
// result: (ORLconst [c | d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ORLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ORLconst)
v.AuxInt = c | d
v.AuxInt = int32ToAuxInt(c | d)
v.AddArg(x)
return true
}
// match: (ORLconst [c] (BTSLconst [d] x))
// result: (ORLconst [c | 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTSLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ORLconst)
v.AuxInt = c | 1<<uint32(d)
v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
v.AddArg(x)
return true
}
@ -19754,28 +19782,32 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
// match: (ORQconst [c] (ORQconst [d] x))
// result: (ORQconst [c | d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64ORQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64ORQconst)
v.AuxInt = c | d
v.AuxInt = int32ToAuxInt(c | d)
v.AddArg(x)
return true
}
// match: (ORQconst [c] (BTSQconst [d] x))
// cond: is32Bit(int64(c) | 1<<uint32(d))
// result: (ORQconst [c | 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTSQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(c) | 1<<uint32(d))) {
break
}
v.reset(OpAMD64ORQconst)
v.AuxInt = c | 1<<uint32(d)
v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
v.AddArg(x)
return true
}
@ -27785,28 +27817,28 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
// match: (XORLconst [c] (XORLconst [d] x))
// result: (XORLconst [c ^ d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64XORLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64XORLconst)
v.AuxInt = c ^ d
v.AuxInt = int32ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
// match: (XORLconst [c] (BTCLconst [d] x))
// result: (XORLconst [c ^ 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTCLconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64XORLconst)
v.AuxInt = c ^ 1<<uint32(d)
v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
v.AddArg(x)
return true
}
@ -28151,28 +28183,32 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
// match: (XORQconst [c] (XORQconst [d] x))
// result: (XORQconst [c ^ d] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64XORQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpAMD64XORQconst)
v.AuxInt = c ^ d
v.AuxInt = int32ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
// match: (XORQconst [c] (BTCQconst [d] x))
// cond: is32Bit(int64(c) ^ 1<<uint32(d))
// result: (XORQconst [c ^ 1<<uint32(d)] x)
for {
c := v.AuxInt
c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpAMD64BTCQconst {
break
}
d := v_0.AuxInt
d := auxIntToInt8(v_0.AuxInt)
x := v_0.Args[0]
if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
break
}
v.reset(OpAMD64XORQconst)
v.AuxInt = c ^ 1<<uint32(d)
v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
v.AddArg(x)
return true
}


@ -287,8 +287,8 @@ var optab = []Optab{
{AADD, C_BITCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0},
{AADD, C_BITCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0},
{ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0},
{AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, 0, 0},
{AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, 0, 0},
{AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0},
{AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0},
{AADD, C_MOVCON2, C_RSP, C_NONE, C_RSP, 13, 12, 0, 0, 0},
{AADD, C_MOVCON2, C_NONE, C_NONE, C_RSP, 13, 12, 0, 0, 0},
{AADD, C_MOVCON3, C_RSP, C_NONE, C_RSP, 13, 16, 0, 0, 0},
@ -1072,16 +1072,29 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// We use REGTMP as a scratch register during call injection,
// so instruction sequences that use REGTMP are unsafe to
// preempt asynchronously.
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint)
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, c.isRestartable)
}
// Return whether p is an unsafe point.
// isUnsafePoint returns whether p is an unsafe point.
func (c *ctxt7) isUnsafePoint(p *obj.Prog) bool {
if p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP {
return true
// If p explicitly uses REGTMP, it's unsafe to preempt, because the
// preemption sequence clobbers REGTMP.
return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP
}
// isRestartable returns whether p is a multi-instruction sequence that,
// if preempted, can be restarted.
func (c *ctxt7) isRestartable(p *obj.Prog) bool {
if c.isUnsafePoint(p) {
return false
}
// Most of the multi-instruction sequence uses REGTMP, except
// ones marked safe.
// If p is a multi-instruction sequence whose uses of REGTMP were inserted
// by the assembler in order to materialize a large constant/offset, we can
// restart p (at the start of the instruction sequence) and recompute the
// content of REGTMP upon async preemption. Currently, all cases of
// assembler-inserted REGTMP fall into this category.
// If p doesn't use REGTMP, it can simply be preempted, so we don't
// mark it.
o := c.oplook(p)
return o.size > 4 && o.flag&NOTUSETMP == 0
}
@ -3831,6 +3844,8 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= fields | uint32(rs&31)<<16 | uint32(rb&31)<<5 | uint32(rt&31)
case 48: /* ADD $C_ADDCON2, Rm, Rd */
// NOTE: this case does not use REGTMP. If it ever does,
// remove the NOTUSETMP flag in optab.
op := c.opirr(p, p.As)
if op&Sbit != 0 {
c.ctxt.Diag("can not break addition/subtraction when S bit is set", p)


@ -526,16 +526,29 @@ func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// We use REGTMP as a scratch register during call injection,
// so instruction sequences that use REGTMP are unsafe to
// preempt asynchronously.
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint)
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, c.isRestartable)
}
// Return whether p is an unsafe point.
// isUnsafePoint returns whether p is an unsafe point.
func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool {
if p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP {
return true
// If p explicitly uses REGTMP, it's unsafe to preempt, because the
// preemption sequence clobbers REGTMP.
return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP
}
// isRestartable returns whether p is a multi-instruction sequence that,
// if preempted, can be restarted.
func (c *ctxt0) isRestartable(p *obj.Prog) bool {
if c.isUnsafePoint(p) {
return false
}
// Most of the multi-instruction sequence uses REGTMP, except
// ones marked safe.
// If p is a multi-instruction sequence whose uses of REGTMP were inserted
// by the assembler in order to materialize a large constant/offset, we can
// restart p (at the start of the instruction sequence) and recompute the
// content of REGTMP upon async preemption. Currently, all cases of
// assembler-inserted REGTMP fall into this category.
// If p doesn't use REGTMP, it can simply be preempted, so we don't
// mark it.
o := c.oplook(p)
return o.size > 4 && o.flag&NOTUSETMP == 0
}


@ -236,13 +236,13 @@ func (ctxt *Link) StartUnsafePoint(p *Prog, newprog ProgAlloc) *Prog {
pcdata.From.Type = TYPE_CONST
pcdata.From.Offset = objabi.PCDATA_RegMapIndex
pcdata.To.Type = TYPE_CONST
pcdata.To.Offset = -2 // pcdata -2 marks unsafe point
pcdata.To.Offset = objabi.PCDATA_RegMapUnsafe
return pcdata
}
// EndUnsafePoint generates PCDATA Progs after p to mark the end of an
// unsafe point, restoring the stack map index to oldval.
// unsafe point, restoring the register map index to oldval.
// The unsafe point ends right after p.
// It returns the last Prog generated.
func (ctxt *Link) EndUnsafePoint(p *Prog, newprog ProgAlloc, oldval int64) *Prog {
@ -253,23 +253,33 @@ func (ctxt *Link) EndUnsafePoint(p *Prog, newprog ProgAlloc, oldval int64) *Prog
pcdata.To.Type = TYPE_CONST
pcdata.To.Offset = oldval
// TODO: register map?
return pcdata
}
// MarkUnsafePoints inserts PCDATAs to mark nonpreemptible instruction
// sequences, based on isUnsafePoint predicate. p0 is the start of the
// instruction stream.
func MarkUnsafePoints(ctxt *Link, p0 *Prog, newprog ProgAlloc, isUnsafePoint func(*Prog) bool) {
// MarkUnsafePoints inserts PCDATAs to mark nonpreemptible and restartable
// instruction sequences, based on isUnsafePoint and isRestartable predicate.
// p0 is the start of the instruction stream.
// isUnsafePoint(p) returns true if p is not safe for async preemption.
// isRestartable(p) returns true if we can restart at the start of p (this Prog)
// upon async preemption. (Currently multi-Prog restartable sequence is not
// supported.)
// isRestartable can be nil. In this case it is treated as always returning false.
// If isUnsafePoint(p) and isRestartable(p) are both true, it is treated as
// an unsafe point.
func MarkUnsafePoints(ctxt *Link, p0 *Prog, newprog ProgAlloc, isUnsafePoint, isRestartable func(*Prog) bool) {
if isRestartable == nil {
// Default implementation: nothing is restartable.
isRestartable = func(*Prog) bool { return false }
}
prev := p0
oldval := int64(-1) // entry pcdata
prevPcdata := int64(-1) // entry PC data value
prevRestart := int64(0)
for p := prev.Link; p != nil; p, prev = p.Link, p {
if p.As == APCDATA && p.From.Offset == objabi.PCDATA_RegMapIndex {
oldval = p.To.Offset
prevPcdata = p.To.Offset
continue
}
if oldval == -2 {
if prevPcdata == objabi.PCDATA_RegMapUnsafe {
continue // already unsafe
}
if isUnsafePoint(p) {
@ -283,7 +293,39 @@ func MarkUnsafePoints(ctxt *Link, p0 *Prog, newprog ProgAlloc, isUnsafePoint fun
if p.Link == nil {
break // Reached the end, don't bother marking the end
}
p = ctxt.EndUnsafePoint(p, newprog, oldval)
p = ctxt.EndUnsafePoint(p, newprog, prevPcdata)
p.Pc = p.Link.Pc
continue
}
if isRestartable(p) {
val := int64(objabi.PCDATA_Restart1)
if val == prevRestart {
val = objabi.PCDATA_Restart2
}
prevRestart = val
q := Appendp(prev, newprog)
q.As = APCDATA
q.From.Type = TYPE_CONST
q.From.Offset = objabi.PCDATA_RegMapIndex
q.To.Type = TYPE_CONST
q.To.Offset = val
q.Pc = p.Pc
q.Link = p
if p.Link == nil {
break // Reached the end, don't bother marking the end
}
if isRestartable(p.Link) {
// Next Prog is also restartable. No need to mark the end
// of this sequence; we'll just go ahead and mark the next one.
continue
}
p = Appendp(p, newprog)
p.As = APCDATA
p.From.Type = TYPE_CONST
p.From.Offset = objabi.PCDATA_RegMapIndex
p.To.Type = TYPE_CONST
p.To.Offset = prevPcdata
p.Pc = p.Link.Pc
}
}
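As a toy illustration (not the obj package itself) of the alternation above: two distinct restart values are enough to mark where one restartable sequence ends and an adjacent one begins.

package main

import "fmt"

const (
    pcdataRestart1 = -3 // mirrors objabi.PCDATA_Restart1
    pcdataRestart2 = -4 // mirrors objabi.PCDATA_Restart2
)

func main() {
    prevRestart := int64(0)
    for i := 0; i < 4; i++ { // four adjacent restartable sequences
        val := int64(pcdataRestart1)
        if val == prevRestart {
            val = pcdataRestart2
        }
        prevRestart = val
        fmt.Printf("sequence %d marked with PCDATA value %d\n", i, val)
    }
}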


@ -2005,7 +2005,7 @@ func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
ctxt.Arch.ByteOrder.PutUint32(p, symcode[i])
}
obj.MarkUnsafePoints(ctxt, cursym.Func.Text, newprog, isUnsafePoint)
obj.MarkUnsafePoints(ctxt, cursym.Func.Text, newprog, isUnsafePoint, nil)
}
func isUnsafePoint(p *obj.Prog) bool {


@ -500,7 +500,7 @@ func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// We use REGTMP as a scratch register during call injection,
// so instruction sequences that use REGTMP are unsafe to
// preempt asynchronously.
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint)
obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, nil)
}
// Return whether p is an unsafe point.


@ -2226,7 +2226,7 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
// the first instruction.)
return p.From.Index == REG_TLS
}
obj.MarkUnsafePoints(ctxt, s.Func.Text, newprog, useTLS)
obj.MarkUnsafePoints(ctxt, s.Func.Text, newprog, useTLS, nil)
}
}


@ -40,4 +40,15 @@ const (
// PCDATA_UnsafePoint values.
PCDATA_UnsafePointSafe = -1 // Safe for async preemption
PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption
// PCDATA_Restart1(2) apply on a sequence of instructions, within
// which if an async preemption happens, we should back off the PC
// to the start of the sequence when resuming.
// We need two so we can distinguish the start/end of the sequence
// in case two sequences are next to each other.
PCDATA_Restart1 = -3
PCDATA_Restart2 = -4
// Like PCDATA_Restart1, but back to function entry if async preempted.
PCDATA_RestartAtEntry = -5
)


@ -32,6 +32,7 @@ func TestDeadcode(t *testing.T) {
{"typedesc", "type.main.T"},
}
for _, test := range tests {
test := test
t.Run(test.src, func(t *testing.T) {
t.Parallel()
src := filepath.Join("testdata", "deadcode", test.src+".go")


@ -69,6 +69,9 @@ type PublicKey struct {
boring unsafe.Pointer
}
// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.
// Equal reports whether pub and x have the same value.
//
// Two keys are only considered to have the same value if they have the same Curve value.
@ -100,6 +103,17 @@ func (priv *PrivateKey) Public() crypto.PublicKey {
return &priv.PublicKey
}
// Equal reports whether priv and x have the same value.
//
// See PublicKey.Equal for details on how Curve is compared.
func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
xx, ok := x.(*PrivateKey)
if !ok {
return false
}
return priv.PublicKey.Equal(&xx.PublicKey) && priv.D.Cmp(xx.D) == 0
}
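A hypothetical usage sketch of the new method (the curve is arbitrary): a key is Equal to itself but not to an independently generated key, and the public parts compare through PublicKey.Equal.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"
)

func main() {
    k1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    k2, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    fmt.Println(k1.Equal(k1))                      // true
    fmt.Println(k1.PublicKey.Equal(&k2.PublicKey)) // false
    fmt.Println(k1.Equal(k2))                      // false
}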
// Sign signs digest with priv, reading randomness from rand. The opts argument
// is not currently used but, in keeping with the crypto.Signer interface,
// should be the hash function used to digest the message.


@ -23,23 +23,32 @@ func testEqual(t *testing.T, c elliptic.Curve) {
if !public.Equal(crypto.Signer(private).Public().(*ecdsa.PublicKey)) {
t.Errorf("private.Public() is not Equal to public: %q", public)
}
if !private.Equal(private) {
t.Errorf("private key is not equal to itself: %v", private)
}
enc, err := x509.MarshalPKIXPublicKey(public)
enc, err := x509.MarshalPKCS8PrivateKey(private)
if err != nil {
t.Fatal(err)
}
decoded, err := x509.ParsePKIXPublicKey(enc)
decoded, err := x509.ParsePKCS8PrivateKey(enc)
if err != nil {
t.Fatal(err)
}
if !public.Equal(decoded) {
if !public.Equal(decoded.(crypto.Signer).Public()) {
t.Errorf("public key is not equal to itself after decoding: %v", public)
}
if !private.Equal(decoded) {
t.Errorf("private key is not equal to itself after decoding: %v", private)
}
other, _ := ecdsa.GenerateKey(c, rand.Reader)
if public.Equal(other) {
if public.Equal(other.Public()) {
t.Errorf("different public keys are Equal")
}
if private.Equal(other) {
t.Errorf("different private keys are Equal")
}
// Ensure that keys with the same coordinates but on different curves
// aren't considered Equal.


@ -40,6 +40,9 @@ const (
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.
// Equal reports whether pub and x have the same value.
func (pub PublicKey) Equal(x crypto.PublicKey) bool {
xx, ok := x.(PublicKey)
@ -59,6 +62,15 @@ func (priv PrivateKey) Public() crypto.PublicKey {
return PublicKey(publicKey)
}
// Equal reports whether priv and x have the same value.
func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
xx, ok := x.(PrivateKey)
if !ok {
return false
}
return bytes.Equal(priv, xx)
}
// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.


@ -113,14 +113,20 @@ func TestEqual(t *testing.T) {
if !public.Equal(public) {
t.Errorf("public key is not equal to itself: %q", public)
}
if !public.Equal(crypto.Signer(private).Public().(PublicKey)) {
if !public.Equal(crypto.Signer(private).Public()) {
t.Errorf("private.Public() is not Equal to public: %q", public)
}
if !private.Equal(private) {
t.Errorf("private key is not equal to itself: %q", private)
}
other, _, _ := GenerateKey(rand.Reader)
if public.Equal(other) {
otherPub, otherPriv, _ := GenerateKey(rand.Reader)
if public.Equal(otherPub) {
t.Errorf("different public keys are Equal")
}
if private.Equal(otherPriv) {
t.Errorf("different private keys are Equal")
}
}
func TestGolden(t *testing.T) {


@ -22,21 +22,30 @@ func TestEqual(t *testing.T) {
if !public.Equal(crypto.Signer(private).Public().(*rsa.PublicKey)) {
t.Errorf("private.Public() is not Equal to public: %q", public)
}
if !private.Equal(private) {
t.Errorf("private key is not equal to itself: %v", private)
}
enc, err := x509.MarshalPKIXPublicKey(public)
enc, err := x509.MarshalPKCS8PrivateKey(private)
if err != nil {
t.Fatal(err)
}
decoded, err := x509.ParsePKIXPublicKey(enc)
decoded, err := x509.ParsePKCS8PrivateKey(enc)
if err != nil {
t.Fatal(err)
}
if !public.Equal(decoded) {
if !public.Equal(decoded.(crypto.Signer).Public()) {
t.Errorf("public key is not equal to itself after decoding: %v", public)
}
if !private.Equal(decoded) {
t.Errorf("private key is not equal to itself after decoding: %v", private)
}
other, _ := rsa.GenerateKey(rand.Reader, 512)
if public.Equal(other) {
if public.Equal(other.Public()) {
t.Errorf("different public keys are Equal")
}
if private.Equal(other) {
t.Errorf("different private keys are Equal")
}
}


@ -51,6 +51,9 @@ type PublicKey struct {
boring unsafe.Pointer
}
// Any methods implemented on PublicKey might need to also be implemented on
// PrivateKey, as the latter embeds the former and will expose its methods.
// Size returns the modulus size in bytes. Raw signatures and ciphertexts
// for or by this public key will have the same size.
func (pub *PublicKey) Size() int {
@ -118,6 +121,27 @@ func (priv *PrivateKey) Public() crypto.PublicKey {
return &priv.PublicKey
}
// Equal reports whether priv and x have equivalent values. It ignores
// Precomputed values.
func (priv *PrivateKey) Equal(x crypto.PrivateKey) bool {
xx, ok := x.(*PrivateKey)
if !ok {
return false
}
if !priv.PublicKey.Equal(&xx.PublicKey) || priv.D.Cmp(xx.D) != 0 {
return false
}
if len(priv.Primes) != len(xx.Primes) {
return false
}
for i := range priv.Primes {
if priv.Primes[i].Cmp(xx.Primes[i]) != 0 {
return false
}
}
return true
}
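A hypothetical usage sketch of the semantics described in the comment: because Precomputed is ignored, a shallow copy that never calls Precompute still compares equal (the key size is arbitrary).

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"
)

func main() {
    k, _ := rsa.GenerateKey(rand.Reader, 2048)
    clone := &rsa.PrivateKey{ // no Precompute() call, Precomputed left zero
        PublicKey: k.PublicKey,
        D:         k.D,
        Primes:    k.Primes,
    }
    fmt.Println(k.Equal(clone)) // true: Equal ignores Precomputed values
}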
// Sign signs digest with priv, reading randomness from rand. If opts is a
// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
// be used. digest must be the result of hashing the input message using


@ -207,6 +207,10 @@ const (
downgradeCanaryTLS11 = "DOWNGRD\x00"
)
// testingOnlyForceDowngradeCanary is set in tests to force the server side to
// include downgrade canaries even if it's using its highest supported version.
var testingOnlyForceDowngradeCanary bool
// ConnectionState records basic TLS details about the connection.
type ConnectionState struct {
Version uint16 // TLS version used by the connection (e.g. VersionTLS12)


@ -54,7 +54,7 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion")
}
clientHelloVersion := supportedVersions[0]
clientHelloVersion := config.maxSupportedVersion()
// The version at the beginning of the ClientHello was capped at TLS 1.2
// for compatibility reasons. The supported_versions extension is used
// to negotiate versions now. See RFC 8446, Section 4.2.1.
@ -184,6 +184,18 @@ func (c *Conn) clientHandshake() (err error) {
return err
}
// If we are negotiating a protocol version that's lower than what we
// support, check for the server downgrade canaries.
// See RFC 8446, Section 4.1.3.
maxVers := c.config.maxSupportedVersion()
tls12Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS12
tls11Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS11
if maxVers == VersionTLS13 && c.vers <= VersionTLS12 && (tls12Downgrade || tls11Downgrade) ||
maxVers == VersionTLS12 && c.vers <= VersionTLS11 && tls11Downgrade {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: downgrade attempt detected, possibly due to a MitM attack or a broken middlebox")
}
if c.vers == VersionTLS13 {
hs := &clientHandshakeStateTLS13{
c: c,
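A standalone sketch of the downgrade-protection check added above; the TLS 1.2 sentinel value is the one defined in RFC 8446, Section 4.1.3 (it does not appear in this hunk, so treat it as an assumption).

package main

import "fmt"

const (
    downgradeCanaryTLS12 = "DOWNGRD\x01" // RFC 8446, Section 4.1.3
    downgradeCanaryTLS11 = "DOWNGRD\x00"
)

func main() {
    var serverRandom [32]byte
    // A TLS 1.3-capable server that negotiates TLS 1.2 overwrites the last
    // eight bytes of its random with the sentinel.
    copy(serverRandom[24:], downgradeCanaryTLS12)

    // A client whose maximum version is TLS 1.3 but which ended up on TLS 1.2
    // checks for either sentinel and aborts the handshake if one is present.
    tls12Downgrade := string(serverRandom[24:]) == downgradeCanaryTLS12
    tls11Downgrade := string(serverRandom[24:]) == downgradeCanaryTLS11
    fmt.Println(tls12Downgrade || tls11Downgrade) // true: downgrade detected
}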


@ -1984,3 +1984,48 @@ func TestCloseClientConnectionOnIdleServer(t *testing.T) {
t.Errorf("Error expected, but no error returned")
}
}
func testDowngradeCanary(t *testing.T, clientVersion, serverVersion uint16) error {
defer func() { testingOnlyForceDowngradeCanary = false }()
testingOnlyForceDowngradeCanary = true
clientConfig := testConfig.Clone()
clientConfig.MaxVersion = clientVersion
serverConfig := testConfig.Clone()
serverConfig.MaxVersion = serverVersion
_, _, err := testHandshake(t, clientConfig, serverConfig)
return err
}
func TestDowngradeCanary(t *testing.T) {
if err := testDowngradeCanary(t, VersionTLS13, VersionTLS12); err == nil {
t.Errorf("downgrade from TLS 1.3 to TLS 1.2 was not detected")
}
if testing.Short() {
t.Skip("skipping the rest of the checks in short mode")
}
if err := testDowngradeCanary(t, VersionTLS13, VersionTLS11); err == nil {
t.Errorf("downgrade from TLS 1.3 to TLS 1.1 was not detected")
}
if err := testDowngradeCanary(t, VersionTLS13, VersionTLS10); err == nil {
t.Errorf("downgrade from TLS 1.3 to TLS 1.0 was not detected")
}
if err := testDowngradeCanary(t, VersionTLS12, VersionTLS11); err == nil {
t.Errorf("downgrade from TLS 1.2 to TLS 1.1 was not detected")
}
if err := testDowngradeCanary(t, VersionTLS12, VersionTLS10); err == nil {
t.Errorf("downgrade from TLS 1.2 to TLS 1.0 was not detected")
}
if err := testDowngradeCanary(t, VersionTLS13, VersionTLS13); err != nil {
t.Errorf("server unexpectedly sent downgrade canary for TLS 1.3")
}
if err := testDowngradeCanary(t, VersionTLS12, VersionTLS12); err != nil {
t.Errorf("client didn't ignore expected TLS 1.2 canary")
}
if err := testDowngradeCanary(t, VersionTLS11, VersionTLS11); err != nil {
t.Errorf("client unexpectedly reacted to a canary in TLS 1.1")
}
if err := testDowngradeCanary(t, VersionTLS10, VersionTLS10); err != nil {
t.Errorf("client unexpectedly reacted to a canary in TLS 1.0")
}
}


@ -180,51 +180,62 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
c := hs.c
// The first ClientHello gets double-hashed into the transcript upon a
// HelloRetryRequest. See RFC 8446, Section 4.4.1.
// HelloRetryRequest. (The idea is that the server might offload transcript
// storage to the client in the cookie.) See RFC 8446, Section 4.4.1.
chHash := hs.transcript.Sum(nil)
hs.transcript.Reset()
hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
hs.transcript.Write(chHash)
hs.transcript.Write(hs.serverHello.marshal())
// The only HelloRetryRequest extensions we support are key_share and
// cookie, and clients must abort the handshake if the HRR would not result
// in any change in the ClientHello.
if hs.serverHello.selectedGroup == 0 && hs.serverHello.cookie == nil {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
}
if hs.serverHello.cookie != nil {
hs.hello.cookie = hs.serverHello.cookie
}
if hs.serverHello.serverShare.group != 0 {
c.sendAlert(alertDecodeError)
return errors.New("tls: received malformed key_share extension")
}
curveID := hs.serverHello.selectedGroup
if curveID == 0 {
c.sendAlert(alertMissingExtension)
return errors.New("tls: received HelloRetryRequest without selected group")
}
curveOK := false
for _, id := range hs.hello.supportedCurves {
if id == curveID {
curveOK = true
break
// If the server sent a key_share extension selecting a group, ensure it's
// a group we advertised but did not send a key share for, and send a key
// share for it this time.
if curveID := hs.serverHello.selectedGroup; curveID != 0 {
curveOK := false
for _, id := range hs.hello.supportedCurves {
if id == curveID {
curveOK = true
break
}
}
if !curveOK {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server selected unsupported group")
}
if hs.ecdheParams.CurveID() == curveID {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
}
if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
c.sendAlert(alertInternalError)
return errors.New("tls: CurvePreferences includes unsupported curve")
}
params, err := generateECDHEParameters(c.config.rand(), curveID)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
hs.ecdheParams = params
hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
}
if !curveOK {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server selected unsupported group")
}
if hs.ecdheParams.CurveID() == curveID {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
}
if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
c.sendAlert(alertInternalError)
return errors.New("tls: CurvePreferences includes unsupported curve")
}
params, err := generateECDHEParameters(c.config.rand(), curveID)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
hs.ecdheParams = params
hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
hs.hello.cookie = hs.serverHello.cookie
hs.hello.raw = nil
if len(hs.hello.pskIdentities) > 0 {

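A minimal sketch of the transcript trick described in the comment above: after a HelloRetryRequest, ClientHello1 is replaced in the transcript by a synthetic message_hash message (handshake type 254 per RFC 8446, Section 4.4.1) carrying only Hash(ClientHello1).

package main

import (
    "crypto/sha256"
    "fmt"
)

const typeMessageHash = 254 // RFC 8446 message_hash handshake type

func main() {
    clientHello1 := []byte("placeholder ClientHello1 bytes")
    chHash := sha256.Sum256(clientHello1)

    transcript := sha256.New()
    transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
    transcript.Write(chHash[:])
    // ...HelloRetryRequest, ClientHello2, and later messages are hashed as usual.
    fmt.Printf("transcript so far: %x\n", transcript.Sum(nil))
}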

@ -308,11 +308,9 @@ func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value {
s := &sessionState{}
s.vers = uint16(rand.Intn(10000))
s.cipherSuite = uint16(rand.Intn(10000))
s.masterSecret = randomBytes(rand.Intn(100), rand)
numCerts := rand.Intn(20)
s.certificates = make([][]byte, numCerts)
for i := 0; i < numCerts; i++ {
s.certificates[i] = randomBytes(rand.Intn(10)+1, rand)
s.masterSecret = randomBytes(rand.Intn(100)+1, rand)
for i := 0; i < rand.Intn(20); i++ {
s.certificates = append(s.certificates, randomBytes(rand.Intn(500)+1, rand))
}
return reflect.ValueOf(s)
}


@ -193,7 +193,7 @@ func (hs *serverHandshakeState) processClientHello() error {
serverRandom := hs.hello.random
// Downgrade protection canaries. See RFC 8446, Section 4.1.3.
maxVers := c.config.maxSupportedVersion()
if maxVers >= VersionTLS12 && c.vers < maxVers {
if maxVers >= VersionTLS12 && c.vers < maxVers || testingOnlyForceDowngradeCanary {
if c.vers == VersionTLS12 {
copy(serverRandom[24:], downgradeCanaryTLS12)
} else {


@ -12,8 +12,9 @@ import (
"crypto/sha256"
"crypto/subtle"
"errors"
"golang.org/x/crypto/cryptobyte"
"io"
"golang.org/x/crypto/cryptobyte"
)
// sessionState contains the information that is serialized into a session
@ -21,88 +22,56 @@ import (
type sessionState struct {
vers uint16
cipherSuite uint16
masterSecret []byte
certificates [][]byte
masterSecret []byte // opaque master_secret<1..2^16-1>;
// struct { opaque certificate<1..2^32-1> } Certificate;
certificates [][]byte // Certificate certificate_list<0..2^16-1>;
// usedOldKey is true if the ticket this session came from
// was encrypted with an older key and thus should be refreshed.
usedOldKey bool
}
func (s *sessionState) marshal() []byte {
length := 2 + 2 + 2 + len(s.masterSecret) + 2
for _, cert := range s.certificates {
length += 4 + len(cert)
}
ret := make([]byte, length)
x := ret
x[0] = byte(s.vers >> 8)
x[1] = byte(s.vers)
x[2] = byte(s.cipherSuite >> 8)
x[3] = byte(s.cipherSuite)
x[4] = byte(len(s.masterSecret) >> 8)
x[5] = byte(len(s.masterSecret))
x = x[6:]
copy(x, s.masterSecret)
x = x[len(s.masterSecret):]
x[0] = byte(len(s.certificates) >> 8)
x[1] = byte(len(s.certificates))
x = x[2:]
for _, cert := range s.certificates {
x[0] = byte(len(cert) >> 24)
x[1] = byte(len(cert) >> 16)
x[2] = byte(len(cert) >> 8)
x[3] = byte(len(cert))
copy(x[4:], cert)
x = x[4+len(cert):]
}
return ret
func (m *sessionState) marshal() []byte {
var b cryptobyte.Builder
b.AddUint16(m.vers)
b.AddUint16(m.cipherSuite)
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(m.masterSecret)
})
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
for _, cert := range m.certificates {
b.AddUint32LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(cert)
})
}
})
return b.BytesOrPanic()
}
func (s *sessionState) unmarshal(data []byte) bool {
if len(data) < 8 {
func (m *sessionState) unmarshal(data []byte) bool {
*m = sessionState{usedOldKey: m.usedOldKey}
s := cryptobyte.String(data)
if ok := s.ReadUint16(&m.vers) &&
m.vers != VersionTLS13 &&
s.ReadUint16(&m.cipherSuite) &&
readUint16LengthPrefixed(&s, &m.masterSecret) &&
len(m.masterSecret) != 0; !ok {
return false
}
s.vers = uint16(data[0])<<8 | uint16(data[1])
s.cipherSuite = uint16(data[2])<<8 | uint16(data[3])
masterSecretLen := int(data[4])<<8 | int(data[5])
data = data[6:]
if len(data) < masterSecretLen {
var certList cryptobyte.String
if !s.ReadUint16LengthPrefixed(&certList) {
return false
}
s.masterSecret = data[:masterSecretLen]
data = data[masterSecretLen:]
if len(data) < 2 {
return false
for !certList.Empty() {
var certLen uint32
certList.ReadUint32(&certLen)
var cert []byte
if certLen == 0 || !certList.ReadBytes(&cert, int(certLen)) {
return false
}
m.certificates = append(m.certificates, cert)
}
numCerts := int(data[0])<<8 | int(data[1])
data = data[2:]
s.certificates = make([][]byte, numCerts)
for i := range s.certificates {
if len(data) < 4 {
return false
}
certLen := int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3])
data = data[4:]
if certLen < 0 {
return false
}
if len(data) < certLen {
return false
}
s.certificates[i] = data[:certLen]
data = data[certLen:]
}
return len(data) == 0
return s.Empty()
}
// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first

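A hedged standalone illustration of the cryptobyte pattern used by the new marshal/unmarshal (it needs the golang.org/x/crypto module); the values below are placeholders.

package main

import (
    "fmt"

    "golang.org/x/crypto/cryptobyte"
)

func main() {
    var b cryptobyte.Builder
    b.AddUint16(0x0303) // vers
    b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
        b.AddBytes([]byte("placeholder master secret"))
    })
    data := b.BytesOrPanic()

    s := cryptobyte.String(data)
    var vers uint16
    var secret cryptobyte.String
    if s.ReadUint16(&vers) && s.ReadUint16LengthPrefixed(&secret) && s.Empty() {
        fmt.Printf("vers=%#x secret=%q\n", vers, string(secret))
    }
}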

@ -9,6 +9,7 @@ package x509
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
)
@ -69,7 +70,7 @@ func loadSystemRoots() (*CertPool, error) {
}
for _, directory := range dirs {
fis, err := ioutil.ReadDir(directory)
fis, err := readUniqueDirectoryEntries(directory)
if err != nil {
if firstErr == nil && !os.IsNotExist(err) {
firstErr = err
@ -90,3 +91,29 @@ func loadSystemRoots() (*CertPool, error) {
return nil, firstErr
}
// readUniqueDirectoryEntries is like ioutil.ReadDir but omits
// symlinks that point within the directory.
func readUniqueDirectoryEntries(dir string) ([]os.FileInfo, error) {
fis, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
uniq := fis[:0]
for _, fi := range fis {
if !isSameDirSymlink(fi, dir) {
uniq = append(uniq, fi)
}
}
return uniq, nil
}
// isSameDirSymlink reports whether fi in dir is a symlink with a
// target not containing a slash.
func isSameDirSymlink(fi os.FileInfo, dir string) bool {
if fi.Mode()&os.ModeSymlink == 0 {
return false
}
target, err := os.Readlink(filepath.Join(dir, fi.Name()))
return err == nil && !strings.Contains(target, "/")
}


@ -202,3 +202,30 @@ func TestLoadSystemCertsLoadColonSeparatedDirs(t *testing.T) {
t.Fatalf("Mismatched certPools\nGot:\n%s\n\nWant:\n%s", g, w)
}
}
func TestReadUniqueDirectoryEntries(t *testing.T) {
temp := func(base string) string { return filepath.Join(t.TempDir(), base) }
if f, err := os.Create(temp("file")); err != nil {
t.Fatal(err)
} else {
f.Close()
}
if err := os.Symlink("target-in", temp("link-in")); err != nil {
t.Fatal(err)
}
if err := os.Symlink("../target-out", temp("link-out")); err != nil {
t.Fatal(err)
}
got, err := readUniqueDirectoryEntries(t.TempDir())
if err != nil {
t.Fatal(err)
}
gotNames := []string{}
for _, fi := range got {
gotNames = append(gotNames, fi.Name())
}
wantNames := []string{"file", "link-out"}
if !reflect.DeepEqual(gotNames, wantNames) {
t.Errorf("got %q; want %q", gotNames, wantNames)
}
}


@ -185,18 +185,29 @@ func (se SystemRootsError) Error() string {
// verified. Platform-specific verification needs the ASN.1 contents.
var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate")
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options.
// VerifyOptions contains parameters for Certificate.Verify.
type VerifyOptions struct {
// IsBoring is a validity check for BoringCrypto.
// If not nil, it will be called to check whether a given certificate
// can be used for constructing verification chains.
IsBoring func(*Certificate) bool
DNSName string
// DNSName, if set, is checked against the leaf certificate with
// Certificate.VerifyHostname.
DNSName string
// Intermediates is an optional pool of certificates that are not trust
// anchors, but can be used to form a chain from the leaf certificate to a
// root certificate.
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
// Roots is the set of trusted root certificates the leaf certificate needs
// to chain up to. If nil, the system roots or the platform verifier are used.
Roots *CertPool
// CurrentTime is used to check the validity of all certificates in the
// chain. If zero, the current time is used.
CurrentTime time.Time
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
@ -205,6 +216,7 @@ type VerifyOptions struct {
// Certificate chains are required to nest these extended key usage values.
// (This matches the Windows CryptoAPI behavior, but not the spec.)
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
// zero, a sensible default is used. This limit prevents pathological
@ -1015,6 +1027,16 @@ func toLowerCaseASCII(in string) string {
// VerifyHostname returns nil if c is a valid certificate for the named host.
// Otherwise it returns an error describing the mismatch.
//
// IP addresses can be optionally enclosed in square brackets and are checked
// against the IPAddresses field. Other names are checked case insensitively
// against the DNSNames field, with support for only one wildcard as the whole
// left-most label.
//
// If the Common Name field is a valid hostname, and the certificate doesn't
// have any Subject Alternative Names, the name will also be checked against the
// Common Name. This legacy behavior can be disabled by setting the GODEBUG
// environment variable to "x509ignoreCN=1" and might be removed in the future.
func (c *Certificate) VerifyHostname(h string) error {
// IP addresses may be written in [ ].
candidateIP := h

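A hypothetical helper showing how the documented VerifyOptions fields fit together; the package name, hostname, and pools are illustrative.

package certcheck

import (
    "crypto/x509"
    "time"
)

func verifyLeaf(leaf *x509.Certificate, intermediates, roots *x509.CertPool) error {
    opts := x509.VerifyOptions{
        DNSName:       "example.com", // checked via Certificate.VerifyHostname
        Intermediates: intermediates, // non-anchor certs that may appear in the chain
        Roots:         roots,         // nil would mean the system roots / platform verifier
        CurrentTime:   time.Now(),    // zero would also mean "now"
        KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    }
    _, err := leaf.Verify(opts)
    return err
}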

@ -158,10 +158,24 @@ func (w *Writer) fieldNeedsQuotes(field string) bool {
if field == "" {
return false
}
if field == `\.` || strings.ContainsRune(field, w.Comma) || strings.ContainsAny(field, "\"\r\n") {
if field == `\.` {
return true
}
if w.Comma < utf8.RuneSelf {
for i := 0; i < len(field); i++ {
c := field[i]
if c == '\n' || c == '\r' || c == '"' || c == byte(w.Comma) {
return true
}
}
} else {
if strings.ContainsRune(field, w.Comma) || strings.ContainsAny(field, "\"\r\n") {
return true
}
}
r1, _ := utf8.DecodeRuneInString(field)
return unicode.IsSpace(r1)
}


@ -93,3 +93,20 @@ func TestError(t *testing.T) {
t.Error("Error should not be nil")
}
}
var benchmarkWriteData = [][]string{
{"abc", "def", "12356", "1234567890987654311234432141542132"},
{"abc", "def", "12356", "1234567890987654311234432141542132"},
{"abc", "def", "12356", "1234567890987654311234432141542132"},
}
func BenchmarkWrite(b *testing.B) {
for i := 0; i < b.N; i++ {
w := NewWriter(&bytes.Buffer{})
err := w.WriteAll(benchmarkWriteData)
if err != nil {
b.Fatal(err)
}
w.Flush()
}
}


@ -21,6 +21,7 @@ import (
"log"
"net"
"net/http"
"net/textproto"
"os"
"os/exec"
"path/filepath"
@ -276,8 +277,8 @@ func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
continue
}
header, val := parts[0], parts[1]
header = strings.TrimSpace(header)
val = strings.TrimSpace(val)
header = textproto.TrimString(header)
val = textproto.TrimString(val)
switch {
case header == "Status":
if len(val) < 3 {


@ -7,6 +7,7 @@ package http
import (
"log"
"net"
"net/textproto"
"strconv"
"strings"
"time"
@ -60,11 +61,11 @@ func readSetCookies(h Header) []*Cookie {
}
cookies := make([]*Cookie, 0, cookieCount)
for _, line := range h["Set-Cookie"] {
parts := strings.Split(strings.TrimSpace(line), ";")
parts := strings.Split(textproto.TrimString(line), ";")
if len(parts) == 1 && parts[0] == "" {
continue
}
parts[0] = strings.TrimSpace(parts[0])
parts[0] = textproto.TrimString(parts[0])
j := strings.Index(parts[0], "=")
if j < 0 {
continue
@ -83,7 +84,7 @@ func readSetCookies(h Header) []*Cookie {
Raw: line,
}
for i := 1; i < len(parts); i++ {
parts[i] = strings.TrimSpace(parts[i])
parts[i] = textproto.TrimString(parts[i])
if len(parts[i]) == 0 {
continue
}
@ -242,7 +243,7 @@ func readCookies(h Header, filter string) []*Cookie {
cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";"))
for _, line := range lines {
line = strings.TrimSpace(line)
line = textproto.TrimString(line)
var part string
for len(line) > 0 { // continue since we have rest
@ -251,7 +252,7 @@ func readCookies(h Header, filter string) []*Cookie {
} else {
part, line = line, ""
}
part = strings.TrimSpace(part)
part = textproto.TrimString(part)
if len(part) == 0 {
continue
}

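The swaps from strings.TrimSpace to textproto.TrimString in this and the following hunks are effectively behavior-preserving for header values, whose whitespace is ASCII-only; TrimString skips the Unicode handling. A small comparison sketch (values are placeholders):

package main

import (
    "fmt"
    "net/textproto"
    "strings"
)

func main() {
    v := "\t sessionid=abc123 \r\n"
    fmt.Printf("%q\n", textproto.TrimString(v)) // "sessionid=abc123"
    fmt.Printf("%q\n", strings.TrimSpace(v))    // "sessionid=abc123"

    // Only TrimSpace removes non-ASCII whitespace such as U+00A0; header
    // values never legitimately contain it.
    u := "\u00a0value\u00a0"
    fmt.Printf("%q %q\n", textproto.TrimString(u), strings.TrimSpace(u))
}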

@ -756,7 +756,7 @@ func parseRange(s string, size int64) ([]httpRange, error) {
var ranges []httpRange
noOverlap := false
for _, ra := range strings.Split(s[len(b):], ",") {
ra = strings.TrimSpace(ra)
ra = textproto.TrimString(ra)
if ra == "" {
continue
}
@ -764,7 +764,7 @@ func parseRange(s string, size int64) ([]httpRange, error) {
if i < 0 {
return nil, errors.New("invalid range")
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
start, end := textproto.TrimString(ra[:i]), textproto.TrimString(ra[i+1:])
var r httpRange
if start == "" {
// If no start is specified, end specifies the


@ -9,6 +9,7 @@ import (
"fmt"
"io/ioutil"
"net/http"
"net/textproto"
"strconv"
"strings"
@ -221,7 +222,7 @@ func (rw *ResponseRecorder) Result() *http.Response {
// This is a modified version of the same function found in net/http/transfer.go.
// This one just ignores an invalid header.
func parseContentLength(cl string) int64 {
cl = strings.TrimSpace(cl)
cl = textproto.TrimString(cl)
if cl == "" {
return -1
}


@ -13,6 +13,7 @@ import (
"log"
"net"
"net/http"
"net/textproto"
"net/url"
"strings"
"sync"
@ -387,7 +388,7 @@ func shouldPanicOnCopyError(req *http.Request) bool {
func removeConnectionHeaders(h http.Header) {
for _, f := range h["Connection"] {
for _, sf := range strings.Split(f, ",") {
if sf = strings.TrimSpace(sf); sf != "" {
if sf = textproto.TrimString(sf); sf != "" {
h.Del(sf)
}
}


@ -1224,13 +1224,22 @@ func TestReverseProxyWebSocketCancelation(t *testing.T) {
for i := 0; i < n; i++ {
if _, err := bufrw.WriteString(nthResponse(i)); err != nil {
t.Errorf("Writing response #%d failed: %v", i, err)
select {
case <-triggerCancelCh:
default:
t.Errorf("Writing response #%d failed: %v", i, err)
}
return
}
bufrw.Flush()
time.Sleep(time.Second)
}
if _, err := bufrw.WriteString(terminalMsg); err != nil {
t.Errorf("Failed to write terminal message: %v", err)
select {
case <-triggerCancelCh:
default:
t.Errorf("Failed to write terminal message: %v", err)
}
}
bufrw.Flush()
}))


@ -734,6 +734,7 @@ func TestReadResponseCloseInMiddle(t *testing.T) {
}
func diff(t *testing.T, prefix string, have, want interface{}) {
t.Helper()
hv := reflect.ValueOf(have).Elem()
wv := reflect.ValueOf(want).Elem()
if hv.Type() != wv.Type() {


@ -1347,37 +1347,6 @@ func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
}
}
func TestIdentityResponseHeaders(t *testing.T) {
// Not parallel; changes log output.
defer afterTest(t)
log.SetOutput(ioutil.Discard) // is noisy otherwise
defer log.SetOutput(os.Stderr)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Transfer-Encoding", "identity")
w.(Flusher).Flush()
fmt.Fprintf(w, "I am an identity response.")
}))
defer ts.Close()
c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
}
defer res.Body.Close()
if g, e := res.TransferEncoding, []string(nil); !reflect.DeepEqual(g, e) {
t.Errorf("expected TransferEncoding of %v; got %v", e, g)
}
if _, haveCL := res.Header["Content-Length"]; haveCL {
t.Errorf("Unexpected Content-Length")
}
if !res.Close {
t.Errorf("expected Connection: close; got %v", res.Close)
}
}
// TestHeadResponses verifies that all MIME type sniffing and Content-Length
// counting of GET requests also happens on HEAD requests.
func TestHeadResponses_h1(t *testing.T) { testHeadResponses(t, h1Mode) }


@ -425,11 +425,11 @@ type transferReader struct {
ProtoMajor int
ProtoMinor int
// Output
Body io.ReadCloser
ContentLength int64
TransferEncoding []string
Close bool
Trailer Header
Body io.ReadCloser
ContentLength int64
Chunked bool
Close bool
Trailer Header
}
func (t *transferReader) protoAtLeast(m, n int) bool {
@ -501,13 +501,12 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
t.ProtoMajor, t.ProtoMinor = 1, 1
}
// Transfer encoding, content length
err = t.fixTransferEncoding()
if err != nil {
// Transfer-Encoding: chunked, and overriding Content-Length.
if err := t.parseTransferEncoding(); err != nil {
return err
}
realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding)
realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
if err != nil {
return err
}
@ -522,7 +521,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
}
// Trailer
t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding)
t.Trailer, err = fixTrailer(t.Header, t.Chunked)
if err != nil {
return err
}
@ -532,9 +531,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
// See RFC 7230, section 3.3.
switch msg.(type) {
case *Response:
if realLength == -1 &&
!chunked(t.TransferEncoding) &&
bodyAllowedForStatus(t.StatusCode) {
if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
// Unbounded body.
t.Close = true
}
@ -543,7 +540,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
// Prepare body reader. ContentLength < 0 means chunked encoding
// or close connection when finished, since multipart is not supported yet
switch {
case chunked(t.TransferEncoding):
case t.Chunked:
if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
t.Body = NoBody
} else {
@ -569,13 +566,17 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
case *Request:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
rr.TransferEncoding = t.TransferEncoding
if t.Chunked {
rr.TransferEncoding = []string{"chunked"}
}
rr.Close = t.Close
rr.Trailer = t.Trailer
case *Response:
rr.Body = t.Body
rr.ContentLength = t.ContentLength
rr.TransferEncoding = t.TransferEncoding
if t.Chunked {
rr.TransferEncoding = []string{"chunked"}
}
rr.Close = t.Close
rr.Trailer = t.Trailer
}
@ -605,8 +606,8 @@ func isUnsupportedTEError(err error) bool {
return ok
}
// fixTransferEncoding sanitizes t.TransferEncoding, if needed.
func (t *transferReader) fixTransferEncoding() error {
// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
func (t *transferReader) parseTransferEncoding() error {
raw, present := t.Header["Transfer-Encoding"]
if !present {
return nil
@ -618,56 +619,38 @@ func (t *transferReader) fixTransferEncoding() error {
return nil
}
encodings := strings.Split(raw[0], ",")
te := make([]string, 0, len(encodings))
// TODO: Even though we only support "identity" and "chunked"
// encodings, the loop below is designed with foresight. One
// invariant that must be maintained is that, if present,
// chunked encoding must always come first.
for _, encoding := range encodings {
encoding = strings.ToLower(strings.TrimSpace(encoding))
// "identity" encoding is not recorded
if encoding == "identity" {
break
}
if encoding != "chunked" {
return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", encoding)}
}
te = te[0 : len(te)+1]
te[len(te)-1] = encoding
// Like nginx, we only support a single Transfer-Encoding header field, and
// only if set to "chunked". This is one of the most security sensitive
// surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
// strict and simple.
if len(raw) != 1 {
return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
}
if len(te) > 1 {
return badStringError("too many transfer encodings", strings.Join(te, ","))
}
if len(te) > 0 {
// RFC 7230 3.3.2 says "A sender MUST NOT send a
// Content-Length header field in any message that
// contains a Transfer-Encoding header field."
//
// but also:
// "If a message is received with both a
// Transfer-Encoding and a Content-Length header
// field, the Transfer-Encoding overrides the
// Content-Length. Such a message might indicate an
// attempt to perform request smuggling (Section 9.5)
// or response splitting (Section 9.4) and ought to be
// handled as an error. A sender MUST remove the
// received Content-Length field prior to forwarding
// such a message downstream."
//
// Reportedly, these appear in the wild.
delete(t.Header, "Content-Length")
t.TransferEncoding = te
return nil
if strings.ToLower(textproto.TrimString(raw[0])) != "chunked" {
return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
}
// RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
// in any message that contains a Transfer-Encoding header field."
//
// but also: "If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length. Such a message might indicate an attempt to perform
// request smuggling (Section 9.5) or response splitting (Section 9.4) and
// ought to be handled as an error. A sender MUST remove the received
// Content-Length field prior to forwarding such a message downstream."
//
// Reportedly, these appear in the wild.
delete(t.Header, "Content-Length")
t.Chunked = true
return nil
}
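// A minimal sketch of the strict policy above, under a hypothetical helper
// name (parseTE); it is an illustration, not the actual net/http code, and it
// assumes this file's existing "fmt" and "strings" imports.
func parseTE(raw []string) (chunked bool, err error) {
	if len(raw) == 0 {
		return false, nil // no Transfer-Encoding header at all
	}
	if len(raw) != 1 {
		return false, fmt.Errorf("too many transfer encodings: %q", raw)
	}
	// Trim only ASCII space and tab, like textproto.TrimString; trimming more
	// aggressively would wrongly accept values such as "\vchunked".
	if strings.ToLower(strings.Trim(raw[0], " \t")) != "chunked" {
		return false, fmt.Errorf("unsupported transfer encoding: %q", raw[0])
	}
	return true, nil
}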
// Determine the expected body length, using RFC 7230 Section 3.3. This
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
isRequest := !isResponse
contentLens := header["Content-Length"]
@ -677,9 +660,9 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
// Content-Length headers if they differ in value.
// If there are dups of the value, remove the dups.
// See Issue 16490.
first := strings.TrimSpace(contentLens[0])
first := textproto.TrimString(contentLens[0])
for _, ct := range contentLens[1:] {
if first != strings.TrimSpace(ct) {
if first != textproto.TrimString(ct) {
return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
}
}
@ -711,14 +694,14 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
}
// Logic based on Transfer-Encoding
if chunked(te) {
if chunked {
return -1, nil
}
// Logic based on Content-Length
var cl string
if len(contentLens) == 1 {
cl = strings.TrimSpace(contentLens[0])
cl = textproto.TrimString(contentLens[0])
}
if cl != "" {
n, err := parseContentLength(cl)
@ -766,12 +749,12 @@ func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
}
// Parse the trailer header
func fixTrailer(header Header, te []string) (Header, error) {
func fixTrailer(header Header, chunked bool) (Header, error) {
vv, ok := header["Trailer"]
if !ok {
return nil, nil
}
if !chunked(te) {
if !chunked {
// Trailer and no chunking:
// this is an invalid use case for trailer header.
// Nevertheless, no error will be returned and we
@ -1049,7 +1032,7 @@ func (bl bodyLocked) Read(p []byte) (n int, err error) {
// parseContentLength trims whitespace from cl and returns -1 if no value
// is set, or the value if it's >= 0.
func parseContentLength(cl string) (int64, error) {
cl = strings.TrimSpace(cl)
cl = textproto.TrimString(cl)
if cl == "" {
return -1, nil
}
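// Similarly, a small sketch (hypothetical name dedupContentLength, not the
// actual fixLength code) of the duplicate Content-Length policy above:
// identical duplicates collapse to a single value, conflicting values are an
// error.
func dedupContentLength(values []string) (string, error) {
	if len(values) == 0 {
		return "", nil
	}
	first := strings.Trim(values[0], " \t")
	for _, v := range values[1:] {
		if strings.Trim(v, " \t") != first {
			return "", fmt.Errorf("message cannot contain multiple Content-Length headers; got %q", values)
		}
	}
	return first, nil
}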

View file

@ -279,7 +279,7 @@ func TestTransferWriterWriteBodyReaderTypes(t *testing.T) {
}
}
func TestFixTransferEncoding(t *testing.T) {
func TestParseTransferEncoding(t *testing.T) {
tests := []struct {
hdr Header
wantErr error
@ -290,7 +290,23 @@ func TestFixTransferEncoding(t *testing.T) {
},
{
hdr: Header{"Transfer-Encoding": {"chunked, chunked", "identity", "chunked"}},
wantErr: badStringError("too many transfer encodings", "chunked,chunked"),
wantErr: &unsupportedTEError{`too many transfer encodings: ["chunked, chunked" "identity" "chunked"]`},
},
{
hdr: Header{"Transfer-Encoding": {""}},
wantErr: &unsupportedTEError{`unsupported transfer encoding: ""`},
},
{
hdr: Header{"Transfer-Encoding": {"chunked, identity"}},
wantErr: &unsupportedTEError{`unsupported transfer encoding: "chunked, identity"`},
},
{
hdr: Header{"Transfer-Encoding": {"chunked", "identity"}},
wantErr: &unsupportedTEError{`too many transfer encodings: ["chunked" "identity"]`},
},
{
hdr: Header{"Transfer-Encoding": {"\x0bchunked"}},
wantErr: &unsupportedTEError{`unsupported transfer encoding: "\vchunked"`},
},
{
hdr: Header{"Transfer-Encoding": {"chunked"}},
@ -304,7 +320,7 @@ func TestFixTransferEncoding(t *testing.T) {
ProtoMajor: 1,
ProtoMinor: 1,
}
gotErr := tr.fixTransferEncoding()
gotErr := tr.parseTransferEncoding()
if !reflect.DeepEqual(gotErr, tt.wantErr) {
t.Errorf("%d.\ngot error:\n%v\nwant error:\n%v\n\n", i, gotErr, tt.wantErr)
}

View file

@ -843,7 +843,7 @@ func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
// Deliver pconn to goroutine waiting for idle connection, if any.
// (They may be actively dialing, but this conn is ready first.
// Chrome calls this socket late binding.
// See https://insouciant.org/tech/connection-management-in-chromium/.)
// See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
key := pconn.cacheKey
if q, ok := t.idleConnWait[key]; ok {
done := false

View file

@ -0,0 +1,128 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"testing"
)
var res int64
var ures uint64
func TestFloatTruncation(t *testing.T) {
testdata := []struct {
input float64
convInt64 int64
convUInt64 uint64
overflow bool
}{
// max +- 1
{
input: 0x7fffffffffffffff,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
// For an out-of-bounds conversion, the result is implementation-dependent.
// This test verifies the behavior of the wasm implementation.
{
input: 0x8000000000000000,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: 0x7ffffffffffffffe,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
// neg max +- 1
{
input: -0x8000000000000000,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: -0x8000000000000001,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: -0x7fffffffffffffff,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
// trunc point +- 1
{
input: 0x7ffffffffffffdff,
convInt64: 0x7ffffffffffffc00,
convUInt64: 0x7ffffffffffffc00,
},
{
input: 0x7ffffffffffffe00,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: 0x7ffffffffffffdfe,
convInt64: 0x7ffffffffffffc00,
convUInt64: 0x7ffffffffffffc00,
},
// neg trunc point +- 1
{
input: -0x7ffffffffffffdff,
convInt64: -0x7ffffffffffffc00,
convUInt64: 0x8000000000000000,
},
{
input: -0x7ffffffffffffe00,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: -0x7ffffffffffffdfe,
convInt64: -0x7ffffffffffffc00,
convUInt64: 0x8000000000000000,
},
// umax +- 1
{
input: 0xffffffffffffffff,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: 0x10000000000000000,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: 0xfffffffffffffffe,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
// umax trunc +- 1
{
input: 0xfffffffffffffbff,
convInt64: -0x8000000000000000,
convUInt64: 0xfffffffffffff800,
},
{
input: 0xfffffffffffffc00,
convInt64: -0x8000000000000000,
convUInt64: 0x8000000000000000,
},
{
input: 0xfffffffffffffbfe,
convInt64: -0x8000000000000000,
convUInt64: 0xfffffffffffff800,
},
}
for _, item := range testdata {
if got, want := int64(item.input), item.convInt64; got != want {
t.Errorf("int64(%f): got %x, want %x", item.input, got, want)
}
if got, want := uint64(item.input), item.convUInt64; got != want {
t.Errorf("uint64(%f): got %x, want %x", item.input, got, want)
}
}
}
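// Hypothetical companion test (not part of the change), showing where the
// truncation boundaries in the table above come from; it assumes an extra
// "math" import. The float64 spacing just below 2^63 is 2^10 and just below
// 2^64 is 2^11, so the largest exactly representable values under the integer
// maxima are 0x7ffffffffffffc00 and 0xfffffffffffff800.
func TestFloatTruncationBounds(t *testing.T) {
	maxI := math.Nextafter(math.Ldexp(1, 63), 0) // largest float64 below 2^63
	maxU := math.Nextafter(math.Ldexp(1, 64), 0) // largest float64 below 2^64
	if got := int64(maxI); got != 0x7ffffffffffffc00 {
		t.Errorf("int64 boundary: got %x", got)
	}
	if got := uint64(maxU); got != 0xfffffffffffff800 {
		t.Errorf("uint64 boundary: got %x", got)
	}
}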

View file

@ -1192,23 +1192,24 @@ func preemptM(mp *m) {
// Does it want a preemption and is it safe to preempt?
gp := gFromTLS(mp)
if wantAsyncPreempt(gp) && isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()) {
// Inject call to asyncPreempt
targetPC := funcPC(asyncPreempt)
switch GOARCH {
default:
throw("unsupported architecture")
case "386", "amd64":
// Make it look like the thread called targetPC.
pc := c.ip()
sp := c.sp()
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.set_sp(sp)
c.set_ip(targetPC)
}
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
// Inject call to asyncPreempt
targetPC := funcPC(asyncPreempt)
switch GOARCH {
default:
throw("unsupported architecture")
case "386", "amd64":
// Make it look like the thread called targetPC.
sp := c.sp()
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = newpc
c.set_sp(sp)
c.set_ip(targetPC)
}
stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
}
}
atomic.Store(&mp.preemptExtLock, 0)

View file

@ -61,6 +61,8 @@ import (
// Keep in sync with cmd/compile/internal/gc/plive.go:go115ReduceLiveness.
const go115ReduceLiveness = true
const go115RestartSeq = go115ReduceLiveness && true // enable restartable sequences
type suspendGState struct {
g *g
@ -359,31 +361,35 @@ func wantAsyncPreempt(gp *g) bool {
// 3. It's generally safe to interact with the runtime, even if we're
// in a signal handler stopped here. For example, there are no runtime
// locks held, so acquiring a runtime lock won't self-deadlock.
func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) bool {
//
// In some cases the PC is safe for asynchronous preemption, but the
// resumption PC also needs to be adjusted. The new PC is returned in
// the second result.
func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
mp := gp.m
// Only user Gs can have safe-points. We check this first
// because it's extremely common that we'll catch mp in the
// scheduler processing this G preemption.
if mp.curg != gp {
return false
return false, 0
}
// Check M state.
if mp.p == 0 || !canPreemptM(mp) {
return false
return false, 0
}
// Check stack space.
if sp < gp.stack.lo || sp-gp.stack.lo < asyncPreemptStack {
return false
return false, 0
}
// Check if PC is an unsafe-point.
f := findfunc(pc)
if !f.valid() {
// Not Go code.
return false
return false, 0
}
if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 {
// We probably stopped at a half-executed CALL instruction,
@ -394,23 +400,25 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) bool {
// stack for unwinding, not the LR value. But if this is a
// call to morestack, we haven't created the frame, and we'll
// use the LR for unwinding, which will be bad.
return false
return false, 0
}
var up int32
var startpc uintptr
if !go115ReduceLiveness {
smi := pcdatavalue(f, _PCDATA_RegMapIndex, pc, nil)
if smi == -2 {
// Unsafe-point marked by compiler. This includes
// atomic sequences (e.g., write barrier) and nosplit
// functions (except at calls).
return false
return false, 0
}
} else {
up := pcdatavalue(f, _PCDATA_UnsafePoint, pc, nil)
up, startpc = pcdatavalue2(f, _PCDATA_UnsafePoint, pc)
if up != _PCDATA_UnsafePointSafe {
// Unsafe-point marked by compiler. This includes
// atomic sequences (e.g., write barrier) and nosplit
// functions (except at calls).
return false
return false, 0
}
}
if fd := funcdata(f, _FUNCDATA_LocalsPointerMaps); fd == nil || fd == unsafe.Pointer(&no_pointers_stackmap) {
@ -422,7 +430,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) bool {
//
// TODO: Are there cases that are safe but don't have a
// locals pointer map, like empty frame functions?
return false
return false, 0
}
name := funcname(f)
if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
@ -445,10 +453,29 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) bool {
//
// TODO(austin): We should improve this, or opt things
// in incrementally.
return false
return false, 0
}
return true
if go115RestartSeq {
switch up {
case _PCDATA_Restart1, _PCDATA_Restart2:
// Restartable instruction sequence. Back off PC to
// the start PC.
if startpc == 0 || startpc > pc || pc-startpc > 20 {
throw("bad restart PC")
}
return true, startpc
case _PCDATA_RestartAtEntry:
// Restart from the function entry at resumption.
return true, f.entry
}
} else {
switch up {
case _PCDATA_Restart1, _PCDATA_Restart2, _PCDATA_RestartAtEntry:
// go115RestartSeq is not enabled. Treat it as an unsafe point.
return false, 0
}
}
return true, pc
}
var no_pointers_stackmap uint64 // defined in assembly, for NO_LOCAL_POINTERS macro

View file

@ -252,7 +252,7 @@ func forcegchelper() {
throw("forcegc: phase error")
}
atomic.Store(&forcegc.idle, 1)
goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
// this goroutine is explicitly resumed by sysmon
if debug.gctrace > 0 {
println("GC forced")

View file

@ -971,7 +971,7 @@ const (
waitReasonChanReceive // "chan receive"
waitReasonChanSend // "chan send"
waitReasonFinalizerWait // "finalizer wait"
waitReasonForceGGIdle // "force gc (idle)"
waitReasonForceGCIdle // "force gc (idle)"
waitReasonSemacquire // "semacquire"
waitReasonSleep // "sleep"
waitReasonSyncCondWait // "sync.Cond.Wait"
@ -1001,7 +1001,7 @@ var waitReasonStrings = [...]string{
waitReasonChanReceive: "chan receive",
waitReasonChanSend: "chan send",
waitReasonFinalizerWait: "finalizer wait",
waitReasonForceGGIdle: "force gc (idle)",
waitReasonForceGCIdle: "force gc (idle)",
waitReasonSemacquire: "semacquire",
waitReasonSleep: "sleep",
waitReasonSyncCondWait: "sync.Cond.Wait",

View file

@ -41,19 +41,18 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
sp := uintptr(c.esp())
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
c.pushCall(funcPC(sigpanic))
c.pushCall(funcPC(sigpanic), pc)
} else {
// Not safe to push the call. Just clobber the frame.
c.set_eip(uint32(funcPC(sigpanic)))
}
}
func (c *sigctxt) pushCall(targetPC uintptr) {
// Make it look like the signaled instruction called target.
pc := uintptr(c.eip())
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.esp())
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_esp(uint32(sp))
c.set_eip(uint32(targetPC))
}

View file

@ -66,19 +66,18 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
sp := uintptr(c.rsp())
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
c.pushCall(funcPC(sigpanic))
c.pushCall(funcPC(sigpanic), pc)
} else {
// Not safe to push the call. Just clobber the frame.
c.set_rip(uint64(funcPC(sigpanic)))
}
}
func (c *sigctxt) pushCall(targetPC uintptr) {
// Make it look like the signaled instruction called target.
pc := uintptr(c.rip())
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.rsp())
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_rsp(uint64(sp))
c.set_rip(uint64(targetPC))
}

View file

@ -63,7 +63,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint32(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -72,7 +72,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint32)(unsafe.Pointer(uintptr(sp))) = c.lr()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_lr(c.pc())
// calls targetPC at resumePC.
c.set_lr(uint32(resumePC))
c.set_pc(uint32(targetPC))
}

View file

@ -79,7 +79,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint64(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -88,7 +88,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_lr(c.pc())
// calls targetPC at resumePC.
c.set_lr(uint64(resumePC))
c.set_pc(uint64(targetPC))
}

View file

@ -110,7 +110,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint64(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -119,7 +119,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_link(c.pc())
// calls targetPC at resumePC.
c.set_link(uint64(resumePC))
c.set_pc(uint64(targetPC))
}

View file

@ -85,7 +85,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(sigpanicPC)
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -94,7 +94,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_link(c.pc())
// calls targetPC at resumePC.
c.set_link(uint64(resumePC))
c.set_pc(uint64(targetPC))
}

View file

@ -80,7 +80,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint32(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -89,7 +89,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint32)(unsafe.Pointer(uintptr(sp))) = c.link()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_link(c.pc())
// calls targetPC at resumePC.
c.set_link(uint32(resumePC))
c.set_pc(uint32(targetPC))
}

View file

@ -86,7 +86,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint64(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -104,8 +104,8 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
*(*uint64)(unsafe.Pointer(uintptr(sp) + 8)) = c.r2()
*(*uint64)(unsafe.Pointer(uintptr(sp) + 16)) = c.r12()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_link(c.pc())
// calls targetPC at resumePC.
c.set_link(uint64(resumePC))
c.set_r12(uint64(targetPC))
c.set_pc(uint64(targetPC))
}

View file

@ -78,7 +78,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
c.set_pc(uint64(funcPC(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC uintptr) {
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Push the LR to stack, as we'll clobber it in order to
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
@ -87,7 +87,7 @@ func (c *sigctxt) pushCall(targetPC uintptr) {
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// Set up PC and LR to pretend the function being signaled
// calls targetPC at the faulting PC.
c.set_ra(c.pc())
// calls targetPC at resumePC.
c.set_ra(uint64(resumePC))
c.set_pc(uint64(targetPC))
}

View file

@ -326,9 +326,11 @@ func sigpipe() {
func doSigPreempt(gp *g, ctxt *sigctxt) {
// Check if this G wants to be preempted and is safe to
// preempt.
if wantAsyncPreempt(gp) && isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()) {
// Inject a call to asyncPreempt.
ctxt.pushCall(funcPC(asyncPreempt))
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
// Adjust the PC and inject a call to asyncPreempt.
ctxt.pushCall(funcPC(asyncPreempt), newpc)
}
}
// Acknowledge the preemption.

View file

@ -287,6 +287,18 @@ const (
// PCDATA_UnsafePoint values.
_PCDATA_UnsafePointSafe = -1 // Safe for async preemption
_PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption
// _PCDATA_Restart1(2) apply to a sequence of instructions; if an async
// preemption happens within such a sequence, we back off the PC to the
// start of the sequence when resuming.
// We need two values so we can distinguish the start/end of the sequence
// in case two sequences are next to each other.
_PCDATA_Restart1 = -3
_PCDATA_Restart2 = -4
// Like _PCDATA_Restart1, but back off to the function entry if async
// preempted.
_PCDATA_RestartAtEntry = -5
)
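// Hypothetical helper, shown only to illustrate how these markers are meant
// to be consumed (compare isAsyncSafePoint above): a safe point resumes at
// the interrupted PC, a restartable sequence backs off to its start PC, and
// _PCDATA_RestartAtEntry re-runs the function from its entry.
func restartPCForMark(mark int32, pc, seqStart, funcEntry uintptr) (ok bool, newpc uintptr) {
	switch mark {
	case _PCDATA_UnsafePointSafe:
		return true, pc
	case _PCDATA_Restart1, _PCDATA_Restart2:
		return true, seqStart
	case _PCDATA_RestartAtEntry:
		return true, funcEntry
	default:
		return false, 0 // unsafe point: do not preempt here
	}
}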
// A FuncID identifies particular functions that need to be treated
@ -708,9 +720,11 @@ func pcvalueCacheKey(targetpc uintptr) uintptr {
return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
// Returns the PCData value, and the PC where this value starts.
// TODO: the start PC is returned only when cache is nil.
func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) {
if off == 0 {
return -1
return -1, 0
}
// Check the cache. This speeds up walks of deep stacks, which
@ -729,7 +743,7 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
// fail in the first clause.
ent := &cache.entries[x][i]
if ent.off == off && ent.targetpc == targetpc {
return ent.val
return ent.val, 0
}
}
}
@ -739,11 +753,12 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
print("runtime: no module data for ", hex(f.entry), "\n")
throw("no module data")
}
return -1
return -1, 0
}
datap := f.datap
p := datap.pclntable[off:]
pc := f.entry
prevpc := pc
val := int32(-1)
for {
var ok bool
@ -770,14 +785,15 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
}
}
return val
return val, prevpc
}
prevpc = pc
}
// If there was a table, it should have covered all program counters.
// If not, something is wrong.
if panicking != 0 || !strict {
return -1
return -1, 0
}
print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n")
@ -795,7 +811,7 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric
}
throw("invalid runtime symbol table")
return -1
return -1, 0
}
func cfuncname(f funcInfo) *byte {
@ -833,9 +849,9 @@ func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int
if !f.valid() {
return "?", 0
}
fileno := int(pcvalue(f, f.pcfile, targetpc, nil, strict))
line = pcvalue(f, f.pcln, targetpc, nil, strict)
if fileno == -1 || line == -1 || fileno >= len(datap.filetab) {
fileno, _ := pcvalue(f, f.pcfile, targetpc, nil, strict)
line, _ = pcvalue(f, f.pcln, targetpc, nil, strict)
if fileno == -1 || line == -1 || int(fileno) >= len(datap.filetab) {
// print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n")
return "?", 0
}
@ -848,7 +864,7 @@ func funcline(f funcInfo, targetpc uintptr) (file string, line int32) {
}
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
x := pcvalue(f, f.pcsp, targetpc, cache, true)
x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
if x&(sys.PtrSize-1) != 0 {
print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
@ -882,14 +898,25 @@ func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache)
if table < 0 || table >= f.npcdata {
return -1
}
return pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, true)
return r
}
func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 {
if table < 0 || table >= f.npcdata {
return -1
}
return pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, strict)
return r
}
// Like pcdatavalue, but also returns the start PC of this PCData value.
// It doesn't take a cache.
func pcdatavalue2(f funcInfo, table int32, targetpc uintptr) (int32, uintptr) {
if table < 0 || table >= f.npcdata {
return -1, 0
}
return pcvalue(f, pcdatastart(f, table), targetpc, nil, true)
}
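// Hypothetical illustration of the (value, start PC) contract introduced
// above: given ranges of PCs that share a value, return the value covering
// targetpc together with the PC at which that value's range begins. This is
// only the shape of the result, not the runtime's delta encoding.
type pcRange struct {
	endPC uintptr // exclusive end of the range
	val   int32
}

func lookupPCValue(entry uintptr, ranges []pcRange, targetpc uintptr) (int32, uintptr) {
	start := entry
	for _, r := range ranges {
		if targetpc < r.endPC {
			return r.val, start
		}
		start = r.endPC
	}
	return -1, 0
}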
func funcdata(f funcInfo, i uint8) unsafe.Pointer {

View file

@ -99,7 +99,7 @@ TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0
End
Get R0
F64Const $9223372036854775807.
F64Const $0x7ffffffffffffc00p0 // Maximum truncated representation of 0x7fffffffffffffff
F64Gt
If
I64Const $0x8000000000000000
@ -107,7 +107,7 @@ TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0
End
Get R0
F64Const $-9223372036854775808.
F64Const $-0x7ffffffffffffc00p0 // Minimum truncated representation of -0x8000000000000000
F64Lt
If
I64Const $0x8000000000000000
@ -128,7 +128,7 @@ TEXT runtime·wasmTruncU(SB), NOSPLIT, $0-0
End
Get R0
F64Const $18446744073709551615.
F64Const $0xfffffffffffff800p0 // Maximum truncated representation of 0xffffffffffffffff
F64Gt
If
I64Const $0x8000000000000000

View file

@ -33,6 +33,8 @@ helperfuncs_test.go:45: 5
helperfuncs_test.go:21: 6
helperfuncs_test.go:44: 7
helperfuncs_test.go:56: 8
helperfuncs_test.go:64: 9
helperfuncs_test.go:60: 10
`
lines := strings.Split(buf.String(), "\n")
durationRE := regexp.MustCompile(`\(.*\)$`)

View file

@ -54,6 +54,17 @@ func testHelper(t *T) {
// has no effect.
t.Helper()
t.Error("8")
// Check that the right caller is reported for a func passed to Cleanup when
// multiple cleanup functions have been registered.
t.Cleanup(func() {
t.Helper()
t.Error("10")
})
t.Cleanup(func() {
t.Helper()
t.Error("9")
})
}
func parallelTestHelper(t *T) {

View file

@ -338,15 +338,17 @@ const maxStackLen = 50
// common holds the elements common between T and B and
// captures common methods such as Errorf.
type common struct {
mu sync.RWMutex // guards this group of fields
output []byte // Output generated by test or benchmark.
w io.Writer // For flushToParent.
ran bool // Test or benchmark (or one of its subtests) was executed.
failed bool // Test or benchmark has failed.
skipped bool // Test or benchmark has been skipped.
done bool // Test is finished and all subtests have completed.
helpers map[string]struct{} // functions to be skipped when writing file/line info
cleanup func() // optional function to be called at the end of the test
mu sync.RWMutex // guards this group of fields
output []byte // Output generated by test or benchmark.
w io.Writer // For flushToParent.
ran bool // Test or benchmark (or one of its subtests) was executed.
failed bool // Test or benchmark has failed.
skipped bool // Test or benchmark has been skipped.
done bool // Test is finished and all subtests have completed.
helpers map[string]struct{} // functions to be skipped when writing file/line info
cleanup func() // optional function to be called at the end of the test
cleanupName string // Name of the cleanup function.
cleanupPc []uintptr // The stack trace at the point where Cleanup was called.
chatty bool // A copy of the chatty flag.
finished bool // Test function has completed.
@ -426,6 +428,10 @@ func (c *common) frameSkip(skip int) runtime.Frame {
var firstFrame, prevFrame, frame runtime.Frame
for more := true; more; prevFrame = frame {
frame, more = frames.Next()
if frame.Function == c.cleanupName {
frames = runtime.CallersFrames(c.cleanupPc)
continue
}
if firstFrame.PC == 0 {
firstFrame = frame
}
@ -789,12 +795,21 @@ func (c *common) Cleanup(f func()) {
c.mu.Lock()
defer c.mu.Unlock()
oldCleanup := c.cleanup
oldCleanupPc := c.cleanupPc
c.cleanup = func() {
if oldCleanup != nil {
defer oldCleanup()
defer func() {
c.cleanupPc = oldCleanupPc
oldCleanup()
}()
}
c.cleanupName = callerName(0)
f()
}
var pc [maxStackLen]uintptr
// Skip two extra frames to account for this function and runtime.Callers itself.
n := runtime.Callers(2, pc[:])
c.cleanupPc = pc[:n]
}
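// A small usage sketch (hypothetical, not part of this file): cleanups run in
// last-registered-first order after the test body, and a failure reported
// from a cleanup func that calls Helper is attributed to the matching Cleanup
// call site via the cleanupPc stack captured above.
func cleanupOrderSketch(t *T) {
	t.Cleanup(func() { t.Log("runs second") }) // registered first
	t.Cleanup(func() { t.Log("runs first") })  // registered second
}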
var tempDirReplacer struct {

View file

@ -0,0 +1,65 @@
// compile
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Make sure that a literal value can be passed to a struct
// blank field of array/struct type; see issue #38690.
package main
type A1 = [0]int
type A2 = [1]int
type S1 struct{}
type S2 struct {
x int
}
type S3 = struct{}
type S4 = struct{ x int }
type S struct {
x int
_ [0]int
_ [1]int
_ A1
_ A2
_ S1
_ S2
_ S3
_ S4
_ [1]S4
}
var s = S{1, [0]int{}, [1]int{1}, A1{}, A2{1}, S1{}, S2{1}, S3{}, S4{1}, [1]S4{}}
func main() {
f1()
mustPanic(f2)
mustPanic(f3)
}
func f1() {
_ = S{1, [0]int{}, [1]int{1}, A1{}, A2{1}, S1{}, S2{1}, S3{}, S4{1}, [1]S4{}}
}
func f2() {
_ = S{1, [0]int{}, [1]int{1}, A1{}, A2{1}, S1{}, S2{1}, S3{}, func() S4 { panic("") }(), [1]S4{}}
}
func f3() {
_ = S{1, [0]int{}, [1]int{1}, A1{}, A2{1}, S1{}, S2{1}, S3{}, S4{1}, func() [1]S4 { panic("") }()}
}
func mustPanic(f func()) {
defer func() {
if recover() == nil {
panic("expected panic, got nil")
}
}()
f()
}

View file

@ -0,0 +1,17 @@
// compile
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
var g *uint64
func main() {
var v uint64
g = &v
v &^= (1 << 31)
v |= 1 << 63
v &^= (1 << 63)
}