
runtime: add mapassign_fast*

Add benchmarks for map assignment with int32/int64/string keys

Benchmark results on darwin/amd64

name                  old time/op  new time/op  delta
MapAssignInt32_255-8  24.7ns ± 3%  17.4ns ± 2%  -29.75%  (p=0.000 n=10+10)
MapAssignInt32_64k-8  45.5ns ± 4%  37.6ns ± 4%  -17.18%  (p=0.000 n=10+10)
MapAssignInt64_255-8  26.0ns ± 3%  17.9ns ± 4%  -31.03%  (p=0.000 n=10+10)
MapAssignInt64_64k-8  46.9ns ± 5%  38.7ns ± 2%  -17.53%  (p=0.000 n=9+10)
MapAssignStr_255-8    47.8ns ± 3%  24.8ns ± 4%  -48.01%  (p=0.000 n=10+10)
MapAssignStr_64k-8    83.0ns ± 3%  51.9ns ± 3%  -37.45%  (p=0.000 n=10+9)
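
For illustration, these are the assignment forms the new fast paths cover
(a sketch; keys must be int32/int64/string-shaped and the value type at most
128 bytes, per the maxValueSize check referenced in runtime/hashmap.go):

	m32 := make(map[int32]int)
	m32[k32] = v // lowered to mapassign_fast32
	m64 := make(map[int64]int)
	m64[k64] = v // lowered to mapassign_fast64
	ms := make(map[string]int)
	ms[s] = v // lowered to mapassign_faststr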

name                     old time/op    new time/op    delta
BinaryTree17-8              3.11s ±19%     2.78s ± 3%    ~     (p=0.095 n=5+5)
Fannkuch11-8                3.26s ± 1%     3.21s ± 2%    ~     (p=0.056 n=5+5)
FmtFprintfEmpty-8          50.3ns ± 1%    50.8ns ± 2%    ~     (p=0.246 n=5+5)
FmtFprintfString-8         82.7ns ± 4%    80.1ns ± 5%    ~     (p=0.238 n=5+5)
FmtFprintfInt-8            82.6ns ± 2%    81.9ns ± 3%    ~     (p=0.508 n=5+5)
FmtFprintfIntInt-8          124ns ± 4%     121ns ± 3%    ~     (p=0.111 n=5+5)
FmtFprintfPrefixedInt-8     158ns ± 6%     160ns ± 2%    ~     (p=0.341 n=5+5)
FmtFprintfFloat-8           249ns ± 2%     245ns ± 2%    ~     (p=0.095 n=5+5)
FmtManyArgs-8               513ns ± 2%     519ns ± 3%    ~     (p=0.151 n=5+5)
GobDecode-8                7.48ms ±12%    7.11ms ± 2%    ~     (p=0.222 n=5+5)
GobEncode-8                6.25ms ± 1%    6.03ms ± 2%  -3.56%  (p=0.008 n=5+5)
Gzip-8                      252ms ± 4%     252ms ± 4%    ~     (p=1.000 n=5+5)
Gunzip-8                   38.4ms ± 3%    38.6ms ± 2%    ~     (p=0.690 n=5+5)
HTTPClientServer-8         76.9µs ±41%    66.4µs ± 6%    ~     (p=0.310 n=5+5)
JSONEncode-8               16.5ms ± 3%    16.7ms ± 3%    ~     (p=0.421 n=5+5)
JSONDecode-8               54.6ms ± 1%    54.3ms ± 2%    ~     (p=0.548 n=5+5)
Mandelbrot200-8            4.45ms ± 3%    4.47ms ± 1%    ~     (p=0.841 n=5+5)
GoParse-8                  3.43ms ± 1%    3.32ms ± 2%  -3.28%  (p=0.008 n=5+5)
RegexpMatchEasy0_32-8      88.2ns ± 3%    89.4ns ± 2%    ~     (p=0.333 n=5+5)
RegexpMatchEasy0_1K-8       205ns ± 1%     206ns ± 1%    ~     (p=0.905 n=5+5)
RegexpMatchEasy1_32-8      85.1ns ± 1%    85.5ns ± 5%    ~     (p=0.690 n=5+5)
RegexpMatchEasy1_1K-8       365ns ± 1%     371ns ± 9%    ~     (p=1.000 n=5+5)
RegexpMatchMedium_32-8      129ns ± 2%     128ns ± 3%    ~     (p=0.730 n=5+5)
RegexpMatchMedium_1K-8     39.8µs ± 0%    39.7µs ± 4%    ~     (p=0.730 n=4+5)
RegexpMatchHard_32-8       1.99µs ± 3%    2.05µs ±16%    ~     (p=0.794 n=5+5)
RegexpMatchHard_1K-8       59.3µs ± 1%    60.3µs ± 7%    ~     (p=1.000 n=5+5)
Revcomp-8                   1.36s ±63%     0.52s ± 5%    ~     (p=0.095 n=5+5)
Template-8                 62.6ms ±14%    60.5ms ± 5%    ~     (p=0.690 n=5+5)
TimeParse-8                 330ns ± 2%     324ns ± 2%    ~     (p=0.087 n=5+5)
TimeFormat-8                350ns ± 3%     340ns ± 1%  -2.86%  (p=0.008 n=5+5)

name                     old speed      new speed      delta
GobDecode-8               103MB/s ±11%   108MB/s ± 2%    ~     (p=0.222 n=5+5)
GobEncode-8               123MB/s ± 1%   127MB/s ± 2%  +3.71%  (p=0.008 n=5+5)
Gzip-8                   77.1MB/s ± 4%  76.9MB/s ± 3%    ~     (p=1.000 n=5+5)
Gunzip-8                  505MB/s ± 3%   503MB/s ± 2%    ~     (p=0.690 n=5+5)
JSONEncode-8              118MB/s ± 3%   116MB/s ± 3%    ~     (p=0.421 n=5+5)
JSONDecode-8             35.5MB/s ± 1%  35.8MB/s ± 2%    ~     (p=0.397 n=5+5)
GoParse-8                16.9MB/s ± 1%  17.4MB/s ± 2%  +3.45%  (p=0.008 n=5+5)
RegexpMatchEasy0_32-8     363MB/s ± 3%   358MB/s ± 2%    ~     (p=0.421 n=5+5)
RegexpMatchEasy0_1K-8    4.98GB/s ± 1%  4.97GB/s ± 1%    ~     (p=0.548 n=5+5)
RegexpMatchEasy1_32-8     376MB/s ± 1%   375MB/s ± 5%    ~     (p=0.690 n=5+5)
RegexpMatchEasy1_1K-8    2.80GB/s ± 1%  2.76GB/s ± 9%    ~     (p=0.841 n=5+5)
RegexpMatchMedium_32-8   7.73MB/s ± 1%  7.76MB/s ± 3%    ~     (p=0.730 n=5+5)
RegexpMatchMedium_1K-8   25.8MB/s ± 0%  25.8MB/s ± 4%    ~     (p=0.651 n=4+5)
RegexpMatchHard_32-8     16.1MB/s ± 3%  15.7MB/s ±14%    ~     (p=0.794 n=5+5)
RegexpMatchHard_1K-8     17.3MB/s ± 1%  17.0MB/s ± 7%    ~     (p=0.984 n=5+5)
Revcomp-8                 273MB/s ±83%   488MB/s ± 5%    ~     (p=0.095 n=5+5)
Template-8               31.1MB/s ±13%  32.1MB/s ± 5%    ~     (p=0.690 n=5+5)

Updates #19495

Change-Id: I116e9a2a4594769318b22d736464de8a98499909
Reviewed-on: https://go-review.googlesource.com/38091
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Author:    Hugues Bruant, 2017-03-12 14:47:59 -07:00
Committer: Josh Bleecher Snyder
Parent:    53937aad99
Commit:    ec091b6af2
7 changed files with 366 additions and 25 deletions

src/cmd/compile/internal/gc/builtin.go

@@ -83,6 +83,9 @@ var runtimeDecls = [...]struct {
 	{"mapaccess2_faststr", funcTag, 64},
 	{"mapaccess2_fat", funcTag, 65},
 	{"mapassign", funcTag, 60},
+	{"mapassign_fast32", funcTag, 61},
+	{"mapassign_fast64", funcTag, 61},
+	{"mapassign_faststr", funcTag, 61},
 	{"mapiterinit", funcTag, 66},
 	{"mapdelete", funcTag, 66},
 	{"mapiternext", funcTag, 67},

src/cmd/compile/internal/gc/builtin/runtime.go

@@ -103,6 +103,9 @@ func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
 func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
 func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
 func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
 func mapdelete(mapType *byte, hmap map[any]any, key *any)
 func mapiternext(hiter *any)
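
For comparison, the concrete runtime-level signatures behind these compiler
declarations (generic mapassign as it already exists in hashmap.go; note the
fast variants take the key by value, which is what lets the compiler skip the
addressable key temporary):

	func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
	func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer
	func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
	func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer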

src/cmd/compile/internal/gc/order.go

@@ -206,13 +206,20 @@ func orderaddrtemp(n *Node, order *Order) *Node {
 	return ordercopyexpr(n, n.Type, order, 0)
 }
 
-// ordermapkeytemp prepares n.Right to be a key in a map lookup.
+// ordermapkeytemp prepares n.Right to be a key in a map runtime call.
 func ordermapkeytemp(n *Node, order *Order) {
 	// Most map calls need to take the address of the key.
-	// Exception: mapaccessN_fast* calls. See golang.org/issue/19015.
-	p, _ := mapaccessfast(n.Left.Type)
-	fastaccess := p != "" && n.Etype == 0 // Etype == 0 iff n is an rvalue
-	if fastaccess {
+	// Exception: map(accessN|assign)_fast* calls. See golang.org/issue/19015.
+	var p string
+	switch n.Etype {
+	case 0: // n is an rvalue
+		p, _ = mapaccessfast(n.Left.Type)
+	case 1: // n is an lvalue
+		p = mapassignfast(n.Left.Type)
+	default:
+		Fatalf("unexpected node type: %+v", n)
+	}
+	if p != "" {
 		return
 	}
 	n.Right = orderaddrtemp(n.Right, order)
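
In source terms, the two Etype cases correspond to (a schematic example):

	v := m[k] // rvalue use of m[k]: Etype == 0, may use mapaccess1_fast*
	m[k] = v  // lvalue use of m[k]: Etype == 1, may use mapassign_fast*

Either way the fast routines take the key by value, so no addressable
temporary is needed and orderaddrtemp can be skipped.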

src/cmd/compile/internal/gc/walk.go

@@ -1184,9 +1184,14 @@ opswitch:
 		t := map_.Type
 		if n.Etype == 1 {
 			// This m[k] expression is on the left-hand side of an assignment.
-			// orderexpr made sure key is addressable.
-			key = nod(OADDR, key, nil)
-			n = mkcall1(mapfn("mapassign", t), nil, init, typename(t), map_, key)
+			p := mapassignfast(t)
+			if p == "" {
+				// standard version takes key by reference.
+				// orderexpr made sure key is addressable.
+				key = nod(OADDR, key, nil)
+				p = "mapassign"
+			}
+			n = mkcall1(mapfn(p, t), nil, init, typename(t), map_, key)
 		} else {
 			// m[k] is not the target of an assignment.
 			p, _ := mapaccessfast(t)
@@ -2628,7 +2633,7 @@ func mapfndel(name string, t *Type) *Node {
 	return fn
 }
 
-// mapaccessfast returns the names of the fast map access runtime routines for t.
+// mapaccessfast returns the name of the fast map access runtime routine for t.
 func mapaccessfast(t *Type) (access1, access2 string) {
 	// Check ../../runtime/hashmap.go:maxValueSize before changing.
 	if t.Val().Width > 128 {
@@ -2645,6 +2650,23 @@ func mapaccessfast(t *Type) (access1, access2 string) {
 	return "", ""
 }
 
+// mapassignfast returns the name of the fast map assign runtime routine for t.
+func mapassignfast(t *Type) (assign string) {
+	// Check ../../runtime/hashmap.go:maxValueSize before changing.
+	if t.Val().Width > 128 {
+		return ""
+	}
+	switch algtype(t.Key()) {
+	case AMEM32:
+		return "mapassign_fast32"
+	case AMEM64:
+		return "mapassign_fast64"
+	case ASTRING:
+		return "mapassign_faststr"
+	}
+	return ""
+}
+
 func writebarrierfn(name string, l *Type, r *Type) *Node {
 	fn := syslook(name)
 	fn = substArgTypes(fn, l, r)
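
Schematically, the rewrite this performs for m[k] = v (pseudo-Go; the real
transformation builds compiler Node trees):

	// generic path: key spilled to an addressable temp, passed by pointer
	*mapassign(maptype, m, &k) = v
	// fast path, e.g. for map[int64]V: key passed by value
	*mapassign_fast64(maptype, m, k) = v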

src/runtime/hashmap_fast.go

@@ -45,7 +45,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 			if k != key {
 				continue
 			}
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -94,7 +94,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 			if k != key {
 				continue
 			}
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -143,7 +143,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 			if k != key {
 				continue
 			}
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -192,7 +192,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 			if k != key {
 				continue
 			}
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -223,7 +223,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 		if key.len < 32 {
 			// short key, doing lots of comparisons is ok
 			for i := uintptr(0); i < bucketCnt; i++ {
-				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 				if x == empty {
 					continue
 				}
@@ -240,7 +240,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 		// long key, try not to do more comparisons than necessary
 		keymaybe := uintptr(bucketCnt)
 		for i := uintptr(0); i < bucketCnt; i++ {
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -295,7 +295,7 @@ dohash:
 	}
 	for {
 		for i := uintptr(0); i < bucketCnt; i++ {
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x != top {
 				continue
 			}
@@ -332,7 +332,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 		if key.len < 32 {
 			// short key, doing lots of comparisons is ok
 			for i := uintptr(0); i < bucketCnt; i++ {
-				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 				if x == empty {
 					continue
 				}
@@ -349,7 +349,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 		// long key, try not to do more comparisons than necessary
 		keymaybe := uintptr(bucketCnt)
 		for i := uintptr(0); i < bucketCnt; i++ {
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x == empty {
 				continue
 			}
@@ -402,7 +402,7 @@ dohash:
 	}
 	for {
 		for i := uintptr(0); i < bucketCnt; i++ {
-			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
 			if x != top {
 				continue
 			}
@@ -420,3 +420,275 @@ dohash:
 		}
 	}
 }
+
+func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := (*bmap)(newobject(t.bucket))
+		h.setoverflow(t, b, newb)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*4)
+	}
+
+	// store new key/value at insert position
+	*((*uint32)(insertk)) = key
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+			if k != key {
+				continue
+			}
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := (*bmap)(newobject(t.bucket))
+		h.setoverflow(t, b, newb)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*8)
+	}
+
+	// store new key/value at insert position
+	*((*uint64)(insertk)) = key
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc(unsafe.Pointer(&t))
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	key := stringStructOf(&ky)
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & (uintptr(1)<<h.B - 1)
+	if h.growing() {
+		growWork(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	top := uint8(hash >> (sys.PtrSize*8 - 8))
+	if top < minTopHash {
+		top += minTopHash
+	}
+
+	var inserti *uint8
+	var insertk unsafe.Pointer
+	var val unsafe.Pointer
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] != top {
+				if b.tophash[i] == empty && inserti == nil {
+					inserti = &b.tophash[i]
+					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+				}
+				continue
+			}
+			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+			if k.len != key.len {
+				continue
+			}
+			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+				continue
+			}
+			// already have a mapping for key. Update it.
+			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if inserti == nil {
+		// all current buckets are full, allocate a new one.
+		newb := (*bmap)(newobject(t.bucket))
+		h.setoverflow(t, b, newb)
+		inserti = &newb.tophash[0]
+		insertk = add(unsafe.Pointer(newb), dataOffset)
+		val = add(insertk, bucketCnt*2*sys.PtrSize)
+	}
+
+	// store new key/value at insert position
+	*((*stringStruct)(insertk)) = *key
+	*inserti = top
+	h.count++
+
+done:
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
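
As a worked illustration of the tophash computation shared by all three
routines above, here is a standalone sketch (ptrSize and minTopHash mirror
the amd64 runtime constants of this era):

	package main

	import "fmt"

	const (
		ptrSize    = 8 // sys.PtrSize on amd64
		minTopHash = 4 // smaller values are reserved cell markers (empty, evacuated*)
	)

	// tophash keeps the top byte of a bucket hash, bumped past the reserved markers.
	func tophash(hash uintptr) uint8 {
		top := uint8(hash >> (ptrSize*8 - 8))
		if top < minTopHash {
			top += minTopHash
		}
		return top
	}

	func main() {
		fmt.Println(tophash(0xab00000000000000)) // 171: top byte used as-is
		fmt.Println(tophash(0x0100000000000000)) // 5: 1 < minTopHash, bumped to 1+4
	}

Note also the comparison order in mapassign_faststr: length first, then
pointer identity (k.str != key.str), and only then memequal, so re-assigning
through an identical string header never has to touch the key bytes.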

src/runtime/map_test.go

@@ -10,6 +10,7 @@ import (
 	"reflect"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"testing"
@@ -617,3 +618,36 @@ func TestNonEscapingMap(t *testing.T) {
 		t.Fatalf("want 0 allocs, got %v", n)
 	}
 }
+
+func benchmarkMapAssignInt32(b *testing.B, pow uint) {
+	a := make(map[int32]int)
+	for i := 0; i < b.N; i++ {
+		a[int32(i&((1<<pow)-1))] = i
+	}
+}
+
+func BenchmarkMapAssignInt32_255(b *testing.B) { benchmarkMapAssignInt32(b, 8) }
+func BenchmarkMapAssignInt32_64k(b *testing.B) { benchmarkMapAssignInt32(b, 16) }
+
+func benchmarkMapAssignInt64(b *testing.B, pow uint) {
+	a := make(map[int64]int)
+	for i := 0; i < b.N; i++ {
+		a[int64(i&((1<<pow)-1))] = i
+	}
+}
+
+func BenchmarkMapAssignInt64_255(b *testing.B) { benchmarkMapAssignInt64(b, 8) }
+func BenchmarkMapAssignInt64_64k(b *testing.B) { benchmarkMapAssignInt64(b, 16) }
+
+func benchmarkMapAssignStr(b *testing.B, pow uint) {
+	k := make([]string, (1 << pow))
+	for i := 0; i < len(k); i++ {
+		k[i] = strconv.Itoa(i)
+	}
+	b.ResetTimer()
+	a := make(map[string]int)
+	for i := 0; i < b.N; i++ {
+		a[k[i&((1<<pow)-1)]] = i
+	}
+}
+
+func BenchmarkMapAssignStr_255(b *testing.B) { benchmarkMapAssignStr(b, 8) }
+func BenchmarkMapAssignStr_64k(b *testing.B) { benchmarkMapAssignStr(b, 16) }
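
The before/after tables in the commit message are in benchstat format;
presumably they were produced with something like the following
(illustrative commands, not taken from the commit):

	go test runtime -run NONE -bench 'MapAssign(Int32|Int64|Str)_' -count 10 > old.txt
	# apply this change, rebuild, then:
	go test runtime -run NONE -bench 'MapAssign(Int32|Int64|Str)_' -count 10 > new.txt
	benchstat old.txt new.txt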

test/live.go

@@ -283,19 +283,19 @@ func f17a(p *byte) { // ERROR "live at entry to f17a: p$"
 func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
 	// key temporary
 	if b {
-		m2s[str()] = p // ERROR "live at call to mapassign: p .autotmp_[0-9]+$" "live at call to str: p$"
+		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
 	}
-	m2s[str()] = p // ERROR "live at call to mapassign: p .autotmp_[0-9]+$" "live at call to str: p$"
-	m2s[str()] = p // ERROR "live at call to mapassign: p .autotmp_[0-9]+$" "live at call to str: p$"
+	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
+	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
 }
 
 func f17c() {
 	// key and value temporaries
 	if b {
-		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+ .autotmp_[0-9]+$"
+		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
 	}
-	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+ .autotmp_[0-9]+$"
-	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign: .autotmp_[0-9]+ .autotmp_[0-9]+$"
+	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
+	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
 }
 
 func f17d() *byte
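
The liveness changes above follow from the by-value key: with generic
mapassign the key had to be spilled to an addressable autotmp that stayed
live across the call, while mapassign_faststr receives the string header
directly. Schematically:

	// before: tmp := str(); mapassign(typ, m2s, &tmp) // tmp (.autotmp_N) live across the call
	// after:  mapassign_faststr(typ, m2s, str())      // no key temporary needed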