
cmd/compile: optimize map-clearing range idiom

Replace map clears of the form:

        for k := range m {
                delete(m, k)
        }

(where m is a map whose key type is reflexive for ==)
with a new runtime function that clears the map's backing
array with a memclr and reinitializes the hmap struct.

Maps whose key type contains floats, for example, are not
optimized, since NaN keys cannot be deleted from maps
using delete.
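
For illustration (not part of this change), a standalone program
showing why delete cannot remove NaN keys, and hence why eliding
the loop for non-reflexive key types would change behavior:

        package main

        import (
                "fmt"
                "math"
        )

        func main() {
                m := map[float64]bool{math.NaN(): true}
                for k := range m {
                        delete(m, k) // k != k, so the key is never found
                }
                fmt.Println(len(m)) // still 1: the NaN entry survives
        }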

name                           old time/op  new time/op  delta
GoMapClear/Reflexive/1         92.2ns ± 1%  47.1ns ± 2%  -48.89%  (p=0.000 n=9+9)
GoMapClear/Reflexive/10         108ns ± 1%    48ns ± 2%  -55.68%  (p=0.000 n=10+10)
GoMapClear/Reflexive/100        303ns ± 2%   110ns ± 3%  -63.56%  (p=0.000 n=10+10)
GoMapClear/Reflexive/1000      3.58µs ± 3%  1.23µs ± 2%  -65.49%  (p=0.000 n=9+10)
GoMapClear/Reflexive/10000     28.2µs ± 3%  10.3µs ± 2%  -63.55%  (p=0.000 n=9+10)
GoMapClear/NonReflexive/1       121ns ± 2%   124ns ± 7%     ~     (p=0.097 n=10+10)
GoMapClear/NonReflexive/10      137ns ± 2%   139ns ± 3%   +1.53%  (p=0.033 n=10+10)
GoMapClear/NonReflexive/100     331ns ± 3%   334ns ± 2%     ~     (p=0.342 n=10+10)
GoMapClear/NonReflexive/1000   3.64µs ± 3%  3.64µs ± 2%     ~     (p=0.887 n=9+10)
GoMapClear/NonReflexive/10000  28.1µs ± 2%  28.4µs ± 3%     ~     (p=0.247 n=10+10)
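
The numbers above are benchstat output. Assuming the usual
workflow, they can be reproduced by benchmarking the runtime
package before and after this change and comparing:

        go test -run=NONE -bench=GoMapClear -count=10 runtime > old.txt
        # apply this change, then:
        go test -run=NONE -bench=GoMapClear -count=10 runtime > new.txt
        benchstat old.txt new.txt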

Fixes #20138

Change-Id: I181332a8ef434a4f0d89659f492d8711db3f3213
Reviewed-on: https://go-review.googlesource.com/110055
Reviewed-by: Keith Randall <khr@golang.org>
Author: Martin Möhrmann, 2018-04-27 21:58:59 +02:00
parent cd1976dbef
commit aee71dd70b
8 changed files with 437 additions and 110 deletions


@@ -100,59 +100,60 @@ var runtimeDecls = [...]struct {
{"mapdelete_fast64", funcTag, 73},
{"mapdelete_faststr", funcTag, 73},
{"mapiternext", funcTag, 74},
{"makechan64", funcTag, 76},
{"makechan", funcTag, 77},
{"chanrecv1", funcTag, 79},
{"chanrecv2", funcTag, 80},
{"chansend1", funcTag, 82},
{"mapclear", funcTag, 75},
{"makechan64", funcTag, 77},
{"makechan", funcTag, 78},
{"chanrecv1", funcTag, 80},
{"chanrecv2", funcTag, 81},
{"chansend1", funcTag, 83},
{"closechan", funcTag, 23},
{"writeBarrier", varTag, 84},
{"typedmemmove", funcTag, 85},
{"typedmemclr", funcTag, 86},
{"typedslicecopy", funcTag, 87},
{"selectnbsend", funcTag, 88},
{"selectnbrecv", funcTag, 89},
{"selectnbrecv2", funcTag, 91},
{"writeBarrier", varTag, 85},
{"typedmemmove", funcTag, 86},
{"typedmemclr", funcTag, 87},
{"typedslicecopy", funcTag, 88},
{"selectnbsend", funcTag, 89},
{"selectnbrecv", funcTag, 90},
{"selectnbrecv2", funcTag, 92},
{"selectsetpc", funcTag, 56},
{"selectgo", funcTag, 92},
{"selectgo", funcTag, 93},
{"block", funcTag, 5},
{"makeslice", funcTag, 94},
{"makeslice64", funcTag, 95},
{"growslice", funcTag, 96},
{"memmove", funcTag, 97},
{"memclrNoHeapPointers", funcTag, 98},
{"memclrHasPointers", funcTag, 98},
{"memequal", funcTag, 99},
{"memequal8", funcTag, 100},
{"memequal16", funcTag, 100},
{"memequal32", funcTag, 100},
{"memequal64", funcTag, 100},
{"memequal128", funcTag, 100},
{"int64div", funcTag, 101},
{"uint64div", funcTag, 102},
{"int64mod", funcTag, 101},
{"uint64mod", funcTag, 102},
{"float64toint64", funcTag, 103},
{"float64touint64", funcTag, 104},
{"float64touint32", funcTag, 105},
{"int64tofloat64", funcTag, 106},
{"uint64tofloat64", funcTag, 107},
{"uint32tofloat64", funcTag, 108},
{"complex128div", funcTag, 109},
{"racefuncenter", funcTag, 110},
{"makeslice", funcTag, 95},
{"makeslice64", funcTag, 96},
{"growslice", funcTag, 97},
{"memmove", funcTag, 98},
{"memclrNoHeapPointers", funcTag, 99},
{"memclrHasPointers", funcTag, 99},
{"memequal", funcTag, 100},
{"memequal8", funcTag, 101},
{"memequal16", funcTag, 101},
{"memequal32", funcTag, 101},
{"memequal64", funcTag, 101},
{"memequal128", funcTag, 101},
{"int64div", funcTag, 102},
{"uint64div", funcTag, 103},
{"int64mod", funcTag, 102},
{"uint64mod", funcTag, 103},
{"float64toint64", funcTag, 104},
{"float64touint64", funcTag, 105},
{"float64touint32", funcTag, 106},
{"int64tofloat64", funcTag, 107},
{"uint64tofloat64", funcTag, 108},
{"uint32tofloat64", funcTag, 109},
{"complex128div", funcTag, 110},
{"racefuncenter", funcTag, 111},
{"racefuncexit", funcTag, 5},
{"raceread", funcTag, 110},
{"racewrite", funcTag, 110},
{"racereadrange", funcTag, 111},
{"racewriterange", funcTag, 111},
{"msanread", funcTag, 111},
{"msanwrite", funcTag, 111},
{"raceread", funcTag, 111},
{"racewrite", funcTag, 111},
{"racereadrange", funcTag, 112},
{"racewriterange", funcTag, 112},
{"msanread", funcTag, 112},
{"msanwrite", funcTag, 112},
{"support_popcnt", varTag, 11},
{"support_sse41", varTag, 11},
}
func runtimeTypes() []*types.Type {
var typs [112]*types.Type
var typs [113]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
@@ -228,42 +229,43 @@ func runtimeTypes() []*types.Type {
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[3])}, nil)
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[2])}, nil)
typs[74] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[75] = types.NewChan(typs[2], types.Cboth)
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[75])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[75])})
typs[78] = types.NewChan(typs[2], types.Crecv)
typs[79] = functype(nil, []*Node{anonfield(typs[78]), anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[78]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[81] = types.NewChan(typs[2], types.Csend)
typs[82] = functype(nil, []*Node{anonfield(typs[81]), anonfield(typs[3])}, nil)
typs[83] = types.NewArray(typs[0], 3)
typs[84] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[83]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])})
typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[87] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
typs[88] = functype(nil, []*Node{anonfield(typs[81]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[89] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[78])}, []*Node{anonfield(typs[11])})
typs[90] = types.NewPtr(typs[11])
typs[91] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[90]), anonfield(typs[78])}, []*Node{anonfield(typs[11])})
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[32]), anonfield(typs[11])})
typs[93] = types.NewSlice(typs[2])
typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[93])})
typs[95] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[93])})
typs[96] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[93]), anonfield(typs[32])}, []*Node{anonfield(typs[93])})
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[47])}, nil)
typs[98] = functype(nil, []*Node{anonfield(typs[58]), anonfield(typs[47])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[47])}, []*Node{anonfield(typs[11])})
typs[100] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[101] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[102] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
typs[103] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])})
typs[104] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])})
typs[105] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[60])})
typs[106] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])})
typs[107] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])})
typs[108] = functype(nil, []*Node{anonfield(typs[60])}, []*Node{anonfield(typs[13])})
typs[109] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
typs[110] = functype(nil, []*Node{anonfield(typs[47])}, nil)
typs[111] = functype(nil, []*Node{anonfield(typs[47]), anonfield(typs[47])}, nil)
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[62])}, nil)
typs[76] = types.NewChan(typs[2], types.Cboth)
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[76])})
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[76])})
typs[79] = types.NewChan(typs[2], types.Crecv)
typs[80] = functype(nil, []*Node{anonfield(typs[79]), anonfield(typs[3])}, nil)
typs[81] = functype(nil, []*Node{anonfield(typs[79]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[82] = types.NewChan(typs[2], types.Csend)
typs[83] = functype(nil, []*Node{anonfield(typs[82]), anonfield(typs[3])}, nil)
typs[84] = types.NewArray(typs[0], 3)
typs[85] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[84]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])})
typs[86] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[87] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[88] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])})
typs[89] = functype(nil, []*Node{anonfield(typs[82]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[90] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[79])}, []*Node{anonfield(typs[11])})
typs[91] = types.NewPtr(typs[11])
typs[92] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[91]), anonfield(typs[79])}, []*Node{anonfield(typs[11])})
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[32]), anonfield(typs[11])})
typs[94] = types.NewSlice(typs[2])
typs[95] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[94])})
typs[96] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[94])})
typs[97] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[94]), anonfield(typs[32])}, []*Node{anonfield(typs[94])})
typs[98] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[47])}, nil)
typs[99] = functype(nil, []*Node{anonfield(typs[58]), anonfield(typs[47])}, nil)
typs[100] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[47])}, []*Node{anonfield(typs[11])})
typs[101] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])})
typs[102] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[103] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
typs[104] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])})
typs[105] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])})
typs[106] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[60])})
typs[107] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])})
typs[108] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])})
typs[109] = functype(nil, []*Node{anonfield(typs[60])}, []*Node{anonfield(typs[13])})
typs[110] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
typs[111] = functype(nil, []*Node{anonfield(typs[47])}, nil)
typs[112] = functype(nil, []*Node{anonfield(typs[47]), anonfield(typs[47])}, nil)
return typs[:]
}


@@ -122,6 +122,7 @@ func mapdelete_fast32(mapType *byte, hmap map[any]any, key any)
func mapdelete_fast64(mapType *byte, hmap map[any]any, key any)
func mapdelete_faststr(mapType *byte, hmap map[any]any, key any)
func mapiternext(hiter *any)
func mapclear(mapType *byte, hmap map[any]any)
// *byte is really *runtime.Type
func makechan64(chanType *byte, size int64) (hchan chan any)


@@ -695,6 +695,8 @@ func (o *Order) stmt(n *Node) {
t := o.markTemp()
n.Right = o.expr(n.Right, nil)
orderBody := true
switch n.Type.Etype {
default:
Fatalf("orderstmt range %v", n.Type)
@@ -721,6 +723,14 @@ func (o *Order) stmt(n *Node) {
n.Right = o.copyExpr(r, r.Type, false)
case TMAP:
if isMapClear(n) {
// Preserve the body of the map clear pattern so it can
// be detected during walk. The loop body will not be used
// when optimizing away the range loop to a runtime call.
orderBody = false
break
}
// copy the map value in case it is a map literal.
// TODO(rsc): Make tmp = literal expressions reuse tmp.
// For maps tmp is just one word so it hardly matters.
@@ -732,7 +742,9 @@ func (o *Order) stmt(n *Node) {
prealloc[n] = o.newTemp(hiter(n.Type), true)
}
o.exprListInPlace(n.List)
orderBlock(&n.Nbody)
if orderBody {
orderBlock(&n.Nbody)
}
o.out = append(o.out, n)
o.cleanTemp(t)


@@ -154,6 +154,14 @@ func cheapComputableIndex(width int64) bool {
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(n *Node) *Node {
if isMapClear(n) {
m := n.Right
lno := setlineno(m)
n = mapClear(m)
lineno = lno
return n
}
// variable name conventions:
// ohv1, hv1, hv2: hidden (old) val 1, 2
// ha, hit: hidden aggregate, iterator
@@ -449,6 +457,69 @@ func walkrange(n *Node) *Node {
return n
}
// isMapClear checks if n is of the form:
//
// for k := range m {
// delete(m, k)
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *Node) bool {
if Debug['N'] != 0 || instrumenting {
return false
}
if n.Op != ORANGE || n.Type.Etype != TMAP || n.List.Len() != 1 {
return false
}
k := n.List.First()
if k == nil || k.isBlank() {
return false
}
// Require k to be a new variable name.
if k.Name == nil || k.Name.Defn != n {
return false
}
if n.Nbody.Len() != 1 {
return false
}
stmt := n.Nbody.First() // only stmt in body
if stmt == nil || stmt.Op != ODELETE {
return false
}
m := n.Right
if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
return false
}
// Keys where equality is not reflexive cannot be deleted from maps.
if !isreflexive(m.Type.Key()) {
return false
}
return true
}
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m *Node) *Node {
t := m.Type
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
fn = substArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, typename(t), m)
n = typecheck(n, Etop)
n = walkstmt(n)
return n
}
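// In source terms, the rewrite built here roughly replaces
//
//	for k := range m {
//		delete(m, k)
//	}
//
// with a single call
//
//	mapclear(maptype, m)
//
// where maptype is the type descriptor created by typename(t).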
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of


@@ -318,7 +318,7 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
// If hint is large zeroing this memory could take a while.
if h.B != 0 {
var nextOverflow *bmap
h.buckets, nextOverflow = makeBucketArray(t, h.B)
h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
if nextOverflow != nil {
h.extra = new(mapextra)
h.extra.nextOverflow = nextOverflow
@@ -328,6 +328,57 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
return h
}
// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil, a new backing array is allocated;
// otherwise dirtyalloc is cleared and reused as the backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
base := bucketShift(b)
nbuckets := base
// For small b, overflow buckets are unlikely.
// Avoid the overhead of the calculation.
if b >= 4 {
// Add on the estimated number of overflow buckets
// required to insert the median number of elements
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.bucket.size * nbuckets
up := roundupsize(sz)
if up != sz {
nbuckets = up / t.bucket.size
}
}
if dirtyalloc == nil {
buckets = newarray(t.bucket, int(nbuckets))
} else {
// dirtyalloc was previously generated by
// the above newarray(t.bucket, int(nbuckets))
// but may not be empty.
buckets = dirtyalloc
size := t.bucket.size * nbuckets
if t.bucket.kind&kindNoPointers == 0 {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
}
}
if base != nbuckets {
// We preallocated some overflow buckets.
// To keep the overhead of tracking these overflow buckets to a minimum,
// we use the convention that if a preallocated overflow bucket's overflow
// pointer is nil, then there are more available by bumping the pointer.
// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
last.setoverflow(t, (*bmap)(buckets))
}
return buckets, nextOverflow
}
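// Both modes of the new dirtyalloc parameter appear in this file:
// makemap and hashGrow pass nil to get a freshly allocated array,
// while mapclear passes the existing h.buckets so that the old
// array is cleared with a memclr and reused:
//
//	h.buckets, nextOverflow = makeBucketArray(t, h.B, nil) // makemap
//	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)  // mapclear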
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
@@ -855,34 +906,49 @@ next:
goto next
}
func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
base := bucketShift(b)
nbuckets := base
// For small b, overflow buckets are unlikely.
// Avoid the overhead of the calculation.
if b >= 4 {
// Add on the estimated number of overflow buckets
// required to insert the median number of elements
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.bucket.size * nbuckets
up := roundupsize(sz)
if up != sz {
nbuckets = up / t.bucket.size
}
// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
callerpc := getcallerpc()
pc := funcPC(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
buckets = newarray(t.bucket, int(nbuckets))
if base != nbuckets {
// We preallocated some overflow buckets.
// To keep the overhead of tracking these overflow buckets to a minimum,
// we use the convention that if a preallocated overflow bucket's overflow
// pointer is nil, then there are more available by bumping the pointer.
// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
last.setoverflow(t, (*bmap)(buckets))
if h == nil || h.count == 0 {
return
}
return buckets, nextOverflow
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
h.flags |= hashWriting
h.flags &^= sameSizeGrow
h.oldbuckets = nil
h.nevacuate = 0
h.noverflow = 0
h.count = 0
// Keep the mapextra allocation but clear any extra information.
if h.extra != nil {
*h.extra = mapextra{}
}
// makeBucketArray clears the memory pointed to by h.buckets
// and recovers any overflow buckets by generating them
// as if h.buckets were newly allocated.
_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
if nextOverflow != nil {
// If overflow buckets are created then h.extra
// will have been allocated during initial bucket creation.
h.extra.nextOverflow = nextOverflow
}
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
func hashGrow(t *maptype, h *hmap) {
@@ -895,7 +961,7 @@ func hashGrow(t *maptype, h *hmap) {
h.flags |= sameSizeGrow
}
oldbuckets := h.buckets
newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
flags := h.flags &^ (iterator | oldIterator)
if h.flags&iterator != 0 {


@@ -341,3 +341,32 @@ func BenchmarkComplexAlgMap(b *testing.B) {
_ = m[k]
}
}
func BenchmarkGoMapClear(b *testing.B) {
b.Run("Reflexive", func(b *testing.B) {
for size := 1; size < 100000; size *= 10 {
b.Run(strconv.Itoa(size), func(b *testing.B) {
m := make(map[int]int, size)
for i := 0; i < b.N; i++ {
m[0] = size // Add one element so len(m) != 0, avoiding fast paths.
for k := range m {
delete(m, k)
}
}
})
}
})
b.Run("NonReflexive", func(b *testing.B) {
for size := 1; size < 100000; size *= 10 {
b.Run(strconv.Itoa(size), func(b *testing.B) {
m := make(map[float64]int, size)
for i := 0; i < b.N; i++ {
m[1.0] = size // Add one element so len(m) != 0, avoiding fast paths.
for k := range m {
delete(m, k)
}
}
})
}
})
}


@@ -36,3 +36,60 @@ func AccessString2(m map[string]int) bool {
_, ok := m["abc"]
return ok
}
// ------------------- //
// Map Clear //
// ------------------- //
// Optimization of map clear idiom (Issue #20138).
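// Each `// amd64:` comment below is a regular expression that the
// codegen test harness matches against the compiled function's
// amd64 assembly; a pattern prefixed with - must not appear.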
func MapClearReflexive(m map[int]int) {
// amd64:`.*runtime\.mapclear`
// amd64:-`.*runtime\.mapiterinit`
for k := range m {
delete(m, k)
}
}
func MapClearIndirect(m map[int]int) {
s := struct{ m map[int]int }{m: m}
// amd64:`.*runtime\.mapclear`
// amd64:-`.*runtime\.mapiterinit`
for k := range s.m {
delete(s.m, k)
}
}
func MapClearPointer(m map[*byte]int) {
// amd64:`.*runtime\.mapclear`
// amd64:-`.*runtime\.mapiterinit`
for k := range m {
delete(m, k)
}
}
func MapClearNotReflexive(m map[float64]int) {
// amd64:`.*runtime\.mapiterinit`
// amd64:-`.*runtime\.mapclear`
for k := range m {
delete(m, k)
}
}
func MapClearInterface(m map[interface{}]int) {
// amd64:`.*runtime\.mapiterinit`
// amd64:-`.*runtime\.mapclear`
for k := range m {
delete(m, k)
}
}
func MapClearSideEffect(m map[int]int) int {
k := 0
// amd64:`.*runtime\.mapiterinit`
// amd64:-`.*runtime\.mapclear`
for k = range m {
delete(m, k)
}
return k
}

test/mapclear.go (new file, 89 lines)

@@ -0,0 +1,89 @@
// run
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Ensure that range loops over maps with delete statements
// have the requisite side-effects.
package main
import (
"fmt"
"os"
)
func checkcleared() {
m := make(map[byte]int)
m[1] = 1
m[2] = 2
for k := range m {
delete(m, k)
}
l := len(m)
if want := 0; l != want {
fmt.Printf("len after map clear = %d want %d\n", l, want)
os.Exit(1)
}
m[0] = 0 // Keep the map non-empty to avoid internal map fast paths.
n := 0
for range m {
n++
}
if want := 1; n != want {
fmt.Printf("number of keys found = %d want %d\n", n, want)
os.Exit(1)
}
}
func checkloopvars() {
k := 0
m := make(map[int]int)
m[42] = 0
for k = range m {
delete(m, k)
}
if want := 42; k != want {
fmt.Printf("var after range with side-effect = %d want %d\n", k, want)
os.Exit(1)
}
}
func checksideeffects() {
var x int
f := func() int {
x++
return 0
}
m := make(map[int]int)
m[0] = 0
m[1] = 1
for k := range m {
delete(m, k+f())
}
if want := 2; x != want {
fmt.Printf("var after range with side-effect = %d want %d\n", x, want)
os.Exit(1)
}
var n int
m = make(map[int]int)
m[0] = 0
m[1] = 1
for k := range m {
delete(m, k)
n++
}
if want := 2; n != want {
fmt.Printf("counter for range with side-effect = %d want %d\n", n, want)
os.Exit(1)
}
}
func main() {
checkcleared()
checkloopvars()
checksideeffects()
}