runtime: allocate from free and scav fairly

This change modifies the behavior of span allocation to no longer
prefer the free (unscavenged) treap over the scavenged treap, and to
instead take the best fit across both.

While there is an additional cost to allocating out of the scavenged
treap, the current behavior of preferring the unscavenged spans can
lead to unbounded growth of a program's virtual memory footprint.

In small programs (a low number of Ps, a small resident set, a low
allocation rate) this behavior isn't readily apparent and is difficult
to reproduce.

However, in relatively large, long-running programs we see unbounded
growth in the number of free spans, and an unbounded number of heap
growths.

It remains unclear exactly how the old preference for unscavenged
spans ends up increasing the number of heap growths over time, but
switching the policy back to best-fit does indeed solve the problem.

Change-Id: Ibb88d24f9ef6766baaa7f12b411974cc03341e7b
Reviewed-on: https://go-review.googlesource.com/c/148979
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Author: Michael Anthony Knyszek
Date: 2018-11-08 20:06:58 +00:00 (committed by Michael Knyszek)
Parent: 3651476075
Commit: 064842450b
2 changed files with 31 additions and 23 deletions

src/runtime/mgclarge.go

@@ -297,14 +297,13 @@ func (root *mTreap) removeNode(t *treapNode) {
 	mheap_.treapalloc.free(unsafe.Pointer(t))
 }
 
-// remove searches for, finds, removes from the treap, and returns the smallest
-// span that can hold npages. If no span has at least npages return nil.
+// find searches for, finds, and returns the treap node containing the
+// smallest span that can hold npages. If no span has at least npages
+// it returns nil.
 // This is slightly more complicated than a simple binary tree search
 // since if an exact match is not found the next larger node is
 // returned.
-// If the last node inspected > npagesKey not holding
-// a left node (a smaller npages) is the "best fit" node.
-func (root *mTreap) remove(npages uintptr) *mspan {
+func (root *mTreap) find(npages uintptr) *treapNode {
 	t := root.treap
 	for t != nil {
 		if t.spanKey == nil {
@@ -315,9 +314,7 @@ func (root *mTreap) remove(npages uintptr) *mspan {
 		} else if t.left != nil && t.left.npagesKey >= npages {
 			t = t.left
 		} else {
-			result := t.spanKey
-			root.removeNode(t)
-			return result
+			return t
 		}
 	}
 	return nil
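
Note that find, unlike the old remove, leaves the node in the treap so
the caller can compare the results from both treaps before committing
to a removal. As a rough standalone sketch (not part of the commit; the
names node and find below are illustrative stand-ins for the runtime's
treapNode and mTreap.find), the same best-fit descent over a
size-ordered binary tree looks like this:

package main

import "fmt"

// node stands in for the runtime's treapNode: a binary search tree
// ordered by span size in pages, so every key in a node's left subtree
// is <= its own key and every key in its right subtree is >= it.
type node struct {
	npages      uintptr
	left, right *node
}

// find returns a node whose key is >= npages, preferring smaller
// (tighter-fitting) nodes, mirroring mTreap.find: descend right when
// the current node is too small, descend left only while the left
// child can still satisfy the request, and otherwise stop here.
func find(root *node, npages uintptr) *node {
	t := root
	for t != nil {
		if t.npages < npages {
			t = t.right
		} else if t.left != nil && t.left.npages >= npages {
			t = t.left
		} else {
			return t
		}
	}
	return nil
}

func main() {
	// A tiny size-ordered tree holding spans of 2, 5, and 9 pages.
	root := &node{
		npages: 5,
		left:   &node{npages: 2},
		right:  &node{npages: 9},
	}
	if t := find(root, 4); t != nil {
		fmt.Println("best fit for 4 pages:", t.npages) // prints 5
	}
}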

src/runtime/mheap.go

@@ -998,19 +998,34 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 	}
 }
 
+// pickFreeSpan acquires a free span from internal free list
+// structures if one is available. Otherwise returns nil.
+// h must be locked.
+func (h *mheap) pickFreeSpan(npage uintptr) *mspan {
+	tf := h.free.find(npage)
+	ts := h.scav.find(npage)
+
+	// Check for whichever treap gave us the smaller, non-nil result.
+	// Note that we want the _smaller_ free span, i.e. the free span
+	// closer in size to the amount we requested (npage).
+	var s *mspan
+	if tf != nil && (ts == nil || tf.spanKey.npages <= ts.spanKey.npages) {
+		s = tf.spanKey
+		h.free.removeNode(tf)
+	} else if ts != nil && (tf == nil || tf.spanKey.npages > ts.spanKey.npages) {
+		s = ts.spanKey
+		h.scav.removeNode(ts)
+	}
+	return s
+}
+
 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
 // free structures, but its state is still mSpanFree.
 func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	var s *mspan
 
-	// First, attempt to allocate from free spans, then from
-	// scavenged spans, looking for best fit in each.
-	s = h.free.remove(npage)
-	if s != nil {
-		goto HaveSpan
-	}
-	s = h.scav.remove(npage)
+	s = h.pickFreeSpan(npage)
 	if s != nil {
 		goto HaveSpan
 	}
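
The selection rule in pickFreeSpan is the heart of the fairness change,
and it can be exercised in isolation. Below is a minimal standalone
sketch (not part of the commit; candidate and pickBestFit are
hypothetical names modeling the two find results, not runtime API):

package main

import "fmt"

// candidate models the outcome of a best-fit lookup in one of the two
// treaps: the size of the span found (in pages) and whether one exists.
type candidate struct {
	npages uintptr
	ok     bool
}

// pickBestFit mirrors pickFreeSpan's rule: of the two best-fit
// candidates, take whichever exists and is smaller, i.e. closer to the
// requested size. Ties go to the free (unscavenged) candidate, which
// avoids the extra cost of reusing scavenged memory.
func pickBestFit(free, scav candidate) (candidate, string) {
	switch {
	case free.ok && (!scav.ok || free.npages <= scav.npages):
		return free, "free"
	case scav.ok:
		return scav, "scav"
	}
	return candidate{}, "none"
}

func main() {
	// Requesting 4 pages: the free treap's best fit is 16 pages, the
	// scavenged treap's is 4. The old policy would take the oversized
	// 16-page free span; the fair policy takes the tighter 4-page fit.
	got, src := pickBestFit(candidate{16, true}, candidate{4, true})
	fmt.Printf("picked %d pages from %s\n", got.npages, src) // 4 from scav
}

The old policy, by contrast, took the free treap's result whenever one
existed at all, however oversized, which is what let large free spans
accumulate.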
@@ -1018,23 +1033,19 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	if !h.grow(npage) {
 		return nil
 	}
-	s = h.free.remove(npage)
+	s = h.pickFreeSpan(npage)
 	if s != nil {
 		goto HaveSpan
 	}
-	s = h.scav.remove(npage)
-	if s != nil {
-		goto HaveSpan
-	}
-	return nil
+	throw("grew heap, but no adequate free span found")
 
 HaveSpan:
 	// Mark span in use.
 	if s.state != mSpanFree {
-		throw("mheap.allocLocked - mspan not free")
+		throw("candidate mspan for allocation is not free")
 	}
 	if s.npages < npage {
-		throw("mheap.allocLocked - bad npages")
+		throw("candidate mspan for allocation is too small")
 	}
 
 	// First, subtract any memory that was released back to