runtime: refactor findrunnable spinning recheck

Break the main components of the findrunnable spinning -> non-spinning
recheck out into their own functions. This simplifies both findrunnable
and the new functions, which can make use of features like early
returns.
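
As an illustration of the shape (a sketch only, not runtime code: the
names checkFoo, hasWork, and takeIdleP are hypothetical stand-ins),
each recheck turns from an inline nested loop into a helper in which an
early return replaces the old goto/break bookkeeping:

    package main

    // Toy stand-ins for the runtime's types and helpers.
    type p struct{ work bool }

    var idle = &p{} // pretend one idle P is available

    func hasWork(pp *p) bool { return pp.work }
    func takeIdleP() *p      { pp := idle; idle = nil; return pp }

    // checkFoo scans a snapshot of Ps and, on finding work, tries to
    // take an idle P. Early returns keep the control flow flat.
    func checkFoo(snapshot []*p) *p {
    	for _, pp := range snapshot {
    		if !hasWork(pp) {
    			continue
    		}
    		if pp := takeIdleP(); pp != nil {
    			return pp
    		}
    		break // no idle P; no point scanning the rest
    	}
    	return nil
    }

    func main() {
    	println(checkFoo([]*p{{work: true}}) != nil) // true
    }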

This CL should have no functional changes.

For #43997
For #44313

Change-Id: I6d3060fcecda9920a3471ff338f73d53b1d848a3
Reviewed-on: https://go-review.googlesource.com/c/go/+/307914
Trust: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Author: Michael Pratt <mpratt@google.com>
Date:   2021-04-07 09:58:18 -04:00
parent 7473a6a0eb
commit dbade774c3

@@ -2799,86 +2799,42 @@ top:
 		}
 	}
 
-	// check all runqueues once again
-	for id, _p_ := range allpSnapshot {
-		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) {
-			lock(&sched.lock)
-			_p_ = pidleget()
-			unlock(&sched.lock)
-			if _p_ != nil {
-				acquirep(_p_)
-				if wasSpinning {
-					_g_.m.spinning = true
-					atomic.Xadd(&sched.nmspinning, 1)
-				}
-				goto top
-			}
-			break
-		}
-	}
+	// Check all runqueues once again.
+	_p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
+	if _p_ != nil {
+		acquirep(_p_)
+		if wasSpinning {
+			_g_.m.spinning = true
+			atomic.Xadd(&sched.nmspinning, 1)
+		}
+		goto top
+	}
 
 	// Similar to above, check for timer creation or expiry concurrently with
 	// transitioning from spinning to non-spinning. Note that we cannot use
 	// checkTimers here because it calls adjusttimers which may need to allocate
 	// memory, and that isn't allowed when we don't have an active P.
-	for id, _p_ := range allpSnapshot {
-		if timerpMaskSnapshot.read(uint32(id)) {
-			w := nobarrierWakeTime(_p_)
-			if w != 0 && (pollUntil == 0 || w < pollUntil) {
-				pollUntil = w
-			}
-		}
-	}
+	pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
 
-	// Check for idle-priority GC work again.
-	//
-	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we
-	// must check again after acquiring a P.
-	if atomic.Load(&gcBlackenEnabled) != 0 && gcMarkWorkAvailable(nil) {
-		// Work is available; we can start an idle GC worker only if
-		// there is an available P and available worker G.
-		//
-		// We can attempt to acquire these in either order. Workers are
-		// almost always available (see comment in findRunnableGCWorker
-		// for the one case there may be none). Since we're slightly
-		// less likely to find a P, check for that first.
-		lock(&sched.lock)
-		var node *gcBgMarkWorkerNode
-		_p_ = pidleget()
-		if _p_ != nil {
-			// Now that we own a P, gcBlackenEnabled can't change
-			// (as it requires STW).
-			if gcBlackenEnabled != 0 {
-				node = (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
-				if node == nil {
-					pidleput(_p_)
-					_p_ = nil
-				}
-			} else {
-				pidleput(_p_)
-				_p_ = nil
-			}
-		}
-		unlock(&sched.lock)
-		if _p_ != nil {
-			acquirep(_p_)
-			if wasSpinning {
-				_g_.m.spinning = true
-				atomic.Xadd(&sched.nmspinning, 1)
-			}
-
-			// Run the idle worker.
-			_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
-			gp := node.gp.ptr()
-			casgstatus(gp, _Gwaiting, _Grunnable)
-			if trace.enabled {
-				traceGoUnpark(gp, 0)
-			}
-			return gp, false
-		}
-	}
+	// Finally, check for idle-priority GC work.
+	_p_, gp = checkIdleGCNoP()
+	if _p_ != nil {
+		acquirep(_p_)
+		if wasSpinning {
+			_g_.m.spinning = true
+			atomic.Xadd(&sched.nmspinning, 1)
+		}
+
+		// Run the idle worker.
+		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+		casgstatus(gp, _Gwaiting, _Grunnable)
+		if trace.enabled {
+			traceGoUnpark(gp, 0)
+		}
+		return gp, false
+	}
 
-	// poll network
+	// Poll network until next timer.
 	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
 		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
 		if _g_.m.p != 0 {
@@ -3038,6 +2994,93 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
 	return nil, false, now, pollUntil, ranTimer
 }
 
+// Check all Ps for a runnable G to steal.
+//
+// On entry we have no P. If a G is available to steal and a P is available,
+// the P is returned which the caller should acquire and attempt to steal the
+// work to.
+func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
+	for id, p2 := range allpSnapshot {
+		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
+			lock(&sched.lock)
+			pp := pidleget()
+			unlock(&sched.lock)
+			if pp != nil {
+				return pp
+			}
+
+			// Can't get a P, don't bother checking remaining Ps.
+			break
+		}
+	}
+
+	return nil
+}
+
+// Check all Ps for a timer expiring sooner than pollUntil.
+//
+// Returns updated pollUntil value.
+func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
+	for id, p2 := range allpSnapshot {
+		if timerpMaskSnapshot.read(uint32(id)) {
+			w := nobarrierWakeTime(p2)
+			if w != 0 && (pollUntil == 0 || w < pollUntil) {
+				pollUntil = w
+			}
+		}
+	}
+
+	return pollUntil
+}
+
+// Check for idle-priority GC, without a P on entry.
+//
+// If some GC work, a P, and a worker G are all available, the P and G will be
+// returned. The returned P has not been wired yet.
+func checkIdleGCNoP() (*p, *g) {
+	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we
+	// must check again after acquiring a P.
+	if atomic.Load(&gcBlackenEnabled) == 0 {
+		return nil, nil
+	}
+	if !gcMarkWorkAvailable(nil) {
+		return nil, nil
+	}
+
+	// Work is available; we can start an idle GC worker only if
+	// there is an available P and available worker G.
+	//
+	// We can attempt to acquire these in either order. Workers are
+	// almost always available (see comment in findRunnableGCWorker
+	// for the one case there may be none). Since we're slightly
+	// less likely to find a P, check for that first.
+	lock(&sched.lock)
+	pp := pidleget()
+	unlock(&sched.lock)
+	if pp == nil {
+		return nil, nil
+	}
+
+	// Now that we own a P, gcBlackenEnabled can't change
+	// (as it requires STW).
+	if gcBlackenEnabled == 0 {
+		lock(&sched.lock)
+		pidleput(pp)
+		unlock(&sched.lock)
+		return nil, nil
+	}
+
+	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
+	if node == nil {
+		lock(&sched.lock)
+		pidleput(pp)
+		unlock(&sched.lock)
+		return nil, nil
+	}
+
+	return pp, node.gp.ptr()
+}
+
 // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
 // going to wake up before the when argument; or it wakes an idle P to service
 // timers and the network poller if there isn't one already.
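
One property all three rechecks share (the sketch below is illustrative
only; toySched and recheck are hypothetical, not runtime types): by
this point the M has already dropped its spinning status and
decremented sched.nmspinning, so whenever a recheck finds work and the
M was previously spinning, it must re-increment the counter before
resuming the search, keeping the scheduler's spinning accounting
balanced:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // toySched models only the spinning-thread counter.
    type toySched struct {
    	nmspinning int32
    }

    // recheck mirrors the wasSpinning blocks above: the worker has
    // already given up spinning status; if work turns up, it restores
    // the count before retrying instead of going to sleep.
    func (s *toySched) recheck(wasSpinning bool, haveWork func() bool) bool {
    	if !haveWork() {
    		return false // nothing found; really go to sleep
    	}
    	if wasSpinning {
    		atomic.AddInt32(&s.nmspinning, 1)
    	}
    	return true // caller jumps back to the top of its search
    }

    func main() {
    	s := &toySched{}
    	again := s.recheck(true, func() bool { return true })
    	fmt.Println(again, atomic.LoadInt32(&s.nmspinning)) // true 1
    }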