runtime: rename _p_ to pp

_g_, _p_, and _m_ are primarily vestiges of the C version of the
runtime, while today we prefer Go-style variable names (generally gp,
pp, and mp).

This change replaces all remaining uses of _p_ with pp. These are all
trivial replacements (i.e., no conflicts). That said, there are several
functions that refer to two different Ps at once. There the naming
convention is generally that pp refers to the local P, and p2 refers to
the other P we are accessing.

Change-Id: I205b801be839216972e7644b1fbeacdbf2612859
Reviewed-on: https://go-review.googlesource.com/c/go/+/306674
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Michael Pratt 2021-02-09 15:48:41 -05:00
parent 0ad2ec6596
commit 5e8d261918
7 changed files with 332 additions and 332 deletions
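
To make the two-P convention described in the commit message concrete, here is a minimal, hypothetical sketch; the `p` struct and `stealOne` helper below are simplified stand-ins invented for illustration, not actual runtime code. `pp` is the P owned by the current M, while `p2` is some other P being inspected or modified:

```go
package main

import "fmt"

// p is a simplified stand-in for the runtime's per-processor struct;
// the real runtime.p has many more fields (runq, mcache, gcw, ...).
type p struct {
	id      int32
	runqLen int
}

// stealOne is a hypothetical helper illustrating the convention from
// the commit message: pp is the local P, p2 is the other P we are
// accessing (here, taking one unit of work from it).
func stealOne(pp, p2 *p) bool {
	if p2.runqLen == 0 {
		return false
	}
	p2.runqLen--
	pp.runqLen++
	return true
}

func main() {
	pp := &p{id: 0}
	p2 := &p{id: 1, runqLen: 3}
	if stealOne(pp, p2) {
		fmt.Printf("pp=%d p2=%d\n", pp.runqLen, p2.runqLen) // pp=1 p2=2
	}
}
```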

src/runtime/export_test.go

@@ -84,23 +84,23 @@ func GCMask(x any) (ret []byte) {
 }
 
 func RunSchedLocalQueueTest() {
-	_p_ := new(p)
-	gs := make([]g, len(_p_.runq))
+	pp := new(p)
+	gs := make([]g, len(pp.runq))
 	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
-	for i := 0; i < len(_p_.runq); i++ {
-		if g, _ := runqget(_p_); g != nil {
+	for i := 0; i < len(pp.runq); i++ {
+		if g, _ := runqget(pp); g != nil {
 			throw("runq is not empty initially")
 		}
 		for j := 0; j < i; j++ {
-			runqput(_p_, &gs[i], false)
+			runqput(pp, &gs[i], false)
 		}
 		for j := 0; j < i; j++ {
-			if g, _ := runqget(_p_); g != &gs[i] {
+			if g, _ := runqget(pp); g != &gs[i] {
 				print("bad element at iter ", i, "/", j, "\n")
 				throw("bad element")
 			}
 		}
-		if g, _ := runqget(_p_); g != nil {
+		if g, _ := runqget(pp); g != nil {
 			throw("runq is not empty afterwards")
 		}
 	}

src/runtime/mgc.go

@@ -811,21 +811,21 @@ top:
 		// result in a deadlock as we attempt to preempt a worker that's
 		// trying to preempt us (e.g. for a stack scan).
 		casgstatus(gp, _Grunning, _Gwaiting)
-		forEachP(func(_p_ *p) {
+		forEachP(func(pp *p) {
 			// Flush the write barrier buffer, since this may add
 			// work to the gcWork.
-			wbBufFlush1(_p_)
+			wbBufFlush1(pp)
 			// Flush the gcWork, since this may create global work
 			// and set the flushedWork flag.
 			//
 			// TODO(austin): Break up these workbufs to
 			// better distribute work.
-			_p_.gcw.dispose()
+			pp.gcw.dispose()
 			// Collect the flushedWork flag.
-			if _p_.gcw.flushedWork {
+			if pp.gcw.flushedWork {
 				atomic.Xadd(&gcMarkDoneFlushed, 1)
-				_p_.gcw.flushedWork = false
+				pp.gcw.flushedWork = false
 			}
 		})
 		casgstatus(gp, _Gwaiting, _Grunning)
@@ -1075,8 +1075,8 @@ func gcMarkTermination() {
 	// is necessary to sweep all spans, we need to ensure all
 	// mcaches are flushed before we start the next GC cycle.
 	systemstack(func() {
-		forEachP(func(_p_ *p) {
-			_p_.mcache.prepareForSweep()
+		forEachP(func(pp *p) {
+			pp.mcache.prepareForSweep()
 		})
 	})
 	// Now that we've swept stale spans in mcaches, they don't

src/runtime/mgcmark.go

@@ -595,15 +595,15 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
 	}
 
 	now := nanotime()
 	duration := now - startTime
-	_p_ := gp.m.p.ptr()
-	_p_.gcAssistTime += duration
+	pp := gp.m.p.ptr()
+	pp.gcAssistTime += duration
 	if trackLimiterEvent {
-		_p_.limiterEvent.stop(limiterEventMarkAssist, now)
+		pp.limiterEvent.stop(limiterEventMarkAssist, now)
 	}
-	if _p_.gcAssistTime > gcAssistTimeSlack {
-		gcController.assistTime.Add(_p_.gcAssistTime)
+	if pp.gcAssistTime > gcAssistTimeSlack {
+		gcController.assistTime.Add(pp.gcAssistTime)
 		gcCPULimiter.update(now)
-		_p_.gcAssistTime = 0
+		pp.gcAssistTime = 0
 	}
 }

src/runtime/mgcpacer.go

@@ -805,9 +805,9 @@ func (c *gcControllerState) enlistWorker() {
 	}
 }
 
-// findRunnableGCWorker returns a background mark worker for _p_ if it
+// findRunnableGCWorker returns a background mark worker for pp if it
 // should be run. This must only be called when gcBlackenEnabled != 0.
-func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
+func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
 	if gcBlackenEnabled == 0 {
 		throw("gcControllerState.findRunnable: blackening not enabled")
 	}
@@ -823,7 +823,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
 		gcCPULimiter.update(now)
 	}
 
-	if !gcMarkWorkAvailable(_p_) {
+	if !gcMarkWorkAvailable(pp) {
 		// No work to be done right now. This can happen at
 		// the end of the mark phase when there are still
 		// assists tapering off. Don't bother running a worker
@@ -864,7 +864,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
 	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
 		// This P is now dedicated to marking until the end of
 		// the concurrent mark phase.
-		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
+		pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
 	} else if c.fractionalUtilizationGoal == 0 {
 		// No need for fractional workers.
 		gcBgMarkWorkerPool.push(&node.node)
@@ -875,13 +875,13 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
 		//
 		// This should be kept in sync with pollFractionalWorkerExit.
 		delta := now - c.markStartTime
-		if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+		if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
 			// Nope. No need to run a fractional worker.
 			gcBgMarkWorkerPool.push(&node.node)
 			return nil, now
 		}
 		// Run a fractional worker.
-		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
+		pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
 	}
 
 	// Run the background mark worker.

src/runtime/mwbbuf.go

@@ -212,22 +212,22 @@ func wbBufFlush(dst *uintptr, src uintptr) {
 //
 //go:nowritebarrierrec
 //go:systemstack
-func wbBufFlush1(_p_ *p) {
+func wbBufFlush1(pp *p) {
 	// Get the buffered pointers.
-	start := uintptr(unsafe.Pointer(&_p_.wbBuf.buf[0]))
-	n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
-	ptrs := _p_.wbBuf.buf[:n]
+	start := uintptr(unsafe.Pointer(&pp.wbBuf.buf[0]))
+	n := (pp.wbBuf.next - start) / unsafe.Sizeof(pp.wbBuf.buf[0])
+	ptrs := pp.wbBuf.buf[:n]
 
 	// Poison the buffer to make extra sure nothing is enqueued
 	// while we're processing the buffer.
-	_p_.wbBuf.next = 0
+	pp.wbBuf.next = 0
 
 	if useCheckmark {
 		// Slow path for checkmark mode.
 		for _, ptr := range ptrs {
 			shade(ptr)
 		}
-		_p_.wbBuf.reset()
+		pp.wbBuf.reset()
 		return
 	}
@@ -245,7 +245,7 @@ func wbBufFlush1(_p_ *p) {
 	// could track whether any un-shaded goroutine has used the
 	// buffer, or just track globally whether there are any
 	// un-shaded stacks and flush after each stack scan.
-	gcw := &_p_.gcw
+	gcw := &pp.gcw
 	pos := 0
 	for _, ptr := range ptrs {
 		if ptr < minLegalPointer {
@@ -286,5 +286,5 @@ func wbBufFlush1(_p_ *p) {
 	// Enqueue the greyed objects.
 	gcw.putBatch(ptrs[:pos])
 
-	_p_.wbBuf.reset()
+	pp.wbBuf.reset()
 }

src/runtime/proc.go (diff suppressed because it is too large)

src/runtime/trace.go

@@ -1208,11 +1208,11 @@ func traceGCSTWDone() {
 func traceGCSweepStart() {
 	// Delay the actual GCSweepStart event until the first span
 	// sweep. If we don't sweep anything, don't emit any events.
-	_p_ := getg().m.p.ptr()
-	if _p_.traceSweep {
+	pp := getg().m.p.ptr()
+	if pp.traceSweep {
 		throw("double traceGCSweepStart")
 	}
-	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
+	pp.traceSweep, pp.traceSwept, pp.traceReclaimed = true, 0, 0
 }
 
 // traceGCSweepSpan traces the sweep of a single page.
@@ -1220,24 +1220,24 @@ func traceGCSweepStart() {
 // This may be called outside a traceGCSweepStart/traceGCSweepDone
 // pair; however, it will not emit any trace events in this case.
 func traceGCSweepSpan(bytesSwept uintptr) {
-	_p_ := getg().m.p.ptr()
-	if _p_.traceSweep {
-		if _p_.traceSwept == 0 {
+	pp := getg().m.p.ptr()
+	if pp.traceSweep {
+		if pp.traceSwept == 0 {
 			traceEvent(traceEvGCSweepStart, 1)
 		}
-		_p_.traceSwept += bytesSwept
+		pp.traceSwept += bytesSwept
 	}
 }
 
 func traceGCSweepDone() {
-	_p_ := getg().m.p.ptr()
-	if !_p_.traceSweep {
+	pp := getg().m.p.ptr()
+	if !pp.traceSweep {
 		throw("missing traceGCSweepStart")
 	}
-	if _p_.traceSwept != 0 {
-		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
+	if pp.traceSwept != 0 {
+		traceEvent(traceEvGCSweepDone, -1, uint64(pp.traceSwept), uint64(pp.traceReclaimed))
 	}
-	_p_.traceSweep = false
+	pp.traceSweep = false
 }
 
 func traceGCMarkAssistStart() {
@@ -1258,14 +1258,14 @@ func traceGoCreate(newg *g, pc uintptr) {
 
 func traceGoStart() {
 	_g_ := getg().m.curg
-	_p_ := _g_.m.p
+	pp := _g_.m.p
 	_g_.traceseq++
-	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
-		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
-	} else if _g_.tracelastp == _p_ {
+	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
+		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
+	} else if _g_.tracelastp == pp {
 		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
 	} else {
-		_g_.tracelastp = _p_
+		_g_.tracelastp = pp
 		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
 	}
 }
@@ -1294,12 +1294,12 @@ func traceGoPark(traceEv byte, skip int) {
 }
 
 func traceGoUnpark(gp *g, skip int) {
-	_p_ := getg().m.p
+	pp := getg().m.p
 	gp.traceseq++
-	if gp.tracelastp == _p_ {
+	if gp.tracelastp == pp {
 		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
 	} else {
-		gp.tracelastp = _p_
+		gp.tracelastp = pp
 		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
 	}
 }
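
A pattern that recurs throughout these hunks is `pp := getg().m.p.ptr()`, which walks from the current goroutine (g) through its OS thread (m) to the processor (p) it is running on. Below is a simplified, hypothetical model of that chain with stand-in types; in the real runtime, `puintptr` stores the pointer as a uintptr so the GC ignores it, `getg` is a compiler intrinsic, and all of these structs have many more fields:

```go
package main

import "fmt"

// Simplified stand-ins for the runtime's scheduler types.
type p struct{ id int32 }

// puintptr mimics the runtime's GC-opaque pointer-to-P wrapper.
type puintptr struct{ p *p }

// ptr mirrors the runtime's puintptr.ptr() accessor.
func (pu puintptr) ptr() *p { return pu.p }

type m struct{ p puintptr } // the P currently attached to this M

type g struct{ m *m } // the M this goroutine is running on

// getg stands in for the runtime's compiler intrinsic that returns
// the current goroutine.
var currentG = &g{m: &m{p: puintptr{&p{id: 7}}}}

func getg() *g { return currentG }

func main() {
	// The pattern used throughout the diff: from the current
	// goroutine, through its M, to the local P.
	pp := getg().m.p.ptr()
	fmt.Println(pp.id) // 7
}
```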