runtime: trivial replacements of _g_ in remaining files

Change-Id: I24d299b345bda1c9d6fa7876d4f03c05b8c1156d
Reviewed-on: https://go-review.googlesource.com/c/go/+/418587
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Pratt <mpratt@google.com>
Reviewed-by: Austin Clements <austin@google.com>
This commit is contained in:
Michael Pratt 2022-07-20 13:49:17 -04:00
parent 5999a28de8
commit 4400238ec8
5 changed files with 54 additions and 54 deletions

View file

@@ -1190,7 +1190,7 @@ func fatalpanic(msgs *_panic) {
// //
//go:nowritebarrierrec //go:nowritebarrierrec
func startpanic_m() bool { func startpanic_m() bool {
_g_ := getg() gp := getg()
if mheap_.cachealloc.size == 0 { // very early if mheap_.cachealloc.size == 0 { // very early
print("runtime: panic before malloc heap initialized\n") print("runtime: panic before malloc heap initialized\n")
} }
@@ -1198,18 +1198,18 @@ func startpanic_m() bool {
// could happen in a signal handler, or in a throw, or inside // could happen in a signal handler, or in a throw, or inside
// malloc itself. We want to catch if an allocation ever does // malloc itself. We want to catch if an allocation ever does
// happen (even if we're not in one of these situations). // happen (even if we're not in one of these situations).
_g_.m.mallocing++ gp.m.mallocing++
// If we're dying because of a bad lock count, set it to a // If we're dying because of a bad lock count, set it to a
// good lock count so we don't recursively panic below. // good lock count so we don't recursively panic below.
if _g_.m.locks < 0 { if gp.m.locks < 0 {
_g_.m.locks = 1 gp.m.locks = 1
} }
switch _g_.m.dying { switch gp.m.dying {
case 0: case 0:
// Setting dying >0 has the side-effect of disabling this G's writebuf. // Setting dying >0 has the side-effect of disabling this G's writebuf.
_g_.m.dying = 1 gp.m.dying = 1
atomic.Xadd(&panicking, 1) atomic.Xadd(&panicking, 1)
lock(&paniclk) lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 { if debug.schedtrace > 0 || debug.scheddetail > 0 {
@@ -1220,13 +1220,13 @@ func startpanic_m() bool {
case 1: case 1:
// Something failed while panicking. // Something failed while panicking.
// Just print a stack trace and exit. // Just print a stack trace and exit.
_g_.m.dying = 2 gp.m.dying = 2
print("panic during panic\n") print("panic during panic\n")
return false return false
case 2: case 2:
// This is a genuine bug in the runtime, we couldn't even // This is a genuine bug in the runtime, we couldn't even
// print the stack trace successfully. // print the stack trace successfully.
_g_.m.dying = 3 gp.m.dying = 3
print("stack trace unavailable\n") print("stack trace unavailable\n")
exit(4) exit(4)
fallthrough fallthrough

View file

@@ -67,21 +67,21 @@ func RaceReleaseMerge(addr unsafe.Pointer) {
// Non-synchronization events (memory accesses, function entry/exit) still affect // Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector. // the race detector.
func RaceDisable() { func RaceDisable() {
_g_ := getg() gp := getg()
if _g_.raceignore == 0 { if gp.raceignore == 0 {
racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0) racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
} }
_g_.raceignore++ gp.raceignore++
} }
//go:nosplit //go:nosplit
// RaceEnable re-enables handling of race events in the current goroutine. // RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() { func RaceEnable() {
_g_ := getg() gp := getg()
_g_.raceignore-- gp.raceignore--
if _g_.raceignore == 0 { if gp.raceignore == 0 {
racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0) racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
} }
} }
@@ -453,12 +453,12 @@ func racefree(p unsafe.Pointer, sz uintptr) {
//go:nosplit //go:nosplit
func racegostart(pc uintptr) uintptr { func racegostart(pc uintptr) uintptr {
_g_ := getg() gp := getg()
var spawng *g var spawng *g
if _g_.m.curg != nil { if gp.m.curg != nil {
spawng = _g_.m.curg spawng = gp.m.curg
} else { } else {
spawng = _g_ spawng = gp
} }
var racectx uintptr var racectx uintptr
@@ -478,8 +478,8 @@ func racectxend(racectx uintptr) {
//go:nosplit //go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) { func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
_g_ := getg() gp := getg()
if _g_ != _g_.m.curg { if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal. // The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting. // Not interesting.
return return
@@ -495,8 +495,8 @@ func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
//go:nosplit //go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) { func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
_g_ := getg() gp := getg()
if _g_ != _g_.m.curg { if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal. // The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting. // Not interesting.
return return

View file

@@ -15,8 +15,8 @@ func setMaxStack(in int) (out int) {
//go:linkname setPanicOnFault runtime/debug.setPanicOnFault //go:linkname setPanicOnFault runtime/debug.setPanicOnFault
func setPanicOnFault(new bool) (old bool) { func setPanicOnFault(new bool) (old bool) {
_g_ := getg() gp := getg()
old = _g_.paniconfault old = gp.paniconfault
_g_.paniconfault = new gp.paniconfault = new
return old return old
} }

View file

@@ -35,13 +35,13 @@ var traceback_env uint32
// //
//go:nosplit //go:nosplit
func gotraceback() (level int32, all, crash bool) { func gotraceback() (level int32, all, crash bool) {
_g_ := getg() gp := getg()
t := atomic.Load(&traceback_cache) t := atomic.Load(&traceback_cache)
crash = t&tracebackCrash != 0 crash = t&tracebackCrash != 0
all = _g_.m.throwing >= throwTypeUser || t&tracebackAll != 0 all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
if _g_.m.traceback != 0 { if gp.m.traceback != 0 {
level = int32(_g_.m.traceback) level = int32(gp.m.traceback)
} else if _g_.m.throwing >= throwTypeRuntime { } else if gp.m.throwing >= throwTypeRuntime {
// Always include runtime frames in runtime throws unless // Always include runtime frames in runtime throws unless
// otherwise overridden by m.traceback. // otherwise overridden by m.traceback.
level = 2 level = 2
@@ -474,18 +474,18 @@ func timediv(v int64, div int32, rem *int32) int32 {
//go:nosplit //go:nosplit
func acquirem() *m { func acquirem() *m {
_g_ := getg() gp := getg()
_g_.m.locks++ gp.m.locks++
return _g_.m return gp.m
} }
//go:nosplit //go:nosplit
func releasem(mp *m) { func releasem(mp *m) {
_g_ := getg() gp := getg()
mp.locks-- mp.locks--
if mp.locks == 0 && _g_.preempt { if mp.locks == 0 && gp.preempt {
// restore the preemption request in case we've cleared it in newstack // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt gp.stackguard0 = stackPreempt
} }
} }

View file

@@ -1254,16 +1254,16 @@ func traceGoCreate(newg *g, pc uintptr) {
} }
func traceGoStart() { func traceGoStart() {
_g_ := getg().m.curg gp := getg().m.curg
pp := _g_.m.p pp := gp.m.p
_g_.traceseq++ gp.traceseq++
if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker { if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode]) traceEvent(traceEvGoStartLabel, -1, uint64(gp.goid), gp.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
} else if _g_.tracelastp == pp { } else if gp.tracelastp == pp {
traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid)) traceEvent(traceEvGoStartLocal, -1, uint64(gp.goid))
} else { } else {
_g_.tracelastp = pp gp.tracelastp = pp
traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq) traceEvent(traceEvGoStart, -1, uint64(gp.goid), gp.traceseq)
} }
} }
@@ -1272,14 +1272,14 @@ func traceGoEnd() {
} }
func traceGoSched() { func traceGoSched() {
_g_ := getg() gp := getg()
_g_.tracelastp = _g_.m.p gp.tracelastp = gp.m.p
traceEvent(traceEvGoSched, 1) traceEvent(traceEvGoSched, 1)
} }
func traceGoPreempt() { func traceGoPreempt() {
_g_ := getg() gp := getg()
_g_.tracelastp = _g_.m.p gp.tracelastp = gp.m.p
traceEvent(traceEvGoPreempt, 1) traceEvent(traceEvGoPreempt, 1)
} }
@@ -1318,10 +1318,10 @@ func traceGoSysExit(ts int64) {
// aka right now), and assign a fresh time stamp to keep the log consistent. // aka right now), and assign a fresh time stamp to keep the log consistent.
ts = 0 ts = 0
} }
_g_ := getg().m.curg gp := getg().m.curg
_g_.traceseq++ gp.traceseq++
_g_.tracelastp = _g_.m.p gp.tracelastp = gp.m.p
traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv) traceEvent(traceEvGoSysExit, -1, uint64(gp.goid), gp.traceseq, uint64(ts)/traceTickDiv)
} }
func traceGoSysBlock(pp *p) { func traceGoSysBlock(pp *p) {