runtime/metrics: add /sync/mutex/wait/total:seconds metric

This change adds a metric to the runtime/metrics package that tracks
total mutex wait time for sync.Mutex and sync.RWMutex. The purpose of
this metric is to provide a quick, low-overhead way to gauge overall
mutex wait time without collecting a full contention profile.

The implementation of this metric piggybacks off of the existing G
runnable tracking infrastructure, as well as the wait reason set on a G
when it goes into _Gwaiting.
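
For illustration, here is a minimal sketch of how a program might
sample the new metric through the runtime/metrics API (the metric name
matches the one added by this change; everything else is ordinary
package usage):

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        // Sample the cumulative mutex wait metric. On toolchains that
        // don't support it, the sample's Kind reports metrics.KindBad.
        s := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
        metrics.Read(s)
        if s[0].Value.Kind() == metrics.KindFloat64 {
            fmt.Printf("total mutex wait: %.6fs\n", s[0].Value.Float64())
        }
    }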

Fixes #49881.

Change-Id: I4691abf64ac3574bec69b4d7d4428b1573130517
Reviewed-on: https://go-review.googlesource.com/c/go/+/427618
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Author: Michael Anthony Knyszek
Date: 2022-08-31 21:34:23 +00:00 (committed by Gopher Robot)
Commit: 63ceff95fa (parent: 686b38b5b2)
7 changed files with 221 additions and 25 deletions

src/runtime/export_test.go

@@ -525,6 +525,12 @@ func Getg() *G {
return getg()
}
func GIsWaitingOnMutex(gp *G) bool {
return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}
var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
//go:noinline
func PanicForTesting(b []byte, i int) byte {
return unexportedPanicForTesting(b, i)
@@ -1617,3 +1623,5 @@ func (s *ScavengeIndex) Mark(base, limit uintptr) {
func (s *ScavengeIndex) Clear(ci ChunkIdx) {
s.i.clear(chunkIdx(ci))
}
const GTrackingPeriod = gTrackingPeriod

src/runtime/metrics.go

@@ -411,6 +411,12 @@ func initMetrics() {
hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
},
},
"/sync/mutex/wait/total:seconds": {
compute: func(_ *statAggregate, out *metricValue) {
out.kind = metricKindFloat64
out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
},
},
}
metricsInit = true
}
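
As context for the compute function above, nsToSec converts an integer
nanosecond count into floating-point seconds. A sketch of the helper's
assumed shape (an assumption from its name and use here, not code shown
in this diff):

    // nsToSec converts a nanosecond count to seconds.
    func nsToSec(ns int64) float64 {
        return float64(ns) / 1e9
    }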

src/runtime/metrics/description.go

@@ -366,6 +366,12 @@ var allDesc = []Description{
Description: "Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.",
Kind: KindFloat64Histogram,
},
{
Name: "/sync/mutex/wait/total:seconds",
Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.",
Kind: KindFloat64,
Cumulative: true,
},
}
// All returns a slice containing metric descriptions for all supported metrics.
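
As a usage aside, the new description above is discoverable at run time
via metrics.All(); a minimal sketch (the loop and print are illustrative
only):

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        // Look up the new metric's description among all supported metrics.
        for _, d := range metrics.All() {
            if d.Name == "/sync/mutex/wait/total:seconds" {
                fmt.Printf("kind=%v cumulative=%v\n", d.Kind, d.Cumulative)
            }
        }
    }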

src/runtime/metrics/doc.go

@@ -272,5 +272,12 @@ Below is the full list of supported metrics, ordered lexicographically.
/sched/latencies:seconds
Distribution of the time goroutines have spent in the scheduler
in a runnable state before actually running.
/sync/mutex/wait/total:seconds
Approximate cumulative time goroutines have spent blocked on a
sync.Mutex or sync.RWMutex. This metric is useful for identifying
global changes in lock contention. Collect a mutex or block
profile using the runtime/pprof package for more detailed
contention data.
*/
package metrics
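
Since the documentation above points readers at mutex and block
profiles for finer-grained data, here is a hedged sketch of collecting
a mutex profile with runtime/pprof (the sampling rate and output file
name are arbitrary choices for illustration):

    package main

    import (
        "os"
        "runtime"
        "runtime/pprof"
    )

    func main() {
        // Record roughly 1 in 5 mutex contention events.
        runtime.SetMutexProfileFraction(5)
        defer runtime.SetMutexProfileFraction(0)

        // ... run the workload under investigation ...

        f, err := os.Create("mutex.pprof")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        if err := pprof.Lookup("mutex").WriteTo(f, 0); err != nil {
            panic(err)
        }
    }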

src/runtime/metrics_test.go

@@ -5,6 +5,7 @@
package runtime_test
import (
"reflect"
"runtime"
"runtime/metrics"
"sort"
@@ -156,7 +157,7 @@ func TestReadMetricsConsistency(t *testing.T) {
 	// Tests whether readMetrics produces consistent, sensible values.
 	// The values are read concurrently with the runtime doing other
 	// things (e.g. allocating) so what we read can't reasonably compared
-	// to runtime values.
+	// to other runtime values (e.g. MemStats).
 
 	// Run a few GC cycles to get some of the stats to be non-zero.
 	runtime.GC()
@@ -486,3 +487,127 @@ func TestReadMetricsCumulative(t *testing.T) {
func withinEpsilon(v1, v2, e float64) bool {
return v2-v2*e <= v1 && v1 <= v2+v2*e
}
func TestMutexWaitTimeMetric(t *testing.T) {
var sample [1]metrics.Sample
sample[0].Name = "/sync/mutex/wait/total:seconds"
locks := []locker2{
new(mutex),
new(rwmutexWrite),
new(rwmutexReadWrite),
new(rwmutexWriteRead),
}
for _, lock := range locks {
t.Run(reflect.TypeOf(lock).Elem().Name(), func(t *testing.T) {
metrics.Read(sample[:])
before := time.Duration(sample[0].Value.Float64() * 1e9)
minMutexWaitTime := generateMutexWaitTime(lock)
metrics.Read(sample[:])
after := time.Duration(sample[0].Value.Float64() * 1e9)
if wt := after - before; wt < minMutexWaitTime {
t.Errorf("too little mutex wait time: got %s, want %s", wt, minMutexWaitTime)
}
})
}
}
// locker2 represents an API surface of two concurrent goroutines
// locking the same resource, but through different APIs. It's intended
// to abstract over the relationship of two Lock calls or an RLock
// and a Lock call.
type locker2 interface {
Lock1()
Unlock1()
Lock2()
Unlock2()
}
type mutex struct {
mu sync.Mutex
}
func (m *mutex) Lock1() { m.mu.Lock() }
func (m *mutex) Unlock1() { m.mu.Unlock() }
func (m *mutex) Lock2() { m.mu.Lock() }
func (m *mutex) Unlock2() { m.mu.Unlock() }
type rwmutexWrite struct {
mu sync.RWMutex
}
func (m *rwmutexWrite) Lock1() { m.mu.Lock() }
func (m *rwmutexWrite) Unlock1() { m.mu.Unlock() }
func (m *rwmutexWrite) Lock2() { m.mu.Lock() }
func (m *rwmutexWrite) Unlock2() { m.mu.Unlock() }
type rwmutexReadWrite struct {
mu sync.RWMutex
}
func (m *rwmutexReadWrite) Lock1() { m.mu.RLock() }
func (m *rwmutexReadWrite) Unlock1() { m.mu.RUnlock() }
func (m *rwmutexReadWrite) Lock2() { m.mu.Lock() }
func (m *rwmutexReadWrite) Unlock2() { m.mu.Unlock() }
type rwmutexWriteRead struct {
mu sync.RWMutex
}
func (m *rwmutexWriteRead) Lock1() { m.mu.Lock() }
func (m *rwmutexWriteRead) Unlock1() { m.mu.Unlock() }
func (m *rwmutexWriteRead) Lock2() { m.mu.RLock() }
func (m *rwmutexWriteRead) Unlock2() { m.mu.RUnlock() }
// generateMutexWaitTime causes a couple of goroutines
// to block a whole bunch of times on a sync.Mutex, returning
// the minimum amount of time that should be visible in the
// /sync/mutex/wait/total:seconds metric.
func generateMutexWaitTime(mu locker2) time.Duration {
// Set up the runtime to always track casgstatus transitions for metrics.
*runtime.CasGStatusAlwaysTrack = true
mu.Lock1()
// Start up a goroutine to wait on the lock.
gc := make(chan *runtime.G)
done := make(chan bool)
go func() {
gc <- runtime.Getg()
for {
mu.Lock2()
mu.Unlock2()
if <-done {
return
}
}
}()
gp := <-gc
// Set the block time high enough so that it will always show up, even
// on systems with coarse timer granularity.
const blockTime = 100 * time.Millisecond
// Make sure the goroutine spawned above actually blocks on the lock.
for {
if runtime.GIsWaitingOnMutex(gp) {
break
}
runtime.Gosched()
}
// Let some amount of time pass.
time.Sleep(blockTime)
// Let the other goroutine acquire the lock.
mu.Unlock1()
done <- true
// Reset flag.
*runtime.CasGStatusAlwaysTrack = false
return blockTime
}
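
To exercise just this test against the runtime package, an invocation
like "go test -run=TestMutexWaitTimeMetric runtime" should work in any
environment with the Go toolchain installed.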

src/runtime/proc.go

@@ -954,6 +954,10 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
panic("not reached")
}
// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
var casgstatusAlwaysTrack = false
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
@@ -994,36 +998,65 @@ func casgstatus(gp *g, oldval, newval uint32) {
 		}
 	}
 
 	// Handle tracking for scheduling latencies.
 	if oldval == _Grunning {
-		// Track every 8th time a goroutine transitions out of running.
-		if gp.trackingSeq%gTrackingPeriod == 0 {
+		// Track every gTrackingPeriod time a goroutine transitions out of running.
+		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
 			gp.tracking = true
 		}
 		gp.trackingSeq++
 	}
-	if gp.tracking {
-		if oldval == _Grunnable {
-			// We transitioned out of runnable, so measure how much
-			// time we spent in this state and add it to
-			// runnableTime.
-			now := nanotime()
-			gp.runnableTime += now - gp.runnableStamp
-			gp.runnableStamp = 0
-		}
-		if newval == _Grunnable {
-			// We just transitioned into runnable, so record what
-			// time that happened.
-			now := nanotime()
-			gp.runnableStamp = now
-		} else if newval == _Grunning {
-			// We're transitioning into running, so turn off
-			// tracking and record how much time we spent in
-			// runnable.
-			gp.tracking = false
-			sched.timeToRun.record(gp.runnableTime)
-			gp.runnableTime = 0
-		}
+	if !gp.tracking {
+		return
+	}
+
+	// Handle various kinds of tracking.
+	//
+	// Currently:
+	// - Time spent in runnable.
+	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
+	switch oldval {
+	case _Grunnable:
+		// We transitioned out of runnable, so measure how much
+		// time we spent in this state and add it to
+		// runnableTime.
+		now := nanotime()
+		gp.runnableTime += now - gp.trackingStamp
+		gp.trackingStamp = 0
+	case _Gwaiting:
+		if !gp.waitreason.isMutexWait() {
+			// Not blocking on a lock.
+			break
+		}
+		// Blocking on a lock, measure it. Note that because we're
+		// sampling, we have to multiply by our sampling period to get
+		// a more representative estimate of the absolute value.
+		// gTrackingPeriod also represents an accurate sampling period
+		// because we can only enter this state from _Grunning.
+		now := nanotime()
+		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
+		gp.trackingStamp = 0
+	}
+	switch newval {
+	case _Gwaiting:
+		if !gp.waitreason.isMutexWait() {
+			// Not blocking on a lock.
+			break
+		}
+		// Blocking on a lock. Write down the timestamp.
+		now := nanotime()
+		gp.trackingStamp = now
+	case _Grunnable:
+		// We just transitioned into runnable, so record what
+		// time that happened.
+		now := nanotime()
+		gp.trackingStamp = now
+	case _Grunning:
+		// We're transitioning into running, so turn off
+		// tracking and record how much time we spent in
+		// runnable.
+		gp.tracking = false
+		sched.timeToRun.record(gp.runnableTime)
+		gp.runnableTime = 0
 	}
 }
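
Aside: the multiplication by gTrackingPeriod above is an
unbiased-estimator trick. Only every gTrackingPeriod-th transition out
of _Grunning turns on tracking, so each observed wait stands in for
roughly gTrackingPeriod waits, and scaling by that factor makes the
accumulated total match the true aggregate wait time in expectation. A
sketch of the estimator in isolation (illustrative code, not part of
the runtime; the period constant is 8, per the old comment above):

    const gTrackingPeriod = 8 // the runtime's sampling period

    // addSampledWait accumulates one sampled wait. Each wait is observed
    // with probability 1/gTrackingPeriod, so scaling by gTrackingPeriod
    // makes E[total] equal the sum of all waits.
    func addSampledWait(total *int64, observedNs int64) {
        *total += observedNs * gTrackingPeriod
    }
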
@@ -1031,6 +1064,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
// Set the wait reason before calling casgstatus, because casgstatus will use it.
gp.waitreason = reason
casgstatus(gp, old, _Gwaiting)
}

src/runtime/runtime2.go

@@ -468,7 +468,7 @@ type g struct {
 	sysblocktraced bool  // StartTrace has emitted EvGoInSyscall about this goroutine
 	tracking       bool  // whether we're tracking this G for sched latency statistics
 	trackingSeq    uint8 // used to decide whether to track this G
-	runnableStamp  int64 // timestamp of when the G last became runnable, only used when tracking
+	trackingStamp  int64 // timestamp of when the G last started being tracked
 	runnableTime   int64 // the amount of time spent runnable, cleared when running, only used when tracking
 	sysexitticks   int64 // cputicks when syscall has returned (for tracing)
 	traceseq       uint64 // trace event sequencer
@@ -843,6 +843,10 @@ type schedt struct {
//
// Reset on each GC cycle.
idleTime atomic.Int64
// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
totalMutexWaitTime atomic.Int64
}
// Values for the flags field of a sigTabT.
@@ -1109,6 +1113,12 @@ func (w waitReason) String() string {
return waitReasonStrings[w]
}
func (w waitReason) isMutexWait() bool {
return w == waitReasonSyncMutexLock ||
w == waitReasonSyncRWMutexRLock ||
w == waitReasonSyncRWMutexLock
}
var (
allm *m
gomaxprocs int32