runtime: convert timeHistogram to atomic types

I've dropped the note that sched.timeToRun is protected by sched.lock,
as it does not seem to be true.

For #53821.

Change-Id: I03f8dc6ca0bcd4ccf3ec113010a0aa39c6f7d6ef
Reviewed-on: https://go-review.googlesource.com/c/go/+/419449
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Pratt <mpratt@google.com>
Michael Pratt 2022-07-25 15:58:23 -04:00
parent 09cc9bac72
commit b04e4637db
7 changed files with 11 additions and 29 deletions
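
The conversion follows one pattern throughout: fields that were plain uint64 values updated through runtime/internal/atomic helpers (atomic.Xadd64, atomic.Load64) become atomic.Uint64 fields with Add and Load methods. Below is a minimal sketch of the before/after shape, written against the public sync/atomic package rather than the runtime-internal one; the counter types and names are illustrative only.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// counterOld is the old shape: a bare uint64 that every caller must
	// remember to touch atomically, e.g. atomic.AddUint64(&c.n, 1).
	type counterOld struct {
		n uint64
	}

	// counterNew is the new shape: the atomic type makes every access
	// atomic by construction and needs no external discipline.
	type counterNew struct {
		n atomic.Uint64
	}

	func main() {
		var c counterNew
		c.n.Add(1)
		fmt.Println(c.n.Load()) // 1
	}

Besides making every access atomic by construction, the atomic 64-bit types are guaranteed 8-byte alignment even on 32-bit platforms, which is what lets the hunks below delete the hand-written alignment checks.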


@@ -17,8 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.timeToRun),
-	unsafe.Offsetof(timeHistogram{}.underflow),
 	unsafe.Offsetof(profBuf{}.overflow),
 	unsafe.Offsetof(profBuf{}.overflowTime),
 	unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
@@ -37,10 +35,8 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(lfnode{}.next),
 	unsafe.Offsetof(mstats{}.last_gc_nanotime),
 	unsafe.Offsetof(mstats{}.last_gc_unix),
-	unsafe.Offsetof(mstats{}.gcPauseDist),
 	unsafe.Offsetof(ticksType{}.val),
 	unsafe.Offsetof(workType{}.bytesMarked),
-	unsafe.Offsetof(timeHistogram{}.counts),
 }
 
 // AtomicVariables is the set of global variables on which we perform
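
These entries can be deleted because AtomicFields exists to assert that fields touched with the 64-bit atomic helpers sit at 8-byte-aligned offsets on 32-bit platforms; once the fields are atomic.Uint64, the compiler enforces that alignment itself. A hedged sketch of the property, using an illustrative struct rather than a runtime type:

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	// sample has a 4-byte field ahead of the counters; without the special
	// alignment rule for atomic.Uint64, the array could land at offset 4 on
	// 32-bit targets, which would break 64-bit atomic operations there.
	type sample struct {
		flag   uint32
		counts [4]atomic.Uint64
	}

	func main() {
		var s sample
		off := unsafe.Offsetof(s.counts)
		fmt.Println(off, off%8 == 0) // 8 true on both 32- and 64-bit builds
	}

The same reasoning removes the explicit offset%8 checks from mstats.go and proc.go further down.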


@@ -1244,9 +1244,9 @@ func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
 	t := (*timeHistogram)(th)
 	i := bucket*TimeHistNumSubBuckets + subBucket
 	if i >= uint(len(t.counts)) {
-		return t.underflow, false
+		return t.underflow.Load(), false
 	}
-	return t.counts[i], true
+	return t.counts[i].Load(), true
 }
 
 func (th *TimeHistogram) Record(duration int64) {
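
Count is a test-only accessor: it flattens (bucket, subBucket) into one index into counts and, when that index is out of range, reports the underflow counter with a false second result. A self-contained sketch of that shape over a toy histogram; the sizes and names here are made up, and the runtime's exported TimeHistogram matches this only in outline:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	const numBuckets, numSubBuckets = 4, 4 // toy sizes, not the runtime's constants

	type hist struct {
		counts    [numBuckets * numSubBuckets]atomic.Uint64
		underflow atomic.Uint64
	}

	// count mirrors the exported Count hook above: a flat index, with
	// out-of-range lookups reporting the underflow counter instead.
	func (h *hist) count(bucket, subBucket uint) (uint64, bool) {
		i := bucket*numSubBuckets + subBucket
		if i >= uint(len(h.counts)) {
			return h.underflow.Load(), false
		}
		return h.counts[i].Load(), true
	}

	func main() {
		var h hist
		h.counts[5].Add(3)
		h.underflow.Add(1)
		fmt.Println(h.count(1, 1))          // 3 true
		fmt.Println(h.count(numBuckets, 0)) // 1 false (out of range -> underflow)
	}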


@@ -66,18 +66,16 @@
 // It is an HDR histogram with exponentially-distributed
 // buckets and linearly distributed sub-buckets.
 //
-// Counts in the histogram are updated atomically, so it is safe
-// for concurrent use. It is also safe to read all the values
-// atomically.
+// The histogram is safe for concurrent reads and writes.
 type timeHistogram struct {
-	counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+	counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]atomic.Uint64
 
 	// underflow counts all the times we got a negative duration
 	// sample. Because of how time works on some platforms, it's
 	// possible to measure negative durations. We could ignore them,
 	// but we record them anyway because it's better to have some
 	// signal that it's happening than just missing samples.
-	underflow uint64
+	underflow atomic.Uint64
 }
 
 // record adds the given duration to the distribution.
@@ -88,7 +86,7 @@ type timeHistogram struct {
 //go:nosplit
 func (h *timeHistogram) record(duration int64) {
 	if duration < 0 {
-		atomic.Xadd64(&h.underflow, 1)
+		h.underflow.Add(1)
 		return
 	}
 	// The index of the exponential bucket is just the index
@@ -116,7 +114,7 @@ func (h *timeHistogram) record(duration int64) {
 	} else {
 		subBucket = uint(duration)
 	}
-	atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
+	h.counts[superBucket*timeHistNumSubBuckets+subBucket].Add(1)
 }
 
 const (
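
record itself is untouched except for the final increment: the super-bucket comes from the position of the duration's highest set bit and the sub-bucket from the few bits just below it, which is what gives exponentially sized buckets with linear subdivisions. A simplified, self-contained sketch of that indexing, using small made-up constants rather than the runtime's timeHist* values:

	package main

	import (
		"fmt"
		"math/bits"
	)

	const (
		subBucketBits   = 2 // assumed toy value
		numSubBuckets   = 1 << subBucketBits
		numSuperBuckets = 8 // assumed toy value
	)

	// bucketIndex returns the flat counts index for a non-negative duration.
	func bucketIndex(duration int64) uint {
		var superBucket, subBucket uint
		if duration >= numSubBuckets {
			// The highest set bit picks the exponential super-bucket...
			superBucket = uint(bits.Len64(uint64(duration))) - subBucketBits
			if superBucket >= numSuperBuckets {
				// Saturate over-range values into the last bucket.
				return numSuperBuckets*numSubBuckets - 1
			}
			// ...and the bits just below it pick the linear sub-bucket.
			subBucket = uint((duration >> (superBucket - 1)) & (numSubBuckets - 1))
		} else {
			// Small durations land in super-bucket 0, indexed directly.
			subBucket = uint(duration)
		}
		return superBucket*numSubBuckets + subBucket
	}

	func main() {
		for _, d := range []int64{0, 3, 4, 7, 9, 100} {
			fmt.Println(d, "->", bucketIndex(d))
		}
	}

Negative durations never reach this computation; as the previous hunk shows, they are counted in underflow instead.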


@@ -7,7 +7,6 @@ package runtime
 // Metrics implementation exported to runtime/metrics.
 
 import (
-	"runtime/internal/atomic"
 	"unsafe"
 )
 
@@ -197,9 +196,9 @@ func initMetrics() {
 				// The bottom-most bucket, containing negative values, is tracked
 				// as a separately as underflow, so fill that in manually and then
 				// iterate over the rest.
-				hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+				hist.counts[0] = memstats.gcPauseDist.underflow.Load()
 				for i := range memstats.gcPauseDist.counts {
-					hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+					hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
 				}
 			},
 		},
@@ -327,9 +326,9 @@ func initMetrics() {
 		"/sched/latencies:seconds": {
 			compute: func(_ *statAggregate, out *metricValue) {
 				hist := out.float64HistOrInit(timeHistBuckets)
-				hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
+				hist.counts[0] = sched.timeToRun.underflow.Load()
 				for i := range sched.timeToRun.counts {
-					hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
+					hist.counts[i+1] = sched.timeToRun.counts[i].Load()
 				}
 			},
 		},
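
Both exporters do the same copy: slot 0 of the float64 histogram is fed from the underflow counter and the regular buckets follow one position later, now read with Load instead of atomic.Load64. A small sketch of that offset-by-one snapshot over a toy histogram; float64HistOrInit and the metricValue plumbing are runtime-internal and omitted here:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type hist struct {
		counts    [8]atomic.Uint64
		underflow atomic.Uint64
	}

	// snapshot copies the counters the way the loops above do: slot 0 holds
	// the underflow (negative durations), slots 1..n the regular buckets.
	func snapshot(h *hist) []uint64 {
		out := make([]uint64, len(h.counts)+1)
		out[0] = h.underflow.Load()
		for i := range h.counts {
			out[i+1] = h.counts[i].Load()
		}
		return out
	}

	func main() {
		var h hist
		h.underflow.Add(2)
		h.counts[3].Add(5)
		fmt.Println(snapshot(&h)) // [2 0 0 0 5 0 0 0 0]
	}

Each counter is loaded independently, so the snapshot is not one atomic cut across the whole histogram; that matches the softened doc comment in histogram.go, which now promises only that concurrent reads and writes are safe.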


@@ -334,10 +334,6 @@ func init() {
 		println(offset)
 		throw("memstats.heapStats not aligned to 8 bytes")
 	}
-	if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
-		println(offset)
-		throw("memstats.gcPauseDist not aligned to 8 bytes")
-	}
 	// Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
 	// [3]heapStatsDelta) to be 8-byte aligned.
 	if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {


@@ -703,11 +703,6 @@ func schedinit() {
 	sigsave(&gp.m.sigmask)
 	initSigmask = gp.m.sigmask
 
-	if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
-		println(offset)
-		throw("sched.timeToRun not aligned to 8 bytes")
-	}
-
 	goargs()
 	goenvs()
 	parsedebugvars()


@@ -843,8 +843,6 @@ type schedt struct {
 	// timeToRun is a distribution of scheduling latencies, defined
 	// as the sum of time a G spends in the _Grunnable state before
 	// it transitions to _Grunning.
-	//
-	// timeToRun is protected by sched.lock.
 	timeToRun timeHistogram
 }
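
With both the writer (the scheduler recording latencies) and the reader (the metrics exporter above) going through atomic counters, nothing about timeToRun relies on sched.lock, which is why the note could simply be dropped. A toy illustration of lock-free concurrent recording with the public atomic type; the names and sizes below are made up:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// latencies stands in for a shared histogram: several writers and a
	// reader share it with no mutex, relying only on per-counter atomics.
	var latencies [16]atomic.Uint64

	func record(bucket int) {
		latencies[bucket].Add(1)
	}

	func main() {
		var wg sync.WaitGroup
		for g := 0; g < 4; g++ {
			g := g
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					record(g) // concurrent writers, no lock held
				}
			}()
		}
		wg.Wait()

		var total uint64
		for i := range latencies {
			total += latencies[i].Load()
		}
		fmt.Println(total) // 4000
	}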