runtime: convert schedt.pollUntil to atomic type
Note that this converts pollUntil from uint64 to int64, the type used by nanotime().

For #53821.

Change-Id: Iec9ec7e09d3350552561d0708ba6ea9e8a8ae7ab
Reviewed-on: https://go-review.googlesource.com/c/go/+/419443
Run-TryBot: Michael Pratt <mpratt@google.com>
Reviewed-by: Austin Clements <austin@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
parent 5d7d50111f
commit 0fc774a68f
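Illustrative sketch (not part of the CL): the before/after access pattern, shown here with the public sync/atomic package; the runtime itself uses the equivalent Int64 type from runtime/internal/atomic, and the variable names below are made up.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Old style: a plain uint64 word, package-level atomic functions, and a
// uint64 <-> int64 conversion at every call site.
var pollUntilOld uint64

// New style: the atomic.Int64 type carries both the atomicity and the
// signedness, matching the int64 returned by nanotime().
var pollUntilNew atomic.Int64

func main() {
	when := time.Now().UnixNano() // int64, standing in for the runtime's nanotime()

	// Before: store/load through free functions with explicit conversions.
	atomic.StoreUint64(&pollUntilOld, uint64(when))
	got1 := int64(atomic.LoadUint64(&pollUntilOld))

	// After: methods on the typed field, no conversions needed.
	pollUntilNew.Store(when)
	got2 := pollUntilNew.Load()

	fmt.Println(got1 == got2) // true
}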
3 changed files with 5 additions and 7 deletions
src/runtime/align_runtime_test.go

@@ -17,7 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.pollUntil),
 	unsafe.Offsetof(schedt{}.timeToRun),
 	unsafe.Offsetof(timeHistogram{}.underflow),
 	unsafe.Offsetof(profBuf{}.overflow),
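For context, AtomicFields lists offsets of struct fields that the runtime touches with raw 64-bit atomic functions and that therefore must stay 8-byte aligned even on 32-bit platforms; once pollUntil becomes an atomic.Int64 it is accessed through methods and enforces its own alignment, so its entry is dropped. A rough, hypothetical sketch of that kind of offset check (not the actual runtime test):

package main

import (
	"fmt"
	"unsafe"
)

// fakeSched stands in for a runtime struct such as schedt.
type fakeSched struct {
	flags     uint32
	pollUntil uint64 // accessed with 64-bit atomics, so it must be 8-byte aligned
}

// atomicFields mirrors the idea of the AtomicFields list: offsets that a
// test can verify are 8-byte aligned.
var atomicFields = []uintptr{
	unsafe.Offsetof(fakeSched{}.pollUntil),
}

func main() {
	for _, off := range atomicFields {
		if off%8 != 0 {
			fmt.Printf("offset %d is not 8-byte aligned\n", off)
		}
	}
	fmt.Println("alignment check done")
}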
src/runtime/proc.go

@@ -2804,7 +2804,7 @@ top:
 
 	// Poll network until next timer.
 	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
-		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
+		sched.pollUntil.Store(pollUntil)
 		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
 		}
@@ -2825,7 +2825,7 @@ top:
 			delay = 0
 		}
 		list := netpoll(delay) // block until new work is available
-		atomic.Store64(&sched.pollUntil, 0)
+		sched.pollUntil.Store(0)
 		sched.lastpoll.Store(now)
 		if faketime != 0 && list.empty() {
 			// Using fake time and nothing is ready; stop M.
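Taken together, the two stores in findrunnable bracket the blocking netpoll call: the poller publishes its deadline before blocking and clears it once it wakes, which is what lets other code decide whether a wakeup is needed. A simplified, hypothetical sketch of that publish/clear pattern with the new API:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// pollUntil plays the role of sched.pollUntil: zero when no timed poll is in
// progress, otherwise the deadline the poller intends to sleep until.
var pollUntil atomic.Int64

// blockingPoll sketches the shape of the code around netpoll(delay).
func blockingPoll(deadline int64) {
	pollUntil.Store(deadline)         // publish the deadline before blocking
	time.Sleep(10 * time.Millisecond) // stand-in for the blocking netpoll call
	pollUntil.Store(0)                // clear it once awake
}

func main() {
	blockingPoll(time.Now().Add(time.Second).UnixNano())
	fmt.Println("pollUntil after wake:", pollUntil.Load()) // 0
}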
@@ -2856,7 +2856,7 @@ top:
 			goto top
 		}
 	} else if pollUntil != 0 && netpollinited() {
-		pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+		pollerPollUntil := sched.pollUntil.Load()
 		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
 			netpollBreak()
 		}
@@ -3071,7 +3071,7 @@ func wakeNetPoller(when int64) {
 		// field is either zero or the time to which the current
 		// poll is expected to run. This can have a spurious wakeup
 		// but should never miss a wakeup.
-		pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+		pollerPollUntil := sched.pollUntil.Load()
 		if pollerPollUntil == 0 || pollerPollUntil > when {
 			netpollBreak()
 		}
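The comparison in wakeNetPoller is the consumer of that published value: a zero deadline or a deadline later than the new timer means the poller has to be interrupted with netpollBreak, which may be a spurious wakeup but never a missed one. A minimal, hypothetical restatement of that decision:

package main

import "fmt"

// shouldBreakPoll mirrors the condition used in wakeNetPoller (the function
// name here is made up): wake the poller if it published no deadline, or if
// its deadline falls after the newly armed timer.
func shouldBreakPoll(pollUntil, when int64) bool {
	return pollUntil == 0 || pollUntil > when
}

func main() {
	fmt.Println(shouldBreakPoll(0, 100))   // true: no deadline published
	fmt.Println(shouldBreakPoll(500, 100)) // true: poller would sleep past the timer
	fmt.Println(shouldBreakPoll(50, 100))  // false: poller wakes early enough on its own
}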
src/runtime/runtime2.go

@@ -758,10 +758,9 @@ type p struct {
 }
 
 type schedt struct {
-	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
 	goidgen   atomic.Uint64
 	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
-	pollUntil uint64       // time to which current poll is sleeping
+	pollUntil atomic.Int64 // time to which current poll is sleeping
 
 	lock mutex
 