diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go
index 32d2235ad3..1c24cf6d30 100644
--- a/src/runtime/lock_sema.go
+++ b/src/runtime/lock_sema.go
@@ -77,11 +77,11 @@ Loop:
 			osyield()
 		} else {
 			// Someone else has it.
-			// l->waitm points to a linked list of M's waiting
-			// for this lock, chained through m->nextwaitm.
+			// l.key points to a linked list of M's waiting
+			// for this lock, chained through m.mWaitList.next.
 			// Queue this M.
 			for {
-				gp.m.nextwaitm = muintptr(v &^ locked)
+				gp.m.mWaitList.next = muintptr(v &^ locked)
 				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
 					break
 				}
@@ -119,7 +119,7 @@ func unlock2(l *mutex) {
 			// Other M's are waiting for the lock.
 			// Dequeue an M.
 			mp = muintptr(v &^ locked).ptr()
-			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
+			if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
 				// Dequeued an M. Wake it.
 				semawakeup(mp)
 				break
@@ -200,7 +200,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
 	// This reduces the nosplit footprint of notetsleep_internal.
 	gp = getg()
 
-	// Register for wakeup on n->waitm.
+	// Register for wakeup on n.key.
 	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
 		// Must be locked (got wakeup).
 		if n.key != locked {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index b51a1ad3ce..b97fac787e 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -667,6 +667,17 @@ func (lt *lockTimer) end() {
 	}
 }
 
+// mWaitList is part of the M struct, and holds the list of Ms that are waiting
+// for a particular runtime.mutex.
+//
+// When an M is unable to immediately obtain a lock, it adds itself to the list
+// of Ms waiting for the lock. It does that via this struct's next field,
+// forming a singly-linked list with the mutex's key field pointing to the head
+// of the list.
+type mWaitList struct {
+	next muintptr // next m waiting for lock (set by us, cleared by another during unlock)
+}
+
 type mLockProfile struct {
 	waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
 	stack    []uintptr    // stack that experienced contention in runtime.lockWithRank
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index ff11414e3e..470b829912 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -596,8 +596,8 @@ type m struct {
 	createstack   [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
 	lockedExt     uint32      // tracking for external LockOSThread
 	lockedInt     uint32      // tracking for internal lockOSThread
-	nextwaitm     muintptr    // next m waiting for lock
+	mWaitList     mWaitList   // list of runtime lock waiters
 
 	mLockProfile mLockProfile // fields relating to runtime.lock contention
 	profStack    []uintptr    // used for memory/block/mutex stack traces