runtime: flush on every write barrier while debugging

Currently, we flush the write barrier buffer on every write barrier
once throwOnGCWork is set, but not during the mark completion
algorithm itself. As seen in recent failures like

  https://build.golang.org/log/317369853b803b4ee762b27653f367e1aa445ac1

by the time we actually catch a late gcWork put, the write barrier
buffer is full-size again.

As a result, we're probably not catching the actual problematic write
barrier, which is probably somewhere in the buffer.
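
A minimal toy model of the failure mode (the type, sizes, and flush
logic below are invented for illustration; the real buffer is the
runtime's wbBuf, shown in the diff below): while the buffer is held at
one entry, every write barrier overflows and flushes immediately, but
once it is reset back to full size the interesting pointer just sits
somewhere in the buffer instead of being caught.

package main

import "fmt"

const fullEntries = 8 // stand-in for wbBufEntryPointers * wbBufEntries

// toyBuf is a simplified stand-in for the per-P write barrier buffer.
type toyBuf struct {
	next int
	end  int
	buf  [fullEntries]uintptr
}

// reset empties the buffer. With debug set, it leaves room for only
// one entry so that every put overflows and must be flushed.
func (b *toyBuf) reset(debug bool) {
	b.next = 0
	if debug {
		b.end = 1
	} else {
		b.end = fullEntries
	}
}

// put records one pointer and reports whether the buffer must be
// flushed now (the point at which a late gcWork put would be caught).
func (b *toyBuf) put(p uintptr) bool {
	b.buf[b.next] = p
	b.next++
	return b.next == b.end
}

func main() {
	var b toyBuf
	b.reset(true)              // debugging: flush on every barrier
	fmt.Println(b.put(0xdead)) // true: caught right away
	b.reset(false)             // full-size again...
	fmt.Println(b.put(0xdead)) // false: the barrier is buried in the buffer
}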

Fix this by using the gcWork pause generation to also keep the write
barrier buffer small between when mark completion flushes it and when
mark completion is done.
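
The gating the fix adds can be sketched in isolation. In the sketch
below, gcWorkPauseGen and debugGen mirror the names introduced by the
diff, but everything else is a simplified stand-in rather than the
runtime's actual code: mark completion stamps each buffer with the
current pause generation when it flushes it, and reset keeps the
buffer at one entry for as long as that stamp matches the global
generation.

package main

import (
	"fmt"
	"sync/atomic"
)

// gcWorkPauseGen stands in for the runtime's pause generation, which
// advances once mark completion is done.
var gcWorkPauseGen uint32 = 1

const fullEntries = 8

// toyBuf is a simplified stand-in for the per-P write barrier buffer.
type toyBuf struct {
	end      int
	debugGen uint32 // pause generation during which this buffer was flushed
}

// reset keeps the buffer at one entry while its stamp matches the
// current pause generation, and restores the full size afterwards.
func (b *toyBuf) reset() {
	if b.debugGen == atomic.LoadUint32(&gcWorkPauseGen) {
		b.end = 1
		return
	}
	b.end = fullEntries
}

func main() {
	// Mark completion shrinks the buffer and stamps the generation.
	b := &toyBuf{end: 1, debugGen: atomic.LoadUint32(&gcWorkPauseGen)}

	b.reset()
	fmt.Println(b.end) // 1: stays small until mark completion is done

	atomic.AddUint32(&gcWorkPauseGen, 1) // mark completion is done
	b.reset()
	fmt.Println(b.end) // 8: back to full size
}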

For #27993.

Change-Id: I77618169441d42a7d562fb2a998cfaa89891edb2
Reviewed-on: https://go-review.googlesource.com/c/154638
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Austin Clements 2018-12-17 20:57:34 -05:00
parent 9a258f3086
commit 2d00007bdb
2 changed files with 9 additions and 1 deletion

src/runtime/mgc.go

@@ -1434,6 +1434,7 @@ top:
 			if debugCachedWork {
 				b := &_p_.wbBuf
 				b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))
+				b.debugGen = gcWorkPauseGen
 			}
 			// Flush the gcWork, since this may create global work
 			// and set the flushedWork flag.

src/runtime/mwbbuf.go

@@ -23,6 +23,7 @@
 package runtime
 
 import (
+	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -56,6 +57,12 @@ type wbBuf struct {
 	// on. This must be a multiple of wbBufEntryPointers because
 	// the write barrier only checks for overflow once per entry.
 	buf [wbBufEntryPointers * wbBufEntries]uintptr
+
+	// debugGen causes the write barrier buffer to flush after
+	// every write barrier if equal to gcWorkPauseGen. This is for
+	// debugging #27993. This is only set if debugCachedWork is
+	// set.
+	debugGen uint32
 }
 
 const (
@@ -79,7 +86,7 @@ const (
 func (b *wbBuf) reset() {
 	start := uintptr(unsafe.Pointer(&b.buf[0]))
 	b.next = start
-	if writeBarrier.cgo || (debugCachedWork && throwOnGCWork) {
+	if writeBarrier.cgo || (debugCachedWork && (throwOnGCWork || b.debugGen == atomic.Load(&gcWorkPauseGen))) {
 		// Effectively disable the buffer by forcing a flush
 		// on every barrier.
 		b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers]))