
[dev.typeparams] runtime: undo go'd closure argument workaround

In CL 298669 we added defer/go wrapping. Closures are not allowed to
escape when compiling the runtime, so we worked around that restriction
by rewriting go'd closures to argumentless, non-capturing closures;
such a function literal is not a real closure and therefore does not
need to escape.

The previous CL removes that restriction, so we can now undo the
workaround.

Updates #40724.

Change-Id: Ic7bf129da4aee7b7fdb7157414eca943a6a27264
Reviewed-on: https://go-review.googlesource.com/c/go/+/325110
Trust: Cherry Mui <cherryyz@google.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Cherry Mui 2021-06-03 18:29:05 -04:00
parent 46beeed0ac
commit 3298c749ac
4 changed files with 13 additions and 30 deletions
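For context, the shape of the workaround being undone looks roughly like the sketch below (illustrative only; the names workState, startWithWorkaround, and startDirect are not from the Go sources). Instead of letting the go'd function literal capture local variables, the workaround staged them in a package-level variable so the literal captured nothing; this commit returns the runtime to the direct-capture form shown second.

package main

import "fmt"

// Workaround form: the go'd function literal reads its inputs from a
// package-level variable, so it captures nothing and takes no arguments.
var workState struct {
	done chan bool
}

func startWithWorkaround() {
	workState.done = make(chan bool, 1)
	go func() {
		workState.done <- true // reads the global, not a captured local
	}()
	<-workState.done
}

// Restored form: the function literal captures the local channel directly,
// which is what the diffs below switch back to.
func startDirect() {
	done := make(chan bool, 1)
	go func() {
		done <- true // captures the local variable
	}()
	<-done
}

func main() {
	startWithWorkaround()
	startDirect()
	fmt.Println("both goroutines completed")
}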

View File

@@ -145,40 +145,28 @@ func RunSchedLocalQueueStealTest() {
 	}
 }
 
-// Temporary to enable register ABI bringup.
-// TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
-// get passed to the "go" stmts there.
-var RunSchedLocalQueueEmptyState struct {
-	done  chan bool
-	ready *uint32
-	p     *p
-}
-
 func RunSchedLocalQueueEmptyTest(iters int) {
 	// Test that runq is not spuriously reported as empty.
 	// Runq emptiness affects scheduling decisions and spurious emptiness
 	// can lead to underutilization (both runnable Gs and idle Ps coexist
 	// for arbitrary long time).
 	done := make(chan bool, 1)
-	RunSchedLocalQueueEmptyState.done = done
 	p := new(p)
-	RunSchedLocalQueueEmptyState.p = p
 	gs := make([]g, 2)
 	ready := new(uint32)
-	RunSchedLocalQueueEmptyState.ready = ready
 	for i := 0; i < iters; i++ {
 		*ready = 0
 		next0 := (i & 1) == 0
 		next1 := (i & 2) == 0
 		runqput(p, &gs[0], next0)
 		go func() {
-			for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
+			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
 			}
-			if runqempty(RunSchedLocalQueueEmptyState.p) {
-				//println("next:", next0, next1)
+			if runqempty(p) {
+				println("next:", next0, next1)
 				throw("queue is empty")
 			}
-			RunSchedLocalQueueEmptyState.done <- true
+			done <- true
 		}()
 		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
 		}

View File

@@ -167,22 +167,17 @@ func gcinit() {
 	lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
 }
 
-// Temporary in order to enable register ABI work.
-// TODO(register args): convert back to local chan in gcenabled, passed to "go" stmts.
-var gcenable_setup chan int
-
 // gcenable is called after the bulk of the runtime initialization,
 // just before we're about to start letting user code run.
 // It kicks off the background sweeper goroutine, the background
 // scavenger goroutine, and enables GC.
 func gcenable() {
 	// Kick off sweeping and scavenging.
-	gcenable_setup = make(chan int, 2)
-	go bgsweep()
-	go bgscavenge()
-	<-gcenable_setup
-	<-gcenable_setup
-	gcenable_setup = nil
+	c := make(chan int, 2)
+	go bgsweep(c)
+	go bgscavenge(c)
+	<-c
+	<-c
 	memstats.enablegc = true // now that runtime is initialized, GC is okay
 }

View File

@@ -249,7 +249,7 @@ func scavengeSleep(ns int64) int64 {
 // The background scavenger maintains the RSS of the application below
 // the line described by the proportional scavenging statistics in
 // the mheap struct.
-func bgscavenge() {
+func bgscavenge(c chan int) {
 	scavenge.g = getg()
 
 	lockInit(&scavenge.lock, lockRankScavenge)
@@ -261,7 +261,7 @@ func bgscavenge() {
 		wakeScavenger()
 	}
 
-	gcenable_setup <- 1
+	c <- 1
 	goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
 
 	// Exponentially-weighted moving average of the fraction of time this

View File

@@ -153,13 +153,13 @@ func finishsweep_m() {
 	nextMarkBitArenaEpoch()
 }
 
-func bgsweep() {
+func bgsweep(c chan int) {
 	sweep.g = getg()
 
 	lockInit(&sweep.lock, lockRankSweep)
 	lock(&sweep.lock)
 	sweep.parked = true
-	gcenable_setup <- 1
+	c <- 1
 	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
 
 	for {