runtime/gc: Run garbage collector on g0 stack

Run the collector on the g0 stack instead of the regular g stack. We do
this so that the g stack we're currently running on is no longer
changing. Cuts the root set down a bit (g0 stacks are not scanned, and
we don't need to scan gc's internal state). Also an enabler for
copyable stacks.

R=golang-dev, cshapiro, khr, 0xe2.0x9a.0x9b, dvyukov, rsc, iant
CC=golang-dev
https://golang.org/cl/9754044

commit 71f061043d (parent af7f7b7515)
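
The mechanism at the heart of this change, hopping onto a dedicated
scheduler stack (g0) so the goroutine stack being scanned stays frozen,
can be sketched outside the runtime. Below is a minimal, hypothetical
illustration using POSIX ucontext; run_on_g0, mgc_trampoline, and the
global param slot are stand-ins for runtime·mcall, mgc, and g->param,
not the runtime's real API.

#include <stdio.h>
#include <ucontext.h>

/* Hypothetical sketch: run gc() on a dedicated "g0" stack so the
 * calling stack cannot change while the collector works. POSIX
 * ucontext stands in for runtime·mcall/runtime·gogo; `param` plays
 * the role of g->param (mcall's target receives nothing but the G,
 * which is why gc's arguments travel in a struct). */

struct gc_args {
    long long start_time;   /* ns, recorded just before stopping the world */
};

static ucontext_t g_ctx, g0_ctx;
static char g0_stack[64 * 1024];        /* the dedicated stack */
static struct gc_args *param;           /* ~ g->param */

static void gc(struct gc_args *args) {
    printf("collecting on g0 stack, start_time=%lld\n", args->start_time);
}

static void mgc_trampoline(void) {
    gc(param);                           /* runs on g0_stack */
    /* uc_link switches us back to the caller, like runtime·gogo */
}

static void run_on_g0(struct gc_args *a) {
    param = a;
    getcontext(&g0_ctx);
    g0_ctx.uc_stack.ss_sp   = g0_stack;
    g0_ctx.uc_stack.ss_size = sizeof g0_stack;
    g0_ctx.uc_link = &g_ctx;             /* where to resume afterwards */
    makecontext(&g0_ctx, mgc_trampoline, 0);
    swapcontext(&g_ctx, &g0_ctx);        /* ~ runtime·mcall(mgc) */
}

int main(void) {
    struct gc_args a = { .start_time = 42 };
    run_on_g0(&a);                       /* our own stack is frozen meanwhile */
    return 0;
}

In the actual patch below, mgc additionally flips gp->status between
Grunnable and Grunning around the call, presumably so the suspended g
looks like any other non-running goroutine to the stack scanner.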
src/pkg/runtime/mgc0.c

@@ -1448,14 +1448,11 @@ addstackroots(G *gp)
     stk = (Stktop*)gp->stackbase;
     guard = (byte*)gp->stackguard;
 
-    if(gp == g) {
-        // Scanning our own stack: start at &gp.
-        sp = runtime·getcallersp(&gp);
-        pc = runtime·getcallerpc(&gp);
-    } else if((mp = gp->m) != nil && mp->helpgc) {
-        // gchelper's stack is in active use and has no interesting pointers.
-        return;
-    } else if(gp->gcstack != (uintptr)nil) {
+    if(gp == g)
+        runtime·throw("can't scan our own stack");
+    if((mp = gp->m) != nil && mp->helpgc)
+        runtime·throw("can't scan gchelper stack");
+    if(gp->gcstack != (uintptr)nil) {
         // Scanning another goroutine that is about to enter or might
         // have just exited a system call. It may be executing code such
         // as schedlock and may have needed to start a new stack segment.
@@ -1890,13 +1887,14 @@ cachestats(GCStats *stats)
 }
 
 // Structure of arguments passed to function gc().
-// This allows the arguments to be passed via reflect·call.
+// This allows the arguments to be passed via runtime·mcall.
 struct gc_args
 {
-    int32 force;
+    int64 start_time; // start time of GC in ns (just before stoptheworld)
 };
 
 static void gc(struct gc_args *args);
+static void mgc(G *gp);
 
 static int32
 readgogc(void)
@@ -1911,12 +1909,14 @@ readgogc(void)
     return runtime·atoi(p);
 }
 
+static FuncVal runfinqv = {runfinq};
+
 void
 runtime·gc(int32 force)
 {
     byte *p;
-    struct gc_args a, *ap;
-    FuncVal gcv;
+    struct gc_args a;
+    int32 i;
 
     // The atomic operations are not atomic if the uint64s
     // are not aligned on uint64 boundaries. This has been
@@ -1947,21 +1947,66 @@ runtime·gc(int32 force)
     if(gcpercent < 0)
         return;
 
-    // Run gc on a bigger stack to eliminate
-    // a potentially large number of calls to runtime·morestack.
-    a.force = force;
-    ap = &a;
-    m->moreframesize_minalloc = StackBig;
-    gcv.fn = (void*)gc;
-    reflect·call(&gcv, (byte*)&ap, sizeof(ap));
-
-    if(gctrace > 1 && !force) {
-        a.force = 1;
-        gc(&a);
+    runtime·semacquire(&runtime·worldsema);
+    if(!force && mstats.heap_alloc < mstats.next_gc) {
+        // typically threads which lost the race to grab
+        // worldsema exit here when gc is done.
+        runtime·semrelease(&runtime·worldsema);
+        return;
+    }
+
+    // Ok, we're doing it! Stop everybody else
+    a.start_time = runtime·nanotime();
+    m->gcing = 1;
+    runtime·stoptheworld();
+
+    // Run gc on the g0 stack. We do this so that the g stack
+    // we're currently running on will no longer change. Cuts
+    // the root set down a bit (g0 stacks are not scanned, and
+    // we don't need to scan gc's internal state). Also an
+    // enabler for copyable stacks.
+    for(i = 0; i < (gctrace > 1 ? 2 : 1); i++) {
+        if(g == m->g0) {
+            // already on g0
+            gc(&a);
+        } else {
+            // switch to g0, call gc(&a), then switch back
+            g->param = &a;
+            runtime·mcall(mgc);
+        }
+        // record a new start time in case we're going around again
+        a.start_time = runtime·nanotime();
+    }
+
+    // all done
+    runtime·semrelease(&runtime·worldsema);
+    runtime·starttheworld();
+
+    // now that gc is done and we're back on g stack, kick off finalizer thread if needed
+    if(finq != nil) {
+        runtime·lock(&finlock);
+        // kick off or wake up goroutine to run queued finalizers
+        if(fing == nil)
+            fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
+        else if(fingwait) {
+            fingwait = 0;
+            runtime·ready(fing);
+        }
+        runtime·unlock(&finlock);
+        // give the queued finalizers, if any, a chance to run
+        runtime·gosched();
     }
 }
 
-static FuncVal runfinqv = {runfinq};
+static void
+mgc(G *gp)
+{
+    gp->status = Grunnable;
+    gc(gp->param);
+    gp->status = Grunning;
+    gp->param = nil;
+    runtime·gogo(&gp->sched, 0);
+}
 
 static void
 gc(struct gc_args *args)
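One pattern worth noting in the new runtime·gc body above is the
worldsema gating: any number of goroutines may decide a GC is due, but
only the winner of the semaphore should collect; the losers re-check
the trigger after acquiring and back out. A hedged pthread sketch of
that shape (maybe_gc, collect, and the heap numbers are invented for
illustration, not runtime state):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Sketch of the "recheck after acquiring worldsema" gating above. */
static pthread_mutex_t worldsema = PTHREAD_MUTEX_INITIALIZER;
static atomic_long heap_alloc = 150;   /* made-up numbers */
static atomic_long next_gc    = 100;

static void collect(void) {
    /* stop the world, mark, sweep ... then raise the trigger */
    atomic_store(&next_gc, 2 * atomic_load(&heap_alloc));
}

void maybe_gc(int force) {
    pthread_mutex_lock(&worldsema);
    if (!force && atomic_load(&heap_alloc) < atomic_load(&next_gc)) {
        /* typically threads which lost the race to grab
         * worldsema exit here when gc is done */
        pthread_mutex_unlock(&worldsema);
        return;
    }
    collect();
    pthread_mutex_unlock(&worldsema);
}

int main(void) {
    maybe_gc(0);   /* collects: 150 >= 100 */
    maybe_gc(0);   /* backs out: 150 < 300 */
    printf("next_gc=%ld\n", (long)atomic_load(&next_gc));
    return 0;
}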
@@ -1973,16 +2018,7 @@ gc(struct gc_args *args)
     uint32 i;
     Eface eface;
 
-    runtime·semacquire(&runtime·worldsema);
-    if(!args->force && mstats.heap_alloc < mstats.next_gc) {
-        runtime·semrelease(&runtime·worldsema);
-        return;
-    }
-
-    t0 = runtime·nanotime();
-
-    m->gcing = 1;
-    runtime·stoptheworld();
+    t0 = args->start_time;
 
     if(CollectStats)
         runtime·memclr((byte*)&gcstats, sizeof(gcstats));
@@ -2096,22 +2132,6 @@ gc(struct gc_args *args)
     }
 
     runtime·MProf_GC();
-    runtime·semrelease(&runtime·worldsema);
-    runtime·starttheworld();
-
-    if(finq != nil) {
-        runtime·lock(&finlock);
-        // kick off or wake up goroutine to run queued finalizers
-        if(fing == nil)
-            fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
-        else if(fingwait) {
-            fingwait = 0;
-            runtime·ready(fing);
-        }
-        runtime·unlock(&finlock);
-        // give the queued finalizers, if any, a chance to run
-        runtime·gosched();
-    }
 }
 
 void
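The finalizer kickoff removed here is not gone: the same block moved
into runtime·gc above, where it now runs after the world restarts and
back on the g stack. Its shape is a classic wake-or-spawn under a
lock. A rough pthread analogue, where the worker thread, condition
variable, and work counter are purely illustrative stand-ins for the
runtime's fing/fingwait/finq machinery:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t finlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fincond = PTHREAD_COND_INITIALIZER;
static pthread_t fing;
static bool have_fing, fingwait;
static int  finq;   /* count of queued finalizers, stand-in for the finq list */

static void *runfinq(void *arg) {
    (void)arg;
    pthread_mutex_lock(&finlock);
    for (;;) {
        while (finq == 0) {
            fingwait = true;                 /* park until there is work */
            pthread_cond_wait(&fincond, &finlock);
        }
        finq = 0;                            /* "run" the queued finalizers */
    }
}

/* Called at the end of runtime·gc once the world is restarted. */
void kick_finalizers(int queued) {
    pthread_mutex_lock(&finlock);
    finq += queued;
    if (!have_fing) {
        have_fing = true;
        pthread_create(&fing, NULL, runfinq, NULL);  /* ~ runtime·newproc1(&runfinqv, ...) */
    } else if (fingwait) {
        fingwait = false;
        pthread_cond_signal(&fincond);               /* ~ runtime·ready(fing) */
    }
    pthread_mutex_unlock(&finlock);
}

int main(void) {
    kick_finalizers(1);   /* first call spawns the worker */
    kick_finalizers(1);   /* later calls wake it if parked */
    return 0;
}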
@@ -2183,6 +2203,8 @@ gchelperstart(void)
         runtime·throw("gchelperstart: bad m->helpgc");
     if(runtime·xchg(&bufferList[m->helpgc].busy, 1))
         runtime·throw("gchelperstart: already busy");
+    if(g != m->g0)
+        runtime·throw("gchelper not running on g0 stack");
 }
 
 static void
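The new check makes gchelperstart stricter: helper Ms must now be on
their g0 stacks too. The existing busy-flag guard beside it, swap in 1
atomically and trap if the old value was already 1 (runtime·xchg
returns the previous value), looks like this in C11. A sketch only;
helper_start/helper_done are invented names:

#include <stdatomic.h>
#include <stdlib.h>

static atomic_int busy;   /* stand-in for bufferList[m->helpgc].busy */

void helper_start(void) {
    /* atomic_exchange returns the old value, like runtime·xchg */
    if (atomic_exchange(&busy, 1))
        abort();          /* "gchelperstart: already busy" */
}

void helper_done(void) {
    atomic_store(&busy, 0);
}

int main(void) {
    helper_start();
    helper_done();
    return 0;
}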