static byte finptrmask[FinBlockSize/PtrSize/PointersPerByte];
bool runtime·fingwait;
bool runtime·fingwake;
-static FinBlock *allfin; // list of all blocks
+FinBlock *runtime·allfin; // list of all blocks
BitVector runtime·gcdatamask;
BitVector runtime·gcbssmask;
// Copy of mheap.allspans for marker or sweeper.
MSpan** spans;
uint32 nspan;
-} work;
+} runtime·work;
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
}
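// A minimal standalone sketch of the explicit work-list idea behind
// scanblock, reduced to a toy graph marker: instead of recursing, newly
// reached objects are pushed onto a buffer and popped until it drains.
// Node, WORKCAP and markgraph are illustrative names only; the real loop
// works on Workbuf entries and the heap bitmap.
#include <assert.h>

enum { WORKCAP = 1024 };	// sketch-only capacity; the runtime chains Workbufs instead

typedef struct Node Node;
struct Node
{
	int	marked;
	Node	*left;
	Node	*right;
};

// Mark every node reachable from root. Assumes the set of pending nodes
// never exceeds WORKCAP; the real collector hands a full buffer to
// getempty/handoff and keeps going.
static void
markgraph(Node *root)
{
	Node *buf[WORKCAP];	// explicit work list instead of recursion
	Node *p;
	int n;

	n = 0;
	if(root != 0 && !root->marked) {
		root->marked = 1;
		buf[n++] = root;
	}
	while(n > 0) {
		p = buf[--n];
		// mark children before pushing so each node is queued at most once
		if(p->left != 0 && !p->left->marked) {
			p->left->marked = 1;
			assert(n < WORKCAP);
			buf[n++] = p->left;
		}
		if(p->right != 0 && !p->right->marked) {
			p->right->marked = 1;
			assert(n < WORKCAP);
			buf[n++] = p->right;
		}
	}
}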
// If another proc wants a pointer, give it some.
- if(work.nwait > 0 && nobj > 4 && work.full == 0) {
+ if(runtime·work.nwait > 0 && nobj > 4 && runtime·work.full == 0) {
wbuf->nobj = nobj;
wbuf = handoff(wbuf);
nobj = wbuf->nobj;
// quadruple is already marked. Otherwise we resort to CAS
// loop for marking.
if((xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) ||
- work.nproc == 1)
+ runtime·work.nproc == 1)
*bitp = xbits | (bitMarked<<shift);
else
runtime·atomicor8(bitp, bitMarked<<shift);
break;
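// The check above is the mark-bit fast path: with a single GC proc, or when
// the byte's current value shows that no other marker can be racing on the
// same bitmap byte, a plain read-modify-write store of bitMarked is safe;
// only the remaining multi-proc case falls back to the atomic
// runtime·atomicor8 (the "CAS loop" the comment above refers to).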
case RootFinalizers:
- for(fb=allfin; fb; fb=fb->alllink)
+ for(fb=runtime·allfin; fb; fb=fb->alllink)
scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), finptrmask);
break;
case RootSpans:
// mark MSpan.specials
sg = runtime·mheap.sweepgen;
- for(spanidx=0; spanidx<work.nspan; spanidx++) {
+ for(spanidx=0; spanidx<runtime·work.nspan; spanidx++) {
Special *sp;
SpecialFinalizer *spf;
- s = work.spans[spanidx];
+ s = runtime·work.spans[spanidx];
if(s->state != MSpanInUse)
continue;
if(s->sweepgen != sg) {
// needed only to output in traceback
status = runtime·readgstatus(gp);
if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
- gp->waitsince = work.tstart;
+ gp->waitsince = runtime·work.tstart;
// Shrink a stack if not much of it is being used.
runtime·shrinkstack(gp);
if(runtime·readgstatus(gp) == Gdead)
MCache *c;
if(b != nil)
- runtime·lfstackpush(&work.full, &b->node);
+ runtime·lfstackpush(&runtime·work.full, &b->node);
b = nil;
c = g->m->mcache;
if(c->gcworkbuf != nil) {
c->gcworkbuf = nil;
}
if(b == nil)
- b = (Workbuf*)runtime·lfstackpop(&work.empty);
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
if(b == nil)
b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
b->nobj = 0;
c->gcworkbuf = b;
return;
}
- runtime·lfstackpush(&work.empty, &b->node);
+ runtime·lfstackpush(&runtime·work.empty, &b->node);
}
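// getempty and putempty above manage workbuf circulation: each M keeps at
// most one spare Workbuf cached in its mcache->gcworkbuf, filled buffers are
// published on the lock-free runtime·work.full stack, and drained ones go
// back to runtime·work.empty (or are allocated with persistentalloc when
// both run dry), so workers trade whole buffers rather than single pointers.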
void
int32 i;
if(b != nil)
- runtime·lfstackpush(&work.empty, &b->node);
- b = (Workbuf*)runtime·lfstackpop(&work.full);
- if(b != nil || work.nproc == 1)
+ runtime·lfstackpush(&runtime·work.empty, &b->node);
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
+ if(b != nil || runtime·work.nproc == 1)
return b;
- runtime·xadd(&work.nwait, +1);
+ runtime·xadd(&runtime·work.nwait, +1);
for(i=0;; i++) {
- if(work.full != 0) {
- runtime·xadd(&work.nwait, -1);
- b = (Workbuf*)runtime·lfstackpop(&work.full);
+ if(runtime·work.full != 0) {
+ runtime·xadd(&runtime·work.nwait, -1);
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
if(b != nil)
return b;
- runtime·xadd(&work.nwait, +1);
+ runtime·xadd(&runtime·work.nwait, +1);
}
- if(work.nwait == work.nproc)
+ if(runtime·work.nwait == runtime·work.nproc)
return nil;
if(i < 10) {
g->m->gcstats.nprocyield++;
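// In the wait loop above, a worker that finds no full buffers advertises
// itself in runtime·work.nwait and keeps rechecking runtime·work.full; once
// nwait equals nproc every worker is idle, no new work can appear, and nil
// is returned to end the mark phase. The i counter escalates the wait from
// procyield to (in branches not shown here) an OS yield and a short sleep,
// matching the nosyield/nsleep counters reported in the gctrace line below.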
g->m->gcstats.nhandoffcnt += n;
// Put b on full list - let first half of b get stolen.
- runtime·lfstackpush(&work.full, &b->node);
+ runtime·lfstackpush(&runtime·work.full, &b->node);
return b1;
}
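// handoff above splits work with a starved proc: roughly half of b's
// pointers are moved into a fresh buffer b1 that the caller keeps scanning,
// while b, still holding its first half, is pushed on runtime·work.full for
// an idle worker to steal; nhandoffcnt counts the pointers given away. For
// example, a buffer holding 512 objects leaves 256 with the caller and
// publishes 256.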
if(runtime·finc == nil) {
runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
- runtime·finc->alllink = allfin;
- allfin = runtime·finc;
+ runtime·finc->alllink = runtime·allfin;
+ runtime·allfin = runtime·finc;
if(finptrmask[0] == 0) {
// Build pointer mask for Finalizer array in block.
// Check assumptions made in finalizer1 array above.
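// finptrmask is the pointer bitmap handed to scanblock for the RootFinalizers
// case above: it marks which words of each Finalizer hold heap pointers
// (fn, arg, fint and ot do; the nret word is a plain count), so the scanner
// follows only real pointers. Its size, FinBlockSize/PtrSize/PointersPerByte,
// packs the per-word bits for one whole FinBlock.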
Finalizer *f;
uintptr i;
- for(fb = allfin; fb; fb = fb->alllink) {
+ for(fb = runtime·allfin; fb; fb = fb->alllink) {
for(i = 0; i < fb->cnt; i++) {
f = &fb->fin[i];
callback(f->fn, f->arg, f->nret, f->fint, f->ot);
sg = runtime·mheap.sweepgen;
for(;;) {
idx = runtime·xadd(&runtime·sweep.spanidx, 1) - 1;
- if(idx >= work.nspan) {
+ if(idx >= runtime·work.nspan) {
runtime·mheap.sweepdone = true;
g->m->locks--;
return -1;
}
- s = work.spans[idx];
+ s = runtime·work.spans[idx];
if(s->state != MSpanInUse) {
s->sweepgen = sg;
continue;
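// sweepone claims spans by atomically advancing runtime·sweep.spanidx, so
// background and on-demand sweepers can walk the cached runtime·work.spans
// array without holding a lock; an index at or past runtime·work.nspan means
// every span has been claimed and the sweep for this cycle is done, which is
// when sweepdone is set above.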
gchelperstart();
// run the parallel mark over the gc roots
- runtime·parfordo(work.markfor);
+ runtime·parfordo(runtime·work.markfor);
// help other threads scan secondary blocks
scanblock(nil, 0, nil);
- nproc = work.nproc; // work.nproc can change right after we increment work.ndone
- if(runtime·xadd(&work.ndone, +1) == nproc-1)
- runtime·notewakeup(&work.alldone);
+ nproc = runtime·work.nproc; // runtime·work.nproc can change right after we increment runtime·work.ndone
+ if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
+ runtime·notewakeup(&runtime·work.alldone);
g->m->traceback = 0;
}
if(sizeof(Workbuf) != WorkbufSize)
runtime·throw("runtime: size of Workbuf is suboptimal");
- work.markfor = runtime·parforalloc(MaxGcproc);
+ runtime·work.markfor = runtime·parforalloc(MaxGcproc);
runtime·gcpercent = runtime·readgogc();
runtime·gcdatamask = unrollglobgcprog(runtime·gcdata, runtime·edata - runtime·data);
runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
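// unrollglobgcprog expands the compiler-emitted GC programs for the data and
// bss segments into the runtime·gcdatamask and runtime·gcbssmask BitVectors
// declared earlier, so the global-root scans can consult a plain per-word
// pointer mask during marking instead of re-interpreting the program.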
g->m->traceback = 2;
t0 = args->start_time;
- work.tstart = args->start_time;
+ runtime·work.tstart = args->start_time;
t1 = 0;
if(runtime·debug.gctrace)
// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
runtime·lock(&runtime·mheap.lock);
// Free the old cached sweep array if necessary.
- if(work.spans != nil && work.spans != runtime·mheap.allspans)
- runtime·SysFree(work.spans, work.nspan*sizeof(work.spans[0]), &mstats.other_sys);
+ if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
+ runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
// Cache the current array for marking.
runtime·mheap.gcspans = runtime·mheap.allspans;
- work.spans = runtime·mheap.allspans;
- work.nspan = runtime·mheap.nspan;
+ runtime·work.spans = runtime·mheap.allspans;
+ runtime·work.nspan = runtime·mheap.nspan;
runtime·unlock(&runtime·mheap.lock);
- work.nwait = 0;
- work.ndone = 0;
- work.nproc = runtime·gcprocs();
- runtime·parforsetup(work.markfor, work.nproc, RootCount + runtime·allglen, nil, false, markroot);
- if(work.nproc > 1) {
- runtime·noteclear(&work.alldone);
- runtime·helpgc(work.nproc);
+ runtime·work.nwait = 0;
+ runtime·work.ndone = 0;
+ runtime·work.nproc = runtime·gcprocs();
+ runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
+ if(runtime·work.nproc > 1) {
+ runtime·noteclear(&runtime·work.alldone);
+ runtime·helpgc(runtime·work.nproc);
}
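// The setup above sizes the parallel mark: parforsetup spreads RootCount
// fixed roots (the data and bss segments plus the finalizer and span roots
// handled by the markroot cases shown earlier) and one root per goroutine
// (runtime·allglen) across runtime·work.nproc workers, and helpgc wakes that
// many helper Ms before the mark runs.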
t2 = 0;
t2 = runtime·nanotime();
gchelperstart();
- runtime·parfordo(work.markfor);
+ runtime·parfordo(runtime·work.markfor);
scanblock(nil, 0, nil);
t3 = 0;
if(runtime·debug.gctrace)
t3 = runtime·nanotime();
- if(work.nproc > 1)
- runtime·notesleep(&work.alldone);
+ if(runtime·work.nproc > 1)
+ runtime·notesleep(&runtime·work.alldone);
cachestats();
// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
}
obj = mstats.nmalloc - mstats.nfree;
- stats.nprocyield += work.markfor->nprocyield;
- stats.nosyield += work.markfor->nosyield;
- stats.nsleep += work.markfor->nsleep;
+ stats.nprocyield += runtime·work.markfor->nprocyield;
+ stats.nosyield += runtime·work.markfor->nosyield;
+ stats.nsleep += runtime·work.markfor->nsleep;
runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
" %d goroutines,"
" %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
+ mstats.numgc, runtime·work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
heap0>>20, heap1>>20, obj,
mstats.nmalloc, mstats.nfree,
runtime·gcount(),
- work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
+ runtime·work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
stats.nhandoff, stats.nhandoffcnt,
- work.markfor->nsteal, work.markfor->nstealcnt,
+ runtime·work.markfor->nsteal, runtime·work.markfor->nstealcnt,
stats.nprocyield, stats.nosyield, stats.nsleep);
runtime·sweep.nbgsweep = runtime·sweep.npausesweep = 0;
}
// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
runtime·lock(&runtime·mheap.lock);
// Free the old cached mark array if necessary.
- if(work.spans != nil && work.spans != runtime·mheap.allspans)
- runtime·SysFree(work.spans, work.nspan*sizeof(work.spans[0]), &mstats.other_sys);
+ if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
+ runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
// Cache the current array for sweeping.
runtime·mheap.gcspans = runtime·mheap.allspans;
runtime·mheap.sweepgen += 2;
runtime·mheap.sweepdone = false;
- work.spans = runtime·mheap.allspans;
- work.nspan = runtime·mheap.nspan;
+ runtime·work.spans = runtime·mheap.allspans;
+ runtime·work.nspan = runtime·mheap.nspan;
runtime·sweep.spanidx = 0;
runtime·unlock(&runtime·mheap.lock);
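// mheap.sweepgen advances by two each cycle, so a span's own sweepgen encodes
// its sweep state: equal to the heap's means already swept this cycle, two
// behind means it still needs sweeping, and the odd value in between means a
// sweep is in progress. That is why markroot and sweepone compare
// s->sweepgen against the cached sg above.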
// Stacks are assigned an order according to size.
// order = log_2(size/FixedStack)
// There is a free list for each order.
-static MSpan stackpool[NumStackOrders];
-static Mutex stackpoolmu;
+MSpan runtime·stackpool[NumStackOrders];
+Mutex runtime·stackpoolmu;
// TODO: one lock per order?
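// A minimal standalone sketch of the order computation described in the
// comment above. SketchFixedStack, SketchNumStackOrders and stackorder are
// illustrative placeholders, not the runtime's actual constants or API; the
// real stackalloc/stackfree compute the order inline with a shift loop.
#include <stdint.h>

enum
{
	SketchFixedStack = 2048,	// placeholder smallest stack size
	SketchNumStackOrders = 4,	// placeholder number of pooled orders
};

// Return the free-list order for a stack of the given size, which must be a
// power-of-two multiple of SketchFixedStack, or -1 if the size is too large
// for the pooled orders and would be allocated directly from the heap.
// E.g. size == 4*SketchFixedStack gives order log_2(4) == 2.
static int
stackorder(uintptr_t size)
{
	int order;
	uintptr_t s;

	order = 0;
	for(s = SketchFixedStack; s < size; s <<= 1)
		order++;
	if(order >= SketchNumStackOrders || s != size)
		return -1;
	return order;
}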
void
runtime·throw("cache size must be a multiple of page size");
for(i = 0; i < NumStackOrders; i++)
- runtime·MSpanList_Init(&stackpool[i]);
+ runtime·MSpanList_Init(&runtime·stackpool[i]);
}
// Allocates a stack from the free pool. Must be called with
MLink *x;
uintptr i;
- list = &stackpool[order];
+ list = &runtime·stackpool[order];
s = list->next;
if(s == list) {
// no free stacks. Allocate another span worth.
runtime·throw("freeing stack not in a stack span");
if(s->freelist == nil) {
// s will now have a free stack
- runtime·MSpanList_Insert(&stackpool[order], s);
+ runtime·MSpanList_Insert(&runtime·stackpool[order], s);
}
x->next = s->freelist;
s->freelist = x;
// Grab half of the allowed capacity (to prevent thrashing).
list = nil;
size = 0;
- runtime·lock(&stackpoolmu);
+ runtime·lock(&runtime·stackpoolmu);
while(size < StackCacheSize/2) {
x = poolalloc(order);
x->next = list;
list = x;
size += FixedStack << order;
}
- runtime·unlock(&stackpoolmu);
+ runtime·unlock(&runtime·stackpoolmu);
c->stackcache[order].list = list;
c->stackcache[order].size = size;
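// stackcacherefill above stops filling at StackCacheSize/2 and
// stackcacherelease below drains back down to the same half-full mark, so an
// M whose stack usage hovers near the threshold keeps its alloc/free traffic
// in the per-mcache stackcache instead of taking runtime·stackpoolmu on every
// operation; that hysteresis is what "prevent thrashing" refers to.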
runtime·printf("stackcacherelease order=%d\n", order);
x = c->stackcache[order].list;
size = c->stackcache[order].size;
- runtime·lock(&stackpoolmu);
+ runtime·lock(&runtime·stackpoolmu);
while(size > StackCacheSize/2) {
y = x->next;
poolfree(x, order);
x = y;
size -= FixedStack << order;
}
- runtime·unlock(&stackpoolmu);
+ runtime·unlock(&runtime·stackpoolmu);
c->stackcache[order].list = x;
c->stackcache[order].size = size;
}
if(StackDebug >= 1)
runtime·printf("stackcache clear\n");
- runtime·lock(&stackpoolmu);
+ runtime·lock(&runtime·stackpoolmu);
for(order = 0; order < NumStackOrders; order++) {
x = c->stackcache[order].list;
while(x != nil) {
c->stackcache[order].list = nil;
c->stackcache[order].size = 0;
}
- runtime·unlock(&stackpoolmu);
+ runtime·unlock(&runtime·stackpoolmu);
}
Stack
// procresize. Just get a stack from the global pool.
// Also don't touch stackcache during gc
// as it's flushed concurrently.
- runtime·lock(&stackpoolmu);
+ runtime·lock(&runtime·stackpoolmu);
x = poolalloc(order);
- runtime·unlock(&stackpoolmu);
+ runtime·unlock(&runtime·stackpoolmu);
} else {
x = c->stackcache[order].list;
if(x == nil) {
x = (MLink*)v;
c = g->m->mcache;
if(c == nil || g->m->gcing || g->m->helpgc) {
- runtime·lock(&stackpoolmu);
+ runtime·lock(&runtime·stackpoolmu);
poolfree(x, order);
- runtime·unlock(&stackpoolmu);
+ runtime·unlock(&runtime·stackpoolmu);
} else {
if(c->stackcache[order].size >= StackCacheSize)
stackcacherelease(c, order);