enum {
Debug = 0,
- DebugMark = 0, // run second pass to check mark
CollectStats = 0,
ScanStackByFrames = 1,
IgnorePreciseGC = 0,
BitsPointer = 1,
BitsIface = 2,
BitsEface = 3,
+
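+ // Fixed root classes for the parallel mark phase; markroot treats
+ // indices >= RootCount as goroutine stack scans.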
+ RootData = 0,
+ RootBss = 1,
+ RootFinalizers = 2,
+ RootSpanTypes = 3,
+ RootFlushCaches = 4,
+ RootCount = 5,
};
static void putempty(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
-static void scanstack(G* gp, void *scanbuf);
+static void addfinroots(void *wbufp, void *v);
+static void flushallmcaches(void);
+static void scanframe(Stkframe *frame, void *wbufp);
+static void addstackroots(G *gp, Workbuf **wbufp);
static struct {
uint64 full; // lock-free list of full blocks
int64 tstart;
volatile uint32 nwait;
volatile uint32 ndone;
- volatile uint32 debugmarkdone;
Note alldone;
ParFor *markfor;
ParFor *sweepfor;
Lock;
byte *chunk;
uintptr nchunk;
-
- Obj *roots;
- uint32 nroot;
- uint32 rootcap;
} work;
enum {
GC_DEFAULT_PTR = GC_NUM_INSTR,
GC_CHAN,
- GC_G_PTR,
GC_NUM_INSTR2
};
// Hchan program
static uintptr chanProg[2] = {0, GC_CHAN};
-// G* program
-static uintptr gptrProg[2] = {0, GC_G_PTR};
-
// Local variables of a program fragment or loop
typedef struct Frame Frame;
struct Frame {
// a work list in the Workbuf* structures and loops in the main function
// body. Keeping an explicit work list is easier on the stack allocator and
// more efficient.
-//
-// wbuf: current work buffer
-// wp: storage for next queued pointer (write pointer)
-// nobj: number of queued objects
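+//
+// wbuf may be nil; when non-nil, its nobj field carries the queued-object
+// count that used to arrive via the separate wp/nobj parameters.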
static void
-scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
+scanblock(Workbuf *wbuf, bool keepworking)
{
byte *b, *arena_start, *arena_used;
- uintptr n, i, end_b, elemsize, size, ti, objti, count, type;
+ uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj;
uintptr *pc, precise_type, nominal_size;
uintptr *chan_ret, chancap;
void *obj;
Iface *iface;
Hchan *chan;
ChanType *chantype;
+ Obj *wp;
if(sizeof(Workbuf) % PageSize != 0)
runtime·throw("scanblock: size of Workbuf is suboptimal");
precise_type = false;
nominal_size = 0;
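+
+ // Recover the queued-object count and write pointer from wbuf;
+ // they are no longer passed in as wp/nobj.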
+ if(wbuf) {
+ nobj = wbuf->nobj;
+ wp = &wbuf->obj[nobj];
+ } else {
+ nobj = 0;
+ wp = nil;
+ }
+
// Initialize sbuf
scanbuffers = &bufferList[m->helpgc];
pc = chan_ret;
continue;
- case GC_G_PTR:
- obj = (void*)stack_top.b;
- scanstack(obj, &sbuf);
- goto next_block;
-
default:
runtime·throw("scanblock: invalid GC instruction");
return;
}
}
-// debug_scanblock is the debug copy of scanblock.
-// it is simpler, slower, single-threaded, recursive,
-// and uses bitSpecial as the mark bit.
-static void
-debug_scanblock(byte *b, uintptr n)
-{
- byte *obj, *p;
- void **vp;
- uintptr size, *bitp, bits, shift, i, xbits, off;
- MSpan *s;
-
- if(!DebugMark)
- runtime·throw("debug_scanblock without DebugMark");
-
- if((intptr)n < 0) {
- runtime·printf("debug_scanblock %p %D\n", b, (int64)n);
- runtime·throw("debug_scanblock");
- }
-
- // Align b to a word boundary.
- off = (uintptr)b & (PtrSize-1);
- if(off != 0) {
- b += PtrSize - off;
- n -= PtrSize - off;
- }
-
- vp = (void**)b;
- n /= PtrSize;
- for(i=0; i<n; i++) {
- obj = (byte*)vp[i];
-
- // Words outside the arena cannot be pointers.
- if((byte*)obj < runtime·mheap.arena_start || (byte*)obj >= runtime·mheap.arena_used)
- continue;
-
- // Round down to word boundary.
- obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
-
- // Consult span table to find beginning.
- s = runtime·MHeap_LookupMaybe(&runtime·mheap, obj);
- if(s == nil)
- continue;
-
- p = (byte*)((uintptr)s->start<<PageShift);
- size = s->elemsize;
- if(s->sizeclass == 0) {
- obj = p;
- } else {
- int32 i = ((byte*)obj - p)/size;
- obj = p+i*size;
- }
-
- // Now that we know the object header, reload bits.
- off = (uintptr*)obj - (uintptr*)runtime·mheap.arena_start;
- bitp = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
- shift = off % wordsPerBitmapWord;
- xbits = *bitp;
- bits = xbits >> shift;
-
- // Now we have bits, bitp, and shift correct for
- // obj pointing at the base of the object.
- // If not allocated or already marked, done.
- if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0) // NOTE: bitSpecial not bitMarked
- continue;
- *bitp |= bitSpecial<<shift;
- if(!(bits & bitMarked))
- runtime·printf("found unmarked block %p in %p\n", obj, vp+i);
-
- // If object has no pointers, don't need to scan further.
- if((bits & bitScan) == 0)
- continue;
-
- debug_scanblock(obj, size);
- }
-}
-
// Append obj to the work buffer.
// _wbuf, _wp, _nobj are input/output parameters describing the work buffer.
static void
*_nobj = nobj;
}
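+// enqueue1 appends obj to *wbufp, swapping in an empty work buffer when
+// the current one fills. A simpler single-object variant of enqueue.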
+static void
+enqueue1(Workbuf **wbufp, Obj obj)
+{
+ Workbuf *wbuf;
+
+ wbuf = *wbufp;
+ if(wbuf->nobj >= nelem(wbuf->obj))
+ *wbufp = wbuf = getempty(wbuf);
+ wbuf->obj[wbuf->nobj++] = obj;
+}
+
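+// markroot scans the i'th root: one of the fixed Root* classes above,
+// or the stack of goroutine allg[i-RootCount].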
static void
markroot(ParFor *desc, uint32 i)
{
- Obj *wp;
Workbuf *wbuf;
- uintptr nobj;
+ FinBlock *fb;
+ MSpan **allspans, *s;
+ uint32 spanidx;
+ G *gp;
USED(&desc);
- wp = nil;
- wbuf = nil;
- nobj = 0;
- enqueue(work.roots[i], &wbuf, &wp, &nobj);
- scanblock(wbuf, wp, nobj, false);
+ wbuf = getempty(nil);
+ switch(i) {
+ case RootData:
+ enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata});
+ break;
+
+ case RootBss:
+ enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss});
+ break;
+
+ case RootFinalizers:
+ for(fb=allfin; fb; fb=fb->alllink)
+ enqueue1(&wbuf, (Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
+ break;
+
+ case RootSpanTypes:
+ // mark span types and MSpan.specials (to walk spans only once)
+ allspans = runtime·mheap.allspans;
+ for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
+ Special *sp;
+ SpecialFinalizer *spf;
+
+ s = allspans[spanidx];
+ if(s->state != MSpanInUse)
+ continue;
+ // The garbage collector ignores type pointers stored in MSpan.types:
+ // - Compiler-generated types are stored outside of the heap.
+ // - The reflect package has runtime-generated types cached in its data structures.
+ // The garbage collector relies on finding the references via that cache.
+ if(s->types.compression == MTypes_Words || s->types.compression == MTypes_Bytes)
+ markonly((byte*)s->types.data);
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialFinalizer)
+ continue;
+ // don't mark finalized object, but scan it so we
+ // retain everything it points to.
+ spf = (SpecialFinalizer*)sp;
+ enqueue1(&wbuf, (Obj){(void*)((s->start << PageShift) + spf->offset), s->elemsize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->fint, PtrSize, 0});
+ enqueue1(&wbuf, (Obj){(void*)&spf->ot, PtrSize, 0});
+ }
+ }
+ break;
+
+ case RootFlushCaches:
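+ // Replaces the old addfreelists: rather than marking per-P cached
+ // objects, flush them back to MCentral for normal sweeping.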
+ flushallmcaches();
+ break;
+
+ default:
+ // the rest is scanning goroutine stacks
+ if(i - RootCount >= runtime·allglen)
+ runtime·throw("markroot: bad index");
+ gp = runtime·allg[i - RootCount];
+ // Remember when we first observed the G blocked;
+ // needed only for traceback output.
+ if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
+ gp->waitsince = work.tstart;
+ addstackroots(gp, &wbuf);
+ break;
+ }
+
+ if(wbuf)
+ scanblock(wbuf, false);
}
// Get an empty work buffer off the work.empty list,
return b1;
}
-static void
-addroot(Obj obj)
-{
- uint32 cap;
- Obj *new;
-
- if(work.nroot >= work.rootcap) {
- cap = PageSize/sizeof(Obj);
- if(cap < 2*work.rootcap)
- cap = 2*work.rootcap;
- new = (Obj*)runtime·SysAlloc(cap*sizeof(Obj), &mstats.gc_sys);
- if(new == nil)
- runtime·throw("runtime: cannot allocate memory");
- if(work.roots != nil) {
- runtime·memmove(new, work.roots, work.rootcap*sizeof(Obj));
- runtime·SysFree(work.roots, work.rootcap*sizeof(Obj), &mstats.gc_sys);
- }
- work.roots = new;
- work.rootcap = cap;
- }
- work.roots[work.nroot] = obj;
- work.nroot++;
-}
-
extern byte pclntab[]; // base for f->ptrsoff
typedef struct BitVector BitVector;
// Scans an interface data value when the interface type indicates
// that it is a pointer.
static void
-scaninterfacedata(uintptr bits, byte *scanp, bool afterprologue, Scanbuf *sbuf)
+scaninterfacedata(uintptr bits, byte *scanp, bool afterprologue, void *wbufp)
{
Itab *tab;
Type *type;
return;
}
}
- *sbuf->obj.pos++ = (Obj){scanp+PtrSize, PtrSize, 0};
- if(sbuf->obj.pos == sbuf->obj.end)
- flushobjbuf(sbuf);
+ enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0});
}
// Starting from scanp, scans words corresponding to set bits.
static void
-scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, Scanbuf *sbuf)
+scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, void *wbufp)
{
uintptr word, bits;
uint32 *wordp;
for(; i > 0; i--) {
bits = word & 3;
if(bits != BitsNoPointer && *(void**)scanp != nil)
- if(bits == BitsPointer) {
- *sbuf->obj.pos++ = (Obj){scanp, PtrSize, 0};
- if(sbuf->obj.pos == sbuf->obj.end)
- flushobjbuf(sbuf);
- } else
- scaninterfacedata(bits, scanp, afterprologue, sbuf);
+ if(bits == BitsPointer)
+ enqueue1(wbufp, (Obj){scanp, PtrSize, 0});
+ else
+ scaninterfacedata(bits, scanp, afterprologue, wbufp);
word >>= BitsPerPointer;
scanp += PtrSize;
}
// Scan a stack frame: local variables and function arguments/results.
static void
-scanframe(Stkframe *frame, void *arg)
+scanframe(Stkframe *frame, void *wbufp)
{
Func *f;
- Scanbuf *sbuf;
StackMap *stackmap;
BitVector *bv;
uintptr size;
pcdata = 0;
}
- sbuf = arg;
// Scan local variables if stack frame has been allocated.
// Use pointer information if known.
afterprologue = (frame->varp > (byte*)frame->sp);
if(stackmap == nil) {
// No locals information, scan everything.
size = frame->varp - (byte*)frame->sp;
- *sbuf->obj.pos++ = (Obj){frame->varp - size, size, 0};
- if(sbuf->obj.pos == sbuf->obj.end)
- flushobjbuf(sbuf);
+ enqueue1(wbufp, (Obj){frame->varp - size, size, 0});
} else if(stackmap->n < 0) {
// Locals size information, scan just the locals.
size = -stackmap->n;
- *sbuf->obj.pos++ = (Obj){frame->varp - size, size, 0};
- if(sbuf->obj.pos == sbuf->obj.end)
- flushobjbuf(sbuf);
- } else if(stackmap->n > 0) {
+ enqueue1(wbufp, (Obj){frame->varp - size, size, 0});
+ } else if(stackmap->n > 0) {
// Locals bitmap information, scan just the pointers in
// locals.
if(pcdata < 0 || pcdata >= stackmap->n) {
// don't know where we are
- runtime·printf("pcdata is %d and %d stack map entries\n", pcdata, stackmap->n);
- runtime·throw("addframeroots: bad symbol table");
+ runtime·printf("pcdata is %d and %d stack map entries for %s (targetpc=%p)\n",
+ pcdata, stackmap->n, runtime·funcname(f), targetpc);
+ runtime·throw("scanframe: bad symbol table");
}
bv = stackmapdata(stackmap, pcdata);
size = (bv->n * PtrSize) / BitsPerPointer;
- scanbitvector(frame->varp - size, bv, afterprologue, sbuf);
+ scanbitvector(frame->varp - size, bv, afterprologue, wbufp);
}
}
stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
if(stackmap != nil) {
bv = stackmapdata(stackmap, pcdata);
- scanbitvector(frame->argp, bv, false, sbuf);
- } else {
- *sbuf->obj.pos++ = (Obj){frame->argp, frame->arglen, 0};
- if(sbuf->obj.pos == sbuf->obj.end)
- flushobjbuf(sbuf);
- }
-}
-
-static void
-scanstack(G* gp, void *scanbuf)
-{
- runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, scanframe, scanbuf, false);
+ scanbitvector(frame->argp, bv, false, wbufp);
+ } else
+ enqueue1(wbufp, (Obj){frame->argp, frame->arglen, 0});
}
static void
-addstackroots(G *gp)
+addstackroots(G *gp, Workbuf **wbufp)
{
M *mp;
int32 n;
void *base;
uintptr size;
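+
+ // The stack can only be scanned safely if the G is not running;
+ // Grunning here means the world was not actually stopped.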
+ switch(gp->status){
+ default:
+ runtime·printf("unexpected G.status %d (goroutine %p %D)\n", gp->status, gp, gp->goid);
+ runtime·throw("mark - bad status");
+ case Gdead:
+ return;
+ case Grunning:
+ runtime·throw("mark - world not stopped");
+ case Grunnable:
+ case Gsyscall:
+ case Gwaiting:
+ break;
+ }
+
if(gp == g)
runtime·throw("can't scan our own stack");
if((mp = gp->m) != nil && mp->helpgc)
guard = gp->stackguard;
// For function about to start, context argument is a root too.
if(gp->sched.ctxt != 0 && runtime·mlookup(gp->sched.ctxt, &base, &size, nil))
- addroot((Obj){base, size, 0});
+ enqueue1(wbufp, (Obj){base, size, 0});
}
if(ScanStackByFrames) {
USED(sp);
USED(stk);
USED(guard);
- addroot((Obj){(byte*)gp, PtrSize, (uintptr)gptrProg});
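+ // Walk the stack frame by frame; scanframe enqueues each frame's
+ // pointer slots into *wbufp.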
+ runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, scanframe, wbufp, false);
} else {
n = 0;
while(stk) {
runtime·printf("scanstack inconsistent: g%D#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk);
runtime·throw("scanstack");
}
- addroot((Obj){(byte*)sp, (uintptr)stk - sp, (uintptr)defaultProg | PRECISE | LOOP});
+ enqueue1(wbufp, (Obj){(byte*)sp, (uintptr)stk - sp, (uintptr)defaultProg | PRECISE | LOOP});
sp = stk->gobuf.sp;
guard = stk->stackguard;
stk = (Stktop*)stk->stackbase;
}
}
-static void
-addroots(void)
-{
- G *gp;
- FinBlock *fb;
- MSpan *s, **allspans;
- uint32 spanidx;
- Special *sp;
- SpecialFinalizer *spf;
-
- work.nroot = 0;
-
- // data & bss
- // TODO(atom): load balancing
- addroot((Obj){data, edata - data, (uintptr)gcdata});
- addroot((Obj){bss, ebss - bss, (uintptr)gcbss});
-
- // MSpan.types
- allspans = runtime·mheap.allspans;
- for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
- s = allspans[spanidx];
- if(s->state == MSpanInUse) {
- // The garbage collector ignores type pointers stored in MSpan.types:
- // - Compiler-generated types are stored outside of heap.
- // - The reflect package has runtime-generated types cached in its data structures.
- // The garbage collector relies on finding the references via that cache.
- switch(s->types.compression) {
- case MTypes_Empty:
- case MTypes_Single:
- break;
- case MTypes_Words:
- case MTypes_Bytes:
- markonly((byte*)s->types.data);
- break;
- }
- }
- }
-
- // MSpan.specials
- allspans = runtime·mheap.allspans;
- for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
- s = allspans[spanidx];
- if(s->state != MSpanInUse)
- continue;
- for(sp = s->specials; sp != nil; sp = sp->next) {
- switch(sp->kind) {
- case KindSpecialFinalizer:
- spf = (SpecialFinalizer*)sp;
- // don't mark finalized object, but scan it so we
- // retain everything it points to.
- addroot((Obj){(void*)((s->start << PageShift) + spf->offset), s->elemsize, 0});
- addroot((Obj){(void*)&spf->fn, PtrSize, 0});
- addroot((Obj){(void*)&spf->fint, PtrSize, 0});
- addroot((Obj){(void*)&spf->ot, PtrSize, 0});
- break;
- case KindSpecialProfile:
- break;
- }
- }
- }
-
- // stacks
- for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
- switch(gp->status){
- default:
- runtime·printf("unexpected G.status %d\n", gp->status);
- runtime·throw("mark - bad status");
- case Gdead:
- break;
- case Grunning:
- runtime·throw("mark - world not stopped");
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
- addstackroots(gp);
- break;
- }
-
- // remember when we've first observed the G blocked
- // needed only to output in traceback
- if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
- gp->waitsince = work.tstart;
- }
-
- for(fb=allfin; fb; fb=fb->alllink)
- addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
-}
-
-static void
-addfreelists(void)
-{
- int32 i;
- P *p, **pp;
- MCache *c;
- MLink *m;
-
- // Mark objects in the MCache of each P so we don't collect them.
- for(pp=runtime·allp; p=*pp; pp++) {
- c = p->mcache;
- if(c==nil)
- continue;
- for(i = 0; i < NumSizeClasses; i++) {
- for(m = c->list[i].list; m != nil; m = m->next) {
- markonly(m);
- }
- }
- }
- // Note: the sweeper will mark objects in each span's freelist.
-}
-
void
runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
{
continue;
if((bits & bitMarked) != 0) {
- if(DebugMark) {
- if(!(bits & bitSpecial))
- runtime·printf("found spurious mark on %p\n", p);
- *bitp &= ~(bitSpecial<<shift);
- }
*bitp &= ~(bitMarked<<shift);
continue;
}
runtime·parfordo(work.markfor);
// help other threads scan secondary blocks
- scanblock(nil, nil, 0, true);
-
- if(DebugMark) {
- // wait while the main thread executes mark(debug_scanblock)
- while(runtime·atomicload(&work.debugmarkdone) == 0)
- runtime·usleep(10);
- }
+ scanblock(nil, true);
runtime·parfordo(work.sweepfor);
bufferList[m->helpgc].busy = 0;
}
}
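+// flushallmcaches returns every P's cached memory to MCentral; used both
+// as a GC root task (RootFlushCaches) and by updatememstats.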
+static void
+flushallmcaches(void)
+{
+ P *p, **pp;
+ MCache *c;
+
+ // Flush each MCache to MCentral.
+ for(pp=runtime·allp; p=*pp; pp++) {
+ c = p->mcache;
+ if(c==nil)
+ continue;
+ runtime·MCache_ReleaseAll(c);
+ }
+}
+
static void
updatememstats(GCStats *stats)
{
M *mp;
MSpan *s;
- MCache *c;
- P *p, **pp;
int32 i;
uint64 stacks_inuse, smallfree;
uint64 *src, *dst;
}
// Flush MCache's to MCentral.
- for(pp=runtime·allp; p=*pp; pp++) {
- c = p->mcache;
- if(c==nil)
- continue;
- runtime·MCache_ReleaseAll(c);
- }
+ flushallmcaches();
// Aggregate local stats.
cachestats();
work.nwait = 0;
work.ndone = 0;
- work.debugmarkdone = 0;
work.nproc = runtime·gcprocs();
- addroots();
- addfreelists();
- runtime·parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
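+ // One markroot iteration per fixed root class, plus one per goroutine stack.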
+ runtime·parforsetup(work.markfor, work.nproc, RootCount + runtime·allglen, nil, false, markroot);
runtime·parforsetup(work.sweepfor, work.nproc, runtime·mheap.nspan, nil, true, sweepspan);
if(work.nproc > 1) {
runtime·noteclear(&work.alldone);
gchelperstart();
runtime·parfordo(work.markfor);
- scanblock(nil, nil, 0, true);
+ scanblock(nil, true);
- if(DebugMark) {
- for(i=0; i<work.nroot; i++)
- debug_scanblock(work.roots[i].p, work.roots[i].n);
- runtime·atomicstore(&work.debugmarkdone, 1);
- }
t2 = runtime·nanotime();
runtime·parfordo(work.sweepfor);
uint32 runtime·needextram;
bool runtime·iscgo;
M runtime·m0;
-G runtime·g0; // idle goroutine for m0
-G* runtime·allg;
+G runtime·g0; // idle goroutine for m0
G* runtime·lastg;
M* runtime·allm;
M* runtime·extram;
int32 runtime·ncpu;
static int32 newprocs;
+static Lock allglock; // the following vars are protected by this lock or by stoptheworld
+G** runtime·allg;
+uintptr runtime·allglen;
+static uintptr allgcap;
+
void runtime·mstart(void);
static void runqput(P*, G*);
static G* runqget(P*);
static bool preemptone(P*);
static bool exitsyscallfast(void);
static bool haveexperiment(int8*);
+static void allgadd(G*);
// The bootstrap sequence is:
//
{
G *gp;
int32 traceback;
+ uintptr i;
traceback = runtime·gotraceback(nil);
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
- for(gp = runtime·allg; gp != nil; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
if(gp == me || gp == m->curg || gp->status == Gdead)
continue;
if(gp->issystem && traceback < 2)
} else
runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
}
+ runtime·unlock(&allglock);
}
static void
if(raceenabled)
gp->racectx = runtime·racegostart(runtime·newextram);
// put on allg for garbage collector
- runtime·lock(&runtime·sched);
- if(runtime·lastg == nil)
- runtime·allg = gp;
- else
- runtime·lastg->alllink = gp;
- runtime·lastg = gp;
- runtime·unlock(&runtime·sched);
+ allgadd(gp);
// Add m to the extra list.
mnext = lockextra(true);
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
- runtime·lock(&runtime·sched);
- if(runtime·lastg == nil)
- runtime·allg = newg;
- else
- runtime·lastg->alllink = newg;
- runtime·lastg = newg;
- runtime·unlock(&runtime·sched);
+ allgadd(newg);
}
sp = (byte*)newg->stackbase;
return newg;
}
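+// allgadd appends gp to the global allg array under allglock, growing
+// the backing array geometrically (starting at one 4K page of slots).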
+static void
+allgadd(G *gp)
+{
+ G **new;
+ uintptr cap;
+
+ runtime·lock(&allglock);
+ if(runtime·allglen >= allgcap) {
+ cap = 4096/sizeof(new[0]);
+ if(cap < 2*allgcap)
+ cap = 2*allgcap;
+ new = runtime·malloc(cap*sizeof(new[0]));
+ if(new == nil)
+ runtime·throw("runtime: cannot allocate memory");
+ if(runtime·allg != nil) {
+ runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
+ runtime·free(runtime·allg);
+ }
+ runtime·allg = new;
+ allgcap = cap;
+ }
+ runtime·allg[runtime·allglen++] = gp;
+ runtime·unlock(&allglock);
+}
+
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
static void
{
G *gp;
int32 n, s;
+ uintptr i;
n = 0;
- runtime·lock(&runtime·sched);
+ runtime·lock(&allglock);
// TODO(dvyukov): runtime.NumGoroutine() is O(N).
// We do not want to increment/decrement centralized counter in newproc/goexit,
// just to make runtime.NumGoroutine() faster.
// Compromise solution is to introduce per-P counters of active goroutines.
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
s = gp->status;
if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwaiting)
n++;
}
- runtime·unlock(&runtime·sched);
+ runtime·unlock(&allglock);
return n;
}
{
G *gp;
int32 run, grunning, s;
+ uintptr i;
// -1 for sysmon
run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
runtime·throw("checkdead: inconsistent counts");
}
grunning = 0;
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
if(gp->isbackground)
continue;
s = gp->status;
if(s == Gwaiting)
grunning++;
else if(s == Grunnable || s == Grunning || s == Gsyscall) {
+ runtime·unlock(&allglock);
runtime·printf("checkdead: find g %D in status %d\n", gp->goid, s);
runtime·throw("checkdead: runnable g");
}
}
+ runtime·unlock(&allglock);
if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
runtime·exit(0);
m->throwing = -1; // do not dump full stacks
int64 now;
int64 id1, id2, id3;
int32 i, t, h;
+ uintptr gi;
int8 *fmt;
M *mp, *lockedm;
G *gp, *lockedg;
mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
mp->spinning, id3);
}
- for(gp = runtime·allg; gp; gp = gp->alllink) {
+ runtime·lock(&allglock);
+ for(gi = 0; gi < runtime·allglen; gi++) {
+ gp = runtime·allg[gi];
mp = gp->m;
lockedm = gp->lockedm;
runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
lockedm ? lockedm->id : -1);
}
+ runtime·unlock(&allglock);
runtime·unlock(&runtime·sched);
}