From: Russ Cox
Date: Fri, 3 Oct 2014 16:22:19 +0000 (-0400)
Subject: [dev.garbage] merge default into dev.garbage
X-Git-Tag: go1.5beta1~2684^2~36
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=904ec0098137f742e0dd96da3bc033d6a0b615d1;p=gostls13.git

[dev.garbage] merge default into dev.garbage
---

904ec0098137f742e0dd96da3bc033d6a0b615d1
diff --cc src/runtime/mgc0.c
index b4cd3474d7,9b9bc0ef13..39fae9bbe4
--- a/src/runtime/mgc0.c
+++ b/src/runtime/mgc0.c
@@@ -160,174 -155,41 +161,174 @@@ struct WorkData
 	// Copy of mheap.allspans for marker or sweeper.
 	MSpan**	spans;
 	uint32	nspan;
- } work;
+ };
+ WorkData runtime·work;
 
-// scanblock scans a block of n bytes starting at pointer b for references
-// to other objects, scanning any it finds recursively until there are no
-// unscanned objects left. Instead of using an explicit recursion, it keeps
-// a work list in the Workbuf* structures and loops in the main function
-// body. Keeping an explicit work list is easier on the stack allocator and
-// more efficient.
+// Is address b in the known heap? If it doesn't have a valid gcmap
+// this returns false; for example, pointers into stacks return false.
+static bool
+inheap(byte *b)
+{
+	MSpan *s;
+	pageID k;
+	uintptr x;
+
+	if(b == nil || b < runtime·mheap.arena_start || b >= runtime·mheap.arena_used)
+		return false;
+	// Not a beginning of a block, consult span table to find the block beginning.
+	k = (uintptr)b>>PageShift;
+	x = k;
+	x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+	s = runtime·mheap.spans[x];
+	if(s == nil || k < s->start || b >= s->limit || s->state != MSpanInUse)
+		return false;
+	return true;
+}
+
+// Given an address in the heap, return the relevant byte from the gcmap. This routine
+// can be used on addresses to the start of an object or to the interior of an object.
 static void
-scanblock(byte *b, uintptr n, byte *ptrmask)
+slottombits(byte *obj, Markbits *mbits)
 {
-	byte *obj, *p, *arena_start, *arena_used, **wp, *scanbuf[8], *ptrbitp, *bitp, bits, xbits, shift, cached;
-	uintptr i, nobj, size, idx, x, off, scanbufpos;
-	intptr ncached;
-	Workbuf *wbuf;
-	Iface *iface;
-	Eface *eface;
-	Type *typ;
+	uintptr off;
+
+	off = (uintptr*)((uintptr)obj&~(PtrSize-1)) - (uintptr*)runtime·mheap.arena_start;
+	mbits->bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
+	mbits->shift = (off % wordsPerBitmapByte) * gcBits;
+	mbits->xbits = *mbits->bitp;
+	mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
+}
+
+// b is a pointer into the heap.
+// Find the start of the object referred to by b.
+// Set mbits to the associated bits from the bit map.
+static byte*
+objectstart(byte *b, Markbits *mbits)
+{
+	byte *obj, *p;
 	MSpan *s;
 	pageID k;
-	bool keepworking;
+	uintptr x, size, idx;
 
-	// Cache memory arena parameters in local vars.
-	arena_start = runtime·mheap.arena_start;
-	arena_used = runtime·mheap.arena_used;
+	obj = (byte*)((uintptr)b&~(PtrSize-1));
+	for(;;) {
+		slottombits(obj, mbits);
+		if((mbits->bits&bitBoundary) == bitBoundary)
+			break;
+
+		// Not a beginning of a block, consult span table to find the block beginning.
+		k = (uintptr)obj>>PageShift;
+		x = k;
+		x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+		s = runtime·mheap.spans[x];
+		if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse){
+			if(s->state == MSpanStack)
+				break; // This is legit.
+
+			// The following is catching some bugs left over from
+			// us not being rigorous about which data structures
+			// hold valid pointers, and from different parts of the system
+			// considering different structures as roots. For example,
+			// a pointer into a stack may be left in a global data
+			// structure because that part of the runtime knows the
+			// structure will be reinitialized before it is reused.
+			// Unfortunately the GC believes these roots are valid.
+			// Typically a stack gets moved and only the structures that part of
+			// the system knows are alive are updated. The span is freed
+			// after the stack copy and the pointer is still alive. This
+			// check is catching that bug, but for now we will not throw;
+			// instead we will simply break out of this routine and depend
+			// on the caller to recognize that this pointer is not a valid
+			// heap pointer. I leave the code that catches the bug so that once
+			// resolved we can turn this check back on and throw.
+
+			//runtime·printf("Runtime: Span weird: obj=%p, k=%p", obj, k);
+			//if (s == nil)
+			//	runtime·printf(" s=nil\n");
+			//else
+			//	runtime·printf(" s->start=%p s->limit=%p, s->state=%d\n", s->start*PageSize, s->limit, s->state);
+			//runtime·throw("Blowup on weird span");
+			break; // We are not in a real block; throw??
+		}
+		p = (byte*)((uintptr)s->start<<PageShift);
+		if(s->sizeclass != 0) {
+			size = s->elemsize;
+			idx = ((byte*)obj - p)/size;
+			p = p+idx*size;
+		}
+		if(p == obj) {
+			runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
+				p, s->start*PageSize, s->limit);
+			runtime·throw("failed to find block beginning");
+		}
+		obj = p;
+	}
+	// if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit.
+	// Clear any low bits to get to the start of the object.
+	// greyobject depends on this.
+	return obj;
+}
 
-	wbuf = getempty(nil);
-	nobj = wbuf->nobj;
-	wp = &wbuf->obj[nobj];
-	keepworking = b == nil;
-	scanbufpos = 0;
-	for(i = 0; i < nelem(scanbuf); i++)
-		scanbuf[i] = nil;
+// obj is the start of an object with mark mbits.
+// If it isn't already marked, mark it and enqueue into workbuf.
+// Return possibly new workbuf to use.
+static Workbuf*
+greyobject(byte *obj, Markbits *mbits, Workbuf *wbuf)
+{
+	// obj should be start of allocation, and so must be at least pointer-aligned.
+	if(((uintptr)obj & (PtrSize-1)) != 0)
+		runtime·throw("greyobject: obj not pointer-aligned");
+
+	// If marked we have nothing to do.
+	if((mbits->bits&bitMarked) != 0)
+		return wbuf;
+
+	// Each byte of GC bitmap holds info for two words.
+	// If the current object is larger than two words, or if the object is one word
+	// but the object it shares the byte with is already marked,
+	// then all the possible concurrent updates are trying to set the same bit,
+	// so we can use a non-atomic update.
-	if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || work.nproc == 1)
+	if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
+		*mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
+	else
+		runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
+
+	if(((mbits->xbits>>(mbits->shift+2))&BitsMask) == BitsDead)
+		return wbuf;  // noscan object
+
+	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
+	// seems like a nice optimization that can be added back in.
+	// There needs to be time between the PREFETCH and the use.
+	// Previously we put the obj in an 8 element buffer that is drained at a rate
+	// to give the PREFETCH time to do its work.
+	// Use of PREFETCHNTA might be more appropriate than PREFETCH.
+
+	// If workbuf is full, obtain an empty one.
+	if(wbuf->nobj >= nelem(wbuf->obj)) {
+		wbuf = getempty(wbuf);
+	}
+
+	wbuf->obj[wbuf->nobj] = obj;
+	wbuf->nobj++;
+	return wbuf;
+}
+
+// Scan the object b of size n, adding pointers to wbuf.
+// Return possibly new wbuf to use.
+// If ptrmask != nil, it specifies where pointers are in b.
+// If ptrmask == nil, the GC bitmap should be consulted.
+// In this case, n may be an overestimate of the size; the GC bitmap
+// must also be used to make sure the scan stops at the end of b.
+static Workbuf*
+scanobject(byte *b, uintptr n, byte *ptrmask, Workbuf *wbuf)
+{
+	byte *obj, *arena_start, *arena_used, *ptrbitp, bits, cshift, cached;
+	uintptr i;
+	intptr ncached;
+	Markbits mbits;
+
+	arena_start = (byte*)runtime·mheap.arena_start;
+	arena_used = runtime·mheap.arena_used;
 	ptrbitp = nil;
 	cached = 0;
 	ncached = 0;
@@@ -424,17 -225,191 +425,17 @@@ scanblock(byte *b, uintptr n, byte *ptr
 		}
 
 		// If another proc wants a pointer, give it some.
-		if(work.nwait > 0 && wbuf->nobj > 4 && work.full == 0) {
-		if(runtime·work.nwait > 0 && nobj > 4 && runtime·work.full == 0) {
-			wbuf->nobj = nobj;
++		if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
 			wbuf = handoff(wbuf);
-			nobj = wbuf->nobj;
-			wp = &wbuf->obj[nobj];
-		}
-
-		wp--;
-		nobj--;
-		b = *wp;
-		n = arena_used - b; // scan until next bitBoundary or BitsDead
-		ptrmask = nil; // use GC bitmap for pointer info
-
-	scanobj:
-		// Find bits of the beginning of the object.
-		if(ptrmask == nil) {
-			off = (uintptr*)b - (uintptr*)arena_start;
-			ptrbitp = arena_start - off/wordsPerBitmapByte - 1;
-			shift = (off % wordsPerBitmapByte) * gcBits;
-			cached = *ptrbitp >> shift;
-			cached &= ~bitBoundary;
-			ncached = (8 - shift)/gcBits;
-		}
-		for(i = 0; i < n; i += PtrSize) {
-			obj = nil;
-			// Find bits for this word.
-			if(ptrmask == nil) {
-				// Check if we have reached the end of the span.
-				if((((uintptr)b+i)%PageSize) == 0 &&
-					runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
-					break;
-				// Consult GC bitmap.
-				if(ncached <= 0) {
-					// Refill cache.
-					cached = *--ptrbitp;
-					ncached = 2;
-				}
-				bits = cached;
-				cached >>= gcBits;
-				ncached--;
-				if((bits&bitBoundary) != 0)
-					break; // reached beginning of the next object
-				bits = (bits>>2)&BitsMask;
-				if(bits == BitsDead)
-					break; // reached no-scan part of the object
-			} else // dense mask (stack or data)
-				bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
-
-			if(bits == BitsScalar || bits == BitsDead)
-				continue;
-			if(bits == BitsPointer) {
-				obj = *(byte**)(b+i);
-				goto markobj;
-			}
-
-			// With those three out of the way, must be multi-word.
-			if(bits != BitsMultiWord)
-				runtime·throw("unexpected garbage collection bits");
-			// Find the next pair of bits.
-			if(ptrmask == nil) {
-				if(ncached <= 0) {
-					// Refill cache.
-					cached = *--ptrbitp;
-					ncached = 2;
-				}
-				bits = (cached>>2)&BitsMask;
-			} else
-				bits = (ptrmask[((i+PtrSize)/PtrSize)/4]>>((((i+PtrSize)/PtrSize)%4)*BitsPerPointer))&BitsMask;
-
-			switch(bits) {
-			default:
-				runtime·throw("unexpected garbage collection bits");
-			case BitsIface:
-				iface = (Iface*)(b+i);
-				if(iface->tab != nil) {
-					typ = iface->tab->type;
-					if(!(typ->kind&KindDirectIface) || !(typ->kind&KindNoPointers))
-						obj = iface->data;
-				}
-				break;
-			case BitsEface:
-				eface = (Eface*)(b+i);
-				typ = eface->type;
-				if(typ != nil) {
-					if(!(typ->kind&KindDirectIface) || !(typ->kind&KindNoPointers))
-						obj = eface->data;
-				}
-				break;
-			}
-
-			i += PtrSize;
-			cached >>= gcBits;
-			ncached--;
-
-		markobj:
-			// At this point we have extracted the next potential pointer.
-			// Check if it points into heap.
-			if(obj == nil || obj < arena_start || obj >= arena_used)
-				continue;
-			// Mark the object.
-			off = (uintptr*)obj - (uintptr*)arena_start;
-			bitp = arena_start - off/wordsPerBitmapByte - 1;
-			shift = (off % wordsPerBitmapByte) * gcBits;
-			xbits = *bitp;
-			bits = (xbits >> shift) & bitMask;
-			if((bits&bitBoundary) == 0) {
-				// Not a beginning of a block, consult span table to find the block beginning.
-				k = (uintptr)obj>>PageShift;
-				x = k;
-				x -= (uintptr)arena_start>>PageShift;
-				s = runtime·mheap.spans[x];
-				if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
-					continue;
-				p = (byte*)((uintptr)s->start<<PageShift);
-				if(s->sizeclass != 0) {
-					size = s->elemsize;
-					idx = ((byte*)obj - p)/size;
-					p = p+idx*size;
-				}
-				if(p == obj) {
-					runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
-						p, s->start*PageSize, s->limit);
-					runtime·throw("failed to find block beginning");
-				}
-				obj = p;
-				goto markobj;
-			}
-
-			// Now we have bits, bitp, and shift correct for
-			// obj pointing at the base of the object.
-			// Only care about not marked objects.
-			if((bits&bitMarked) != 0)
-				continue;
-			// If obj size is greater than 8, then each byte of GC bitmap
-			// contains info for at most one object. In such case we use
-			// non-atomic byte store to mark the object. This can lead
-			// to double enqueue of the object for scanning, but scanning
-			// is an idempotent operation, so it is OK. This cannot lead
-			// to bitmap corruption because the single marked bit is the
-			// only thing that can change in the byte.
-			// For 8-byte objects we use non-atomic store, if the other
-			// quadruple is already marked. Otherwise we resort to CAS
-			// loop for marking.
-			if((xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
-				*bitp = xbits | (bitMarked<<shift);
-			else
-				runtime·atomicor8(bitp, bitMarked<<shift);
-
-			if(((xbits>>(shift+2))&BitsMask) == BitsDead)
-				continue; // noscan object
-
-			// Queue the obj for scanning.
-			PREFETCH(obj);
-			obj = (byte*)((uintptr)obj & ~(PtrSize-1));
-			p = scanbuf[scanbufpos];
-			scanbuf[scanbufpos++] = obj;
-			if(scanbufpos == nelem(scanbuf))
-				scanbufpos = 0;
-			if(p == nil)
-				continue;
-
-			// If workbuf is full, obtain an empty one.
-			if(nobj >= nelem(wbuf->obj)) {
-				wbuf->nobj = nobj;
-				wbuf = getempty(wbuf);
-				nobj = wbuf->nobj;
-				wp = &wbuf->obj[nobj];
-			}
-			*wp = p;
-			wp++;
-			nobj++;
 		}
-
-		if(Debug && ptrmask == nil) {
-			// For heap objects ensure that we did not overscan.
-			n = 0;
-			p = nil;
-			if(!runtime·mlookup(b, &p, &n, nil) || b != p || i > n) {
-				runtime·printf("runtime: scanned (%p,%p), heap object (%p,%p)\n", b, i, p, n);
-				runtime·throw("scanblock: scanned invalid object");
-			}
-		}
+		// This might be a good place to add prefetch code...
+		// if(wbuf->nobj > 4) {
+		//	PREFETCH(wbuf->obj[wbuf->nobj - 3]);
+		// }
+		--wbuf->nobj;
+		b = wbuf->obj[wbuf->nobj];
+		wbuf = scanobject(b, runtime·mheap.arena_used - b, nil, wbuf);
 	}
 }
@@@ -505,9 -479,9 +506,9 @@@ markroot(ParFor *desc, uint32 i
 		gp = runtime·allg[i - RootCount];
 		// remember when we've first observed the G blocked
 		// needed only to output in traceback
-		status = runtime·readgstatus(gp);
+		status = runtime·readgstatus(gp); // We are not in a scan state
 		if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
-			gp->waitsince = work.tstart;
+			gp->waitsince = runtime·work.tstart;
 		// Shrink a stack if not much of it is being used.
 		runtime·shrinkstack(gp);
 		if(runtime·readgstatus(gp) == Gdead)
@@@ -562,13 -512,9 +563,13 @@@ getempty(Workbuf *b
 		c->gcworkbuf = nil;
 	}
 	if(b == nil)
-		b = (Workbuf*)runtime·lfstackpop(&work.empty);
+		b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
-	if(b == nil)
+	if(b == nil) {
 		b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
+		b->nobj = 0;
+	}
+	if(b->nobj != 0)
+		runtime·throw("getempty: b->nobj not 0\n");
 	b->nobj = 0;
 	return b;
 }
@@@ -585,87 -529,38 +586,87 @@@ putempty(Workbuf *b
 		c->gcworkbuf = b;
 		return;
 	}
-	runtime·lfstackpush(&work.empty, &b->node);
+	runtime·lfstackpush(&runtime·work.empty, &b->node);
 }
 
+// Get a partially empty work buffer from the mcache structure,
+// and if none is available get an empty one.
+static Workbuf*
+getpartial(void)
+{
+	MCache *c;
+	Workbuf *b;
+
+	c = g->m->mcache;
+	if(c->gcworkbuf != nil) {
+		b = c->gcworkbuf;
+		c->gcworkbuf = nil;
+	} else {
+		b = getempty(nil);
+	}
+	return b;
+}
+
+static void
+putpartial(Workbuf *b)
+{
+	MCache *c;
+
+	c = g->m->mcache;
+	if(c->gcworkbuf == nil) {
+		c->gcworkbuf = b;
+		return;
+	}
+
+	runtime·throw("putpartial: c->gcworkbuf is not nil\n");
+
--	runtime·lfstackpush(&work.full, &b->node);
++	runtime·lfstackpush(&runtime·work.full, &b->node);
+}
+
 void
-runtime·gcworkbuffree(void *b)
+runtime·gcworkbuffree(Workbuf *b)
 {
-	if(b != nil)
+	if(b != nil) {
+		if(b->nobj != 0)
+			runtime·throw("gcworkbuffree: b->nobj not 0\n");
 		putempty(b);
+	}
 }
 
 // Get a full work buffer off the work.full list, or return nil.
+// getfull acts as a barrier for work.nproc helpers. As long as one
+// gchelper is actively marking objects it
+// may create a workbuffer that the other helpers can work on.
+// The for loop either exits when a work buffer is found
+// or when _all_ of the work.nproc gc helpers are in the loop
+// looking for work and thus not capable of creating new work.
+// This is in fact the termination condition for the STW mark
+// phase.
 static Workbuf*
 getfull(Workbuf *b)
 {
 	int32 i;
 
-	if(b != nil)
+	if(b != nil) {
+		if(b->nobj != 0)
+			runtime·printf("runtime:getfull: b->nobj=%D not 0.", b->nobj);
-		runtime·lfstackpush(&work.empty, &b->node);
+		runtime·lfstackpush(&runtime·work.empty, &b->node);
+	}
-	b = (Workbuf*)runtime·lfstackpop(&work.full);
-	if(b != nil || work.nproc == 1)
+	b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
+	if(b != nil || runtime·work.nproc == 1)
 		return b;
 
-	runtime·xadd(&work.nwait, +1);
+	runtime·xadd(&runtime·work.nwait, +1);
 	for(i=0;; i++) {
-		if(work.full != 0) {
-			runtime·xadd(&work.nwait, -1);
-			b = (Workbuf*)runtime·lfstackpop(&work.full);
+		if(runtime·work.full != 0) {
+			runtime·xadd(&runtime·work.nwait, -1);
+			b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
 			if(b != nil)
 				return b;
-			runtime·xadd(&work.nwait, +1);
+			runtime·xadd(&runtime·work.nwait, +1);
 		}
-		if(work.nwait == work.nproc)
+		if(runtime·work.nwait == runtime·work.nproc)
 			return nil;
 		if(i < 10) {
 			g->m->gcstats.nprocyield++;
@@@ -1236,12 -1121,14 +1239,12 @@@ runtime·gchelper(void
 	gchelperstart();
 
 	// parallel mark over the GC roots
-	runtime·parfordo(work.markfor);
+	runtime·parfordo(runtime·work.markfor);
-
-	// help other threads scan secondary blocks
-	scanblock(nil, 0, nil);
-
-	nproc = runtime·work.nproc; // runtime·work.nproc can change right after we increment runtime·work.ndone
+	if(runtime·gcphase != GCscan)
+		scanblock(nil, 0, nil); // blocks in getfull
-	nproc = work.nproc; // work.nproc can change right after we increment work.ndone
-	if(runtime·xadd(&work.ndone, +1) == nproc-1)
-		runtime·notewakeup(&work.alldone);
++	nproc = runtime·work.nproc; // work.nproc can change right after we increment work.ndone
+	if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
+		runtime·notewakeup(&runtime·work.alldone);
 	g->m->traceback = 0;
 }
@@@ -1456,24 -1341,21 +1459,24 @@@ gc(struct gc_args *args
 	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
 	runtime·lock(&runtime·mheap.lock);
 	// Free the old cached sweep array if necessary.
-	if(work.spans != nil && work.spans != runtime·mheap.allspans)
-		runtime·SysFree(work.spans, work.nspan*sizeof(work.spans[0]), &mstats.other_sys);
+	if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
+		runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
 	// Cache the current array for marking.
 	runtime·mheap.gcspans = runtime·mheap.allspans;
-	work.spans = runtime·mheap.allspans;
-	work.nspan = runtime·mheap.nspan;
+	runtime·work.spans = runtime·mheap.allspans;
+	runtime·work.nspan = runtime·mheap.nspan;
 	runtime·unlock(&runtime·mheap.lock);
+	oldphase = runtime·gcphase;
-	work.nwait = 0;
-	work.ndone = 0;
-	work.nproc = runtime·gcprocs();
+	runtime·work.nwait = 0;
+	runtime·work.ndone = 0;
-	runtime·work.nproc = runtime·gcprocs();
++	runtime·work.nproc = runtime·gcprocs();
+	runtime·gcphase = GCmark; //^^  vv
+
-	runtime·parforsetup(work.markfor, work.nproc, RootCount + runtime·allglen, nil, false, markroot);
-	if(work.nproc > 1) {
-		runtime·noteclear(&work.alldone);
-		runtime·helpgc(work.nproc);
+	runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
+	if(runtime·work.nproc > 1) {
+		runtime·noteclear(&runtime·work.alldone);
+		runtime·helpgc(runtime·work.nproc);
 	}
 
 	t2 = 0;
@@@ -1481,10 -1363,9 +1484,10 @@@
 		t2 = runtime·nanotime();
 
 	gchelperstart();
-	runtime·parfordo(work.markfor);
+	runtime·parfordo(runtime·work.markfor);
-	scanblock(nil, 0, nil);
+	scanblock(nil, 0, nil);
+	runtime·gcphase = oldphase; //^^  vv
 	t3 = 0;
 	if(runtime·debug.gctrace)
 		t3 = runtime·nanotime();
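
The bitmap arithmetic in slottombits and greyobject above can be illustrated
with a small standalone C program. This is a minimal sketch, not runtime code:
the constant names (gcBits, wordsPerBitmapByte, bitBoundary, bitMarked) mirror
the runtime's, but the arena here is a toy array and the bitmap is indexed
forward, whereas the real runtime grows the bitmap downward from arena_start
(hence the "arena_start - off/wordsPerBitmapByte - 1" expression above).

	/* Minimal sketch of the GC bitmap math: each bitmap byte describes
	 * two heap words (4 bits per word); bitBoundary marks the first word
	 * of an object and bitMarked records that the object was greyed.
	 * Single-threaded, so the non-atomic store is always safe here. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	enum {
		PtrSize            = 8,           /* 64-bit heap words */
		gcBits             = 4,           /* bitmap bits per heap word */
		wordsPerBitmapByte = 8/gcBits,    /* two words per bitmap byte */
		bitBoundary        = 1,           /* word begins an object */
		bitMarked          = 2,           /* object has been marked */
		bitMask            = bitBoundary|bitMarked,
	};

	enum { HeapWords = 16 };

	static uint8_t  bitmap[HeapWords/wordsPerBitmapByte];
	static uint64_t heap[HeapWords];

	typedef struct {
		uint8_t  *bitp;   /* bitmap byte describing the word */
		uint32_t shift;   /* offset of this word's 4 bits in *bitp */
	} Markbits;

	/* Analogue of slottombits: map a heap address to bitmap byte+shift. */
	static void
	slottombits(void *obj, Markbits *mbits)
	{
		uintptr_t off;

		off = ((uintptr_t)obj - (uintptr_t)heap)/PtrSize; /* word index */
		mbits->bitp = &bitmap[off/wordsPerBitmapByte];
		mbits->shift = (off%wordsPerBitmapByte)*gcBits;
	}

	/* Analogue of greyobject's marking step. */
	static void
	mark(void *obj)
	{
		Markbits m;
		uint8_t bits;

		slottombits(obj, &m);
		bits = (*m.bitp >> m.shift) & bitMask;
		if((bits&bitBoundary) == 0)
			printf("%p: interior pointer, would consult the span table\n", obj);
		else if(bits&bitMarked)
			printf("%p: already marked\n", obj);
		else {
			*m.bitp |= bitMarked << m.shift;
			printf("%p: marked\n", obj);
		}
	}

	int
	main(void)
	{
		Markbits m;

		memset(bitmap, 0, sizeof bitmap);
		/* Pretend words 0 and 3 begin objects. */
		slottombits(&heap[0], &m);
		*m.bitp |= bitBoundary << m.shift;
		slottombits(&heap[3], &m);
		*m.bitp |= bitBoundary << m.shift;

		mark(&heap[0]);  /* marks the object at word 0 */
		mark(&heap[0]);  /* already marked */
		mark(&heap[1]);  /* interior word: no boundary bit */
		return 0;
	}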
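
The getfull comment above describes a termination barrier: a helper that finds
no work increments work.nwait, and once nwait equals nproc no helper can still
be producing work, so the STW mark phase is over. Below is a minimal sketch of
that protocol under stated assumptions: C11 atomics and pthreads stand in for
the runtime's xadd and lock-free workbuf stacks, a plain counter stands in for
real mark work, and NPROC is an invention of the sketch, not the runtime's.

	/* Sketch, not runtime code: helpers drain a shared work counter;
	 * an idle helper parks in the inner loop until either work appears
	 * or every helper is idle (nwait == NPROC), the termination test. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	enum { NPROC = 4 };

	static atomic_int full  = 1000;  /* units of work still queued */
	static atomic_int nwait = 0;     /* helpers currently idle */

	/* Try to take one unit of work. */
	static int
	getwork(void)
	{
		int n = atomic_load(&full);
		while(n > 0) {
			if(atomic_compare_exchange_weak(&full, &n, n-1))
				return 1;
		}
		return 0;
	}

	static void*
	helper(void *arg)
	{
		(void)arg;
		for(;;) {
			if(getwork())
				continue;                    /* "marked" one object */
			atomic_fetch_add(&nwait, 1);
			for(;;) {
				if(atomic_load(&full) > 0) {
					atomic_fetch_sub(&nwait, 1);
					if(getwork())
						break;               /* back to marking */
					atomic_fetch_add(&nwait, 1); /* lost the race */
					continue;
				}
				if(atomic_load(&nwait) == NPROC)
					return NULL;             /* all idle: phase done */
			}
		}
	}

	int
	main(void)
	{
		pthread_t t[NPROC];
		int i;

		for(i = 0; i < NPROC; i++)
			pthread_create(&t[i], NULL, helper, NULL);
		for(i = 0; i < NPROC; i++)
			pthread_join(t[i], NULL);
		puts("all helpers terminated");
		return 0;
	}

Note the same subtlety getfull handles: a helper re-increments nwait when its
pop from the full list loses a race, so nwait == nproc stays a safe
"no work anywhere" test.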