runtime: tighten garbage collector
author     Russ Cox <rsc@golang.org>
           Wed, 10 Feb 2010 22:59:39 +0000 (14:59 -0800)
committer  Russ Cox <rsc@golang.org>
           Wed, 10 Feb 2010 22:59:39 +0000 (14:59 -0800)
 * specialize sweepspan as sweepspan0 and sweepspan1.
 * in sweepspan1, inline "free" to avoid expensive mlookup.

R=iant
CC=golang-dev
https://golang.org/cl/206060
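
The second bullet targets a real cost: the old pass 1 freed each dead small object by calling the generic free, which begins with an mlookup to rediscover the object's span, size, and refcount word, one heap-map walk per object. sweepspan1 below already holds the MSpan, so the size class, object size, and gcref array are all known up front. A minimal sketch of the path being replaced, using the declarations from malloc.h in this diff (old_pass1 is an illustrative helper, not runtime code):

static void
old_pass1(byte *p, uint32 *gcrefp)
{
	// Old behavior, once per dead object: the generic free() must
	// call mlookup(p, &base, &size, &ref) to find the span metadata
	// that sweepspan1 now reads directly from the MSpan it sweeps.
	if((*gcrefp & ~(RefNoPointers|RefHasFinalizer)) == RefNone)
		free(p);
}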

src/pkg/runtime/malloc.h
src/pkg/runtime/mcentral.c
src/pkg/runtime/mgc0.c

diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 2d94872f77fd0dd0208ea0a8b657bbd960ee3f94..f018b6e216a364e39920069e0cd800532d7bd0b7 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -321,6 +321,7 @@ MSpan*      MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
 void   MHeap_Free(MHeap *h, MSpan *s);
 MSpan* MHeap_Lookup(MHeap *h, PageID p);
 MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
+void   MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
 
 void*  mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
 int32  mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
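
The new MGetSizeClassInfo centralizes arithmetic that MCentral_Grow and the sweep previously each computed by hand (the old mgc0.c even carried a "must match computation in MCentral_Grow" comment). A sketch of the invariant a caller gets from it, assuming RefcountOverhead is the one uint32 refcount word kept per object (check_sizeclass is a hypothetical helper, not part of this change):

static void
check_sizeclass(int32 sizeclass)
{
	int32 size, npages, nobj;

	// nobj objects of `size` bytes, plus one uint32 refcount word
	// each, must fit in the span's npages pages.  Assumes
	// RefcountOverhead == sizeof(uint32).
	MGetSizeClassInfo(sizeclass, &size, &npages, &nobj);
	if(size*nobj + nobj*(int32)sizeof(uint32) > (npages << PageShift))
		throw("size class layout does not fit its span");
}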
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
index 7e33e01af2beb778a6b83cffb7ddc7e6dd4f7409..ff366b1c53761daf2b0cd36d21d1564413c73835 100644
--- a/src/pkg/runtime/mcentral.c
+++ b/src/pkg/runtime/mcentral.c
@@ -157,6 +157,19 @@ MCentral_Free(MCentral *c, void *v)
        }
 }
 
+void
+MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+{
+       int32 size;
+       int32 npages;
+
+       npages = class_to_allocnpages[sizeclass];
+       size = class_to_size[sizeclass];
+       *npagesp = npages;
+       *sizep = size;
+       *nobj = (npages << PageShift) / (size + RefcountOverhead);
+}
+
 // Fetch a new span from the heap and
 // carve into objects for the free list.
 static bool
@@ -168,7 +181,7 @@ MCentral_Grow(MCentral *c)
        MSpan *s;
 
        unlock(c);
-       npages = class_to_allocnpages[c->sizeclass];
+       MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
        s = MHeap_Alloc(&mheap, npages, c->sizeclass);
        if(s == nil) {
                // TODO(rsc): Log out of memory
@@ -179,8 +192,6 @@ MCentral_Grow(MCentral *c)
        // Carve span into sequence of blocks.
        tailp = &s->freelist;
        p = (byte*)(s->start << PageShift);
-       size = class_to_size[c->sizeclass];
-       n = (npages << PageShift) / (size + RefcountOverhead);
        s->gcref = (uint32*)(p + size*n);
        for(i=0; i<n; i++) {
                v = (MLink*)p;
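
With the computation shared, MCentral_Grow's carve and the sweep now agree on a single span layout, sketched below (n and size from MGetSizeClassInfo; this diagram restates the code above rather than adding to it):

/*
 * p = (byte*)(s->start << PageShift)
 *
 *   [object 0][object 1] ... [object n-1][gcref[0] ... gcref[n-1]]
 *   ^p        ^p+size                    ^s->gcref = p + size*n
 *
 * gcref[i] is the refcount word for the object at p + i*size, which
 * is exactly how sweepspan0 and sweepspan1 below walk the span.
 */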
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index 7cc965400e4562c52d816043b34c14bfed5db37a..0870b3a6b093c83cb7fdb23495bc3293b18447d9 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -139,79 +139,115 @@ mark(void)
        }
 }
 
+// pass 0: mark RefNone with finalizer as RefFinalize and trace
 static void
-sweepspan(MSpan *s, int32 pass)
+sweepspan0(MSpan *s)
 {
-       int32 i, n, npages, size;
        byte *p;
-
-       if(s->state != MSpanInUse)
-               return;
+       uint32 ref, *gcrefp, *gcrefep;
+       int32 n, size, npages;
 
        p = (byte*)(s->start << PageShift);
        if(s->sizeclass == 0) {
                // Large block.
-               sweepblock(p, (uint64)s->npages<<PageShift, &s->gcref0, pass);
+               ref = s->gcref0;
+               if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+                       // Mark as finalizable.
+                       s->gcref0 = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+                       if(!(ref & RefNoPointers))
+                               scanblock(100, p, s->npages<<PageShift);
+               }
                return;
        }
 
        // Chunk full of small blocks.
-       // Must match computation in MCentral_Grow.
-       size = class_to_size[s->sizeclass];
-       npages = class_to_allocnpages[s->sizeclass];
-       n = (npages << PageShift) / (size + RefcountOverhead);
-       for(i=0; i<n; i++)
-               sweepblock(p+i*size, size, &s->gcref[i], pass);
-}
+       MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+       gcrefp = s->gcref;
+       gcrefep = s->gcref + n;
+       for(; gcrefp < gcrefep; gcrefp++) {
+               ref = *gcrefp;
+               if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+                       // Mark as finalizable.
+                       *gcrefp = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+                       if(!(ref & RefNoPointers))
+                               scanblock(100, p+(gcrefp-s->gcref)*size, size);
+               }
+       }
+}
 
+// pass 1: free RefNone, queue RefFinalize, reset RefSome
 static void
-sweepblock(byte *p, int64 n, uint32 *gcrefp, int32 pass)
+sweepspan1(MSpan *s)
 {
-       uint32 gcref;
-
-       gcref = *gcrefp;
-       switch(gcref & ~(RefNoPointers|RefHasFinalizer)) {
-       default:
-               throw("bad 'ref count'");
-       case RefFree:
-       case RefStack:
-               break;
-       case RefNone:
-               if(pass == 0 && (gcref & RefHasFinalizer)) {
-                       // Tentatively mark as finalizable.
-                       // Make sure anything it points at will not be collected.
-                       if(Debug > 0)
-                               printf("maybe finalize %p+%D\n", p, n);
-                       *gcrefp = RefFinalize | RefHasFinalizer | (gcref&RefNoPointers);
-                       scanblock(100, p, n);
-               } else if(pass == 1) {
-                       if(Debug > 0)
-                               printf("free %p+%D\n", p, n);
-                       free(p);
+       int32 n, npages, size;
+       byte *p;
+       uint32 ref, *gcrefp, *gcrefep;
+       MCache *c;
+
+       p = (byte*)(s->start << PageShift);
+       if(s->sizeclass == 0) {
+               // Large block.
+               ref = s->gcref0;
+               switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+               case RefNone:
+                       // Free large object.
+                       mstats.alloc -= s->npages<<PageShift;
+                       runtime_memclr(p, s->npages<<PageShift);
+                       s->gcref0 = RefFree;
+                       MHeap_Free(&mheap, s);
+                       break;
+               case RefFinalize:
+                       if(pfinq < efinq) {
+                               pfinq->p = p;
+                               pfinq->nret = 0;
+                               pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+                               ref &= ~RefHasFinalizer;
+                               if(pfinq->fn == nil)
+                                       throw("finalizer inconsistency");
+                               pfinq++;
+                       }
+                       // fall through
+               case RefSome:
+                       s->gcref0 = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+                       break;
                }
-               break;
-       case RefFinalize:
-               if(pass != 1)
-                       throw("sweepspan pass 0 RefFinalize");
-               if(pfinq < efinq) {
-                       if(Debug > 0)
-                               printf("finalize %p+%D\n", p, n);
-                       pfinq->p = p;
-                       pfinq->nret = 0;
-                       pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
-                       gcref &= ~RefHasFinalizer;
-                       if(pfinq->fn == nil)
-                               throw("getfinalizer inconsistency");
-                       pfinq++;
+               return;
+       }
+
+       // Chunk full of small blocks.
+       MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+       gcrefp = s->gcref;
+       gcrefep = s->gcref + n;
+       for(; gcrefp < gcrefep; gcrefp++, p += size) {
+               ref = *gcrefp;
+               if(ref < RefNone)       // RefFree or RefStack
+                       continue;
+               switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+               case RefNone:
+                       // Free small object.
+                       *gcrefp = RefFree;
+                       c = m->mcache;
+                       if(size > sizeof(uintptr))
+                               ((uintptr*)p)[1] = 1;   // mark as "needs to be zeroed"
+                       mstats.alloc -= size;
+                       mstats.by_size[s->sizeclass].nfree++;
+                       MCache_Free(c, p, s->sizeclass, size);
+                       break;
+               case RefFinalize:
+                       if(pfinq < efinq) {
+                               pfinq->p = p;
+                               pfinq->nret = 0;
+                               pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+                               ref &= ~RefHasFinalizer;
+                               if(pfinq->fn == nil)
+                                       throw("finalizer inconsistency");
+                               pfinq++;
+                       }
+                       // fall through
+               case RefSome:
+                       *gcrefp = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+                       break;
                }
-               // Reset for next mark+sweep.
-               *gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-               break;
-       case RefSome:
-               // Reset for next mark+sweep.
-               if(pass == 1)
-                       *gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-               break;
        }
 }
 
@@ -222,11 +258,13 @@ sweep(void)
 
        // Sweep all the spans marking blocks to be finalized.
        for(s = mheap.allspans; s != nil; s = s->allnext)
-               sweepspan(s, 0);
+               if(s->state == MSpanInUse)
+                       sweepspan0(s);
 
        // Sweep again queueing finalizers and freeing the others.
        for(s = mheap.allspans; s != nil; s = s->allnext)
-               sweepspan(s, 1);
+               if(s->state == MSpanInUse)
+                       sweepspan1(s);
 }
 
 // Semaphore, not Lock, so that the goroutine
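
Two details in the new sweep are worth calling out. First, sweepspan1's fast path, if(ref < RefNone) continue, relies on the numeric ordering of the reference states declared in malloc.h: the two non-collectable states must sort below RefNone, with the flag bits kept high. A sketch of that ordering (exact values, and the relative order of the states above RefNone, are illustrative; the test only needs RefFree and RefStack below RefNone):

enum {
	RefFree = 0,	// on a free list; must be zero
	RefStack,	// stack segment: never freed, never scanned
	RefNone,	// allocated, no references seen this cycle
	RefSome,	// allocated and referenced
	RefFinalize,	// pass 0 marked it ready for its finalizer
	RefNoPointers   = 0x80000000U,	// flag: block contains no pointers
	RefHasFinalizer = 0x40000000U,	// flag: block has a finalizer
};

Second, the inlined free writes ((uintptr*)p)[1] = 1 instead of clearing the block: the first word of a free block holds the MLink next pointer, so the second word can serve as the "needs to be zeroed" flag, deferring the memclr that the large-object path above does eagerly until the block is actually reallocated with zeroing requested.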