Cypherpunks repositories - gostls13.git/commitdiff
runtime: introduce MSpan.needzero instead of writing to span data
authorRuss Cox <rsc@golang.org>
Thu, 13 Feb 2014 16:10:31 +0000 (11:10 -0500)
committerRuss Cox <rsc@golang.org>
Thu, 13 Feb 2014 16:10:31 +0000 (11:10 -0500)
This cleans up the code significantly, and it avoids any
possible problems with madvise zeroing out some but
not all of the data.

Fixes #6400.

LGTM=dave
R=dvyukov, dave
CC=golang-codereviews
https://golang.org/cl/57680046

src/pkg/runtime/malloc.goc
src/pkg/runtime/malloc.h
src/pkg/runtime/mcentral.c
src/pkg/runtime/mgc0.c
src/pkg/runtime/mheap.c

index db2f9537a9c63d808ebdfcc9d936cd4431841915..76ea34e0a25a319d6c671d8a935ee3029feda803 100644 (file)
@@ -315,7 +315,7 @@ runtime·free(void *v)
        c = m->mcache;
        if(sizeclass == 0) {
                // Large object.
-               *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;       // mark as "needs to be zeroed"
+               s->needzero = 1;
                // Must mark v freed before calling unmarkspan and MHeap_Free:
                // they might coalesce v into other spans and change the bitmap further.
                runtime·markfreed(v, size);
index ac9e6a2883c60efd7240578d34d43d3ece84c6e4..de82c551bd311c72d1f4167de3c1020d6d1392c1 100644 (file)
@@ -412,6 +412,7 @@ struct MSpan
        uint16  ref;            // number of allocated objects in this span
        uint8   sizeclass;      // size class
        uint8   state;          // MSpanInUse etc
+       uint8   needzero;       // needs to be zeroed before allocation
        uintptr elemsize;       // computed from sizeclass or from npages
        int64   unusedsince;    // First time spotted by GC in MSpanFree state
        uintptr npreleased;     // number of pages released to the OS
@@ -501,7 +502,7 @@ struct MHeap
 extern MHeap runtime·mheap;
 
 void   runtime·MHeap_Init(MHeap *h);
-MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed);
+MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
 void   runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
 MSpan* runtime·MHeap_Lookup(MHeap *h, void *v);
 MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v);
index d96a73394da2a7123900913fbc70a6c8497c2dc6..0dd5ac0fe5093308214d4efd56453de0f3b02fe6 100644 (file)
@@ -147,7 +147,7 @@ MCentral_Free(MCentral *c, void *v)
                size = runtime·class_to_size[c->sizeclass];
                runtime·MSpanList_Remove(s);
                runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
-               *(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
+               s->needzero = 1;
                s->freelist = nil;
                c->nfree -= (s->npages << PageShift) / size;
                runtime·unlock(c);
@@ -186,7 +186,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *
        // s is completely freed, return it to the heap.
        size = runtime·class_to_size[c->sizeclass];
        runtime·MSpanList_Remove(s);
-       *(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
+       s->needzero = 1;
        s->freelist = nil;
        c->nfree -= (s->npages << PageShift) / size;
        runtime·unlock(c);
index a9232b334b5b63a4bd61e89f499de031775ff210..688d3f47101f8956b2eeb784c9680d5d9bf5fb08 100644 (file)
@@ -1298,8 +1298,10 @@ markroot(ParFor *desc, uint32 i)
                        SpecialFinalizer *spf;
 
                        s = allspans[spanidx];
-                       if(s->sweepgen != sg)
+                       if(s->sweepgen != sg) {
+                               runtime·printf("sweep %d %d\n", s->sweepgen, sg);
                                runtime·throw("gc: unswept span");
+                       }
                        if(s->state != MSpanInUse)
                                continue;
                        // The garbage collector ignores type pointers stored in MSpan.types:
@@ -1826,7 +1828,7 @@ runtime·MSpan_Sweep(MSpan *s)
                if(cl == 0) {
                        // Free large span.
                        runtime·unmarkspan(p, 1<<PageShift);
-                       *(uintptr*)p = (uintptr)0xdeaddeaddeaddeadll;   // needs zeroing
+                       s->needzero = 1;
                        // important to set sweepgen before returning it to heap
                        runtime·atomicstore(&s->sweepgen, sweepgen);
                        sweepgenset = true;
index 05cc80a3451dcfa0a006da9e2adb0dda8eedeb91..5c5a6fe1643434ec413ce39b019969ee84c9345c 100644 (file)
@@ -168,7 +168,7 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
 // Allocate a new span of npage pages from the heap
 // and record its size class in the HeapMap and HeapMapCache.
 MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
        MSpan *s;
 
@@ -189,8 +189,11 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool
                }
        }
        runtime·unlock(h);
-       if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
-               runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+       if(s != nil) {
+               if(needzero && s->needzero)
+                       runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+               s->needzero = 0;
+       }
        return s;
 }
 
@@ -233,26 +236,8 @@ HaveSpan:
        s->state = MSpanInUse;
        mstats.heap_idle -= s->npages<<PageShift;
        mstats.heap_released -= s->npreleased<<PageShift;
-       if(s->npreleased > 0) {
-               // We have called runtime·SysUnused with these pages, and on
-               // Unix systems it called madvise.  At this point at least
-               // some BSD-based kernels will return these pages either as
-               // zeros or with the old data.  For our caller, the first word
-               // in the page indicates whether the span contains zeros or
-               // not (this word was set when the span was freed by
-               // MCentral_Free or runtime·MCentral_FreeSpan).  If the first
-               // page in the span is returned as zeros, and some subsequent
-               // page is returned with the old data, then we will be
-               // returning a span that is assumed to be all zeros, but the
-               // actual data will not be all zeros.  Avoid that problem by
-               // explicitly marking the span as not being zeroed, just in
-               // case.  The beadbead constant we use here means nothing, it
-               // is just a unique constant not seen elsewhere in the
-               // runtime, as a clue in case it turns up unexpectedly in
-               // memory or in a stack trace.
+       if(s->npreleased > 0)
                runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
-               *(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
-       }
        s->npreleased = 0;
 
        if(s->npages > npage) {
@@ -266,7 +251,7 @@ HaveSpan:
                        h->spans[p-1] = s;
                h->spans[p] = t;
                h->spans[p+t->npages-1] = t;
-               *(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
+               t->needzero = s->needzero;
                runtime·atomicstore(&t->sweepgen, h->sweepgen);
                t->state = MSpanInUse;
                MHeap_FreeLocked(h, t);
@@ -413,7 +398,6 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
-       uintptr *sp, *tp;
        MSpan *t;
        PageID p;
 
@@ -427,7 +411,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
        mstats.heap_idle += s->npages<<PageShift;
        s->state = MSpanFree;
        runtime·MSpanList_Remove(s);
-       sp = (uintptr*)(s->start<<PageShift);
        // Stamp newly unused spans. The scavenger will use that
        // info to potentially give back some pages to the OS.
        s->unusedsince = runtime·nanotime();
@@ -437,13 +420,10 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
        p = s->start;
        p -= (uintptr)h->arena_start >> PageShift;
        if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
-               if(t->npreleased == 0) {  // cant't touch this otherwise
-                       tp = (uintptr*)(t->start<<PageShift);
-                       *tp |= *sp;     // propagate "needs zeroing" mark
-               }
                s->start = t->start;
                s->npages += t->npages;
                s->npreleased = t->npreleased; // absorb released pages
+               s->needzero |= t->needzero;
                p -= t->npages;
                h->spans[p] = s;
                runtime·MSpanList_Remove(t);
@@ -451,12 +431,9 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
                runtime·FixAlloc_Free(&h->spanalloc, t);
        }
        if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
-               if(t->npreleased == 0) {  // cant't touch this otherwise
-                       tp = (uintptr*)(t->start<<PageShift);
-                       *sp |= *tp;     // propagate "needs zeroing" mark
-               }
                s->npages += t->npages;
                s->npreleased += t->npreleased;
+               s->needzero |= t->needzero;
                h->spans[p + s->npages - 1] = s;
                runtime·MSpanList_Remove(t);
                t->state = MSpanDead;
@@ -601,6 +578,7 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
        span->types.compression = MTypes_Empty;
        span->specialLock.key = 0;
        span->specials = nil;
+       span->needzero = 0;
 }
 
 // Initialize an empty doubly-linked list.