From 86e3cb8da5a22794bbbdf34e934dc180e1c86d01 Mon Sep 17 00:00:00 2001
From: Russ Cox <rsc@golang.org>
Date: Thu, 13 Feb 2014 11:10:31 -0500
Subject: [PATCH] runtime: introduce MSpan.needzero instead of writing to span
 data

This cleans up the code significantly, and it avoids any
possible problems with madvise zeroing out some but not
all of the data.

Fixes #6400.

LGTM=dave
R=dvyukov, dave
CC=golang-codereviews
https://golang.org/cl/57680046
---
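For reviewers, a minimal standalone sketch of the needzero protocol (the
Span struct and the span_free/span_alloc helpers below are illustrative
stand-ins, not the runtime's API): the free paths set a dirty bit in the
span header, and the allocation path zeroes the span's pages only when
the caller wants zeroed memory and the bit is set. The old scheme kept
this mark in the first word of the span's own memory; once
runtime·SysUnused has madvised the pages away, a BSD-style kernel may
return some of them zeroed and others with their old contents, so that
in-band word could claim the whole span is zero while later pages still
hold stale data. A bit stored out of band in MSpan cannot be affected by
what happens to the pages.

	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-in for MSpan: the dirty bit lives in the
	   span header, never in the span's own pages. */
	typedef struct Span Span;
	struct Span
	{
		unsigned char *base;	/* start of span memory */
		unsigned long len;	/* span size in bytes */
		unsigned char needzero;	/* 1 => pages may hold stale data */
	};

	/* Free path (cf. MCentral_Free, MSpan_Sweep): mark the span dirty. */
	static void
	span_free(Span *s)
	{
		s->needzero = 1;
	}

	/* Alloc path (cf. MHeap_Alloc): zero only when the caller asked
	   for zeroed memory and the span is actually dirty; the span is
	   in a known state either way, so clear the bit. */
	static void*
	span_alloc(Span *s, int needzero)
	{
		if(needzero && s->needzero)
			memset(s->base, 0, s->len);
		s->needzero = 0;
		return s->base;
	}

	int
	main(void)
	{
		static unsigned char pages[4096];
		Span s = { pages, sizeof pages, 0 };

		pages[0] = 0xAA;	/* stale data from a previous user */
		span_free(&s);		/* span returns to the heap dirty */
		span_alloc(&s, 1);	/* next caller wants zeroed memory */
		printf("first byte after alloc: %d\n", pages[0]);	/* prints 0 */
		return 0;
	}

Note also that when MHeap_FreeLocked coalesces adjacent free spans, the
dirty bits are OR-ed together (s->needzero |= t->needzero), so a merged
span is considered dirty if any of its parts was.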
 src/pkg/runtime/malloc.goc |  2 +-
 src/pkg/runtime/malloc.h   |  3 ++-
 src/pkg/runtime/mcentral.c |  4 ++--
 src/pkg/runtime/mgc0.c     |  6 ++++--
 src/pkg/runtime/mheap.c    | 44 +++++++++++---------------------------------
 5 files changed, 20 insertions(+), 39 deletions(-)

diff --git a/src/pkg/runtime/malloc.goc b/src/pkg/runtime/malloc.goc
index db2f9537a9..76ea34e0a2 100644
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -315,7 +315,7 @@ runtime·free(void *v)
 	c = m->mcache;
 	if(sizeclass == 0) {
 		// Large object.
-		*(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
+		s->needzero = 1;
 		// Must mark v freed before calling unmarkspan and MHeap_Free:
 		// they might coalesce v into other spans and change the bitmap further.
 		runtime·markfreed(v, size);
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index ac9e6a2883..de82c551bd 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -412,6 +412,7 @@ struct MSpan
 	uint16	ref;		// number of allocated objects in this span
 	uint8	sizeclass;	// size class
 	uint8	state;		// MSpanInUse etc
+	uint8	needzero;	// needs to be zeroed before allocation
 	uintptr	elemsize;	// computed from sizeclass or from npages
 	int64	unusedsince;	// First time spotted by GC in MSpanFree state
 	uintptr	npreleased;	// number of pages released to the OS
@@ -501,7 +502,7 @@ struct MHeap
 extern MHeap runtime·mheap;
 
 void	runtime·MHeap_Init(MHeap *h);
-MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed);
+MSpan*	runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
 void	runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
 MSpan*	runtime·MHeap_Lookup(MHeap *h, void *v);
 MSpan*	runtime·MHeap_LookupMaybe(MHeap *h, void *v);
diff --git a/src/pkg/runtime/mcentral.c b/src/pkg/runtime/mcentral.c
index d96a73394d..0dd5ac0fe5 100644
--- a/src/pkg/runtime/mcentral.c
+++ b/src/pkg/runtime/mcentral.c
@@ -147,7 +147,7 @@ MCentral_Free(MCentral *c, void *v)
 		size = runtime·class_to_size[c->sizeclass];
 		runtime·MSpanList_Remove(s);
 		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
-		*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
+		s->needzero = 1;
 		s->freelist = nil;
 		c->nfree -= (s->npages << PageShift) / size;
 		runtime·unlock(c);
@@ -186,7 +186,7 @@ runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
 	// s is completely freed, return it to the heap.
 	size = runtime·class_to_size[c->sizeclass];
 	runtime·MSpanList_Remove(s);
-	*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
+	s->needzero = 1;
 	s->freelist = nil;
 	c->nfree -= (s->npages << PageShift) / size;
 	runtime·unlock(c);
diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c
index a9232b334b..688d3f4710 100644
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -1298,8 +1298,10 @@ markroot(ParFor *desc, uint32 i)
 		SpecialFinalizer *spf;
 
 		s = allspans[spanidx];
-		if(s->sweepgen != sg)
+		if(s->sweepgen != sg) {
+			runtime·printf("sweep %d %d\n", s->sweepgen, sg);
 			runtime·throw("gc: unswept span");
+		}
 		if(s->state != MSpanInUse)
 			continue;
 		// The garbage collector ignores type pointers stored in MSpan.types:
@@ -1826,7 +1828,7 @@ runtime·MSpan_Sweep(MSpan *s)
 	if(cl == 0) {
 		// Free large span.
 		runtime·unmarkspan(p, 1<<PageShift);
-		*(uintptr*)p = (uintptr)0xdeaddeaddeaddeadll;	// needs zeroing
+		s->needzero = 1;
 		// important to set sweepgen before returning it to heap
 		runtime·atomicstore(&s->sweepgen, sweepgen);
 		sweepgenset = true;
diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c
index 05cc80a345..5c5a6fe164 100644
--- a/src/pkg/runtime/mheap.c
+++ b/src/pkg/runtime/mheap.c
@@ -168,7 +168,7 @@ MHeap_Reclaim(MHeap *h, uintptr npage)
 // Allocate a new span of npage pages from the heap
 // and record its size class in the HeapMap and HeapMapCache.
 MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed)
+runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
 {
 	MSpan *s;
 
@@ -189,8 +189,11 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool zeroed)
 		}
 	}
 	runtime·unlock(h);
-	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
-		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+	if(s != nil) {
+		if(needzero && s->needzero)
+			runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
+		s->needzero = 0;
+	}
 	return s;
 }
 
@@ -233,26 +236,8 @@ HaveSpan:
 	s->state = MSpanInUse;
 	mstats.heap_idle -= s->npages<<PageShift;
 	mstats.heap_released -= s->npreleased<<PageShift;
-	if(s->npreleased > 0) {
-		// We have called runtime·SysUnused with these pages, and on
-		// Unix systems it called madvise.  At this point at least
-		// some BSD-based kernels will return these pages either as
-		// zeros or with the old data.  For our caller, the first word
-		// in the page indicates whether the span contains zeros or
-		// not (this word was set when the span was freed by
-		// MCentral_Free or runtime·MCentral_FreeSpan).  If the first
-		// page in the span is returned as zeros, and some subsequent
-		// page is returned with the old data, then we will be
-		// returning a span that is assumed to be all zeros, but the
-		// actual data will not be all zeros.  Avoid that problem by
-		// explicitly marking the span as not being zeroed, just in
-		// case.  The beadbead constant we use here means nothing, it
-		// is just a unique constant not seen elsewhere in the
-		// runtime, as a clue in case it turns up unexpectedly in
-		// memory or in a stack trace.
+	if(s->npreleased > 0)
 		runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
-		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
-	}
 	s->npreleased = 0;
 
 	if(s->npages > npage) {
@@ -266,7 +251,7 @@ HaveSpan:
 		h->spans[p-1] = s;
 		h->spans[p] = t;
 		h->spans[p+t->npages-1] = t;
-		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
+		t->needzero = s->needzero;
 		runtime·atomicstore(&t->sweepgen, h->sweepgen);
 		t->state = MSpanInUse;
 		MHeap_FreeLocked(h, t);
@@ -413,7 +398,6 @@ runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
 static void
 MHeap_FreeLocked(MHeap *h, MSpan *s)
 {
-	uintptr *sp, *tp;
 	MSpan *t;
 	PageID p;
 
@@ -427,7 +411,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	mstats.heap_idle += s->npages<<PageShift;
 	s->state = MSpanFree;
 	runtime·MSpanList_Remove(s);
-	sp = (uintptr*)(s->start<<PageShift);
 	// Stamp newly unused spans. The scavenger will use that
 	// info to potentially give back some pages to the OS.
 	s->unusedsince = runtime·nanotime();
@@ -437,13 +420,10 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 	p = s->start;
 	p -= (uintptr)h->arena_start >> PageShift;
 	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
-		if(t->npreleased == 0) { // cant't touch this otherwise
-			tp = (uintptr*)(t->start<<PageShift);
-			*tp |= *sp;	// propagate "needs zeroing" mark
-		}
 		s->start = t->start;
 		s->npages += t->npages;
 		s->npreleased = t->npreleased; // absorb released pages
+		s->needzero |= t->needzero;
 		p -= t->npages;
 		h->spans[p] = s;
 		runtime·MSpanList_Remove(t);
@@ -451,12 +431,9 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
 		runtime·FixAlloc_Free(&h->spanalloc, t);
 	}
 	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
-		if(t->npreleased == 0) { // cant't touch this otherwise
-			tp = (uintptr*)(t->start<<PageShift);
-			*sp |= *tp;	// propagate "needs zeroing" mark
-		}
 		s->npages += t->npages;
 		s->npreleased += t->npreleased;
+		s->needzero |= t->needzero;
 		h->spans[p + s->npages - 1] = s;
 		runtime·MSpanList_Remove(t);
 		t->state = MSpanDead;
@@ -601,6 +578,7 @@ runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
 	span->types.compression = MTypes_Empty;
 	span->specialLock.key = 0;
 	span->specials = nil;
+	span->needzero = 0;
 }
 
 // Initialize an empty doubly-linked list.
-- 
2.48.1