Currently, per-sizeclass stats are lost when an MCache is destroyed; this patch fixes that.
Also, only update mstats.heap_alloc on heap operations, because that is the only
stat that needs to be promptly updated. Everything else needs to be up to date only in ReadMemStats().
R=golang-dev, remyoudompheng, dave, iant
CC=golang-dev
https://golang.org/cl/9207047
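
For context, the intent is that a ReadMemStats-style path flushes every live MCache through purgecachedstats before reporting, so the per-sizeclass counters added below are never lost. A minimal sketch of such a flush, mirroring the allm loop in the hunk further down (the helper name flushallmcaches is an assumption for illustration, not part of this CL):

// Sketch only: fold every live MCache's local stats into mstats.
// runtime·allm, alllink and mcache are the runtime's M-list fields;
// the function name is hypothetical.
static void
flushallmcaches(void)
{
	M *mp;
	MCache *c;

	// Caller holds the heap or GC lock, per purgecachedstats' contract.
	for(mp=runtime·allm; mp; mp=mp->alllink) {
		c = mp->mcache;
		if(c==nil)
			continue;
		runtime·purgecachedstats(c);	// now also flushes local_by_size
	}
}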
void
runtime·purgecachedstats(MCache *c)
{
+ int32 i;
+
// Protected by either heap or GC lock.
mstats.heap_alloc += c->local_cachealloc;
c->local_cachealloc = 0;
c->local_alloc = 0;
mstats.total_alloc += c->local_total_alloc;
c->local_total_alloc = 0;
+ for(i=0; i<nelem(c->local_by_size); i++) {
+ mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
+ c->local_by_size[i].nmalloc = 0;
+ mstats.by_size[i].nfree += c->local_by_size[i].nfree;
+ c->local_by_size[i].nfree = 0;
+ }
}
uintptr runtime·sizeof_C_MStats = sizeof(MStats);
struct MCache
{
- MCacheList list[NumSizeClasses];
+ // The following members are accessed on every malloc,
+ // so they are grouped here for better caching.
+ int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ // The rest is not accessed on every malloc.
+ MCacheList list[NumSizeClasses];
intptr local_objects; // objects allocated (or freed) from cache since last lock of heap
intptr local_alloc; // bytes allocated (or freed) since last lock of heap
uintptr local_total_alloc; // bytes allocated (even if freed) since last lock of heap
uintptr local_nmalloc; // number of mallocs since last lock of heap
uintptr local_nfree; // number of frees since last lock of heap
uintptr local_nlookup; // number of pointer lookups since last lock of heap
- int32 next_sample; // trigger heap sample after allocating this many bytes
// Statistics about allocation size classes since last lock of heap
struct {
uintptr nmalloc;
if(c==nil)
continue;
runtime·purgecachedstats(c);
- for(i=0; i<nelem(c->local_by_size); i++) {
- mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
- c->local_by_size[i].nmalloc = 0;
- mstats.by_size[i].nfree += c->local_by_size[i].nfree;
- c->local_by_size[i].nfree = 0;
- }
}
mstats.stacks_inuse = stacks_inuse;
}
MSpan *s;
runtime·lock(h);
- runtime·purgecachedstats(m->mcache);
+ mstats.heap_alloc += m->mcache->local_cachealloc;
+ m->mcache->local_cachealloc = 0;
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
runtime·lock(h);
- runtime·purgecachedstats(m->mcache);
+ mstats.heap_alloc += m->mcache->local_cachealloc;
+ m->mcache->local_cachealloc = 0;
mstats.heap_inuse -= s->npages<<PageShift;
if(acct) {
mstats.heap_alloc -= s->npages<<PageShift;