if(size == 0)
size = 1;
- mstats.nmalloc++;
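+ // Charge the allocation to this M's cache; the cached counters are
+ // folded into the global mstats later by runtime·purgecachedstats.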
+ c = m->mcache;
+ c->local_nmalloc++;
if(size <= MaxSmallSize) {
// Allocate from mcache free lists.
sizeclass = runtime·SizeToClass(size);
size = runtime·class_to_size[sizeclass];
- c = m->mcache;
v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
if(v == nil)
runtime·throw("out of memory");
- mstats.alloc += size;
- mstats.total_alloc += size;
- mstats.by_size[sizeclass].nmalloc++;
+ c->local_alloc += size;
+ c->local_total_alloc += size;
+ c->local_by_size[sizeclass].nmalloc++;
} else {
// TODO(rsc): Report tracebacks for very large allocations.
if(s == nil)
runtime·throw("out of memory");
size = npages<<PageShift;
- mstats.alloc += size;
- mstats.total_alloc += size;
+ c->local_alloc += size;
+ c->local_total_alloc += size;
v = (void*)(s->start << PageShift);
// setup for mark sweep
// Find size class for v.
sizeclass = s->sizeclass;
+ c = m->mcache;
if(sizeclass == 0) {
// Large object.
size = s->npages<<PageShift;
runtime·MHeap_Free(&runtime·mheap, s, 1);
} else {
// Small object.
- c = m->mcache;
size = runtime·class_to_size[sizeclass];
if(size > sizeof(uintptr))
((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
// it might coalesce v and other blocks into a bigger span
// and change the bitmap further.
runtime·markfreed(v, size);
- mstats.by_size[sizeclass].nfree++;
+ c->local_by_size[sizeclass].nfree++;
runtime·MCache_Free(c, v, sizeclass, size);
}
- mstats.alloc -= size;
+ c->local_alloc -= size;
if(prof)
runtime·MProf_Free(v, size);
m->mallocing = 0;
byte *p;
MSpan *s;
- mstats.nlookup++;
+ m->mcache->local_nlookup++;
s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
if(sp)
*sp = s;
}
n = runtime·class_to_size[s->sizeclass];
- i = ((byte*)v - p)/n;
- if(base)
+ if(base) {
+ i = ((byte*)v - p)/n;
*base = p + i*n;
+ }
if(size)
*size = n;
return c;
}
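+// Flush m's cached allocation statistics into the global mstats.
+// The caller must hold either the heap lock or the GC lock.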
+void
+runtime·purgecachedstats(M* m)
+{
+ MCache *c;
+
+ // Protected by either heap or GC lock.
+ c = m->mcache;
+ mstats.heap_alloc += c->local_cachealloc;
+ c->local_cachealloc = 0;
+ mstats.heap_objects += c->local_objects;
+ c->local_objects = 0;
+ mstats.nmalloc += c->local_nmalloc;
+ c->local_nmalloc = 0;
+ mstats.nfree += c->local_nfree;
+ c->local_nfree = 0;
+ mstats.nlookup += c->local_nlookup;
+ c->local_nlookup = 0;
+ mstats.alloc += c->local_alloc;
+ c->local_alloc = 0;
+ mstats.total_alloc += c->local_total_alloc;
+ c->local_total_alloc = 0;
+}
+
uintptr runtime·sizeof_C_MStats = sizeof(MStats);
#define MaxArena32 (2U<<30)
void*
runtime·stackalloc(uint32 n)
{
- void *v;
- uintptr sys0;
-
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
runtime·printf("stackalloc: in malloc, size=%d want %d\n", FixedStack, n);
runtime·throw("stackalloc");
}
- sys0 = m->stackalloc->sys;
- v = runtime·FixAlloc_Alloc(m->stackalloc);
- mstats.stacks_inuse += FixedStack;
- mstats.stacks_sys += m->stackalloc->sys - sys0;
- return v;
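+ // Stack usage is now accounted in cachestats, which sums
+ // m->stackalloc->inuse and ->sys across all M's.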
+ return runtime·FixAlloc_Alloc(m->stackalloc);
}
return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
}
void
runtime·stackfree(void *v, uintptr n)
{
- uintptr sys0;
-
if(m->mallocing || m->gcing || n == FixedStack) {
- sys0 = m->stackalloc->sys;
runtime·FixAlloc_Free(m->stackalloc, v);
- mstats.stacks_inuse -= FixedStack;
- mstats.stacks_sys += m->stackalloc->sys - sys0;
return;
}
runtime·free(v);
// Shared with Go: if you edit this structure, also edit extern.go.
struct MStats
{
- // General statistics. No locking; approximate.
+ // General statistics.
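+ // Except for sys (see below), these are flushed from the per-M
+ // caches by runtime·purgecachedstats.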
uint64 alloc; // bytes allocated and still in use
uint64 total_alloc; // bytes allocated (even if freed)
- uint64 sys; // bytes obtained from system (should be sum of xxx_sys below)
+ uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
uint64 nlookup; // number of pointer lookups
uint64 nmalloc; // number of mallocs
uint64 nfree; // number of frees
bool debuggc;
// Statistics about allocation size classes.
- // No locking; approximate.
struct {
uint32 size;
uint64 nmalloc;
{
MCacheList list[NumSizeClasses];
uint64 size;
- int64 local_alloc; // bytes allocated (or freed) since last lock of heap
- int64 local_objects; // objects allocated (or freed) since last lock of heap
+ int64 local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
+ int64 local_objects; // objects allocated (or freed) from cache since last lock of heap
+ int64 local_alloc; // bytes allocated and still in use since last lock of heap
+ int64 local_total_alloc; // bytes allocated (even if freed) since last lock of heap
+ int64 local_nmalloc; // number of mallocs since last lock of heap
+ int64 local_nfree; // number of frees since last lock of heap
+ int64 local_nlookup; // number of pointer lookups since last lock of heap
int32 next_sample; // trigger heap sample after allocating this many bytes
+ // Statistics about allocation size classes since last lock of heap.
+ struct {
+ int64 nmalloc;
+ int64 nfree;
+ } local_by_size[NumSizeClasses];
};
void* runtime·MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
void runtime·unmarkspan(void *v, uintptr size);
bool runtime·blockspecial(void*);
void runtime·setblockspecial(void*);
+void runtime·purgecachedstats(M*);
enum
{
v->next = nil;
}
}
- c->local_alloc += size;
+ c->local_cachealloc += size;
c->local_objects++;
return v;
}
l->list = p;
l->nlist++;
c->size += size;
- c->local_alloc -= size;
+ c->local_cachealloc -= size;
c->local_objects--;
if(l->nlist >= MaxMCacheListLen) {
// Mark freed; restore block boundary bit.
*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
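+ // Both the large and small object paths below charge the free
+ // to this M's cache.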
+ c = m->mcache;
if(s->sizeclass == 0) {
// Free large span.
runtime·unmarkspan(p, 1<<PageShift);
runtime·MHeap_Free(&runtime·mheap, s, 1);
} else {
// Free small object.
- c = m->mcache;
if(size > sizeof(uintptr))
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
- mstats.by_size[s->sizeclass].nfree++;
+ c->local_by_size[s->sizeclass].nfree++;
runtime·MCache_Free(c, p, s->sizeclass, size);
}
- mstats.alloc -= size;
- mstats.nfree++;
+ c->local_alloc -= size;
+ c->local_nfree++;
}
}
}
{
M *m;
MCache *c;
+ int32 i;
+ uint64 stacks_inuse;
+ uint64 stacks_sys;
+
+ stacks_inuse = 0;
+ stacks_sys = 0;
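+ // Flush every M's cached counters into mstats and fold its
+ // per-size-class counts and stack allocator usage into the totals.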
for(m=runtime·allm; m; m=m->alllink) {
+ runtime·purgecachedstats(m);
+ stacks_inuse += m->stackalloc->inuse;
+ stacks_sys += m->stackalloc->sys;
c = m->mcache;
- mstats.heap_alloc += c->local_alloc;
- c->local_alloc = 0;
- mstats.heap_objects += c->local_objects;
- c->local_objects = 0;
+ for(i=0; i<nelem(c->local_by_size); i++) {
+ mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
+ c->local_by_size[i].nmalloc = 0;
+ mstats.by_size[i].nfree += c->local_by_size[i].nfree;
+ c->local_by_size[i].nfree = 0;
+ }
}
+ mstats.stacks_inuse = stacks_inuse;
+ mstats.stacks_sys = stacks_sys;
}
void
sweep();
t2 = runtime·nanotime();
stealcache();
+ cachestats();
mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
m->gcing = 0;
MSpan *s;
runtime·lock(h);
- mstats.heap_alloc += m->mcache->local_alloc;
- m->mcache->local_alloc = 0;
- mstats.heap_objects += m->mcache->local_objects;
- m->mcache->local_objects = 0;
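+ // The heap lock is held, so the cached stats can be flushed here.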
+ runtime·purgecachedstats(m);
s = MHeap_AllocLocked(h, npage, sizeclass);
if(s != nil) {
mstats.heap_inuse += npage<<PageShift;
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
runtime·lock(h);
- mstats.heap_alloc += m->mcache->local_alloc;
- m->mcache->local_alloc = 0;
- mstats.heap_objects += m->mcache->local_objects;
- m->mcache->local_objects = 0;
+ runtime·purgecachedstats(m);
mstats.heap_inuse -= s->npages<<PageShift;
if(acct) {
mstats.heap_alloc -= s->npages<<PageShift;
b := runtime.Alloc(uintptr(j))
during := runtime.MemStats.Alloc
runtime.Free(b)
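+ // Allocation stats are now cached per M; run a GC so they are
+ // flushed into MemStats before checking.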
+ runtime.GC()
if a := runtime.MemStats.Alloc; a != 0 {
println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)")
panic("fail")
fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats)
}
n3 := stats.Alloc
+ runtime.GC()
for j := 0; j < count; j++ {
i := j
if *reverse {
panic("fail")
}
runtime.Free(b[i])
+ runtime.GC()
if stats.Alloc != uint64(alloc-n) {
println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n)
panic("fail")