runtime: write memory profile statistics to the heap dump.
author	Keith Randall <khr@golang.org>
Thu, 8 May 2014 15:35:49 +0000 (08:35 -0700)
committer	Keith Randall <khr@golang.org>
Thu, 8 May 2014 15:35:49 +0000 (08:35 -0700)
LGTM=rsc
R=rsc, khr
CC=golang-codereviews
https://golang.org/cl/97010043

src/pkg/runtime/heapdump.c
src/pkg/runtime/malloc.h
src/pkg/runtime/mprof.goc
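
For context, the records added below land in the heap dump produced by the runtime/debug.WriteHeapDump entry point, which heapdump.c implements. A minimal sketch of producing a dump that would now carry TagMemProf and TagAllocSample records (the file name and error handling are illustrative, and WriteHeapDump is assumed to be available as in this era of the tree):

package main

import (
	"os"
	"runtime/debug"
)

func main() {
	// Write a heap dump to a file; with this change the dump also
	// contains TagMemProf and TagAllocSample records.
	f, err := os.Create("heap.dump")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	debug.WriteHeapDump(f.Fd())
}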

diff --git a/src/pkg/runtime/heapdump.c b/src/pkg/runtime/heapdump.c
index 9132e2c186ac121e51dbaa57002f528b1b608eb6..42d1601aa1aab0e2d592e4a13ea01bf9ca6e9a83 100644 (file)
@@ -49,6 +49,8 @@ enum {
        TagBss = 13,
        TagDefer = 14,
        TagPanic = 15,
+       TagMemProf = 16,
+       TagAllocSample = 17,
 
        TypeInfo_Conservative = 127,
 };
@@ -689,6 +691,74 @@ dumpmemstats(void)
        dumpint(mstats.numgc);
 }
 
+static void
+dumpmemprof_callback(Bucket *b, uintptr nstk, uintptr *stk, uintptr size, uintptr allocs, uintptr frees)
+{
+       uintptr i, pc;
+       Func *f;
+       byte buf[20];
+       String file;
+       int32 line;
+
+       dumpint(TagMemProf);
+       dumpint((uintptr)b);
+       dumpint(size);
+       dumpint(nstk);
+       for(i = 0; i < nstk; i++) {
+               pc = stk[i];
+               f = runtime·findfunc(pc);
+               if(f == nil) {
+                       runtime·snprintf(buf, sizeof(buf), "%X", (uint64)pc);
+                       dumpcstr((int8*)buf);
+                       dumpcstr("?");
+                       dumpint(0);
+               } else {
+                       dumpcstr(runtime·funcname(f));
+                       // TODO: Why do we need to back up to a call instruction here?
+                       // Maybe profiler should do this.
+                       if(i > 0 && pc > f->entry) {
+                               if(thechar == '6' || thechar == '8')
+                                       pc--;
+                               else
+                                       pc -= 4; // arm, etc
+                       }
+                       line = runtime·funcline(f, pc, &file);
+                       dumpstr(file);
+                       dumpint(line);
+               }
+       }
+       dumpint(allocs);
+       dumpint(frees);
+}
+
+static void
+dumpmemprof(void)
+{
+       MSpan *s, **allspans;
+       uint32 spanidx;
+       Special *sp;
+       SpecialProfile *spp;
+       byte *p;
+
+       runtime·iterate_memprof(dumpmemprof_callback);
+
+       allspans = runtime·mheap.allspans;
+       for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
+               s = allspans[spanidx];
+               if(s->state != MSpanInUse)
+                       continue;
+               for(sp = s->specials; sp != nil; sp = sp->next) {
+                       if(sp->kind != KindSpecialProfile)
+                               continue;
+                       spp = (SpecialProfile*)sp;
+                       p = (byte*)((s->start << PageShift) + spp->offset);
+                       dumpint(TagAllocSample);
+                       dumpint((uintptr)p);
+                       dumpint((uintptr)spp->b);
+               }
+       }
+}
+
 static void
 mdump(G *gp)
 {
@@ -713,6 +783,7 @@ mdump(G *gp)
        dumpms();
        dumproots();
        dumpmemstats();
+       dumpmemprof();
        dumpint(TagEOF);
        flush();
 
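For reference, dumpmemprof_callback writes a TagMemProf record as: bucket identifier, allocation size, frame count, then for each frame a function name, file name, and line number, followed by the alloc and free counts; dumpmemprof writes a TagAllocSample record as an object address followed by its bucket identifier. Below is a sketch of a reader for these two record bodies, assuming the encoding used elsewhere in heapdump.c (dumpint emits a uvarint, dumpstr/dumpcstr emit a uvarint length followed by the bytes); the package, type, and function names are hypothetical, not part of this CL.

package heapdumpreader

import (
	"bufio"
	"encoding/binary"
	"io"
)

const (
	tagMemProf     = 16 // TagMemProf in heapdump.c
	tagAllocSample = 17 // TagAllocSample in heapdump.c
)

// frame corresponds to one iteration of the stack loop in
// dumpmemprof_callback: function name, file name, line number.
type frame struct {
	fn   string
	file string
	line uint64
}

// memProfRecord holds the fields of a TagMemProf record, in the order
// dumpmemprof_callback writes them.
type memProfRecord struct {
	bucket uint64 // address of the runtime Bucket, used only as a key
	size   uint64 // allocation size for this bucket
	stack  []frame
	allocs uint64
	frees  uint64
}

// allocSample holds the fields of a TagAllocSample record written by dumpmemprof.
type allocSample struct {
	addr   uint64 // address of the sampled heap object
	bucket uint64 // key tying the object back to a memProfRecord
}

// readInt reads one dumpint value (assumed uvarint encoding).
func readInt(r *bufio.Reader) uint64 {
	v, err := binary.ReadUvarint(r)
	if err != nil {
		panic(err) // sketch only: no real error handling
	}
	return v
}

// readStr reads one dumpstr/dumpcstr value (assumed uvarint length + bytes).
func readStr(r *bufio.Reader) string {
	buf := make([]byte, readInt(r))
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}
	return string(buf)
}

// readMemProf decodes a TagMemProf record body; the tag itself has already
// been consumed by the caller's record-dispatch loop.
func readMemProf(r *bufio.Reader) memProfRecord {
	rec := memProfRecord{bucket: readInt(r), size: readInt(r)}
	nstk := readInt(r)
	for i := uint64(0); i < nstk; i++ {
		rec.stack = append(rec.stack, frame{fn: readStr(r), file: readStr(r), line: readInt(r)})
	}
	rec.allocs = readInt(r)
	rec.frees = readInt(r)
	return rec
}

// readAllocSample decodes a TagAllocSample record body: object address, bucket.
func readAllocSample(r *bufio.Reader) allocSample {
	return allocSample{addr: readInt(r), bucket: readInt(r)}
}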
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index dbea7ad1358d8275723454621397ac7ee9e092b1..798c130ad57ad2c87f353ab4b5d98d59a7ec82ca 100644 (file)
@@ -570,6 +570,7 @@ enum
 void   runtime·MProf_Malloc(void*, uintptr);
 void   runtime·MProf_Free(Bucket*, uintptr, bool);
 void   runtime·MProf_GC(void);
+void   runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr));
 int32  runtime·gcprocs(void);
 void   runtime·helpgc(int32 nproc);
 void   runtime·gchelper(void);
diff --git a/src/pkg/runtime/mprof.goc b/src/pkg/runtime/mprof.goc
index a1659a7b4288137c2a0918eff9e643eaa816cde1..9c23a16f88d22bd8e2b940b9aa867abe477c725d 100644 (file)
@@ -309,6 +309,18 @@ func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
        runtime·unlock(&proflock);
 }
 
+void
+runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr))
+{
+       Bucket *b;
+
+       runtime·lock(&proflock);
+       for(b=mbuckets; b; b=b->allnext) {
+               callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
+       }
+       runtime·unlock(&proflock);
+}
+
 // Must match BlockProfileRecord in debug.go.
 typedef struct BRecord BRecord;
 struct BRecord {
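
The per-bucket data handed to dumpmemprof_callback through runtime·iterate_memprof ends up as TagMemProf records, while each sampled object gets a TagAllocSample record pointing at its bucket. On the reading side the two can be joined by bucket identifier to attribute live objects to their allocation stacks; a sketch, reusing the hypothetical types from the reader above:

// attribute maps each sampled object address to the profile record for the
// bucket it was allocated from, i.e. to its allocation stack and counts.
func attribute(records []memProfRecord, samples []allocSample) map[uint64]memProfRecord {
	byBucket := make(map[uint64]memProfRecord, len(records))
	for _, rec := range records {
		byBucket[rec.bucket] = rec
	}
	byAddr := make(map[uint64]memProfRecord, len(samples))
	for _, s := range samples {
		if rec, ok := byBucket[s.bucket]; ok {
			byAddr[s.addr] = rec
		}
	}
	return byAddr
}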