Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
Lookups uint64 // number of pointer lookups
Mallocs uint64 // number of mallocs
+ Frees uint64 // number of frees
// Main allocation heap statistics.
HeapAlloc uint64 // bytes allocated and still in use
BuckHashSys uint64 // profiling bucket hash table
// Garbage collector statistics.
- NextGC uint64
- PauseNs uint64
- NumGC uint32
- EnableGC bool
- DebugGC bool
+ NextGC uint64
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // most recent GC pause times
+ NumGC uint32
+ EnableGC bool
+ DebugGC bool
// Per-size allocation statistics.
// Not locked during update; approximate.
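For callers of the new API, a minimal sketch (using the same pre-Go1 runtime.MemStats package variable the benchmarks below use, and the field names from the diff above) of reading the most recent pause out of the circular PauseNs buffer and the average pause from PauseTotalNs:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.GC() // force at least one collection so NumGC > 0

	st := &runtime.MemStats
	// PauseNs is a circular buffer indexed by collection count;
	// the most recent pause lives at (NumGC-1) mod len(PauseNs).
	last := st.PauseNs[(st.NumGC-1)%uint32(len(st.PauseNs))]
	avg := st.PauseTotalNs / uint64(st.NumGC)
	fmt.Printf("gc: last pause %dns, avg %dns over %d collections\n", last, avg, st.NumGC)
}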
uint64 sys; // bytes obtained from system (should be sum of xxx_sys below)
uint64 nlookup; // number of pointer lookups
uint64 nmalloc; // number of mallocs
+ uint64 nfree; // number of frees
// Statistics about malloc heap.
// protected by mheap.Lock
// Statistics about garbage collector.
// Protected by stopping the world during GC.
uint64 next_gc; // next GC (in heap_alloc time)
- uint64 pause_ns;
+ uint64 pause_total_ns;
+ uint64 pause_ns[256];
uint32 numgc;
bool enablegc;
bool debuggc;
case RefNone:
// Free large object.
mstats.alloc -= s->npages<<PageShift;
+ mstats.nfree++;
runtime·memclr(p, s->npages<<PageShift);
if(ref & RefProfiled)
runtime·MProf_Free(p, s->npages<<PageShift);
if(size > sizeof(uintptr))
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
mstats.alloc -= size;
+ mstats.nfree++;
mstats.by_size[s->sizeclass].nfree++;
runtime·MCache_Free(c, p, s->sizeclass, size);
break;
t1 = runtime·nanotime();
mstats.numgc++;
- mstats.pause_ns += t1 - t0;
+ mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t1 - t0;
+ mstats.pause_total_ns += t1 - t0;
if(mstats.debuggc)
runtime·printf("pause %D\n", t1-t0);
runtime·semrelease(&gcsema);
all: $(addsuffix .out, $(ALL))
-%.$O: %.go
- $(GC) $*.go
+%.$O: %.go stats.go
+ $(GC) $*.go stats.go
%.out: %.$O
$(LD) -o $@ $*.$O
"fmt"
"go/ast"
"go/parser"
+ "go/token"
"os"
"path"
"runtime"
"strings"
"time"
+ "http"
+ _ "http/pprof"
+ "log"
)
+var serve = flag.String("serve", "", "serve http on this address at end")
+
func isGoFile(dir *os.FileInfo) bool {
return dir.IsRegular() &&
!strings.HasPrefix(dir.Name, ".") && // ignore .files
}
func pkgName(filename string) string {
- file, err := parser.ParseFile(filename, nil, parser.PackageClauseOnly)
+ file, err := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
if err != nil || file == nil {
return ""
}
}
// get package AST
- pkgs, err := parser.ParseDir(dirpath, filter, parser.ParseComments)
+ pkgs, err := parser.ParseDir(token.NewFileSet(), dirpath, filter, parser.ParseComments)
if err != nil {
println("parse", dirpath, err.String())
panic("fail")
}
func main() {
+ runtime.GOMAXPROCS(4) // let the runtime use up to 4 CPUs
+ go func() {}()
+ go func() {}()
+ go func() {}()
st := &runtime.MemStats
+ packages = append(packages, packages...) // double the package list,
+ packages = append(packages, packages...) // and again: 4x the packages to parse and retain
n := flag.Int("n", 4, "iterations")
p := flag.Int("p", len(packages), "# of packages to keep in memory")
flag.BoolVar(&st.DebugGC, "d", st.DebugGC, "print GC debugging info (pause times)")
flag.Parse()
+ var lastParsed []map[string]*ast.Package
var t0 int64
pkgroot := runtime.GOROOT() + "/src/pkg/"
for pass := 0; pass < 2; pass++ {
// than the normal pauses and would otherwise make
// the average look much better than it actually is.
st.NumGC = 0
- st.PauseNs = 0
+ st.PauseTotalNs = 0
t0 = time.Nanoseconds()
for i := 0; i < *n; i++ {
for j := range parsed {
parsed[j] = parseDir(pkgroot + packages[j%len(packages)])
}
+ if i+1 == *n && *serve != "" {
+ lastParsed = parsed
+ }
}
runtime.GC()
+ runtime.GC()
}
t1 := time.Nanoseconds()
fmt.Printf("Alloc=%d/%d Heap=%d Mallocs=%d PauseTime=%.3f/%d = %.3f\n",
st.Alloc, st.TotalAlloc,
st.Sys,
- st.Mallocs, float64(st.PauseNs)/1e9,
- st.NumGC, float64(st.PauseNs)/1e9/float64(st.NumGC))
-
- fmt.Printf("%10s %10s %10s\n", "size", "#alloc", "#free")
- for _, s := range st.BySize {
- fmt.Printf("%10d %10d %10d\n", s.Size, s.Mallocs, s.Frees)
- }
+ st.Mallocs, float64(st.PauseTotalNs)/1e9,
+ st.NumGC, float64(st.PauseTotalNs)/1e9/float64(st.NumGC))
+ /*
+ fmt.Printf("%10s %10s %10s\n", "size", "#alloc", "#free")
+ for _, s := range st.BySize {
+ fmt.Printf("%10d %10d %10d\n", s.Size, s.Mallocs, s.Frees)
+ }
+ */
// Standard gotest benchmark output, collected by build dashboard.
- fmt.Printf("garbage.BenchmarkParser %d %d ns/op\n", *n, (t1-t0)/int64(*n))
- fmt.Printf("garbage.BenchmarkParserPause %d %d ns/op\n", st.NumGC, int64(st.PauseNs)/int64(st.NumGC))
+ gcstats("BenchmarkParser", *n, t1-t0)
+
+ if *serve != "" {
+ log.Exit(http.ListenAndServe(*serve, nil))
+ println(lastParsed) // reference lastParsed so the parsed data stays live while serving
+ }
}
"math",
"mime",
"net",
- "nntp",
"os",
"os/signal",
"patch",
"runtime",
"scanner",
"sort",
+ "smtp",
"strconv",
"strings",
"sync",
func main() {
- st := &runtime.MemStats
t0 := time.Nanoseconds()
verify()
for i := 0; i <= 9; i++ {
runtime.GC()
t1 := time.Nanoseconds()
- fmt.Printf("garbage.BenchmarkPeano 1 %d ns/op\n", t1-t0)
- fmt.Printf("garbage.BenchmarkPeanoPause %d %d ns/op\n", st.NumGC, int64(st.PauseNs)/int64(st.NumGC))
+ gcstats("BenchmarkPeano", 1, t1-t0)
}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+ "sort"
+)
+
+func gcstats(name string, n int, t int64) {
+ st := &runtime.MemStats
+ fmt.Printf("garbage.%sMem Alloc=%d/%d Heap=%d NextGC=%d Mallocs=%d\n", name, st.Alloc, st.TotalAlloc, st.Sys, st.NextGC, st.Mallocs)
+ fmt.Printf("garbage.%s %d %d ns/op\n", name, n, t/int64(n))
+ fmt.Printf("garbage.%sLastPause 1 %d ns/op\n", name, st.PauseNs[(st.NumGC-1)%uint32(len(st.PauseNs))])
+ fmt.Printf("garbage.%sPause %d %d ns/op\n", name, st.NumGC, int64(st.PauseTotalNs)/int64(st.NumGC))
+ nn := int(st.NumGC)
+ if nn >= len(st.PauseNs) {
+ nn = len(st.PauseNs)
+ }
+ t1, t2, t3, t4, t5 := tukey5(st.PauseNs[0:nn])
+ fmt.Printf("garbage.%sPause5: %d %d %d %d %d\n", name, t1, t2, t3, t4, t5)
+
+// fmt.Printf("garbage.%sScan: %v\n", name, st.ScanDist)
+}
+
+type T []uint64
+func (t T) Len() int { return len(t) }
+func (t T) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t T) Less(i, j int) bool { return t[i] < t[j] }
+
+func tukey5(raw []uint64) (lo, q1, q2, q3, hi uint64) {
+ x := make(T, len(raw))
+ copy(x, raw)
+ sort.Sort(T(x))
+ lo = x[0]
+ q1 = x[len(x)/4]
+ q2 = x[len(x)/2]
+ q3 = x[len(x)*3/4]
+ hi = x[len(x)-1]
+ return
+}
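A quick sanity check of tukey5, compiled together with stats.go as the Makefile change above arranges for the benchmarks (the pause samples here are made up, not measured data):

package main

import "fmt"

func main() {
	pauses := []uint64{120, 80, 300, 95, 150} // hypothetical pause samples, in ns
	lo, q1, q2, q3, hi := tukey5(pauses)      // sorted copy: 80 95 120 150 300
	fmt.Println(lo, q1, q2, q3, hi)           // prints: 80 95 120 150 300
}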
import (
"flag"
"fmt"
- "runtime"
"time"
)
fmt.Printf("long lived tree of depth %d\t check: %d\n", maxDepth, longLivedTree.itemCheck())
t1 := time.Nanoseconds()
- st := &runtime.MemStats
// Standard gotest benchmark output, collected by build dashboard.
- fmt.Printf("garbage.BenchmarkTree %d %d ns/op\n", *n, (t1-t0)/int64(*n))
- fmt.Printf("garbage.BenchmarkTreePause %d %d ns/op\n", st.NumGC, int64(st.PauseNs)/int64(st.NumGC))
-
+ gcstats("BenchmarkTree", *n, t1-t0)
}