"go/types": {"L4", "GOPARSER", "container/heap", "go/constant"},
// One of a kind.
- "archive/tar": {"L4", "OS", "syscall", "os/user"},
- "archive/zip": {"L4", "OS", "compress/flate"},
- "container/heap": {"sort"},
- "compress/bzip2": {"L4"},
- "compress/flate": {"L4"},
- "compress/gzip": {"L4", "compress/flate"},
- "compress/lzw": {"L4"},
- "compress/zlib": {"L4", "compress/flate"},
- "context": {"errors", "internal/reflectlite", "sync", "sync/atomic", "time"},
- "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
- "database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
- "debug/dwarf": {"L4"},
- "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"},
- "debug/gosym": {"L4"},
- "debug/macho": {"L4", "OS", "debug/dwarf", "compress/zlib"},
- "debug/pe": {"L4", "OS", "debug/dwarf", "compress/zlib"},
- "debug/plan9obj": {"L4", "OS"},
- "encoding": {"L4"},
- "encoding/ascii85": {"L4"},
- "encoding/asn1": {"L4", "math/big"},
- "encoding/csv": {"L4"},
- "encoding/gob": {"L4", "OS", "encoding"},
- "encoding/hex": {"L4"},
- "encoding/json": {"L4", "encoding"},
- "encoding/pem": {"L4"},
- "encoding/xml": {"L4", "encoding"},
- "flag": {"L4", "OS"},
- "go/build": {"L4", "OS", "GOPARSER", "internal/goroot", "internal/goversion"},
- "html": {"L4"},
- "image/draw": {"L4", "image/internal/imageutil"},
- "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
- "image/internal/imageutil": {"L4"},
- "image/jpeg": {"L4", "image/internal/imageutil"},
- "image/png": {"L4", "compress/zlib"},
- "index/suffixarray": {"L4", "regexp"},
- "internal/goroot": {"L4", "OS"},
- "internal/singleflight": {"sync"},
- "internal/trace": {"L4", "OS", "container/heap"},
- "internal/xcoff": {"L4", "OS", "debug/dwarf"},
- "math/big": {"L4"},
- "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
- "mime/quotedprintable": {"L4"},
- "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"},
- "net/url": {"L4"},
- "plugin": {"L0", "OS", "CGO"},
- "runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
- "testing/internal/testdeps": {"L4", "internal/testlog", "runtime/pprof", "regexp"},
- "text/scanner": {"L4", "OS"},
- "text/template/parse": {"L4"},
+ "archive/tar": {"L4", "OS", "syscall", "os/user"},
+ "archive/zip": {"L4", "OS", "compress/flate"},
+ "container/heap": {"sort"},
+ "compress/bzip2": {"L4"},
+ "compress/flate": {"L4"},
+ "compress/gzip": {"L4", "compress/flate"},
+ "compress/lzw": {"L4"},
+ "compress/zlib": {"L4", "compress/flate"},
+ "context": {"errors", "internal/reflectlite", "sync", "sync/atomic", "time"},
+ "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
+ "database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
+ "debug/dwarf": {"L4"},
+ "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"},
+ "debug/gosym": {"L4"},
+ "debug/macho": {"L4", "OS", "debug/dwarf", "compress/zlib"},
+ "debug/pe": {"L4", "OS", "debug/dwarf", "compress/zlib"},
+ "debug/plan9obj": {"L4", "OS"},
+ "encoding": {"L4"},
+ "encoding/ascii85": {"L4"},
+ "encoding/asn1": {"L4", "math/big"},
+ "encoding/csv": {"L4"},
+ "encoding/gob": {"L4", "OS", "encoding"},
+ "encoding/hex": {"L4"},
+ "encoding/json": {"L4", "encoding"},
+ "encoding/pem": {"L4"},
+ "encoding/xml": {"L4", "encoding"},
+ "flag": {"L4", "OS"},
+ "go/build": {"L4", "OS", "GOPARSER", "internal/goroot", "internal/goversion"},
+ "html": {"L4"},
+ "image/draw": {"L4", "image/internal/imageutil"},
+ "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
+ "image/internal/imageutil": {"L4"},
+ "image/jpeg": {"L4", "image/internal/imageutil"},
+ "image/png": {"L4", "compress/zlib"},
+ "index/suffixarray": {"L4", "regexp"},
+ "internal/goroot": {"L4", "OS"},
+ "internal/singleflight": {"sync"},
+ "internal/trace": {"L4", "OS", "container/heap"},
+ "internal/xcoff": {"L4", "OS", "debug/dwarf"},
+ "math/big": {"L4"},
+ "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
+ "mime/quotedprintable": {"L4"},
+ "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"},
+ "net/url": {"L4"},
+ "plugin": {"L0", "OS", "CGO"},
+ "internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
+ "testing/internal/testdeps": {"L4", "internal/testlog", "runtime/pprof", "regexp"},
+ "text/scanner": {"L4", "OS"},
+ "text/template/parse": {"L4"},
"html/template": {
"L4", "OS", "encoding/json", "html", "text/template",
"golang.org/x/net/http/httpguts",
},
"net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal", "golang.org/x/net/http/httpguts"},
- "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"},
+ "net/http/pprof": {"L4", "OS", "context", "html/template", "net/http", "runtime/pprof", "runtime/trace", "internal/profile"},
"net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http", "go/token"},
"net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
}
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Merge merges all the profiles in srcs into a single Profile.
+// It returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical sample and
+// period types or the merge will fail. The resulting profile's Period
+// is the maximum of the inputs', and its TimeNanos is the earliest
+// nonzero one.
+func Merge(srcs []*Profile) (*Profile, error) {
+ if len(srcs) == 0 {
+ return nil, fmt.Errorf("no profiles to merge")
+ }
+ p, err := combineHeaders(srcs)
+ if err != nil {
+ return nil, err
+ }
+
+ pm := &profileMerger{
+ p: p,
+ samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+ locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+ functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+ mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+ }
+
+ for _, src := range srcs {
+ // Clear the profile-specific hash tables
+ pm.locationsByID = make(map[uint64]*Location, len(src.Location))
+ pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+ pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+ if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+ // The Mapping list has the property that the first mapping
+ // represents the main binary. Take the first Mapping we see,
+ // otherwise the operations below will add mappings in an
+ // arbitrary order.
+ pm.mapMapping(src.Mapping[0])
+ }
+
+ for _, s := range src.Sample {
+ if !isZeroSample(s) {
+ pm.mapSample(s)
+ }
+ }
+ }
+
+ for _, s := range p.Sample {
+ if isZeroSample(s) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
+ }
+ }
+
+ return p, nil
+}
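+
+// Illustrative sketch of the intended use: net/http/pprof (below in this
+// CL) computes a delta of two snapshots p0 and p1 of the same profile by
+// negating the first snapshot and merging it with the second:
+//
+//	p0.Scale(-1)
+//	delta, err := Merge([]*Profile{p0, p1}) // delta = p1 - p0
+//	if err != nil {
+//		// the snapshots had incompatible sample/period types
+//	}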
+
+// Normalize normalizes the source profile by multiplying each value in
+// the profile by the ratio of the sum of the base profile's values of
+// that sample type to the sum of the source profile's values of that
+// sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
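+
+// Illustrative sketch: to rescale p so that its per-type totals match a
+// baseline profile pb (assuming the two profiles have compatible sample
+// and period types):
+//
+//	if err := p.Normalize(pb); err != nil {
+//		// incompatible sample or period types
+//	}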
+
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+type profileMerger struct {
+ p *Profile
+
+ // Memoization tables within a profile.
+ locationsByID map[uint64]*Location
+ functionsByID map[uint64]*Function
+ mappingsByID map[uint64]mapInfo
+
+ // Memoization tables for profile entities.
+ samples map[sampleKey]*Sample
+ locations map[locationKey]*Location
+ functions map[functionKey]*Function
+ mappings map[mappingKey]*Mapping
+}
+
+type mapInfo struct {
+ m *Mapping
+ offset int64
+}
+
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ s := &Sample{
+ Location: make([]*Location, len(src.Location)),
+ Value: make([]int64, len(src.Value)),
+ Label: make(map[string][]string, len(src.Label)),
+ NumLabel: make(map[string][]int64, len(src.NumLabel)),
+ NumUnit: make(map[string][]string, len(src.NumLabel)),
+ }
+ for i, l := range src.Location {
+ s.Location[i] = pm.mapLocation(l)
+ }
+ for k, v := range src.Label {
+ vv := make([]string, len(v))
+ copy(vv, v)
+ s.Label[k] = vv
+ }
+ for k, v := range src.NumLabel {
+ u := src.NumUnit[k]
+ vv := make([]int64, len(v))
+ uu := make([]string, len(u))
+ copy(vv, v)
+ copy(uu, u)
+ s.NumLabel[k] = vv
+ s.NumUnit[k] = uu
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping. Add current values to the
+ // existing sample.
+ k := s.key()
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+ copy(s.Value, src.Value)
+ pm.samples[k] = s
+ pm.p.Sample = append(pm.p.Sample, s)
+ return s
+}
+
+// key generates a sampleKey to be used as a key for maps.
+func (sample *Sample) key() sampleKey {
+ ids := make([]string, len(sample.Location))
+ for i, l := range sample.Location {
+ ids[i] = strconv.FormatUint(l.ID, 16)
+ }
+
+ labels := make([]string, 0, len(sample.Label))
+ for k, v := range sample.Label {
+ labels = append(labels, fmt.Sprintf("%q%q", k, v))
+ }
+ sort.Strings(labels)
+
+ numlabels := make([]string, 0, len(sample.NumLabel))
+ for k, v := range sample.NumLabel {
+ numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
+ }
+ sort.Strings(numlabels)
+
+ return sampleKey{
+ strings.Join(ids, "|"),
+ strings.Join(labels, ""),
+ strings.Join(numlabels, ""),
+ }
+}
+
+type sampleKey struct {
+ locations string
+ labels string
+ numlabels string
+}
+
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+ if src == nil {
+ return nil
+ }
+
+ if l, ok := pm.locationsByID[src.ID]; ok {
+ pm.locationsByID[src.ID] = l
+ return l
+ }
+
+ mi := pm.mapMapping(src.Mapping)
+ l := &Location{
+ ID: uint64(len(pm.p.Location) + 1),
+ Mapping: mi.m,
+ Address: uint64(int64(src.Address) + mi.offset),
+ Line: make([]Line, len(src.Line)),
+ IsFolded: src.IsFolded,
+ }
+ for i, ln := range src.Line {
+ l.Line[i] = pm.mapLine(ln)
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping ID.
+ k := l.key()
+ if ll, ok := pm.locations[k]; ok {
+ pm.locationsByID[src.ID] = ll
+ return ll
+ }
+ pm.locationsByID[src.ID] = l
+ pm.locations[k] = l
+ pm.p.Location = append(pm.p.Location, l)
+ return l
+}
+
+// key generates a locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+ key := locationKey{
+ addr: l.Address,
+ isFolded: l.IsFolded,
+ }
+ if l.Mapping != nil {
+ // Normalizes address to handle address space randomization.
+ key.addr -= l.Mapping.Start
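+ // E.g. address 0x7f1234 in a mapping starting at 0x7f0000 keys as
+ // 0x1234, so identical locations match across ASLR'd runs.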
+ key.mappingID = l.Mapping.ID
+ }
+ lines := make([]string, len(l.Line)*2)
+ for i, line := range l.Line {
+ if line.Function != nil {
+ lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
+ }
+ lines[i*2+1] = strconv.FormatInt(line.Line, 16)
+ }
+ key.lines = strings.Join(lines, "|")
+ return key
+}
+
+type locationKey struct {
+ addr, mappingID uint64
+ lines string
+ isFolded bool
+}
+
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+ if src == nil {
+ return mapInfo{}
+ }
+
+ if mi, ok := pm.mappingsByID[src.ID]; ok {
+ return mi
+ }
+
+ // Check memoization tables.
+ mk := src.key()
+ if m, ok := pm.mappings[mk]; ok {
+ mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+ }
+ m := &Mapping{
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
+ }
+ pm.p.Mapping = append(pm.p.Mapping, m)
+
+ // Update memoization tables.
+ pm.mappings[mk] = m
+ mi := mapInfo{m, 0}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+}
+
+// key generates a mappingKey to be used as a key for maps.
+func (m *Mapping) key() mappingKey {
+ // Normalize addresses to handle address space randomization.
+ // Round up to next 4K boundary to avoid minor discrepancies.
+ const mapsizeRounding = 0x1000
+
+ size := m.Limit - m.Start
+ size = size + mapsizeRounding - 1
+ size = size - (size % mapsizeRounding)
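+ // E.g. mapping sizes 0x1001 and 0x1fff both round up to 0x2000, so
+ // runs whose mappings differ by a few bytes still merge.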
+ key := mappingKey{
+ size: size,
+ offset: m.Offset,
+ }
+
+ switch {
+ case m.BuildID != "":
+ key.buildIDOrFile = m.BuildID
+ case m.File != "":
+ key.buildIDOrFile = m.File
+ default:
+ // A mapping containing neither build ID nor file name is a fake mapping. A
+ // key with empty buildIDOrFile is used for fake mappings so that they are
+ // treated as the same mapping during merging.
+ }
+ return key
+}
+
+type mappingKey struct {
+ size, offset uint64
+ buildIDOrFile string
+}
+
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ }
+ return ln
+}
+
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+ if src == nil {
+ return nil
+ }
+ if f, ok := pm.functionsByID[src.ID]; ok {
+ return f
+ }
+ k := src.key()
+ if f, ok := pm.functions[k]; ok {
+ pm.functionsByID[src.ID] = f
+ return f
+ }
+ f := &Function{
+ ID: uint64(len(pm.p.Function) + 1),
+ Name: src.Name,
+ SystemName: src.SystemName,
+ Filename: src.Filename,
+ StartLine: src.StartLine,
+ }
+ pm.functions[k] = f
+ pm.functionsByID[src.ID] = f
+ pm.p.Function = append(pm.p.Function, f)
+ return f
+}
+
+// key generates a functionKey to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+ for _, s := range srcs[1:] {
+ if err := srcs[0].compatible(s); err != nil {
+ return nil, err
+ }
+ }
+
+ var timeNanos, durationNanos, period int64
+ var comments []string
+ seenComments := map[string]bool{}
+ var defaultSampleType string
+ for _, s := range srcs {
+ if timeNanos == 0 || s.TimeNanos < timeNanos {
+ timeNanos = s.TimeNanos
+ }
+ durationNanos += s.DurationNanos
+ if period == 0 || period < s.Period {
+ period = s.Period
+ }
+ for _, c := range s.Comments {
+ if seen := seenComments[c]; !seen {
+ comments = append(comments, c)
+ seenComments[c] = true
+ }
+ }
+ if defaultSampleType == "" {
+ defaultSampleType = s.DefaultSampleType
+ }
+ }
+
+ p := &Profile{
+ SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+ DropFrames: srcs[0].DropFrames,
+ KeepFrames: srcs[0].KeepFrames,
+
+ TimeNanos: timeNanos,
+ DurationNanos: durationNanos,
+ PeriodType: srcs[0].PeriodType,
+ Period: period,
+
+ Comments: comments,
+ DefaultSampleType: defaultSampleType,
+ }
+ copy(p.SampleType, srcs[0].SampleType)
+ return p, nil
+}
+
+// compatible determines if two profiles can be compared/merged.
+// It returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package profile provides a representation of profile.proto and
-// methods to encode/decode profiles in this format.
-//
-// This package is only for testing runtime/pprof.
-// It is not used by production Go programs.
+// Package profile provides a representation of
+// github.com/google/pprof/proto/profile.proto and
+// methods to encode/decode/merge profiles in this format.
package profile
import (
Value []int64
Label map[string][]string
NumLabel map[string][]int64
+ NumUnit map[string][]string
locationIDX []uint64
labelX []Label
// Location corresponds to Profile.Location
type Location struct {
- ID uint64
- Mapping *Mapping
- Address uint64
- Line []Line
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
mappingIDX uint64
}
func (p *Profile) Empty() bool {
return len(p.Sample) == 0
}
+
+// Scale multiplies all sample values in a profile by a constant.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
+// ScaleN multiplies each value in every sample by the corresponding
+// ratio, one ratio per sample type.
+func (p *Profile) ScaleN(ratios []float64) error {
+ if len(p.SampleType) != len(ratios) {
+ return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+ }
+ allOnes := true
+ for _, r := range ratios {
+ if r != 1 {
+ allOnes = false
+ break
+ }
+ }
+ if allOnes {
+ return nil
+ }
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ if ratios[i] != 1 {
+ s.Value[i] = int64(float64(v) * ratios[i])
+ }
+ }
+ }
+ return nil
+}
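+
+// Illustrative sketch: for a profile with two sample types, halving all
+// sample values can be written either way:
+//
+//	p.Scale(0.5)
+//	p.ScaleN([]float64{0.5, 0.5}) // equivalent, one ratio per type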
import (
"bufio"
"bytes"
+ "context"
"fmt"
"html/template"
+ "internal/profile"
"io"
"log"
"net/http"
serveError(w, http.StatusNotFound, "Unknown profile")
return
}
+ if sec := r.FormValue("seconds"); sec != "" {
+ name.serveDeltaProfile(w, r, p, sec)
+ return
+ }
gc, _ := strconv.Atoi(r.FormValue("gc"))
if name == "heap" && gc > 0 {
runtime.GC()
p.WriteTo(w, debug)
}
+func (name handler) serveDeltaProfile(w http.ResponseWriter, r *http.Request, p *pprof.Profile, secStr string) {
+ sec, err := strconv.ParseInt(secStr, 10, 64)
+ if err != nil || sec <= 0 {
+ serveError(w, http.StatusBadRequest, `invalid value for "seconds" - must be a positive integer`)
+ return
+ }
+ if !profileSupportsDelta[name] {
+ serveError(w, http.StatusBadRequest, `"seconds" parameter is not supported for this profile type`)
+ return
+ }
+ // 'name' should be a key in profileSupportsDelta.
+ if durationExceedsWriteTimeout(r, float64(sec)) {
+ serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+ debug, _ := strconv.Atoi(r.FormValue("debug"))
+ if debug != 0 {
+ serveError(w, http.StatusBadRequest, "seconds and debug params are incompatible")
+ return
+ }
+ p0, err := collectProfile(p)
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to collect profile")
+ return
+ }
+
+ t := time.NewTimer(time.Duration(sec) * time.Second)
+ defer t.Stop()
+
+ select {
+ case <-r.Context().Done():
+ err := r.Context().Err()
+ if err == context.DeadlineExceeded {
+ serveError(w, http.StatusRequestTimeout, err.Error())
+ } else { // TODO: what's a good status code for cancelled requests? 400?
+ serveError(w, http.StatusInternalServerError, err.Error())
+ }
+ return
+ case <-t.C:
+ }
+
+ p1, err := collectProfile(p)
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to collect profile")
+ return
+ }
+ ts := p1.TimeNanos
+ dur := p1.TimeNanos - p0.TimeNanos
+
+ p0.Scale(-1)
+
+ p1, err = profile.Merge([]*profile.Profile{p0, p1})
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to compute delta")
+ return
+ }
+
+ p1.TimeNanos = ts // set since we don't know what profile.Merge set for TimeNanos.
+ p1.DurationNanos = dur
+
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s-delta"`, name))
+ p1.Write(w)
+}
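+
+// Illustrative usage of the delta endpoint (assuming the handlers are
+// registered on the default mux, as this package's init does):
+//
+//	go tool pprof http://localhost:6060/debug/pprof/mutex?seconds=30
+//
+// The handler collects a snapshot, sleeps for the requested duration,
+// collects a second snapshot, and serves their merged difference.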
+
+func collectProfile(p *pprof.Profile) (*profile.Profile, error) {
+ var buf bytes.Buffer
+ if err := p.WriteTo(&buf, 0); err != nil {
+ return nil, err
+ }
+ ts := time.Now().UnixNano()
+ p0, err := profile.Parse(&buf)
+ if err != nil {
+ return nil, err
+ }
+ p0.TimeNanos = ts
+ return p0, nil
+}
+
+var profileSupportsDelta = map[handler]bool{
+ "block": true,
+ "mutex": true,
+}
+
var profileDescriptions = map[string]string{
"allocs": "A sampling of all past memory allocations",
"block": "Stack traces that led to blocking on synchronization primitives",
import (
"bytes"
+ "fmt"
+ "internal/profile"
"io/ioutil"
"net/http"
"net/http/httptest"
+ "runtime"
"runtime/pprof"
+ "strings"
+ "sync"
+ "sync/atomic"
"testing"
+ "time"
)
// TestDescriptions checks that the profile names under runtime/pprof package
{"/debug/pprof/profile?seconds=1", Profile, http.StatusOK, "application/octet-stream", `attachment; filename="profile"`, nil},
{"/debug/pprof/symbol", Symbol, http.StatusOK, "text/plain; charset=utf-8", "", nil},
{"/debug/pprof/trace", Trace, http.StatusOK, "application/octet-stream", `attachment; filename="trace"`, nil},
+ {"/debug/pprof/mutex", Index, http.StatusOK, "application/octet-stream", `attachment; filename="mutex"`, nil},
+ {"/debug/pprof/block?seconds=1", Index, http.StatusOK, "application/octet-stream", `attachment; filename="block-delta"`, nil},
{"/debug/pprof/", Index, http.StatusOK, "text/html; charset=utf-8", "", []byte("Types of profiles available:")},
}
for _, tc := range testCases {
}
})
}
+}
+
+var Sink uint32
+
+func mutexHog1(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
+ atomic.AddUint32(&Sink, 1)
+ for time.Since(start) < dt {
+ // When using gccgo the loop of mutex operations is
+ // not preemptible. This can cause the loop to block a GC,
+ // causing the time limits in TestDeltaProfile to fail.
+ // Since this loop is not very realistic, when using
+ // gccgo add preemption points 100 times a second.
+ t1 := time.Now()
+ for time.Since(start) < dt && time.Since(t1) < 10*time.Millisecond {
+ mu1.Lock()
+ mu2.Lock()
+ mu1.Unlock()
+ mu2.Unlock()
+ }
+ if runtime.Compiler == "gccgo" {
+ runtime.Gosched()
+ }
+ }
+}
+
+// mutexHog2 is almost identical to mutexHog1, but we keep them separate
+// in order to distinguish them with function names in the stack trace.
+// We make them slightly different, using Sink, because otherwise
+// gccgo -c opt will merge them.
+func mutexHog2(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) {
+ atomic.AddUint32(&Sink, 2)
+ for time.Since(start) < dt {
+ // See the comment in mutexHog1.
+ t1 := time.Now()
+ for time.Since(start) < dt && time.Since(t1) < 10*time.Millisecond {
+ mu1.Lock()
+ mu2.Lock()
+ mu1.Unlock()
+ mu2.Unlock()
+ }
+ if runtime.Compiler == "gccgo" {
+ runtime.Gosched()
+ }
+ }
+}
+
+// mutexHog starts multiple goroutines that run the given hogger function
+// for the specified duration. The hogger function is given two mutexes to
+// lock and unlock.
+func mutexHog(duration time.Duration, hogger func(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration)) {
+ start := time.Now()
+ mu1 := new(sync.Mutex)
+ mu2 := new(sync.Mutex)
+ var wg sync.WaitGroup
+ wg.Add(10)
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ hogger(mu1, mu2, start, duration)
+ }()
+ }
+ wg.Wait()
+}
+
+func TestDeltaProfile(t *testing.T) {
+ rate := runtime.SetMutexProfileFraction(1)
+ defer func() {
+ runtime.SetMutexProfileFraction(rate)
+ }()
+
+ // mutexHog1 will appear in the non-delta mutex profile
+ // if mutex profiling works.
+ mutexHog(20*time.Millisecond, mutexHog1)
+
+ // If mutexHog1 does not appear in the mutex profile,
+ // skip this test: mutex profiling is likely not working,
+ // and neither is the delta profile.
+
+ p, err := query("/debug/pprof/mutex")
+ if err != nil {
+ t.Skipf("mutex profile is unsupported: %v", err)
+ }
+
+ if !seen(p, "mutexHog1") {
+ t.Skipf("mutex profile is not working: %v", p)
+ }
+
+ // Run mutexHog2 in the background so its call stacks appear in the mutex profile.
+ done := make(chan bool)
+ go func() {
+ for {
+ mutexHog(20*time.Millisecond, mutexHog2)
+ select {
+ case <-done:
+ done <- true
+ return
+ default:
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+ }()
+ defer func() { // cleanup the above goroutine.
+ done <- true
+ <-done // wait for the goroutine to exit.
+ }()
+ for _, tc := range []struct {
+ endpoint string
+ seconds int
+ mutexHog1, mutexHog2 bool
+ }{
+ {"/debug/pprof/mutex?seconds=1", 1, false, true},
+ {"/debug/pprof/mutex", 0, true, true},
+ } {
+ t.Run(tc.endpoint, func(t *testing.T) {
+ p, err := query(tc.endpoint)
+ if err != nil {
+ t.Fatalf("failed to query profile: %v", err)
+ }
+ t.Logf("Profile=%v", p)
+
+ if got := seen(p, "mutexHog1"); got != tc.mutexHog1 {
+ t.Errorf("seen(mutexHog1) = %t, want %t", got, tc.mutexHog1)
+ }
+ if got := seen(p, "mutexHog2"); got != tc.mutexHog2 {
+ t.Errorf("seen(mutexHog2) = %t, want %t", got, tc.mutexHog2)
+ }
+
+ if tc.seconds > 0 {
+ got := time.Duration(p.DurationNanos) * time.Nanosecond
+ want := time.Duration(tc.seconds) * time.Second
+ if got < want/2 || got > 2*want {
+ t.Errorf("got duration = %v; want ~%v", got, want)
+ }
+ }
+ })
+ }
+}
+
+var srv = httptest.NewServer(nil)
+
+func query(endpoint string) (*profile.Profile, error) {
+ url := srv.URL + endpoint
+ r, err := http.Get(url)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch %q: %v", url, err)
+ }
+ if r.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("failed to fetch %q: %v", url, r.Status)
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read and parse the result from %q: %v", url, err)
+ }
+ return profile.Parse(bytes.NewBuffer(b))
+}
+
+// seen returns true if the profile includes samples whose stacks include
+// a function whose name contains fname.
+func seen(p *profile.Profile, fname string) bool {
+ locIDs := map[*profile.Location]bool{}
+ for _, loc := range p.Location {
+ for _, l := range loc.Line {
+ if strings.Contains(l.Function.Name, fname) {
+ locIDs[loc] = true
+ break
+ }
+ }
+ }
+ for _, sample := range p.Sample {
+ for _, loc := range sample.Location {
+ if locIDs[loc] {
+ return true
+ }
+ }
+ }
+ return false
}
import (
"bytes"
"fmt"
+ "internal/profile"
"reflect"
"regexp"
"runtime"
- "runtime/pprof/internal/profile"
"testing"
"unsafe"
)
"bytes"
"context"
"fmt"
+ "internal/profile"
"internal/testenv"
"io"
"io/ioutil"
"os/exec"
"regexp"
"runtime"
- "runtime/pprof/internal/profile"
"strings"
"sync"
"sync/atomic"
return
}
skip := 2 // Callers and dumpCallers
runtime.Callers(skip, pcs)
}
// can't make this conditional or this function won't be inlined.
dumpCallers(pcs)
- recursionChainTop(x - 1, pcs)
+ recursionChainTop(x-1, pcs)
}
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
"bytes"
"encoding/json"
"fmt"
+ "internal/profile"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"reflect"
"runtime"
- "runtime/pprof/internal/profile"
"strings"
"testing"
)
import (
"bytes"
+ "internal/profile"
"runtime"
- "runtime/pprof/internal/profile"
"testing"
)