--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+// argvalues records the os.Args and GOOS/GOARCH values read from a
+// single counter data file.
+type argvalues struct {
+ osargs []string
+ goos string
+ goarch string
+}
+
+// argstate accumulates the "merged" view of argument values seen
+// across a series of counter data files; initialized is false until
+// the first Merge call records a baseline.
+type argstate struct {
+ state argvalues
+ initialized bool
+}
+
+// ssleq reports whether the string slices s1 and s2 have the same
+// length and identical elements.
+func ssleq(s1 []string, s2 []string) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Merge folds the values from state into the accumulated state held
+// by a. The first call records state verbatim; each subsequent call
+// blanks out any field (os args, GOOS, GOARCH) whose value disagrees
+// with what was previously recorded, so that only values consistent
+// across all inputs survive.
+func (a *argstate) Merge(state argvalues) {
+ if !a.initialized {
+ a.state = state
+ a.initialized = true
+ return
+ }
+ if !ssleq(a.state.osargs, state.osargs) {
+ a.state.osargs = nil
+ }
+ if state.goos != a.state.goos {
+ a.state.goos = ""
+ }
+ if state.goarch != a.state.goarch {
+ a.state.goarch = ""
+ }
+}
+
+// ArgsSummary returns a map summarizing the accumulated argument
+// state: an "argc" entry plus one "argvN" entry per recorded os arg
+// (emitted only if args were consistent across all merged inputs),
+// and GOOS/GOARCH entries when those were consistent.
+func (a *argstate) ArgsSummary() map[string]string {
+ m := make(map[string]string)
+ if len(a.state.osargs) != 0 {
+ m["argc"] = fmt.Sprintf("%d", len(a.state.osargs))
+ // Use a distinct loop variable name here so as not to shadow
+ // the method receiver "a".
+ for k, arg := range a.state.osargs {
+ m[fmt.Sprintf("argv%d", k)] = arg
+ }
+ }
+ if a.state.goos != "" {
+ m["GOOS"] = a.state.goos
+ }
+ if a.state.goarch != "" {
+ m["GOARCH"] = a.state.goarch
+ }
+ return m
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/cov"
+ "cmd/internal/pkgpattern"
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+)
+
+// Command line flags shared by all covdata subcommands.
+var verbflag = flag.Int("v", 0, "Verbose trace output level")
+var hflag = flag.Bool("h", false, "Panic on fatal errors (for stack trace)")
+var hwflag = flag.Bool("hw", false, "Panic on warnings (for stack trace)")
+var indirsflag = flag.String("i", "", "Input dirs to examine (comma separated)")
+var pkgpatflag = flag.String("pkg", "", "Restrict output to package(s) matching specified package pattern.")
+var cpuprofileflag = flag.String("cpuprofile", "", "Write CPU profile to specified file")
+var memprofileflag = flag.String("memprofile", "", "Write memory profile to specified file")
+var memprofilerateflag = flag.Int("memprofilerate", 0, "Set memprofile sampling rate to value")
+
+// matchpkg, when non-nil, reports whether a package import path was
+// selected by the -pkg flag (see main for how it is populated).
+var matchpkg func(name string) bool
+
+// atExitFuncs holds cleanup functions to be run (LIFO) by Exit.
+var atExitFuncs []func()
+
+// atExit registers a function to be invoked when Exit is called.
+func atExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+// Exit runs all registered atExit functions in LIFO order, then
+// terminates the process with the given status code.
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ // Unregister before invoking, so a function that itself ends
+ // up calling Exit does not run twice.
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
+
+// dbgtrace prints the formatted debug trace message (with a trailing
+// newline) if the -v verbosity level is at least vlevel.
+func dbgtrace(vlevel int, s string, a ...interface{}) {
+ if *verbflag >= vlevel {
+ fmt.Printf(s, a...)
+ fmt.Printf("\n")
+ }
+}
+
+// warn reports a non-fatal problem to stderr; if the -hw flag is set
+// it panics instead, so as to produce a stack trace.
+func warn(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "warning: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ if *hwflag {
+ panic("unexpected warning")
+ }
+}
+
+// fatal reports an error to stderr and exits with status 1 (running
+// any atExit hooks first); if the -h flag is set it panics instead,
+// so as to produce a stack trace.
+func fatal(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "error: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ if *hflag {
+ panic("fatal error")
+ }
+ Exit(1)
+}
+
+// usage prints a top-level usage message (preceded by msg, if
+// non-empty) listing the available subcommands, then exits with
+// status 2.
+func usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata [command]\n")
+ fmt.Fprintf(os.Stderr, `
+Commands are:
+
+textfmt convert coverage data to textual format
+percent output total percentage of statements covered
+pkglist output list of package import paths
+func output coverage profile information for each function
+merge merge data files together
+subtract subtract one set of data files from another set
+intersect generate intersection of two sets of data files
+debugdump dump data in human-readable format for debugging purposes
+`)
+ fmt.Fprintf(os.Stderr, "\nFor help on a specific subcommand, try:\n")
+ fmt.Fprintf(os.Stderr, "\ngo tool covdata <cmd> -help\n")
+ Exit(2)
+}
+
+// covOperation is the interface implemented by each covdata
+// subcommand: a coverage-data visitor plus mode-specific Setup and
+// Usage hooks.
+type covOperation interface {
+ cov.CovDataVisitor
+ Setup()
+ Usage(string)
+}
+
+// Modes of operation. Each constant is the name of a covdata
+// subcommand as given on the command line.
+const (
+ funcMode = "func"
+ mergeMode = "merge"
+ intersectMode = "intersect"
+ subtractMode = "subtract"
+ percentMode = "percent"
+ pkglistMode = "pkglist"
+ textfmtMode = "textfmt"
+ debugDumpMode = "debugdump"
+)
+
+// main dispatches to the covdata subcommand named by os.Args[1],
+// performs mode-independent setup (package pattern matching,
+// CPU/memory profiling), then walks the input coverage data
+// directories with a CovDataReader driving the selected operation.
+func main() {
+ // First argument should be mode/subcommand.
+ if len(os.Args) < 2 {
+ usage("missing command selector")
+ }
+
+ // Select mode
+ var op covOperation
+ cmd := os.Args[1]
+ switch cmd {
+ case mergeMode:
+ op = makeMergeOp()
+ case debugDumpMode:
+ op = makeDumpOp(debugDumpMode)
+ case textfmtMode:
+ op = makeDumpOp(textfmtMode)
+ case percentMode:
+ op = makeDumpOp(percentMode)
+ case funcMode:
+ op = makeDumpOp(funcMode)
+ case pkglistMode:
+ op = makeDumpOp(pkglistMode)
+ case subtractMode:
+ op = makeSubtractIntersectOp(subtractMode)
+ case intersectMode:
+ op = makeSubtractIntersectOp(intersectMode)
+ default:
+ usage(fmt.Sprintf("unknown command selector %q", cmd))
+ }
+
+ // Edit out command selector, then parse flags.
+ os.Args = append(os.Args[:1], os.Args[2:]...)
+ flag.Usage = func() {
+ op.Usage("")
+ }
+ flag.Parse()
+
+ // Mode-independent flag setup
+ dbgtrace(1, "starting mode-independent setup")
+ if flag.NArg() != 0 {
+ op.Usage("unknown extra arguments")
+ }
+ if *pkgpatflag != "" {
+ // Build matchpkg as the OR of the simple patterns listed in
+ // the -pkg flag (empty entries are skipped).
+ pats := strings.Split(*pkgpatflag, ",")
+ matchers := []func(name string) bool{}
+ for _, p := range pats {
+ if p == "" {
+ continue
+ }
+ f := pkgpattern.MatchSimplePattern(p)
+ matchers = append(matchers, f)
+ }
+ matchpkg = func(name string) bool {
+ for _, f := range matchers {
+ if f(name) {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ if *cpuprofileflag != "" {
+ f, err := os.Create(*cpuprofileflag)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fatal("%v", err)
+ }
+ atExit(pprof.StopCPUProfile)
+ }
+ if *memprofileflag != "" {
+ if *memprofilerateflag != 0 {
+ runtime.MemProfileRate = *memprofilerateflag
+ }
+ f, err := os.Create(*memprofileflag)
+ if err != nil {
+ fatal("%v", err)
+ }
+ // Heap profile is written when the process exits.
+ atExit(func() {
+ runtime.GC()
+ const writeLegacyFormat = 1
+ if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+ fatal("%v", err)
+ }
+ })
+ } else {
+ // Not doing memory profiling; disable it entirely.
+ runtime.MemProfileRate = 0
+ }
+
+ // Mode-dependent setup.
+ op.Setup()
+
+ // ... off and running now.
+ dbgtrace(1, "starting perform")
+
+ indirs := strings.Split(*indirsflag, ",")
+ vis := cov.CovDataVisitor(op)
+ var flags cov.CovDataReaderFlags
+ if *hflag {
+ flags |= cov.PanicOnError
+ }
+ if *hwflag {
+ flags |= cov.PanicOnWarning
+ }
+ reader := cov.MakeCovDataReader(vis, indirs, *verbflag, flags, matchpkg)
+ st := 0
+ if err := reader.Visit(); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ st = 1
+ }
+ dbgtrace(1, "leaving main")
+ Exit(st)
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+//
+// Covdata is a program for manipulating and generating reports
+// from 2nd-generation coverage testing output files, those produced
+// from running applications or integration tests. E.g.
+//
+// $ mkdir ./profiledir
+// $ go build -cover -o myapp.exe .
+// $ GOCOVERDIR=./profiledir ./myapp.exe <arguments>
+// $ ls ./profiledir
+// covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241
+// covmeta.cce1b350af34b6d0fb59cc1725f0ee27
+// $
+//
+// Run covdata via "go tool covdata <mode>", where 'mode' is a subcommand
+// selecting a specific reporting, merging, or data manipulation operation.
+// Descriptions of the various modes (run "go tool covdata <mode> -help"
+// for specifics on the usage of a given mode):
+//
+// 1. Report percent of statements covered in each profiled package
+//
+// $ go tool covdata percent -i=profiledir
+// cov-example/p coverage: 41.1% of statements
+// main coverage: 87.5% of statements
+// $
+//
+//
+// 2. Report import paths of packages profiled
+//
+// $ go tool covdata pkglist -i=profiledir
+// cov-example/p
+// main
+// $
+//
+// 3. Report percent statements covered by function:
+//
+// $ go tool covdata func -i=profiledir
+// cov-example/p/p.go:12: emptyFn 0.0%
+// cov-example/p/p.go:32: Small 100.0%
+// cov-example/p/p.go:47: Medium 90.9%
+// ...
+// $
+//
+// 4. Convert coverage data to legacy textual format:
+//
+// $ go tool covdata textfmt -i=profiledir -o=cov.txt
+// $ head cov.txt
+// mode: set
+// cov-example/p/p.go:12.22,13.2 0 0
+// cov-example/p/p.go:15.31,16.2 1 0
+// cov-example/p/p.go:16.3,18.3 0 0
+// cov-example/p/p.go:19.3,21.3 0 0
+// ...
+// $ go tool cover -html=cov.txt
+// $
+//
+// 5. Merge profiles together:
+//
+// $ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve
+// $
+//
+// 6. Subtract one profile from another
+//
+// $ go tool covdata subtract -i=indir1,indir2 -o=outdir
+// $
+//
+// 7. Intersect profiles
+//
+// $ go tool covdata intersect -i=indir1,indir2 -o=outdir
+// $
+//
+// 8. Dump a profile for debugging purposes.
+//
+// $ go tool covdata debugdump -i=indir
+// <human readable output>
+// $
+//
+*/
+
+package main
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and apis to support the "go tool
+// covdata" sub-commands that relate to dumping text format summaries
+// and reports: "pkglist", "func", "debugdump", "percent", and
+// "textfmt".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/calloc"
+ "internal/coverage/cformat"
+ "internal/coverage/cmerge"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+ "sort"
+ "strings"
+)
+
+// Flags registered only for specific dump modes; see makeDumpOp.
+var textfmtoutflag *string
+var liveflag *bool
+
+// makeDumpOp creates the dump operation for the given subcommand
+// (one of "pkglist", "func", "debugdump", "percent", "textfmt"),
+// registering any mode-specific command line flags.
+func makeDumpOp(cmd string) covOperation {
+ if cmd == textfmtMode || cmd == percentMode {
+ textfmtoutflag = flag.String("o", "", "Output text format to file")
+ }
+ if cmd == debugDumpMode {
+ liveflag = flag.Bool("live", false, "Select only live (executed) functions for dump output.")
+ }
+ d := &dstate{
+ cmd: cmd,
+ cm: &cmerge.Merger{},
+ }
+ if d.cmd == pkglistMode {
+ d.pkgpaths = make(map[string]struct{})
+ }
+ return d
+}
+
+// dstate encapsulates state and provides methods for implementing
+// various dump operations. Specifically, dstate implements the
+// CovDataVisitor interface, and is designed to be used in
+// concert with the CovDataReader utility, which abstracts away most
+// of the grubby details of reading coverage data files.
+type dstate struct {
+ // for batch allocation of counter arrays
+ calloc.BatchCounterAlloc
+
+ // counter merging state + methods
+ cm *cmerge.Merger
+
+ // counter data formatting helper
+ format *cformat.Formatter
+
+ // 'mm' stores values read from a counter data file; the pkfunc key
+ // is a pkgid/funcid pair that uniquely identifies a function in
+ // the instrumented application.
+ mm map[pkfunc]decodecounter.FuncPayload
+
+ // pkm maps package ID to the number of functions in the package
+ // with that ID. It is used to report inconsistencies in counter
+ // data (for example, a counter data entry with pkgid=N funcid=10
+ // where package N only has 3 functions).
+ pkm map[uint32]uint32
+
+ // pkgpaths records all package import paths encountered while
+ // visiting coverage data files (used to implement the "pkglist"
+ // subcommand).
+ pkgpaths map[string]struct{}
+
+ // Current package name and import path.
+ pkgName string
+ pkgImportPath string
+
+ // Module path for current package (may be empty).
+ modulePath string
+
+ // Dump subcommand (ex: "textfmt", "debugdump", etc).
+ cmd string
+
+ // File to which we will write text format output, if enabled.
+ textfmtoutf *os.File
+
+ // Total and covered statements (used by "debugdump" subcommand).
+ totalStmts, coveredStmts int
+
+ // Records whether preamble has been emitted for current pkg
+ // (used when in "debugdump" mode)
+ preambleEmitted bool
+}
+
+// Usage prints a mode-specific usage message (preceded by msg, if
+// non-empty) with examples, then exits with status 2.
+func (d *dstate) Usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=<directories>\n\n", d.cmd)
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
+ switch d.cmd {
+ case pkglistMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata pkglist -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n")
+ fmt.Fprintf(os.Stderr, " \tand writes out a list of the import paths\n")
+ fmt.Fprintf(os.Stderr, " \tof all compiled packages.\n")
+ case textfmtMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata textfmt -i=dir1,dir2 -o=out.txt\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges data from input directories dir1+dir2\n")
+ fmt.Fprintf(os.Stderr, " \tand emits text format into file 'out.txt'\n")
+ case percentMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata percent -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges data from input directories dir1+dir2\n")
+ fmt.Fprintf(os.Stderr, " \tand emits percentage of statements covered\n\n")
+ case funcMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata func -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n")
+ fmt.Fprintf(os.Stderr, " \tand writes out coverage profile data for\n")
+ fmt.Fprintf(os.Stderr, " \teach function.\n")
+ case debugDumpMode:
+ fmt.Fprintf(os.Stderr, " go tool covdata debugdump [flags] -i=dir1,dir2\n\n")
+ fmt.Fprintf(os.Stderr, " \treads coverage data from dir1+dir2 and dumps\n")
+ fmt.Fprintf(os.Stderr, " \tcontents in human-readable form to stdout, for\n")
+ fmt.Fprintf(os.Stderr, " \tdebugging purposes.\n")
+ default:
+ // makeDumpOp only constructs dstates for the modes above.
+ panic("unexpected")
+ }
+ Exit(2)
+}
+
+// Setup is called once at program startup time to vet flag values
+// and do any necessary setup operations.
+func (d *dstate) Setup() {
+ if *indirsflag == "" {
+ d.Usage("select input directories with '-i' option")
+ }
+ // An output file is required for textfmt and optional for percent.
+ if d.cmd == textfmtMode || (d.cmd == percentMode && *textfmtoutflag != "") {
+ if *textfmtoutflag == "" {
+ d.Usage("select output file name with '-o' option")
+ }
+ var err error
+ d.textfmtoutf, err = os.Create(*textfmtoutflag)
+ if err != nil {
+ d.Usage(fmt.Sprintf("unable to open textfmt output file %q: %v", *textfmtoutflag, err))
+ }
+ }
+ if d.cmd == debugDumpMode {
+ // Emit a preamble warning that the dump format is unstable,
+ // recording the command line that produced the dump.
+ fmt.Printf("/* WARNING: the format of this dump is not stable and is\n")
+ fmt.Printf(" * expected to change from one Go release to the next.\n")
+ fmt.Printf(" *\n")
+ fmt.Printf(" * produced by:\n")
+ args := append([]string{os.Args[0]}, debugDumpMode)
+ args = append(args, os.Args[1:]...)
+ fmt.Printf(" *\t%s\n", strings.Join(args, " "))
+ fmt.Printf(" */\n")
+ }
+}
+
+// BeginPod resets the per-pod function payload table at the start of
+// each new pod (meta-data file plus counter data files).
+func (d *dstate) BeginPod(p pods.Pod) {
+ d.mm = make(map[pkfunc]decodecounter.FuncPayload)
+}
+
+// EndPod is invoked once all files in a pod have been visited. In
+// debugdump mode, mode/granularity clash detection is reset so that
+// each pod is checked independently.
+func (d *dstate) EndPod(p pods.Pod) {
+ if d.cmd == debugDumpMode {
+ d.cm.ResetModeAndGranularity()
+ }
+}
+
+// BeginCounterDataFile is invoked at the start of each counter data
+// file; in debugdump mode it prints the file name and any
+// GOOS/GOARCH/args metadata recorded in the file.
+func (d *dstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+ dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx)
+ if d.cmd == debugDumpMode {
+ fmt.Printf("data file %s", cdf)
+ if cdr.Goos() != "" {
+ fmt.Printf(" GOOS=%s", cdr.Goos())
+ }
+ if cdr.Goarch() != "" {
+ fmt.Printf(" GOARCH=%s", cdr.Goarch())
+ }
+ if len(cdr.OsArgs()) != 0 {
+ // NOTE(review): this format string already ends in "\n", so
+ // together with the Printf below, files that record args are
+ // followed by a blank line.
+ fmt.Printf(" program args: %+v\n", cdr.OsArgs())
+ }
+ fmt.Printf("\n")
+ }
+}
+
+// EndCounterDataFile is a no-op for dump operations.
+func (d *dstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+}
+
+// VisitFuncCounterData merges the counter payload for a single
+// function into the accumulated table d.mm, growing the stored
+// counter slice if needed. Payloads whose package/function IDs are
+// inconsistent with the meta-data (per d.pkm) are warned about and
+// dropped; counter overflow during the merge produces a warning.
+func (d *dstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
+ if nf, ok := d.pkm[data.PkgIdx]; !ok || data.FuncIdx > nf {
+ warn("func payload inconsistency: id [p=%d,f=%d] nf=%d len(ctrs)=%d in VisitFuncCounterData, ignored", data.PkgIdx, data.FuncIdx, nf, len(data.Counters))
+ return
+ }
+ key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
+ val, found := d.mm[key]
+
+ dbgtrace(5, "ctr visit pk=%d fid=%d found=%v len(val.ctrs)=%d len(data.ctrs)=%d", data.PkgIdx, data.FuncIdx, found, len(val.Counters), len(data.Counters))
+
+ // Widen the stored counter slice to the incoming length,
+ // preserving any previously merged values.
+ if len(val.Counters) < len(data.Counters) {
+ t := val.Counters
+ val.Counters = d.AllocateCounters(len(data.Counters))
+ copy(val.Counters, t)
+ }
+ err, overflow := d.cm.MergeCounters(val.Counters, data.Counters)
+ if err != nil {
+ fatal("%v", err)
+ }
+ if overflow {
+ warn("uint32 overflow during counter merge")
+ }
+ d.mm[key] = val
+}
+
+// EndCounters is a no-op for dump operations.
+func (d *dstate) EndCounters() {
+}
+
+// VisitMetaDataFile records counter mode/granularity (checking for
+// clashes with previously seen files), lazily creates the output
+// formatter, and builds the package->function-count table d.pkm used
+// to validate counter data entries.
+func (d *dstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ newgran := mfr.CounterGranularity()
+ newmode := mfr.CounterMode()
+ if err := d.cm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
+ fatal("%v", err)
+ }
+ if d.cmd == debugDumpMode {
+ fmt.Printf("Cover mode: %s\n", newmode.String())
+ fmt.Printf("Cover granularity: %s\n", newgran.String())
+ }
+ if d.format == nil {
+ d.format = cformat.NewFormatter(mfr.CounterMode())
+ }
+
+ // To provide an additional layer of checking when reading counter
+ // data, walk the meta-data file to determine the set of legal
+ // package/function combinations. This will help catch bugs in the
+ // counter file reader.
+ d.pkm = make(map[uint32]uint32)
+ np := uint32(mfr.NumPackages())
+ payload := []byte{}
+ for pkIdx := uint32(0); pkIdx < np; pkIdx++ {
+ var pd *decodemeta.CoverageMetaDataDecoder
+ var err error
+ pd, payload, err = mfr.GetPackageDecoder(pkIdx, payload)
+ if err != nil {
+ fatal("reading pkg %d from meta-file %s: %s", pkIdx, mdf, err)
+ }
+ d.pkm[pkIdx] = pd.NumFuncs()
+ }
+}
+
+// BeginPackage records the identity (name, import path, module path)
+// of the package about to be visited, resets the per-package dump
+// preamble state, and tells the formatter which package is current.
+func (d *dstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+ d.preambleEmitted = false
+ d.pkgImportPath = pd.PackagePath()
+ d.pkgName = pd.PackageName()
+ d.modulePath = pd.ModulePath()
+ if d.cmd == pkglistMode {
+ d.pkgpaths[d.pkgImportPath] = struct{}{}
+ }
+ d.format.SetPackage(pd.PackagePath())
+}
+
+// EndPackage is a no-op for dump operations.
+func (d *dstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+}
+
+// VisitFunc processes the meta-data for a single function: it feeds
+// each coverable unit (with its merged execution count, zero if no
+// counter data was seen) to the formatter, updates total/covered
+// statement tallies, and in debugdump mode prints the per-unit
+// details (suppressed for unexecuted functions when -live is set).
+func (d *dstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
+ var counters []uint32
+ key := pkfunc{pk: pkgIdx, fcn: fnIdx}
+ v, haveCounters := d.mm[key]
+
+ dbgtrace(5, "meta visit pk=%d fid=%d fname=%s file=%s found=%v len(val.ctrs)=%d", pkgIdx, fnIdx, fd.Funcname, fd.Srcfile, haveCounters, len(v.Counters))
+
+ suppressOutput := false
+ if haveCounters {
+ counters = v.Counters
+ } else if d.cmd == debugDumpMode && *liveflag {
+ suppressOutput = true
+ }
+
+ if d.cmd == debugDumpMode && !suppressOutput {
+ // Emit the package preamble once, before the first function
+ // printed for the package.
+ if !d.preambleEmitted {
+ fmt.Printf("\nPackage path: %s\n", d.pkgImportPath)
+ fmt.Printf("Package name: %s\n", d.pkgName)
+ fmt.Printf("Module path: %s\n", d.modulePath)
+ d.preambleEmitted = true
+ }
+ fmt.Printf("\nFunc: %s\n", fd.Funcname)
+ fmt.Printf("Srcfile: %s\n", fd.Srcfile)
+ }
+ for i := 0; i < len(fd.Units); i++ {
+ u := fd.Units[i]
+ var count uint32
+ if counters != nil {
+ count = counters[i]
+ }
+ d.format.AddUnit(fd.Srcfile, fd.Funcname, fd.Lit, u, count)
+ if d.cmd == debugDumpMode && !suppressOutput {
+ fmt.Printf("%d: L%d:C%d -- L%d:C%d ",
+ i, u.StLine, u.StCol, u.EnLine, u.EnCol)
+ if u.Parent != 0 {
+ fmt.Printf("Parent:%d = %d\n", u.Parent, count)
+ } else {
+ fmt.Printf("NS=%d = %d\n", u.NxStmts, count)
+ }
+ }
+ d.totalStmts += int(u.NxStmts)
+ if count != 0 {
+ d.coveredStmts += int(u.NxStmts)
+ }
+ }
+}
+
+// Finish emits the final, mode-specific output once all input files
+// have been visited: percent/func reports to stdout, textfmt output
+// to the -o file, statement tallies for debugdump, and a sorted
+// import path list for pkglist.
+func (d *dstate) Finish() {
+ // d.format maybe nil here if the specified input dir was empty.
+ if d.format != nil {
+ if d.cmd == percentMode {
+ d.format.EmitPercent(os.Stdout, "", false)
+ }
+ if d.cmd == funcMode {
+ d.format.EmitFuncs(os.Stdout)
+ }
+ if d.textfmtoutf != nil {
+ if err := d.format.EmitTextual(d.textfmtoutf); err != nil {
+ fatal("writing to %s: %v", *textfmtoutflag, err)
+ }
+ }
+ }
+ if d.textfmtoutf != nil {
+ if err := d.textfmtoutf.Close(); err != nil {
+ fatal("closing textfmt output file %s: %v", *textfmtoutflag, err)
+ }
+ }
+ if d.cmd == debugDumpMode {
+ fmt.Printf("totalStmts: %d coveredStmts: %d\n", d.totalStmts, d.coveredStmts)
+ }
+ if d.cmd == pkglistMode {
+ // Sort for deterministic output.
+ pkgs := make([]string, 0, len(d.pkgpaths))
+ for p := range d.pkgpaths {
+ pkgs = append(pkgs, p)
+ }
+ sort.Strings(pkgs)
+ for _, p := range pkgs {
+ fmt.Printf("%s\n", p)
+ }
+ }
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and apis to support the "merge"
+// subcommand of "go tool covdata".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+)
+
+// Flags registered by the merge subcommand; see makeMergeOp.
+var outdirflag *string
+var pcombineflag *bool
+
+// makeMergeOp creates the merge operation, registering its
+// subcommand-specific command line flags (-o, -pcombine).
+func makeMergeOp() covOperation {
+ outdirflag = flag.String("o", "", "Output directory to write")
+ pcombineflag = flag.Bool("pcombine", false, "Combine profiles derived from distinct program executables")
+ m := &mstate{
+ mm: newMetaMerge(),
+ }
+ return m
+}
+
+// mstate encapsulates state and provides methods for implementing the
+// merge operation. This type implements the CovDataVisitor interface,
+// and is designed to be used in concert with the CovDataReader
+// utility, which abstracts away most of the grubby details of reading
+// coverage data files. Most of the heavy lifting for merging is done
+// using apis from 'metaMerge' (this is mainly a wrapper around that
+// functionality).
+type mstate struct {
+ mm *metaMerge
+}
+
+// Usage prints a merge-specific usage message (preceded by msg, if
+// non-empty) with an example, then exits with status 2.
+func (m *mstate) Usage(msg string) {
+ if len(msg) > 0 {
+ fmt.Fprintf(os.Stderr, "error: %s\n", msg)
+ }
+ fmt.Fprintf(os.Stderr, "usage: go tool covdata merge -i=<directories> -o=<dir>\n\n")
+ flag.PrintDefaults()
+ fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
+ fmt.Fprintf(os.Stderr, " go tool covdata merge -i=dir1,dir2,dir3 -o=outdir\n\n")
+ fmt.Fprintf(os.Stderr, " \tmerges all files in dir1/dir2/dir3\n")
+ fmt.Fprintf(os.Stderr, " \tinto output dir outdir\n")
+ Exit(2)
+}
+
+// Setup verifies that both the input (-i) and output (-o)
+// directories were supplied on the command line.
+func (m *mstate) Setup() {
+ if *indirsflag == "" {
+ m.Usage("select input directories with '-i' option")
+ }
+ if *outdirflag == "" {
+ m.Usage("select output directory with '-o' option")
+ }
+}
+
+// BeginPod delegates per-pod setup to the metaMerge helper.
+func (m *mstate) BeginPod(p pods.Pod) {
+ m.mm.beginPod()
+}
+
+// EndPod delegates per-pod finalization (including any per-pod
+// output writing) to the metaMerge helper.
+func (m *mstate) EndPod(p pods.Pod) {
+ m.mm.endPod(*pcombineflag)
+}
+
+// BeginCounterDataFile folds the file's args/GOOS/GOARCH metadata
+// into the merged argument state via the metaMerge helper.
+func (m *mstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+ dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx)
+ m.mm.beginCounterDataFile(cdr)
+}
+
+// EndCounterDataFile is a no-op for the merge operation.
+func (m *mstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
+}
+
+// VisitFuncCounterData delegates counter payload merging to the
+// metaMerge helper.
+func (m *mstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
+ m.mm.visitFuncCounterData(data)
+}
+
+// EndCounters is a no-op for the merge operation.
+func (m *mstate) EndCounters() {
+}
+
+// VisitMetaDataFile delegates meta-data file bookkeeping to the
+// metaMerge helper.
+func (m *mstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ m.mm.visitMetaDataFile(mdf, mfr)
+}
+
+// BeginPackage delegates per-package setup to the metaMerge helper.
+func (m *mstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+ dbgtrace(3, "VisitPackage(pk=%d path=%s)", pkgIdx, pd.PackagePath())
+ m.mm.visitPackage(pd, pkgIdx, *pcombineflag)
+}
+
+// EndPackage is a no-op for the merge operation.
+func (m *mstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
+}
+
+// VisitFunc delegates per-function meta-data handling to the
+// metaMerge helper.
+func (m *mstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
+ m.mm.visitFunc(pkgIdx, fnIdx, fd, mergeMode, *pcombineflag)
+}
+
+// Finish writes the combined meta-data and counter data files, but
+// only for -pcombine merges; for other merges output is written
+// pod-by-pod in EndPod.
+func (m *mstate) Finish() {
+ if *pcombineflag {
+ finalHash := m.mm.emitMeta(*outdirflag, true)
+ m.mm.emitCounters(*outdirflag, finalHash)
+ }
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and apis that support merging of
+// meta-data information. It helps implement the "merge", "subtract",
+// and "intersect" subcommands.
+
+import (
+ "crypto/md5"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/calloc"
+ "internal/coverage/cmerge"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/encodecounter"
+ "internal/coverage/encodemeta"
+ "internal/coverage/slicewriter"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "time"
+ "unsafe"
+)
+
+// metaMerge provides state and methods to help manage the process
+// of selecting or merging meta-data files. There are three cases
+// of interest here: the "-pcombine" flag provided by merge, the
+// "-pkg" option provided by all merge/subtract/intersect, and
+// a regular vanilla merge with no package selection.
+//
+// In the -pcombine case, we're essentially glomming together all the
+// meta-data for all packages and all functions, meaning that
+// everything we see in a given package needs to be added into the
+// meta-data file builder; we emit a single meta-data file at the end
+// of the run.
+//
+// In the -pkg case, we will typically emit a single meta-data file
+// per input pod, where that new meta-data file contains entries for
+// just the selected packages.
+//
+// In the third case (vanilla merge with no combining or package
+// selection) we can carry over meta-data files without touching them
+// at all (only counter data files will be merged).
+type metaMerge struct {
+ calloc.BatchCounterAlloc
+ cmerge.Merger
+ // maps package import path to package state
+ pkm map[string]*pkstate
+ // list of packages
+ pkgs []*pkstate
+ // current package state
+ p *pkstate
+ // current pod state
+ pod *podstate
+ // counter data file osargs/goos/goarch state
+ astate *argstate
+}
+
+// pkstate records the merge state of a single package: its position
+// in the output meta-data file, its accumulated counter payloads,
+// and its meta-data blob (or, for -pcombine, builder state).
+type pkstate struct {
+ // index of package within meta-data file.
+ pkgIdx uint32
+ // this maps function index within the package to counter data payload
+ ctab map[uint32]decodecounter.FuncPayload
+ // pointer to meta-data blob for package
+ mdblob []byte
+ // filled in only for -pcombine merges
+ *pcombinestate
+}
+
+// podstate records state for the pod (meta-data file plus counter
+// data files) currently being visited.
+type podstate struct {
+ // accumulated counter payloads, keyed by package/function index
+ pmm map[pkfunc]decodecounter.FuncPayload
+ // path of the pod's meta-data file
+ mdf string
+ // reader for the pod's meta-data file
+ mfr *decodemeta.CoverageMetaFileReader
+ // hash of the meta-data file, as reported by the reader
+ fileHash [16]byte
+}
+
+// pkfunc identifies a function by package index and function index.
+type pkfunc struct {
+ pk, fcn uint32
+}
+
+// pcombinestate holds the additional per-package state needed only
+// for "-pcombine" merges, where new meta-data is built up rather
+// than copied through.
+type pcombinestate struct {
+ // Meta-data builder for the package.
+ cmdb *encodemeta.CoverageMetaDataBuilder
+ // Maps function meta-data hash to new function index in the
+ // new version of the package we're building.
+ ftab map[[16]byte]uint32
+}
+
+// newMetaMerge returns a freshly initialized metaMerge helper.
+func newMetaMerge() *metaMerge {
+ return &metaMerge{
+ pkm: make(map[string]*pkstate),
+ astate: &argstate{},
+ }
+}
+
+// visitMetaDataFile records the identity (path, reader, hash) of the
+// current pod's meta-data file, and registers its counter mode and
+// granularity, flagging any clash with previously visited files.
+func (mm *metaMerge) visitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
+ dbgtrace(2, "visitMetaDataFile(mdf=%s)", mdf)
+
+ // Record meta-data file name.
+ mm.pod.mdf = mdf
+ // Keep a pointer to the file-level reader.
+ mm.pod.mfr = mfr
+ // Record file hash.
+ mm.pod.fileHash = mfr.FileHash()
+ // Counter mode and granularity -- detect and record clashes here.
+ newgran := mfr.CounterGranularity()
+ newmode := mfr.CounterMode()
+ if err := mm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
+ fatal("%v", err)
+ }
+}
+
+// beginCounterDataFile folds the os args and GOOS/GOARCH values
+// recorded in the counter data file into the merged argument state.
+func (mm *metaMerge) beginCounterDataFile(cdr *decodecounter.CounterDataReader) {
+ state := argvalues{
+ osargs: cdr.OsArgs(),
+ goos: cdr.Goos(),
+ goarch: cdr.Goarch(),
+ }
+ mm.astate.Merge(state)
+}
+
+// copyMetaDataFile makes a verbatim copy of the meta-data file
+// inpath to outpath, preserving the input file's permission bits.
+// Any failure is fatal.
+func copyMetaDataFile(inpath, outpath string) {
+ inf, err := os.Open(inpath)
+ if err != nil {
+ fatal("opening input meta-data file %s: %v", inpath, err)
+ }
+ defer inf.Close()
+
+ fi, err := inf.Stat()
+ if err != nil {
+ fatal("accessing input meta-data file %s: %v", inpath, err)
+ }
+
+ outf, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
+ if err != nil {
+ fatal("opening output meta-data file %s: %v", outpath, err)
+ }
+
+ _, err = io.Copy(outf, inf)
+ // A failed Close may indicate that buffered data was never
+ // flushed to disk; treat it as a write error if the copy itself
+ // succeeded.
+ if cerr := outf.Close(); err == nil {
+ err = cerr
+ }
+ if err != nil {
+ fatal("writing output meta-data file %s: %v", outpath, err)
+ }
+}
+
+// beginPod resets the per-pod state at the start of each new pod.
+func (mm *metaMerge) beginPod() {
+ mm.pod = &podstate{
+ pmm: make(map[pkfunc]decodecounter.FuncPayload),
+ }
+}
+
+// endPod handles actions needed when we're done visiting all of
+// the things in a pod -- counter files and meta-data file. There are
+// three cases of interest here:
+//
+// Case 1: in an unconditional merge (we're not selecting a specific set of
+// packages using "-pkg", and the "-pcombine" option is not in use),
+// we can simply copy over the meta-data file from input to output.
+//
+// Case 2: if this is a select merge (-pkg is in effect), then at
+// this point we write out a new smaller meta-data file that includes
+// only the packages of interest. At this point we also emit a merged
+// counter data file as well.
+//
+// Case 3: if "-pcombine" is in effect, we don't write anything at
+// this point (all writes will happen at the end of the run).
+func (mm *metaMerge) endPod(pcombine bool) {
+ if pcombine {
+ // Just clear out the pod data, we'll do all the
+ // heavy lifting at the end.
+ mm.pod = nil
+ return
+ }
+
+ finalHash := mm.pod.fileHash
+ if matchpkg != nil {
+ // Emit modified meta-data file for this pod.
+ finalHash = mm.emitMeta(*outdirflag, pcombine)
+ } else {
+ // Copy meta-data file for this pod to the output directory.
+ inpath := mm.pod.mdf
+ mdfbase := filepath.Base(mm.pod.mdf)
+ outpath := filepath.Join(*outdirflag, mdfbase)
+ copyMetaDataFile(inpath, outpath)
+ }
+
+ // Emit accumulated counter data for this pod.
+ mm.emitCounters(*outdirflag, finalHash)
+
+ // Reset package state.
+ mm.pkm = make(map[string]*pkstate)
+ mm.pkgs = nil
+ mm.pod = nil
+
+ // Reset counter mode and granularity
+ mm.ResetModeAndGranularity()
+}
+
+// emitMeta encodes and writes out a new coverage meta-data file into
+// outdir, returning the hash that names the file. It is used both
+// for "-pcombine" merges (pcombine true: package blobs are
+// regenerated from the per-package builders) and for "-pkg"
+// selection merges (pcombine false: the previously captured package
+// blobs are written through unchanged).
+func (mm *metaMerge) emitMeta(outdir string, pcombine bool) [16]byte {
+ fh := md5.New()
+ blobs := [][]byte{}
+ tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
+ for _, p := range mm.pkgs {
+ var blob []byte
+ if pcombine {
+ mdw := &slicewriter.WriteSeeker{}
+ p.cmdb.Emit(mdw)
+ blob = mdw.BytesWritten()
+ } else {
+ blob = p.mdblob
+ }
+ ph := md5.Sum(blob)
+ blobs = append(blobs, blob)
+ // The file hash is the hash of the per-package blob hashes.
+ if _, err := fh.Write(ph[:]); err != nil {
+ panic(fmt.Sprintf("internal error: md5 sum failed: %v", err))
+ }
+ tlen += uint64(len(blob))
+ }
+ var finalHash [16]byte
+ fhh := fh.Sum(nil)
+ copy(finalHash[:], fhh)
+
+ // Open meta-file for writing.
+ fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, finalHash)
+ fpath := filepath.Join(outdir, fn)
+ mf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ fatal("unable to open output meta-data file %s: %v", fpath, err)
+ }
+
+ // Encode and write.
+ mfw := encodemeta.NewCoverageMetaFileWriter(fpath, mf)
+ err = mfw.Write(finalHash, blobs, mm.Mode(), mm.Granularity())
+ if err != nil {
+ fatal("error writing %s: %v\n", fpath, err)
+ }
+ return finalHash
+}
+
+// emitCounters encodes and writes out a new counter data file into
+// outdir, named using the hash of the corresponding meta-data file.
+// Counter payloads are supplied to the writer via mm's NumFuncs and
+// VisitFuncs methods; the accumulated argument state is emitted and
+// then reset for the next pod.
+func (mm *metaMerge) emitCounters(outdir string, metaHash [16]byte) {
+ // Open output file. The file naming scheme is intended to mimic
+ // that used when running a coverage-instrumented binary, for
+ // consistency (however the process ID is not meaningful here, so
+ // use a value of zero).
+ var dummyPID int
+ fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, dummyPID, time.Now().UnixNano())
+ fpath := filepath.Join(outdir, fn)
+ cf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ fatal("opening counter data file %s: %v", fpath, err)
+ }
+ defer func() {
+ if err := cf.Close(); err != nil {
+ fatal("error closing output counter data file %s: %v", fpath, err)
+ }
+ }()
+
+ args := mm.astate.ArgsSummary()
+ cfw := encodecounter.NewCoverageDataWriter(cf, coverage.CtrULeb128)
+ if err := cfw.Write(metaHash, args, mm); err != nil {
+ fatal("counter file write failed: %v", err)
+ }
+ mm.astate = &argstate{}
+}
+
+// NumFuncs is used while writing the counter data files; it
+// implements the 'NumFuncs' method required by the interface
+// internal/coverage/encodecounter/CounterVisitor. It returns the
+// total number of functions (with counter payloads) across all
+// packages.
+func (mm *metaMerge) NumFuncs() (int, error) {
+ rval := 0
+ for _, p := range mm.pkgs {
+ rval += len(p.ctab)
+ }
+ return rval, nil
+}
+
+// VisitFuncs is used while writing the counter data files; it
+// implements the 'VisitFuncs' method required by the interface
+// internal/coverage/encodecounter/CounterVisitor.
+func (mm *metaMerge) VisitFuncs(f encodecounter.CounterVisitorFn) error {
+ if *verbflag >= 4 {
+ fmt.Printf("counterVisitor invoked\n")
+ }
+ // For each package, for each function, construct counter
+ // array and then call "f" on it.
+ for pidx, p := range mm.pkgs {
+ fids := make([]int, 0, len(p.ctab))
+ for fid := range p.ctab {
+ fids = append(fids, int(fid))
+ }
+ sort.Ints(fids)
+ if *verbflag >= 4 {
+ fmt.Printf("fids for pk=%d: %+v\n", pidx, fids)
+ }
+ for _, fid := range fids {
+ fp := p.ctab[uint32(fid)]
+ if *verbflag >= 4 {
+ fmt.Printf("counter write for pk=%d fid=%d len(ctrs)=%d\n", pidx, fid, len(fp.Counters))
+ }
+ if err := f(uint32(pidx), uint32(fid), fp.Counters); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
// visitPackage begins processing of package pkgIdx from the current
// meta-data file. On first encounter of a package path, a new pkstate
// is created and registered in mm.pkm/mm.pkgs. With pcombine set, a
// meta-data builder is attached so functions can be re-encoded into
// combined meta-data; otherwise the package's raw meta-data blob is
// captured for verbatim reuse. The selected package is recorded in
// mm.p for use by subsequent visitFunc calls.
func (mm *metaMerge) visitPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32, pcombine bool) {
	p, ok := mm.pkm[pd.PackagePath()]
	if !ok {
		// First time we've seen this package path.
		p = &pkstate{
			pkgIdx: uint32(len(mm.pkgs)),
		}
		mm.pkgs = append(mm.pkgs, p)
		mm.pkm[pd.PackagePath()] = p
		if pcombine {
			// "Combine programs" mode: re-encode functions into a
			// fresh meta-data builder, dedup'd via ftab (keyed by
			// function hash).
			p.pcombinestate = new(pcombinestate)
			cmdb, err := encodemeta.NewCoverageMetaDataBuilder(pd.PackagePath(), pd.PackageName(), pd.ModulePath())
			if err != nil {
				fatal("fatal error creating meta-data builder: %v", err)
			}
			dbgtrace(2, "install new pkm entry for package %s pk=%d", pd.PackagePath(), pkgIdx)
			p.cmdb = cmdb
			p.ftab = make(map[[16]byte]uint32)
		} else {
			// Reuse the package's existing meta-data payload as-is.
			var err error
			p.mdblob, err = mm.pod.mfr.GetPackagePayload(pkgIdx, nil)
			if err != nil {
				fatal("error extracting package %d payload from %s: %v",
					pkgIdx, mm.pod.mdf, err)
			}
		}
		p.ctab = make(map[uint32]decodecounter.FuncPayload)
	}
	// Make this the "current" package for visitFunc.
	mm.p = p
}
+
// visitFuncCounterData merges the counters in data into the
// accumulated counters for the same function in the current pod
// (mm.pod.pmm), growing the destination slice when the incoming
// payload has more counters.
func (mm *metaMerge) visitFuncCounterData(data decodecounter.FuncPayload) {
	key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
	val := mm.pod.pmm[key]
	// FIXME: in theory either A) len(val.Counters) is zero, or B)
	// the two lengths are equal. Assert if not? Of course, we could
	// see odd stuff if there is source file skew.
	if *verbflag > 4 {
		fmt.Printf("visit pk=%d fid=%d len(counters)=%d\n", data.PkgIdx, data.FuncIdx, len(data.Counters))
	}
	if len(val.Counters) < len(data.Counters) {
		// Grow, preserving any counters already accumulated.
		t := val.Counters
		val.Counters = mm.AllocateCounters(len(data.Counters))
		copy(val.Counters, t)
	}
	err, overflow := mm.MergeCounters(val.Counters, data.Counters)
	if err != nil {
		fatal("%v", err)
	}
	if overflow {
		warn("uint32 overflow during counter merge")
	}
	// val is a value, not a pointer: store back so the (possibly
	// reallocated) Counters slice is retained in the map.
	mm.pod.pmm[key] = val
}
+
// visitFunc processes a single function fd from the current meta-data
// file. In "combine programs" mode (pcombine), the function is hashed
// and registered with the current package's meta-data builder if not
// seen before, and fnIdx is remapped to the combined index. If
// counter data exists for the function, it is then installed (or
// merged) into the package's ctab. verb names the operation
// ("merge"/"subtract"/"intersect") and is used only for tracing and
// sanity checking.
func (mm *metaMerge) visitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc, verb string, pcombine bool) {
	if *verbflag >= 3 {
		fmt.Printf("visit pk=%d fid=%d func %s\n", pkgIdx, fnIdx, fd.Funcname)
	}

	var counters []uint32
	key := pkfunc{pk: pkgIdx, fcn: fnIdx}
	v, haveCounters := mm.pod.pmm[key]
	if haveCounters {
		counters = v.Counters
	}

	if pcombine {
		// If the merge is running in "combine programs" mode, then hash
		// the function and look it up in the package ftab to see if we've
		// encountered it before. If we haven't, then register it with the
		// meta-data builder.
		fnhash := encodemeta.HashFuncDesc(fd)
		gfidx, ok := mm.p.ftab[fnhash]
		if !ok {
			// We haven't seen this function before, need to add it to
			// the meta data.
			gfidx = uint32(mm.p.cmdb.AddFunc(*fd))
			mm.p.ftab[fnhash] = gfidx
			if *verbflag >= 3 {
				fmt.Printf("new meta entry for fn %s fid=%d\n", fd.Funcname, gfidx)
			}
		}
		// From here on, fnIdx is the index in the combined meta-data.
		fnIdx = gfidx
	}
	if !haveCounters {
		// Function present in meta-data but never executed; nothing
		// to record in ctab.
		return
	}

	// Install counters in package ctab.
	gfp, ok := mm.p.ctab[fnIdx]
	if ok {
		if verb == "subtract" || verb == "intersect" {
			// Subtract/intersect operate on pmm only; each function
			// should reach ctab at most once per pod.
			panic("should never see this for intersect/subtract")
		}
		if *verbflag >= 3 {
			fmt.Printf("counter merge for %s fidx=%d\n", fd.Funcname, fnIdx)
		}
		// Merge.
		err, overflow := mm.MergeCounters(gfp.Counters, counters)
		if err != nil {
			fatal("%v", err)
		}
		if overflow {
			warn("uint32 overflow during counter merge")
		}
		mm.p.ctab[fnIdx] = gfp
	} else {
		if *verbflag >= 3 {
			fmt.Printf("null merge for %s fidx %d\n", fd.Funcname, fnIdx)
		}
		// First counters for this function: adopt the payload,
		// rewriting its indices to the (possibly remapped) package
		// and function IDs.
		gfp := v
		gfp.PkgIdx = mm.p.pkgIdx
		gfp.FuncIdx = fnIdx
		mm.p.ctab[fnIdx] = gfp
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This file contains functions and apis to support the "subtract" and
+// "intersect" subcommands of "go tool covdata".
+
+import (
+ "flag"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+ "strings"
+)
+
+// makeSubtractIntersectOp creates a subtract or intersect operation.
+// 'mode' here must be either "subtract" or "intersect".
+func makeSubtractIntersectOp(mode string) covOperation {
+ outdirflag = flag.String("o", "", "Output directory to write")
+ s := &sstate{
+ mode: mode,
+ mm: newMetaMerge(),
+ inidx: -1,
+ }
+ return s
+}
+
// sstate holds state needed to implement subtraction and intersection
// operations on code coverage data files. This type provides methods
// to implement the CovDataVisitor interface, and is designed to be
// used in concert with the CovDataReader utility, which abstracts
// away most of the grubby details of reading coverage data files.
type sstate struct {
	mm    *metaMerge // shared helper that accumulates and writes results
	indir string     // current input directory
	inidx int        // index of input dir being visited (-1 before the first)
	mode  string     // operation: subtractMode or intersectMode
	// Used only for intersection; keyed by pkg/fn ID, it keeps track of
	// just the set of functions for which we have data in the current
	// input directory.
	imm map[pkfunc]struct{}
}
+
// Usage prints a usage message for the subtract/intersect subcommands
// to stderr (preceded by msg as an error, if non-empty) and exits
// with status 2.
func (s *sstate) Usage(msg string) {
	if len(msg) > 0 {
		fmt.Fprintf(os.Stderr, "error: %s\n", msg)
	}
	fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=dir1,dir2 -o=<dir>\n\n", s.mode)
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr, "\nExamples:\n\n")
	// "subtracts dir2 from dir1" vs. "intersects dir2 with dir1".
	op := "from"
	if s.mode == intersectMode {
		op = "with"
	}
	fmt.Fprintf(os.Stderr, " go tool covdata %s -i=dir1,dir2 -o=outdir\n\n", s.mode)
	fmt.Fprintf(os.Stderr, " \t%ss dir2 %s dir1, writing result\n", s.mode, op)
	fmt.Fprintf(os.Stderr, " \tinto output dir outdir.\n")
	os.Exit(2)
}
+
+func (s *sstate) Setup() {
+ if *indirsflag == "" {
+ usage("select input directories with '-i' option")
+ }
+ indirs := strings.Split(*indirsflag, ",")
+ if s.mode == subtractMode && len(indirs) != 2 {
+ usage("supply exactly two input dirs for subtract operation")
+ }
+ if *outdirflag == "" {
+ usage("select output directory with '-o' option")
+ }
+}
+
// BeginPod is invoked at the start of each pod (one meta-data file
// plus its counter data files); per-pod state lives in the metaMerge
// helper.
func (s *sstate) BeginPod(p pods.Pod) {
	s.mm.beginPod()
}
+
// EndPod finishes the current pod, writing out its merged result.
// pcombine is always false here: subtract/intersect never combine
// meta-data across distinct programs.
func (s *sstate) EndPod(p pods.Pod) {
	const pcombine = false
	s.mm.endPod(pcombine)
}
+
// EndCounters runs after all counter data files in a pod have been
// visited; for intersection it applies any pruning still pending for
// the final input directory.
func (s *sstate) EndCounters() {
	if s.imm != nil {
		s.pruneCounters()
	}
}
+
+// pruneCounters performs a function-level partial intersection using the
+// current POD counter data (s.mm.pod.pmm) and the intersected data from
+// PODs in previous dirs (s.imm).
+func (s *sstate) pruneCounters() {
+ pkeys := make([]pkfunc, 0, len(s.mm.pod.pmm))
+ for k := range s.mm.pod.pmm {
+ pkeys = append(pkeys, k)
+ }
+ // Remove anything from pmm not found in imm. We don't need to
+ // go the other way (removing things from imm not found in pmm)
+ // since we don't add anything to imm if there is no pmm entry.
+ for _, k := range pkeys {
+ if _, found := s.imm[k]; !found {
+ delete(s.mm.pod.pmm, k)
+ }
+ }
+ s.imm = nil
+}
+
// BeginCounterDataFile tracks the transition from one input directory
// to the next as counter data files are visited, maintaining the
// per-dir function set (imm) used for intersection. Files must be
// presented grouped by input dir, in increasing dir order.
func (s *sstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
	dbgtrace(2, "visiting counter data file %s diridx %d", cdf, dirIdx)
	if s.inidx != dirIdx {
		if s.inidx > dirIdx {
			// We're relying on having data files presented in
			// the order they appear in the inputs (e.g. first all
			// data files from input dir 0, then dir 1, etc).
			panic("decreasing dir index, internal error")
		}
		if dirIdx == 0 {
			// No need to keep track of the functions in the first
			// directory, since that info will be replicated in
			// s.mm.pod.pmm.
			s.imm = nil
		} else {
			// We're now starting to visit the Nth directory, N != 0.
			if s.mode == intersectMode {
				if s.imm != nil {
					// Prune against the previous dir's function set
					// before starting a fresh set for this one.
					s.pruneCounters()
				}
				s.imm = make(map[pkfunc]struct{})
			}
		}
		s.inidx = dirIdx
	}
}
+
// EndCounterDataFile is a no-op; it exists to satisfy the
// CovDataVisitor interface.
func (s *sstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) {
}
+
// VisitFuncCounterData processes one function's counters from a
// counter data file. Data from the first input dir is recorded
// verbatim; data from later dirs is applied as a subtraction or
// intersection against the recorded counters.
func (s *sstate) VisitFuncCounterData(data decodecounter.FuncPayload) {
	key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}

	if *verbflag >= 5 {
		fmt.Printf("ctr visit fid=%d pk=%d inidx=%d data.Counters=%+v\n", data.FuncIdx, data.PkgIdx, s.inidx, data.Counters)
	}

	// If we're processing counter data from the initial (first) input
	// directory, then just install it into the counter data map
	// as usual.
	if s.inidx == 0 {
		s.mm.visitFuncCounterData(data)
		return
	}

	// If we're looking at counter data from a dir other than
	// the first, then perform the intersect/subtract.
	// NOTE(review): this assumes len(val.Counters) >= len(data.Counters)
	// (same function, same source); if inputs had source skew the
	// indexing below could go out of range — confirm upstream invariant.
	if val, ok := s.mm.pod.pmm[key]; ok {
		if s.mode == subtractMode {
			// Zero out any unit executed in the later dir.
			for i := 0; i < len(data.Counters); i++ {
				if data.Counters[i] != 0 {
					val.Counters[i] = 0
				}
			}
		} else if s.mode == intersectMode {
			s.imm[key] = struct{}{}
			// Keep a unit only if executed in both dirs.
			for i := 0; i < len(data.Counters); i++ {
				if data.Counters[i] == 0 {
					val.Counters[i] = 0
				}
			}
		}
	}
}
+
// VisitMetaDataFile begins processing of a pod's meta-data file. For
// intersection, the per-dir function set is (re)initialized here;
// it is discarded again for dir 0 in BeginCounterDataFile.
func (s *sstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
	if s.mode == intersectMode {
		s.imm = make(map[pkfunc]struct{})
	}
	s.mm.visitMetaDataFile(mdf, mfr)
}
+
// BeginPackage forwards the package to the metaMerge helper. The
// final 'false' disables program combination, which never applies to
// subtract/intersect.
func (s *sstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
	s.mm.visitPackage(pd, pkgIdx, false)
}
+
// EndPackage is a no-op; it exists to satisfy the CovDataVisitor
// interface.
func (s *sstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) {
}
+
// VisitFunc forwards each function in the meta-data to the metaMerge
// helper, tagged with the current operation mode ("subtract" or
// "intersect").
func (s *sstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) {
	s.mm.visitFunc(pkgIdx, fnIdx, fd, s.mode, false)
}
+
// Finish is a no-op; results are written pod-by-pod in EndPod.
func (s *sstate) Finish() {
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dep
+
// Dep1 returns a fixed value; it exists so the test programs exercise
// coverage in a second package ("prog/dep").
func Dep1() int {
	return 42
}
+
// PDep prints its argument, or panics on the sentinel value 1010101.
// The panic arm gives this package a statement the test runs never
// execute (callers pass small values).
func PDep(x int) {
	if x != 1010101 {
		println(x)
	} else {
		panic("bad")
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "prog/dep"
+)
+
//go:noinline
func first() { // NOTE: covdata tests assert exact source positions in this file (e.g. 13.14,15.2) — add no lines
	println("whee")
}
+
//go:noinline
func second() { // called from main only when len(os.Args) > 1
	println("oy")
}
+
//go:noinline
func third(x int) int { // positions of these units are asserted by covdata dump tests — keep line numbers stable
	if x != 0 {
		return 42
	}
	println("blarg")
	return 0
}
+
//go:noinline
func fourth() int { // never called: main's else-if guarding it is unreachable, so this stays at zero coverage
	return 99
}
+
func main() { // NOTE: trailing comments only — tests assert line/column positions in this file
	println(dep.Dep1())
	dep.PDep(2)
	if len(os.Args) > 1 { // args given: exercise second/third(1)
		second()
		third(1)
	} else if len(os.Args) > 2 { // never true (len>2 implies len>1); keeps fourth uncovered
		fourth()
	} else { // no args: exercise first/third(0)
		first()
		third(0)
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "prog/dep"
+)
+
//go:noinline
func fifth() { // executed when prog2 is run with args
	println("hubba")
}
+
//go:noinline
func sixth() { // executed when prog2 is run with no args
	println("wha?")
}
+
func main() { // second test program: shares package "main" path with prog1 for -pcombine merging
	println(dep.Dep1())
	if len(os.Args) > 1 { // arg count selects which function gets coverage
		fifth()
	} else {
		sixth()
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main_test
+
+import (
+ "fmt"
+ "internal/coverage/pods"
+ "internal/goexperiment"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
// debugtrace enables verbose logging of build/tool commands in the
// helpers below.
const debugtrace = false
+
+func gobuild(t *testing.T, indir string, bargs []string) {
+ t.Helper()
+
+ if debugtrace {
+ if indir != "" {
+ t.Logf("in dir %s: ", indir)
+ }
+ t.Logf("cmd: %s %+v\n", testenv.GoToolPath(t), bargs)
+ }
+ cmd := exec.Command(testenv.GoToolPath(t), bargs...)
+ cmd.Dir = indir
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## build output:\n%s", b)
+ }
+ if err != nil {
+ t.Fatalf("build error: %v", err)
+ }
+}
+
// emitFile copies the contents of file src to file dst, failing the
// test on any error.
func emitFile(t *testing.T, dst, src string) {
	contents, err := ioutil.ReadFile(src)
	if err != nil {
		t.Fatalf("error reading %q: %v", src, err)
	}
	err = ioutil.WriteFile(dst, contents, 0666)
	if err != nil {
		t.Fatalf("writing %q: %v", dst, err)
	}
}
+
+func buildProg(t *testing.T, prog string, dir string, tag string, flags []string) (string, string) {
+
+ // Create subdirs.
+ subdir := filepath.Join(dir, prog+"dir"+tag)
+ if err := os.Mkdir(subdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", subdir, err)
+ }
+ depdir := filepath.Join(subdir, "dep")
+ if err := os.Mkdir(depdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", depdir, err)
+ }
+
+ // Emit program.
+ insrc := filepath.Join("testdata", prog+".go")
+ src := filepath.Join(subdir, prog+".go")
+ emitFile(t, src, insrc)
+ indep := filepath.Join("testdata", "dep.go")
+ dep := filepath.Join(depdir, "dep.go")
+ emitFile(t, dep, indep)
+
+ // Emit go.mod.
+ mod := filepath.Join(subdir, "go.mod")
+ modsrc := fmt.Sprintf("\nmodule prog\n\ngo 1.19\n")
+ if err := ioutil.WriteFile(mod, []byte(modsrc), 0666); err != nil {
+ t.Fatal(err)
+ }
+ exepath := filepath.Join(subdir, prog+".exe")
+ bargs := []string{"build", "-cover", "-o", exepath}
+ bargs = append(bargs, flags...)
+ gobuild(t, subdir, bargs)
+ return exepath, subdir
+}
+
// state carries the paths shared by all subtests: the root temp dir,
// the built test programs and their source dirs, the covdata tool
// binary, and the four GOCOVERDIR output dirs filled by the
// instrumented runs.
type state struct {
	dir      string    // root temp dir
	exedir1  string    // source dir for prog1 (default counter mode)
	exedir2  string    // source dir for prog2
	exedir3  string    // source dir for prog1 (-covermode=atomic)
	exepath1 string    // prog1 executable, default mode
	exepath2 string    // prog2 executable
	exepath3 string    // prog1 executable, atomic mode
	tool     string    // path to built covdata tool
	outdirs  [4]string // GOCOVERDIR output dirs
}
+
// debugWorkDir, when set, routes test output to a fixed directory
// (/tmp/qqq) for post-mortem inspection instead of t.TempDir.
const debugWorkDir = false
+
// TestCovTool is the top-level driver: it builds the test programs
// and the covdata tool itself, runs the instrumented programs to
// populate four GOCOVERDIR output dirs, then forks off parallel
// subtests for each tool mode.
func TestCovTool(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	if !goexperiment.CoverageRedesign {
		t.Skipf("stubbed out due to goexperiment.CoverageRedesign=false")
	}
	dir := t.TempDir()
	if testing.Short() {
		t.Skip()
	}
	if debugWorkDir {
		// debugging
		dir = "/tmp/qqq"
		os.RemoveAll(dir)
		os.Mkdir(dir, 0777)
	}

	// Build prog1 twice (default "set" mode and atomic mode) plus prog2.
	s := state{
		dir: dir,
	}
	s.exepath1, s.exedir1 = buildProg(t, "prog1", dir, "", nil)
	s.exepath2, s.exedir2 = buildProg(t, "prog2", dir, "", nil)
	flags := []string{"-covermode=atomic"}
	s.exepath3, s.exedir3 = buildProg(t, "prog1", dir, "atomic", flags)

	// Build the tool.
	s.tool = filepath.Join(dir, "tool.exe")
	args := []string{"build", "-o", s.tool, "."}
	gobuild(t, "", args)

	// Create a few coverage output dirs.
	for i := 0; i < 4; i++ {
		d := filepath.Join(dir, fmt.Sprintf("covdata%d", i))
		s.outdirs[i] = d
		if err := os.Mkdir(d, 0777); err != nil {
			t.Fatalf("can't create outdir %s: %v", d, err)
		}
	}

	// Run instrumented program to generate some coverage data output files,
	// as follows:
	//
	// <tmp>/covdata0 -- prog1.go compiled -cover
	// <tmp>/covdata1 -- prog1.go compiled -cover
	// <tmp>/covdata2 -- prog1.go compiled -covermode=atomic
	// <tmp>/covdata3 -- prog1.go compiled -covermode=atomic
	//
	for m := 0; m < 2; m++ {
		for k := 0; k < 2; k++ {
			args := []string{}
			if k != 0 {
				args = append(args, "foo", "bar")
			}
			// Run k+1 times so the two dirs within a mode end up with
			// different counter totals.
			for i := 0; i <= k; i++ {
				exepath := s.exepath1
				if m != 0 {
					exepath = s.exepath3
				}
				cmd := exec.Command(exepath, args...)
				// cmd.Env starts nil, so the child env contains only
				// GOCOVERDIR; the test programs read os.Args only.
				cmd.Env = append(cmd.Env, "GOCOVERDIR="+s.outdirs[m*2+k])
				b, err := cmd.CombinedOutput()
				if len(b) != 0 {
					t.Logf("## instrumented run output:\n%s", b)
				}
				if err != nil {
					t.Fatalf("instrumented run error: %v", err)
				}
			}
		}
	}

	// At this point we can fork off a bunch of child tests
	// to check different tool modes.
	t.Run("MergeSimple", func(t *testing.T) {
		t.Parallel()
		testMergeSimple(t, s, s.outdirs[0], s.outdirs[1], "set")
		testMergeSimple(t, s, s.outdirs[2], s.outdirs[3], "atomic")
	})
	t.Run("MergeSelect", func(t *testing.T) {
		t.Parallel()
		testMergeSelect(t, s, s.outdirs[0], s.outdirs[1], "set")
		testMergeSelect(t, s, s.outdirs[2], s.outdirs[3], "atomic")
	})
	t.Run("MergePcombine", func(t *testing.T) {
		t.Parallel()
		testMergeCombinePrograms(t, s)
	})
	t.Run("Dump", func(t *testing.T) {
		t.Parallel()
		testDump(t, s)
	})
	t.Run("Percent", func(t *testing.T) {
		t.Parallel()
		testPercent(t, s)
	})
	t.Run("PkgList", func(t *testing.T) {
		t.Parallel()
		testPkgList(t, s)
	})
	t.Run("Textfmt", func(t *testing.T) {
		t.Parallel()
		testTextfmt(t, s)
	})
	t.Run("Subtract", func(t *testing.T) {
		t.Parallel()
		testSubtract(t, s)
	})
	t.Run("Intersect", func(t *testing.T) {
		t.Parallel()
		testIntersect(t, s, s.outdirs[0], s.outdirs[1], "set")
		testIntersect(t, s, s.outdirs[2], s.outdirs[3], "atomic")
	})
	t.Run("CounterClash", func(t *testing.T) {
		t.Parallel()
		testCounterClash(t, s)
	})
	t.Run("TestEmpty", func(t *testing.T) {
		t.Parallel()
		testEmpty(t, s)
	})
	t.Run("TestCommandLineErrors", func(t *testing.T) {
		t.Parallel()
		testCommandLineErrors(t, s, s.outdirs[0])
	})
}
+
// showToolInvocations logs every covdata tool command line via t.Logf.
const showToolInvocations = true
+
+func runToolOp(t *testing.T, s state, op string, args []string) []string {
+ // Perform tool run.
+ t.Helper()
+ args = append([]string{op}, args...)
+ if showToolInvocations {
+ t.Logf("%s cmd is: %s %+v", op, s.tool, args)
+ }
+ cmd := exec.Command(s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "## %s output: %s\n", op, string(b))
+ t.Fatalf("%q run error: %v", op, err)
+ }
+ output := strings.TrimSpace(string(b))
+ lines := strings.Split(output, "\n")
+ if len(lines) == 1 && lines[0] == "" {
+ lines = nil
+ }
+ return lines
+}
+
// testDump runs "covdata debugdump" over the first two output dirs
// and checks that key markers (program-args line, main package, main
// function) appear somewhere in the output.
func testDump(t *testing.T, s state) {
	// Run the dumper on the two dirs we generated.
	dargs := []string{"-pkg=main", "-live", "-i=" + s.outdirs[0] + "," + s.outdirs[1]}
	lines := runToolOp(t, s, "debugdump", dargs)

	// Sift through the output to make sure it has some key elements.
	testpoints := []struct {
		tag string
		re  *regexp.Regexp
	}{
		{
			"args",
			regexp.MustCompile(`^data file .+ GOOS=.+ GOARCH=.+ program args: .+$`),
		},
		{
			"main package",
			regexp.MustCompile(`^Package path: main\s*$`),
		},
		{
			"main function",
			regexp.MustCompile(`^Func: main\s*$`),
		},
	}

	bad := false
	for _, testpoint := range testpoints {
		found := false
		for _, line := range lines {
			if m := testpoint.re.FindStringSubmatch(line); m != nil {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("dump output regexp match failed for %s", testpoint.tag)
			bad = true
		}
	}
	// On failure, echo the full dump to ease diagnosis.
	if bad {
		dumplines(lines)
	}
}
+
// testPercent runs "covdata percent" over two output dirs and checks
// that a statement-coverage percentage line is reported.
func testPercent(t *testing.T, s state) {
	// Run the dumper on the two dirs we generated.
	dargs := []string{"-pkg=main", "-i=" + s.outdirs[0] + "," + s.outdirs[1]}
	lines := runToolOp(t, s, "percent", dargs)

	// Sift through the output to make sure it has the needful.
	testpoints := []struct {
		tag string
		re  *regexp.Regexp
	}{
		{
			"statement coverage percent",
			regexp.MustCompile(`coverage: \d+\.\d% of statements\s*$`),
		},
	}

	bad := false
	for _, testpoint := range testpoints {
		found := false
		for _, line := range lines {
			if m := testpoint.re.FindStringSubmatch(line); m != nil {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("percent output regexp match failed for %s", testpoint.tag)
			bad = true
		}
	}
	// On failure, echo the full output to ease diagnosis.
	if bad {
		dumplines(lines)
	}
}
// testPkgList checks that "covdata pkglist" over two dirs reports
// exactly the two expected package paths, one per line.
func testPkgList(t *testing.T, s state) {
	dargs := []string{"-i=" + s.outdirs[0] + "," + s.outdirs[1]}
	lines := runToolOp(t, s, "pkglist", dargs)

	want := []string{"main", "prog/dep"}
	bad := false
	if len(lines) != 2 {
		t.Errorf("expect pkglist to return two lines")
		bad = true
	} else {
		for i := 0; i < 2; i++ {
			lines[i] = strings.TrimSpace(lines[i])
			if want[i] != lines[i] {
				t.Errorf("line %d want %s got %s", i, want[i], lines[i])
				bad = true
			}
		}
	}
	if bad {
		dumplines(lines)
	}
}
+
+func testTextfmt(t *testing.T, s state) {
+ outf := s.dir + "/" + "t.txt"
+ dargs := []string{"-pkg=main", "-i=" + s.outdirs[0] + "," + s.outdirs[1],
+ "-o", outf}
+ lines := runToolOp(t, s, "textfmt", dargs)
+
+ // No output expected.
+ if len(lines) != 0 {
+ dumplines(lines)
+ t.Errorf("unexpected output from go tool covdata textfmt")
+ }
+
+ // Open and read the first few bits of the file.
+ payload, err := ioutil.ReadFile(outf)
+ if err != nil {
+ t.Errorf("opening %s: %v\n", outf, err)
+ }
+ lines = strings.Split(string(payload), "\n")
+ want0 := "mode: set"
+ if lines[0] != want0 {
+ dumplines(lines[0:10])
+ t.Errorf("textfmt: want %s got %s", want0, lines[0])
+ }
+ want1 := "prog/prog1.go:13.14,15.2 1 1"
+ if lines[1] != want1 {
+ dumplines(lines[0:10])
+ t.Errorf("textfmt: want %s got %s", want1, lines[1])
+ }
+}
+
// dumplines echoes each line to stderr, for diagnosing test failures.
func dumplines(lines []string) {
	for _, line := range lines {
		fmt.Fprintf(os.Stderr, "%s\n", line)
	}
}
+
// dumpCheck describes one expectation applied to debugdump output: a
// regexp that must (or, with negate, must not) match some line;
// nonzero/zero additionally constrain the first integer submatch.
type dumpCheck struct {
	tag     string         // label used in failure messages
	re      *regexp.Regexp // pattern applied to each output line
	negate  bool           // expect no line to match
	nonzero bool           // expect first submatch to be a nonzero integer
	zero    bool           // expect first submatch to be zero
}
+
+// runDumpChecks examines the output of "go tool covdata debugdump"
+// for a given output directory, looking for the presence or absence
+// of specific markers.
+func runDumpChecks(t *testing.T, s state, dir string, flags []string, checks []dumpCheck) {
+ dargs := []string{"-i", dir}
+ dargs = append(dargs, flags...)
+ lines := runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Fatalf("dump run produced no output")
+ }
+
+ bad := false
+ for _, check := range checks {
+ found := false
+ for _, line := range lines {
+ if m := check.re.FindStringSubmatch(line); m != nil {
+ found = true
+ if check.negate {
+ t.Errorf("tag %q: unexpected match", check.tag)
+ bad = true
+
+ }
+ if check.nonzero || check.zero {
+ if len(m) < 2 {
+ t.Errorf("tag %s: submatch failed (short m)", check.tag)
+ bad = true
+ continue
+ }
+ if m[1] == "" {
+ t.Errorf("tag %s: submatch failed", check.tag)
+ bad = true
+ continue
+ }
+ i, err := strconv.Atoi(m[1])
+ if err != nil {
+ t.Errorf("tag %s: match Atoi failed on %s",
+ check.tag, m[1])
+ continue
+ }
+ if check.zero && i != 0 {
+ t.Errorf("tag %s: match zero failed on %s",
+ check.tag, m[1])
+ } else if check.nonzero && i == 0 {
+ t.Errorf("tag %s: match nonzero failed on %s",
+ check.tag, m[1])
+ }
+ }
+ break
+ }
+ }
+ if !found && !check.negate {
+ t.Errorf("dump output regexp match failed for %s", check.tag)
+ bad = true
+ }
+ }
+ if bad {
+ fmt.Printf("output from 'dump' run:\n")
+ dumplines(lines)
+ }
+}
+
// testMergeSimple merges two input dirs (same program, same counter
// mode) into a fresh output dir, then checks via debugdump that the
// result is a single meta/counter file pair with nonzero counters for
// prog1's functions.
func testMergeSimple(t *testing.T, s state, indir1, indir2, tag string) {
	outdir := filepath.Join(s.dir, "simpleMergeOut"+tag)
	if err := os.Mkdir(outdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", outdir, err)
	}

	// Merge the two dirs into a final result.
	ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
	out := fmt.Sprintf("-o=%s", outdir)
	margs := []string{ins, out}
	lines := runToolOp(t, s, "merge", margs)
	if len(lines) != 0 {
		t.Errorf("merge run produced %d lines of unexpected output", len(lines))
		dumplines(lines)
	}

	// We expect the merge tool to produce exactly two files: a meta
	// data file and a counter file. If we get more than just this one
	// pair, something went wrong.
	podlist, err := pods.CollectPods([]string{outdir}, true)
	if err != nil {
		t.Fatal(err)
	}
	if len(podlist) != 1 {
		t.Fatalf("expected 1 pod, got %d pods", len(podlist))
	}
	ncdfs := len(podlist[0].CounterDataFiles)
	if ncdfs != 1 {
		t.Fatalf("expected 1 counter data file, got %d", ncdfs)
	}

	// Sift through the output to make sure it has some key elements.
	// In particular, we want to see entries for all three functions
	// ("first", "second", and "third").
	testpoints := []dumpCheck{
		{
			tag: "first function",
			re:  regexp.MustCompile(`^Func: first\s*$`),
		},
		{
			tag: "second function",
			re:  regexp.MustCompile(`^Func: second\s*$`),
		},
		{
			tag: "third function",
			re:  regexp.MustCompile(`^Func: third\s*$`),
		},
		{
			tag:     "third function unit 0",
			re:      regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`),
			nonzero: true,
		},
		{
			tag:     "third function unit 1",
			re:      regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`),
			nonzero: true,
		},
		{
			tag:     "third function unit 2",
			re:      regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`),
			nonzero: true,
		},
	}
	flags := []string{"-live", "-pkg=main"}
	runDumpChecks(t, s, outdir, flags, testpoints)
}
+
+func testMergeSelect(t *testing.T, s state, indir1, indir2 string, tag string) {
+ outdir := filepath.Join(s.dir, "selectMergeOut"+tag)
+ if err := os.Mkdir(outdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", outdir, err)
+ }
+
+ // Merge two input dirs into a final result, but filter
+ // based on package.
+ ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
+ out := fmt.Sprintf("-o=%s", outdir)
+ margs := []string{"-pkg=prog/dep", ins, out}
+ lines := runToolOp(t, s, "merge", margs)
+ if len(lines) != 0 {
+ t.Errorf("merge run produced %d lines of unexpected output", len(lines))
+ dumplines(lines)
+ }
+
+ // Dump the files in the merged output dir and examine the result.
+ // We expect to see only the functions in package "dep".
+ dargs := []string{"-i=" + outdir}
+ lines = runToolOp(t, s, "debugdump", dargs)
+ if len(lines) == 0 {
+ t.Fatalf("dump run produced no output")
+ }
+ want := map[string]int{
+ "Package path: prog/dep": 0,
+ "Func: Dep1": 0,
+ "Func: PDep": 0,
+ }
+ bad := false
+ for _, line := range lines {
+ if v, ok := want[line]; ok {
+ if v != 0 {
+ t.Errorf("duplicate line %s", line)
+ bad = true
+ break
+ }
+ want[line] = 1
+ continue
+ } else {
+ }
+ // no other functions or packages expected.
+ if strings.HasPrefix(line, "Func:") || strings.HasPrefix(line, "Package path:") {
+ t.Errorf("unexpected line: %s", line)
+ bad = true
+ break
+ }
+ }
+ if bad {
+ dumplines(lines)
+ }
+}
+
// testMergeCombinePrograms runs prog2 to create two additional output
// dirs, then merges them with prog1's dirs using -pcombine (maximal
// cross-program combining) and checks that functions from both
// programs appear in the single resulting pod.
func testMergeCombinePrograms(t *testing.T, s state) {

	// Run the new program, emitting output into a new set
	// of outdirs.
	runout := [2]string{}
	for k := 0; k < 2; k++ {
		runout[k] = filepath.Join(s.dir, fmt.Sprintf("newcovdata%d", k))
		if err := os.Mkdir(runout[k], 0777); err != nil {
			t.Fatalf("can't create outdir %s: %v", runout[k], err)
		}
		args := []string{}
		if k != 0 {
			args = append(args, "foo", "bar")
		}
		cmd := exec.Command(s.exepath2, args...)
		cmd.Env = append(cmd.Env, "GOCOVERDIR="+runout[k])
		b, err := cmd.CombinedOutput()
		if len(b) != 0 {
			t.Logf("## instrumented run output:\n%s", b)
		}
		if err != nil {
			t.Fatalf("instrumented run error: %v", err)
		}
	}

	// Create out dir for -pcombine merge.
	moutdir := filepath.Join(s.dir, "mergeCombineOut")
	if err := os.Mkdir(moutdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", moutdir, err)
	}

	// Run a merge over both programs, using the -pcombine
	// flag to do maximal combining.
	ins := fmt.Sprintf("-i=%s,%s,%s,%s", s.outdirs[0], s.outdirs[1],
		runout[0], runout[1])
	out := fmt.Sprintf("-o=%s", moutdir)
	margs := []string{"-pcombine", ins, out}
	lines := runToolOp(t, s, "merge", margs)
	if len(lines) != 0 {
		t.Errorf("merge run produced unexpected output: %v", lines)
	}

	// We expect the merge tool to produce exactly two files: a meta
	// data file and a counter file. If we get more than just this one
	// pair, something went wrong.
	podlist, err := pods.CollectPods([]string{moutdir}, true)
	if err != nil {
		t.Fatal(err)
	}
	if len(podlist) != 1 {
		t.Fatalf("expected 1 pod, got %d pods", len(podlist))
	}
	ncdfs := len(podlist[0].CounterDataFiles)
	if ncdfs != 1 {
		t.Fatalf("expected 1 counter data file, got %d", ncdfs)
	}

	// Sift through the output to make sure it has some key elements:
	// one function from prog1 ("first") and one from prog2 ("sixth").
	testpoints := []dumpCheck{
		{
			tag: "first function",
			re:  regexp.MustCompile(`^Func: first\s*$`),
		},
		{
			tag: "sixth function",
			re:  regexp.MustCompile(`^Func: sixth\s*$`),
		},
	}

	flags := []string{"-live", "-pkg=main"}
	runDumpChecks(t, s, moutdir, flags, testpoints)
}
+
// testSubtract checks "covdata subtract" over outdirs 0 and 1: units
// of prog1's "third" that executed in both runs must end up zero,
// while the unit covered only in the dir-0 run stays nonzero.
func testSubtract(t *testing.T, s state) {
	// Create out dir for subtract merge.
	soutdir := filepath.Join(s.dir, "subtractOut")
	if err := os.Mkdir(soutdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", soutdir, err)
	}

	// Subtract the two dirs into a final result.
	ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[1])
	out := fmt.Sprintf("-o=%s", soutdir)
	sargs := []string{ins, out}
	lines := runToolOp(t, s, "subtract", sargs)
	if len(lines) != 0 {
		t.Errorf("subtract run produced unexpected output: %+v", lines)
	}

	// Dump the files in the subtract output dir and examine the result.
	dargs := []string{"-pkg=main", "-live", "-i=" + soutdir}
	lines = runToolOp(t, s, "debugdump", dargs)
	if len(lines) == 0 {
		t.Errorf("dump run produced no output")
	}

	// Vet the output.
	testpoints := []dumpCheck{
		{
			tag: "first function",
			re:  regexp.MustCompile(`^Func: first\s*$`),
		},
		{
			tag: "dep function",
			re:  regexp.MustCompile(`^Func: Dep1\s*$`),
		},
		{
			tag: "third function",
			re:  regexp.MustCompile(`^Func: third\s*$`),
		},
		{
			tag:  "third function unit 0",
			re:   regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`),
			zero: true,
		},
		{
			tag:     "third function unit 1",
			re:      regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`),
			nonzero: true,
		},
		{
			tag:  "third function unit 2",
			re:   regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`),
			zero: true,
		},
	}
	flags := []string{}
	runDumpChecks(t, s, soutdir, flags, testpoints)
}
+
// testIntersect checks "covdata intersect" over two input dirs: only
// functions with data in both dirs survive — the checks expect
// "third" present and "first" absent.
func testIntersect(t *testing.T, s state, indir1, indir2, tag string) {
	// Create out dir for intersection.
	ioutdir := filepath.Join(s.dir, "intersectOut"+tag)
	if err := os.Mkdir(ioutdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", ioutdir, err)
	}

	// Intersect the two dirs into a final result.
	ins := fmt.Sprintf("-i=%s,%s", indir1, indir2)
	out := fmt.Sprintf("-o=%s", ioutdir)
	sargs := []string{ins, out}
	lines := runToolOp(t, s, "intersect", sargs)
	if len(lines) != 0 {
		t.Errorf("intersect run produced unexpected output: %+v", lines)
	}

	// Dump the files in the intersect output dir and examine the result.
	dargs := []string{"-pkg=main", "-live", "-i=" + ioutdir}
	lines = runToolOp(t, s, "debugdump", dargs)
	if len(lines) == 0 {
		t.Errorf("dump run produced no output")
	}

	// Vet the output.
	testpoints := []dumpCheck{
		{
			tag:    "first function",
			re:     regexp.MustCompile(`^Func: first\s*$`),
			negate: true,
		},
		{
			tag: "third function",
			re:  regexp.MustCompile(`^Func: third\s*$`),
		},
	}
	flags := []string{"-live"}
	runDumpChecks(t, s, ioutdir, flags, testpoints)
}
+
// testCounterClash verifies that merging "set"-mode data with
// "atomic"-mode data under -pcombine fails with a counter mode clash
// error rather than silently combining.
func testCounterClash(t *testing.T, s state) {
	// Create out dir.
	ccoutdir := filepath.Join(s.dir, "ccOut")
	if err := os.Mkdir(ccoutdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", ccoutdir, err)
	}

	// Try to merge covdata0 (from prog1.go built with the default
	// -covermode=set) with covdata3 (from prog1.go built with
	// -covermode=atomic). This should produce a counter mode clash
	// error. [Comment fix: formerly said covdata1/-countermode.]
	ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[3])
	out := fmt.Sprintf("-o=%s", ccoutdir)
	args := append([]string{}, "merge", ins, out, "-pcombine")
	if debugtrace {
		t.Logf("cc merge command is %s %v\n", s.tool, args)
	}
	cmd := exec.Command(s.tool, args...)
	b, err := cmd.CombinedOutput()
	t.Logf("%% output: %s\n", string(b))
	if err == nil {
		t.Fatalf("clash merge passed unexpectedly")
	}
	got := string(b)
	want := "counter mode clash while reading meta-data"
	if !strings.Contains(got, want) {
		t.Errorf("counter clash merge: wanted %s got %s", want, got)
	}
}
+
// testEmpty runs each tool subcommand against an empty input
// directory, checking that all of them complete without error or
// crash (output content is not inspected).
func testEmpty(t *testing.T, s state) {

	// Create a new empty directory.
	empty := filepath.Join(s.dir, "empty")
	if err := os.Mkdir(empty, 0777); err != nil {
		t.Fatalf("can't create dir %s: %v", empty, err)
	}

	// Create out dir.
	eoutdir := filepath.Join(s.dir, "emptyOut")
	if err := os.Mkdir(eoutdir, 0777); err != nil {
		t.Fatalf("can't create outdir %s: %v", eoutdir, err)
	}

	// Run various operations (merge, dump, textfmt, and so on)
	// using the empty directory. We're not interested in the output
	// here, just making sure that you can do these runs without
	// any error or crash.

	scenarios := []struct {
		tag  string
		args []string
	}{
		{
			tag:  "merge",
			args: []string{"merge", "-o", eoutdir},
		},
		{
			tag:  "textfmt",
			args: []string{"textfmt", "-o", filepath.Join(eoutdir, "foo.txt")},
		},
		{
			tag:  "func",
			args: []string{"func"},
		},
		{
			tag:  "pkglist",
			args: []string{"pkglist"},
		},
		{
			tag:  "debugdump",
			args: []string{"debugdump"},
		},
		{
			tag:  "percent",
			args: []string{"percent"},
		},
	}

	for _, x := range scenarios {
		ins := fmt.Sprintf("-i=%s", empty)
		args := append([]string{}, x.args...)
		args = append(args, ins)
		if false {
			t.Logf("cmd is %s %v\n", s.tool, args)
		}
		cmd := exec.Command(s.tool, args...)
		b, err := cmd.CombinedOutput()
		t.Logf("%% output: %s\n", string(b))
		if err != nil {
			t.Fatalf("command %s %+v failed with %v",
				s.tool, x.args, err)
		}
	}
}
+
+func testCommandLineErrors(t *testing.T, s state, outdir string) {
+
+ // Create out dir.
+ eoutdir := filepath.Join(s.dir, "errorsOut")
+ if err := os.Mkdir(eoutdir, 0777); err != nil {
+ t.Fatalf("can't create outdir %s: %v", eoutdir, err)
+ }
+
+ // Run various operations (merge, dump, textfmt, and so on)
+ // using the empty directory. We're not interested in the output
+ // here, just making sure that you can do these runs without
+ // any error or crash.
+
+ scenarios := []struct {
+ tag string
+ args []string
+ exp string
+ }{
+ {
+ tag: "input missing",
+ args: []string{"merge", "-o", eoutdir, "-i", "not there"},
+ exp: "error: reading inputs: ",
+ },
+ {
+ tag: "badv",
+ args: []string{"textfmt", "-i", outdir, "-v=abc"},
+ },
+ }
+
+ for _, x := range scenarios {
+ args := append([]string{}, x.args...)
+ if false {
+ t.Logf("cmd is %s %v\n", s.tool, args)
+ }
+ cmd := exec.Command(s.tool, args...)
+ b, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Logf("%% output: %s\n", string(b))
+ t.Fatalf("command %s %+v unexpectedly succeeded",
+ s.tool, x.args)
+ } else {
+ if !strings.Contains(string(b), x.exp) {
+ t.Fatalf("command %s %+v:\ngot:\n%s\nwanted to see: %v\n",
+ s.tool, x.args, string(b), x.exp)
+ }
+ }
+ }
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cov
+
+import (
+ "cmd/internal/bio"
+ "io"
+ "os"
+)
+
+// This file contains the helper "MReader", a wrapper around bio plus
+// an "mmap'd read-only" view of the file obtained from bio.SliceRO().
+// MReader is designed to implement the io.ReaderSeeker interface.
+// Since bio.SliceOS() is not guaranteed to succeed, MReader falls back
+// on explicit reads + seeks provided by bio.Reader if needed.
+
// MReader provides read/seek access to a coverage data file, serving
// reads from an mmap'd read-only view of the file when available and
// falling back on the wrapped bio.Reader otherwise.
type MReader struct {
	f        *os.File    // underlying file
	rdr      *bio.Reader // fallback reader (used when fileView is nil)
	fileView []byte      // mmap'd read-only view of the file; nil if SliceRO failed
	off      int64       // current read offset within fileView
}
+
+func NewMreader(f *os.File) (*MReader, error) {
+ rdr := bio.NewReader(f)
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ r := MReader{
+ f: f,
+ rdr: rdr,
+ fileView: rdr.SliceRO(uint64(fi.Size())),
+ }
+ return &r, nil
+}
+
+func (r *MReader) Read(p []byte) (int, error) {
+ if r.fileView != nil {
+ amt := len(p)
+ toread := r.fileView[r.off:]
+ if len(toread) < 1 {
+ return 0, io.EOF
+ }
+ if len(toread) < amt {
+ amt = len(toread)
+ }
+ copy(p, toread)
+ r.off += int64(amt)
+ return amt, nil
+ }
+ return r.rdr.Read(p)
+}
+
+func (r *MReader) ReadByte() (byte, error) {
+ if r.fileView != nil {
+ toread := r.fileView[r.off:]
+ if len(toread) < 1 {
+ return 0, io.EOF
+ }
+ rv := toread[0]
+ r.off++
+ return rv, nil
+ }
+ return r.rdr.ReadByte()
+}
+
+func (r *MReader) Seek(offset int64, whence int) (int64, error) {
+ if r.fileView == nil {
+ return r.rdr.MustSeek(offset, whence), nil
+ }
+ switch whence {
+ case os.SEEK_SET:
+ r.off = offset
+ return offset, nil
+ case os.SEEK_CUR:
+ return r.off, nil
+ case os.SEEK_END:
+ r.off = int64(len(r.fileView)) + offset
+ return r.off, nil
+ }
+ panic("other modes not implemented")
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cov
+
+import (
+ "cmd/internal/bio"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/decodecounter"
+ "internal/coverage/decodemeta"
+ "internal/coverage/pods"
+ "os"
+)
+
// CovDataReader is a general-purpose helper/visitor object for
// reading coverage data files in a structured way. Clients create a
// CovDataReader to process a given collection of coverage data file
// directories, then pass in a visitor object with methods that get
// invoked at various important points. CovDataReader is intended
// to facilitate common coverage data file operations such as
// merging or intersecting data files, analyzing data files, or
// dumping data files.
type CovDataReader struct {
	vis            CovDataVisitor         // client callbacks invoked during the walk
	indirs         []string               // coverage data directories to examine
	matchpkg       func(name string) bool // optional package-selection predicate (nil = all packages)
	flags          CovDataReaderFlags     // error/warning handling behavior
	err            error                  // first fatal error recorded (sticky; see fatal)
	verbosityLevel int                    // debug trace verbosity (zero = off)
}
+
+// MakeCovDataReader creates a CovDataReader object to process the
+// given set of input directories. Here 'vis' is a visitor object
+// providing methods to be invoked as we walk through the data,
+// 'indirs' is the set of coverage data directories to examine,
+// 'verbosityLevel' controls the level of debugging trace messages
+// (zero for off, higher for more output), 'flags' stores flags that
+// indicate what to do if errors are detected, and 'matchpkg' is a
+// caller-provided function that can be used to select specific
+// packages by name (if nil, then all packages are included).
+func MakeCovDataReader(vis CovDataVisitor, indirs []string, verbosityLevel int, flags CovDataReaderFlags, matchpkg func(name string) bool) *CovDataReader {
+ return &CovDataReader{
+ vis: vis,
+ indirs: indirs,
+ matchpkg: matchpkg,
+ verbosityLevel: verbosityLevel,
+ flags: flags,
+ }
+}
+
// CovDataVisitor defines hooks for clients of CovDataReader. When the
// coverage data reader makes its way through a coverage meta-data
// file and counter data files, it will invoke the methods below to
// hand off info to the client. The normal sequence of expected
// visitor method invocations is:
//
//	for each pod P {
//		BeginPod(p)
//		let MF be the meta-data file for P
//		VisitMetaDataFile(MF)
//		for each counter data file D in P {
//			BeginCounterDataFile(D)
//			for each live function F in D {
//				VisitFuncCounterData(F)
//			}
//			EndCounterDataFile(D)
//		}
//		EndCounters(MF)
//		for each package PK in MF {
//			BeginPackage(PK)
//			if <PK matched according to package pattern and/or modpath> {
//				for each function PF in PK {
//					VisitFunc(PF)
//				}
//			}
//			EndPackage(PK)
//		}
//		EndPod(p)
//	}
//	Finish()

type CovDataVisitor interface {
	// Invoked at the start and end of a given pod (a pod here is a
	// specific coverage meta-data file with the counter data files
	// that correspond to it).
	BeginPod(p pods.Pod)
	EndPod(p pods.Pod)

	// Invoked when the reader is starting to examine the meta-data
	// file for a pod. Here 'mdf' is the path of the file, and 'mfr'
	// is an open meta-data reader.
	VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader)

	// Invoked when the reader processes a counter data file, first
	// the 'begin' method at the start, then the 'end' method when
	// we're done with the file.
	BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int)
	EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int)

	// Invoked once for each live function in the counter data file.
	VisitFuncCounterData(payload decodecounter.FuncPayload)

	// Invoked when we've finished processing the counter files in a
	// POD (e.g. no more calls to VisitFuncCounterData).
	EndCounters()

	// Invoked for each package in the meta-data file for the pod,
	// first the 'begin' method when processing of the package starts,
	// then the 'end' method when we're done.
	BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32)
	EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32)

	// Invoked for each function in the package being visited.
	VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc)

	// Invoked when all counter + meta-data file processing is complete.
	Finish()
}
+
// CovDataReaderFlags is a bit mask controlling how CovDataReader
// responds to errors and warnings.
type CovDataReaderFlags uint32

const (
	CovDataReaderNoFlags CovDataReaderFlags = 0
	// The flag constants below were previously untyped (declared as
	// bare "= 1 << iota"); giving them the explicit type adds type
	// safety while preserving the original values (2 and 4).
	PanicOnError CovDataReaderFlags = 1 << iota // panic instead of returning on fatal errors
	PanicOnWarning                              // panic when a warning is reported
)
+
+func (r *CovDataReader) Visit() error {
+ podlist, err := pods.CollectPods(r.indirs, false)
+ if err != nil {
+ return fmt.Errorf("reading inputs: %v", err)
+ }
+ if len(podlist) == 0 {
+ r.warn("no applicable files found in input directories")
+ }
+ for _, p := range podlist {
+ if err := r.visitPod(p); err != nil {
+ return err
+ }
+ }
+ r.vis.Finish()
+ return nil
+}
+
+func (r *CovDataReader) verb(vlevel int, s string, a ...interface{}) {
+ if r.verbosityLevel >= vlevel {
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ }
+}
+
+func (r *CovDataReader) warn(s string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, "warning: ")
+ fmt.Fprintf(os.Stderr, s, a...)
+ fmt.Fprintf(os.Stderr, "\n")
+ if (r.flags & PanicOnWarning) != 0 {
+ panic("unexpected warning")
+ }
+}
+
// fatal formats and records a fatal error, returning an error object
// the caller can pass back up the stack. Only the first error is
// recorded: if one was already noted, subsequent calls return nil --
// presumably to suppress cascading reports for the same root cause;
// NOTE(review): callers that return this nil will then appear to
// succeed, confirm that is intended. If the PanicOnError flag is set,
// the error is printed to stderr and the function panics instead
// (handy for getting a stack trace).
func (r *CovDataReader) fatal(s string, a ...interface{}) error {
	if r.err != nil {
		return nil
	}
	errstr := "error: " + fmt.Sprintf(s, a...) + "\n"
	if (r.flags & PanicOnError) != 0 {
		fmt.Fprintf(os.Stderr, "%s", errstr)
		panic("fatal error")
	}
	r.err = fmt.Errorf("%s", errstr)
	return r.err
}
+
+// visitPod examines a coverage data 'pod', that is, a meta-data file and
+// zero or more counter data files that refer to that meta-data file.
+func (r *CovDataReader) visitPod(p pods.Pod) error {
+ r.verb(1, "visiting pod: metafile %s with %d counter files",
+ p.MetaFile, len(p.CounterDataFiles))
+ r.vis.BeginPod(p)
+
+ // Open meta-file
+ f, err := os.Open(p.MetaFile)
+ if err != nil {
+ return r.fatal("unable to open meta-file %s", p.MetaFile)
+ }
+ br := bio.NewReader(f)
+ fi, err := f.Stat()
+ if err != nil {
+ return r.fatal("unable to stat metafile %s: %v", p.MetaFile, err)
+ }
+ fileView := br.SliceRO(uint64(fi.Size()))
+ br.MustSeek(0, os.SEEK_SET)
+
+ r.verb(1, "fileView for pod is length %d", len(fileView))
+
+ var mfr *decodemeta.CoverageMetaFileReader
+ mfr, err = decodemeta.NewCoverageMetaFileReader(f, fileView)
+ if err != nil {
+ return r.fatal("decoding meta-file %s: %s", p.MetaFile, err)
+ }
+ r.vis.VisitMetaDataFile(p.MetaFile, mfr)
+
+ // Read counter data files.
+ for k, cdf := range p.CounterDataFiles {
+ cf, err := os.Open(cdf)
+ if err != nil {
+ return r.fatal("opening counter data file %s: %s", cdf, err)
+ }
+ var mr *MReader
+ mr, err = NewMreader(cf)
+ if err != nil {
+ return r.fatal("creating reader for counter data file %s: %s", cdf, err)
+ }
+ var cdr *decodecounter.CounterDataReader
+ cdr, err = decodecounter.NewCounterDataReader(cdf, mr)
+ if err != nil {
+ return r.fatal("reading counter data file %s: %s", cdf, err)
+ }
+ r.vis.BeginCounterDataFile(cdf, cdr, p.Origins[k])
+ var data decodecounter.FuncPayload
+ for {
+ ok, err := cdr.NextFunc(&data)
+ if err != nil {
+ return r.fatal("reading counter data file %s: %v", cdf, err)
+ }
+ if !ok {
+ break
+ }
+ r.vis.VisitFuncCounterData(data)
+ }
+ r.vis.EndCounterDataFile(cdf, cdr, p.Origins[k])
+ }
+ r.vis.EndCounters()
+
+ // NB: packages in the meta-file will be in dependency order (basically
+ // the order in which init files execute). Do we want an additional sort
+ // pass here, say by packagepath?
+ np := uint32(mfr.NumPackages())
+ payload := []byte{}
+ for pkIdx := uint32(0); pkIdx < np; pkIdx++ {
+ var pd *decodemeta.CoverageMetaDataDecoder
+ pd, payload, err = mfr.GetPackageDecoder(pkIdx, payload)
+ if err != nil {
+ return r.fatal("reading pkg %d from meta-file %s: %s", pkIdx, p.MetaFile, err)
+ }
+ r.processPackage(p.MetaFile, pd, pkIdx)
+ }
+ r.vis.EndPod(p)
+
+ return nil
+}
+
+func (r *CovDataReader) processPackage(mfname string, pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) error {
+ if r.matchpkg != nil {
+ if !r.matchpkg(pd.PackagePath()) {
+ return nil
+ }
+ }
+ r.vis.BeginPackage(pd, pkgIdx)
+ nf := pd.NumFuncs()
+ var fd coverage.FuncDesc
+ for fidx := uint32(0); fidx < nf; fidx++ {
+ if err := pd.ReadFunc(fidx, &fd); err != nil {
+ return r.fatal("reading meta-data file %s: %v", mfname, err)
+ }
+ r.vis.VisitFunc(pkgIdx, fidx, &fd)
+ }
+ r.vis.EndPackage(pd, pkgIdx)
+ return nil
+}
# No dependencies allowed for any of these packages.
NONE
< constraints, container/list, container/ring,
- internal/cfg, internal/cpu, internal/coverage,
- internal/coverage/uleb128, internal/coverage/rtcov, internal/goarch,
+ internal/cfg, internal/coverage, internal/coverage/rtcov,
+ internal/coverage/uleb128, internal/coverage/calloc,
+ internal/cpu, internal/goarch,
internal/goexperiment, internal/goos,
internal/goversion, internal/nettrace,
unicode/utf8, unicode/utf16, unicode,
encoding/binary, internal/unsafeheader, unsafe
< internal/coverage/slicereader;
+ FMT, math, internal/coverage
+ < internal/coverage/cmerge;
+
+ FMT, math, internal/coverage, internal/coverage/cmerge, text/tabwriter
+ < internal/coverage/cformat;
+
FMT, io, internal/coverage/slicereader, internal/coverage/uleb128
< internal/coverage/stringtab;
FMT, encoding/binary, internal/coverage, io, os,
crypto/md5, internal/coverage/stringtab
< internal/coverage/decodemeta;
+
+ FMT, internal/coverage, os,
+ path/filepath, regexp, sort, strconv
+ < internal/coverage/pods;
`
// listStdPkgs returns the same list of packages as "go list std".
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package calloc
+
+// This package contains a simple "batch" allocator for allocating
+// coverage counters (slices of uint32 basically), for working with
+// coverage data files. Collections of counter arrays tend to all be
+// live/dead over the same time period, so a good fit for batch
+// allocation.
+
// BatchCounterAlloc is a simple bump/batch allocator for coverage
// counter slices ([]uint32). It hands out sub-slices carved from the
// front of a larger backing slice, replacing the backing store
// whenever the current one cannot satisfy a request. Individual
// allocations are never freed; the whole batch is released together
// when the allocator itself becomes garbage.
type BatchCounterAlloc struct {
	pool []uint32 // current backing store; allocations are taken from the front
}

// AllocateCounters returns a zeroed []uint32 of length n, drawn from
// the allocator's pool (growing the pool first if it is too small).
func (ca *BatchCounterAlloc) AllocateCounters(n int) []uint32 {
	const chunk = 8192
	if cap(ca.pool) < n {
		// Need a fresh backing store: at least one chunk, or larger
		// when the request itself exceeds the chunk size.
		sz := chunk
		if sz < n {
			sz = n
		}
		ca.pool = make([]uint32, sz)
	}
	counters := ca.pool[:n]
	ca.pool = ca.pool[n:]
	return counters
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cformat_test
+
+import (
+ "internal/coverage"
+ "internal/coverage/cformat"
+ "strings"
+ "testing"
+)
+
// TestBasics exercises the Formatter end to end: coverage units from
// three functions (including one function literal) are registered via
// SetPackage/AddUnit, then the three emitters (EmitTextual,
// EmitPercent, EmitFuncs) are checked against expected output.
func TestBasics(t *testing.T) {
	fm := cformat.NewFormatter(coverage.CtrModeAtomic)
	fm.SetPackage("my/pack")

	// mku builds a CoverableUnit with the given start line, end line,
	// and statement count.
	mku := func(stl, enl, nx uint32) coverage.CoverableUnit {
		return coverage.CoverableUnit{
			StLine:  stl,
			EnLine:  enl,
			NxStmts: nx,
		}
	}
	fn1units := []coverage.CoverableUnit{
		mku(10, 11, 2),
		mku(15, 11, 1),
	}
	fn2units := []coverage.CoverableUnit{
		mku(20, 25, 3),
		mku(30, 31, 2),
		mku(33, 40, 7),
	}
	fn3units := []coverage.CoverableUnit{
		mku(99, 100, 1),
	}
	for k, u := range fn1units {
		fm.AddUnit("p.go", "f1", false, u, uint32(k))
	}
	// Each f2 unit is added twice (once with count 0, once with count
	// k) to exercise counter-value accumulation inside AddUnit.
	for k, u := range fn2units {
		fm.AddUnit("q.go", "f2", false, u, 0)
		fm.AddUnit("q.go", "f2", false, u, uint32(k))
	}
	// f3 is registered as a function literal (isfnlit=true); its
	// statements count toward totals but it is omitted from the
	// per-function report below.
	for _, u := range fn3units {
		fm.AddUnit("lit.go", "f3", true, u, 0)
	}

	var b1, b2, b3 strings.Builder
	if err := fm.EmitTextual(&b1); err != nil {
		t.Fatalf("EmitTextual returned %v", err)
	}
	// Legacy text format: sorted by file, then source position.
	wantText := strings.TrimSpace(`
mode: atomic
lit.go:99.0,100.0 1 0
p.go:10.0,11.0 2 0
p.go:15.0,11.0 1 1
q.go:20.0,25.0 3 0
q.go:30.0,31.0 2 1
q.go:33.0,40.0 7 2`)
	gotText := strings.TrimSpace(b1.String())
	if wantText != gotText {
		t.Errorf("emit text: got:\n%s\nwant:\n%s\n", gotText, wantText)
	}

	// 10 of 16 statements executed => 62.5%.
	if err := fm.EmitPercent(&b2, "", false); err != nil {
		t.Fatalf("EmitPercent returned %v", err)
	}
	wantPercent := strings.TrimSpace(`
my/pack coverage: 62.5% of statements
`)
	gotPercent := strings.TrimSpace(b2.String())
	if wantPercent != gotPercent {
		t.Errorf("emit percent: got:\n%s\nwant:\n%s\n", gotPercent, wantPercent)
	}

	// Per-function summary; note f3 (a literal) has no line of its own.
	if err := fm.EmitFuncs(&b3); err != nil {
		t.Fatalf("EmitFuncs returned %v", err)
	}
	wantFuncs := strings.TrimSpace(`
p.go:10: f1 33.3%
q.go:20: f2 75.0%
total (statements) 62.5%`)
	gotFuncs := strings.TrimSpace(b3.String())
	if wantFuncs != gotFuncs {
		t.Errorf("emit funcs: got:\n%s\nwant:\n%s\n", gotFuncs, wantFuncs)
	}
	// Debug dumps; flip the guard by hand when investigating failures.
	if false {
		t.Logf("text is %s\n", b1.String())
		t.Logf("perc is %s\n", b2.String())
		t.Logf("funcs is %s\n", b3.String())
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cformat
+
+// This package provides apis for producing human-readable summaries
+// of coverage data (e.g. a coverage percentage for a given package or
+// set of packages) and for writing data in the legacy test format
+// emitted by "go test -coverprofile=<outfile>".
+//
+// The model for using these apis is to create a Formatter object,
+// then make a series of calls to SetPackage and AddUnit passing in
+// data read from coverage meta-data and counter-data files. E.g.
+//
+// myformatter := cformat.NewFormatter()
+// ...
+// for each package P in meta-data file: {
+// myformatter.SetPackage(P)
+// for each function F in P: {
+// for each coverable unit U in F: {
+// myformatter.AddUnit(U)
+// }
+// }
+// }
+// myformatter.EmitPercent(os.Stdout, "")
+// myformatter.EmitTextual(somefile)
+//
+// These apis are linked into tests that are built with "-cover", and
+// called at the end of test execution to produce text output or
+// emit coverage percentages.
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/cmerge"
+ "io"
+ "sort"
+ "text/tabwriter"
+)
+
// Formatter accumulates coverage data for a collection of packages,
// then emits it in various textual forms (legacy cmd/cover text
// format, percent-of-statements summaries, per-function summaries).
type Formatter struct {
	// Maps import path to package state.
	pm map[string]*pstate
	// Records current package being visited (import path passed to
	// the most recent SetPackage call).
	pkg string
	// Pointer to current package state.
	p *pstate
	// Counter mode; dictates how counter values are combined when a
	// unit is added more than once (set vs accumulate).
	cm coverage.CounterMode
}
+
// pstate records package-level coverage data state:
// - a table of functions (file/fname/literal)
// - a map recording the index/ID of each func encountered so far
// - a table storing execution count for the coverable units in each func
type pstate struct {
	// slice of unique functions, in order of first encounter
	funcs []fnfile
	// maps function to index in slice above (index acts as function ID)
	funcTable map[fnfile]uint32

	// A table storing coverage counts for each coverable unit,
	// keyed by (function ID, unit).
	unitTable map[extcu]uint32
}
+
// extcu encapsulates a coverable unit within some function: the
// unit's source range and statement count, plus the ID of its
// enclosing function. Used as the key of pstate.unitTable.
type extcu struct {
	fnfid uint32 // index into p.funcs slice
	coverage.CoverableUnit
}
+
// fnfile is a function-name/file-name tuple, along with a flag
// recording whether the function is an unnamed function literal
// (literals are excluded from per-function summaries).
type fnfile struct {
	file string
	fname string
	lit bool
}
+
+func NewFormatter(cm coverage.CounterMode) *Formatter {
+ return &Formatter{
+ pm: make(map[string]*pstate),
+ cm: cm,
+ }
+}
+
+// SetPackage tells the formatter that we're about to visit the
+// coverage data for the package with the specified import path.
+// Note that it's OK to call SetPackage more than once with the
+// same import path; counter data values will be accumulated.
+func (fm *Formatter) SetPackage(importpath string) {
+ if importpath == fm.pkg {
+ return
+ }
+ fm.pkg = importpath
+ ps, ok := fm.pm[importpath]
+ if !ok {
+ ps = new(pstate)
+ fm.pm[importpath] = ps
+ ps.unitTable = make(map[extcu]uint32)
+ ps.funcTable = make(map[fnfile]uint32)
+ }
+ fm.p = ps
+}
+
+// AddUnit passes info on a single coverable unit (file, funcname,
+// literal flag, range of lines, and counter value) to the formatter.
+// Counter values will be accumulated where appropriate.
+func (fm *Formatter) AddUnit(file string, fname string, isfnlit bool, unit coverage.CoverableUnit, count uint32) {
+ if fm.p == nil {
+ panic("AddUnit invoked before SetPackage")
+ }
+ fkey := fnfile{file: file, fname: fname, lit: isfnlit}
+ idx, ok := fm.p.funcTable[fkey]
+ if !ok {
+ idx = uint32(len(fm.p.funcs))
+ fm.p.funcs = append(fm.p.funcs, fkey)
+ fm.p.funcTable[fkey] = idx
+ }
+ ukey := extcu{fnfid: idx, CoverableUnit: unit}
+ pcount := fm.p.unitTable[ukey]
+ var result uint32
+ if fm.cm == coverage.CtrModeSet {
+ if count != 0 || pcount != 0 {
+ result = 1
+ }
+ } else {
+ // Use saturating arithmetic.
+ result, _ = cmerge.SaturatingAdd(pcount, count)
+ }
+ fm.p.unitTable[ukey] = result
+}
+
+// sortUnits sorts a slice of extcu objects in a package according to
+// source position information (e.g. file and line). Note that we don't
+// include function name as part of the sorting criteria, the thinking
+// being that is better to provide things in the original source order.
+func (p *pstate) sortUnits(units []extcu) {
+ sort.Slice(units, func(i, j int) bool {
+ ui := units[i]
+ uj := units[j]
+ ifile := p.funcs[ui.fnfid].file
+ jfile := p.funcs[uj.fnfid].file
+ if ifile != jfile {
+ return ifile < jfile
+ }
+ // NB: not taking function literal flag into account here (no
+ // need, since other fields are guaranteed to be distinct).
+ if units[i].StLine != units[j].StLine {
+ return units[i].StLine < units[j].StLine
+ }
+ if units[i].EnLine != units[j].EnLine {
+ return units[i].EnLine < units[j].EnLine
+ }
+ if units[i].StCol != units[j].StCol {
+ return units[i].StCol < units[j].StCol
+ }
+ if units[i].EnCol != units[j].EnCol {
+ return units[i].EnCol < units[j].EnCol
+ }
+ return units[i].NxStmts < units[j].NxStmts
+ })
+}
+
+// EmitTextual writes the accumulated coverage data in the legacy
+// cmd/cover text format to the writer 'w'. We sort the data items by
+// importpath, source file, and line number before emitting (this sorting
+// is not explicitly mandated by the format, but seems like a good idea
+// for repeatable/deterministic dumps).
+func (fm *Formatter) EmitTextual(w io.Writer) error {
+ if fm.cm == coverage.CtrModeInvalid {
+ panic("internal error, counter mode unset")
+ }
+ if _, err := fmt.Fprintf(w, "mode: %s\n", fm.cm.String()); err != nil {
+ return err
+ }
+ pkgs := make([]string, 0, len(fm.pm))
+ for importpath := range fm.pm {
+ pkgs = append(pkgs, importpath)
+ }
+ sort.Strings(pkgs)
+ for _, importpath := range pkgs {
+ p := fm.pm[importpath]
+ units := make([]extcu, 0, len(p.unitTable))
+ for u := range p.unitTable {
+ units = append(units, u)
+ }
+ p.sortUnits(units)
+ for _, u := range units {
+ count := p.unitTable[u]
+ file := p.funcs[u.fnfid].file
+ if _, err := fmt.Fprintf(w, "%s:%d.%d,%d.%d %d %d\n",
+ file, u.StLine, u.StCol,
+ u.EnLine, u.EnCol, u.NxStmts, count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// EmitPercent writes out a "percentage covered" string to the writer 'w'.
+func (fm *Formatter) EmitPercent(w io.Writer, covpkgs string, noteEmpty bool) error {
+ pkgs := make([]string, 0, len(fm.pm))
+ for importpath := range fm.pm {
+ pkgs = append(pkgs, importpath)
+ }
+ sort.Strings(pkgs)
+ seenPkg := false
+ for _, importpath := range pkgs {
+ seenPkg = true
+ p := fm.pm[importpath]
+ var totalStmts, coveredStmts uint64
+ for unit, count := range p.unitTable {
+ nx := uint64(unit.NxStmts)
+ totalStmts += nx
+ if count != 0 {
+ coveredStmts += nx
+ }
+ }
+ if _, err := fmt.Fprintf(w, "\t%s\t", importpath); err != nil {
+ return err
+ }
+ if totalStmts == 0 {
+ if _, err := fmt.Fprintf(w, "coverage: [no statements]\n"); err != nil {
+ return err
+ }
+ } else {
+ if _, err := fmt.Fprintf(w, "coverage: %.1f%% of statements%s\n", 100*float64(coveredStmts)/float64(totalStmts), covpkgs); err != nil {
+ return err
+ }
+ }
+ }
+ if noteEmpty && !seenPkg {
+ if _, err := fmt.Fprintf(w, "coverage: [no statements]\n"); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// EmitFuncs writes out a function-level summary to the writer 'w'. A
// note on handling function literals: although we collect coverage
// data for unnamed literals, it probably does not make sense to
// include them in the function summary since there isn't any good way
// to name them (this is also consistent with the legacy cmd/cover
// implementation). We do want to include their counts in the overall
// summary however.
func (fm *Formatter) EmitFuncs(w io.Writer) error {
	if fm.cm == coverage.CtrModeInvalid {
		panic("internal error, counter mode unset")
	}
	// perc computes a percentage, guarding against divide-by-zero
	// (a zero total reads as 0%, since covered is then also zero).
	perc := func(covered, total uint64) float64 {
		if total == 0 {
			total = 1
		}
		return 100.0 * float64(covered) / float64(total)
	}
	tabber := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
	defer tabber.Flush()
	// Running totals across all packages, for the final "total" line.
	allStmts := uint64(0)
	covStmts := uint64(0)

	pkgs := make([]string, 0, len(fm.pm))
	for importpath := range fm.pm {
		pkgs = append(pkgs, importpath)
	}
	sort.Strings(pkgs)

	// Emit functions for each package, sorted by import path.
	for _, importpath := range pkgs {
		p := fm.pm[importpath]
		if len(p.unitTable) == 0 {
			continue
		}
		units := make([]extcu, 0, len(p.unitTable))
		for u := range p.unitTable {
			units = append(units, u)
		}

		// Within a package, sort the units, then walk through the
		// sorted array. Each time we hit a new function, emit the
		// summary entry for the previous function, then make one last
		// emit call at the end of the loop.
		p.sortUnits(units)
		// Accumulator state for the function currently being scanned:
		// identity (fname/ffile/flit), first line, and covered/total
		// statement tallies.
		fname := ""
		ffile := ""
		flit := false
		var fline uint32
		var cstmts, tstmts uint64
		// captureFuncStart loads the accumulator with the identity of
		// the function that unit 'u' belongs to.
		captureFuncStart := func(u extcu) {
			fname = p.funcs[u.fnfid].fname
			ffile = p.funcs[u.fnfid].file
			flit = p.funcs[u.fnfid].lit
			fline = u.StLine
		}
		// emitFunc flushes the summary line for the function currently
		// in the accumulator, folds its tallies into the package-wide
		// totals, then restarts the accumulator at unit 'u'.
		emitFunc := func(u extcu) error {
			// Don't emit entries for function literals (see discussion
			// in function header comment above).
			if !flit {
				if _, err := fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n",
					ffile, fline, fname, perc(cstmts, tstmts)); err != nil {
					return err
				}
			}
			captureFuncStart(u)
			allStmts += tstmts
			covStmts += cstmts
			tstmts = 0
			cstmts = 0
			return nil
		}
		for k, u := range units {
			if k == 0 {
				captureFuncStart(u)
			} else {
				if fname != p.funcs[u.fnfid].fname {
					// New function; emit entry for previous one.
					if err := emitFunc(u); err != nil {
						return err
					}
				}
			}
			tstmts += uint64(u.NxStmts)
			count := p.unitTable[u]
			if count != 0 {
				cstmts += uint64(u.NxStmts)
			}
		}
		// Flush the last function; the zero-value arg is just a
		// placeholder for the restart that follows the emit.
		if err := emitFunc(extcu{}); err != nil {
			return err
		}
	}
	if _, err := fmt.Fprintf(tabber, "%s\t%s\t%.1f%%\n",
		"total", "(statements)", perc(covStmts, allStmts)); err != nil {
		return err
	}
	return nil
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmerge
+
+// package cmerge provides a few small utility APIs for helping
+// with merging of counter data for a given function.
+
+import (
+ "fmt"
+ "internal/coverage"
+ "math"
+)
+
// Merger provides state and methods to help manage the process of
// merging together coverage counter data for a given function, for
// tools that need to implicitly merge counters as they read multiple
// coverage counter data files.
type Merger struct {
	cmode    coverage.CounterMode        // counter mode; must agree across merged inputs
	cgran    coverage.CounterGranularity // counter granularity; must agree across merged inputs
	overflow bool                        // set when a saturating add overflowed; cleared by MergeCounters
}
+
+// MergeCounters takes the counter values in 'src' and merges them
+// into 'dst' according to the correct counter mode.
+func (m *Merger) MergeCounters(dst, src []uint32) (error, bool) {
+ if len(src) != len(dst) {
+ return fmt.Errorf("merging counters: len(dst)=%d len(src)=%d", len(dst), len(src)), false
+ }
+ if m.cmode == coverage.CtrModeSet {
+ for i := 0; i < len(src); i++ {
+ if src[i] != 0 {
+ dst[i] = 1
+ }
+ }
+ } else {
+ for i := 0; i < len(src); i++ {
+ dst[i] = m.SaturatingAdd(dst[i], src[i])
+ }
+ }
+ ovf := m.overflow
+ m.overflow = false
+ return nil, ovf
+}
+
+// Saturating add does a saturating addition of 'dst' and 'src',
+// returning added value or math.MaxUint32 if there is an overflow.
+// Overflows are recorded in case the client needs to track them.
+func (m *Merger) SaturatingAdd(dst, src uint32) uint32 {
+ result, overflow := SaturatingAdd(dst, src)
+ if overflow {
+ m.overflow = true
+ }
+ return result
+}
+
// SaturatingAdd adds 'dst' and 'src', returning either the sum and
// false, or math.MaxUint32 and true if the sum would overflow uint32.
func SaturatingAdd(dst, src uint32) (uint32, bool) {
	sum := uint64(dst) + uint64(src)
	if sum > math.MaxUint32 {
		return math.MaxUint32, true
	}
	return uint32(sum), false
}
+
+// SetModeAndGranularity records the counter mode and granularity for
+// the current merge. In the specific case of merging across coverage
+// data files from different binaries, where we're combining data from
+// more than one meta-data file, we need to check for mode/granularity
+// clashes.
+func (cm *Merger) SetModeAndGranularity(mdf string, cmode coverage.CounterMode, cgran coverage.CounterGranularity) error {
+ // Collect counter mode and granularity so as to detect clashes.
+ if cm.cmode != coverage.CtrModeInvalid {
+ if cm.cmode != cmode {
+ return fmt.Errorf("counter mode clash while reading meta-data file %s: previous file had %s, new file has %s", mdf, cm.cmode.String(), cmode.String())
+ }
+ if cm.cgran != cgran {
+ return fmt.Errorf("counter granularity clash while reading meta-data file %s: previous file had %s, new file has %s", mdf, cm.cgran.String(), cgran.String())
+ }
+ }
+ cm.cmode = cmode
+ cm.cgran = cgran
+ return nil
+}
+
+func (cm *Merger) ResetModeAndGranularity() {
+ cm.cmode = coverage.CtrModeInvalid
+ cm.cgran = coverage.CtrGranularityInvalid
+ cm.overflow = false
+}
+
// Mode returns the counter mode recorded by the most recent
// SetModeAndGranularity call (CtrModeInvalid if unset or reset).
func (cm *Merger) Mode() coverage.CounterMode {
	return cm.cmode
}
+
// Granularity returns the counter granularity recorded by the most
// recent SetModeAndGranularity call (CtrGranularityInvalid if unset
// or reset).
func (cm *Merger) Granularity() coverage.CounterGranularity {
	return cm.cgran
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmerge_test
+
+import (
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/cmerge"
+ "testing"
+)
+
+func TestClash(t *testing.T) {
+ m := &cmerge.Merger{}
+ err := m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash")
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerBlock)
+ if err != nil {
+ t.Fatalf("unexpected clash")
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeCount, coverage.CtrGranularityPerBlock)
+ if err == nil {
+ t.Fatalf("expected mode clash, not found")
+ }
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeSet, coverage.CtrGranularityPerFunc)
+ if err == nil {
+ t.Fatalf("expected granularity clash, not found")
+ }
+ m.ResetModeAndGranularity()
+ err = m.SetModeAndGranularity("mdf1.data", coverage.CtrModeCount, coverage.CtrGranularityPerFunc)
+ if err != nil {
+ t.Fatalf("unexpected clash after reset")
+ }
+}
+
// TestBasic runs MergeCounters over a table of scenarios covering
// set-mode union semantics, count-mode addition, and saturation on
// overflow, checking the merged values and the overflow indication.
func TestBasic(t *testing.T) {
	scenarios := []struct {
		cmode coverage.CounterMode
		cgran coverage.CounterGranularity
		src, dst, res []uint32
		iters int
		merr bool
		overflow bool
	}{
		{
			// Set mode: merge is a union; two iterations verify that
			// the operation is idempotent.
			cmode: coverage.CtrModeSet,
			cgran: coverage.CtrGranularityPerBlock,
			src: []uint32{1, 0, 1},
			dst: []uint32{1, 1, 0},
			res: []uint32{1, 1, 1},
			iters: 2,
			overflow: false,
		},
		{
			// Count mode: element-wise addition.
			cmode: coverage.CtrModeCount,
			cgran: coverage.CtrGranularityPerBlock,
			src: []uint32{1, 0, 3},
			dst: []uint32{5, 7, 0},
			res: []uint32{6, 7, 3},
			iters: 1,
			overflow: false,
		},
		{
			// Count mode with values whose sum exceeds MaxUint32:
			// result saturates and the overflow flag is reported.
			cmode: coverage.CtrModeCount,
			cgran: coverage.CtrGranularityPerBlock,
			src: []uint32{4294967200, 0, 3},
			dst: []uint32{4294967001, 7, 0},
			res: []uint32{4294967295, 7, 3},
			iters: 1,
			overflow: true,
		},
	}

	for k, scenario := range scenarios {
		var err error
		var ovf bool
		m := &cmerge.Merger{}
		mdf := fmt.Sprintf("file%d", k)
		err = m.SetModeAndGranularity(mdf, scenario.cmode, scenario.cgran)
		if err != nil {
			t.Fatalf("case %d SetModeAndGranularity failed: %v", k, err)
		}
		for i := 0; i < scenario.iters; i++ {
			err, ovf = m.MergeCounters(scenario.dst, scenario.src)
			if ovf != scenario.overflow {
				t.Fatalf("case %d overflow mismatch: got %v want %v", k, ovf, scenario.overflow)
			}
			if !scenario.merr && err != nil {
				t.Fatalf("case %d unexpected err %v", k, err)
			}
			if scenario.merr && err == nil {
				t.Fatalf("case %d expected err, not received", k)
			}
			// NOTE(review): this inner 'i' shadows the iteration
			// counter declared just above; harmless, but easy to
			// misread.
			for i := range scenario.dst {
				if scenario.dst[i] != scenario.res[i] {
					t.Fatalf("case %d: bad merge at %d got %d want %d",
						k, i, scenario.dst[i], scenario.res[i])
				}
			}
		}
	}
}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pods
+
+import (
+ "fmt"
+ "internal/coverage"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+)
+
+// Pod encapsulates a set of files emitted during the executions of a
+// coverage-instrumented binary. Each pod contains a single meta-data
+// file, and then 0 or more counter data files that refer to that
+// meta-data file. Pods are intended to simplify processing of
+// coverage output files in the case where we have several coverage
+// output directories containing output files derived from more
+// than one instrumented executable. In the case where the files that
+// make up a pod are spread out across multiple directories, each
+// element of the "Origins" field below will be populated with the
+// index of the originating directory for the corresponding counter
+// data file (within the slice of input dirs handed to CollectPods).
+// The ProcessIDs field will be populated with the process ID of each
+// data file in the CounterDataFiles slice.
+type Pod struct {
+ // MetaFile is the path of this pod's meta-data file.
+ MetaFile string
+ // CounterDataFiles holds the paths of the counter data files that
+ // refer to MetaFile, sorted by file name.
+ CounterDataFiles []string
+ // Origins[i] is the index (within the dirs slice handed to
+ // CollectPods) of the directory containing CounterDataFiles[i],
+ // or -1 when the pod was built via CollectPodsFromFiles.
+ Origins []int
+ // ProcessIDs[i] is the process ID parsed from the file name of
+ // CounterDataFiles[i].
+ ProcessIDs []int
+}
+
+// CollectPods visits the files contained within the directories in
+// the list 'dirs', collects any coverage-related files, partitions
+// them into pods, and returns a list of the pods to the caller, along
+// with an error if something went wrong during directory/file
+// reading.
+//
+// CollectPods skips over any file that is not related to coverage
+// (e.g. avoids looking at things that are not meta-data files or
+// counter-data files). CollectPods also skips over 'orphaned' counter
+// data files (e.g. counter data files for which we can't find the
+// corresponding meta-data file). If "warn" is true, CollectPods will
+// issue warnings to stderr when it encounters non-fatal problems (for
+// orphans or a directory with no meta-data files).
+func CollectPods(dirs []string, warn bool) ([]Pod, error) {
+ files := []string{}
+ dirIndices := []int{}
+ for k, dir := range dirs {
+ dents, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ for _, e := range dents {
+ if e.IsDir() {
+ continue
+ }
+ files = append(files, filepath.Join(dir, e.Name()))
+ dirIndices = append(dirIndices, k)
+ }
+ }
+ return collectPodsImpl(files, dirIndices, warn), nil
+}
+
+// CollectPodsFromFiles functions the same as "CollectPods" but
+// operates on an explicit list of files instead of a directory.
+// Since no input directories are involved, the "Origins" entry for
+// every counter data file in the resulting pods will be -1.
+func CollectPodsFromFiles(files []string, warn bool) []Pod {
+ return collectPodsImpl(files, nil, warn)
+}
+
+// fileWithAnnotations associates a counter data file with the index
+// of its originating input directory (-1 if unknown) and the process
+// ID parsed from its file name.
+type fileWithAnnotations struct {
+ file string
+ origin int
+ pid int
+}
+
+// protoPod is a pod under construction: the meta-data file plus the
+// annotated counter data files collected for it so far.
+type protoPod struct {
+ mf string
+ elements []fileWithAnnotations
+}
+
+// collectPodsImpl partitions the specified list of files into
+// coverage pods. Stage one gathers the set of distinct meta-data
+// files; each such file seeds one pod. Stage two walks the counter
+// data files (whose names embed the hash of the meta-data file they
+// were produced against) and attaches each to the pod with the
+// matching meta-data hash.
+//
+// The directory-index bookkeeping exists because one pod's counter
+// data files may be spread across several input directories. For
+// example:
+//
+// directory 1:
+//
+// M1 covmeta.9bbf1777f47b3fcacb05c38b035512d6
+// C1 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677673.1662138360208416486
+// C2 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677637.1662138359974441782
+//
+// directory 2:
+//
+// M2 covmeta.9bbf1777f47b3fcacb05c38b035512d6
+// C3 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677445.1662138360208416480
+// C4 covcounters.9bbf1777f47b3fcacb05c38b035512d6.1677677.1662138359974441781
+// M3 covmeta.a723844208cea2ae80c63482c78b2245
+// C5 covcounters.a723844208cea2ae80c63482c78b2245.3677445.1662138360208416480
+// C6 covcounters.a723844208cea2ae80c63482c78b2245.1877677.1662138359974441781
+//
+// Here M1 and M2 are identical, so only two pods result: one with
+// meta-file M1 and counter files C1-C4, and one with meta-file M3
+// and counter files C5 and C6.
+func collectPodsImpl(files []string, dirIndices []int, warn bool) []Pod {
+	metaRE := regexp.MustCompile(fmt.Sprintf(`^%s\.(\S+)$`, coverage.MetaFilePref))
+	protos := make(map[string]*protoPod)
+	// Stage one: locate the distinct meta-data files.
+	for _, path := range files {
+		if groups := metaRE.FindStringSubmatch(filepath.Base(path)); groups != nil {
+			hash := groups[1]
+			// Duplicate meta-data files are possible (e.g. the same
+			// binary run twice with output sent to different dirs);
+			// treat the first one encountered as canonical.
+			if _, ok := protos[hash]; !ok {
+				protos[hash] = &protoPod{mf: path}
+			}
+			// FIXME: should probably check file length and hash here for
+			// the duplicate.
+		}
+	}
+	// Stage two: attach counter data files to their pods.
+	counterRE := regexp.MustCompile(fmt.Sprintf(coverage.CounterFileRegexp, coverage.CounterFilePref))
+	for k, path := range files {
+		groups := counterRE.FindStringSubmatch(filepath.Base(path))
+		if groups == nil {
+			continue
+		}
+		hash := groups[1] // meta hash
+		pid, err := strconv.Atoi(groups[2])
+		if err != nil {
+			// Malformed pid component; skip the file.
+			continue
+		}
+		proto, ok := protos[hash]
+		if !ok {
+			if warn {
+				warning("skipping orphaned counter file: %s", path)
+			}
+			continue
+		}
+		origin := -1
+		if dirIndices != nil {
+			origin = dirIndices[k]
+		}
+		proto.elements = append(proto.elements,
+			fileWithAnnotations{file: path, origin: origin, pid: pid})
+	}
+	if len(protos) == 0 {
+		if warn {
+			warning("no coverage data files found")
+		}
+		return nil
+	}
+	// Flatten each proto-pod into a Pod, sorting counter files by
+	// name within a pod and pods by meta-file for determinism.
+	pods := make([]Pod, 0, len(protos))
+	for _, proto := range protos {
+		sort.Slice(proto.elements, func(i, j int) bool {
+			return proto.elements[i].file < proto.elements[j].file
+		})
+		pod := Pod{
+			MetaFile:         proto.mf,
+			CounterDataFiles: make([]string, 0, len(proto.elements)),
+			Origins:          make([]int, 0, len(proto.elements)),
+			ProcessIDs:       make([]int, 0, len(proto.elements)),
+		}
+		for _, elt := range proto.elements {
+			pod.CounterDataFiles = append(pod.CounterDataFiles, elt.file)
+			pod.Origins = append(pod.Origins, elt.origin)
+			pod.ProcessIDs = append(pod.ProcessIDs, elt.pid)
+		}
+		pods = append(pods, pod)
+	}
+	sort.Slice(pods, func(i, j int) bool {
+		return pods[i].MetaFile < pods[j].MetaFile
+	})
+	return pods
+}
+
+// warning emits a non-fatal diagnostic to stderr, prefixed with
+// "warning: " and terminated by a newline. The message is formatted
+// and written with a single Fprintf call (the prefix and suffix
+// contain no formatting verbs) so the whole line goes out in one
+// write, avoiding interleaving with other stderr output.
+func warning(s string, a ...interface{}) {
+	fmt.Fprintf(os.Stderr, "warning: "+s+"\n", a...)
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pods_test
+
+import (
+ "crypto/md5"
+ "fmt"
+ "internal/coverage"
+ "internal/coverage/pods"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+// TestPodCollection builds two output directories populated with
+// meta-data files, counter data files, unrelated files, an orphaned
+// counter file, and a duplicated meta-data file, then verifies that
+// pods.CollectPods partitions them into the expected two pods (and
+// that an unreadable input dir yields an error).
+func TestPodCollection(t *testing.T) {
+	//testenv.MustHaveGoBuild(t)
+
+	// mkdir creates directory d (with the given perms) under a fresh
+	// temp dir and returns its path.
+	mkdir := func(d string, perm os.FileMode) string {
+		dp := filepath.Join(t.TempDir(), d)
+		if err := os.Mkdir(dp, perm); err != nil {
+			t.Fatal(err)
+		}
+		return dp
+	}
+
+	// mkfile writes a small throwaway file fn into dir d.
+	mkfile := func(d string, fn string) string {
+		fp := filepath.Join(d, fn)
+		if err := ioutil.WriteFile(fp, []byte("foo"), 0666); err != nil {
+			t.Fatal(err)
+		}
+		return fp
+	}
+
+	// mkmeta fabricates a meta-data file whose hash component is the
+	// MD5 of tag, so identical tags produce identical file names.
+	mkmeta := func(dir string, tag string) string {
+		hash := md5.Sum([]byte(tag))
+		fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, hash)
+		return mkfile(dir, fn)
+	}
+
+	// mkcounter fabricates a counter data file for the meta-data file
+	// with the given tag, using a fixed pid and run number nt.
+	mkcounter := func(dir string, tag string, nt int) string {
+		hash := md5.Sum([]byte(tag))
+		dummyPid := 42
+		fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, hash, dummyPid, nt)
+		return mkfile(dir, fn)
+	}
+
+	// trim reduces a path to "parentdir/base" for stable comparison.
+	trim := func(path string) string {
+		b := filepath.Base(path)
+		d := filepath.Dir(path)
+		db := filepath.Base(d)
+		return db + "/" + b
+	}
+
+	podToString := func(p pods.Pod) string {
+		rv := trim(p.MetaFile) + " [\n"
+		for k, df := range p.CounterDataFiles {
+			rv += trim(df)
+			if p.Origins != nil {
+				rv += fmt.Sprintf(" o:%d", p.Origins[k])
+			}
+			rv += "\n"
+		}
+		return rv + "]"
+	}
+
+	// Create a couple of directories.
+	o1 := mkdir("o1", 0777)
+	o2 := mkdir("o2", 0777)
+
+	// Add some random files (not coverage related)
+	mkfile(o1, "blah.txt")
+	mkfile(o1, "something.exe")
+
+	// Add a meta-data file with two counter files to first dir.
+	// (The second "m1"/2 call rewrites the same file name and should
+	// not produce an extra pod entry.)
+	mkmeta(o1, "m1")
+	mkcounter(o1, "m1", 1)
+	mkcounter(o1, "m1", 2)
+	mkcounter(o1, "m1", 2)
+
+	// Add a counter file with no associated meta file.
+	mkcounter(o1, "orphan", 9)
+
+	// Add a meta-data file with three counter files to second dir.
+	mkmeta(o2, "m2")
+	mkcounter(o2, "m2", 1)
+	mkcounter(o2, "m2", 2)
+	mkcounter(o2, "m2", 3)
+
+	// Add a duplicate of the first meta-file and a corresponding
+	// counter file to the second dir. This is intended to capture
+	// the scenario where we have two different runs of the same
+	// coverage-instrumented binary, but with the output files
+	// sent to separate directories.
+	mkmeta(o2, "m1")
+	mkcounter(o2, "m1", 11)
+
+	// Collect pods.
+	podlist, err := pods.CollectPods([]string{o1, o2}, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify pods
+	if len(podlist) != 2 {
+		t.Fatalf("expected 2 pods got %d pods", len(podlist))
+	}
+
+	for k, p := range podlist {
+		t.Logf("%d: mf=%s\n", k, p.MetaFile)
+	}
+
+	expected := []string{
+		`o1/covmeta.ae7be26cdaa742ca148068d5ac90eaca [
+o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.42.1 o:0
+o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.42.2 o:0
+o2/covcounters.ae7be26cdaa742ca148068d5ac90eaca.42.11 o:1
+]`,
+		`o2/covmeta.aaf2f89992379705dac844c0a2a1d45f [
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.42.1 o:1
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.42.2 o:1
+o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.42.3 o:1
+]`,
+	}
+	for k, exp := range expected {
+		got := podToString(podlist[k])
+		if exp != got {
+			t.Errorf("pod %d: expected:\n%s\ngot:\n%s", k, exp, got)
+		}
+	}
+
+	// Check handling of bad/unreadable dir.
+	if runtime.GOOS == "linux" {
+		dbad := "/dev/null"
+		_, err = pods.CollectPods([]string{dbad}, true)
+		if err == nil {
+			t.Errorf("expected error due to unreadable dir")
+		}
+	}
+}