internal/coverage: add coverage meta-data decoder
author    Than McIntosh <thanm@google.com>
          Wed, 29 Sep 2021 20:42:55 +0000 (16:42 -0400)
committer Than McIntosh <thanm@google.com>
          Mon, 26 Sep 2022 20:52:13 +0000 (20:52 +0000)
Add a coverage meta-data decoder, which provides APIs for reading
encoded coverage meta-data and expanding it into a usable form. This
package is intended to be used by the coverage tooling that reads
data files emitted from coverage runs. Along with the new decoding
package is a unit test that runs the encode and decode paths together
to check that "decode(encode(X)) == X".

Updates #51430.

Change-Id: I81d27d8da0b2fcfa5039114a6e35a4b463d19b3c
Reviewed-on: https://go-review.googlesource.com/c/go/+/353454
Reviewed-by: David Chase <drchase@google.com>
Run-TryBot: Than McIntosh <thanm@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>

src/go/build/deps_test.go
src/internal/coverage/decodemeta/decode.go [new file with mode: 0644]
src/internal/coverage/decodemeta/decodefile.go [new file with mode: 0644]
src/internal/coverage/slicereader/slicereader.go [new file with mode: 0644]
src/internal/coverage/slicereader/slr_test.go [new file with mode: 0644]
src/internal/coverage/slicewriter/slicewriter.go [new file with mode: 0644]
src/internal/coverage/slicewriter/slw_test.go [new file with mode: 0644]
src/internal/coverage/stringtab/stringtab.go
src/internal/coverage/test/roundtrip_test.go [new file with mode: 0644]

diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 2d88053382e52ca625bea240229eba1f577d61e6..120d60ee24c6bd03465740a53d550b914075966b 100644 (file)
@@ -549,12 +549,22 @@ var depsRules = `
        FMT
        < internal/diff, internal/txtar;
 
-    FMT, io, internal/coverage/uleb128
+    FMT, os
+    < internal/coverage/slicewriter;
+
+    encoding/binary, internal/unsafeheader, unsafe
+    < internal/coverage/slicereader;
+
+    FMT, io, internal/coverage/slicereader, internal/coverage/uleb128
     < internal/coverage/stringtab;
 
     FMT, encoding/binary, internal/coverage, internal/coverage/stringtab,
     io, os, bufio, crypto/md5
     < internal/coverage/encodemeta;
+
+    FMT, encoding/binary, internal/coverage, io, os,
+    crypto/md5, internal/coverage/stringtab
+    < internal/coverage/decodemeta;
 `
 
 // listStdPkgs returns the same list of packages as "go list std".
diff --git a/src/internal/coverage/decodemeta/decode.go b/src/internal/coverage/decodemeta/decode.go
new file mode 100644 (file)
index 0000000..4e80c07
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decodemeta
+
+// This package contains APIs and helpers for decoding a single package's
+// meta data "blob" emitted by the compiler when coverage instrumentation
+// is turned on.
+
+import (
+       "encoding/binary"
+       "fmt"
+       "internal/coverage"
+       "internal/coverage/slicereader"
+       "internal/coverage/stringtab"
+       "os"
+)
+
+// See comments in the encodemeta package for details on the format.
+
+type CoverageMetaDataDecoder struct {
+       r      *slicereader.Reader
+       hdr    coverage.MetaSymbolHeader
+       strtab *stringtab.Reader
+       tmp    []byte
+       debug  bool
+}
+
+func NewCoverageMetaDataDecoder(b []byte, readonly bool) (*CoverageMetaDataDecoder, error) {
+       slr := slicereader.NewReader(b, readonly)
+       x := &CoverageMetaDataDecoder{
+               r:   slr,
+               tmp: make([]byte, 0, 256),
+       }
+       if err := x.readHeader(); err != nil {
+               return nil, err
+       }
+       if err := x.readStringTable(); err != nil {
+               return nil, err
+       }
+       return x, nil
+}
+
+func (d *CoverageMetaDataDecoder) readHeader() error {
+       if err := binary.Read(d.r, binary.LittleEndian, &d.hdr); err != nil {
+               return err
+       }
+       if d.debug {
+               fmt.Fprintf(os.Stderr, "=-= after readHeader: %+v\n", d.hdr)
+       }
+       return nil
+}
+
+func (d *CoverageMetaDataDecoder) readStringTable() error {
+       // Seek to the correct location to read the string table.
+       stringTableLocation := int64(coverage.CovMetaHeaderSize + 4*d.hdr.NumFuncs)
+       d.r.SeekTo(stringTableLocation)
+
+       // Read the table itself.
+       d.strtab = stringtab.NewReader(d.r)
+       d.strtab.Read()
+       return nil
+}
+
+func (d *CoverageMetaDataDecoder) PackagePath() string {
+       return d.strtab.Get(d.hdr.PkgPath)
+}
+
+func (d *CoverageMetaDataDecoder) PackageName() string {
+       return d.strtab.Get(d.hdr.PkgName)
+}
+
+func (d *CoverageMetaDataDecoder) ModulePath() string {
+       return d.strtab.Get(d.hdr.ModulePath)
+}
+
+func (d *CoverageMetaDataDecoder) NumFuncs() uint32 {
+       return d.hdr.NumFuncs
+}
+
+// ReadFunc reads the coverage meta-data for the function with index
+// 'fidx', filling it into the FuncDesc pointed to by 'f'.
+func (d *CoverageMetaDataDecoder) ReadFunc(fidx uint32, f *coverage.FuncDesc) error {
+       if fidx >= d.hdr.NumFuncs {
+               return fmt.Errorf("illegal function index")
+       }
+
+       // Seek to the correct location to read the function offset and read it.
+       funcOffsetLocation := int64(coverage.CovMetaHeaderSize + 4*fidx)
+       d.r.SeekTo(funcOffsetLocation)
+       foff := d.r.ReadUint32()
+
+       // Check assumptions
+       if foff < uint32(funcOffsetLocation) || foff > d.hdr.Length {
+               return fmt.Errorf("malformed func offset %d", foff)
+       }
+
+       // Seek to the correct location to read the function.
+       d.r.SeekTo(int64(foff))
+
+       // Preamble containing number of units, file, and function.
+       numUnits := uint32(d.r.ReadULEB128())
+       fnameidx := uint32(d.r.ReadULEB128())
+       fileidx := uint32(d.r.ReadULEB128())
+
+       f.Srcfile = d.strtab.Get(fileidx)
+       f.Funcname = d.strtab.Get(fnameidx)
+
+       // Now the units
+       f.Units = f.Units[:0]
+       if cap(f.Units) < int(numUnits) {
+               f.Units = make([]coverage.CoverableUnit, 0, numUnits)
+       }
+       for k := uint32(0); k < numUnits; k++ {
+               f.Units = append(f.Units,
+                       coverage.CoverableUnit{
+                               StLine:  uint32(d.r.ReadULEB128()),
+                               StCol:   uint32(d.r.ReadULEB128()),
+                               EnLine:  uint32(d.r.ReadULEB128()),
+                               EnCol:   uint32(d.r.ReadULEB128()),
+                               NxStmts: uint32(d.r.ReadULEB128()),
+                       })
+       }
+       lit := d.r.ReadULEB128()
+       if lit != 0 {
+               f.Lit = true
+       }
+       return nil
+}
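
As a usage illustration (not part of the change), a minimal sketch of
how a tool might walk one package's meta-data with this decoder; the
helper name is hypothetical, and 'blob' is assumed to hold a single
package's encoded meta-data blob as produced by the encodemeta package:

    package covtool

    import (
        "fmt"
        "internal/coverage"
        "internal/coverage/decodemeta"
    )

    // dumpPackageMeta prints the functions recorded in one package
    // meta-data blob.
    func dumpPackageMeta(blob []byte) error {
        dec, err := decodemeta.NewCoverageMetaDataDecoder(blob, false /* not read-only */)
        if err != nil {
            return err
        }
        fmt.Printf("package %s (path %s)\n", dec.PackageName(), dec.PackagePath())
        var fd coverage.FuncDesc
        for i := uint32(0); i < dec.NumFuncs(); i++ {
            if err := dec.ReadFunc(i, &fd); err != nil {
                return err
            }
            fmt.Printf("  %s (%s): %d coverable units\n",
                fd.Funcname, fd.Srcfile, len(fd.Units))
        }
        return nil
    }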
diff --git a/src/internal/coverage/decodemeta/decodefile.go b/src/internal/coverage/decodemeta/decodefile.go
new file mode 100644 (file)
index 0000000..dd602c5
--- /dev/null
@@ -0,0 +1,223 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package decodemeta
+
+// This package contains APIs and helpers for reading and decoding
+// meta-data output files emitted by the runtime when a
+// coverage-instrumented binary executes. A meta-data file contains
+// top-level info (counter mode, number of packages) and then a
+// separate self-contained meta-data section for each Go package.
+
+import (
+       "bufio"
+       "crypto/md5"
+       "encoding/binary"
+       "fmt"
+       "internal/coverage"
+       "internal/coverage/slicereader"
+       "internal/coverage/stringtab"
+       "io"
+       "os"
+)
+
+// CoverageMetaFileReader provides state and methods for reading
+// a meta-data file from a code coverage run.
+type CoverageMetaFileReader struct {
+       f          *os.File
+       hdr        coverage.MetaFileHeader
+       tmp        []byte
+       pkgOffsets []uint64
+       pkgLengths []uint64
+       strtab     *stringtab.Reader
+       fileRdr    *bufio.Reader
+       fileView   []byte
+       debug      bool
+}
+
+// NewCoverageMetaFileReader returns a new helper object for reading
+// the coverage meta-data output file 'f'. The param 'fileView' is a
+// read-only slice containing the contents of 'f' obtained by mmap'ing
+// the file read-only; 'fileView' may be nil, in which case the helper
+// will read the contents of the file using regular file Read
+// operations.
+func NewCoverageMetaFileReader(f *os.File, fileView []byte) (*CoverageMetaFileReader, error) {
+       r := &CoverageMetaFileReader{
+               f:        f,
+               fileView: fileView,
+               tmp:      make([]byte, 256),
+       }
+
+       if err := r.readFileHeader(); err != nil {
+               return nil, err
+       }
+       return r, nil
+}
+
+func (r *CoverageMetaFileReader) readFileHeader() error {
+       var err error
+
+       r.fileRdr = bufio.NewReader(r.f)
+
+       // Read file header.
+       if err := binary.Read(r.fileRdr, binary.LittleEndian, &r.hdr); err != nil {
+               return err
+       }
+
+       // Verify magic string
+       m := r.hdr.Magic
+       g := coverage.CovMetaMagic
+       if m[0] != g[0] || m[1] != g[1] || m[2] != g[2] || m[3] != g[3] {
+               return fmt.Errorf("invalid meta-data file magic string")
+       }
+
+       // Vet the version. If this is a meta-data file from the future,
+       // we won't be able to read it.
+       if r.hdr.Version > coverage.MetaFileVersion {
+               return fmt.Errorf("meta-data file with unknown version %d (expected %d)", r.hdr.Version, coverage.MetaFileVersion)
+       }
+
+       // Read package offsets for good measure
+       r.pkgOffsets = make([]uint64, r.hdr.Entries)
+       for i := uint64(0); i < r.hdr.Entries; i++ {
+               if r.pkgOffsets[i], err = r.rdUint64(); err != nil {
+                       return err
+               }
+               if r.pkgOffsets[i] > r.hdr.TotalLength {
+                       return fmt.Errorf("insane pkg offset %d: %d > totlen %d",
+                               i, r.pkgOffsets[i], r.hdr.TotalLength)
+               }
+       }
+       r.pkgLengths = make([]uint64, r.hdr.Entries)
+       for i := uint64(0); i < r.hdr.Entries; i++ {
+               if r.pkgLengths[i], err = r.rdUint64(); err != nil {
+                       return err
+               }
+               if r.pkgLengths[i] > r.hdr.TotalLength {
+                       return fmt.Errorf("insane pkg length %d: %d > totlen %d",
+                               i, r.pkgLengths[i], r.hdr.TotalLength)
+               }
+       }
+
+       // Read string table.
+       b := make([]byte, r.hdr.StrTabLength)
+       nr, err := r.fileRdr.Read(b)
+       if err != nil {
+               return err
+       }
+       if nr != int(r.hdr.StrTabLength) {
+               return fmt.Errorf("error: short read on string table")
+       }
+       slr := slicereader.NewReader(b, false /* not readonly */)
+       r.strtab = stringtab.NewReader(slr)
+       r.strtab.Read()
+
+       if r.debug {
+               fmt.Fprintf(os.Stderr, "=-= read-in header is: %+v\n", *r)
+       }
+
+       return nil
+}
+
+func (r *CoverageMetaFileReader) rdUint64() (uint64, error) {
+       r.tmp = r.tmp[:0]
+       r.tmp = append(r.tmp, make([]byte, 8)...)
+       n, err := r.fileRdr.Read(r.tmp)
+       if err != nil {
+               return 0, err
+       }
+       if n != 8 {
+               return 0, fmt.Errorf("premature end of file on read")
+       }
+       v := binary.LittleEndian.Uint64(r.tmp)
+       return v, nil
+}
+
+// NumPackages returns the number of packages for which this file
+// contains meta-data.
+func (r *CoverageMetaFileReader) NumPackages() uint64 {
+       return r.hdr.Entries
+}
+
+// CounterMode returns the counter mode (set, count, atomic) used
+// when building for coverage for the program that produced this
+// meta-data file.
+func (r *CoverageMetaFileReader) CounterMode() coverage.CounterMode {
+       return r.hdr.CMode
+}
+
+// CounterGranularity returns the counter granularity (single counter
+// per function, or counter per block) selected when building for
+// coverage for the program that produced this meta-data file.
+func (r *CoverageMetaFileReader) CounterGranularity() coverage.CounterGranularity {
+       return r.hdr.CGranularity
+}
+
+// FileHash returns the hash computed for all of the package meta-data
+// blobs. Coverage counter data files refer to this hash, and the
+// hash will be encoded into the meta-data file name.
+func (r *CoverageMetaFileReader) FileHash() [16]byte {
+       return r.hdr.MetaFileHash
+}
+
+// GetPackageDecoder requests a decoder object for the package within
+// the meta-data file whose index is 'pkIdx'. If the
+// CoverageMetaFileReader was set up with a read-only file view, a
+// pointer into that file view will be returned, otherwise the buffer
+// 'payloadbuf' will be written to (or if it is not of sufficient
+// size, a new buffer will be allocated). Return value is the decoder,
+// a byte slice with the encoded meta-data, and an error.
+func (r *CoverageMetaFileReader) GetPackageDecoder(pkIdx uint32, payloadbuf []byte) (*CoverageMetaDataDecoder, []byte, error) {
+       pp, err := r.GetPackagePayload(pkIdx, payloadbuf)
+       if r.debug {
+               fmt.Fprintf(os.Stderr, "=-= pkidx=%d payload length is %d hash=%s\n",
+                       pkIdx, len(pp), fmt.Sprintf("%x", md5.Sum(pp)))
+       }
+       if err != nil {
+               return nil, nil, err
+       }
+       mdd, err := NewCoverageMetaDataDecoder(pp, r.fileView != nil)
+       if err != nil {
+               return nil, nil, err
+       }
+       return mdd, pp, nil
+}
+
+// GetPackagePayload returns the raw (encoded) meta-data payload for the
+// package with index 'pkIdx'. As with GetPackageDecoder, if the
+// CoverageMetaFileReader was set up with a read-only file view, a
+// pointer into that file view will be returned, otherwise the buffer
+// 'payloadbuf' will be written to (or if it is not of sufficient
+// size, a new buffer will be allocated). Return value is a byte
+// slice with the encoded meta-data, and an error.
+func (r *CoverageMetaFileReader) GetPackagePayload(pkIdx uint32, payloadbuf []byte) ([]byte, error) {
+
+       // Determine correct offset/length.
+       if uint64(pkIdx) >= r.hdr.Entries {
+               return nil, fmt.Errorf("GetPackagePayload: illegal pkg index %d", pkIdx)
+       }
+       off := r.pkgOffsets[pkIdx]
+       len := r.pkgLengths[pkIdx]
+
+       if r.debug {
+               fmt.Fprintf(os.Stderr, "=-= for pk %d, off=%d len=%d\n", pkIdx, off, len)
+       }
+
+       if r.fileView != nil {
+               return r.fileView[off : off+len], nil
+       }
+
+       payload := payloadbuf[:0]
+       if cap(payload) < int(len) {
+               payload = make([]byte, 0, len)
+       }
+       payload = append(payload, make([]byte, len)...)
+       if _, err := r.f.Seek(int64(off), os.SEEK_SET); err != nil {
+               return nil, err
+       }
+       if _, err := io.ReadFull(r.f, payload); err != nil {
+               return nil, err
+       }
+       return payload, nil
+}
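
As a usage illustration (not part of the change), a minimal sketch of
reading a whole meta-data file with the reader above, passing a nil
fileView so that regular file reads are used; the helper name is
hypothetical:

    package covtool

    import (
        "fmt"
        "internal/coverage/decodemeta"
        "os"
    )

    // dumpMetaFile opens a coverage meta-data file and prints a short
    // summary of each package section.
    func dumpMetaFile(path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        mfr, err := decodemeta.NewCoverageMetaFileReader(f, nil /* no fileView */)
        if err != nil {
            return err
        }
        fmt.Printf("mode=%d granularity=%d hash=%x\n",
            mfr.CounterMode(), mfr.CounterGranularity(), mfr.FileHash())
        var payload []byte
        for pi := uint32(0); pi < uint32(mfr.NumPackages()); pi++ {
            var dec *decodemeta.CoverageMetaDataDecoder
            dec, payload, err = mfr.GetPackageDecoder(pi, payload)
            if err != nil {
                return err
            }
            fmt.Printf("  package %s: %d funcs\n", dec.PackagePath(), dec.NumFuncs())
        }
        return nil
    }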
diff --git a/src/internal/coverage/slicereader/slicereader.go b/src/internal/coverage/slicereader/slicereader.go
new file mode 100644 (file)
index 0000000..c949e17
--- /dev/null
@@ -0,0 +1,105 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicereader
+
+import (
+       "encoding/binary"
+       "internal/unsafeheader"
+       "unsafe"
+)
+
+// This file contains the helper "SliceReader", a utility for
+// reading values from a byte slice that may or may not be backed
+// by a read-only mmap'd region.
+
+type Reader struct {
+       b        []byte
+       readonly bool
+       off      int64
+}
+
+func NewReader(b []byte, readonly bool) *Reader {
+       r := Reader{
+               b:        b,
+               readonly: readonly,
+       }
+       return &r
+}
+
+func (r *Reader) Read(b []byte) (int, error) {
+       amt := len(b)
+       toread := r.b[r.off:]
+       if len(toread) < amt {
+               amt = len(toread)
+       }
+       copy(b, toread)
+       r.off += int64(amt)
+       return amt, nil
+}
+
+func (r *Reader) SeekTo(off int64) {
+       r.off = off
+}
+
+func (r *Reader) Offset() int64 {
+       return r.off
+}
+
+func (r *Reader) ReadUint8() uint8 {
+       rv := uint8(r.b[int(r.off)])
+       r.off += 1
+       return rv
+}
+
+func (r *Reader) ReadUint32() uint32 {
+       end := int(r.off) + 4
+       rv := binary.LittleEndian.Uint32(r.b[int(r.off):end:end])
+       r.off += 4
+       return rv
+}
+
+func (r *Reader) ReadUint64() uint64 {
+       end := int(r.off) + 8
+       rv := binary.LittleEndian.Uint64(r.b[int(r.off):end:end])
+       r.off += 8
+       return rv
+}
+
+func (r *Reader) ReadULEB128() (value uint64) {
+       var shift uint
+
+       for {
+               b := r.b[r.off]
+               r.off++
+               value |= (uint64(b&0x7F) << shift)
+               if b&0x80 == 0 {
+                       break
+               }
+               shift += 7
+       }
+       return
+}
+
+func (r *Reader) ReadString(len int64) string {
+       b := r.b[r.off : r.off+len]
+       r.off += len
+       if r.readonly {
+               return toString(b) // backed by RO memory, ok to make unsafe string
+       }
+       return string(b)
+}
+
+func toString(b []byte) string {
+       if len(b) == 0 {
+               return ""
+       }
+
+       var s string
+       hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
+       hdr.Data = unsafe.Pointer(&b[0])
+       hdr.Len = len(b)
+
+       return s
+}
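
For reference, a worked example of the ULEB128 encoding that ReadULEB128
reverses (standard LEB128, nothing specific to this change): the value
624485 is split into 7-bit groups starting from the least significant
bits, giving 0x65, 0x0E and 0x26; every group except the last has its
high bit set as a continuation marker, so the encoded byte sequence is
0xE5 0x8E 0x26. ReadULEB128 undoes this by OR-ing each byte's low 7 bits
into the result at successively larger shifts, stopping at the first
byte whose high bit is clear.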
diff --git a/src/internal/coverage/slicereader/slr_test.go b/src/internal/coverage/slicereader/slr_test.go
new file mode 100644 (file)
index 0000000..2f7cef0
--- /dev/null
@@ -0,0 +1,92 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicereader
+
+import (
+       "encoding/binary"
+       "testing"
+)
+
+func TestSliceReader(t *testing.T) {
+       b := []byte{}
+
+       bt := make([]byte, 4)
+       e32 := uint32(1030507)
+       binary.LittleEndian.PutUint32(bt, e32)
+       b = append(b, bt...)
+
+       bt = make([]byte, 8)
+       e64 := uint64(907050301)
+       binary.LittleEndian.PutUint64(bt, e64)
+       b = append(b, bt...)
+
+       b = appendUleb128(b, uint(e32))
+       b = appendUleb128(b, uint(e64))
+       b = appendUleb128(b, 6)
+       s1 := "foobar"
+       s1b := []byte(s1)
+       b = append(b, s1b...)
+       b = appendUleb128(b, 9)
+       s2 := "bazbasher"
+       s2b := []byte(s2)
+       b = append(b, s2b...)
+
+       readStr := func(slr *Reader) string {
+               len := slr.ReadULEB128()
+               return slr.ReadString(int64(len))
+       }
+
+       for i := 0; i < 2; i++ {
+               slr := NewReader(b, i == 0)
+               g32 := slr.ReadUint32()
+               if g32 != e32 {
+                       t.Fatalf("slr.ReadUint32() got %d want %d", g32, e32)
+               }
+               g64 := slr.ReadUint64()
+               if g64 != e64 {
+                       t.Fatalf("slr.ReadUint64() got %d want %d", g64, e64)
+               }
+               g32 = uint32(slr.ReadULEB128())
+               if g32 != e32 {
+                       t.Fatalf("slr.ReadULEB128() got %d want %d", g32, e32)
+               }
+               g64 = slr.ReadULEB128()
+               if g64 != e64 {
+                       t.Fatalf("slr.ReadULEB128() got %d want %d", g64, e64)
+               }
+               gs1 := readStr(slr)
+               if gs1 != s1 {
+                       t.Fatalf("readStr got %s want %s", gs1, s1)
+               }
+               gs2 := readStr(slr)
+               if gs2 != s2 {
+                       t.Fatalf("readStr got %s want %s", gs2, s2)
+               }
+               slr.SeekTo(4)
+               off := slr.Offset()
+               if off != 4 {
+                       t.Fatalf("Offset() returned %d wanted 4", off)
+               }
+               g64 = slr.ReadUint64()
+               if g64 != e64 {
+                       t.Fatalf("post-seek slr.ReadUint64() got %d want %d", g64, e64)
+               }
+       }
+}
+
+func appendUleb128(b []byte, v uint) []byte {
+       for {
+               c := uint8(v & 0x7f)
+               v >>= 7
+               if v != 0 {
+                       c |= 0x80
+               }
+               b = append(b, c)
+               if c&0x80 == 0 {
+                       break
+               }
+       }
+       return b
+}
diff --git a/src/internal/coverage/slicewriter/slicewriter.go b/src/internal/coverage/slicewriter/slicewriter.go
new file mode 100644 (file)
index 0000000..e606db9
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicewriter
+
+import (
+       "fmt"
+       "os"
+)
+
+// WriteSeeker is a helper object that implements the io.WriteSeeker
+// interface. Clients can create a WriteSeeker, make a series of Write
+// calls to add data to it (and possibly Seek calls to update
+// previously written portions), then finally invoke BytesWritten() to
+// get a pointer to the constructed byte slice.
+type WriteSeeker struct {
+       payload []byte
+       off     int64
+}
+
+func (sws *WriteSeeker) Write(p []byte) (n int, err error) {
+       amt := len(p)
+       towrite := sws.payload[sws.off:]
+       if len(towrite) < amt {
+               sws.payload = append(sws.payload, make([]byte, amt-len(towrite))...)
+               towrite = sws.payload[sws.off:]
+       }
+       copy(towrite, p)
+       sws.off += int64(amt)
+       return amt, nil
+}
+
+// Seek repositions the read/write position of the WriteSeeker within
+// its internally maintained slice. Note that it is not possible to
+// expand the size of the slice using SEEK_SET; trying to seek outside
+// the slice will result in an error.
+func (sws *WriteSeeker) Seek(offset int64, whence int) (int64, error) {
+       switch whence {
+       case os.SEEK_SET:
+               if sws.off != offset && (offset < 0 || offset >= int64(len(sws.payload))) {
+                       return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", offset, len(sws.payload))
+               }
+               sws.off = offset
+               return offset, nil
+       case os.SEEK_CUR:
+               newoff := sws.off + offset
+               if newoff != sws.off && (newoff < 0 || newoff >= int64(len(sws.payload))) {
+                       return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", newoff, len(sws.payload))
+               }
+               sws.off += offset
+               return sws.off, nil
+       case os.SEEK_END:
+               newoff := int64(len(sws.payload)) + offset
+               if newoff != sws.off && (newoff < 0 || newoff >= int64(len(sws.payload))) {
+                       return 0, fmt.Errorf("invalid seek: new offset %d (out of range [0 %d]", newoff, len(sws.payload))
+               }
+               sws.off = newoff
+               return sws.off, nil
+       }
+       // other modes not supported
+       return 0, fmt.Errorf("unsupported seek mode %d", whence)
+}
+
+// BytesWritten returns the underlying byte slice for the WriteSeeker,
+// containing the data written to it via Write/Seek calls.
+func (sws *WriteSeeker) BytesWritten() []byte {
+       return sws.payload
+}
+
+func (sws *WriteSeeker) Read(p []byte) (n int, err error) {
+       amt := len(p)
+       toread := sws.payload[sws.off:]
+       if len(toread) < amt {
+               amt = len(toread)
+       }
+       copy(p, toread)
+       sws.off += int64(amt)
+       return amt, nil
+}
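
As an illustration (not part of the change), a minimal sketch of the
Write/Seek/BytesWritten workflow described above: write a placeholder,
append payload, then seek back and patch the placeholder. The helper
name and the back-patched length field are hypothetical, and error
handling is elided for brevity:

    package covtool

    import (
        "encoding/binary"
        "internal/coverage/slicewriter"
        "os"
    )

    // buildBlob reserves a 4-byte length slot, writes the payload, then
    // seeks back and fills in the length.
    func buildBlob(payload []byte) []byte {
        ws := &slicewriter.WriteSeeker{}
        binary.Write(ws, binary.LittleEndian, uint32(0)) // placeholder length
        ws.Write(payload)
        ws.Seek(0, os.SEEK_SET)
        binary.Write(ws, binary.LittleEndian, uint32(len(payload)))
        return ws.BytesWritten()
    }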
diff --git a/src/internal/coverage/slicewriter/slw_test.go b/src/internal/coverage/slicewriter/slw_test.go
new file mode 100644 (file)
index 0000000..f4553af
--- /dev/null
@@ -0,0 +1,131 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slicewriter
+
+import (
+       "os"
+       "testing"
+)
+
+func TestSliceWriter(t *testing.T) {
+
+       sleq := func(t *testing.T, got []byte, want []byte) {
+               t.Helper()
+               if len(got) != len(want) {
+                       t.Fatalf("bad length got %d want %d", len(got), len(want))
+               }
+               for i := range got {
+                       if got[i] != want[i] {
+                               t.Fatalf("bad read at %d got %d want %d", i, got[i], want[i])
+                       }
+               }
+       }
+
+       wf := func(t *testing.T, ws *WriteSeeker, p []byte) {
+               t.Helper()
+               nw, werr := ws.Write(p)
+               if werr != nil {
+                       t.Fatalf("unexpected write error: %v", werr)
+               }
+               if nw != len(p) {
+                       t.Fatalf("wrong amount written want %d got %d", len(p), nw)
+               }
+       }
+
+       rf := func(t *testing.T, ws *WriteSeeker, p []byte) {
+               t.Helper()
+               b := make([]byte, len(p))
+               nr, rerr := ws.Read(b)
+               if rerr != nil {
+                       t.Fatalf("unexpected read error: %v", rerr)
+               }
+               if nr != len(p) {
+                       t.Fatalf("wrong amount read want %d got %d", len(p), nr)
+               }
+               sleq(t, b, p)
+       }
+
+       sk := func(t *testing.T, ws *WriteSeeker, offset int64, whence int) {
+               t.Helper()
+               _, err := ws.Seek(offset, whence)
+               if err != nil {
+                       t.Fatalf("unexpected seek error: %v", err)
+               }
+       }
+
+       wp1 := []byte{1, 2}
+       ws := &WriteSeeker{}
+
+       // write some stuff
+       wf(t, ws, wp1)
+       // check that BytesWritten returns what we wrote.
+       sleq(t, ws.BytesWritten(), wp1)
+       // offset is at end of slice, so reading should return zero bytes.
+       rf(t, ws, []byte{})
+
+       // write some more stuff
+       wp2 := []byte{7, 8, 9}
+       wf(t, ws, wp2)
+       // check that BytesWritten returns what we expect.
+       wpex := []byte{1, 2, 7, 8, 9}
+       sleq(t, ws.BytesWritten(), wpex)
+       rf(t, ws, []byte{})
+
+       // seeks and reads.
+       sk(t, ws, 1, os.SEEK_SET)
+       rf(t, ws, []byte{2, 7})
+       sk(t, ws, -2, os.SEEK_CUR)
+       rf(t, ws, []byte{2, 7})
+       sk(t, ws, -4, os.SEEK_END)
+       rf(t, ws, []byte{2, 7})
+
+       // seek back and overwrite
+       sk(t, ws, 1, os.SEEK_SET)
+       wf(t, ws, []byte{9, 11})
+       wpex = []byte{1, 9, 11, 8, 9}
+       sleq(t, ws.BytesWritten(), wpex)
+
+       // seeks on empty writer.
+       ws2 := &WriteSeeker{}
+       sk(t, ws2, 0, os.SEEK_SET)
+       sk(t, ws2, 0, os.SEEK_CUR)
+       sk(t, ws2, 0, os.SEEK_END)
+
+       // check for seek errors.
+       _, err := ws.Seek(-1, os.SEEK_SET)
+       if err == nil {
+               t.Fatalf("expected error on invalid -1 seek")
+       }
+       _, err = ws.Seek(int64(len(ws.BytesWritten())), os.SEEK_SET)
+       if err == nil {
+               t.Fatalf("expected error on invalid %d seek", len(ws.BytesWritten()))
+       }
+
+       ws.Seek(0, os.SEEK_SET)
+       _, err = ws.Seek(-1, os.SEEK_CUR)
+       if err == nil {
+               t.Fatalf("expected error on invalid -1 seek")
+       }
+       _, err = ws.Seek(int64(len(ws.BytesWritten())), os.SEEK_CUR)
+       if err == nil {
+               t.Fatalf("expected error on invalid %d seek", len(ws.BytesWritten()))
+       }
+
+       _, err = ws.Seek(1, os.SEEK_END)
+       if err == nil {
+               t.Fatalf("expected error on invalid 1 seek")
+       }
+       bsamt := int64(-1*len(ws.BytesWritten()) - 1)
+       _, err = ws.Seek(bsamt, os.SEEK_END)
+       if err == nil {
+               t.Fatalf("expected error on invalid %d seek", bsamt)
+       }
+
+       // bad seek mode
+       _, err = ws.Seek(-1, os.SEEK_SET+9)
+       if err == nil {
+               t.Fatalf("expected error on invalid seek mode")
+       }
+}
diff --git a/src/internal/coverage/stringtab/stringtab.go b/src/internal/coverage/stringtab/stringtab.go
index 2aba3321be771ba564f5eac62021261a9c29e281..f093e2cd15be13f5b0776ca1e989302c0efb96f2 100644 (file)
@@ -6,13 +6,16 @@ package stringtab
 
 import (
        "fmt"
+       "internal/coverage/slicereader"
        "internal/coverage/uleb128"
        "io"
 )
 
-// This package implements a string table writer utility for use in
-// emitting coverage meta-data and counter-data files.
+// This package implements string table writer and reader utilities,
+// for use in emitting and reading/decoding coverage meta-data and
+// counter-data files.
 
+// Writer implements a string table writing utility.
 type Writer struct {
        stab   map[string]uint32
        strs   []string
@@ -87,3 +90,33 @@ func (stw *Writer) Write(w io.Writer) error {
        }
        return nil
 }
+
+type Reader struct {
+       r    *slicereader.Reader
+       strs []string
+}
+
+func NewReader(r *slicereader.Reader) *Reader {
+       str := &Reader{
+               r: r,
+       }
+       return str
+}
+
+func (str *Reader) Entries() int {
+       return len(str.strs)
+}
+
+func (str *Reader) Get(idx uint32) string {
+       return str.strs[idx]
+}
+
+func (str *Reader) Read() {
+       // Read the table itself.
+       numEntries := int(str.r.ReadULEB128())
+       str.strs = make([]string, 0, numEntries)
+       for idx := 0; idx < numEntries; idx++ {
+               slen := str.r.ReadULEB128()
+               str.strs = append(str.strs, str.r.ReadString(int64(slen)))
+       }
+}
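
As an illustration (not part of the change), a minimal sketch of
decoding a serialized string table with the new Reader; 'tab' is
assumed to hold a table in the format read above (a ULEB128 entry
count followed by length-prefixed strings), and the helper name is
hypothetical:

    package covtool

    import (
        "internal/coverage/slicereader"
        "internal/coverage/stringtab"
    )

    // readStrings expands an encoded string table back into a []string.
    func readStrings(tab []byte) []string {
        slr := slicereader.NewReader(tab, false /* not read-only */)
        str := stringtab.NewReader(slr)
        str.Read()
        out := make([]string, 0, str.Entries())
        for i := 0; i < str.Entries(); i++ {
            out = append(out, str.Get(uint32(i)))
        }
        return out
    }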
diff --git a/src/internal/coverage/test/roundtrip_test.go b/src/internal/coverage/test/roundtrip_test.go
new file mode 100644 (file)
index 0000000..ebd2f6c
--- /dev/null
@@ -0,0 +1,275 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+       "fmt"
+       "internal/coverage"
+       "internal/coverage/decodemeta"
+       "internal/coverage/encodemeta"
+       "internal/coverage/slicewriter"
+       "os"
+       "path/filepath"
+       "testing"
+)
+
+func cmpFuncDesc(want, got coverage.FuncDesc) string {
+       swant := fmt.Sprintf("%+v", want)
+       sgot := fmt.Sprintf("%+v", got)
+       if swant == sgot {
+               return ""
+       }
+       return fmt.Sprintf("wanted %q got %q", swant, sgot)
+}
+
+func TestMetaDataEmptyPackage(t *testing.T) {
+       // Make sure that encoding/decoding works properly with packages
+       // that don't actually have any functions.
+       p := "empty/package"
+       pn := "package"
+       mp := "m"
+       b, err := encodemeta.NewCoverageMetaDataBuilder(p, pn, mp)
+       if err != nil {
+               t.Fatalf("making builder: %v", err)
+       }
+       drws := &slicewriter.WriteSeeker{}
+       b.Emit(drws)
+       drws.Seek(0, os.SEEK_SET)
+       dec, err := decodemeta.NewCoverageMetaDataDecoder(drws.BytesWritten(), false)
+       if err != nil {
+               t.Fatalf("making decoder: %v", err)
+       }
+       nf := dec.NumFuncs()
+       if nf != 0 {
+               t.Errorf("dec.NumFuncs(): got %d want %d", nf, 0)
+       }
+       pp := dec.PackagePath()
+       if pp != p {
+               t.Errorf("dec.PackagePath(): got %s want %s", pp, p)
+       }
+       ppn := dec.PackageName()
+       if ppn != pn {
+               t.Errorf("dec.PackageName(): got %s want %s", ppn, pn)
+       }
+       pmp := dec.ModulePath()
+       if pmp != mp {
+               t.Errorf("dec.ModulePath(): got %s want %s", pmp, mp)
+       }
+}
+
+func TestMetaDataEncoderDecoder(t *testing.T) {
+       // Test encode path.
+       pp := "foo/bar/pkg"
+       pn := "pkg"
+       mp := "barmod"
+       b, err := encodemeta.NewCoverageMetaDataBuilder(pp, pn, mp)
+       if err != nil {
+               t.Fatalf("making builder: %v", err)
+       }
+       f1 := coverage.FuncDesc{
+               Funcname: "func",
+               Srcfile:  "foo.go",
+               Units: []coverage.CoverableUnit{
+                       coverage.CoverableUnit{StLine: 1, StCol: 2, EnLine: 3, EnCol: 4, NxStmts: 5},
+                       coverage.CoverableUnit{StLine: 6, StCol: 7, EnLine: 8, EnCol: 9, NxStmts: 10},
+               },
+       }
+       idx := b.AddFunc(f1)
+       if idx != 0 {
+               t.Errorf("b.AddFunc(f1) got %d want %d", idx, 0)
+       }
+
+       f2 := coverage.FuncDesc{
+               Funcname: "xfunc",
+               Srcfile:  "bar.go",
+               Units: []coverage.CoverableUnit{
+                       coverage.CoverableUnit{StLine: 1, StCol: 2, EnLine: 3, EnCol: 4, NxStmts: 5},
+                       coverage.CoverableUnit{StLine: 6, StCol: 7, EnLine: 8, EnCol: 9, NxStmts: 10},
+                       coverage.CoverableUnit{StLine: 11, StCol: 12, EnLine: 13, EnCol: 14, NxStmts: 15},
+               },
+       }
+       idx = b.AddFunc(f2)
+       if idx != 1 {
+               t.Errorf("b.AddFunc(f2) got %d want %d", idx, 1)
+       }
+
+       // Emit into a writer.
+       drws := &slicewriter.WriteSeeker{}
+       b.Emit(drws)
+
+       // Test decode path.
+       drws.Seek(0, os.SEEK_SET)
+       dec, err := decodemeta.NewCoverageMetaDataDecoder(drws.BytesWritten(), false)
+       if err != nil {
+               t.Fatalf("NewCoverageMetaDataDecoder error: %v", err)
+       }
+       nf := dec.NumFuncs()
+       if nf != 2 {
+               t.Errorf("dec.NumFuncs(): got %d want %d", nf, 2)
+       }
+
+       gotpp := dec.PackagePath()
+       if gotpp != pp {
+               t.Errorf("packagepath: got %s want %s", gotpp, pp)
+       }
+       gotpn := dec.PackageName()
+       if gotpn != pn {
+               t.Errorf("packagename: got %s want %s", gotpn, pn)
+       }
+
+       cases := []coverage.FuncDesc{f1, f2}
+       for i := uint32(0); i < uint32(len(cases)); i++ {
+               var fn coverage.FuncDesc
+               if err := dec.ReadFunc(i, &fn); err != nil {
+                       t.Fatalf("err reading function %d: %v", i, err)
+               }
+               res := cmpFuncDesc(cases[i], fn)
+               if res != "" {
+                       t.Errorf("ReadFunc(%d): %s", i, res)
+               }
+       }
+}
+
+func createFuncs(i int) []coverage.FuncDesc {
+       res := []coverage.FuncDesc{}
+       lc := uint32(1)
+       for fi := 0; fi < i+1; fi++ {
+               units := []coverage.CoverableUnit{}
+               for ui := 0; ui < (fi+1)*(i+1); ui++ {
+                       units = append(units,
+                               coverage.CoverableUnit{StLine: lc, StCol: lc + 1,
+                                       EnLine: lc + 2, EnCol: lc + 3, NxStmts: lc + 4,
+                               })
+                       lc += 5
+               }
+               f := coverage.FuncDesc{
+                       Funcname: fmt.Sprintf("func_%d_%d", i, fi),
+                       Srcfile:  fmt.Sprintf("foo_%d.go", i),
+                       Units:    units,
+               }
+               res = append(res, f)
+       }
+       return res
+}
+
+func createBlob(t *testing.T, i int) []byte {
+       nomodule := ""
+       b, err := encodemeta.NewCoverageMetaDataBuilder("foo/pkg", "pkg", nomodule)
+       if err != nil {
+               t.Fatalf("making builder: %v", err)
+       }
+
+       funcs := createFuncs(i)
+       for _, f := range funcs {
+               b.AddFunc(f)
+       }
+       drws := &slicewriter.WriteSeeker{}
+       b.Emit(drws)
+       return drws.BytesWritten()
+}
+
+func createMetaDataBlobs(t *testing.T, nb int) [][]byte {
+       res := [][]byte{}
+       for i := 0; i < nb; i++ {
+               res = append(res, createBlob(t, i))
+       }
+       return res
+}
+
+func TestMetaDataFileWriterReader(t *testing.T) {
+       d := t.TempDir()
+
+       // Emit a meta-file...
+       mfpath := filepath.Join(d, "covmeta.hash.0")
+       of, err := os.OpenFile(mfpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+       if err != nil {
+               t.Fatalf("opening covmeta: %v", err)
+       }
+       //t.Logf("meta-file path is %s", mfpath)
+       blobs := createMetaDataBlobs(t, 7)
+       gran := coverage.CtrGranularityPerBlock
+       mfw := encodemeta.NewCoverageMetaFileWriter(mfpath, of)
+       finalHash := [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+       err = mfw.Write(finalHash, blobs, coverage.CtrModeAtomic, gran)
+       if err != nil {
+               t.Fatalf("writing meta-file: %v", err)
+       }
+       if err = of.Close(); err != nil {
+               t.Fatalf("closing meta-file: %v", err)
+       }
+
+       // ... then read it back in, first time without setting fileView,
+       // second time setting it.
+       for k := 0; k < 2; k++ {
+               var fileView []byte
+
+               inf, err := os.Open(mfpath)
+               if err != nil {
+                       t.Fatalf("open() on meta-file: %v", err)
+               }
+
+               if k != 0 {
+                       // Use fileview to exercise different paths in reader.
+                       fi, err := os.Stat(mfpath)
+                       if err != nil {
+                               t.Fatalf("stat() on meta-file: %v", err)
+                       }
+                       fileView = make([]byte, fi.Size())
+                       if _, err := inf.Read(fileView); err != nil {
+                               t.Fatalf("read() on meta-file: %v", err)
+                       }
+                       if _, err := inf.Seek(int64(0), os.SEEK_SET); err != nil {
+                               t.Fatalf("seek() on meta-file: %v", err)
+                       }
+               }
+
+               mfr, err := decodemeta.NewCoverageMetaFileReader(inf, fileView)
+               if err != nil {
+                       t.Fatalf("k=%d NewCoverageMetaFileReader failed with: %v", k, err)
+               }
+               np := mfr.NumPackages()
+               if np != 7 {
+                       t.Fatalf("k=%d wanted 7 packages got %d", k, np)
+               }
+               md := mfr.CounterMode()
+               wmd := coverage.CtrModeAtomic
+               if md != wmd {
+                       t.Fatalf("k=%d wanted mode %d got %d", k, wmd, md)
+               }
+               gran := mfr.CounterGranularity()
+               wgran := coverage.CtrGranularityPerBlock
+               if gran != wgran {
+                       t.Fatalf("k=%d wanted gran %d got %d", k, wgran, gran)
+               }
+
+               payload := []byte{}
+               for pi := 0; pi < int(np); pi++ {
+                       var pd *decodemeta.CoverageMetaDataDecoder
+                       var err error
+                       pd, payload, err = mfr.GetPackageDecoder(uint32(pi), payload)
+                       if err != nil {
+                               t.Fatalf("GetPackageDecoder(%d) failed with: %v", pi, err)
+                       }
+                       efuncs := createFuncs(pi)
+                       nf := pd.NumFuncs()
+                       if len(efuncs) != int(nf) {
+                               t.Fatalf("decoding pk %d wanted %d funcs got %d",
+                                       pi, len(efuncs), nf)
+                       }
+                       var f coverage.FuncDesc
+                       for fi := 0; fi < int(nf); fi++ {
+                               if err := pd.ReadFunc(uint32(fi), &f); err != nil {
+                                       t.Fatalf("ReadFunc(%d) pk %d got error %v",
+                                               fi, pi, err)
+                               }
+                               res := cmpFuncDesc(efuncs[fi], f)
+                               if res != "" {
+                                       t.Errorf("ReadFunc(%d) pk %d: %s", fi, pi, res)
+                               }
+                       }
+               }
+               inf.Close()
+       }
+}