locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
b.pbSample(values, locs, nil)
}
- b.build()
- return nil
+ return b.build()
}
// printCountProfile prints a countProfile at the specified debug level.
}
b.pbSample(values, locs, labels)
}
- b.build()
- return nil
+ return b.build()
}
// keysByCount sorts keys with higher counts first, breaking ties by key string order.
}
-// build completes and returns the constructed profile.
+// build completes the constructed profile and writes it to the underlying
+// writer, returning any error encountered while writing.
-func (b *profileBuilder) build() {
+func (b *profileBuilder) build() error {
b.end = time.Now()
b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano())
// TODO: Anything for tagProfile_KeepFrames?
b.pb.strings(tagProfile_StringTable, b.strings)
- b.zw.Write(b.pb.data)
- b.zw.Close()
+	if _, err := b.zw.Write(b.pb.data); err != nil {
+		return err
+	}
+	return b.zw.Close()
}
// appendLocsForStack appends the location IDs for the given stack trace to the given
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"internal/abi"
"internal/profile"
if err := b.addCPUData(data, tags); err != nil {
return nil, err
}
- b.build()
+ if err := b.build(); err != nil {
+ return nil, err
+ }
return profile.Parse(&buf)
}
t.Fatalf("translating profile: %v", err)
}
}
+
+var errWrite = errors.New("error from writer")
+
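+// errWriter is an io.Writer whose Write always fails with errWrite,
+// exercising the error path added to profileBuilder.build.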
+type errWriter struct{}
+
+func (errWriter) Write(p []byte) (int, error) { return 0, errWrite }
+
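+// TestWriteToErr checks that WriteTo reports write errors from the
+// destination writer instead of silently dropping them.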
+func TestWriteToErr(t *testing.T) {
+ err := Lookup("heap").WriteTo(&errWriter{}, 0)
+ if !errors.Is(err, errWrite) {
+		t.Fatalf("WriteTo returned %v, want %v", err, errWrite)
+ }
+}
}
})
}
- b.build()
- return nil
+ return b.build()
}
// scaleHeapSample adjusts the data from a heap Sample to