parseRecord currently starts with a nil slice and grows it on every
append, reallocating the backing array as the record is parsed. For
input with a fixed number of fields per record the slice can be
preallocated instead. This change uses FieldsPerRecord as the initial
capacity when it is greater than 0 and adds a benchmark to show the
difference.
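For illustration only (not part of this change), a minimal sketch of the
effect: appending to a nil slice reallocates the backing array as it
grows, while a single make call with the known field count reserves the
space once. The constant fieldsPerRecord below is a hypothetical
stand-in for Reader.FieldsPerRecord.

package main

import "fmt"

func main() {
	// Hypothetical stand-in for Reader.FieldsPerRecord.
	const fieldsPerRecord = 4

	// Starting from nil, append has to grow the backing array
	// (capacity typically steps 1 -> 2 -> 4), reallocating each time.
	var grown []string
	for i := 0; i < fieldsPerRecord; i++ {
		grown = append(grown, "field")
		fmt.Println("nil start, cap now:", cap(grown))
	}

	// With the field count known up front, one make call reserves all
	// the space and the following appends never reallocate.
	fields := make([]string, 0, fieldsPerRecord)
	for i := 0; i < fieldsPerRecord; i++ {
		fields = append(fields, "field")
		fmt.Println("preallocated, cap stays:", cap(fields))
	}
}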
benchmark          old ns/op     new ns/op     delta
BenchmarkRead      19741         17909         -9.28%

benchmark          old allocs    new allocs    delta
BenchmarkRead      59            41            -30.51%

benchmark          old bytes     new bytes     delta
BenchmarkRead      6276          5844          -6.88%
Change-Id: I7c2abc9c80a23571369bcfcc99a8ffc474eae7ab
Reviewed-on: https://go-review.googlesource.com/8880
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
}
r.r.UnreadRune()
+ // If FieldsPerRecord is greater than 0 we can assume that the final
+ // length of fields will be equal to FieldsPerRecord.
+ if r.FieldsPerRecord > 0 {
+ fields = make([]string, 0, r.FieldsPerRecord)
+ }
+
// At this point we have at least one field.
for {
haveField, delim, err := r.parseField()
}
}
}
+
+func BenchmarkRead(b *testing.B) {
+ data := `x,y,z,w
+x,y,z,
+x,y,,
+x,,,
+,,,
+"x","y","z","w"
+"x","y","z",""
+"x","y","",""
+"x","","",""
+"","","",""
+`
+
+ for i := 0; i < b.N; i++ {
+ _, err := NewReader(strings.NewReader(data)).ReadAll()
+ if err != nil {
+ b.Fatalf("could not read data: %s", err)
+ }
+ }
+}
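
To reproduce numbers like the ones above (assuming a checked-out Go tree),
the benchmark can be run before and after the change and the two outputs
compared, for example:

	go test -run=NONE -bench=BenchmarkRead -benchmem encoding/csv

The old/new table in the commit message is the kind of output produced by
feeding two such runs to the benchcmp tool; -benchmem is what adds the
allocs/op and bytes/op columns.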