From 2db58f8f2daea1c6f6134584f7811bb229177b28 Mon Sep 17 00:00:00 2001 From: Justin Nuß Date: Mon, 13 Apr 2015 20:13:14 +0200 Subject: encoding/csv: Preallocate records slice Currently parseRecord will always start with a nil slice and then resize the slice on append. For input with a fixed number of fields per record we can preallocate the slice to avoid having to resize the slice. This change implements this optimization by using FieldsPerRecord as capacity if it's > 0 and also adds a benchmark to better show the differences. benchmark old ns/op new ns/op delta BenchmarkRead 19741 17909 -9.28% benchmark old allocs new allocs delta BenchmarkRead 59 41 -30.51% benchmark old bytes new bytes delta BenchmarkRead 6276 5844 -6.88% Change-Id: I7c2abc9c80a23571369bcfcc99a8ffc474eae7ab Reviewed-on: https://go-review.googlesource.com/8880 Reviewed-by: Brad Fitzpatrick Run-TryBot: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/encoding/csv/reader.go | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'src/encoding/csv/reader.go') diff --git a/src/encoding/csv/reader.go b/src/encoding/csv/reader.go index d9432954ac..d0a09044fb 100644 --- a/src/encoding/csv/reader.go +++ b/src/encoding/csv/reader.go @@ -228,6 +228,12 @@ func (r *Reader) parseRecord() (fields []string, err error) { } r.r.UnreadRune() + // If FieldsPerRecord is greater than 0 we can assume the final + // length of fields to be equal to FieldsPerRecord. + if r.FieldsPerRecord > 0 { + fields = make([]string, 0, r.FieldsPerRecord) + } + // At this point we have at least one field. for { haveField, delim, err := r.parseField() -- cgit v1.3