aboutsummaryrefslogtreecommitdiff
path: root/src/encoding/binary/binary_test.go
diff options
context:
space:
mode:
author	Lorenz Bauer <oss@lmb.io>	2024-05-16 11:22:36 +0100
committer	Gopher Robot <gobot@golang.org>	2024-05-20 19:16:18 +0000
commit	447ad32a1db8492ce8549ae27e0b72b611938253 (patch)
tree	bb1628bded4f12ec83bf97829e2c1605666145b1 /src/encoding/binary/binary_test.go
parent	04bf36e97305197d09554739391f607afde1fd74 (diff)
download	go-447ad32a1db8492ce8549ae27e0b72b611938253.tar.xz
encoding/binary: speed up Size
Size() is currently not called from the fast path, since the package
handles the buffer sizing for Read and Write internally. This will
change when adding Append() because callers can use Size to avoid
allocations when writing into bytes.Buffer via AvailableBuffer for
example.

Add a fast path for simple types and extend the existing struct size
cache to arrays of structs.

Change-Id: I3af16a2b6c9e2dbe6166a2f8c96bcd2e936719e2
Reviewed-on: https://go-review.googlesource.com/c/go/+/584358
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Austin Clements <austin@google.com>
Diffstat (limited to 'src/encoding/binary/binary_test.go')
-rw-r--r--	src/encoding/binary/binary_test.go	63
1 files changed, 63 insertions, 0 deletions
diff --git a/src/encoding/binary/binary_test.go b/src/encoding/binary/binary_test.go
index ca80c54c15..9eb536c990 100644
--- a/src/encoding/binary/binary_test.go
+++ b/src/encoding/binary/binary_test.go
@@ -429,10 +429,14 @@ func TestSizeStructCache(t *testing.T) {
want int
}{
{new(foo), 1},
+ {new([1]foo), 0},
+ {make([]foo, 1), 0},
{new(bar), 1},
{new(bar), 0},
{new(struct{ A Struct }), 1},
{new(struct{ A Struct }), 0},
+ {new([1]struct{ A Struct }), 0},
+ {make([]struct{ A Struct }, 1), 0},
}
for _, tc := range testcases {
@@ -458,6 +462,18 @@ func TestSizeInvalid(t *testing.T) {
[]int(nil),
new([]int),
(*[]int)(nil),
+ (*int8)(nil),
+ (*uint8)(nil),
+ (*int16)(nil),
+ (*uint16)(nil),
+ (*int32)(nil),
+ (*uint32)(nil),
+ (*int64)(nil),
+ (*uint64)(nil),
+ (*float32)(nil),
+ (*float64)(nil),
+ (*complex64)(nil),
+ (*complex128)(nil),
}
for _, tc := range testcases {
if got := Size(tc); got != -1 {
@@ -704,6 +720,43 @@ func TestAppendAllocs(t *testing.T) {
}
}
+var sizableTypes = []any{
+ bool(false),
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+ float32(0),
+ float64(0),
+ complex64(0),
+ complex128(0),
+ Struct{},
+ &Struct{},
+ []Struct{},
+ ([]Struct)(nil),
+ [1]Struct{},
+}
+
+func TestSizeAllocs(t *testing.T) {
+ for _, data := range sizableTypes {
+ t.Run(fmt.Sprintf("%T", data), func(t *testing.T) {
+ // Size uses a sync.Map behind the scenes. The slow lookup path of
+ // that does allocate, so we need a couple of runs here to be
+ // allocation free.
+ allocs := testing.AllocsPerRun(10, func() {
+ _ = Size(data)
+ })
+ if allocs != 0 {
+ t.Fatalf("Expected no allocations, got %v", allocs)
+ }
+ })
+ }
+}
+
type byteSliceReader struct {
remain []byte
}
@@ -1075,6 +1128,16 @@ func BenchmarkWriteSlice1000Uint8s(b *testing.B) {
}
}
+func BenchmarkSize(b *testing.B) {
+ for _, data := range sizableTypes {
+ b.Run(fmt.Sprintf("%T", data), func(b *testing.B) {
+ for range b.N {
+ _ = Size(data)
+ }
+ })
+ }
+}
+
func TestNativeEndian(t *testing.T) {
const val = 0x12345678
i := uint32(val)