diff options
| author | Paul E. Murphy <murp@ibm.com> | 2022-09-27 13:13:10 -0500 |
|---|---|---|
| committer | Paul Murphy <murp@ibm.com> | 2023-02-23 22:26:39 +0000 |
| commit | f98dd299103f66b86bbef402a63a0c6f8fc8b11e (patch) | |
| tree | cc42a0e8efd554f28846c49d0aa1f14912dec02c /src/cmd/compile/internal/test | |
| parent | f60a2a9c9423222b52ecb5f6463787f913195127 (diff) | |
| download | go-f98dd299103f66b86bbef402a63a0c6f8fc8b11e.tar.xz | |
cmd/compile: rework unbounded shift lowering on PPC64
This reduces unbounded shift latency by one cycle, and may
generate less instructions in some cases.
When there is a choice whether to use doubleword or word shifts, use
doubleword shifts. Doubleword shifts have fewer hardware scheduling
restrictions across P8/P9/P10.
Likewise, rework the shift sequence to allow the compare/shift/overshift
values to compute in parallel, then choose the correct value.
Some ANDCCconst rules also need to be reworked to ensure they simplify when
used for their flag value. This commonly occurs when prove fails to
identify a bounded shift (e.g. foo32<<uint(x&31)).
Change-Id: Ifc6ff4a865d68675e57745056db414b0eb6f2d34
Reviewed-on: https://go-review.googlesource.com/c/go/+/442597
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Lynn Boger <laboger@linux.vnet.ibm.com>
Reviewed-by: Than McIntosh <thanm@google.com>
Run-TryBot: Paul Murphy <murp@ibm.com>
Reviewed-by: Ian Lance Taylor <iant@google.com>
Diffstat (limited to 'src/cmd/compile/internal/test')
| -rw-r--r-- | src/cmd/compile/internal/test/shift_test.go | 89 |
1 files changed, 89 insertions, 0 deletions
diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go index 278a47da29..dd893a1121 100644 --- a/src/cmd/compile/internal/test/shift_test.go +++ b/src/cmd/compile/internal/test/shift_test.go @@ -1061,3 +1061,92 @@ func TestIncorrectRotate(t *testing.T) { t.Errorf("got %x want 0", got) } } + +//go:noinline +func variableShiftOverflow64x8(x int64, y, z uint8) (a, b, c int64) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow32x8(x int32, y, z uint8) (a, b, c int32) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow16x8(x int16, y, z uint8) (a, b, c int16) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow8x8(x int8, y, z uint8) (a, b, c int8) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow64x16(x int64, y, z uint16) (a, b, c int64) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow32x16(x int32, y, z uint16) (a, b, c int32) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow16x16(x int16, y, z uint16) (a, b, c int16) { + // Verify junk bits are ignored when doing a variable shift. 
+ return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow8x16(x int8, y, z uint16) (a, b, c int8) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z)) +} + +//go:noinline +func makeU8(x uint64) uint8 { + // Ensure the upper portions of the register are clear before testing large shift values + // using non-native types (e.g uint8 on PPC64). + return uint8(x) +} + +//go:noinline +func makeU16(x uint64) uint16 { + // Ensure the upper portions of the register are clear before testing large shift values + // using non-native types (e.g uint8 on PPC64). + return uint16(x) +} + +func TestShiftOverflow(t *testing.T) { + if v, w, z := variableShiftOverflow64x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow32x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow16x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fe0", v, w, z) + } + if v, w, z := variableShiftOverflow8x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x60 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z) + } + if v, w, z := variableShiftOverflow64x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow32x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0,", v, w, z) + } + if v, w, z := variableShiftOverflow16x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fe0", v, w, z) + } + if v, w, z := variableShiftOverflow8x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x60 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z) + } +} |
