aboutsummaryrefslogtreecommitdiff
path: root/test/codegen
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2023-08-21 15:55:35 -0700
committerKeith Randall <khr@google.com>2023-10-12 18:09:26 +0000
commit657c885fb9278f03d5b18bfb7eeca98c25ef67ac (patch)
tree23115ea07b0ff35fdd54c66172928f8f6c7f9fb0 /test/codegen
parente4f72f773666b8f13ed5d053abf87ca42c68cc16 (diff)
downloadgo-657c885fb9278f03d5b18bfb7eeca98c25ef67ac.tar.xz
cmd/compile: when combining stores, use line number of first store
var p *[2]uint32 = ...
p[0] = 0
p[1] = 0

When we combine these two 32-bit stores into a single 64-bit store, use
the line number of the first store, not the second one.

This differs from the default behavior because usually with the
combining that the compiler does, we use the line number of the last
instruction in the combo (e.g. load+add, we use the line number of the
add).

This is the same behavior that gcc does in C (picking the line number
of the first of a set of combined stores).

Change-Id: Ie70bf6151755322d33ecd50e4d9caf62f7881784
Reviewed-on: https://go-review.googlesource.com/c/go/+/521678
TryBot-Result: Gopher Robot &lt;gobot@golang.org&gt;
Run-TryBot: Keith Randall &lt;khr@golang.org&gt;
Reviewed-by: Keith Randall &lt;khr@google.com&gt;
Reviewed-by: David Chase &lt;drchase@google.com&gt;
Diffstat (limited to 'test/codegen')
-rw-r--r--test/codegen/memcombine.go36
1 files changed, 30 insertions, 6 deletions
diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go
index adad9c613d..1b8abc348a 100644
--- a/test/codegen/memcombine.go
+++ b/test/codegen/memcombine.go
@@ -748,16 +748,16 @@ func zero_byte_4(b1, b2 []byte) {
func zero_byte_8(b []byte) {
_ = b[7]
- b[0], b[1], b[2], b[3] = 0, 0, 0, 0
- b[4], b[5], b[6], b[7] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW"
+ b[4], b[5], b[6], b[7] = 0, 0, 0, 0
}
func zero_byte_16(b []byte) {
_ = b[15]
- b[0], b[1], b[2], b[3] = 0, 0, 0, 0
+ b[0], b[1], b[2], b[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
b[4], b[5], b[6], b[7] = 0, 0, 0, 0
b[8], b[9], b[10], b[11] = 0, 0, 0, 0
- b[12], b[13], b[14], b[15] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW"
+ b[12], b[13], b[14], b[15] = 0, 0, 0, 0
}
func zero_byte_30(a *[30]byte) {
@@ -809,8 +809,8 @@ func zero_uint16_4(h1, h2 []uint16) {
func zero_uint16_8(h []uint16) {
_ = h[7]
- h[0], h[1], h[2], h[3] = 0, 0, 0, 0
- h[4], h[5], h[6], h[7] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
+ h[0], h[1], h[2], h[3] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH"
+ h[4], h[5], h[6], h[7] = 0, 0, 0, 0
}
func zero_uint32_2(w1, w2 []uint32) {
@@ -858,3 +858,27 @@ func loadstore2(p, q *S1) {
// arm64:"MOVW",-"MOVH"
q.a, q.b = a, b
}
+
+func wideStore(p *[8]uint64) {
+ if p == nil {
+ return
+ }
+
+ // amd64:"MOVUPS",-"MOVQ"
+ // arm64:"STP",-"MOVD"
+ p[0] = 0
+ // amd64:-"MOVUPS",-"MOVQ"
+ // arm64:-"STP",-"MOVD"
+ p[1] = 0
+}
+
+func wideStore2(p *[8]uint64, x, y uint64) {
+ if p == nil {
+ return
+ }
+
+ // s390x:"STMG"
+ p[0] = x
+ // s390x:-"STMG",-"MOVD"
+ p[1] = y
+}