about summary refs log tree commit diff
path: root/test/codegen
diff options
context:
space:
mode:
author    Keith Randall <khr@golang.org>  2024-06-25 14:56:11 -0700
committer Keith Randall <khr@golang.org>  2024-07-23 21:29:38 +0000
commit    c18ff292959f18965ab6fa47d5dc7aeea1b2374f (patch)
tree      fa520cfd6790de904a3305cf9bd8aeb020c355cb /test/codegen
parent    dbfa3cacc7a4178ff3b81c79f7678ac9d61c54ab (diff)
download  go-c18ff292959f18965ab6fa47d5dc7aeea1b2374f.tar.xz
cmd/compile: make sync/atomic AND/OR operations intrinsic on amd64
Update #61395

Change-Id: I59a950f48efc587dfdffce00e2f4f3ab99d8df00
Reviewed-on: https://go-review.googlesource.com/c/go/+/594738
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Nicolas Hillegeer <aktau@google.com>
Diffstat (limited to 'test/codegen')
-rw-r--r--test/codegen/atomics.go68
1 file changed, 68 insertions, 0 deletions
diff --git a/test/codegen/atomics.go b/test/codegen/atomics.go
index feaa31b9c1..14024dcd83 100644
--- a/test/codegen/atomics.go
+++ b/test/codegen/atomics.go
@@ -22,6 +22,74 @@ func (c *Counter) Increment() {
// arm64/v8.1:"LDADDALW"
// arm64/v8.0:".*arm64HasATOMICS"
// arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK",-"CMPXCHG"
atomic.AddInt32(&c.count, 1)
}
+func atomicLogical64(x *atomic.Uint64) uint64 {
+ var r uint64
+
+ // arm64/v8.0:"LDCLRALD"
+ // arm64/v8.1:"LDCLRALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGQ"
+ x.And(11)
+ // arm64/v8.0:"LDCLRALD"
+ // arm64/v8.1:"LDCLRALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGQ"
+ r += x.And(22)
+
+ // arm64/v8.0:"LDORALD"
+ // arm64/v8.1:"LDORALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGQ"
+ x.Or(33)
+ // arm64/v8.0:"LDORALD"
+ // arm64/v8.1:"LDORALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGQ"
+ r += x.Or(44)
+
+ return r
+}
+
+func atomicLogical32(x *atomic.Uint32) uint32 {
+ var r uint32
+
+ // arm64/v8.0:"LDCLRALW"
+ // arm64/v8.1:"LDCLRALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGL"
+ x.And(11)
+ // arm64/v8.0:"LDCLRALW"
+ // arm64/v8.1:"LDCLRALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGL"
+ r += x.And(22)
+
+ // arm64/v8.0:"LDORALW"
+ // arm64/v8.1:"LDORALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGL"
+ x.Or(33)
+ // arm64/v8.0:"LDORALW"
+ // arm64/v8.1:"LDORALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGL"
+ r += x.Or(44)
+
+ return r
+}