about summary refs log tree commit diff
path: root/test/codegen
diff options
context:
space:
mode:
Diffstat (limited to 'test/codegen')
-rw-r--r--test/codegen/atomics.go68
1 file changed, 68 insertions, 0 deletions
diff --git a/test/codegen/atomics.go b/test/codegen/atomics.go
index feaa31b9c1..14024dcd83 100644
--- a/test/codegen/atomics.go
+++ b/test/codegen/atomics.go
@@ -22,6 +22,74 @@ func (c *Counter) Increment() {
// arm64/v8.1:"LDADDALW"
// arm64/v8.0:".*arm64HasATOMICS"
// arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK",-"CMPXCHG"
atomic.AddInt32(&c.count, 1)
}
+func atomicLogical64(x *atomic.Uint64) uint64 {
+ var r uint64
+
+ // arm64/v8.0:"LDCLRALD"
+ // arm64/v8.1:"LDCLRALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGQ"
+ x.And(11)
+ // arm64/v8.0:"LDCLRALD"
+ // arm64/v8.1:"LDCLRALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGQ"
+ r += x.And(22)
+
+ // arm64/v8.0:"LDORALD"
+ // arm64/v8.1:"LDORALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGQ"
+ x.Or(33)
+ // arm64/v8.0:"LDORALD"
+ // arm64/v8.1:"LDORALD"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGQ"
+ r += x.Or(44)
+
+ return r
+}
+
+func atomicLogical32(x *atomic.Uint32) uint32 {
+ var r uint32
+
+ // arm64/v8.0:"LDCLRALW"
+ // arm64/v8.1:"LDCLRALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGL"
+ x.And(11)
+ // arm64/v8.0:"LDCLRALW"
+ // arm64/v8.1:"LDCLRALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGL"
+ r += x.And(22)
+
+ // arm64/v8.0:"LDORALW"
+ // arm64/v8.1:"LDORALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
+ // amd64:"LOCK",-"CMPXCHGL"
+ x.Or(33)
+ // arm64/v8.0:"LDORALW"
+ // arm64/v8.1:"LDORALW"
+ // arm64/v8.0:".*arm64HasATOMICS"
+ // arm64/v8.1:-".*arm64HasATOMICS"
+ // amd64:"LOCK","CMPXCHGL"
+ r += x.Or(44)
+
+ return r
+}