about summary refs log tree commit diff
path: root/src/runtime/internal/atomic
diff options
context:
space:
mode:
author    Michael Munday <mike.munday@ibm.com>  2019-06-04 19:17:41 +0100
committer Michael Munday <mike.munday@ibm.com>  2019-06-06 16:15:43 +0000
commit    ac8dbe7747971007d58eb39e2e7e615cf9f04493 (patch)
tree      626720b251dd583ead7900a31c3ab221ec84b858 /src/runtime/internal/atomic
parent    53deb812196d857ce8a7c46c1f7c1559bb167630 (diff)
download  go-ac8dbe7747971007d58eb39e2e7e615cf9f04493.tar.xz
cmd/compile, runtime: make atomic loads/stores sequentially consistent on s390x
The z/Architecture does not guarantee that a load following a store
will not be reordered with that store, unless they access the same
address. Therefore if we want to ensure the sequential consistency
of atomic loads and stores we need to perform serialization
operations after atomic stores.

We do not need to serialize in the runtime when using StoreRel[ease]
and LoadAcq[uire]. The z/Architecture already provides sufficient
ordering guarantees for these operations.

name              old time/op  new time/op  delta
AtomicLoad64-16   0.51ns ± 0%  0.51ns ± 0%     ~     (all equal)
AtomicStore64-16  0.51ns ± 0%  0.60ns ± 9%  +16.47%  (p=0.000 n=17+20)
AtomicLoad-16     0.51ns ± 0%  0.51ns ± 0%     ~     (all equal)
AtomicStore-16    0.51ns ± 0%  0.60ns ± 9%  +16.50%  (p=0.000 n=18+20)

Fixes #32428.

Change-Id: I88d19a4010c46070e4fff4b41587efe4c628d4d9
Reviewed-on: https://go-review.googlesource.com/c/go/+/180439
Run-TryBot: Michael Munday <mike.munday@ibm.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/internal/atomic')
-rw-r--r--  src/runtime/internal/atomic/asm_s390x.s    24
-rw-r--r--  src/runtime/internal/atomic/atomic_s390x.go 25
2 files changed, 30 insertions, 19 deletions
diff --git a/src/runtime/internal/atomic/asm_s390x.s b/src/runtime/internal/atomic/asm_s390x.s
index 512fde5a12..084f5b5163 100644
--- a/src/runtime/internal/atomic/asm_s390x.s
+++ b/src/runtime/internal/atomic/asm_s390x.s
@@ -4,6 +4,30 @@
#include "textflag.h"
+// func Store(ptr *uint32, val uint32)
+TEXT ·Store(SB), NOSPLIT, $0
+ MOVD ptr+0(FP), R2
+ MOVWZ val+8(FP), R3
+ MOVW R3, 0(R2)
+ SYNC
+ RET
+
+// func Store64(ptr *uint64, val uint64)
+TEXT ·Store64(SB), NOSPLIT, $0
+ MOVD ptr+0(FP), R2
+ MOVD val+8(FP), R3
+ MOVD R3, 0(R2)
+ SYNC
+ RET
+
+// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
+TEXT ·StorepNoWB(SB), NOSPLIT, $0
+ MOVD ptr+0(FP), R2
+ MOVD val+8(FP), R3
+ MOVD R3, 0(R2)
+ SYNC
+ RET
+
// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
// if *ptr == old {
diff --git a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go
index 0ad96d3502..5a1f411ca1 100644
--- a/src/runtime/internal/atomic/atomic_s390x.go
+++ b/src/runtime/internal/atomic/atomic_s390x.go
@@ -36,30 +36,17 @@ func LoadAcq(ptr *uint32) uint32 {
return *ptr
}
-//go:noinline
-//go:nosplit
-func Store(ptr *uint32, val uint32) {
- *ptr = val
-}
-
-//go:noinline
-//go:nosplit
-func Store64(ptr *uint64, val uint64) {
- *ptr = val
-}
+//go:noescape
+func Store(ptr *uint32, val uint32)
-//go:notinheap
-type noWB struct{}
+//go:noescape
+func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
-//go:noinline
-//go:nosplit
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {
- *(**noWB)(ptr) = (*noWB)(val)
-}
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
-//go:noinline
//go:nosplit
+//go:noinline
func StoreRel(ptr *uint32, val uint32) {
*ptr = val
}