about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
author    Keith Randall <khr@golang.org>    2022-11-01 14:18:09 -0700
committer Keith Randall <khr@golang.org>    2023-02-17 22:21:22 +0000
commit   d49719b1f75ea745937bdbc09ae5927c5378780b (patch)
tree     1e68379c42183f99f70986129d2a75c6adcfb6f2 /src/runtime
parent   d3daeb5267b626db36adf2f39c36f6caf94447e3 (diff)
download go-d49719b1f75ea745937bdbc09ae5927c5378780b.tar.xz
cmd/compile: move raw writes out of write barrier code
Previously, the write barrier calls themselves did the actual writes to
memory. Instead, move those writes out to a common location that both the
wb-enabled and wb-disabled code paths share. This enables us to optimize
the write barrier path without having to worry about performing the
actual writes.

Change-Id: Ia71ab651908ec124cc33141afb52e4ca19733ac6
Reviewed-on: https://go-review.googlesource.com/c/go/+/447780
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Bypass: Keith Randall <khr@golang.org>
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/asm_386.s      2
-rw-r--r--  src/runtime/asm_amd64.s    2
-rw-r--r--  src/runtime/asm_arm.s      2
-rw-r--r--  src/runtime/asm_arm64.s    2
-rw-r--r--  src/runtime/asm_loong64.s  2
-rw-r--r--  src/runtime/asm_mips64x.s  2
-rw-r--r--  src/runtime/asm_mipsx.s    2
-rw-r--r--  src/runtime/asm_ppc64x.s   2
-rw-r--r--  src/runtime/asm_riscv64.s  2
-rw-r--r--  src/runtime/asm_s390x.s    2
-rw-r--r--  src/runtime/asm_wasm.s     3
-rw-r--r--  src/runtime/mbarrier.go   18
12 files changed, 18 insertions, 23 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index a03e5b0fe0..8865f5502e 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -1398,8 +1398,6 @@ retry:
MOVL BX, -4(CX) // Record *slot
MOVL 20(SP), CX
MOVL 24(SP), BX
- // Do the write.
- MOVL AX, (DI)
RET
flush:
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 6acb7ddaef..69a363320d 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -1660,8 +1660,6 @@ retry:
MOVQ R13, -8(R12) // Record *slot
MOVQ 96(SP), R12
MOVQ 104(SP), R13
- // Do the write.
- MOVQ AX, (DI)
RET
flush:
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 40a6e47792..3cabe748cd 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -899,8 +899,6 @@ retry:
MOVW (R2), R0 // TODO: This turns bad writes into bad reads.
MOVW R0, -4(R1) // Record *slot
MOVM.IA.W (R13), [R0,R1]
- // Do the write.
- MOVW R3, (R2)
RET
flush:
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index bc9e73ffd6..e8399712de 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -1220,8 +1220,6 @@ retry:
MOVD (R2), R0 // TODO: This turns bad writes into bad reads.
MOVD R0, -8(R1) // Record *slot
LDP 184(RSP), (R0, R1)
- // Do the write.
- MOVD R3, (R2)
RET
flush:
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 09a2964511..dfa3497b69 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -645,8 +645,6 @@ retry:
MOVV R19, -8(R13) // Record *slot
MOVV 208(R3), R19
MOVV 216(R3), R13
- // Do the write.
- MOVV R28, (R27)
RET
flush:
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 6f413db84b..c6677d0014 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -662,8 +662,6 @@ retry:
MOVV R1, -8(R2) // Record *slot
MOVV 184(R29), R1
MOVV 192(R29), R2
- // Do the write.
- MOVV R21, (R20)
RET
flush:
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index 2fbbf13672..a43177ec13 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -655,8 +655,6 @@ retry:
MOVW R1, -4(R2) // Record *slot
MOVW 100(R29), R1
MOVW 104(R29), R2
- // Do the write.
- MOVW R21, (R20)
RET
flush:
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 4a30f38fc9..0f6421f6f5 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -955,8 +955,6 @@ retry:
MOVD R21, -16(R19) // Record value
MOVD (R20), R18 // TODO: This turns bad writes into bad reads.
MOVD R18, -8(R19) // Record *slot
- // Do the write.
- MOVD R21, (R20)
RET
flush:
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 4c434ea551..4fd9c427e3 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -742,8 +742,6 @@ retry:
MOV A0, -8(A1) // Record *slot
MOV 24*8(X2), A0
MOV 25*8(X2), A1
- // Do the write.
- MOV T1, (T0)
RET
flush:
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 5332c9b234..094e25c40f 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -806,8 +806,6 @@ retry:
MOVD (R2), R10 // TODO: This turns bad writes into bad reads.
MOVD R10, -8(R4) // Record *slot
MOVD 96(R15), R4
- // Do the write.
- MOVD R3, (R2)
RET
flush:
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index 6666b554d6..e108bb4362 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -443,9 +443,6 @@ TEXT runtime·gcWriteBarrier(SB), NOSPLIT, $16
// Record *slot
MOVD (R0), 8(R5)
- // Do the write
- MOVD R1, (R0)
-
RET
End
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 0e49794854..c9e06d443d 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -175,6 +175,24 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
}
}
+// wbZero performs the write barrier operations necessary before
+// zeroing a region of memory at address dst of type typ.
+// Does not actually do the zeroing.
+//go:nowritebarrierrec
+//go:nosplit
+func wbZero(typ *_type, dst unsafe.Pointer) {
+ bulkBarrierPreWrite(uintptr(dst), 0, typ.ptrdata)
+}
+
+// wbMove performs the write barrier operations necessary before
+// copying a region of memory from src to dst of type typ.
+// Does not actually do the copying.
+//go:nowritebarrierrec
+//go:nosplit
+func wbMove(typ *_type, dst, src unsafe.Pointer) {
+ bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
+}
+
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {