aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/asm_wasm.s
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2022-10-25 17:58:07 -0700
committerKeith Randall <khr@golang.org>2023-02-17 22:19:26 +0000
commitd3daeb5267b626db36adf2f39c36f6caf94447e3 (patch)
tree9260d979d13b9cd790a2f1167069a34dfbedeef2 /src/runtime/asm_wasm.s
parent209df389c215d9a1eee15ce1c1e4d82f43e026db (diff)
downloadgo-d3daeb5267b626db36adf2f39c36f6caf94447e3.tar.xz
runtime: remove the restriction that write barrier ptrs come in pairs
Future CLs will remove the invariant that pointers are always put in the write barrier in pairs. The behavior of the assembly code changes a bit, where instead of writing the pointers unconditionally and then checking for overflow, check for overflow first and then write the pointers. Also changed the write barrier flush function to not take the src/dst as arguments. Change-Id: I2ef708038367b7b82ea67cbaf505a1d5904c775c Reviewed-on: https://go-review.googlesource.com/c/go/+/447779 Run-TryBot: Keith Randall <khr@golang.org> Reviewed-by: Cherry Mui <cherryyz@google.com> Reviewed-by: Michael Knyszek <mknyszek@google.com> TryBot-Bypass: Keith Randall <khr@golang.org>
Diffstat (limited to 'src/runtime/asm_wasm.s')
-rw-r--r--src/runtime/asm_wasm.s66
1 file changed, 41 insertions, 25 deletions
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index e075c72598..6666b554d6 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -410,36 +410,52 @@ TEXT runtime·cgocallback(SB), NOSPLIT, $0-24
// R0: the destination of the write (i64)
// R1: the value being written (i64)
TEXT runtime·gcWriteBarrier(SB), NOSPLIT, $16
- // R3 = g.m
- MOVD g_m(g), R3
- // R4 = p
- MOVD m_p(R3), R4
- // R5 = wbBuf.next
- MOVD p_wbBuf+wbBuf_next(R4), R5
+ Loop
+ // R3 = g.m
+ MOVD g_m(g), R3
+ // R4 = p
+ MOVD m_p(R3), R4
+ // R5 = wbBuf.next
+ MOVD p_wbBuf+wbBuf_next(R4), R5
- // Record value
- MOVD R1, 0(R5)
- // Record *slot
- MOVD (R0), 8(R5)
+ // Increment wbBuf.next
+ Get R5
+ I64Const $16
+ I64Add
+ Set R5
- // Increment wbBuf.next
- Get R5
- I64Const $16
- I64Add
- Set R5
- MOVD R5, p_wbBuf+wbBuf_next(R4)
+ // Is the buffer full?
+ Get R5
+ I64Load (p_wbBuf+wbBuf_end)(R4)
+ I64LeU
+ If
+ // Commit to the larger buffer.
+ MOVD R5, p_wbBuf+wbBuf_next(R4)
+
+ // Back up to write position (wasm stores can't use negative offsets)
+ Get R5
+ I64Const $16
+ I64Sub
+ Set R5
+
+ // Record value
+ MOVD R1, 0(R5)
+ // Record *slot
+ MOVD (R0), 8(R5)
+
+ // Do the write
+ MOVD R1, (R0)
+
+ RET
+ End
- Get R5
- I64Load (p_wbBuf+wbBuf_end)(R4)
- I64Eq
- If
// Flush
MOVD R0, 0(SP)
MOVD R1, 8(SP)
CALLNORESUME runtime·wbBufFlush(SB)
- End
-
- // Do the write
- MOVD R1, (R0)
+ MOVD 0(SP), R0
+ MOVD 8(SP), R1
- RET
+ // Retry
+ Br $0
+ End