aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
authorwangboyao <wangboyao@bytedance.com>2025-12-04 19:06:20 +0800
committerJoel Sing <joel@sing.id.au>2026-01-27 03:43:00 -0800
commit2d1f571c6b420757b2a72b9e53d486840a1317f9 (patch)
tree5a2ccd3ded657a3f7d0ebb8a35cb63a2b0322fc7 /src/runtime
parentf532f87a9895afc60b6ab17969c67e33d1ed1564 (diff)
downloadgo-2d1f571c6b420757b2a72b9e53d486840a1317f9.tar.xz
cmd/compile, runtime: avoid improper control transfer instruction hints on riscv64
On RISC-V the JAL and JALR instructions provide Return Address Stack (RAS) prediction hints based on the registers used (as per section 2.5.1 of the RISC-V ISA manual). When a JALR instruction uses X1 or X5 as the source register, it hints that a pop should occur. When making a function call, avoid the use of X5 as a source register, since this results in the RAS performing a pop-then-push instead of a push, breaking call/return pairing and significantly degrading front-end branch prediction performance. Based on test results from golang.org/x/benchmarks/json on a SpacemiT K1, the fixed version shows a performance improvement of about 7%. Fixes #76654 Change-Id: I867c8d7cfb54f5decbe176f3ab3bb3d78af1cf64 Reviewed-on: https://go-review.googlesource.com/c/go/+/726760 Reviewed-by: Dmitri Shuralyov <dmitshur@google.com> Reviewed-by: Cherry Mui <cherryyz@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> TryBot-Result: Gopher Robot <gobot@golang.org> Reviewed-by: Joel Sing <joel@sing.id.au> Run-TryBot: Joel Sing <joel@sing.id.au>
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/asm_riscv64.s28
-rw-r--r--src/runtime/race_riscv64.s70
-rw-r--r--src/runtime/sys_openbsd_riscv64.s32
3 files changed, 65 insertions, 65 deletions
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 428701a503..d74f388752 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -44,15 +44,15 @@ TEXT _rt0_riscv64_lib(SB),NOSPLIT,$224
MOV A1, _rt0_riscv64_lib_argv<>(SB)
// Synchronous initialization.
- MOV $runtime·libpreinit(SB), T0
- JALR RA, T0
+ MOV $runtime·libpreinit(SB), T1
+ JALR RA, T1
// Create a new thread to do the runtime initialization and return.
- MOV _cgo_sys_thread_create(SB), T0
- BEQZ T0, nocgo
+ MOV _cgo_sys_thread_create(SB), T1
+ BEQZ T1, nocgo
MOV $_rt0_riscv64_lib_go(SB), A0
MOV $0, A1
- JALR RA, T0
+ JALR RA, T1
JMP restore
nocgo:
@@ -60,8 +60,8 @@ nocgo:
MOV $_rt0_riscv64_lib_go(SB), A1
MOV A0, 8(X2)
MOV A1, 16(X2)
- MOV $runtime·newosproc0(SB), T0
- JALR RA, T0
+ MOV $runtime·newosproc0(SB), T1
+ JALR RA, T1
restore:
// Restore callee-save registers, along with X1 (LR).
@@ -122,14 +122,14 @@ TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
MOV X2, (g_stack+stack_hi)(g)
// if there is a _cgo_init, call it using the gcc ABI.
- MOV _cgo_init(SB), T0
- BEQ T0, ZERO, nocgo
+ MOV _cgo_init(SB), T2
+ BEQ T2, ZERO, nocgo
MOV ZERO, A3 // arg 3: not used
MOV ZERO, A2 // arg 2: not used
MOV $setg_gcc<>(SB), A1 // arg 1: setg
MOV g, A0 // arg 0: G
- JALR RA, T0
+ JALR RA, T2
nocgo:
// update stackguard after _cgo_init
@@ -421,9 +421,9 @@ TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
// Call fn(arg) aligned appropriately for the gcc ABI.
// Called on a system stack, and there may be no g yet (during needm).
TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
- MOV fn+0(FP), X5
+ MOV fn+0(FP), X11
MOV arg+8(FP), X10
- JALR RA, (X5)
+ JALR RA, (X11)
RET
// func asmcgocall(fn, arg unsafe.Pointer) int32
@@ -431,7 +431,7 @@ TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
// aligned appropriately for the gcc ABI.
// See cgocall.go for more details.
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
- MOV fn+0(FP), X5
+ MOV fn+0(FP), X11
MOV arg+8(FP), X10
MOV X2, X8 // save original stack pointer
@@ -461,7 +461,7 @@ g0:
SUB X8, X9, X8
MOV X8, 8(X2) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
- JALR RA, (X5)
+ JALR RA, (X11)
// Restore g, stack pointer. X10 is return value.
MOV 0(X2), g
diff --git a/src/runtime/race_riscv64.s b/src/runtime/race_riscv64.s
index 9992a519eb..a455c9e496 100644
--- a/src/runtime/race_riscv64.s
+++ b/src/runtime/race_riscv64.s
@@ -27,7 +27,7 @@
// Called from instrumented code.
TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
- MOV $__tsan_read(SB), X5
+ MOV $__tsan_read(SB), X23
MOV X10, X11
MOV X1, X12
JMP racecalladdr<>(SB)
@@ -40,7 +40,7 @@ TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
// func runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
- MOV $__tsan_read_pc(SB), X5
+ MOV $__tsan_read_pc(SB), X23
MOV addr+0(FP), X11
MOV callpc+8(FP), X12
MOV pc+16(FP), X13
@@ -50,7 +50,7 @@ TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
// Called from instrumented code.
TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
- MOV $__tsan_write(SB), X5
+ MOV $__tsan_write(SB), X23
MOV X10, X11
MOV X1, X12
JMP racecalladdr<>(SB)
@@ -63,7 +63,7 @@ TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
// func runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
- MOV $__tsan_write_pc(SB), X5
+ MOV $__tsan_write_pc(SB), X23
MOV addr+0(FP), X11
MOV callpc+8(FP), X12
MOV pc+16(FP), X13
@@ -73,7 +73,7 @@ TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
// Called from instrumented code.
TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
- MOV $__tsan_read_range(SB), X5
+ MOV $__tsan_read_range(SB), X23
MOV X11, X12
MOV X10, X11
MOV X1, X13
@@ -87,7 +87,7 @@ TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
- MOV $__tsan_read_range(SB), X5
+ MOV $__tsan_read_range(SB), X23
MOV addr+0(FP), X11
MOV size+8(FP), X12
MOV pc+16(FP), X13
@@ -101,7 +101,7 @@ TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
// Called from instrumented code.
TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
- MOV $__tsan_write_range(SB), X5
+ MOV $__tsan_write_range(SB), X23
MOV X11, X12
MOV X10, X11
MOV X1, X13
@@ -115,7 +115,7 @@ TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
- MOV $__tsan_write_range(SB), X5
+ MOV $__tsan_write_range(SB), X23
MOV addr+0(FP), X11
MOV size+8(FP), X12
MOV pc+16(FP), X13
@@ -145,7 +145,7 @@ ret:
// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
- MOV $__tsan_func_enter(SB), X5
+ MOV $__tsan_func_enter(SB), X23
MOV X10, X11
MOV g_racectx(g), X10
JMP racecall<>(SB)
@@ -154,7 +154,7 @@ TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
// X1 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// void __tsan_func_enter(ThreadState *thr, void *pc);
- MOV $__tsan_func_enter(SB), X5
+ MOV $__tsan_func_enter(SB), X23
MOV g_racectx(g), X10
MOV X1, X11
JMP racecall<>(SB)
@@ -163,7 +163,7 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// Called from instrumented code.
TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
// void __tsan_func_exit(ThreadState *thr);
- MOV $__tsan_func_exit(SB), X5
+ MOV $__tsan_func_exit(SB), X23
MOV g_racectx(g), X10
JMP racecall<>(SB)
@@ -173,13 +173,13 @@ TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
GO_ARGS
- MOV $__tsan_go_atomic32_load(SB), X5
+ MOV $__tsan_go_atomic32_load(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
GO_ARGS
- MOV $__tsan_go_atomic64_load(SB), X5
+ MOV $__tsan_go_atomic64_load(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -203,13 +203,13 @@ TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
GO_ARGS
- MOV $__tsan_go_atomic32_store(SB), X5
+ MOV $__tsan_go_atomic32_store(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
GO_ARGS
- MOV $__tsan_go_atomic64_store(SB), X5
+ MOV $__tsan_go_atomic64_store(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -229,13 +229,13 @@ TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
GO_ARGS
- MOV $__tsan_go_atomic32_exchange(SB), X5
+ MOV $__tsan_go_atomic32_exchange(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
GO_ARGS
- MOV $__tsan_go_atomic64_exchange(SB), X5
+ MOV $__tsan_go_atomic64_exchange(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -255,7 +255,7 @@ TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
GO_ARGS
- MOV $__tsan_go_atomic32_fetch_add(SB), X5
+ MOV $__tsan_go_atomic32_fetch_add(SB), X23
CALL racecallatomic<>(SB)
// TSan performed fetch_add, but Go needs add_fetch.
MOVW add+8(FP), X5
@@ -266,7 +266,7 @@ TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
GO_ARGS
- MOV $__tsan_go_atomic64_fetch_add(SB), X5
+ MOV $__tsan_go_atomic64_fetch_add(SB), X23
CALL racecallatomic<>(SB)
// TSan performed fetch_add, but Go needs add_fetch.
MOV add+8(FP), X5
@@ -290,13 +290,13 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
// And
TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
GO_ARGS
- MOV $__tsan_go_atomic32_fetch_and(SB), X5
+ MOV $__tsan_go_atomic32_fetch_and(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
GO_ARGS
- MOV $__tsan_go_atomic64_fetch_and(SB), X5
+ MOV $__tsan_go_atomic64_fetch_and(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -315,13 +315,13 @@ TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
// Or
TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
GO_ARGS
- MOV $__tsan_go_atomic32_fetch_or(SB), X5
+ MOV $__tsan_go_atomic32_fetch_or(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
GO_ARGS
- MOV $__tsan_go_atomic64_fetch_or(SB), X5
+ MOV $__tsan_go_atomic64_fetch_or(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -341,13 +341,13 @@ TEXT sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
GO_ARGS
- MOV $__tsan_go_atomic32_compare_exchange(SB), X5
+ MOV $__tsan_go_atomic32_compare_exchange(SB), X23
CALL racecallatomic<>(SB)
RET
TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
GO_ARGS
- MOV $__tsan_go_atomic64_compare_exchange(SB), X5
+ MOV $__tsan_go_atomic64_compare_exchange(SB), X23
CALL racecallatomic<>(SB)
RET
@@ -364,7 +364,7 @@ TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
JMP sync∕atomic·CompareAndSwapInt64(SB)
// Generic atomic operation implementation.
-// X5 = addr of target function
+// X23 = addr of target function
TEXT racecallatomic<>(SB), NOSPLIT, $0
// Set up these registers
// X10 = *ThreadState
@@ -398,11 +398,11 @@ racecallatomic_ignore:
// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
// An attempt to synchronize on the address would cause crash.
MOV X1, X20 // save PC
- MOV X5, X21 // save target function
- MOV $__tsan_go_ignore_sync_begin(SB), X5
+ MOV X23, X21 // save target function
+ MOV $__tsan_go_ignore_sync_begin(SB), X23
MOV g_racectx(g), X10 // goroutine context
CALL racecall<>(SB)
- MOV X21, X5 // restore the target function
+ MOV X21, X23 // restore the target function
// Call the atomic function.
MOV g_racectx(g), X10 // goroutine context
MOV 8(X2), X11 // caller pc
@@ -410,7 +410,7 @@ racecallatomic_ignore:
ADD $24, X2, X13 // arguments
CALL racecall<>(SB)
// Call __tsan_go_ignore_sync_end.
- MOV $__tsan_go_ignore_sync_end(SB), X5
+ MOV $__tsan_go_ignore_sync_end(SB), X23
MOV g_racectx(g), X10 // goroutine context
CALL racecall<>(SB)
RET
@@ -420,14 +420,14 @@ racecallatomic_ignore:
// The arguments are never heap-object-preserving pointers, so we pretend there
// are no arguments.
TEXT runtime·racecall(SB), NOSPLIT, $0-0
- MOV fn+0(FP), X5
+ MOV fn+0(FP), X23
MOV arg0+8(FP), X10
MOV arg1+16(FP), X11
MOV arg2+24(FP), X12
MOV arg3+32(FP), X13
JMP racecall<>(SB)
-// Switches SP to g0 stack and calls X5. Arguments are already set.
+// Switches SP to g0 stack and calls X23. Arguments are already set.
TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
MOV X1, X18 // Save RA in callee save register
MOV X2, X19 // Save SP in callee save register
@@ -443,7 +443,7 @@ TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
MOV (g_sched+gobuf_sp)(X7), X2 // Switch to g0 stack
call:
- JALR RA, (X5) // Call C function
+ JALR RA, (X23) // Call C function
MOV X19, X2 // Restore SP
JMP (X18) // Return to Go.
@@ -458,7 +458,7 @@ TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
// can be executed on g0. Second, it is called frequently, so will
// benefit from this fast path.
BNEZ X10, rest
- MOV X1, X5
+ MOV X1, X23
MOV g, X6
CALL runtime·load_g(SB)
MOV g_m(g), X7
@@ -466,7 +466,7 @@ TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
MOV p_raceprocctx(X7), X7
MOV X7, (X11)
MOV X6, g
- JMP (X5)
+ JMP (X23)
rest:
// Save callee-save registers (X8, X9, X18..X27, F8, F9, F18..F27),
// since Go code will not respect this.
diff --git a/src/runtime/sys_openbsd_riscv64.s b/src/runtime/sys_openbsd_riscv64.s
index 3262b41052..bfdd9e1738 100644
--- a/src/runtime/sys_openbsd_riscv64.s
+++ b/src/runtime/sys_openbsd_riscv64.s
@@ -89,8 +89,8 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
MOVW sig+8(FP), X10
MOV info+16(FP), X11
MOV ctx+24(FP), X12
- MOV fn+0(FP), X5
- JALR X1, X5
+ MOV fn+0(FP), X6
+ JALR X1, X6
RET
TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$224
@@ -127,8 +127,8 @@ TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$224
MOVW X10, 8(X2)
MOV X11, 16(X2)
MOV X12, 24(X2)
- MOV $runtime·sigtrampgo(SB), X5
- JALR X1, X5
+ MOV $runtime·sigtrampgo(SB), X6
+ JALR X1, X6
// Restore callee-save registers.
MOV (4*8)(X2), X8
@@ -458,13 +458,13 @@ TEXT runtime·issetugid_trampoline(SB),NOSPLIT,$0
TEXT runtime·syscall(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
MOV $0, X13 // vararg
- JALR X1, X5
+ JALR X1, X6
MOV X10, (4*8)(X9) // r1
MOV X11, (5*8)(X9) // r2
@@ -502,13 +502,13 @@ ok:
TEXT runtime·syscallX(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
MOV $0, X13 // vararg
- JALR X1, X5
+ JALR X1, X6
MOV X10, (4*8)(X9) // r1
MOV X11, (5*8)(X9) // r2
@@ -548,7 +548,7 @@ ok:
TEXT runtime·syscall6(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -557,7 +557,7 @@ TEXT runtime·syscall6(SB),NOSPLIT,$8
MOV (6*8)(X9), X15 // a6
MOV $0, X16 // vararg
- JALR X1, X5
+ JALR X1, X6
MOV X10, (7*8)(X9) // r1
MOV X11, (8*8)(X9) // r2
@@ -598,7 +598,7 @@ ok:
TEXT runtime·syscall6X(SB),NOSPLIT,$8
MOV X10, X9 // pointer to args
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -607,7 +607,7 @@ TEXT runtime·syscall6X(SB),NOSPLIT,$8
MOV (6*8)(X9), X15 // a6
MOV $0, X16 // vararg
- JALR X1, X5
+ JALR X1, X6
MOV X10, (7*8)(X9) // r1
MOV X11, (8*8)(X9) // r2
@@ -652,7 +652,7 @@ TEXT runtime·syscall10(SB),NOSPLIT,$0
ADD $-16, X2
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -662,7 +662,7 @@ TEXT runtime·syscall10(SB),NOSPLIT,$0
MOV (7*8)(X9), X16 // a7
MOV (8*8)(X9), X17 // a8
- JALR X1, X5
+ JALR X1, X6
MOV X10, (11*8)(X9) // r1
MOV X11, (12*8)(X9) // r2
@@ -712,7 +712,7 @@ TEXT runtime·syscall10X(SB),NOSPLIT,$0
ADD $-16, X2
- MOV (0*8)(X9), X5 // fn
+ MOV (0*8)(X9), X6 // fn
MOV (1*8)(X9), X10 // a1
MOV (2*8)(X9), X11 // a2
MOV (3*8)(X9), X12 // a3
@@ -722,7 +722,7 @@ TEXT runtime·syscall10X(SB),NOSPLIT,$0
MOV (7*8)(X9), X16 // a7
MOV (8*8)(X9), X17 // a8
- JALR X1, X5
+ JALR X1, X6
MOV X10, (11*8)(X9) // r1
MOV X11, (12*8)(X9) // r2