about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
authorqmuntal <quimmuntal@gmail.com>2026-03-02 08:46:46 +0100
committerQuim Muntal <quimmuntal@gmail.com>2026-03-18 13:33:58 -0700
commit0a56bf885884d07f6391afcbb122041f193eebb2 (patch)
tree346118bd43b4f85b0fffe995cf5a13c3430a71a1 /src/runtime
parenta481ef071e0b30b33b2857919957be151b2d2a6d (diff)
downloadgo-0a56bf885884d07f6391afcbb122041f193eebb2.tar.xz
runtime: make asmcgocall more robust to missing G
Being able to call asmcgocall without a G is useful for code shared between different stages of the runtime initialization and thread creation.

Cq-Include-Trybots: luci.golang.try:gotip-darwin-arm64_15,gotip-linux-mips64le,gotip-linux-ppc64le_power10,gotip-linux-riscv64,gotip-openbsd-ppc64,gotip-openbsd-amd64
Change-Id: Ic427764de197e648e8b9987c98c3b7521512cc5c
Reviewed-on: https://go-review.googlesource.com/c/go/+/750541
Reviewed-by: Carlos Amedee <carlos@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/asm_386.s9
-rw-r--r--src/runtime/asm_amd64.s4
-rw-r--r--src/runtime/asm_arm.s4
-rw-r--r--src/runtime/asm_arm64.s4
-rw-r--r--src/runtime/asm_loong64.s15
-rw-r--r--src/runtime/asm_mips64x.s15
-rw-r--r--src/runtime/asm_ppc64x.s4
-rw-r--r--src/runtime/asm_riscv64.s15
-rw-r--r--src/runtime/asm_s390x.s20
-rw-r--r--src/runtime/sys_openbsd.go2
10 files changed, 74 insertions(+), 18 deletions(-)
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 48c77c8471..839d0ad147 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -665,6 +665,13 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-12
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already. Or we might already
// be on the m->gsignal stack.
+#ifdef GOOS_windows
+ // On Windows, get_tls might return garbage if the thread
+ // has never called into Go, so check tls_g directly.
+ MOVL runtime·tls_g(SB), CX
+ CMPL CX, $0
+ JEQ nosave
+#endif
get_tls(CX)
MOVL g(CX), DI
CMPL DI, $0
@@ -741,7 +748,7 @@ loadg:
#ifdef GOOS_windows
MOVL $0, BP
CMPL CX, $0
- JEQ 2(PC) // TODO
+ JEQ needm
#endif
MOVL g(CX), BP
CMPL BP, $0
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 39219babcc..09fd8e11d2 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -1015,10 +1015,6 @@ nosave:
// This code is like the above sequence but without saving/restoring g
// and without worrying about the stack moving out from under us
// (because we're on a system stack, not a goroutine stack).
- // The above code could be used directly if already on a system stack,
- // but then the only path through this code would be a rare case on Solaris.
- // Using this code for all "already on system stack" calls exercises it more,
- // which should help keep it correct.
MOVQ fn+0(FP), AX
MOVQ arg+8(FP), BX
MOVQ SP, DX
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 9373846c74..d9bf04f4c3 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -630,10 +630,6 @@ nosave:
// This code is like the above sequence but without saving/restoring g
// and without worrying about the stack moving out from under us
// (because we're on a system stack, not a goroutine stack).
- // The above code could be used directly if already on a system stack,
- // but then the only path through this code would be a rare case on Solaris.
- // Using this code for all "already on system stack" calls exercises it more,
- // which should help keep it correct.
SUB $24, R13
BIC $0x7, R13 // alignment for gcc ABI
// save null g in case someone looks during debugging.
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index a9da26990e..02ecfc51a1 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -1244,10 +1244,6 @@ nosave:
// This code is like the above sequence but without saving/restoring g
// and without worrying about the stack moving out from under us
// (because we're on a system stack, not a goroutine stack).
- // The above code could be used directly if already on a system stack,
- // but then the only path through this code would be a rare case on Solaris.
- // Using this code for all "already on system stack" calls exercises it more,
- // which should help keep it correct.
MOVD fn+0(FP), R1
MOVD arg+8(FP), R0
MOVD RSP, R2
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index a6a5519afb..88173679e6 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -539,6 +539,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// Figure out if we need to switch to m->g0 stack.
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already.
+ BEQ g, R0, nosave
MOVV g_m(g), R5
MOVV m_gsignal(R5), R6
BEQ R6, g, g0
@@ -571,6 +572,20 @@ g0:
MOVW R4, ret+16(FP)
RET
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown.
+ MOVV fn+0(FP), R25
+ MOVV arg+8(FP), R4
+ MOVV R3, R12
+ ADDV $-16, R3
+ MOVV R0, 0(R3) // Where above code stores g, in case someone looks during debugging.
+ MOVV R12, 8(R3) // Save original stack pointer.
+ JAL (R25)
+ MOVV 8(R3), R3 // Restore stack pointer.
+ MOVW R4, ret+16(FP)
+ RET
+
// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
// See cgocall.go for more details.
TEXT ·cgocallback(SB),NOSPLIT,$24-24
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 532eca752f..471bc01819 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -452,6 +452,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already. Or we might already
// be on the m->gsignal stack.
+ BEQ g, R0, nosave
MOVV g_m(g), R5
MOVV m_gsignal(R5), R6
BEQ R6, g, g0
@@ -484,6 +485,20 @@ g0:
MOVW R2, ret+16(FP)
RET
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown.
+ MOVV fn+0(FP), R25
+ MOVV arg+8(FP), R4
+ MOVV R29, R3
+ ADDV $-16, R29
+ MOVV R0, 0(R29) // Where above code stores g, in case someone looks during debugging.
+ MOVV R3, 8(R29) // Save original stack pointer.
+ JAL (R25)
+ MOVV 8(R29), R29 // Restore stack pointer.
+ MOVW R2, ret+16(FP)
+ RET
+
// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
// See cgocall.go for more details.
TEXT ·cgocallback(SB),NOSPLIT,$24-24
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index f61a8db854..a72974b555 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -782,10 +782,6 @@ nosave:
// This code is like the above sequence but without saving/restoring g
// and without worrying about the stack moving out from under us
// (because we're on a system stack, not a goroutine stack).
- // The above code could be used directly if already on a system stack,
- // but then the only path through this code would be a rare case.
- // Using this code for all "already on system stack" calls exercises it more,
- // which should help keep it correct.
SUB $(asmcgocallSaveOffset+8), R1, R10
RLDCR $0, R10, $~15, R1 // 16-byte alignment for gcc ABI
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index b59998960d..cafef51eb9 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -398,6 +398,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already. Or we might already
// be on the m->gsignal stack.
+ BEQZ g, nosave
MOV g_m(g), X6
MOV m_gsignal(X6), X7
BEQ X7, g, g0
@@ -431,6 +432,20 @@ g0:
MOVW X10, ret+16(FP)
RET
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown.
+ MOV fn+0(FP), X11
+ MOV arg+8(FP), X10
+ MOV X2, X8
+ SUB $16, X2
+ MOV ZERO, 0(X2) // Where above code stores g, in case someone looks during debugging.
+ MOV X8, 8(X2) // Save original stack pointer.
+ JALR RA, (X11)
+ MOV 8(X2), X2 // Restore stack pointer.
+ MOVW X10, ret+16(FP)
+ RET
+
// func asminit()
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 791ea80bc2..2870e32420 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -552,6 +552,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// We get called to create new OS threads too, and those
// come in on the m->g0 stack already. Or we might already
// be on the m->gsignal stack.
+ CMPBEQ g, $0, nosave
MOVD g_m(g), R6
MOVD m_gsignal(R6), R7
CMPBEQ R7, g, g0
@@ -589,6 +590,25 @@ g0:
MOVW R2, ret+16(FP)
RET
+nosave:
+ // Running on a system stack, perhaps even without a g.
+ // Having no g can happen during thread creation or thread teardown.
+ MOVD fn+0(FP), R3
+ MOVD arg+8(FP), R4
+ MOVD R15, R2
+ SUB $176, R15
+ MOVD $~7, R6
+ AND R6, R15
+ MOVD $0, 168(R15) // Where above code stores g, in case someone looks during debugging.
+ MOVD R2, 160(R15) // Save original stack pointer.
+ MOVD $0, 0(R15) // clear back chain pointer
+ MOVD R4, R2 // arg in R2
+ BL R3
+ XOR R0, R0
+ MOVD 160(R15), R15 // Restore stack pointer.
+ MOVW R2, ret+16(FP)
+ RET
+
// cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
// See cgocall.go for more details.
TEXT ·cgocallback(SB),NOSPLIT,$24-24
diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go
index df503d24c6..16ecc92753 100644
--- a/src/runtime/sys_openbsd.go
+++ b/src/runtime/sys_openbsd.go
@@ -85,7 +85,7 @@ func sched_yield_trampoline()
//go:nosplit
func osyield_no_g() {
- asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
+ asmcgocall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
// This is exported via linkname to assembly in runtime/cgo.