aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-10-03 10:11:20 -0400
committerCherry Mui <cherryyz@google.com>2025-10-03 10:11:21 -0400
commitfb1749a3fe6ac40c56127d8fcea2e8b13db820e8 (patch)
tree72dd38fc84286bf88311850a99e6becf2cbd99bf /src/runtime
parent703a5fbaad81f1285776bf6f2900506d3c751ea1 (diff)
parentadce7f196e6ac6d22e9bc851efea5f3ab650947c (diff)
downloadgo-fb1749a3fe6ac40c56127d8fcea2e8b13db820e8.tar.xz
[dev.simd] all: merge master (adce7f1) into dev.simd
Conflicts: - src/internal/goexperiment/flags.go - src/runtime/export_test.go Merge List: + 2025-10-03 adce7f196e cmd/link: support .def file with MSVC clang toolchain + 2025-10-03 d5b950399d cmd/cgo: fix unaligned arguments typedmemmove crash on iOS + 2025-10-02 53845004d6 net/http/httputil: deprecate ReverseProxy.Director + 2025-10-02 bbdff9e8e1 net/http: update bundled x/net/http2 and delete obsolete http2inTests + 2025-10-02 4008e07080 io/fs: move path name documentation up to the package doc comment + 2025-10-02 0e4e2e6832 runtime: skip TestGoroutineLeakProfile under mayMoreStackPreempt + 2025-10-02 f03c392295 runtime: fix aix/ppc64 library initialization + 2025-10-02 707454b41f cmd/go: update `go help mod edit` with the tool and ignore sections + 2025-10-02 8c68a1c1ab runtime,net/http/pprof: goroutine leak detection by using the garbage collector + 2025-10-02 84db201ae1 cmd/compile: propagate len([]T{}) to make builtin to allow stack allocation + 2025-10-02 5799c139a7 crypto/tls: rm marshalEncryptedClientHelloConfigList dead code + 2025-10-01 633dd1d475 encoding/json: fix Decoder.InputOffset regression in goexperiment.jsonv2 + 2025-10-01 8ad27fb656 doc/go_spec.html: update date + 2025-10-01 3f451f2c54 testing/synctest: fix inverted test failure message in TestContextAfterFunc + 2025-10-01 be0fed8a5f cmd/go/testdata/script/test_fuzz_fuzztime.txt: disable + 2025-09-30 eb1c7f6e69 runtime: move loong64 library entry point to os-agnostic file + 2025-09-30 c9257151e5 runtime: unify ppc64/ppc64le library entry point + 2025-09-30 4ff8a457db test/codegen: codify handling of floating point constants on arm64 + 2025-09-30 fcb893fc4b cmd/compile/internal/ssa: remove redundant "type:" prefix check + 2025-09-30 19cc1022ba mime: reduce allocs incurred by ParseMediaType + 2025-09-30 08afc50bea mime: extend "builtinTypes" to include a more complete list of common types + 2025-09-30 97da068774 cmd/compile: eliminate nil checks on .dict arg + 2025-09-30 300d9d2714 runtime: 
initialise debug settings much earlier in startup process + 2025-09-30 a846bb0aa5 errors: add AsType + 2025-09-30 7c8166d02d cmd/link/internal/arm64: support Mach-O ARM64_RELOC_SUBTRACTOR in internal linking + 2025-09-30 6e95748335 cmd/link/internal/arm64: support Mach-O ARM64_RELOC_POINTER_TO_GOT in internal linking + 2025-09-30 742f92063e cmd/compile, runtime: always enable Wasm signext and satconv features + 2025-09-30 db10db6be3 internal/poll: remove operation fields from FD + 2025-09-29 75c87df58e internal/poll: pass the I/O mode instead of an overlapped object in execIO + 2025-09-29 fc88e18b4a crypto/internal/fips140/entropy: add CPU jitter-based entropy source + 2025-09-29 db4fade759 crypto/internal/fips140/mlkem: make CAST conditional + 2025-09-29 db3cb3fd9a runtime: correct reference to getStackMap in comment + 2025-09-29 690fc2fb05 internal/poll: remove buf field from operation + 2025-09-29 eaf2345256 cmd/link: use a .def file to mark exported symbols on Windows + 2025-09-29 4b77733565 internal/syscall/windows: regenerate GetFileSizeEx + 2025-09-29 4e9006a716 crypto/tls: quote protocols in ALPN error message + 2025-09-29 047c2ab841 cmd/link: don't pass -Wl,-S on Solaris + 2025-09-29 ae8eba071b cmd/link: use correct length for pcln.cutab + 2025-09-29 fe3ba74b9e cmd/link: skip TestFlagW on platforms without DWARF symbol table + 2025-09-29 d42d56b764 encoding/xml: make use of reflect.TypeAssert + 2025-09-29 6d51f93257 runtime: jump instead of branch in netbsd/arm64 entry point + 2025-09-28 5500cbf0e4 debug/elf: prevent offset overflow + 2025-09-27 34e67623a8 all: fix typos + 2025-09-27 af6999e60d cmd/compile: implement jump table on loong64 + 2025-09-26 63cd912083 os/user: simplify go:build + 2025-09-26 53009b26dd runtime: use a smaller arena size on Wasm + 2025-09-26 3a5df9d2b2 net/http: add HTTP2Config.StrictMaxConcurrentRequests + 2025-09-26 16be34df02 net/http: add more tests of transport connection pool + 2025-09-26 3e4540b49d os/user: use getgrouplist 
on illumos && cgo + 2025-09-26 15fbe3480b internal/poll: simplify WriteMsg and ReadMsg on Windows + 2025-09-26 16ae11a9e1 runtime: move TestReadMetricsSched to testprog + 2025-09-26 459f3a3adc cmd/link: don't pass -Wl,-S on AIX + 2025-09-26 4631a2d3c6 cmd/link: skip TestFlagW on AIX + 2025-09-26 0f31d742cd cmd/compile: fix ICE with new(<untyped expr>) + 2025-09-26 7d7cd6e07b internal/poll: don't call SetFilePointerEx in Seek for overlapped handles + 2025-09-26 41cba31e66 mime/multipart: percent-encode CR and LF in header values to avoid CRLF injection + 2025-09-26 dd1d597c3a Revert "cmd/internal/obj/loong64: use the MOVVP instruction to optimize prologue" + 2025-09-26 45d6bc76af runtime: unify arm64 entry point code + 2025-09-25 fdea7da3e6 runtime: use common library entry point on windows amd64/386 + 2025-09-25 e8a4f508d1 lib/fips140: re-seal v1.0.0 + 2025-09-25 9b7a328089 crypto/internal/fips140: remove key import PCTs, make keygen PCTs fatal + 2025-09-25 7f9ab7203f crypto/internal/fips140: update frozen module version to "v1.0.0" + 2025-09-25 fb5719cbda crypto/internal/fips140/ecdsa: make TestingOnlyNewDRBG generic + 2025-09-25 56067e31f2 std: remove unused declarations Change-Id: Iecb28fd62c69fbed59da557f46d31bae55889e2c
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/asm_amd64.s18
-rw-r--r--src/runtime/asm_arm64.s72
-rw-r--r--src/runtime/asm_loong64.s51
-rw-r--r--src/runtime/asm_ppc64x.s60
-rw-r--r--src/runtime/cgo/gcc_libinit_windows.c9
-rw-r--r--src/runtime/cgo/windows.go22
-rw-r--r--src/runtime/chan.go32
-rw-r--r--src/runtime/conv_wasm_test.go45
-rw-r--r--src/runtime/crash_test.go16
-rw-r--r--src/runtime/decoratemappings_test.go72
-rw-r--r--src/runtime/export_test.go4
-rw-r--r--src/runtime/goroutineleakprofile_test.go603
-rw-r--r--src/runtime/malloc.go21
-rw-r--r--src/runtime/mbitmap.go22
-rw-r--r--src/runtime/metrics_test.go212
-rw-r--r--src/runtime/mgc.go306
-rw-r--r--src/runtime/mgcmark.go107
-rw-r--r--src/runtime/mgcmark_greenteagc.go2
-rw-r--r--src/runtime/mgcmark_nogreenteagc.go2
-rw-r--r--src/runtime/mgcscavenge_test.go143
-rw-r--r--src/runtime/mheap.go2
-rw-r--r--src/runtime/mpagealloc.go8
-rw-r--r--src/runtime/mpagealloc_test.go217
-rw-r--r--src/runtime/mpagecache_test.go103
-rw-r--r--src/runtime/mpallocbits_test.go189
-rw-r--r--src/runtime/mprof.go56
-rw-r--r--src/runtime/os_windows.go9
-rw-r--r--src/runtime/pprof/pprof.go45
-rw-r--r--src/runtime/pprof/runtime.go6
-rw-r--r--src/runtime/preempt.go3
-rw-r--r--src/runtime/proc.go40
-rw-r--r--src/runtime/rt0_aix_ppc64.s153
-rw-r--r--src/runtime/rt0_android_arm64.s10
-rw-r--r--src/runtime/rt0_darwin_arm64.s60
-rw-r--r--src/runtime/rt0_freebsd_arm64.s68
-rw-r--r--src/runtime/rt0_ios_arm64.s2
-rw-r--r--src/runtime/rt0_linux_arm64.s68
-rw-r--r--src/runtime/rt0_linux_loong64.s50
-rw-r--r--src/runtime/rt0_linux_ppc64le.s49
-rw-r--r--src/runtime/rt0_netbsd_arm64.s64
-rw-r--r--src/runtime/rt0_openbsd_arm64.s72
-rw-r--r--src/runtime/rt0_windows_386.s29
-rw-r--r--src/runtime/rt0_windows_amd64.s29
-rw-r--r--src/runtime/rt0_windows_arm64.s25
-rw-r--r--src/runtime/runtime-gdb_test.go2
-rw-r--r--src/runtime/runtime1.go15
-rw-r--r--src/runtime/runtime2.go121
-rw-r--r--src/runtime/select.go12
-rw-r--r--src/runtime/sema.go27
-rw-r--r--src/runtime/set_vma_name_linux.go6
-rw-r--r--src/runtime/set_vma_name_stub.go2
-rw-r--r--src/runtime/sizeof_test.go2
-rw-r--r--src/runtime/stack.go17
-rw-r--r--src/runtime/stack_test.go2
-rw-r--r--src/runtime/string_test.go2
-rw-r--r--src/runtime/sys_wasm.s58
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go277
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE21
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/README.md1847
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go145
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go115
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go98
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go82
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go66
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go167
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go108
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go60
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go125
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go78
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go92
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go124
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go135
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go122
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go62
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go183
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go77
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go72
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go126
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go100
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go81
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go98
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go163
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go111
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go105
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go84
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go123
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go65
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go74
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go105
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go81
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go317
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go129
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go144
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go154
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go95
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go118
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go166
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go100
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go72
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go223
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go83
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go68
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go108
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go120
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go86
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go97
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/main.go39
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go68
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go146
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go59
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go242
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go125
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go67
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go71
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go53
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go101
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go55
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go122
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go103
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/main.go39
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/simple.go253
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/stresstests.go89
-rw-r--r--src/runtime/testdata/testprog/pipe_unix.go (renamed from src/runtime/pipe_unix_test.go)2
-rw-r--r--src/runtime/testdata/testprog/pipe_windows.go (renamed from src/runtime/pipe_windows_test.go)2
-rw-r--r--src/runtime/testdata/testprog/schedmetrics.go267
-rw-r--r--src/runtime/traceback.go8
-rw-r--r--src/runtime/tracestatus.go2
131 files changed, 11798 insertions, 1357 deletions
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index f8ebd030b6..1cd1b0afc5 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -37,7 +37,12 @@ TEXT _rt0_amd64_lib(SB),NOSPLIT|NOFRAME,$0
MOVQ SI, _rt0_amd64_lib_argv<>(SB)
// Synchronous initialization.
+#ifndef GOOS_windows
+ // Avoid calling it on Windows because it is not used
+ // and it would crash the application due to the autogenerated
+ // ABI wrapper trying to access a non-existent TLS slot.
CALL runtime·libpreinit(SB)
+#endif
// Create a new thread to finish Go runtime initialization.
MOVQ _cgo_sys_thread_create(SB), AX
@@ -45,12 +50,23 @@ TEXT _rt0_amd64_lib(SB),NOSPLIT|NOFRAME,$0
JZ nocgo
// We're calling back to C.
- // Align stack per ELF ABI requirements.
+ // Align stack per C ABI requirements.
MOVQ SP, BX // Callee-save in C ABI
ANDQ $~15, SP
MOVQ $_rt0_amd64_lib_go(SB), DI
MOVQ $0, SI
+#ifdef GOOS_windows
+ // For Windows ABI
+ MOVQ DI, CX
+ MOVQ SI, DX
+ // Leave space for four words on the stack as required
+ // by the Windows amd64 calling convention.
+ ADJSP $32
+#endif
CALL AX
+#ifdef GOOS_windows
+ ADJSP $-32 // just to make the assembler not complain about unbalanced stack
+#endif
MOVQ BX, SP
JMP restore
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index a0072a3931..a0e82ec830 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -7,6 +7,78 @@
#include "tls_arm64.h"
#include "funcdata.h"
#include "textflag.h"
+#include "cgo/abi_arm64.h"
+
+// _rt0_arm64 is common startup code for most arm64 systems when using
+// internal linking. This is the entry point for the program from the
+// kernel for an ordinary -buildmode=exe program. The stack holds the
+// number of arguments and the C-style argv.
+TEXT _rt0_arm64(SB),NOSPLIT,$0
+ MOVD 0(RSP), R0 // argc
+ ADD $8, RSP, R1 // argv
+ JMP runtime·rt0_go(SB)
+
+// main is common startup code for most amd64 systems when using
+// external linking. The C startup code will call the symbol "main"
+// passing argc and argv in the usual C ABI registers R0 and R1.
+TEXT main(SB),NOSPLIT,$0
+ JMP runtime·rt0_go(SB)
+
+// _rt0_arm64_lib is common startup code for most arm64 systems when
+// using -buildmode=c-archive or -buildmode=c-shared. The linker will
+// arrange to invoke this function as a global constructor (for
+// c-archive) or when the shared library is loaded (for c-shared).
+// We expect argc and argv to be passed in the usual C ABI registers
+// R0 and R1.
+TEXT _rt0_arm64_lib(SB),NOSPLIT,$184
+ // Preserve callee-save registers.
+ SAVE_R19_TO_R28(24)
+ SAVE_F8_TO_F15(104)
+
+ // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go
+ MOVD ZR, g
+
+ MOVD R0, _rt0_arm64_lib_argc<>(SB)
+ MOVD R1, _rt0_arm64_lib_argv<>(SB)
+
+ // Synchronous initialization.
+ MOVD $runtime·libpreinit(SB), R4
+ BL (R4)
+
+ // Create a new thread to do the runtime initialization and return.
+ MOVD _cgo_sys_thread_create(SB), R4
+ CBZ R4, nocgo
+ MOVD $_rt0_arm64_lib_go(SB), R0
+ MOVD $0, R1
+ SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
+ BL (R4)
+ ADD $16, RSP
+ B restore
+
+nocgo:
+ MOVD $0x800000, R0 // stacksize = 8192KB
+ MOVD $_rt0_arm64_lib_go(SB), R1
+ MOVD R0, 8(RSP)
+ MOVD R1, 16(RSP)
+ MOVD $runtime·newosproc0(SB),R4
+ BL (R4)
+
+restore:
+ // Restore callee-save registers.
+ RESTORE_R19_TO_R28(24)
+ RESTORE_F8_TO_F15(104)
+ RET
+
+TEXT _rt0_arm64_lib_go(SB),NOSPLIT,$0
+ MOVD _rt0_arm64_lib_argc<>(SB), R0
+ MOVD _rt0_arm64_lib_argv<>(SB), R1
+ MOVD $runtime·rt0_go(SB),R4
+ B (R4)
+
+DATA _rt0_arm64_lib_argc<>(SB)/8, $0
+GLOBL _rt0_arm64_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_arm64_lib_argv<>(SB)/8, $0
+GLOBL _rt0_arm64_lib_argv<>(SB),NOPTR, $8
#ifdef GOARM64_LSE
DATA no_lse_msg<>+0x00(SB)/64, $"This program can only run on ARM64 processors with LSE support.\n"
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index ee7f825e1f..586bf89a5d 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -6,6 +6,57 @@
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
+#include "cgo/abi_loong64.h"
+
+// When building with -buildmode=c-shared, this symbol is called when the shared
+// library is loaded.
+TEXT _rt0_loong64_lib(SB),NOSPLIT,$168
+ // Preserve callee-save registers.
+ SAVE_R22_TO_R31(3*8)
+ SAVE_F24_TO_F31(13*8)
+
+ // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go
+ MOVV R0, g
+
+ MOVV R4, _rt0_loong64_lib_argc<>(SB)
+ MOVV R5, _rt0_loong64_lib_argv<>(SB)
+
+ // Synchronous initialization.
+ MOVV $runtime·libpreinit(SB), R19
+ JAL (R19)
+
+ // Create a new thread to do the runtime initialization and return.
+ MOVV _cgo_sys_thread_create(SB), R19
+ BEQ R19, nocgo
+ MOVV $_rt0_loong64_lib_go(SB), R4
+ MOVV $0, R5
+ JAL (R19)
+ JMP restore
+
+nocgo:
+ MOVV $0x800000, R4 // stacksize = 8192KB
+ MOVV $_rt0_loong64_lib_go(SB), R5
+ MOVV R4, 8(R3)
+ MOVV R5, 16(R3)
+ MOVV $runtime·newosproc0(SB), R19
+ JAL (R19)
+
+restore:
+ // Restore callee-save registers.
+ RESTORE_R22_TO_R31(3*8)
+ RESTORE_F24_TO_F31(13*8)
+ RET
+
+TEXT _rt0_loong64_lib_go(SB),NOSPLIT,$0
+ MOVV _rt0_loong64_lib_argc<>(SB), R4
+ MOVV _rt0_loong64_lib_argv<>(SB), R5
+ MOVV $runtime·rt0_go(SB),R19
+ JMP (R19)
+
+DATA _rt0_loong64_lib_argc<>(SB)/8, $0
+GLOBL _rt0_loong64_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_loong64_lib_argv<>(SB)/8, $0
+GLOBL _rt0_loong64_lib_argv<>(SB),NOPTR, $8
#define REGCTXT R29
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index fc70fa8204..b42e0b62f8 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -9,6 +9,66 @@
#include "funcdata.h"
#include "textflag.h"
#include "asm_ppc64x.h"
+#include "cgo/abi_ppc64x.h"
+
+
+TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0
+ // This is called with ELFv2 calling conventions. Convert to Go.
+ // Allocate argument storage for call to newosproc0.
+ STACK_AND_SAVE_HOST_TO_GO_ABI(16)
+
+ MOVD R3, _rt0_ppc64x_lib_argc<>(SB)
+ MOVD R4, _rt0_ppc64x_lib_argv<>(SB)
+
+ // Synchronous initialization.
+ MOVD $runtime·reginit(SB), R12
+ MOVD R12, CTR
+ BL (CTR)
+ MOVD $runtime·libpreinit(SB), R12
+ MOVD R12, CTR
+ BL (CTR)
+
+ // Create a new thread to do the runtime initialization and return.
+ MOVD _cgo_sys_thread_create(SB), R12
+ CMP $0, R12
+ BEQ nocgo
+ MOVD $_rt0_ppc64x_lib_go(SB), R3
+ MOVD $0, R4
+ MOVD R12, CTR
+ BL (CTR)
+ BR done
+
+nocgo:
+ MOVD $0x800000, R12 // stacksize = 8192KB
+ MOVD R12, 8+FIXED_FRAME(R1)
+ MOVD $_rt0_ppc64x_lib_go(SB), R12
+ MOVD R12, 16+FIXED_FRAME(R1)
+ MOVD $runtime·newosproc0(SB),R12
+ MOVD R12, CTR
+ BL (CTR)
+
+done:
+ // Restore and return to ELFv2 caller.
+ UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16)
+ RET
+
+#ifdef GO_PPC64X_HAS_FUNCDESC
+DEFINE_PPC64X_FUNCDESC(_rt0_ppc64x_lib_go, __rt0_ppc64x_lib_go)
+TEXT __rt0_ppc64x_lib_go(SB),NOSPLIT,$0
+#else
+TEXT _rt0_ppc64x_lib_go(SB),NOSPLIT,$0
+#endif
+ MOVD _rt0_ppc64x_lib_argc<>(SB), R3
+ MOVD _rt0_ppc64x_lib_argv<>(SB), R4
+ MOVD $runtime·rt0_go(SB), R12
+ MOVD R12, CTR
+ BR (CTR)
+
+DATA _rt0_ppc64x_lib_argc<>(SB)/8, $0
+GLOBL _rt0_ppc64x_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_ppc64x_lib_argv<>(SB)/8, $0
+GLOBL _rt0_ppc64x_lib_argv<>(SB),NOPTR, $8
+
#ifdef GOOS_aix
#define cgoCalleeStackSize 48
diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c
index 926f916843..7e7ff3e667 100644
--- a/src/runtime/cgo/gcc_libinit_windows.c
+++ b/src/runtime/cgo/gcc_libinit_windows.c
@@ -15,15 +15,6 @@
#include "libcgo.h"
#include "libcgo_windows.h"
-// Ensure there's one symbol marked __declspec(dllexport).
-// If there are no exported symbols, the unfortunate behavior of
-// the binutils linker is to also strip the relocations table,
-// resulting in non-PIE binary. The other option is the
-// --export-all-symbols flag, but we don't need to export all symbols
-// and this may overflow the export table (#40795).
-// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
-__declspec(dllexport) int _cgo_dummy_export;
-
static volatile LONG runtime_init_once_gate = 0;
static volatile LONG runtime_init_once_done = 0;
diff --git a/src/runtime/cgo/windows.go b/src/runtime/cgo/windows.go
new file mode 100644
index 0000000000..7ba61753df
--- /dev/null
+++ b/src/runtime/cgo/windows.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package cgo
+
+import _ "unsafe" // for go:linkname
+
+// _cgo_stub_export is only used to ensure there's at least one symbol
+// in the .def file passed to the external linker.
+// If there are no exported symbols, the unfortunate behavior of
+// the binutils linker is to also strip the relocations table,
+// resulting in non-PIE binary. The other option is the
+// --export-all-symbols flag, but we don't need to export all symbols
+// and this may overflow the export table (#40795).
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
+//
+//go:cgo_export_static _cgo_stub_export
+//go:linkname _cgo_stub_export _cgo_stub_export
+var _cgo_stub_export uintptr
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 639d29dc83..3320c248b4 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -263,11 +263,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
- mysg.elem = ep
+ mysg.elem.set(ep)
mysg.waitlink = nil
mysg.g = gp
mysg.isSelect = false
- mysg.c = c
+ mysg.c.set(c)
gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
@@ -298,7 +298,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
- mysg.c = nil
+ mysg.c.set(nil)
releaseSudog(mysg)
if closed {
if c.closed == 0 {
@@ -336,9 +336,9 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
}
- if sg.elem != nil {
+ if sg.elem.get() != nil {
sendDirect(c.elemtype, sg, ep)
- sg.elem = nil
+ sg.elem.set(nil)
}
gp := sg.g
unlockf()
@@ -395,7 +395,7 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
// Once we read sg.elem out of sg, it will no longer
// be updated if the destination's stack gets copied (shrunk).
// So make sure that no preemption points can happen between read & use.
- dst := sg.elem
+ dst := sg.elem.get()
typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
// No need for cgo write barrier checks because dst is always
// Go memory.
@@ -406,7 +406,7 @@ func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
// dst is on our stack or the heap, src is on another stack.
// The channel is locked, so src will not move during this
// operation.
- src := sg.elem
+ src := sg.elem.get()
typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
memmove(dst, src, t.Size_)
}
@@ -441,9 +441,9 @@ func closechan(c *hchan) {
if sg == nil {
break
}
- if sg.elem != nil {
- typedmemclr(c.elemtype, sg.elem)
- sg.elem = nil
+ if sg.elem.get() != nil {
+ typedmemclr(c.elemtype, sg.elem.get())
+ sg.elem.set(nil)
}
if sg.releasetime != 0 {
sg.releasetime = cputicks()
@@ -463,7 +463,7 @@ func closechan(c *hchan) {
if sg == nil {
break
}
- sg.elem = nil
+ sg.elem.set(nil)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
@@ -642,13 +642,13 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
- mysg.elem = ep
+ mysg.elem.set(ep)
mysg.waitlink = nil
gp.waiting = mysg
mysg.g = gp
mysg.isSelect = false
- mysg.c = c
+ mysg.c.set(c)
gp.param = nil
c.recvq.enqueue(mysg)
if c.timer != nil {
@@ -680,7 +680,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
}
success := mysg.success
gp.param = nil
- mysg.c = nil
+ mysg.c.set(nil)
releaseSudog(mysg)
return true, success
}
@@ -727,14 +727,14 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
typedmemmove(c.elemtype, ep, qp)
}
// copy data from sender to queue
- typedmemmove(c.elemtype, qp, sg.elem)
+ typedmemmove(c.elemtype, qp, sg.elem.get())
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
- sg.elem = nil
+ sg.elem.set(nil)
gp := sg.g
unlockf()
gp.param = unsafe.Pointer(sg)
diff --git a/src/runtime/conv_wasm_test.go b/src/runtime/conv_wasm_test.go
index 5054fca04d..3979a7b618 100644
--- a/src/runtime/conv_wasm_test.go
+++ b/src/runtime/conv_wasm_test.go
@@ -11,6 +11,8 @@ import (
var res int64
var ures uint64
+// TODO: This test probably should be in a different place.
+
func TestFloatTruncation(t *testing.T) {
testdata := []struct {
input float64
@@ -21,36 +23,37 @@ func TestFloatTruncation(t *testing.T) {
// max +- 1
{
input: 0x7fffffffffffffff,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0x8000000000000000,
},
// For out-of-bounds conversion, the result is implementation-dependent.
- // This test verifies the implementation of wasm architecture.
+ // This test verifies the implementation of wasm architecture, which is,
+ // saturating to the min/max value.
{
input: 0x8000000000000000,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0x8000000000000000,
},
{
input: 0x7ffffffffffffffe,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0x8000000000000000,
},
// neg max +- 1
{
input: -0x8000000000000000,
convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
{
input: -0x8000000000000001,
convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
{
input: -0x7fffffffffffffff,
convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
// trunc point +- 1
{
@@ -60,7 +63,7 @@ func TestFloatTruncation(t *testing.T) {
},
{
input: 0x7ffffffffffffe00,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0x8000000000000000,
},
{
@@ -72,48 +75,48 @@ func TestFloatTruncation(t *testing.T) {
{
input: -0x7ffffffffffffdff,
convInt64: -0x7ffffffffffffc00,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
{
input: -0x7ffffffffffffe00,
convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
{
input: -0x7ffffffffffffdfe,
convInt64: -0x7ffffffffffffc00,
- convUInt64: 0x8000000000000000,
+ convUInt64: 0,
},
// umax +- 1
{
input: 0xffffffffffffffff,
- convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
+ convUInt64: 0xffffffffffffffff,
},
{
input: 0x10000000000000000,
- convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
+ convUInt64: 0xffffffffffffffff,
},
{
input: 0xfffffffffffffffe,
- convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
+ convUInt64: 0xffffffffffffffff,
},
// umax trunc +- 1
{
input: 0xfffffffffffffbff,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0xfffffffffffff800,
},
{
input: 0xfffffffffffffc00,
- convInt64: -0x8000000000000000,
- convUInt64: 0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
+ convUInt64: 0xffffffffffffffff,
},
{
input: 0xfffffffffffffbfe,
- convInt64: -0x8000000000000000,
+ convInt64: 0x7fffffffffffffff,
convUInt64: 0xfffffffffffff800,
},
}
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 2db86e0562..2b8ca549ad 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -186,6 +186,22 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error)
t.Logf("running %v", cmd)
cmd.Dir = "testdata/" + binary
cmd = testenv.CleanCmdEnv(cmd)
+
+ // If tests need any experimental flags, add them here.
+ //
+ // TODO(vsaioc): Remove `goroutineleakprofile` once the feature is no longer experimental.
+ edited := false
+ for i := range cmd.Env {
+ e := cmd.Env[i]
+ if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok {
+ cmd.Env[i] = "GOEXPERIMENT=" + vars + ",goroutineleakprofile"
+ edited, _ = true, vars
+ }
+ }
+ if !edited {
+ cmd.Env = append(cmd.Env, "GOEXPERIMENT=goroutineleakprofile")
+ }
+
out, err := cmd.CombinedOutput()
if err != nil {
target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
diff --git a/src/runtime/decoratemappings_test.go b/src/runtime/decoratemappings_test.go
new file mode 100644
index 0000000000..7d1121c125
--- /dev/null
+++ b/src/runtime/decoratemappings_test.go
@@ -0,0 +1,72 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "os"
+ "regexp"
+ "runtime"
+ "testing"
+)
+
+func validateMapLabels(t *testing.T, labels []string) {
+ // These are the specific region labels that need get added during the
+ // runtime phase. Hence they are the ones that need to be confirmed as
+ // present at the time the test reads its own region labels, which
+ // is sufficient to validate that the default `decoratemappings` value
+ // (enabled) was set early enough in the init process.
+ regions := map[string]bool{
+ "allspans array": false,
+ "gc bits": false,
+ "heap": false,
+ "heap index": false,
+ "heap reservation": false,
+ "immortal metadata": false,
+ "page alloc": false,
+ "page alloc index": false,
+ "page summary": false,
+ "scavenge index": false,
+ }
+ for _, label := range labels {
+ if _, ok := regions[label]; !ok {
+ t.Logf("unexpected region label found: \"%s\"", label)
+ }
+ regions[label] = true
+ }
+ for label, found := range regions {
+ if !found {
+ t.Logf("region label missing: \"%s\"", label)
+ }
+ }
+}
+
+func TestDecorateMappings(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("decoratemappings is only supported on Linux")
+ // /proc/self/maps is also Linux-specific
+ }
+
+ var labels []string
+ if rawMaps, err := os.ReadFile("/proc/self/maps"); err != nil {
+ t.Fatalf("failed to read /proc/self/maps: %v", err)
+ } else {
+ t.Logf("maps:%s\n", string(rawMaps))
+ matches := regexp.MustCompile("[^[]+ \\[anon: Go: (.+)\\]\n").FindAllSubmatch(rawMaps, -1)
+ for _, match_pair := range matches {
+ // match_pair consists of the matching substring and the parenthesized group
+ labels = append(labels, string(match_pair[1]))
+ }
+ }
+ t.Logf("DebugDecorateMappings: %v", *runtime.DebugDecorateMappings)
+ if *runtime.DebugDecorateMappings != 0 && runtime.SetVMANameSupported() {
+ validateMapLabels(t, labels)
+ } else {
+ if len(labels) > 0 {
+ t.Errorf("unexpected mapping labels present: %v", labels)
+ } else {
+ t.Skip("mapping labels absent as expected")
+ }
+ }
+}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index da3b620d49..ae22263172 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -1938,3 +1938,7 @@ func TraceStack(gp *G, tab *TraceStackTable) {
}
var X86HasAVX = &x86HasAVX
+
+var DebugDecorateMappings = &debug.decoratemappings
+
+func SetVMANameSupported() bool { return setVMANameSupported() }
diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go
new file mode 100644
index 0000000000..6e26bcab13
--- /dev/null
+++ b/src/runtime/goroutineleakprofile_test.go
@@ -0,0 +1,603 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestGoroutineLeakProfile(t *testing.T) {
+ if strings.Contains(os.Getenv("GOFLAGS"), "mayMoreStackPreempt") {
+ // Some tests have false negatives under mayMoreStackPreempt. This may be a test-only issue,
+ // but needs more investigation.
+ testenv.SkipFlaky(t, 75729)
+ }
+
+ // Goroutine leak test case.
+ //
+ // Test cases can be configured with test name, the name of the entry point function,
+ // a set of expected leaks identified by regular expressions, and the number of times
+ // the test should be repeated.
+ //
+ // Repeated runs reduce flakiness in some tests.
+ type testCase struct {
+ name string
+ simple bool
+ repetitions int
+ expectedLeaks map[*regexp.Regexp]bool
+
+ // flakyLeaks are goroutine leaks that are too flaky to be reliably detected.
+ // Still, they might pop up every once in a while. The test will pass regardless
+ // of whether they occur or not, as they are not unexpected.
+ //
+ // Note that all flaky leaks are true positives, i.e. real goroutine leaks,
+ // and it is only their detection that is unreliable due to scheduling
+ // non-determinism.
+ flakyLeaks map[*regexp.Regexp]struct{}
+ }
+
+ // makeAnyTest is a short-hand for creating test cases.
+ // Each of the leaks in the list is identified by a regular expression.
+ // If a leak is flaky, it is added to the flakyLeaks map.
+ makeAnyTest := func(name string, flaky bool, repetitions int, leaks ...string) testCase {
+ tc := testCase{
+ name: name,
+ expectedLeaks: make(map[*regexp.Regexp]bool, len(leaks)),
+ flakyLeaks: make(map[*regexp.Regexp]struct{}, len(leaks)),
+ // Make sure the test is repeated at least once.
+ repetitions: repetitions | 1,
+ }
+
+ for _, leak := range leaks {
+ if !flaky {
+ tc.expectedLeaks[regexp.MustCompile(leak)] = false
+ } else {
+ tc.flakyLeaks[regexp.MustCompile(leak)] = struct{}{}
+ }
+ }
+
+ return tc
+ }
+
+ // makeTest is a short-hand for creating non-flaky test cases.
+ makeTest := func(name string, leaks ...string) testCase {
+ tcase := makeAnyTest(name, false, 2, leaks...)
+ tcase.simple = true
+ return tcase
+ }
+
+ // makeFlakyTest is a short-hand for creating flaky test cases.
+ makeFlakyTest := func(name string, leaks ...string) testCase {
+ if testing.Short() {
+ return makeAnyTest(name, true, 2, leaks...)
+ }
+ return makeAnyTest(name, true, 10, leaks...)
+ }
+
+ goroutineHeader := regexp.MustCompile(`goroutine \d+ \[`)
+
+ // extractLeaks takes the output of a test and splits it into a
+ // list of strings denoting goroutine leaks.
+ //
+ // If the input is:
+ //
+ // goroutine 1 [wait reason (leaked)]:
+ // main.leaked()
+ // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100
+ // created by main.main()
+ // ./testdata/testgoroutineleakprofile/main.go:10 +0x20
+ //
+ // goroutine 2 [wait reason (leaked)]:
+ // main.leaked2()
+ // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100
+ // created by main.main()
+ // ./testdata/testgoroutineleakprofile/main.go:10 +0x20
+ //
+ // The output is (as a list of strings):
+ //
+ // leaked() [wait reason]
+ // leaked2() [wait reason]
+ extractLeaks := func(output string) []string {
+ stacks := strings.Split(output, "\n\ngoroutine")
+ var leaks []string
+ for _, stack := range stacks {
+ lines := strings.Split(stack, "\n")
+ if len(lines) < 5 {
+ // Expecting at least the following lines (where n=len(lines)-1):
+ //
+ // [0] goroutine n [wait reason (leaked)]
+ // ...
+ // [n-3] bottom.leak.frame(...)
+ // [n-2] ./bottom/leak/frame/source.go:line
+ // [n-1] created by go.instruction()
+ // [n] ./go/instruction/source.go:line
+ continue
+ }
+
+ if !strings.Contains(lines[0], "(leaked)") {
+ // Ignore non-leaked goroutines.
+ continue
+ }
+
+ // Get the wait reason from the goroutine header.
+ header := lines[0]
+ waitReason := goroutineHeader.ReplaceAllString(header, "[")
+ waitReason = strings.ReplaceAll(waitReason, " (leaked)", "")
+
+ // Get the function name from the stack trace (should be two lines above `created by`).
+ var funcName string
+ for i := len(lines) - 1; i >= 0; i-- {
+ if strings.Contains(lines[i], "created by") {
+ funcName = strings.TrimPrefix(lines[i-2], "main.")
+ break
+ }
+ }
+ if funcName == "" {
+ t.Fatalf("failed to extract function name from stack trace: %s", lines)
+ }
+
+ leaks = append(leaks, funcName+" "+waitReason)
+ }
+ return leaks
+ }
+
+ // Micro tests involve very simple leaks for each type of concurrency primitive operation.
+ microTests := []testCase{
+ makeTest("NilRecv",
+ `NilRecv\.func1\(.* \[chan receive \(nil chan\)\]`,
+ ),
+ makeTest("NilSend",
+ `NilSend\.func1\(.* \[chan send \(nil chan\)\]`,
+ ),
+ makeTest("SelectNoCases",
+ `SelectNoCases\.func1\(.* \[select \(no cases\)\]`,
+ ),
+ makeTest("ChanRecv",
+ `ChanRecv\.func1\(.* \[chan receive\]`,
+ ),
+ makeTest("ChanSend",
+ `ChanSend\.func1\(.* \[chan send\]`,
+ ),
+ makeTest("Select",
+ `Select\.func1\(.* \[select\]`,
+ ),
+ makeTest("WaitGroup",
+ `WaitGroup\.func1\(.* \[sync\.WaitGroup\.Wait\]`,
+ ),
+ makeTest("MutexStack",
+ `MutexStack\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("MutexHeap",
+ `MutexHeap\.func1.1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Cond",
+ `Cond\.func1\(.* \[sync\.Cond\.Wait\]`,
+ ),
+ makeTest("RWMutexRLock",
+ `RWMutexRLock\.func1\(.* \[sync\.RWMutex\.RLock\]`,
+ ),
+ makeTest("RWMutexLock",
+ `RWMutexLock\.func1\(.* \[sync\.(RW)?Mutex\.Lock\]`,
+ ),
+ makeTest("Mixed",
+ `Mixed\.func1\(.* \[sync\.WaitGroup\.Wait\]`,
+ `Mixed\.func1.1\(.* \[chan send\]`,
+ ),
+ makeTest("NoLeakGlobal"),
+ }
+
+ // Stress tests are flaky and we do not strictly care about their output.
+ // They are only intended to stress the goroutine leak detector and profiling
+ // infrastructure in interesting ways.
+ stressTestCases := []testCase{
+ makeFlakyTest("SpawnGC",
+ `spawnGC.func1\(.* \[chan receive\]`,
+ ),
+ makeTest("DaisyChain"),
+ }
+
+ // Common goroutine leak patterns.
+ // Extracted from "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach"
+ // doi:10.1109/CGO57630.2024.10444835
+ patternTestCases := []testCase{
+ makeTest("NoCloseRange",
+ `noCloseRange\(.* \[chan send\]`,
+ `noCloseRange\.func1\(.* \[chan receive\]`,
+ ),
+ makeTest("MethodContractViolation",
+ `worker\.Start\.func1\(.* \[select\]`,
+ ),
+ makeTest("DoubleSend",
+ `DoubleSend\.func3\(.* \[chan send\]`,
+ ),
+ makeTest("EarlyReturn",
+ `earlyReturn\.func1\(.* \[chan send\]`,
+ ),
+ makeTest("NCastLeak",
+ `nCastLeak\.func1\(.* \[chan send\]`,
+ `NCastLeak\.func2\(.* \[chan receive\]`,
+ ),
+ makeTest("Timeout",
+ // (vsaioc): Timeout is *theoretically* flaky, but the
+ // pseudo-random choice for select case branches makes it
+ // practically impossible for it to fail.
+ `timeout\.func1\(.* \[chan send\]`,
+ ),
+ }
+
+ // GoKer tests from "GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs".
+ // Refer to testdata/testgoroutineleakprofile/goker/README.md.
+ //
+ // This list is curated for tests that are not excessively flaky.
+ // Some tests are also excluded because they are redundant.
+ //
+ // TODO(vsaioc): Some of these might be removable (their patterns may overlap).
+ gokerTestCases := []testCase{
+ makeFlakyTest("Cockroach584",
+ `Cockroach584\.func2\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach1055",
+ `Cockroach1055\.func2\(.* \[chan receive\]`,
+ `Cockroach1055\.func2\.2\(.* \[sync\.WaitGroup\.Wait\]`,
+ `Cockroach1055\.func2\.1\(.* \[chan receive\]`,
+ `Cockroach1055\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach1462",
+ `\(\*Stopper_cockroach1462\)\.RunWorker\.func1\(.* \[chan send\]`,
+ `Cockroach1462\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
+ ),
+ makeFlakyTest("Cockroach2448",
+ `\(\*Store_cockroach2448\)\.processRaft\(.* \[select\]`,
+ `\(\*state_cockroach2448\)\.start\(.* \[select\]`,
+ ),
+ makeFlakyTest("Cockroach3710",
+ `\(\*Store_cockroach3710\)\.ForceRaftLogScanAndProcess\(.* \[sync\.RWMutex\.RLock\]`,
+ `\(\*Store_cockroach3710\)\.processRaft\.func1\(.* \[sync\.RWMutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach6181",
+ `testRangeCacheCoalescedRequests_cockroach6181\(.* \[sync\.WaitGroup\.Wait\]`,
+ `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.(RW)?Mutex\.Lock\]`,
+ `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.RWMutex\.RLock\]`,
+ ),
+ makeTest("Cockroach7504",
+ `Cockroach7504\.func2\.1.* \[sync\.Mutex\.Lock\]`,
+ `Cockroach7504\.func2\.2.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach9935",
+ `\(\*loggingT_cockroach9935\)\.outputLogEntry\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach10214",
+ `\(*Store_cockroach10214\)\.sendQueuedHeartbeats\(.* \[sync\.Mutex\.Lock\]`,
+ `\(*Replica_cockroach10214\)\.tick\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach10790",
+ `\(\*Replica_cockroach10790\)\.beginCmds\.func1\(.* \[chan receive\]`,
+ ),
+ makeTest("Cockroach13197",
+ `\(\*Tx_cockroach13197\)\.awaitDone\(.* \[chan receive\]`,
+ ),
+ makeTest("Cockroach13755",
+ `\(\*Rows_cockroach13755\)\.awaitDone\(.* \[chan receive\]`,
+ ),
+ makeFlakyTest("Cockroach16167",
+ `Cockroach16167\.func2\(.* \[sync\.RWMutex\.RLock\]`,
+ `\(\*Executor_cockroach16167\)\.Start\(.* \[sync\.RWMutex\.Lock\]`,
+ ),
+ makeFlakyTest("Cockroach18101",
+ `restore_cockroach18101\.func1\(.* \[chan send\]`,
+ ),
+ makeTest("Cockroach24808",
+ `Cockroach24808\.func2\(.* \[chan send\]`,
+ ),
+ makeTest("Cockroach25456",
+ `Cockroach25456\.func2\(.* \[chan receive\]`,
+ ),
+ makeTest("Cockroach35073",
+ `Cockroach35073\.func2.1\(.* \[chan send\]`,
+ `Cockroach35073\.func2\(.* \[chan send\]`,
+ ),
+ makeTest("Cockroach35931",
+ `Cockroach35931\.func2\(.* \[chan send\]`,
+ ),
+ makeTest("Etcd5509",
+ `Etcd5509\.func2\(.* \[sync\.RWMutex\.Lock\]`,
+ ),
+ makeTest("Etcd6708",
+ `Etcd6708\.func2\(.* \[sync\.RWMutex\.RLock\]`,
+ ),
+ makeFlakyTest("Etcd6857",
+ `\(\*node_etcd6857\)\.Status\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Etcd6873",
+ `\(\*watchBroadcasts_etcd6873\)\.stop\(.* \[chan receive\]`,
+ `newWatchBroadcasts_etcd6873\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Etcd7492",
+ `Etcd7492\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
+ `Etcd7492\.func2\.1\(.* \[chan send\]`,
+ `\(\*simpleTokenTTLKeeper_etcd7492\)\.run\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Etcd7902",
+ `doRounds_etcd7902\.func1\(.* \[chan receive\]`,
+ `doRounds_etcd7902\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ `runElectionFunc_etcd7902\(.* \[sync\.WaitGroup\.Wait\]`,
+ ),
+ makeTest("Etcd10492",
+ `Etcd10492\.func2\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Grpc660",
+ `\(\*benchmarkClient_grpc660\)\.doCloseLoopUnary\.func1\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Grpc795",
+ `\(\*Server_grpc795\)\.Serve\(.* \[sync\.Mutex\.Lock\]`,
+ `testServerGracefulStopIdempotent_grpc795\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Grpc862",
+ `DialContext_grpc862\.func2\(.* \[chan receive\]`),
+ makeTest("Grpc1275",
+ `testInflightStreamClosing_grpc1275\.func1\(.* \[chan receive\]`),
+ makeTest("Grpc1424",
+ `DialContext_grpc1424\.func1\(.* \[chan receive\]`),
+ makeFlakyTest("Grpc1460",
+ `\(\*http2Client_grpc1460\)\.keepalive\(.* \[chan receive\]`,
+ `\(\*http2Client_grpc1460\)\.NewStream\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Grpc3017",
+ // grpc/3017 involves a goroutine leak that also simultaneously engages many GC assists.
+ `Grpc3017\.func2\(.* \[chan receive\]`,
+ `Grpc3017\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*lbCacheClientConn_grpc3017\)\.RemoveSubConn\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Hugo3251",
+ `Hugo3251\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
+ `Hugo3251\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
+ `Hugo3251\.func2\.1\(.* \[sync\.RWMutex\.RLock\]`,
+ ),
+ makeFlakyTest("Hugo5379",
+ `\(\*Page_hugo5379\)\.initContent\.func1\.1\(.* \[sync\.Mutex\.Lock\]`,
+ `pageRenderer_hugo5379\(.* \[sync\.Mutex\.Lock\]`,
+ `Hugo5379\.func2\(.* \[sync\.WaitGroup\.Wait\]`,
+ ),
+ makeFlakyTest("Istio16224",
+ `Istio16224\.func2\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*controller_istio16224\)\.Run\(.* \[chan send\]`,
+ `\(\*controller_istio16224\)\.Run\(.* \[chan receive\]`,
+ ),
+ makeFlakyTest("Istio17860",
+ `\(\*agent_istio17860\)\.runWait\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Istio18454",
+ `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan receive\]`,
+ `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan send\]`,
+ ),
+ // NOTE(vsaioc):
+ // Kubernetes/1321 is excluded due to a race condition in the original program
+ // that may, in extremely rare cases, lead to nil pointer dereference crashes.
+ // (Reproducible even with regular GC). Only kept here for posterity.
+ //
+ // makeTest(testCase{name: "Kubernetes1321"},
+ // `NewMux_kubernetes1321\.gowrap1\(.* \[chan send\]`,
+ // `testMuxWatcherClose_kubernetes1321\(.* \[sync\.Mutex\.Lock\]`),
+ makeTest("Kubernetes5316",
+ `finishRequest_kubernetes5316\.func1\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Kubernetes6632",
+ `\(\*idleAwareFramer_kubernetes6632\)\.monitor\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*idleAwareFramer_kubernetes6632\)\.WriteFrame\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Kubernetes10182",
+ `\(\*statusManager_kubernetes10182\)\.Start\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*statusManager_kubernetes10182\)\.SetPodStatus\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Kubernetes11298",
+ `After_kubernetes11298\.func1\(.* \[chan receive\]`,
+ `After_kubernetes11298\.func1\(.* \[sync\.Cond\.Wait\]`,
+ `Kubernetes11298\.func2\(.* \[chan receive\]`,
+ ),
+ makeFlakyTest("Kubernetes13135",
+ `Util_kubernetes13135\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*WatchCache_kubernetes13135\)\.Add\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Kubernetes25331",
+ `\(\*watchChan_kubernetes25331\)\.run\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Kubernetes26980",
+ `Kubernetes26980\.func2\(.* \[chan receive\]`,
+ `Kubernetes26980\.func2\.1\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*processorListener_kubernetes26980\)\.pop\(.* \[chan receive\]`,
+ ),
+ makeFlakyTest("Kubernetes30872",
+ `\(\*DelayingDeliverer_kubernetes30872\)\.StartWithHandler\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*Controller_kubernetes30872\)\.Run\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*NamespaceController_kubernetes30872\)\.Run\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Kubernetes38669",
+ `\(\*cacheWatcher_kubernetes38669\)\.process\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Kubernetes58107",
+ `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.Cond\.Wait\]`,
+ `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.RWMutex\.RLock\]`,
+ `\(\*ResourceQuotaController_kubernetes58107\)\.Sync\(.* \[sync\.RWMutex\.Lock\]`,
+ ),
+ makeFlakyTest("Kubernetes62464",
+ `\(\*manager_kubernetes62464\)\.reconcileState\(.* \[sync\.RWMutex\.RLock\]`,
+ `\(\*staticPolicy_kubernetes62464\)\.RemoveContainer\(.* \[sync\.(RW)?Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Kubernetes70277",
+ `Kubernetes70277\.func2\(.* \[chan receive\]`,
+ ),
+ makeFlakyTest("Moby4951",
+ `\(\*DeviceSet_moby4951\)\.DeleteDevice\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Moby7559",
+ `\(\*UDPProxy_moby7559\)\.Run\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeTest("Moby17176",
+ `testDevmapperLockReleasedDeviceDeletion_moby17176\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Moby21233",
+ `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[chan send\]`,
+ `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[select\]`,
+ `testTransfer_moby21233\(.* \[chan receive\]`,
+ ),
+ makeTest("Moby25348",
+ `\(\*Manager_moby25348\)\.init\(.* \[sync\.WaitGroup\.Wait\]`,
+ ),
+ makeFlakyTest("Moby27782",
+ `\(\*JSONFileLogger_moby27782\)\.readLogs\(.* \[sync\.Cond\.Wait\]`,
+ `\(\*Watcher_moby27782\)\.readEvents\(.* \[select\]`,
+ ),
+ makeFlakyTest("Moby28462",
+ `monitor_moby28462\(.* \[sync\.Mutex\.Lock\]`,
+ `\(\*Daemon_moby28462\)\.StateChanged\(.* \[chan send\]`,
+ ),
+ makeTest("Moby30408",
+ `Moby30408\.func2\(.* \[chan receive\]`,
+ `testActive_moby30408\.func1\(.* \[sync\.Cond\.Wait\]`,
+ ),
+ makeFlakyTest("Moby33781",
+ `monitor_moby33781\.func1\(.* \[chan send\]`,
+ ),
+ makeFlakyTest("Moby36114",
+ `\(\*serviceVM_moby36114\)\.hotAddVHDsAtStart\(.* \[sync\.Mutex\.Lock\]`,
+ ),
+ makeFlakyTest("Serving2137",
+ `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[chan send\]`,
+ `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[sync\.Mutex\.Lock\]`,
+ `Serving2137\.func2\(.* \[chan receive\]`,
+ ),
+ makeTest("Syncthing4829",
+ `Syncthing4829\.func2\(.* \[sync\.RWMutex\.RLock\]`,
+ ),
+ makeTest("Syncthing5795",
+ `\(\*rawConnection_syncthing5795\)\.dispatcherLoop\(.* \[chan receive\]`,
+ `Syncthing5795\.func2.* \[chan receive\]`,
+ ),
+ }
+
+ // Combine all test cases into a single list.
+ testCases := append(microTests, stressTestCases...)
+ testCases = append(testCases, patternTestCases...)
+
+ // Test cases must not panic or cause fatal exceptions.
+ failStates := regexp.MustCompile(`fatal|panic`)
+
+ testApp := func(exepath string, testCases []testCase) {
+
+ // Build the test program once.
+ exe, err := buildTestProg(t, exepath)
+ if err != nil {
+ t.Fatal(fmt.Sprintf("building testgoroutineleakprofile failed: %v", err))
+ }
+
+ for _, tcase := range testCases {
+ t.Run(tcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ cmdEnv := []string{
+ "GODEBUG=asyncpreemptoff=1",
+ "GOEXPERIMENT=greenteagc,goroutineleakprofile",
+ }
+
+ if tcase.simple {
+ // If the test is simple, set GOMAXPROCS=1 in order to better
+ // control the behavior of the scheduler.
+ cmdEnv = append(cmdEnv, "GOMAXPROCS=1")
+ }
+
+ var output string
+ for i := 0; i < tcase.repetitions; i++ {
+ // Run program for one repetition and get runOutput trace.
+ runOutput := runBuiltTestProg(t, exe, tcase.name, cmdEnv...)
+ if len(runOutput) == 0 {
+ t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name)
+ }
+
+ // Zero tolerance policy for fatal exceptions or panics.
+ if failStates.MatchString(runOutput) {
+ t.Errorf("unexpected fatal exception or panic!\noutput:\n%s\n\n", runOutput)
+ }
+
+ output += runOutput + "\n\n"
+ }
+
+ // Extract all the goroutine leaks
+ foundLeaks := extractLeaks(output)
+
+ // If the test case was not expected to produce leaks, but some were reported,
+ // stop the test immediately. Zero tolerance policy for false positives.
+ if len(tcase.expectedLeaks)+len(tcase.flakyLeaks) == 0 && len(foundLeaks) > 0 {
+ t.Errorf("output:\n%s\n\ngoroutines leaks detected in case with no leaks", output)
+ }
+
+ unexpectedLeaks := make([]string, 0, len(foundLeaks))
+
+ // Parse every leak and check if it is expected (maybe as a flaky leak).
+ LEAKS:
+ for _, leak := range foundLeaks {
+ // Check if the leak is expected.
+ // If it is, check whether it has been encountered before.
+ var foundNew bool
+ var leakPattern *regexp.Regexp
+
+ for expectedLeak, ok := range tcase.expectedLeaks {
+ if expectedLeak.MatchString(leak) {
+ if !ok {
+ foundNew = true
+ }
+
+ leakPattern = expectedLeak
+ break
+ }
+ }
+
+ if foundNew {
+ // Only bother writing if we found a new leak.
+ tcase.expectedLeaks[leakPattern] = true
+ }
+
+ if leakPattern == nil {
+ // We are dealing with a leak not marked as expected.
+ // Check if it is a flaky leak.
+ for flakyLeak := range tcase.flakyLeaks {
+ if flakyLeak.MatchString(leak) {
+ // The leak is flaky. Carry on to the next line.
+ continue LEAKS
+ }
+ }
+
+ unexpectedLeaks = append(unexpectedLeaks, leak)
+ }
+ }
+
+ missingLeakStrs := make([]string, 0, len(tcase.expectedLeaks))
+ for expectedLeak, found := range tcase.expectedLeaks {
+ if !found {
+ missingLeakStrs = append(missingLeakStrs, expectedLeak.String())
+ }
+ }
+
+ var errors []error
+ if len(unexpectedLeaks) > 0 {
+ errors = append(errors, fmt.Errorf("unexpected goroutine leaks:\n%s\n", strings.Join(unexpectedLeaks, "\n")))
+ }
+ if len(missingLeakStrs) > 0 {
+ errors = append(errors, fmt.Errorf("missing expected leaks:\n%s\n", strings.Join(missingLeakStrs, ", ")))
+ }
+ if len(errors) > 0 {
+ t.Fatalf("Failed with the following errors:\n%s\n\noutput:\n%s", errors, output)
+ }
+ })
+ }
+ }
+
+ testApp("testgoroutineleakprofile", testCases)
+ testApp("testgoroutineleakprofile/goker", gokerTestCases)
+}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index db91e89359..fc4f21b532 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -233,19 +233,22 @@ const (
// ios/arm64 40 4MB 1 256K (2MB)
// */32-bit 32 4MB 1 1024 (4KB)
// */mips(le) 31 4MB 1 512 (2KB)
+ // wasm 32 512KB 1 8192 (64KB)
// heapArenaBytes is the size of a heap arena. The heap
// consists of mappings of size heapArenaBytes, aligned to
// heapArenaBytes. The initial heap mapping is one arena.
//
- // This is currently 64MB on 64-bit non-Windows and 4MB on
- // 32-bit and on Windows. We use smaller arenas on Windows
- // because all committed memory is charged to the process,
- // even if it's not touched. Hence, for processes with small
- // heaps, the mapped arena space needs to be commensurate.
- // This is particularly important with the race detector,
- // since it significantly amplifies the cost of committed
- // memory.
+ // This is currently 64MB on 64-bit non-Windows, 4MB on
+ // 32-bit and on Windows, and 512KB on Wasm. We use smaller
+ // arenas on Windows because all committed memory is charged
+ // to the process, even if it's not touched. Hence, for
+ // processes with small heaps, the mapped arena space needs
+ // to be commensurate. This is particularly important with
+ // the race detector, since it significantly amplifies the
+ // cost of committed memory. We use smaller arenas on Wasm
+ // because some Wasm programs have very small heaps, and
+ // everything in the Wasm linear memory is charged.
heapArenaBytes = 1 << logHeapArenaBytes
heapArenaWords = heapArenaBytes / goarch.PtrSize
@@ -253,7 +256,7 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (9+10)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 508de9a115..37a92c64bc 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -1267,6 +1267,28 @@ func markBitsForSpan(base uintptr) (mbits markBits) {
return mbits
}
+// isMarkedOrNotInHeap returns true if a pointer is in the heap and marked,
+// or if the pointer is not in the heap. Used by goroutine leak detection
+// to determine if concurrency resources are reachable in memory.
+func isMarkedOrNotInHeap(p unsafe.Pointer) bool {
+ obj, span, objIndex := findObject(uintptr(p), 0, 0)
+ if obj != 0 {
+ mbits := span.markBitsForIndex(objIndex)
+ return mbits.isMarked()
+ }
+
+ // If we fall through to get here, the object is not in the heap.
+ // In this case, it is either a pointer to a stack object or a global resource.
+ // Treat it as reachable in memory by default, to be safe.
+ //
+ // TODO(vsaioc): we could be more precise by checking against the stacks
+ // of runnable goroutines. I don't think this is necessary, based on what we've seen, but
+ // let's keep the option open in case the runtime evolves.
+ // This will (naively) lead to quadratic blow-up for goroutine leak detection,
+ // but if it is only run on demand, maybe the extra cost is not a show-stopper.
+ return true
+}
+
// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
if m.mask == 1<<7 {
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index af042f4445..b67424301b 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -22,7 +22,6 @@ import (
"strings"
"sync"
"sync/atomic"
- "syscall"
"testing"
"time"
"unsafe"
@@ -1578,211 +1577,10 @@ func TestReadMetricsFinalizers(t *testing.T) {
}
func TestReadMetricsSched(t *testing.T) {
- const (
- notInGo = iota
- runnable
- running
- waiting
- created
- threads
- numSamples
- )
- var s [numSamples]metrics.Sample
- s[notInGo].Name = "/sched/goroutines/not-in-go:goroutines"
- s[runnable].Name = "/sched/goroutines/runnable:goroutines"
- s[running].Name = "/sched/goroutines/running:goroutines"
- s[waiting].Name = "/sched/goroutines/waiting:goroutines"
- s[created].Name = "/sched/goroutines-created:goroutines"
- s[threads].Name = "/sched/threads/total:threads"
-
- logMetrics := func(t *testing.T, s []metrics.Sample) {
- for i := range s {
- t.Logf("%s: %d", s[i].Name, s[i].Value.Uint64())
- }
- }
-
- // generalSlack is the amount of goroutines we allow ourselves to be
- // off by in any given category, either due to background system
- // goroutines or testing package goroutines.
- const generalSlack = 4
-
- // waitingSlack is the max number of blocked goroutines left
- // from other tests, the testing package, or system
- // goroutines.
- const waitingSlack = 100
-
- // threadsSlack is the maximum number of threads left over
- // from other tests and the runtime (sysmon, the template thread, etc.)
- const threadsSlack = 20
-
- // Make sure GC isn't running, since GC workers interfere with
- // expected counts.
- defer debug.SetGCPercent(debug.SetGCPercent(-1))
- runtime.GC()
-
- check := func(t *testing.T, s *metrics.Sample, min, max uint64) {
- val := s.Value.Uint64()
- if val < min {
- t.Errorf("%s too low; %d < %d", s.Name, val, min)
- }
- if val > max {
- t.Errorf("%s too high; %d > %d", s.Name, val, max)
- }
- }
- checkEq := func(t *testing.T, s *metrics.Sample, value uint64) {
- check(t, s, value, value)
+ // This test is run in a subprocess to prevent other tests from polluting the metrics.
+ output := runTestProg(t, "testprog", "SchedMetrics")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
}
- spinUntil := func(f func() bool) bool {
- for {
- if f() {
- return true
- }
- time.Sleep(50 * time.Millisecond)
- }
- }
-
- // Check base values.
- t.Run("base", func(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
- metrics.Read(s[:])
- logMetrics(t, s[:])
- check(t, &s[notInGo], 0, generalSlack)
- check(t, &s[runnable], 0, generalSlack)
- checkEq(t, &s[running], 1)
- check(t, &s[waiting], 0, waitingSlack)
- })
-
- metrics.Read(s[:])
- createdAfterBase := s[created].Value.Uint64()
-
- // Force Running count to be high. We'll use these goroutines
- // for Runnable, too.
- const count = 10
- var ready, exit atomic.Uint32
- for i := 0; i < count-1; i++ {
- go func() {
- ready.Add(1)
- for exit.Load() == 0 {
- // Spin to get us and keep us running, but check
- // the exit condition so we exit out early if we're
- // done.
- start := time.Now()
- for time.Since(start) < 10*time.Millisecond && exit.Load() == 0 {
- }
- runtime.Gosched()
- }
- }()
- }
- for ready.Load() < count-1 {
- runtime.Gosched()
- }
-
- // Be careful. We've entered a dangerous state for platforms
- // that do not return back to the underlying system unless all
- // goroutines are blocked, like js/wasm, since we have a bunch
- // of runnable goroutines all spinning. We cannot write anything
- // out.
- if testenv.HasParallelism() {
- t.Run("created", func(t *testing.T) {
- metrics.Read(s[:])
- logMetrics(t, s[:])
- checkEq(t, &s[created], createdAfterBase+count)
- })
- t.Run("running", func(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(count + 4))
- // It can take a little bit for the scheduler to
- // distribute the goroutines to Ps, so retry until
- // we see the count we expect or the test times out.
- spinUntil(func() bool {
- metrics.Read(s[:])
- return s[running].Value.Uint64() >= count
- })
- logMetrics(t, s[:])
- check(t, &s[running], count, count+4)
- check(t, &s[threads], count, count+4+threadsSlack)
- })
-
- // Force runnable count to be high.
- t.Run("runnable", func(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
- metrics.Read(s[:])
- logMetrics(t, s[:])
- checkEq(t, &s[running], 1)
- check(t, &s[runnable], count-1, count+generalSlack)
- })
-
- // Done with the running/runnable goroutines.
- exit.Store(1)
- } else {
- // Read metrics and then exit all the other goroutines,
- // so that system calls may proceed.
- metrics.Read(s[:])
-
- // Done with the running/runnable goroutines.
- exit.Store(1)
-
- // Now we can check our invariants.
- t.Run("created", func(t *testing.T) {
- // Look for count-1 goroutines because we read metrics
- // *before* t.Run goroutine was created for this sub-test.
- checkEq(t, &s[created], createdAfterBase+count-1)
- })
- t.Run("running", func(t *testing.T) {
- logMetrics(t, s[:])
- checkEq(t, &s[running], 1)
- checkEq(t, &s[threads], 1)
- })
- t.Run("runnable", func(t *testing.T) {
- logMetrics(t, s[:])
- check(t, &s[runnable], count-1, count+generalSlack)
- })
- }
-
- // Force not-in-go count to be high. This is a little tricky since
- // we try really hard not to let things block in system calls.
- // We have to drop to the syscall package to do this reliably.
- t.Run("not-in-go", func(t *testing.T) {
- // Block a bunch of goroutines on an OS pipe.
- pr, pw, err := pipe()
- if err != nil {
- switch runtime.GOOS {
- case "js", "wasip1":
- t.Skip("creating pipe:", err)
- }
- t.Fatal("creating pipe:", err)
- }
- for i := 0; i < count; i++ {
- go syscall.Read(pr, make([]byte, 1))
- }
-
- // Let the goroutines block.
- spinUntil(func() bool {
- metrics.Read(s[:])
- return s[notInGo].Value.Uint64() >= count
- })
- logMetrics(t, s[:])
- check(t, &s[notInGo], count, count+generalSlack)
-
- syscall.Close(pw)
- syscall.Close(pr)
- })
-
- t.Run("waiting", func(t *testing.T) {
- // Force waiting count to be high.
- const waitingCount = 1000
- stop := make(chan bool)
- for i := 0; i < waitingCount; i++ {
- go func() { <-stop }()
- }
-
- // Let the goroutines block.
- spinUntil(func() bool {
- metrics.Read(s[:])
- return s[waiting].Value.Uint64() >= waitingCount
- })
- logMetrics(t, s[:])
- check(t, &s[waiting], waitingCount, waitingCount+waitingSlack)
-
- close(stop)
- })
}
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 68cbfda500..b13ec845fc 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -376,8 +376,8 @@ type workType struct {
// (and thus 8-byte alignment even on 32-bit architectures).
bytesMarked uint64
- markrootNext uint32 // next markroot job
- markrootJobs uint32 // number of markroot jobs
+ markrootNext atomic.Uint32 // next markroot job
+ markrootJobs atomic.Uint32 // number of markroot jobs
nproc uint32
tstart int64
@@ -385,17 +385,44 @@ type workType struct {
// Number of roots of various root types. Set by gcPrepareMarkRoots.
//
- // nStackRoots == len(stackRoots), but we have nStackRoots for
- // consistency.
- nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
+ // During normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots);
+ // during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots
+ // scheduled for marking.
+ // In both variants, nStackRoots == len(stackRoots).
+ nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int
+
+ // The following fields monitor the GC phase of the current cycle during
+ // goroutine leak detection.
+ goroutineLeak struct {
+ // Once set, it indicates that the GC will perform goroutine leak detection during
+ // the next GC cycle; it is set by goroutineLeakGC and unset during gcStart.
+ pending atomic.Bool
+ // Once set, it indicates that the GC has started a goroutine leak detection run;
+ // it is set during gcStart and unset during gcMarkTermination;
+ //
+ // Protected by STW.
+ enabled bool
+ // Once set, it indicates that the GC has performed goroutine leak detection during
+ // the current GC cycle; it is set during gcMarkDone, right after goroutine leak detection,
+ // and unset during gcMarkTermination;
+ //
+ // Protected by STW.
+ done bool
+ // The number of leaked goroutines during the last leak detection GC cycle.
+ //
+ // Write-protected by STW in findGoroutineLeaks.
+ count int
+ }
// Base indexes of each root type. Set by gcPrepareMarkRoots.
baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
- // stackRoots is a snapshot of all of the Gs that existed
- // before the beginning of concurrent marking. The backing
- // store of this must not be modified because it might be
- // shared with allgs.
+ // stackRoots is a snapshot of all of the Gs that existed before the
+ // beginning of concurrent marking. During goroutine leak detection, stackRoots
+ // is partitioned into two sets; to the left of nMaybeRunnableStackRoots are stackRoots
+ // of running / runnable goroutines and to the right of nMaybeRunnableStackRoots are
+ // stackRoots of unmarked / not runnable goroutines.
+ // The stackRoots array is re-partitioned after each marking phase iteration.
stackRoots []*g
// Each type of GC state transition is protected by a lock.
@@ -562,6 +589,55 @@ func GC() {
releasem(mp)
}
+// goroutineLeakGC runs a GC cycle that performs goroutine leak detection.
+//
+//go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC
+func goroutineLeakGC() {
+ // Set the pending flag to true, instructing the next GC cycle to
+ // perform goroutine leak detection.
+ work.goroutineLeak.pending.Store(true)
+
+ // Spin GC cycles until the pending flag is unset.
+ // This ensures that goroutineLeakGC waits for a GC cycle that
+ // actually performs goroutine leak detection.
+ //
+ // This is needed in case multiple concurrent calls to GC
+ // are simultaneously fired by the system, wherein some
+ // of them are dropped.
+ //
+ // In the vast majority of cases, only one loop iteration is needed;
+ // however, multiple concurrent calls to goroutineLeakGC could lead to
+ // the execution of additional GC cycles.
+ //
+ // Examples:
+ //
+ // pending? | G1 | G2
+ // ---------|-------------------------|-----------------------
+ // - | goroutineLeakGC() | goroutineLeakGC()
+ // - | pending.Store(true) | .
+ // X | for pending.Load() | .
+ // X | GC() | .
+ // X | > gcStart() | .
+ // X | pending.Store(false) | .
+ // ...
+ // - | > gcMarkDone() | .
+ // - | . | pending.Store(true)
+ // ...
+ // X | > gcMarkTermination() | .
+ // X | ...
+ // X | < GC returns | .
+ // X | for pending.Load | .
+ // X | GC() | .
+ // X | . | for pending.Load()
+ // X | . | GC()
+ // ...
+ // The first to pick up the pending flag will start a
+ // leak detection cycle.
+ for work.goroutineLeak.pending.Load() {
+ GC()
+ }
+}
+
// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
// already completed this mark phase, it returns immediately.
func gcWaitOnMark(n uint32) {
@@ -785,6 +861,15 @@ func gcStart(trigger gcTrigger) {
schedEnableUser(false)
}
+ // If goroutine leak detection is pending, enable it for this GC cycle.
+ if work.goroutineLeak.pending.Load() {
+ work.goroutineLeak.enabled = true
+ work.goroutineLeak.pending.Store(false)
+ // Set all sync objects of blocked goroutines as untraceable
+ // by the GC. Only set as traceable at the end of the GC cycle.
+ setSyncObjectsUntraceable()
+ }
+
// Enter concurrent mark phase and enable
// write barriers.
//
@@ -995,8 +1080,20 @@ top:
}
}
})
- if restart {
- gcDebugMarkDone.restartedDueTo27993 = true
+
+ // Check whether we need to resume the marking phase because of issue #27993
+ // or because of goroutine leak detection.
+ if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) {
+ if restart {
+ // Restart because of issue #27993.
+ gcDebugMarkDone.restartedDueTo27993 = true
+ } else {
+ // Marking has reached a fixed-point. Attempt to detect goroutine leaks.
+ //
+ // If the returned value is true, then detection already concluded for this cycle.
+ // Otherwise, more runnable goroutines were discovered, requiring additional mark work.
+ work.goroutineLeak.done = findGoroutineLeaks()
+ }
getg().m.preemptoff = ""
systemstack(func() {
@@ -1047,6 +1144,172 @@ top:
gcMarkTermination(stw)
}
+// isMaybeRunnable checks whether a goroutine may still be semantically runnable.
+// For goroutines which are semantically runnable, this will eventually return true
+// as the GC marking phase progresses. It returns false for leaked goroutines, or for
+// goroutines which are not yet computed as possibly runnable by the GC.
+func (gp *g) isMaybeRunnable() bool {
+ // Check whether the goroutine is actually in a waiting state first.
+ if readgstatus(gp) != _Gwaiting {
+ // If the goroutine is not waiting, then clearly it is maybe runnable.
+ return true
+ }
+
+ switch gp.waitreason {
+ case waitReasonSelectNoCases,
+ waitReasonChanSendNilChan,
+ waitReasonChanReceiveNilChan:
+ // Select with no cases or communicating on nil channels
+ // make goroutines unrunnable by definition.
+ return false
+ case waitReasonChanReceive,
+ waitReasonSelect,
+ waitReasonChanSend:
+ // Cycle all through all *sudog to check whether
+ // the goroutine is waiting on a marked channel.
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) {
+ return true
+ }
+ }
+ return false
+ case waitReasonSyncCondWait,
+ waitReasonSyncWaitGroupWait,
+ waitReasonSyncMutexLock,
+ waitReasonSyncRWMutexLock,
+ waitReasonSyncRWMutexRLock:
+ // If waiting on mutexes, wait groups, or condition variables,
+ // check if the synchronization primitive attached to the sudog is marked.
+ if gp.waiting != nil {
+ return isMarkedOrNotInHeap(gp.waiting.elem.get())
+ }
+ }
+ return true
+}
+
+// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist.
+// If so, it adds them into root set and increments work.markrootJobs accordingly.
+// Returns true if we need to run another phase of markroots; returns false otherwise.
+func findMaybeRunnableGoroutines() (moreWork bool) {
+ oldRootJobs := work.markrootJobs.Load()
+
+ // To begin with we have a set of unchecked stackRoots between
+ // vIndex and ivIndex. During the loop, anything < vIndex should be
+ // valid stackRoots and anything >= ivIndex should be invalid stackRoots.
+ // The loop terminates when the two indices meet.
+ var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots
+ // Reorder goroutine list
+ for vIndex < ivIndex {
+ if work.stackRoots[vIndex].isMaybeRunnable() {
+ vIndex = vIndex + 1
+ continue
+ }
+ for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 {
+ if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() {
+ work.stackRoots[ivIndex] = work.stackRoots[vIndex]
+ work.stackRoots[vIndex] = gp
+ vIndex = vIndex + 1
+ break
+ }
+ }
+ }
+
+ newRootJobs := work.baseStacks + uint32(vIndex)
+ if newRootJobs > oldRootJobs {
+ work.nMaybeRunnableStackRoots = vIndex
+ work.markrootJobs.Store(newRootJobs)
+ }
+ return newRootJobs > oldRootJobs
+}
+
+// setSyncObjectsUntraceable scans allgs and sets the elem and c fields of all sudogs to
+// an untraceable pointer. This prevents the GC from marking these objects as live in memory
+// by following these pointers when running deadlock detection.
+func setSyncObjectsUntraceable() {
+ assertWorldStopped()
+
+ forEachGRace(func(gp *g) {
+ // Set as untraceable all synchronization objects of goroutines
+ // blocked at concurrency operations that could leak.
+ switch {
+ case gp.waitreason.isSyncWait():
+ // Synchronization primitives are reachable from the *sudog
+ // via the elem field.
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ sg.elem.setUntraceable()
+ }
+ case gp.waitreason.isChanWait():
+ // Channels and select statements are reachable from the *sudog via the c field.
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ sg.c.setUntraceable()
+ }
+ }
+ })
+}
+
+// gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values.
+// Should be invoked after the goroutine leak detection phase.
+func gcRestoreSyncObjects() {
+ assertWorldStopped()
+
+ forEachGRace(func(gp *g) {
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ sg.elem.setTraceable()
+ sg.c.setTraceable()
+ }
+ })
+}
+
+// findGoroutineLeaks scans the remaining stackRoots and marks any which are
+// blocked over exclusively unreachable concurrency primitives as leaked (deadlocked).
+// Returns true if the goroutine leak check was performed (or unnecessary).
+// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines.
+func findGoroutineLeaks() bool {
+ assertWorldStopped()
+
+ // Report goroutine leaks and mark them unreachable, then resume marking.
+ // We still need to mark these unreachable *g structs because they
+ // get reused, but their stacks won't get scanned.
+ if work.nMaybeRunnableStackRoots == work.nStackRoots {
+ // nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked.
+ return true
+ }
+
+ // Check whether any more maybe-runnable goroutines can be found by the GC.
+ if findMaybeRunnableGoroutines() {
+ // We found more work, so we need to resume the marking phase.
+ return false
+ }
+
+ // For the remaining goroutines, mark them as unreachable and leaked.
+ work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots
+
+ for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ {
+ gp := work.stackRoots[i]
+ casgstatus(gp, _Gwaiting, _Gleaked)
+
+ // Add the primitives causing the goroutine leaks
+ // to the GC work queue, to ensure they are marked.
+ //
+ // NOTE(vsaioc): these primitives should also be reachable
+ // from the goroutine's stack, but let's play it safe.
+ switch {
+ case gp.waitreason.isChanWait():
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ shade(sg.c.uintptr())
+ }
+ case gp.waitreason.isSyncWait():
+ for sg := gp.waiting; sg != nil; sg = sg.waitlink {
+ shade(sg.elem.uintptr())
+ }
+ }
+ }
+ // Put the remaining roots as ready for marking and drain them.
+ work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots))
+ work.nMaybeRunnableStackRoots = work.nStackRoots
+ return true
+}
+
// World must be stopped and mark assists and background workers must be
// disabled.
func gcMarkTermination(stw worldStop) {
@@ -1199,7 +1462,18 @@ func gcMarkTermination(stw worldStop) {
throw("non-concurrent sweep failed to drain all sweep queues")
}
+ if work.goroutineLeak.enabled {
+ // Restore the elem and c fields of all sudogs to their original values.
+ gcRestoreSyncObjects()
+ }
+
+ var goroutineLeakDone bool
systemstack(func() {
+ // Pull the GC out of goroutine leak detection mode.
+ work.goroutineLeak.enabled = false
+ goroutineLeakDone = work.goroutineLeak.done
+ work.goroutineLeak.done = false
+
// The memstats updated above must be updated with the world
// stopped to ensure consistency of some values, such as
// sched.idleTime and sched.totaltime. memstats also include
@@ -1273,7 +1547,11 @@ func gcMarkTermination(stw worldStop) {
printlock()
print("gc ", memstats.numgc,
" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
- util, "%: ")
+ util, "%")
+ if goroutineLeakDone {
+ print(" (checking for goroutine leaks)")
+ }
+ print(": ")
prev := work.tSweepTerm
for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
if i != 0 {
@@ -1647,8 +1925,8 @@ func gcMark(startTime int64) {
work.tstart = startTime
// Check that there's no marking work remaining.
- if work.full != 0 || work.markrootNext < work.markrootJobs {
- print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
+ if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs {
+ print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
panic("non-empty mark queue after concurrent mark")
}
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index f85ebda260..ba3824f00d 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -50,9 +50,58 @@ const (
//
// Must be a multiple of the pageInUse bitmap element size and
// must also evenly divide pagesPerArena.
- pagesPerSpanRoot = 512
+ pagesPerSpanRoot = min(512, pagesPerArena)
)
+// internalBlocked returns true if the goroutine is blocked due to an
+// internal (non-leaking) waitReason, e.g. waiting for the netpoller or garbage collector.
+// Such goroutines are never leak detection candidates according to the GC.
+//
+//go:nosplit
+func (gp *g) internalBlocked() bool {
+ reason := gp.waitreason
+ return reason < waitReasonChanReceiveNilChan || waitReasonSyncWaitGroupWait < reason
+}
+
+// allGsSnapshotSortedForGC takes a snapshot of allgs and returns a sorted
+// array of Gs. The array is sorted by the G's status, with running Gs
+// first, followed by blocked Gs. The returned index indicates the cutoff
+// between runnable and blocked Gs.
+//
+// The world must be stopped or allglock must be held.
+func allGsSnapshotSortedForGC() ([]*g, int) {
+ assertWorldStoppedOrLockHeld(&allglock)
+
+ // Reset the status of leaked goroutines in order to improve
+ // the precision of goroutine leak detection.
+ for _, gp := range allgs {
+ gp.atomicstatus.CompareAndSwap(_Gleaked, _Gwaiting)
+ }
+
+ allgsSorted := make([]*g, len(allgs))
+
+ // Indices cutting off runnable and blocked Gs.
+ var currIndex, blockedIndex = 0, len(allgsSorted) - 1
+ for _, gp := range allgs {
+ // not sure if we need atomic load because we are stopping the world,
+ // but do it just to be safe for now
+ if status := readgstatus(gp); status != _Gwaiting || gp.internalBlocked() {
+ allgsSorted[currIndex] = gp
+ currIndex++
+ } else {
+ allgsSorted[blockedIndex] = gp
+ blockedIndex--
+ }
+ }
+
+ // Because the world is stopped or allglock is held, allgadd
+ // cannot happen concurrently with this. allgs grows
+ // monotonically and existing entries never change, so we can
+ // simply return a copy of the slice header. For added safety,
+ // we trim everything past len because that can still change.
+ return allgsSorted, blockedIndex + 1
+}
+
// gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
@@ -102,11 +151,20 @@ func gcPrepareMarkRoots() {
// ignore them because they begin life without any roots, so
// there's nothing to scan, and any roots they create during
// the concurrent phase will be caught by the write barrier.
- work.stackRoots = allGsSnapshot()
+ if work.goroutineLeak.enabled {
+ // goroutine leak finder GC --- only prepare runnable
+ // goroutines for marking.
+ work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC()
+ } else {
+ // regular GC --- scan every goroutine
+ work.stackRoots = allGsSnapshot()
+ work.nMaybeRunnableStackRoots = len(work.stackRoots)
+ }
+
work.nStackRoots = len(work.stackRoots)
- work.markrootNext = 0
- work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
+ work.markrootNext.Store(0)
+ work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots))
// Calculate base indexes of each root type
work.baseData = uint32(fixedRootCount)
@@ -119,8 +177,8 @@ func gcPrepareMarkRoots() {
// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
- if work.markrootNext < work.markrootJobs {
- print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
+ if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs {
+ print(next, " of ", jobs, " markroot jobs done\n")
throw("left over markroot jobs")
}
@@ -858,7 +916,7 @@ func scanstack(gp *g, gcw *gcWork) int64 {
case _Grunning:
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
throw("scanstack: goroutine not stopped")
- case _Grunnable, _Gsyscall, _Gwaiting:
+ case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
// ok
}
@@ -1126,6 +1184,28 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) {
gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}
+// gcNextMarkRoot safely increments work.markrootNext and returns the
+// index of the next root job. The returned boolean is true if the root job
+// is valid, and false if there are no more root jobs to be claimed,
+// i.e. work.markrootNext >= work.markrootJobs.
+func gcNextMarkRoot() (uint32, bool) {
+ if !work.goroutineLeak.enabled {
+ // If not running goroutine leak detection, assume regular GC behavior.
+ job := work.markrootNext.Add(1) - 1
+ return job, job < work.markrootJobs.Load()
+ }
+
+ // Otherwise, use a CAS loop to increment markrootNext.
+ for next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs; next = work.markrootNext.Load() {
+ // There is still work available at the moment.
+ if work.markrootNext.CompareAndSwap(next, next+1) {
+ // We manage to snatch a root job. Return the root index.
+ return next, true
+ }
+ }
+ return 0, false
+}
+
// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
@@ -1184,13 +1264,12 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
}
}
- // Drain root marking jobs.
- if work.markrootNext < work.markrootJobs {
+ if work.markrootNext.Load() < work.markrootJobs.Load() {
// Stop if we're preemptible, if someone wants to STW, or if
// someone is calling forEachP.
for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
- job := atomic.Xadd(&work.markrootNext, +1) - 1
- if job >= work.markrootJobs {
+ job, ok := gcNextMarkRoot()
+ if !ok {
break
}
markroot(gcw, job, flushBgCredit)
@@ -1342,9 +1421,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
if b = gcw.tryGetObj(); b == 0 {
if s = gcw.tryGetSpan(); s == 0 {
// Try to do a root job.
- if work.markrootNext < work.markrootJobs {
- job := atomic.Xadd(&work.markrootNext, +1) - 1
- if job < work.markrootJobs {
+ if work.markrootNext.Load() < work.markrootJobs.Load() {
+ job, ok := gcNextMarkRoot()
+ if ok {
workFlushed += markroot(gcw, job, false)
continue
}
diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go
index 53fcd3d966..3975e1e76b 100644
--- a/src/runtime/mgcmark_greenteagc.go
+++ b/src/runtime/mgcmark_greenteagc.go
@@ -1143,7 +1143,7 @@ func gcMarkWorkAvailable() bool {
if !work.full.empty() {
return true // global work available
}
- if work.markrootNext < work.markrootJobs {
+ if work.markrootNext.Load() < work.markrootJobs.Load() {
return true // root scan work available
}
if work.spanqMask.any() {
diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go
index e450503291..9838887f7b 100644
--- a/src/runtime/mgcmark_nogreenteagc.go
+++ b/src/runtime/mgcmark_nogreenteagc.go
@@ -124,7 +124,7 @@ func gcMarkWorkAvailable() bool {
if !work.full.empty() {
return true // global work available
}
- if work.markrootNext < work.markrootJobs {
+ if work.markrootNext.Load() < work.markrootJobs.Load() {
return true // root scan work available
}
return false
diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go
index 9c4cf1f277..7b86ae8ffc 100644
--- a/src/runtime/mgcscavenge_test.go
+++ b/src/runtime/mgcscavenge_test.go
@@ -121,13 +121,17 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
max: PallocChunkPages,
want: BitRange{41, 1},
},
- "MultiMin1": {
- alloc: []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
+ }
+ if PallocChunkPages >= 512 {
+ // avoid constant overflow when PallocChunkPages is small
+ var pallocChunkPages uint = PallocChunkPages
+ tests["MultiMin1"] = test{
+ alloc: []BitRange{{0, 63}, {65, 20}, {87, pallocChunkPages - 87}},
scavenged: []BitRange{{86, 1}},
min: 1,
max: PallocChunkPages,
want: BitRange{85, 1},
- },
+ }
}
// Try out different page minimums.
for m := uintptr(1); m <= 64; m *= 2 {
@@ -162,25 +166,27 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
max: PallocChunkPages,
want: BitRange{PallocChunkPages - uint(m), uint(m)},
}
- tests["Straddle64"+suffix] = test{
- alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
- min: m,
- max: 2 * m,
- want: BitRange{64 - uint(m), 2 * uint(m)},
- }
- tests["BottomEdge64WithFull"+suffix] = test{
- alloc: []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
- scavenged: []BitRange{{1, 10}},
- min: m,
- max: 3 * m,
- want: BitRange{128, 3 * uint(m)},
- }
- tests["BottomEdge64WithPocket"+suffix] = test{
- alloc: []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
- scavenged: []BitRange{{1, 10}},
- min: m,
- max: 3 * m,
- want: BitRange{128, 3 * uint(m)},
+ if PallocChunkPages >= 512 {
+ tests["Straddle64"+suffix] = test{
+ alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
+ min: m,
+ max: 2 * m,
+ want: BitRange{64 - uint(m), 2 * uint(m)},
+ }
+ tests["BottomEdge64WithFull"+suffix] = test{
+ alloc: []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
+ scavenged: []BitRange{{1, 10}},
+ min: m,
+ max: 3 * m,
+ want: BitRange{128, 3 * uint(m)},
+ }
+ tests["BottomEdge64WithPocket"+suffix] = test{
+ alloc: []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
+ scavenged: []BitRange{{1, 10}},
+ min: m,
+ max: 3 * m,
+ want: BitRange{128, 3 * uint(m)},
+ }
}
tests["Max0"+suffix] = test{
scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
@@ -204,23 +210,29 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
}
}
if m > 1 {
- tests["MaxUnaligned"+suffix] = test{
- scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
- min: m,
- max: m - 2,
- want: BitRange{PallocChunkPages - uint(m), uint(m)},
- }
- tests["SkipSmall"+suffix] = test{
- alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
- min: m,
- max: m,
- want: BitRange{64 - uint(m), uint(m)},
+ if PallocChunkPages >= m*2 {
+ tests["MaxUnaligned"+suffix] = test{
+ scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
+ min: m,
+ max: m - 2,
+ want: BitRange{PallocChunkPages - uint(m), uint(m)},
+ }
}
- tests["SkipMisaligned"+suffix] = test{
- alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
- min: m,
- max: m,
- want: BitRange{64 - uint(m), uint(m)},
+ if PallocChunkPages >= 512 {
+ // avoid constant overflow when PallocChunkPages is small
+ var PallocChunkPages uint = PallocChunkPages
+ tests["SkipSmall"+suffix] = test{
+ alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
+ min: m,
+ max: m,
+ want: BitRange{64 - uint(m), uint(m)},
+ }
+ tests["SkipMisaligned"+suffix] = test{
+ alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
+ min: m,
+ max: m,
+ want: BitRange{64 - uint(m), uint(m)},
+ }
}
tests["MaxLessThan"+suffix] = test{
scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
@@ -641,7 +653,7 @@ func TestScavengeIndex(t *testing.T) {
mark func(markFunc)
find func(findFunc)
}
- for _, test := range []testCase{
+ tests := []testCase{
{
name: "Uninitialized",
mark: func(_ markFunc) {},
@@ -693,26 +705,6 @@ func TestScavengeIndex(t *testing.T) {
},
},
{
- name: "TwoChunks",
- mark: func(mark markFunc) {
- mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
- },
- find: func(find findFunc) {
- find(BaseChunkIdx+1, 127)
- find(BaseChunkIdx, PallocChunkPages-1)
- },
- },
- {
- name: "TwoChunksOffset",
- mark: func(mark markFunc) {
- mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
- },
- find: func(find findFunc) {
- find(BaseChunkIdx+8, 128)
- find(BaseChunkIdx+7, PallocChunkPages-1)
- },
- },
- {
name: "SevenChunksOffset",
mark: func(mark markFunc) {
mark(PageBase(BaseChunkIdx+6, 11), PageBase(BaseChunkIdx+13, 15))
@@ -793,7 +785,32 @@ func TestScavengeIndex(t *testing.T) {
}
},
},
- } {
+ }
+ if PallocChunkPages >= 512 {
+ tests = append(tests,
+ testCase{
+ name: "TwoChunks",
+ mark: func(mark markFunc) {
+ mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
+ },
+ find: func(find findFunc) {
+ find(BaseChunkIdx+1, 127)
+ find(BaseChunkIdx, PallocChunkPages-1)
+ },
+ },
+ testCase{
+ name: "TwoChunksOffset",
+ mark: func(mark markFunc) {
+ mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
+ },
+ find: func(find findFunc) {
+ find(BaseChunkIdx+8, 128)
+ find(BaseChunkIdx+7, PallocChunkPages-1)
+ },
+ },
+ )
+ }
+ for _, test := range tests {
test := test
t.Run("Bg/"+test.name, func(t *testing.T) {
mark, find, nextGen := setup(t, false)
@@ -830,8 +847,10 @@ func TestScavengeIndex(t *testing.T) {
}
func TestScavChunkDataPack(t *testing.T) {
- if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) {
- t.Error("failed pack/unpack check for scavChunkData 1")
+ if PallocChunkPages >= 512 {
+ if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) {
+ t.Error("failed pack/unpack check for scavChunkData 1")
+ }
}
if !CheckPackScavChunkData(^uint32(0), 12, 0, 0b00) {
t.Error("failed pack/unpack check for scavChunkData 2")
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 049b7798a8..f2dc3717b1 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -47,7 +47,7 @@ const (
//
// Must be a multiple of the pageInUse bitmap element size and
// must also evenly divide pagesPerArena.
- pagesPerReclaimerChunk = 512
+ pagesPerReclaimerChunk = min(512, pagesPerArena)
// physPageAlignedStacks indicates whether stack allocations must be
// physical page aligned. This is a requirement for MAP_STACK on
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 83db005051..a30434a534 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -48,6 +48,7 @@
package runtime
import (
+ "internal/goarch"
"internal/runtime/atomic"
"internal/runtime/gc"
"unsafe"
@@ -55,10 +56,12 @@ import (
const (
// The size of a bitmap chunk, i.e. the amount of bits (that is, pages) to consider
- // in the bitmap at once.
+ // in the bitmap at once. It is 4MB on most platforms, except on Wasm it is 512KB.
+ // We use a smaller chunk size on Wasm for the same reason as the smaller arena
+ // size (see heapArenaBytes).
pallocChunkPages = 1 << logPallocChunkPages
pallocChunkBytes = pallocChunkPages * pageSize
- logPallocChunkPages = 9
+ logPallocChunkPages = 9*(1-goarch.IsWasm) + 6*goarch.IsWasm
logPallocChunkBytes = logPallocChunkPages + gc.PageShift
// The number of radix bits for each level.
@@ -220,6 +223,7 @@ type pageAlloc struct {
// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
// ------------------------------------------------
// 32 | 0 | 10 | 128 KiB
+ // 32 (wasm) | 0 | 13 | 128 KiB
// 33 (iOS) | 0 | 11 | 256 KiB
// 48 | 13 | 13 | 1 MiB
//
diff --git a/src/runtime/mpagealloc_test.go b/src/runtime/mpagealloc_test.go
index f2b82e3f50..ded7a79922 100644
--- a/src/runtime/mpagealloc_test.go
+++ b/src/runtime/mpagealloc_test.go
@@ -343,38 +343,6 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx: {{0, 25}},
},
},
- "AllFree64": {
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {},
- },
- scav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{21, 1}, {63, 65}},
- },
- hits: []hit{
- {64, PageBase(BaseChunkIdx, 0), 2 * PageSize},
- {64, PageBase(BaseChunkIdx, 64), 64 * PageSize},
- {64, PageBase(BaseChunkIdx, 128), 0},
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 192}},
- },
- },
- "AllFree65": {
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {},
- },
- scav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{129, 1}},
- },
- hits: []hit{
- {65, PageBase(BaseChunkIdx, 0), 0},
- {65, PageBase(BaseChunkIdx, 65), PageSize},
- {65, PageBase(BaseChunkIdx, 130), 0},
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 195}},
- },
- },
"ExhaustPallocChunkPages-3": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
@@ -410,25 +378,6 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx: {{0, PallocChunkPages}},
},
},
- "StraddlePallocChunkPages": {
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages / 2}},
- BaseChunkIdx + 1: {{PallocChunkPages / 2, PallocChunkPages / 2}},
- },
- scav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {},
- BaseChunkIdx + 1: {{3, 100}},
- },
- hits: []hit{
- {PallocChunkPages, PageBase(BaseChunkIdx, PallocChunkPages/2), 100 * PageSize},
- {PallocChunkPages, 0, 0},
- {1, 0, 0},
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 1: {{0, PallocChunkPages}},
- },
- },
"StraddlePallocChunkPages+1": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages / 2}},
@@ -489,28 +438,6 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx + 0x41: {{0, PallocChunkPages}},
},
},
- "StraddlePallocChunkPages*2": {
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages / 2}},
- BaseChunkIdx + 1: {},
- BaseChunkIdx + 2: {{PallocChunkPages / 2, PallocChunkPages / 2}},
- },
- scav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 7}},
- BaseChunkIdx + 1: {{3, 5}, {121, 10}},
- BaseChunkIdx + 2: {{PallocChunkPages/2 + 12, 2}},
- },
- hits: []hit{
- {PallocChunkPages * 2, PageBase(BaseChunkIdx, PallocChunkPages/2), 15 * PageSize},
- {PallocChunkPages * 2, 0, 0},
- {1, 0, 0},
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 1: {{0, PallocChunkPages}},
- BaseChunkIdx + 2: {{0, PallocChunkPages}},
- },
- },
"StraddlePallocChunkPages*5/4": {
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
@@ -536,7 +463,60 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx + 3: {{0, PallocChunkPages}},
},
},
- "AllFreePallocChunkPages*7+5": {
+ }
+ if PallocChunkPages >= 512 {
+ tests["AllFree64"] = test{
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{21, 1}, {63, 65}},
+ },
+ hits: []hit{
+ {64, PageBase(BaseChunkIdx, 0), 2 * PageSize},
+ {64, PageBase(BaseChunkIdx, 64), 64 * PageSize},
+ {64, PageBase(BaseChunkIdx, 128), 0},
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, 192}},
+ },
+ }
+ tests["AllFree65"] = test{
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{129, 1}},
+ },
+ hits: []hit{
+ {65, PageBase(BaseChunkIdx, 0), 0},
+ {65, PageBase(BaseChunkIdx, 65), PageSize},
+ {65, PageBase(BaseChunkIdx, 130), 0},
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, 195}},
+ },
+ }
+ tests["StraddlePallocChunkPages"] = test{
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages / 2}},
+ BaseChunkIdx + 1: {{PallocChunkPages / 2, PallocChunkPages / 2}},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {},
+ BaseChunkIdx + 1: {{3, 100}},
+ },
+ hits: []hit{
+ {PallocChunkPages, PageBase(BaseChunkIdx, PallocChunkPages/2), 100 * PageSize},
+ {PallocChunkPages, 0, 0},
+ {1, 0, 0},
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 1: {{0, PallocChunkPages}},
+ },
+ }
+ tests["AllFreePallocChunkPages*7+5"] = test{
before: map[ChunkIdx][]BitRange{
BaseChunkIdx: {},
BaseChunkIdx + 1: {},
@@ -572,7 +552,29 @@ func TestPageAllocAlloc(t *testing.T) {
BaseChunkIdx + 6: {{0, PallocChunkPages}},
BaseChunkIdx + 7: {{0, 6}},
},
- },
+ }
+ tests["StraddlePallocChunkPages*2"] = test{
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages / 2}},
+ BaseChunkIdx + 1: {},
+ BaseChunkIdx + 2: {{PallocChunkPages / 2, PallocChunkPages / 2}},
+ },
+ scav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, 7}},
+ BaseChunkIdx + 1: {{3, 5}, {121, 10}},
+ BaseChunkIdx + 2: {{PallocChunkPages/2 + 12, 2}},
+ },
+ hits: []hit{
+ {PallocChunkPages * 2, PageBase(BaseChunkIdx, PallocChunkPages/2), 15 * PageSize},
+ {PallocChunkPages * 2, 0, 0},
+ {1, 0, 0},
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 1: {{0, PallocChunkPages}},
+ BaseChunkIdx + 2: {{0, PallocChunkPages}},
+ },
+ }
}
// Disable these tests on iOS since we have a small address space.
// See #46860.
@@ -754,12 +756,13 @@ func TestPageAllocFree(t *testing.T) {
if GOOS == "openbsd" && testing.Short() {
t.Skip("skipping because virtual memory is limited; see #36210")
}
- tests := map[string]struct {
+ type test struct {
before map[ChunkIdx][]BitRange
after map[ChunkIdx][]BitRange
npages uintptr
frees []uintptr
- }{
+ }
+ tests := map[string]test{
"Free1": {
npages: 1,
before: map[ChunkIdx][]BitRange{
@@ -840,34 +843,6 @@ func TestPageAllocFree(t *testing.T) {
BaseChunkIdx: {{25, PallocChunkPages - 25}},
},
},
- "Free64": {
- npages: 64,
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- },
- frees: []uintptr{
- PageBase(BaseChunkIdx, 0),
- PageBase(BaseChunkIdx, 64),
- PageBase(BaseChunkIdx, 128),
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{192, PallocChunkPages - 192}},
- },
- },
- "Free65": {
- npages: 65,
- before: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- },
- frees: []uintptr{
- PageBase(BaseChunkIdx, 0),
- PageBase(BaseChunkIdx, 65),
- PageBase(BaseChunkIdx, 130),
- },
- after: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{195, PallocChunkPages - 195}},
- },
- },
"FreePallocChunkPages": {
npages: PallocChunkPages,
before: map[ChunkIdx][]BitRange{
@@ -965,6 +940,38 @@ func TestPageAllocFree(t *testing.T) {
},
},
}
+ if PallocChunkPages >= 512 {
+ // avoid constant overflow when PallocChunkPages is small
+ var PallocChunkPages uint = PallocChunkPages
+ tests["Free64"] = test{
+ npages: 64,
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ },
+ frees: []uintptr{
+ PageBase(BaseChunkIdx, 0),
+ PageBase(BaseChunkIdx, 64),
+ PageBase(BaseChunkIdx, 128),
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{192, PallocChunkPages - 192}},
+ },
+ }
+ tests["Free65"] = test{
+ npages: 65,
+ before: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ },
+ frees: []uintptr{
+ PageBase(BaseChunkIdx, 0),
+ PageBase(BaseChunkIdx, 65),
+ PageBase(BaseChunkIdx, 130),
+ },
+ after: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{195, PallocChunkPages - 195}},
+ },
+ }
+ }
for name, v := range tests {
v := v
t.Run(name, func(t *testing.T) {
diff --git a/src/runtime/mpagecache_test.go b/src/runtime/mpagecache_test.go
index 6cb0620f7b..19b4e04807 100644
--- a/src/runtime/mpagecache_test.go
+++ b/src/runtime/mpagecache_test.go
@@ -269,23 +269,6 @@ func TestPageAllocAllocToCache(t *testing.T) {
afterScav map[ChunkIdx][]BitRange
}
tests := map[string]test{
- "AllFree": {
- beforeAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {},
- },
- beforeScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{1, 1}, {64, 64}},
- },
- hits: []PageCache{
- NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
- NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
- NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
- NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
- },
- afterAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 256}},
- },
- },
"ManyArena": {
beforeAlloc: map[ChunkIdx][]BitRange{
BaseChunkIdx: {{0, PallocChunkPages}},
@@ -306,72 +289,92 @@ func TestPageAllocAllocToCache(t *testing.T) {
BaseChunkIdx + 2: {{0, PallocChunkPages}},
},
},
- "NotContiguous": {
+
+ "Fail": {
beforeAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 0xff: {{0, 0}},
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ },
+ hits: []PageCache{
+ NewPageCache(0, 0, 0),
+ NewPageCache(0, 0, 0),
+ NewPageCache(0, 0, 0),
+ },
+ afterAlloc: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ },
+ },
+ "RetainScavBits": {
+ beforeAlloc: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, 1}, {10, 2}},
},
beforeScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 0xff: {{31, 67}},
+ BaseChunkIdx: {{0, 4}, {11, 1}},
},
hits: []PageCache{
- NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
+ NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0x1|(0x3<<10)), 0x7<<1),
},
afterAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 0xff: {{0, 64}},
+ BaseChunkIdx: {{0, 64}},
},
afterScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
- BaseChunkIdx + 0xff: {{64, 34}},
+ BaseChunkIdx: {{0, 1}, {11, 1}},
},
},
- "First": {
+ }
+ if PallocChunkPages >= 512 {
+ tests["AllFree"] = test{
beforeAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
+ BaseChunkIdx: {},
},
beforeScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
+ BaseChunkIdx: {{1, 1}, {64, 64}},
},
hits: []PageCache{
- NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
- NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
+ NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
+ NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
+ NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
+ NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
},
afterAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 128}},
+ BaseChunkIdx: {{0, 256}},
},
- },
- "Fail": {
+ }
+ tests["NotContiguous"] = test{
beforeAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 0xff: {{0, 0}},
+ },
+ beforeScav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 0xff: {{31, 67}},
},
hits: []PageCache{
- NewPageCache(0, 0, 0),
- NewPageCache(0, 0, 0),
- NewPageCache(0, 0, 0),
+ NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
},
afterAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 0xff: {{0, 64}},
},
- },
- "RetainScavBits": {
+ afterScav: map[ChunkIdx][]BitRange{
+ BaseChunkIdx: {{0, PallocChunkPages}},
+ BaseChunkIdx + 0xff: {{64, 34}},
+ },
+ }
+ tests["First"] = test{
beforeAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 1}, {10, 2}},
+ BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
},
beforeScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 4}, {11, 1}},
+ BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
},
hits: []PageCache{
- NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0x1|(0x3<<10)), 0x7<<1),
+ NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
+ NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
},
afterAlloc: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 64}},
- },
- afterScav: map[ChunkIdx][]BitRange{
- BaseChunkIdx: {{0, 1}, {11, 1}},
+ BaseChunkIdx: {{0, 128}},
},
- },
+ }
}
// Disable these tests on iOS since we have a small address space.
// See #46860.
diff --git a/src/runtime/mpallocbits_test.go b/src/runtime/mpallocbits_test.go
index 5095e24220..cf49f77507 100644
--- a/src/runtime/mpallocbits_test.go
+++ b/src/runtime/mpallocbits_test.go
@@ -54,35 +54,39 @@ func TestPallocBitsAllocRange(t *testing.T) {
want[PallocChunkPages/64-1] = 1 << 63
test(t, PallocChunkPages-1, 1, want)
})
- t.Run("Inner", func(t *testing.T) {
- want := new(PallocBits)
- want[2] = 0x3e
- test(t, 129, 5, want)
- })
- t.Run("Aligned", func(t *testing.T) {
- want := new(PallocBits)
- want[2] = ^uint64(0)
- want[3] = ^uint64(0)
- test(t, 128, 128, want)
- })
- t.Run("Begin", func(t *testing.T) {
- want := new(PallocBits)
- want[0] = ^uint64(0)
- want[1] = ^uint64(0)
- want[2] = ^uint64(0)
- want[3] = ^uint64(0)
- want[4] = ^uint64(0)
- want[5] = 0x1
- test(t, 0, 321, want)
- })
- t.Run("End", func(t *testing.T) {
- want := new(PallocBits)
- want[PallocChunkPages/64-1] = ^uint64(0)
- want[PallocChunkPages/64-2] = ^uint64(0)
- want[PallocChunkPages/64-3] = ^uint64(0)
- want[PallocChunkPages/64-4] = 1 << 63
- test(t, PallocChunkPages-(64*3+1), 64*3+1, want)
- })
+ if PallocChunkPages >= 512 {
+ t.Run("Inner", func(t *testing.T) {
+ want := new(PallocBits)
+ want[:][2] = 0x3e
+ test(t, 129, 5, want)
+ })
+ t.Run("Aligned", func(t *testing.T) {
+ want := new(PallocBits)
+ want[:][2] = ^uint64(0)
+ want[:][3] = ^uint64(0)
+ test(t, 128, 128, want)
+ })
+ t.Run("Begin", func(t *testing.T) {
+ want := new(PallocBits)
+ want[:][0] = ^uint64(0)
+ want[:][1] = ^uint64(0)
+ want[:][2] = ^uint64(0)
+ want[:][3] = ^uint64(0)
+ want[:][4] = ^uint64(0)
+ want[:][5] = 0x1
+ test(t, 0, 321, want)
+ })
+ t.Run("End", func(t *testing.T) {
+ // avoid constant overflow when PallocChunkPages is small
+ var PallocChunkPages uint = PallocChunkPages
+ want := new(PallocBits)
+ want[PallocChunkPages/64-1] = ^uint64(0)
+ want[PallocChunkPages/64-2] = ^uint64(0)
+ want[PallocChunkPages/64-3] = ^uint64(0)
+ want[PallocChunkPages/64-4] = 1 << 63
+ test(t, PallocChunkPages-(64*3+1), 64*3+1, want)
+ })
+ }
t.Run("All", func(t *testing.T) {
want := new(PallocBits)
for i := range want {
@@ -118,10 +122,11 @@ func TestMallocBitsPopcntRange(t *testing.T) {
i, n uint // bit range to popcnt over.
want uint // expected popcnt result on that range.
}
- tests := map[string]struct {
+ type testCase struct {
init []BitRange // bit ranges to set to 1 in the bitmap.
tests []test // a set of popcnt tests to run over the bitmap.
- }{
+ }
+ tests := map[string]testCase{
"None": {
tests: []test{
{0, 1, 0},
@@ -157,7 +162,9 @@ func TestMallocBitsPopcntRange(t *testing.T) {
{0, PallocChunkPages, PallocChunkPages / 2},
},
},
- "OddBound": {
+ }
+ if PallocChunkPages >= 512 {
+ tests["OddBound"] = testCase{
init: []BitRange{{0, 111}},
tests: []test{
{0, 1, 1},
@@ -172,8 +179,8 @@ func TestMallocBitsPopcntRange(t *testing.T) {
{PallocChunkPages / 2, PallocChunkPages / 2, 0},
{0, PallocChunkPages, 111},
},
- },
- "Scattered": {
+ }
+ tests["Scattered"] = testCase{
init: []BitRange{
{1, 3}, {5, 1}, {7, 1}, {10, 2}, {13, 1}, {15, 4},
{21, 1}, {23, 1}, {26, 2}, {30, 5}, {36, 2}, {40, 3},
@@ -190,7 +197,7 @@ func TestMallocBitsPopcntRange(t *testing.T) {
{1, 128, 74},
{0, PallocChunkPages, 75},
},
- },
+ }
}
for name, v := range tests {
v := v
@@ -251,23 +258,25 @@ func TestPallocBitsSummarize(t *testing.T) {
PackPallocSum(11, 23, 23),
},
}
- tests["StartMaxEnd"] = test{
- free: []BitRange{{0, 4}, {50, 100}, {PallocChunkPages - 4, 4}},
- hits: []PallocSum{
- PackPallocSum(4, 100, 4),
- },
- }
- tests["OnlyMax"] = test{
- free: []BitRange{{1, 20}, {35, 241}, {PallocChunkPages - 50, 30}},
- hits: []PallocSum{
- PackPallocSum(0, 241, 0),
- },
- }
- tests["MultiMax"] = test{
- free: []BitRange{{35, 2}, {40, 5}, {100, 5}},
- hits: []PallocSum{
- PackPallocSum(0, 5, 0),
- },
+ if PallocChunkPages >= 512 {
+ tests["StartMaxEnd"] = test{
+ free: []BitRange{{0, 4}, {50, 100}, {PallocChunkPages - 4, 4}},
+ hits: []PallocSum{
+ PackPallocSum(4, 100, 4),
+ },
+ }
+ tests["OnlyMax"] = test{
+ free: []BitRange{{1, 20}, {35, 241}, {PallocChunkPages - 50, 30}},
+ hits: []PallocSum{
+ PackPallocSum(0, 241, 0),
+ },
+ }
+ tests["MultiMax"] = test{
+ free: []BitRange{{35, 2}, {40, 5}, {100, 5}},
+ hits: []PallocSum{
+ PackPallocSum(0, 5, 0),
+ },
+ }
}
tests["One"] = test{
free: []BitRange{{2, 1}},
@@ -329,12 +338,13 @@ func BenchmarkPallocBitsSummarize(b *testing.B) {
// Ensures page allocation works.
func TestPallocBitsAlloc(t *testing.T) {
- tests := map[string]struct {
+ type test struct {
before []BitRange
after []BitRange
npages uintptr
hits []uint
- }{
+ }
+ tests := map[string]test{
"AllFree1": {
npages: 1,
hits: []uint{0, 1, 2, 3, 4, 5},
@@ -350,22 +360,6 @@ func TestPallocBitsAlloc(t *testing.T) {
hits: []uint{0, 5, 10, 15, 20},
after: []BitRange{{0, 25}},
},
- "AllFree64": {
- npages: 64,
- hits: []uint{0, 64, 128},
- after: []BitRange{{0, 192}},
- },
- "AllFree65": {
- npages: 65,
- hits: []uint{0, 65, 130},
- after: []BitRange{{0, 195}},
- },
- "SomeFree64": {
- before: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
- npages: 64,
- hits: []uint{^uint(0)},
- after: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
- },
"NoneFree1": {
before: []BitRange{{0, PallocChunkPages}},
npages: 1,
@@ -408,18 +402,38 @@ func TestPallocBitsAlloc(t *testing.T) {
hits: []uint{PallocChunkPages/2 - 3, ^uint(0)},
after: []BitRange{{0, PallocChunkPages}},
},
- "ExactFit65": {
+ }
+ if PallocChunkPages >= 512 {
+ // avoid constant overflow when PallocChunkPages is small
+ var PallocChunkPages uint = PallocChunkPages
+ tests["AllFree64"] = test{
+ npages: 64,
+ hits: []uint{0, 64, 128},
+ after: []BitRange{{0, 192}},
+ }
+ tests["AllFree65"] = test{
+ npages: 65,
+ hits: []uint{0, 65, 130},
+ after: []BitRange{{0, 195}},
+ }
+ tests["SomeFree64"] = test{
+ before: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
+ npages: 64,
+ hits: []uint{^uint(0)},
+ after: []BitRange{{0, 32}, {64, 32}, {100, PallocChunkPages - 100}},
+ }
+ tests["ExactFit65"] = test{
before: []BitRange{{0, PallocChunkPages/2 - 31}, {PallocChunkPages/2 + 34, PallocChunkPages/2 - 34}},
npages: 65,
hits: []uint{PallocChunkPages/2 - 31, ^uint(0)},
after: []BitRange{{0, PallocChunkPages}},
- },
- "SomeFree161": {
+ }
+ tests["SomeFree161"] = test{
before: []BitRange{{0, 185}, {331, 1}},
npages: 161,
hits: []uint{332},
after: []BitRange{{0, 185}, {331, 162}},
- },
+ }
}
for name, v := range tests {
v := v
@@ -442,18 +456,13 @@ func TestPallocBitsAlloc(t *testing.T) {
// Ensures page freeing works.
func TestPallocBitsFree(t *testing.T) {
- tests := map[string]struct {
+ type test struct {
beforeInv []BitRange
afterInv []BitRange
frees []uint
npages uintptr
- }{
- "SomeFree": {
- npages: 1,
- beforeInv: []BitRange{{0, 32}, {64, 32}, {100, 1}},
- frees: []uint{32},
- afterInv: []BitRange{{0, 33}, {64, 32}, {100, 1}},
- },
+ }
+ tests := map[string]test{
"NoneFree1": {
npages: 1,
frees: []uint{0, 1, 2, 3, 4, 5},
@@ -469,16 +478,24 @@ func TestPallocBitsFree(t *testing.T) {
frees: []uint{0, 5, 10, 15, 20},
afterInv: []BitRange{{0, 25}},
},
- "NoneFree64": {
+ }
+ if PallocChunkPages >= 512 {
+ tests["SomeFree"] = test{
+ npages: 1,
+ beforeInv: []BitRange{{0, 32}, {64, 32}, {100, 1}},
+ frees: []uint{32},
+ afterInv: []BitRange{{0, 33}, {64, 32}, {100, 1}},
+ }
+ tests["NoneFree64"] = test{
npages: 64,
frees: []uint{0, 64, 128},
afterInv: []BitRange{{0, 192}},
- },
- "NoneFree65": {
+ }
+ tests["NoneFree65"] = test{
npages: 65,
frees: []uint{0, 65, 130},
afterInv: []BitRange{{0, 195}},
- },
+ }
}
for name, v := range tests {
v := v
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 97b2907652..0957e67b50 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -1259,6 +1259,20 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.P
return goroutineProfileWithLabelsConcurrent(p, labels)
}
+//go:linkname pprof_goroutineLeakProfileWithLabels
+func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+}
+
+// labels may be nil. If labels is non-nil, it must have the same length as p.
+func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ if labels != nil && len(labels) != len(p) {
+ labels = nil
+ }
+
+ return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+}
+
var goroutineProfile = struct {
sema uint32
active bool
@@ -1302,6 +1316,48 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt
return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}
+func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
+ if len(p) == 0 {
+ // An empty slice is obviously too small. Return a rough
+ // allocation estimate.
+ return work.goroutineLeak.count, false
+ }
+
+ // Use the same semaphore as goroutineProfileWithLabelsConcurrent,
+ // because ultimately we still use goroutine profiles.
+ semacquire(&goroutineProfile.sema)
+
+ // Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
+ // save the current goroutine stack, because it is obviously not leaked.
+
+ pcbuf := makeProfStack() // see saveg() for explanation
+
+ // Prepare a profile large enough to store all leaked goroutines.
+ n = work.goroutineLeak.count
+
+ if n > len(p) {
+ // There's not enough space in p to store the whole profile, so (per the
+ // contract of runtime.GoroutineProfile) we're not allowed to write to p
+ // at all and must return n, false.
+ semrelease(&goroutineProfile.sema)
+ return n, false
+ }
+
+ // Visit each leaked goroutine and try to record its stack.
+ forEachGRace(func(gp1 *g) {
+ if readgstatus(gp1) == _Gleaked {
+ doRecordGoroutineProfile(gp1, pcbuf)
+ }
+ })
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ semrelease(&goroutineProfile.sema)
+ return n, true
+}
+
func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
if len(p) == 0 {
// An empty slice is obviously too small. Return a rough
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index f47419cf7d..7610802e0f 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -777,13 +777,14 @@ func newosproc(mp *m) {
//
//go:nowritebarrierrec
//go:nosplit
-func newosproc0(mp *m, stk unsafe.Pointer) {
- // TODO: this is completely broken. The args passed to newosproc0 (in asm_amd64.s)
- // are stacksize and function, not *m and stack.
- // Check os_linux.go for an implementation that might actually work.
+func newosproc0(stacksize uintptr, fn uintptr) {
throw("bad newosproc0")
}
+//go:nosplit
+//go:nowritebarrierrec
+func libpreinit() {}
+
func exitThread(wait *atomic.Uint32) {
// We should never reach exitThread on Windows because we let
// the OS clean up threads.
diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go
index 55563009b3..b524e992b8 100644
--- a/src/runtime/pprof/pprof.go
+++ b/src/runtime/pprof/pprof.go
@@ -80,6 +80,7 @@ import (
"cmp"
"fmt"
"internal/abi"
+ "internal/goexperiment"
"internal/profilerecord"
"io"
"runtime"
@@ -105,12 +106,13 @@ import (
//
// Each Profile has a unique name. A few profiles are predefined:
//
-// goroutine - stack traces of all current goroutines
-// heap - a sampling of memory allocations of live objects
-// allocs - a sampling of all past memory allocations
-// threadcreate - stack traces that led to the creation of new OS threads
-// block - stack traces that led to blocking on synchronization primitives
-// mutex - stack traces of holders of contended mutexes
+// goroutine - stack traces of all current goroutines
+// goroutineleak - stack traces of all leaked goroutines
+// allocs - a sampling of all past memory allocations
+// heap - a sampling of memory allocations of live objects
+// threadcreate - stack traces that led to the creation of new OS threads
+// block - stack traces that led to blocking on synchronization primitives
+// mutex - stack traces of holders of contended mutexes
//
// These predefined profiles maintain themselves and panic on an explicit
// [Profile.Add] or [Profile.Remove] method call.
@@ -169,6 +171,7 @@ import (
// holds a lock for 1s while 5 other goroutines are waiting for the entire
// second to acquire the lock, its unlock call stack will report 5s of
// contention.
+
type Profile struct {
name string
mu sync.Mutex
@@ -189,6 +192,12 @@ var goroutineProfile = &Profile{
write: writeGoroutine,
}
+var goroutineLeakProfile = &Profile{
+ name: "goroutineleak",
+ count: runtime_goroutineleakcount,
+ write: writeGoroutineLeak,
+}
+
var threadcreateProfile = &Profile{
name: "threadcreate",
count: countThreadCreate,
@@ -231,6 +240,9 @@ func lockProfiles() {
"block": blockProfile,
"mutex": mutexProfile,
}
+ if goexperiment.GoroutineLeakProfile {
+ profiles.m["goroutineleak"] = goroutineLeakProfile
+ }
}
}
@@ -275,6 +287,7 @@ func Profiles() []*Profile {
all := make([]*Profile, 0, len(profiles.m))
for _, p := range profiles.m {
+
all = append(all, p)
}
@@ -747,6 +760,23 @@ func writeGoroutine(w io.Writer, debug int) error {
return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
}
+// writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection.
+// It then writes the goroutine profile, filtering for leaked goroutines.
+func writeGoroutineLeak(w io.Writer, debug int) error {
+ // Run the GC with leak detection first so that leaked goroutines
+ // may transition to the leaked state.
+ runtime_goroutineLeakGC()
+
+ // If the debug flag is set sufficiently high, just defer to writing goroutine stacks
+ // like in a regular goroutine profile. Include non-leaked goroutines, too.
+ if debug >= 2 {
+ return writeGoroutineStacks(w)
+ }
+
+ // Otherwise, write the goroutine leak profile.
+ return writeRuntimeProfile(w, debug, "goroutineleak", pprof_goroutineLeakProfileWithLabels)
+}
+
func writeGoroutineStacks(w io.Writer) error {
// We don't know how big the buffer needs to be to collect
// all the goroutines. Start with 1 MB and try a few times, doubling each time.
@@ -969,6 +999,9 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu
//go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels
func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
+//go:linkname pprof_goroutineLeakProfileWithLabels runtime.pprof_goroutineLeakProfileWithLabels
+func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
+
//go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func pprof_cyclesPerSecond() int64
diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go
index 8d37c7d3ad..690cab81ab 100644
--- a/src/runtime/pprof/runtime.go
+++ b/src/runtime/pprof/runtime.go
@@ -29,6 +29,12 @@ func runtime_setProfLabel(labels unsafe.Pointer)
// runtime_getProfLabel is defined in runtime/proflabel.go.
func runtime_getProfLabel() unsafe.Pointer
+// runtime_goroutineleakcount is defined in runtime/proc.go.
+func runtime_goroutineleakcount() int
+
+// runtime_goroutineLeakGC is defined in runtime/mgc.go.
+func runtime_goroutineLeakGC()
+
// SetGoroutineLabels sets the current goroutine's labels to match ctx.
// A new goroutine inherits the labels of the goroutine that created it.
// This is a lower-level API than [Do], which should be used instead when possible.
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index 22727df74e..5367f66213 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -160,7 +160,7 @@ func suspendG(gp *g) suspendGState {
s = _Gwaiting
fallthrough
- case _Grunnable, _Gsyscall, _Gwaiting:
+ case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked:
// Claim goroutine by setting scan bit.
// This may race with execution or readying of gp.
// The scan bit keeps it from transition state.
@@ -269,6 +269,7 @@ func resumeG(state suspendGState) {
case _Grunnable | _Gscan,
_Gwaiting | _Gscan,
+ _Gleaked | _Gscan,
_Gsyscall | _Gscan:
casfrom_Gscanstatus(gp, s, s&^_Gscan)
}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 887063638b..38299d82c0 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -517,7 +517,7 @@ func acquireSudog() *sudog {
s := pp.sudogcache[n-1]
pp.sudogcache[n-1] = nil
pp.sudogcache = pp.sudogcache[:n-1]
- if s.elem != nil {
+ if s.elem.get() != nil {
throw("acquireSudog: found s.elem != nil in cache")
}
releasem(mp)
@@ -526,7 +526,7 @@ func acquireSudog() *sudog {
//go:nosplit
func releaseSudog(s *sudog) {
- if s.elem != nil {
+ if s.elem.get() != nil {
throw("runtime: sudog with non-nil elem")
}
if s.isSelect {
@@ -541,7 +541,7 @@ func releaseSudog(s *sudog) {
if s.waitlink != nil {
throw("runtime: sudog with non-nil waitlink")
}
- if s.c != nil {
+ if s.c.get() != nil {
throw("runtime: sudog with non-nil c")
}
gp := getg()
@@ -790,7 +790,9 @@ func cpuinit(env string) {
// getGodebugEarly extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and returns it. This function exists to extract GODEBUG
// early before much of the runtime is initialized.
-func getGodebugEarly() string {
+//
+// Returns nil, false if OS doesn't provide env vars early in the init sequence.
+func getGodebugEarly() (string, bool) {
const prefix = "GODEBUG="
var env string
switch GOOS {
@@ -808,12 +810,16 @@ func getGodebugEarly() string {
s := unsafe.String(p, findnull(p))
if stringslite.HasPrefix(s, prefix) {
- env = gostring(p)[len(prefix):]
+ env = gostringnocopy(p)[len(prefix):]
break
}
}
+ break
+
+ default:
+ return "", false
}
- return env
+ return env, true
}
// The bootstrap sequence is:
@@ -860,12 +866,15 @@ func schedinit() {
// The world starts stopped.
worldStopped()
+ godebug, parsedGodebug := getGodebugEarly()
+ if parsedGodebug {
+ parseRuntimeDebugVars(godebug)
+ }
ticks.init() // run as early as possible
moduledataverify()
stackinit()
randinit() // must run before mallocinit, alginit, mcommoninit
mallocinit()
- godebug := getGodebugEarly()
cpuinit(godebug) // must run before alginit
alginit() // maps, hash, rand must not be used before this call
mcommoninit(gp.m, -1)
@@ -881,7 +890,12 @@ func schedinit() {
goenvs()
secure()
checkfds()
- parsedebugvars()
+ if !parsedGodebug {
+ // Some platforms, e.g., Windows, didn't make env vars available "early",
+ // so try again now.
+ parseRuntimeDebugVars(gogetenv("GODEBUG"))
+ }
+ finishDebugVarsSetup()
gcinit()
// Allocate stack space that can be used when crashing due to bad stack
@@ -1209,6 +1223,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
_Gscanwaiting,
_Gscanrunning,
_Gscansyscall,
+ _Gscanleaked,
_Gscanpreempted:
if newval == oldval&^_Gscan {
success = gp.atomicstatus.CompareAndSwap(oldval, newval)
@@ -1229,6 +1244,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
case _Grunnable,
_Grunning,
_Gwaiting,
+ _Gleaked,
_Gsyscall:
if newval == oldval|_Gscan {
r := gp.atomicstatus.CompareAndSwap(oldval, newval)
@@ -5552,6 +5568,14 @@ func gcount(includeSys bool) int32 {
return n
}
+// goroutineleakcount returns the number of leaked goroutines last reported by
+// the runtime.
+//
+//go:linkname goroutineleakcount runtime/pprof.runtime_goroutineleakcount
+func goroutineleakcount() int {
+ return work.goroutineLeak.count
+}
+
func mcount() int32 {
return int32(sched.mnext - sched.nmfreed)
}
diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s
index 74c57bb1dc..518fcb3b88 100644
--- a/src/runtime/rt0_aix_ppc64.s
+++ b/src/runtime/rt0_aix_ppc64.s
@@ -41,152 +41,7 @@ TEXT _main(SB),NOSPLIT,$-8
MOVD R12, CTR
BR (CTR)
-// Paramater save space required to cross-call into _cgo_sys_thread_create
-#define PARAM_SPACE 16
-
-TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8
- // Start with standard C stack frame layout and linkage.
- MOVD LR, R0
- MOVD R0, 16(R1) // Save LR in caller's frame.
- MOVW CR, R0 // Save CR in caller's frame
- MOVD R0, 8(R1)
-
- MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame.
-
- // Preserve callee-save registers.
- MOVD R14, 48+PARAM_SPACE(R1)
- MOVD R15, 56+PARAM_SPACE(R1)
- MOVD R16, 64+PARAM_SPACE(R1)
- MOVD R17, 72+PARAM_SPACE(R1)
- MOVD R18, 80+PARAM_SPACE(R1)
- MOVD R19, 88+PARAM_SPACE(R1)
- MOVD R20, 96+PARAM_SPACE(R1)
- MOVD R21,104+PARAM_SPACE(R1)
- MOVD R22, 112+PARAM_SPACE(R1)
- MOVD R23, 120+PARAM_SPACE(R1)
- MOVD R24, 128+PARAM_SPACE(R1)
- MOVD R25, 136+PARAM_SPACE(R1)
- MOVD R26, 144+PARAM_SPACE(R1)
- MOVD R27, 152+PARAM_SPACE(R1)
- MOVD R28, 160+PARAM_SPACE(R1)
- MOVD R29, 168+PARAM_SPACE(R1)
- MOVD g, 176+PARAM_SPACE(R1) // R30
- MOVD R31, 184+PARAM_SPACE(R1)
- FMOVD F14, 192+PARAM_SPACE(R1)
- FMOVD F15, 200+PARAM_SPACE(R1)
- FMOVD F16, 208+PARAM_SPACE(R1)
- FMOVD F17, 216+PARAM_SPACE(R1)
- FMOVD F18, 224+PARAM_SPACE(R1)
- FMOVD F19, 232+PARAM_SPACE(R1)
- FMOVD F20, 240+PARAM_SPACE(R1)
- FMOVD F21, 248+PARAM_SPACE(R1)
- FMOVD F22, 256+PARAM_SPACE(R1)
- FMOVD F23, 264+PARAM_SPACE(R1)
- FMOVD F24, 272+PARAM_SPACE(R1)
- FMOVD F25, 280+PARAM_SPACE(R1)
- FMOVD F26, 288+PARAM_SPACE(R1)
- FMOVD F27, 296+PARAM_SPACE(R1)
- FMOVD F28, 304+PARAM_SPACE(R1)
- FMOVD F29, 312+PARAM_SPACE(R1)
- FMOVD F30, 320+PARAM_SPACE(R1)
- FMOVD F31, 328+PARAM_SPACE(R1)
-
- // Synchronous initialization.
- MOVD $runtime·reginit(SB), R12
- MOVD R12, CTR
- BL (CTR)
-
- MOVBZ runtime·isarchive(SB), R3 // Check buildmode = c-archive
- CMP $0, R3
- BEQ done
-
- MOVD R14, _rt0_ppc64_aix_lib_argc<>(SB)
- MOVD R15, _rt0_ppc64_aix_lib_argv<>(SB)
-
- MOVD $runtime·libpreinit(SB), R12
- MOVD R12, CTR
- BL (CTR)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R12
- CMP $0, R12
- BEQ nocgo
- MOVD $_rt0_ppc64_aix_lib_go(SB), R3
- MOVD $0, R4
- MOVD R2, 40(R1)
- MOVD 8(R12), R2
- MOVD (R12), R12
- MOVD R12, CTR
- BL (CTR)
- MOVD 40(R1), R2
- BR done
-
-nocgo:
- MOVD $0x800000, R12 // stacksize = 8192KB
- MOVD R12, 8(R1)
- MOVD $_rt0_ppc64_aix_lib_go(SB), R12
- MOVD R12, 16(R1)
- MOVD $runtime·newosproc0(SB),R12
- MOVD R12, CTR
- BL (CTR)
-
-done:
- // Restore saved registers.
- MOVD 48+PARAM_SPACE(R1), R14
- MOVD 56+PARAM_SPACE(R1), R15
- MOVD 64+PARAM_SPACE(R1), R16
- MOVD 72+PARAM_SPACE(R1), R17
- MOVD 80+PARAM_SPACE(R1), R18
- MOVD 88+PARAM_SPACE(R1), R19
- MOVD 96+PARAM_SPACE(R1), R20
- MOVD 104+PARAM_SPACE(R1), R21
- MOVD 112+PARAM_SPACE(R1), R22
- MOVD 120+PARAM_SPACE(R1), R23
- MOVD 128+PARAM_SPACE(R1), R24
- MOVD 136+PARAM_SPACE(R1), R25
- MOVD 144+PARAM_SPACE(R1), R26
- MOVD 152+PARAM_SPACE(R1), R27
- MOVD 160+PARAM_SPACE(R1), R28
- MOVD 168+PARAM_SPACE(R1), R29
- MOVD 176+PARAM_SPACE(R1), g // R30
- MOVD 184+PARAM_SPACE(R1), R31
- FMOVD 196+PARAM_SPACE(R1), F14
- FMOVD 200+PARAM_SPACE(R1), F15
- FMOVD 208+PARAM_SPACE(R1), F16
- FMOVD 216+PARAM_SPACE(R1), F17
- FMOVD 224+PARAM_SPACE(R1), F18
- FMOVD 232+PARAM_SPACE(R1), F19
- FMOVD 240+PARAM_SPACE(R1), F20
- FMOVD 248+PARAM_SPACE(R1), F21
- FMOVD 256+PARAM_SPACE(R1), F22
- FMOVD 264+PARAM_SPACE(R1), F23
- FMOVD 272+PARAM_SPACE(R1), F24
- FMOVD 280+PARAM_SPACE(R1), F25
- FMOVD 288+PARAM_SPACE(R1), F26
- FMOVD 296+PARAM_SPACE(R1), F27
- FMOVD 304+PARAM_SPACE(R1), F28
- FMOVD 312+PARAM_SPACE(R1), F29
- FMOVD 320+PARAM_SPACE(R1), F30
- FMOVD 328+PARAM_SPACE(R1), F31
-
- ADD $344+PARAM_SPACE, R1
-
- MOVD 8(R1), R0
- MOVFL R0, $0xff
- MOVD 16(R1), R0
- MOVD R0, LR
- RET
-
-DEFINE_PPC64X_FUNCDESC(_rt0_ppc64_aix_lib_go, __rt0_ppc64_aix_lib_go)
-
-TEXT __rt0_ppc64_aix_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_ppc64_aix_lib_argc<>(SB), R3
- MOVD _rt0_ppc64_aix_lib_argv<>(SB), R4
- MOVD $runtime·rt0_go(SB), R12
- MOVD R12, CTR
- BR (CTR)
-
-DATA _rt0_ppc64_aix_lib_argc<>(SB)/8, $0
-GLOBL _rt0_ppc64_aix_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_ppc64_aix_lib_argv<>(SB)/8, $0
-GLOBL _rt0_ppc64_aix_lib_argv<>(SB),NOPTR, $8
+TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$0
+ MOVD R14, R3 // argc
+ MOVD R15, R4 // argv
+ JMP _rt0_ppc64x_lib(SB)
diff --git a/src/runtime/rt0_android_arm64.s b/src/runtime/rt0_android_arm64.s
index 4135bf07d5..ff4b691222 100644
--- a/src/runtime/rt0_android_arm64.s
+++ b/src/runtime/rt0_android_arm64.s
@@ -4,17 +4,15 @@
#include "textflag.h"
-TEXT _rt0_arm64_android(SB),NOSPLIT|NOFRAME,$0
- MOVD $_rt0_arm64_linux(SB), R4
- B (R4)
+TEXT _rt0_arm64_android(SB),NOSPLIT,$0
+ JMP _rt0_arm64(SB)
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
-TEXT _rt0_arm64_android_lib(SB),NOSPLIT|NOFRAME,$0
+TEXT _rt0_arm64_android_lib(SB),NOSPLIT,$0
MOVW $1, R0 // argc
MOVD $_rt0_arm64_android_argv(SB), R1 // **argv
- MOVD $_rt0_arm64_linux_lib(SB), R4
- B (R4)
+ JMP _rt0_arm64_lib(SB)
DATA _rt0_arm64_android_argv+0x00(SB)/8,$_rt0_arm64_android_argv0(SB)
DATA _rt0_arm64_android_argv+0x08(SB)/8,$0 // end argv
diff --git a/src/runtime/rt0_darwin_arm64.s b/src/runtime/rt0_darwin_arm64.s
index 697104ac64..e9cd5fc7f5 100644
--- a/src/runtime/rt0_darwin_arm64.s
+++ b/src/runtime/rt0_darwin_arm64.s
@@ -3,61 +3,13 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_arm64.h"
-TEXT _rt0_arm64_darwin(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- BL (R2)
-exit:
- MOVD $0, R0
- MOVD $1, R16 // sys_exit
- SVC $0x80
- B exit
+TEXT _rt0_arm64_darwin(SB),NOSPLIT,$0
+ // Darwin puts argc and argv in R0 and R1,
+ // so there is no need to go through _rt0_arm64.
+ JMP runtime·rt0_go(SB)
// When linking with -buildmode=c-archive or -buildmode=c-shared,
// this symbol is called from a global initialization function.
-//
-// Note that all currently shipping darwin/arm64 platforms require
-// cgo and do not support c-shared.
-TEXT _rt0_arm64_darwin_lib(SB),NOSPLIT,$152
- // Preserve callee-save registers.
- SAVE_R19_TO_R28(8)
- SAVE_F8_TO_F15(88)
-
- MOVD R0, _rt0_arm64_darwin_lib_argc<>(SB)
- MOVD R1, _rt0_arm64_darwin_lib_argv<>(SB)
-
- MOVD $0, g // initialize g to nil
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R4
- BL (R4)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R4
- MOVD $_rt0_arm64_darwin_lib_go(SB), R0
- MOVD $0, R1
- SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
- BL (R4)
- ADD $16, RSP
-
- // Restore callee-save registers.
- RESTORE_R19_TO_R28(8)
- RESTORE_F8_TO_F15(88)
-
- RET
-
-TEXT _rt0_arm64_darwin_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_arm64_darwin_lib_argc<>(SB), R0
- MOVD _rt0_arm64_darwin_lib_argv<>(SB), R1
- MOVD $runtime·rt0_go(SB), R4
- B (R4)
-
-DATA _rt0_arm64_darwin_lib_argc<>(SB)/8, $0
-GLOBL _rt0_arm64_darwin_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_arm64_darwin_lib_argv<>(SB)/8, $0
-GLOBL _rt0_arm64_darwin_lib_argv<>(SB),NOPTR, $8
-
-// external linking entry point.
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- JMP _rt0_arm64_darwin(SB)
+TEXT _rt0_arm64_darwin_lib(SB),NOSPLIT,$0
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_freebsd_arm64.s b/src/runtime/rt0_freebsd_arm64.s
index e517ae059d..a7a952664e 100644
--- a/src/runtime/rt0_freebsd_arm64.s
+++ b/src/runtime/rt0_freebsd_arm64.s
@@ -3,72 +3,12 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_arm64.h"
// On FreeBSD argc/argv are passed in R0, not RSP
-TEXT _rt0_arm64_freebsd(SB),NOSPLIT|NOFRAME,$0
- ADD $8, R0, R1 // argv
- MOVD 0(R0), R0 // argc
- BL main(SB)
+TEXT _rt0_arm64_freebsd(SB),NOSPLIT,$0
+ JMP _rt0_arm64(SB)
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
-TEXT _rt0_arm64_freebsd_lib(SB),NOSPLIT,$184
- // Preserve callee-save registers.
- SAVE_R19_TO_R28(24)
- SAVE_F8_TO_F15(104)
-
- // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go
- MOVD ZR, g
-
- MOVD R0, _rt0_arm64_freebsd_lib_argc<>(SB)
- MOVD R1, _rt0_arm64_freebsd_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R4
- BL (R4)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R4
- CBZ R4, nocgo
- MOVD $_rt0_arm64_freebsd_lib_go(SB), R0
- MOVD $0, R1
- SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
- BL (R4)
- ADD $16, RSP
- B restore
-
-nocgo:
- MOVD $0x800000, R0 // stacksize = 8192KB
- MOVD $_rt0_arm64_freebsd_lib_go(SB), R1
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD $runtime·newosproc0(SB),R4
- BL (R4)
-
-restore:
- // Restore callee-save registers.
- RESTORE_R19_TO_R28(24)
- RESTORE_F8_TO_F15(104)
- RET
-
-TEXT _rt0_arm64_freebsd_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_arm64_freebsd_lib_argc<>(SB), R0
- MOVD _rt0_arm64_freebsd_lib_argv<>(SB), R1
- MOVD $runtime·rt0_go(SB),R4
- B (R4)
-
-DATA _rt0_arm64_freebsd_lib_argc<>(SB)/8, $0
-GLOBL _rt0_arm64_freebsd_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_arm64_freebsd_lib_argv<>(SB)/8, $0
-GLOBL _rt0_arm64_freebsd_lib_argv<>(SB),NOPTR, $8
-
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- BL (R2)
-exit:
- MOVD $0, R0
- MOVD $1, R8 // SYS_exit
- SVC
- B exit
+TEXT _rt0_arm64_freebsd_lib(SB),NOSPLIT,$0
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_ios_arm64.s b/src/runtime/rt0_ios_arm64.s
index dcc83656e2..1ab945b31c 100644
--- a/src/runtime/rt0_ios_arm64.s
+++ b/src/runtime/rt0_ios_arm64.s
@@ -11,4 +11,4 @@ TEXT _rt0_arm64_ios(SB),NOSPLIT|NOFRAME,$0
// library entry point.
TEXT _rt0_arm64_ios_lib(SB),NOSPLIT|NOFRAME,$0
- JMP _rt0_arm64_darwin_lib(SB)
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_linux_arm64.s b/src/runtime/rt0_linux_arm64.s
index 0eb8fc2f48..eed9c5f920 100644
--- a/src/runtime/rt0_linux_arm64.s
+++ b/src/runtime/rt0_linux_arm64.s
@@ -3,71 +3,11 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_arm64.h"
-TEXT _rt0_arm64_linux(SB),NOSPLIT|NOFRAME,$0
- MOVD 0(RSP), R0 // argc
- ADD $8, RSP, R1 // argv
- BL main(SB)
+TEXT _rt0_arm64_linux(SB),NOSPLIT,$0
+ JMP _rt0_arm64(SB)
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
-TEXT _rt0_arm64_linux_lib(SB),NOSPLIT,$184
- // Preserve callee-save registers.
- SAVE_R19_TO_R28(24)
- SAVE_F8_TO_F15(104)
-
- // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go
- MOVD ZR, g
-
- MOVD R0, _rt0_arm64_linux_lib_argc<>(SB)
- MOVD R1, _rt0_arm64_linux_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R4
- BL (R4)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R4
- CBZ R4, nocgo
- MOVD $_rt0_arm64_linux_lib_go(SB), R0
- MOVD $0, R1
- SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
- BL (R4)
- ADD $16, RSP
- B restore
-
-nocgo:
- MOVD $0x800000, R0 // stacksize = 8192KB
- MOVD $_rt0_arm64_linux_lib_go(SB), R1
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD $runtime·newosproc0(SB),R4
- BL (R4)
-
-restore:
- // Restore callee-save registers.
- RESTORE_R19_TO_R28(24)
- RESTORE_F8_TO_F15(104)
- RET
-
-TEXT _rt0_arm64_linux_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_arm64_linux_lib_argc<>(SB), R0
- MOVD _rt0_arm64_linux_lib_argv<>(SB), R1
- MOVD $runtime·rt0_go(SB),R4
- B (R4)
-
-DATA _rt0_arm64_linux_lib_argc<>(SB)/8, $0
-GLOBL _rt0_arm64_linux_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_arm64_linux_lib_argv<>(SB)/8, $0
-GLOBL _rt0_arm64_linux_lib_argv<>(SB),NOPTR, $8
-
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- BL (R2)
-exit:
- MOVD $0, R0
- MOVD $94, R8 // sys_exit
- SVC
- B exit
+TEXT _rt0_arm64_linux_lib(SB),NOSPLIT,$0
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s
index b52f7d530a..d8da4461a2 100644
--- a/src/runtime/rt0_linux_loong64.s
+++ b/src/runtime/rt0_linux_loong64.s
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_loong64.h"
TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0
// In a statically linked binary, the stack contains argc,
@@ -16,53 +15,8 @@ TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
-TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$168
- // Preserve callee-save registers.
- SAVE_R22_TO_R31(3*8)
- SAVE_F24_TO_F31(13*8)
-
- // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go
- MOVV R0, g
-
- MOVV R4, _rt0_loong64_linux_lib_argc<>(SB)
- MOVV R5, _rt0_loong64_linux_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVV $runtime·libpreinit(SB), R19
- JAL (R19)
-
- // Create a new thread to do the runtime initialization and return.
- MOVV _cgo_sys_thread_create(SB), R19
- BEQ R19, nocgo
- MOVV $_rt0_loong64_linux_lib_go(SB), R4
- MOVV $0, R5
- JAL (R19)
- JMP restore
-
-nocgo:
- MOVV $0x800000, R4 // stacksize = 8192KB
- MOVV $_rt0_loong64_linux_lib_go(SB), R5
- MOVV R4, 8(R3)
- MOVV R5, 16(R3)
- MOVV $runtime·newosproc0(SB), R19
- JAL (R19)
-
-restore:
- // Restore callee-save registers.
- RESTORE_R22_TO_R31(3*8)
- RESTORE_F24_TO_F31(13*8)
- RET
-
-TEXT _rt0_loong64_linux_lib_go(SB),NOSPLIT,$0
- MOVV _rt0_loong64_linux_lib_argc<>(SB), R4
- MOVV _rt0_loong64_linux_lib_argv<>(SB), R5
- MOVV $runtime·rt0_go(SB),R19
- JMP (R19)
-
-DATA _rt0_loong64_linux_lib_argc<>(SB)/8, $0
-GLOBL _rt0_loong64_linux_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_loong64_linux_lib_argv<>(SB)/8, $0
-GLOBL _rt0_loong64_linux_lib_argv<>(SB),NOPTR, $8
+TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$0
+ JMP _rt0_loong64_lib(SB)
TEXT main(SB),NOSPLIT|NOFRAME,$0
// in external linking, glibc jumps to main with argc in R4
diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s
index 4b7d8e1b94..3a6e8863b2 100644
--- a/src/runtime/rt0_linux_ppc64le.s
+++ b/src/runtime/rt0_linux_ppc64le.s
@@ -5,60 +5,13 @@
#include "go_asm.h"
#include "textflag.h"
#include "asm_ppc64x.h"
-#include "cgo/abi_ppc64x.h"
TEXT _rt0_ppc64le_linux(SB),NOSPLIT,$0
XOR R0, R0 // Make sure R0 is zero before _main
BR _main<>(SB)
TEXT _rt0_ppc64le_linux_lib(SB),NOSPLIT|NOFRAME,$0
- // This is called with ELFv2 calling conventions. Convert to Go.
- // Allocate argument storage for call to newosproc0.
- STACK_AND_SAVE_HOST_TO_GO_ABI(16)
-
- MOVD R3, _rt0_ppc64le_linux_lib_argc<>(SB)
- MOVD R4, _rt0_ppc64le_linux_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R12
- MOVD R12, CTR
- BL (CTR)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R12
- CMP $0, R12
- BEQ nocgo
- MOVD $_rt0_ppc64le_linux_lib_go(SB), R3
- MOVD $0, R4
- MOVD R12, CTR
- BL (CTR)
- BR done
-
-nocgo:
- MOVD $0x800000, R12 // stacksize = 8192KB
- MOVD R12, 8+FIXED_FRAME(R1)
- MOVD $_rt0_ppc64le_linux_lib_go(SB), R12
- MOVD R12, 16+FIXED_FRAME(R1)
- MOVD $runtime·newosproc0(SB),R12
- MOVD R12, CTR
- BL (CTR)
-
-done:
- // Restore and return to ELFv2 caller.
- UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16)
- RET
-
-TEXT _rt0_ppc64le_linux_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_ppc64le_linux_lib_argc<>(SB), R3
- MOVD _rt0_ppc64le_linux_lib_argv<>(SB), R4
- MOVD $runtime·rt0_go(SB), R12
- MOVD R12, CTR
- BR (CTR)
-
-DATA _rt0_ppc64le_linux_lib_argc<>(SB)/8, $0
-GLOBL _rt0_ppc64le_linux_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_ppc64le_linux_lib_argv<>(SB)/8, $0
-GLOBL _rt0_ppc64le_linux_lib_argv<>(SB),NOPTR, $8
+ JMP _rt0_ppc64x_lib(SB)
TEXT _main<>(SB),NOSPLIT,$-8
// In a statically linked binary, the stack contains argc,
diff --git a/src/runtime/rt0_netbsd_arm64.s b/src/runtime/rt0_netbsd_arm64.s
index 691a8e4be7..80802209b7 100644
--- a/src/runtime/rt0_netbsd_arm64.s
+++ b/src/runtime/rt0_netbsd_arm64.s
@@ -3,69 +3,13 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_arm64.h"
-TEXT _rt0_arm64_netbsd(SB),NOSPLIT|NOFRAME,$0
+TEXT _rt0_arm64_netbsd(SB),NOSPLIT,$0
MOVD 0(RSP), R0 // argc
ADD $8, RSP, R1 // argv
- BL main(SB)
+ JMP main(SB)
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
-TEXT _rt0_arm64_netbsd_lib(SB),NOSPLIT,$184
- // Preserve callee-save registers.
- SAVE_R19_TO_R28(24)
- SAVE_F8_TO_F15(104)
-
- // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go
- MOVD ZR, g
-
- MOVD R0, _rt0_arm64_netbsd_lib_argc<>(SB)
- MOVD R1, _rt0_arm64_netbsd_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R4
- BL (R4)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R4
- CBZ R4, nocgo
- MOVD $_rt0_arm64_netbsd_lib_go(SB), R0
- MOVD $0, R1
- SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
- BL (R4)
- ADD $16, RSP
- B restore
-
-nocgo:
- MOVD $0x800000, R0 // stacksize = 8192KB
- MOVD $_rt0_arm64_netbsd_lib_go(SB), R1
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD $runtime·newosproc0(SB),R4
- BL (R4)
-
-restore:
- // Restore callee-save registers.
- RESTORE_R19_TO_R28(24)
- RESTORE_F8_TO_F15(104)
- RET
-
-TEXT _rt0_arm64_netbsd_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_arm64_netbsd_lib_argc<>(SB), R0
- MOVD _rt0_arm64_netbsd_lib_argv<>(SB), R1
- MOVD $runtime·rt0_go(SB),R4
- B (R4)
-
-DATA _rt0_arm64_netbsd_lib_argc<>(SB)/8, $0
-GLOBL _rt0_arm64_netbsd_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_arm64_netbsd_lib_argv<>(SB)/8, $0
-GLOBL _rt0_arm64_netbsd_lib_argv<>(SB),NOPTR, $8
-
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- BL (R2)
-exit:
- MOVD $0, R0
- SVC $1 // sys_exit
+TEXT _rt0_arm64_netbsd_lib(SB),NOSPLIT,$0
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_openbsd_arm64.s b/src/runtime/rt0_openbsd_arm64.s
index 49d49b34ac..2050f6a200 100644
--- a/src/runtime/rt0_openbsd_arm64.s
+++ b/src/runtime/rt0_openbsd_arm64.s
@@ -3,77 +3,11 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
-#include "cgo/abi_arm64.h"
-// See comment in runtime/sys_openbsd_arm64.s re this construction.
-#define INVOKE_SYSCALL \
- SVC; \
- NOOP; \
- NOOP
-
-TEXT _rt0_arm64_openbsd(SB),NOSPLIT|NOFRAME,$0
- MOVD 0(RSP), R0 // argc
- ADD $8, RSP, R1 // argv
- BL main(SB)
+TEXT _rt0_arm64_openbsd(SB),NOSPLIT,$0
+ JMP _rt0_arm64(SB)
// When building with -buildmode=c-shared, this symbol is called when the shared
// library is loaded.
TEXT _rt0_arm64_openbsd_lib(SB),NOSPLIT,$184
- // Preserve callee-save registers.
- SAVE_R19_TO_R28(24)
- SAVE_F8_TO_F15(104)
-
- // Initialize g as null in case of using g later e.g. sigaction in cgo_sigaction.go
- MOVD ZR, g
-
- MOVD R0, _rt0_arm64_openbsd_lib_argc<>(SB)
- MOVD R1, _rt0_arm64_openbsd_lib_argv<>(SB)
-
- // Synchronous initialization.
- MOVD $runtime·libpreinit(SB), R4
- BL (R4)
-
- // Create a new thread to do the runtime initialization and return.
- MOVD _cgo_sys_thread_create(SB), R4
- CBZ R4, nocgo
- MOVD $_rt0_arm64_openbsd_lib_go(SB), R0
- MOVD $0, R1
- SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
- BL (R4)
- ADD $16, RSP
- B restore
-
-nocgo:
- MOVD $0x800000, R0 // stacksize = 8192KB
- MOVD $_rt0_arm64_openbsd_lib_go(SB), R1
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD $runtime·newosproc0(SB),R4
- BL (R4)
-
-restore:
- // Restore callee-save registers.
- RESTORE_R19_TO_R28(24)
- RESTORE_F8_TO_F15(104)
- RET
-
-TEXT _rt0_arm64_openbsd_lib_go(SB),NOSPLIT,$0
- MOVD _rt0_arm64_openbsd_lib_argc<>(SB), R0
- MOVD _rt0_arm64_openbsd_lib_argv<>(SB), R1
- MOVD $runtime·rt0_go(SB),R4
- B (R4)
-
-DATA _rt0_arm64_openbsd_lib_argc<>(SB)/8, $0
-GLOBL _rt0_arm64_openbsd_lib_argc<>(SB),NOPTR, $8
-DATA _rt0_arm64_openbsd_lib_argv<>(SB)/8, $0
-GLOBL _rt0_arm64_openbsd_lib_argv<>(SB),NOPTR, $8
-
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- BL (R2)
-exit:
- MOVD $0, R0
- MOVD $1, R8 // sys_exit
- INVOKE_SYSCALL
- B exit
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/rt0_windows_386.s b/src/runtime/rt0_windows_386.s
index fa39edd787..bb92a99f6b 100644
--- a/src/runtime/rt0_windows_386.s
+++ b/src/runtime/rt0_windows_386.s
@@ -12,33 +12,8 @@ TEXT _rt0_386_windows(SB),NOSPLIT,$0
// library is loaded. For static libraries it is called when the
// final executable starts, during the C runtime initialization
// phase.
-TEXT _rt0_386_windows_lib(SB),NOSPLIT,$0x1C
- MOVL BP, 0x08(SP)
- MOVL BX, 0x0C(SP)
- MOVL AX, 0x10(SP)
- MOVL CX, 0x14(SP)
- MOVL DX, 0x18(SP)
-
- // Create a new thread to do the runtime initialization and return.
- MOVL _cgo_sys_thread_create(SB), AX
- MOVL $_rt0_386_windows_lib_go(SB), 0x00(SP)
- MOVL $0, 0x04(SP)
-
- // Top two items on the stack are passed to _cgo_sys_thread_create
- // as parameters. This is the calling convention on 32-bit Windows.
- CALL AX
-
- MOVL 0x08(SP), BP
- MOVL 0x0C(SP), BX
- MOVL 0x10(SP), AX
- MOVL 0x14(SP), CX
- MOVL 0x18(SP), DX
- RET
-
-TEXT _rt0_386_windows_lib_go(SB),NOSPLIT,$0
- PUSHL $0
- PUSHL $0
- JMP runtime·rt0_go(SB)
+TEXT _rt0_386_windows_lib(SB),NOSPLIT,$0
+ JMP _rt0_386_lib(SB)
TEXT _main(SB),NOSPLIT,$0
// Remove the return address from the stack.
diff --git a/src/runtime/rt0_windows_amd64.s b/src/runtime/rt0_windows_amd64.s
index bd18bdd311..09869c534c 100644
--- a/src/runtime/rt0_windows_amd64.s
+++ b/src/runtime/rt0_windows_amd64.s
@@ -6,31 +6,10 @@
#include "go_tls.h"
#include "textflag.h"
-TEXT _rt0_amd64_windows(SB),NOSPLIT|NOFRAME,$-8
+TEXT _rt0_amd64_windows(SB),NOSPLIT,$0
JMP _rt0_amd64(SB)
// When building with -buildmode=(c-shared or c-archive), this
-// symbol is called. For dynamic libraries it is called when the
-// library is loaded. For static libraries it is called when the
-// final executable starts, during the C runtime initialization
-// phase.
-// Leave space for four pointers on the stack as required
-// by the Windows amd64 calling convention.
-TEXT _rt0_amd64_windows_lib(SB),NOSPLIT|NOFRAME,$40
- // Create a new thread to do the runtime initialization and return.
- MOVQ BX, 32(SP) // callee-saved, preserved across the CALL
- MOVQ SP, BX
- ANDQ $~15, SP // alignment as per Windows requirement
- MOVQ _cgo_sys_thread_create(SB), AX
- MOVQ $_rt0_amd64_windows_lib_go(SB), CX
- MOVQ $0, DX
- CALL AX
- MOVQ BX, SP
- MOVQ 32(SP), BX
- RET
-
-TEXT _rt0_amd64_windows_lib_go(SB),NOSPLIT|NOFRAME,$0
- MOVQ $0, DI
- MOVQ $0, SI
- MOVQ $runtime·rt0_go(SB), AX
- JMP AX
+// symbol is called.
+TEXT _rt0_amd64_windows_lib(SB),NOSPLIT,$0
+ JMP _rt0_amd64_lib(SB)
diff --git a/src/runtime/rt0_windows_arm64.s b/src/runtime/rt0_windows_arm64.s
index 8802c2b82e..dc68cff5f2 100644
--- a/src/runtime/rt0_windows_arm64.s
+++ b/src/runtime/rt0_windows_arm64.s
@@ -8,22 +8,15 @@
// This is the entry point for the program from the
// kernel for an ordinary -buildmode=exe program.
-TEXT _rt0_arm64_windows(SB),NOSPLIT|NOFRAME,$0
- B ·rt0_go(SB)
+TEXT _rt0_arm64_windows(SB),NOSPLIT,$0
+ // Windows doesn't use argc and argv,
+ // so there is no need to go through _rt0_arm64.
+ JMP runtime·rt0_go(SB)
-TEXT _rt0_arm64_windows_lib(SB),NOSPLIT|NOFRAME,$0
- MOVD $_rt0_arm64_windows_lib_go(SB), R0
- MOVD $0, R1
- MOVD _cgo_sys_thread_create(SB), R2
- B (R2)
-
-TEXT _rt0_arm64_windows_lib_go(SB),NOSPLIT|NOFRAME,$0
+// When building with -buildmode=c-shared, this symbol is called when the shared
+// library is loaded.
+TEXT _rt0_arm64_windows_lib(SB),NOSPLIT,$0
+ // We get the argc and argv parameters from Win32.
MOVD $0, R0
MOVD $0, R1
- MOVD $runtime·rt0_go(SB), R2
- B (R2)
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·rt0_go(SB), R2
- B (R2)
-
+ JMP _rt0_arm64_lib(SB)
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index e81efadeb3..47d125bd67 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -77,7 +77,7 @@ func checkGdbVersion(t *testing.T) {
t.Skipf("skipping: gdb version %d.%d too old", major, minor)
}
if major < 12 || (major == 12 && minor < 1) {
- t.Logf("gdb version <12.1 is known to crash due to a SIGWINCH recieved in non-interactive mode; if you see a crash, some test may be sending SIGWINCH to the whole process group. See go.dev/issue/58932.")
+ t.Logf("gdb version <12.1 is known to crash due to a SIGWINCH received in non-interactive mode; if you see a crash, some test may be sending SIGWINCH to the whole process group. See go.dev/issue/58932.")
}
t.Logf("gdb version %d.%d", major, minor)
}
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index 424745d235..15b546783b 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -402,7 +402,7 @@ var dbgvars = []*dbgVar{
{name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
}
-func parsedebugvars() {
+func parseRuntimeDebugVars(godebug string) {
// defaults
debug.cgocheck = 1
debug.invalidptr = 1
@@ -420,12 +420,6 @@ func parsedebugvars() {
}
debug.traceadvanceperiod = defaultTraceAdvancePeriod
- godebug := gogetenv("GODEBUG")
-
- p := new(string)
- *p = godebug
- godebugEnv.Store(p)
-
// apply runtime defaults, if any
for _, v := range dbgvars {
if v.def != 0 {
@@ -437,7 +431,6 @@ func parsedebugvars() {
}
}
}
-
// apply compile-time GODEBUG settings
parsegodebug(godebugDefault, nil)
@@ -463,6 +456,12 @@ func parsedebugvars() {
if debug.gccheckmark > 0 {
debug.asyncpreemptoff = 1
}
+}
+
+func finishDebugVarsSetup() {
+ p := new(string)
+ *p = gogetenv("GODEBUG")
+ godebugEnv.Store(p)
setTraceback(gogetenv("GOTRACEBACK"))
traceback_env = traceback_cache
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 042c3137cd..6016c6fde0 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -87,6 +87,9 @@ const (
// ready()ing this G.
_Gpreempted // 9
+ // _Gleaked represents a leaked goroutine caught by the GC.
+ _Gleaked // 10
+
// _Gscan combined with one of the above states other than
// _Grunning indicates that GC is scanning the stack. The
// goroutine is not executing user code and the stack is owned
@@ -104,6 +107,7 @@ const (
_Gscansyscall = _Gscan + _Gsyscall // 0x1003
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004
_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
+ _Gscanleaked = _Gscan + _Gleaked // 0x100a
)
const (
@@ -315,6 +319,78 @@ type gobuf struct {
bp uintptr // for framepointer-enabled architectures
}
+// maybeTraceablePtr is a special pointer that is conditionally trackable
+// by the GC. It consists of an address as a uintptr (vu) and a pointer
+// to a data element (vp).
+//
+// maybeTraceablePtr values can be in one of three states:
+// 1. Unset: vu == 0 && vp == nil
+// 2. Untracked: vu != 0 && vp == nil
+// 3. Tracked: vu != 0 && vp != nil
+//
+// Do not set fields manually. Use methods instead.
+// Extend this type with additional methods if needed.
+type maybeTraceablePtr struct {
+ vp unsafe.Pointer // For liveness only.
+ vu uintptr // Source of truth.
+}
+
+// untrack unsets the pointer but preserves the address.
+// This is used to hide the pointer from the GC.
+//
+//go:nosplit
+func (p *maybeTraceablePtr) setUntraceable() {
+ p.vp = nil
+}
+
+// setTraceable resets the pointer to the stored address.
+// This is used to make the pointer visible to the GC.
+//
+//go:nosplit
+func (p *maybeTraceablePtr) setTraceable() {
+ p.vp = unsafe.Pointer(p.vu)
+}
+
+// set sets the pointer to the data element and updates the address.
+//
+//go:nosplit
+func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
+ p.vp = v
+ p.vu = uintptr(v)
+}
+
+// get retrieves the pointer to the data element.
+//
+//go:nosplit
+func (p *maybeTraceablePtr) get() unsafe.Pointer {
+ return unsafe.Pointer(p.vu)
+}
+
+// uintptr returns the uintptr address of the pointer.
+//
+//go:nosplit
+func (p *maybeTraceablePtr) uintptr() uintptr {
+ return p.vu
+}
+
+// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
+// to track hchan pointers.
+//
+// Do not set fields manually. Use methods instead.
+type maybeTraceableChan struct {
+ maybeTraceablePtr
+}
+
+//go:nosplit
+func (p *maybeTraceableChan) set(c *hchan) {
+ p.maybeTraceablePtr.set(unsafe.Pointer(c))
+}
+
+//go:nosplit
+func (p *maybeTraceableChan) get() *hchan {
+ return (*hchan)(p.maybeTraceablePtr.get())
+}
+
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
@@ -334,7 +410,8 @@ type sudog struct {
next *sudog
prev *sudog
- elem unsafe.Pointer // data element (may point to stack)
+
+ elem maybeTraceablePtr // data element (may point to stack)
// The following fields are never accessed concurrently.
// For channels, waitlink is only accessed by g.
@@ -362,10 +439,10 @@ type sudog struct {
// in the second entry in the list.)
waiters uint16
- parent *sudog // semaRoot binary tree
- waitlink *sudog // g.waiting list or semaRoot
- waittail *sudog // semaRoot
- c *hchan // channel
+ parent *sudog // semaRoot binary tree
+ waitlink *sudog // g.waiting list or semaRoot
+ waittail *sudog // semaRoot
+ c maybeTraceableChan // channel
}
type libcall struct {
@@ -1072,24 +1149,24 @@ const (
waitReasonZero waitReason = iota // ""
waitReasonGCAssistMarking // "GC assist marking"
waitReasonIOWait // "IO wait"
- waitReasonChanReceiveNilChan // "chan receive (nil chan)"
- waitReasonChanSendNilChan // "chan send (nil chan)"
waitReasonDumpingHeap // "dumping heap"
waitReasonGarbageCollection // "garbage collection"
waitReasonGarbageCollectionScan // "garbage collection scan"
waitReasonPanicWait // "panicwait"
- waitReasonSelect // "select"
- waitReasonSelectNoCases // "select (no cases)"
waitReasonGCAssistWait // "GC assist wait"
waitReasonGCSweepWait // "GC sweep wait"
waitReasonGCScavengeWait // "GC scavenge wait"
- waitReasonChanReceive // "chan receive"
- waitReasonChanSend // "chan send"
waitReasonFinalizerWait // "finalizer wait"
waitReasonForceGCIdle // "force gc (idle)"
waitReasonUpdateGOMAXPROCSIdle // "GOMAXPROCS updater (idle)"
waitReasonSemacquire // "semacquire"
waitReasonSleep // "sleep"
+ waitReasonChanReceiveNilChan // "chan receive (nil chan)"
+ waitReasonChanSendNilChan // "chan send (nil chan)"
+ waitReasonSelectNoCases // "select (no cases)"
+ waitReasonSelect // "select"
+ waitReasonChanReceive // "chan receive"
+ waitReasonChanSend // "chan send"
waitReasonSyncCondWait // "sync.Cond.Wait"
waitReasonSyncMutexLock // "sync.Mutex.Lock"
waitReasonSyncRWMutexRLock // "sync.RWMutex.RLock"
@@ -1175,12 +1252,34 @@ func (w waitReason) String() string {
return waitReasonStrings[w]
}
+// isMutexWait returns true if the goroutine is blocked because of
+// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
+//
+//go:nosplit
func (w waitReason) isMutexWait() bool {
return w == waitReasonSyncMutexLock ||
w == waitReasonSyncRWMutexRLock ||
w == waitReasonSyncRWMutexLock
}
+// isSyncWait returns true if the goroutine is blocked because of
+// sync library primitive operations.
+//
+//go:nosplit
+func (w waitReason) isSyncWait() bool {
+ return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
+}
+
+// isChanWait is true if the goroutine is blocked because of non-nil
+// channel operations or a select statement with at least one case.
+//
+//go:nosplit
+func (w waitReason) isChanWait() bool {
+ return w == waitReasonSelect ||
+ w == waitReasonChanReceive ||
+ w == waitReasonChanSend
+}
+
func (w waitReason) isWaitingForSuspendG() bool {
return isWaitingForSuspendG[w]
}
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 113dc8ad19..553f6960eb 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -83,7 +83,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool {
// channels in lock order.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- if sg.c != lastc && lastc != nil {
+ if sg.c.get() != lastc && lastc != nil {
// As soon as we unlock the channel, fields in
// any sudog with that channel may change,
// including c and waitlink. Since multiple
@@ -92,7 +92,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool {
// of a channel.
unlock(&lastc.lock)
}
- lastc = sg.c
+ lastc = sg.c.get()
}
if lastc != nil {
unlock(&lastc.lock)
@@ -320,12 +320,12 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
sg.isSelect = true
// No stack splits between assigning elem and enqueuing
// sg on gp.waiting where copystack can find it.
- sg.elem = cas.elem
+ sg.elem.set(cas.elem)
sg.releasetime = 0
if t0 != 0 {
sg.releasetime = -1
}
- sg.c = c
+ sg.c.set(c)
// Construct waiting list in lock order.
*nextp = sg
nextp = &sg.waitlink
@@ -368,8 +368,8 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
// Clear all elem before unlinking from gp.waiting.
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
sg1.isSelect = false
- sg1.elem = nil
- sg1.c = nil
+ sg1.elem.set(nil)
+ sg1.c.set(nil)
}
gp.waiting = nil
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index 6af49b1b0c..0fe0f2a2c2 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -303,7 +303,10 @@ func cansemacquire(addr *uint32) bool {
// queue adds s to the blocked goroutines in semaRoot.
func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
s.g = getg()
- s.elem = unsafe.Pointer(addr)
+ s.elem.set(unsafe.Pointer(addr))
+ // Storing this pointer so that we can trace the semaphore address
+ // from the blocked goroutine when checking for goroutine leaks.
+ s.g.waiting = s
s.next = nil
s.prev = nil
s.waiters = 0
@@ -311,7 +314,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
var last *sudog
pt := &root.treap
for t := *pt; t != nil; t = *pt {
- if t.elem == unsafe.Pointer(addr) {
+ if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() {
// Already have addr in list.
if lifo {
// Substitute s in t's place in treap.
@@ -357,7 +360,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
return
}
last = t
- if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) {
+ if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() {
pt = &t.prev
} else {
pt = &t.next
@@ -402,11 +405,13 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) {
ps := &root.treap
s := *ps
+
for ; s != nil; s = *ps {
- if s.elem == unsafe.Pointer(addr) {
+ if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() {
goto Found
}
- if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) {
+
+ if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() {
ps = &s.prev
} else {
ps = &s.next
@@ -470,8 +475,10 @@ Found:
}
tailtime = s.acquiretime
}
+ // Goroutine is no longer blocked. Clear the waiting pointer.
+ s.g.waiting = nil
s.parent = nil
- s.elem = nil
+ s.elem.set(nil)
s.next = nil
s.prev = nil
s.ticket = 0
@@ -590,6 +597,10 @@ func notifyListWait(l *notifyList, t uint32) {
// Enqueue itself.
s := acquireSudog()
s.g = getg()
+ // Storing this pointer so that we can trace the condvar address
+ // from the blocked goroutine when checking for goroutine leaks.
+ s.elem.set(unsafe.Pointer(l))
+ s.g.waiting = s
s.ticket = t
s.releasetime = 0
t0 := int64(0)
@@ -607,6 +618,10 @@ func notifyListWait(l *notifyList, t uint32) {
if t0 != 0 {
blockevent(s.releasetime-t0, 2)
}
+ // Goroutine is no longer blocked. Clear up its waiting pointer,
+ // and clean up the sudog before releasing it.
+ s.g.waiting = nil
+ s.elem.set(nil)
releaseSudog(s)
}
diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go
index 9b6654f332..03c7739c34 100644
--- a/src/runtime/set_vma_name_linux.go
+++ b/src/runtime/set_vma_name_linux.go
@@ -14,9 +14,13 @@ import (
var prSetVMAUnsupported atomic.Bool
+func setVMANameSupported() bool {
+ return !prSetVMAUnsupported.Load()
+}
+
// setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name)
func setVMAName(start unsafe.Pointer, length uintptr, name string) {
- if debug.decoratemappings == 0 || prSetVMAUnsupported.Load() {
+ if debug.decoratemappings == 0 || !setVMANameSupported() {
return
}
diff --git a/src/runtime/set_vma_name_stub.go b/src/runtime/set_vma_name_stub.go
index 38f65fd592..6cb01ebf50 100644
--- a/src/runtime/set_vma_name_stub.go
+++ b/src/runtime/set_vma_name_stub.go
@@ -10,3 +10,5 @@ import "unsafe"
// setVMAName isn’t implemented
func setVMAName(start unsafe.Pointer, len uintptr, name string) {}
+
+func setVMANameSupported() bool { return false }
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index de859866a5..5888177f0e 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
_64bit uintptr // size on 64bit platforms
}{
{runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
- {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+ {runtime.Sudog{}, 64, 104}, // sudog, but exported for testing
}
if xreg > runtime.PtrSize {
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index d809820b07..55e97e77af 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -821,7 +821,8 @@ func adjustsudogs(gp *g, adjinfo *adjustinfo) {
// the data elements pointed to by a SudoG structure
// might be in the stack.
for s := gp.waiting; s != nil; s = s.waitlink {
- adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
+ adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu))
+ adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp))
}
}
@@ -834,7 +835,7 @@ func fillstack(stk stack, b byte) {
func findsghi(gp *g, stk stack) uintptr {
var sghi uintptr
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
+ p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize)
if stk.lo <= p && p < stk.hi && p > sghi {
sghi = p
}
@@ -853,7 +854,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
// Lock channels to prevent concurrent send/receive.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- if sg.c != lastc {
+ if sg.c.get() != lastc {
// There is a ranking cycle here between gscan bit and
// hchan locks. Normally, we only allow acquiring hchan
// locks and then getting a gscan bit. In this case, we
@@ -863,9 +864,9 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
// suspended. So, we get a special hchan lock rank here
// that is lower than gscan, but doesn't allow acquiring
// any other locks other than hchan.
- lockWithRank(&sg.c.lock, lockRankHchanLeaf)
+ lockWithRank(&sg.c.get().lock, lockRankHchanLeaf)
}
- lastc = sg.c
+ lastc = sg.c.get()
}
// Adjust sudogs.
@@ -885,10 +886,10 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
// Unlock channels.
lastc = nil
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
- if sg.c != lastc {
- unlock(&sg.c.lock)
+ if sg.c.get() != lastc {
+ unlock(&sg.c.get().lock)
}
- lastc = sg.c
+ lastc = sg.c.get()
}
return sgsize
diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go
index a7c6fc8e5a..eb8de47e86 100644
--- a/src/runtime/stack_test.go
+++ b/src/runtime/stack_test.go
@@ -290,7 +290,7 @@ func setBig(p *int, x int, b bigBuf) {
func TestDeferPtrsPanic(t *testing.T) {
for i := 0; i < 100; i++ {
c := make(chan int, 1)
- go testDeferPtrsGoexit(c, i)
+ go testDeferPtrsPanic(c, i)
if n := <-c; n != 42 {
t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
}
diff --git a/src/runtime/string_test.go b/src/runtime/string_test.go
index 522a502a1c..942e7ac04a 100644
--- a/src/runtime/string_test.go
+++ b/src/runtime/string_test.go
@@ -388,8 +388,6 @@ func TestString2Slice(t *testing.T) {
}
}
-const intSize = 32 << (^uint(0) >> 63)
-
func TestParseByteCount(t *testing.T) {
for _, test := range []struct {
in string
diff --git a/src/runtime/sys_wasm.s b/src/runtime/sys_wasm.s
index b7965ec3fa..95c162eb85 100644
--- a/src/runtime/sys_wasm.s
+++ b/src/runtime/sys_wasm.s
@@ -22,64 +22,6 @@ TEXT runtime·wasmDiv(SB), NOSPLIT, $0-0
I64DivS
Return
-TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0
- Get R0
- Get R0
- F64Ne // NaN
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- F64Const $0x7ffffffffffffc00p0 // Maximum truncated representation of 0x7fffffffffffffff
- F64Gt
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- F64Const $-0x7ffffffffffffc00p0 // Minimum truncated representation of -0x8000000000000000
- F64Lt
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- I64TruncF64S
- Return
-
-TEXT runtime·wasmTruncU(SB), NOSPLIT, $0-0
- Get R0
- Get R0
- F64Ne // NaN
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- F64Const $0xfffffffffffff800p0 // Maximum truncated representation of 0xffffffffffffffff
- F64Gt
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- F64Const $0.
- F64Lt
- If
- I64Const $0x8000000000000000
- Return
- End
-
- Get R0
- I64TruncF64U
- Return
-
TEXT runtime·exitThread(SB), NOSPLIT, $0-0
UNDEF
diff --git a/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go
new file mode 100644
index 0000000000..353e48ee70
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go
@@ -0,0 +1,277 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+// Common goroutine leak patterns. Extracted from:
+// "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach"
+// doi:10.1109/CGO57630.2024.10444835
+//
+// Tests in this file are only free of flakiness when run with GOMAXPROCS=1.
+// The main goroutine forcefully yields via `runtime.Gosched()` before
+// running the profiler. This moves them to the back of the run queue,
+// allowing the leaky goroutines to be scheduled beforehand and get stuck.
+
+func init() {
+ register("NoCloseRange", NoCloseRange)
+ register("MethodContractViolation", MethodContractViolation)
+ register("DoubleSend", DoubleSend)
+ register("EarlyReturn", EarlyReturn)
+ register("NCastLeak", NCastLeak)
+ register("Timeout", Timeout)
+}
+
+// Incoming list of items and the number of workers.
+func noCloseRange(list []any, workers int) {
+ ch := make(chan any)
+
+ // Create each worker
+ for i := 0; i < workers; i++ {
+ go func() {
+
+ // Each worker waits for an item and processes it.
+ for item := range ch {
+ // Process each item
+ _ = item
+ }
+ }()
+ }
+
+ // Send each item to one of the workers.
+ for _, item := range list {
+ // Sending can leak if workers == 0 or if one of the workers panics
+ ch <- item
+ }
+ // The channel is never closed, so workers leak once there are no more
+ // items left to process.
+}
+
+func NoCloseRange() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go noCloseRange([]any{1, 2, 3}, 0)
+ go noCloseRange([]any{1, 2, 3}, 3)
+}
+
+// A worker processes items pushed to `ch` one by one in the background.
+// When the worker is no longer needed, it must be closed with `Stop`.
+//
+// Specifications:
+//
+// A worker may be started any number of times, but must be stopped only once.
+// Stopping a worker multiple times will lead to a close panic.
+// Any worker that is started must eventually be stopped.
+// Failing to stop a worker results in a goroutine leak
+type worker struct {
+ ch chan any
+ done chan any
+}
+
+// Start spawns a background goroutine that extracts items pushed to the queue.
+func (w worker) Start() {
+ go func() {
+
+ for {
+ select {
+ case <-w.ch: // Normal workflow
+ case <-w.done:
+ return // Shut down
+ }
+ }
+ }()
+}
+
+func (w worker) Stop() {
+ // Allows goroutine created by Start to terminate
+ close(w.done)
+}
+
+func (w worker) AddToQueue(item any) {
+ w.ch <- item
+}
+
+// worker limited in scope by workerLifecycle
+func workerLifecycle(items []any) {
+ // Create a new worker
+ w := worker{
+ ch: make(chan any),
+ done: make(chan any),
+ }
+ // Start worker
+ w.Start()
+
+ // Operate on worker
+ for _, item := range items {
+ w.AddToQueue(item)
+ }
+
+ runtime.Gosched()
+ // Exits without calling `Stop`. Goroutine created by `Start` eventually leaks.
+}
+
+func MethodContractViolation() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ workerLifecycle(make([]any, 10))
+ runtime.Gosched()
+}
+
+// doubleSend must send a message on the incoming channel (the incoming error simulates an error generated internally).
+func doubleSend(ch chan any, err error) {
+ if err != nil {
+ // In case of an error, send nil.
+ ch <- nil
+ // Return is missing here.
+ }
+ // Otherwise, continue with normal behaviour
+ // This send is still executed in the error case, which may lead to a goroutine leak.
+ ch <- struct{}{}
+}
+
+func DoubleSend() {
+ prof := pprof.Lookup("goroutineleak")
+ ch := make(chan any)
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ doubleSend(ch, nil)
+ }()
+ <-ch
+
+ go func() {
+ doubleSend(ch, fmt.Errorf("error"))
+ }()
+ <-ch
+
+ ch1 := make(chan any, 1)
+ go func() {
+ doubleSend(ch1, fmt.Errorf("error"))
+ }()
+ <-ch1
+}
+
+// earlyReturn demonstrates a common pattern of goroutine leaks.
+// A return statement interrupts the evaluation of the parent goroutine before it can consume a message.
+// Incoming error simulates an error produced internally.
+func earlyReturn(err error) {
+ // Create a synchronous channel
+ ch := make(chan any)
+
+ go func() {
+
+ // Send something to the channel.
+ // Leaks if the parent goroutine terminates early.
+ ch <- struct{}{}
+ }()
+
+ if err != nil {
+ // Interrupt evaluation of parent early in case of error.
+ // Sender leaks.
+ return
+ }
+
+ // Only receive if there is no error.
+ <-ch
+}
+
+func EarlyReturn() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go earlyReturn(fmt.Errorf("error"))
+}
+
+// nCastLeak processes a number of items. First result to pass the post is retrieved from the channel queue.
+func nCastLeak(items []any) {
+ // Channel is synchronous.
+ ch := make(chan any)
+
+ // Iterate over every item
+ for range items {
+ go func() {
+
+ // Process item and send result to channel
+ ch <- struct{}{}
+ // Channel is synchronous: only one sender will synchronise
+ }()
+ }
+ // Retrieve first result. All other senders block.
+ // Receiver blocks if there are no senders.
+ <-ch
+}
+
+func NCastLeak() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ for i := 0; i < yieldCount; i++ {
+ // Yield enough times to allow all the leaky goroutines to
+ // reach the execution point.
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ nCastLeak(nil)
+ }()
+
+ go func() {
+ nCastLeak(make([]any, 5))
+ }()
+}
+
+// A context is provided to short-circuit evaluation, leading
+// the sender goroutine to leak.
+func timeout(ctx context.Context) {
+ ch := make(chan any)
+
+ go func() {
+ ch <- struct{}{}
+ }()
+
+ select {
+ case <-ch: // Receive message
+ // Sender is released
+ case <-ctx.Done(): // Context was cancelled or timed out
+ // Sender is leaked
+ }
+}
+
+func Timeout() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ for i := 0; i < 100; i++ {
+ go timeout(ctx)
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE
new file mode 100644
index 0000000000..f4b4b8abc4
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+Copyright © 2021 Institute of Computing Technology, University of New South Wales
+
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the “Software”),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md
new file mode 100644
index 0000000000..88c50e1e48
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md
@@ -0,0 +1,1847 @@
+# GoKer
+
+The following examples are obtained from the publication
+"GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs"
+(doi:10.1109/CGO51591.2021.9370317).
+
+**Authors**
+Ting Yuan (yuanting@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Guangwei Li (liguangwei@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Jie Lu† (lujie@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences;
+Chen Liu (liuchen17z@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China
+Lian Li (lianli@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Jingling Xue (jingling@cse.unsw.edu.au):
+ University of New South Wales, School of Computer Science and Engineering, Sydney, Australia
+
+White paper: https://lujie.ac.cn/files/papers/GoBench.pdf
+
+The examples have been modified in order to run the goroutine leak
+profiler. Buggy snippets are moved from within a unit test to separate
+applications. Each is then independently executed, possibly as multiple
+copies within the same application in order to exercise more interleavings.
+Concurrently, the main program sets up a waiting period (typically 1ms), followed
+by a goroutine leak profile request. Other modifications may involve injecting calls
+to `runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions
+in waiting periods when calling `time.Sleep`, in order to reduce overall testing time.
+
+The resulting goroutine leak profile is analyzed to ensure that no unexpected leaks occurred,
+and that the expected leaks did occur. If the leak is flaky, the only purpose of the expected
+leak list is to protect against unexpected leaks.
+
+The entries below document each of the corresponding leaks.
+
+## Cockroach/10214
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#10214]|[pull request]|[patch]| Resource | AB-BA leak |
+
+[cockroach#10214]:(cockroach10214_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/10214/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/10214
+
+### Description
+
+This goroutine leak is caused by different order when acquiring
+coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats()
+so that cockroachdb can unlock coalescedMu before locking raftMu.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------------------------------------------------
+s.sendQueuedHeartbeats() .
+s.coalescedMu.Lock() [L1] .
+s.sendQueuedHeartbeatsToNode() .
+s.mu.replicas[0].reportUnreachable() .
+s.mu.replicas[0].raftMu.Lock() [L2] .
+. s.mu.replicas[0].tick()
+. s.mu.replicas[0].raftMu.Lock() [L2]
+. s.mu.replicas[0].tickRaftMuLocked()
+. s.mu.replicas[0].mu.Lock() [L3]
+. s.mu.replicas[0].maybeQuiesceLocked()
+. s.mu.replicas[0].maybeCoalesceHeartbeat()
+. s.coalescedMu.Lock() [L1]
+--------------------------------G1,G2 leak------------------------------------------
+```
+
+## Cockroach/1055
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#1055]|[pull request]|[patch]| Mixed | Channel & WaitGroup |
+
+[cockroach#1055]:(cockroach1055_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/1055/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/1055
+
+### Description
+
+1. `Stop()` is called and blocked at `s.stop.Wait()` after acquiring the lock.
+2. `StartTask()` is called and attempts to acquire the lock. It is then blocked.
+3. `Stop()` never finishes since the task doesn't call SetStopped.
+
+### Example execution
+
+```go
+G1 G2.0 G2.1 G2.2 G3
+-------------------------------------------------------------------------------------------------------------------------------
+s[0].stop.Add(1) [1]
+go func() [G2.0]
+s[1].stop.Add(1) [1] .
+go func() [G2.1] .
+s[2].stop.Add(1) [1] . .
+go func() [G2.2] . .
+go func() [G3] . . .
+<-done . . . .
+. s[0].StartTask() . . .
+. s[0].draining == 0 . . .
+. . s[1].StartTask() . .
+. . s[1].draining == 0 . .
+. . . s[2].StartTask() .
+. . . s[2].draining == 0 .
+. . . . s[0].Quiesce()
+. . . . s[0].mu.Lock() [L1[0]]
+. s[0].mu.Lock() [L1[0]] . . .
+. s[0].drain.Add(1) [1] . . .
+. s[0].mu.Unlock() [L1[0]] . . .
+. <-s[0].ShouldStop() . . .
+. . . . s[0].draining = 1
+. . . . s[0].drain.Wait()
+. . s[0].mu.Lock() [L1[1]] . .
+. . s[1].drain.Add(1) [1] . .
+. . s[1].mu.Unlock() [L1[1]] . .
+. . <-s[1].ShouldStop() . .
+. . . s[2].mu.Lock() [L1[2]] .
+. . . s[2].drain.Add() [1] .
+. . . s[2].mu.Unlock() [L1[2]] .
+. . . <-s[2].ShouldStop() .
+----------------------------------------------------G1, G2.[0..2], G3 leak-----------------------------------------------------
+```
+
+## Cockroach/10790
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#10790]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#10790]:(cockroach10790_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/10790/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/10790
+
+### Description
+
+It is possible that a message from `ctxDone` will make `beginCmds`
+return without draining the channel `ch`, so that anonymous function
+goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+-----------------------------------------------------
+. . r.sendChans()
+r.beginCmds() . .
+. . ch1 <- true
+<- ch1 . .
+. . ch2 <- true
+...
+. cancel()
+<- ch1
+------------------G1 leak----------------------------
+```
+
+## Cockroach/13197
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#13197]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#13197]:(cockroach13197_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/13197/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/13197
+
+### Description
+
+One goroutine executing `(*Tx).awaitDone()` blocks,
+waiting for a signal from `context.Done()`.
+
+### Example execution
+
+```go
+G1 G2
+-------------------------------
+begin()
+. awaitDone()
+return .
+. <-tx.ctx.Done()
+-----------G2 leaks------------
+```
+
+## Cockroach/13755
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#13755]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#13755]:(cockroach13755_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/13755/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/13755
+
+### Description
+
+The buggy code does not close the db query result (rows),
+so that one goroutine running `(*Rows).awaitDone` is blocked forever.
+The blocking goroutine is waiting for cancel signal from context.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------
+initContextClose()
+. awaitDone()
+return .
+. <-tx.ctx.Done()
+---------------G2 leaks----------------
+```
+
+## Cockroach/1462
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#1462]|[pull request]|[patch]| Mixed | Channel & WaitGroup |
+
+[cockroach#1462]:(cockroach1462_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/1462/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/1462
+
+### Description
+
+Executing `<-stopper.ShouldStop()` in `processEventsUntil` may cause
+goroutines created by `lt.RunWorker` in `lt.start` to be stuck sending
+a message over `lt.Events`. The main thread is then stuck at `s.stop.Wait()`,
+since the sender goroutines cannot call `s.stop.Done()`.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------------------------------------------------------------------
+NewLocalInterceptableTransport()
+lt.start()
+lt.stopper.RunWorker()
+s.AddWorker()
+s.stop.Add(1) [1]
+go func() [G2]
+stopper.RunWorker() .
+s.AddWorker() .
+s.stop.Add(1) [2] .
+go func() [G3] .
+s.Stop() . .
+s.Quiesce() . .
+. select [default] .
+. lt.Events <- interceptMessage(0) .
+close(s.stopper) . .
+. . select [<-stopper.ShouldStop()]
+. . <<<done>>>
+s.stop.Wait() .
+----------------------------------------------G1,G2 leak-----------------------------------------------
+```
+
+## Cockroach/16167
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#16167]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#16167]:(cockroach16167_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/16167/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/16167
+
+### Description
+
+This is another example of goroutine leaks caused by recursively
+acquiring `RWLock`.
+There are two lock variables (`systemConfigCond` and `systemConfigMu`)
+which refer to the same underlying lock. The leak involves two goroutines.
+The first acquires `systemConfigMu.Lock()`, then tries to acquire `systemConfigMu.RLock()`.
+The second acquires `systemConfigMu.Lock()`.
+If the second goroutine interleaves in between the two lock operations of the
+first goroutine, both goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------------------------------
+. e.Start()
+. e.updateSystemConfig()
+e.execParsed() .
+e.systemConfigCond.L.Lock() [L1] .
+. e.systemConfigMu.Lock() [L1]
+e.systemConfigMu.RLock() [L1] .
+------------------------G1,G2 leak-----------------------------
+```
+
+## Cockroach/18101
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#18101]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#18101]:(cockroach18101_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/18101/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/18101
+
+### Description
+
+The `context.Done()` signal short-circuits the reader goroutine, but not
+the senders, leading them to leak.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+--------------------------------------------------------------
+restore()
+. splitAndScatter()
+<-readyForImportCh .
+<-readyForImportCh <==> readyForImportCh<-
+...
+. . cancel()
+<<done>> . <<done>>
+ readyForImportCh<-
+-----------------------G2 leaks--------------------------------
+```
+
+## Cockroach/2448
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#2448]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#2448]:(cockroach2448_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/2448/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/2448
+
+### Description
+
+This bug is caused by two goroutines waiting for each other
+to unblock their channels:
+
+1) `MultiRaft` sends the commit event for the Membership change
+2) `store.processRaft` takes it and begins processing
+3) another command commits and triggers another `sendEvent`, but
+ this blocks since `store.processRaft` isn't ready for another
+ `select`. Consequently the main `MultiRaft` loop is waiting for
+ that as well.
+4) the `Membership` change was applied to the range, and the store
+ now tries to execute the callback
+5) the callback tries to write to `callbackChan`, but that is
+ consumed by the `MultiRaft` loop, which is currently waiting
+ for `store.processRaft` to consume from the events channel,
+ which it will only do after the callback has completed.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------------------
+s.processRaft() st.start()
+select .
+. select [default]
+. s.handleWriteResponse()
+. s.sendEvent()
+. select
+<-s.multiraft.Events <----> m.Events <- event
+. select [default]
+. s.handleWriteResponse()
+. s.sendEvent()
+. select [m.Events<-, <-s.stopper.ShouldStop()]
+callback() .
+select [
+ m.callbackChan<-,
+ <-s.stopper.ShouldStop()
+] .
+------------------------------G1,G2 leak----------------------------------
+```
+
+## Cockroach/24808
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#24808]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#24808]:(cockroach24808_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/24808/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/24808
+
+### Description
+
+When we `Start` the `Compactor`, it may already have received
+`Suggestions`, leaking the previously blocking write to a full channel.
+
+### Example execution
+
+```go
+G1
+------------------------------------------------
+...
+compactor.ch <-
+compactor.Start()
+compactor.ch <-
+--------------------G1 leaks--------------------
+```
+
+## Cockroach/25456
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#25456]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#25456]:(cockroach25456_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/25456/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/25456
+
+### Description
+
+When `CheckConsistency` (in the complete code) returns an error, the queue
+checks whether the store is draining to decide whether the error is worth
+logging. This check was incorrect and would block until the store actually
+started draining.
+
+### Example execution
+
+```go
+G1
+---------------------------------------
+...
+<-repl.store.Stopper().ShouldQuiesce()
+---------------G1 leaks----------------
+```
+
+## Cockroach/35073
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#35073]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#35073]:(cockroach35073_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/35073/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/35073
+
+### Description
+
+Previously, the outbox could fail during startup without closing its
+`RowChannel`. This could lead to goroutine leaks in rare cases due
+to channel communication mismatch.
+
+### Example execution
+
+## Cockroach/35931
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#35931]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#35931]:(cockroach35931_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/35931/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/35931
+
+### Description
+
+Previously, if a processor that reads from multiple inputs was waiting
+on one input to provide more data, and the other input was full, and
+both inputs were connected to inbound streams, it was possible to
+cause goroutine leaks during flow cancellation when trying to propagate
+the cancellation metadata messages into the flow. The cancellation method
+wrote metadata messages to each inbound stream one at a time, so if the
+first one was full, the canceller would block and never send a cancellation
+message to the second stream, which was the one actually being read from.
+
+### Example execution
+
+## Cockroach/3710
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#3710]|[pull request]|[patch]| Resource | RWR Deadlock |
+
+[cockroach#3710]:(cockroach3710_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/3710/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/3710
+
+### Description
+
+The goroutine leak is caused by acquiring a RLock twice in a call chain.
+`ForceRaftLogScanAndProcess(acquire s.mu.RLock())`
+`-> MaybeAdd()`
+`-> shouldQueue()`
+`-> getTruncatableIndexes()`
+`->RaftStatus(acquire s.mu.Rlock())`
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------------------------
+store.ForceRaftLogScanAndProcess()
+s.mu.RLock()
+s.raftLogQueue.MaybeAdd()
+bq.impl.shouldQueue()
+getTruncatableIndexes()
+r.store.RaftStatus()
+. store.processRaft()
+. s.mu.Lock()
+s.mu.RLock()
+----------------------G1,G2 leak-----------------------------
+```
+
+## Cockroach/584
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#584]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#584]:(cockroach584_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/584/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/584
+
+### Description
+
+Missing call to `mu.Unlock()` before the `break` in the loop.
+
+### Example execution
+
+```go
+G1
+---------------------------
+g.bootstrap()
+g.mu.Lock() [L1]
+if g.closed { ==> break
+g.manage()
+g.mu.Lock() [L1]
+----------G1 leaks---------
+```
+
+## Cockroach/6181
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#6181]|[pull request]|[patch]| Resource | RWR Deadlock |
+
+[cockroach#6181]:(cockroach6181_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/6181/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/6181
+
+### Description
+
+The same `RWMutex` may be recursively acquired for both reading and writing.
+
+### Example execution
+
+```go
+G1 G2 G3 ...
+-----------------------------------------------------------------------------------------------
+testRangeCacheCoalescedRquests()
+initTestDescriptorDB()
+pauseLookupResumeAndAssert()
+return
+. doLookupWithToken()
+. . doLookupWithToken()
+. rc.LookupRangeDescriptor() .
+. . rc.LookupRangeDescriptor()
+. rdc.rangeCacheMu.RLock() .
+. rdc.String() .
+. . rdc.rangeCacheMu.RLock()
+. . fmt.Printf()
+. . rdc.rangeCacheMu.RUnlock()
+. . rdc.rangeCacheMu.Lock()
+. rdc.rangeCacheMu.RLock() .
+-----------------------------------G2,G3,... leak----------------------------------------------
+```
+
+## Cockroach/7504
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#7504]|[pull request]|[patch]| Resource | AB-BA Deadlock |
+
+[cockroach#7504]:(cockroach7504_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/7504/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/7504
+
+### Description
+
+The locks are acquired as `leaseState` and `tableNameCache` in `Release()`, but
+as `tableNameCache` and `leaseState` in `AcquireByName`, leading to an AB-BA deadlock.
+
+### Example execution
+
+```go
+G1 G2
+-----------------------------------------------------
+mgr.AcquireByName() mgr.Release()
+m.tableNames.get(id) .
+c.mu.Lock() [L2] .
+. t.release(lease)
+. t.mu.Lock() [L3]
+. s.mu.Lock() [L1]
+lease.mu.Lock() [L1] .
+. t.removeLease(s)
+. t.tableNameCache.remove()
+. c.mu.Lock() [L2]
+---------------------G1, G2 leak---------------------
+```
+
+## Cockroach/9935
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#9935]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#9935]:(cockroach9935_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/9935/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/9935
+
+### Description
+
+This bug is caused by acquiring `l.mu.Lock()` twice.
+
+### Example execution
+
+## Etcd/10492
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#10492]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#10492]:(etcd10492_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/10492/files
+[pull request]:https://github.com/etcd-io/etcd/pull/10492
+
+### Description
+
+A simple double locking case for lines 19, 31.
+
+## Etcd/5509
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#5509]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#5509]:(etcd5509_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/5509/files
+[pull request]:https://github.com/etcd-io/etcd/pull/5509
+
+### Description
+
+`r.acquire()` returns holding `r.client.mu.RLock()` on a failure path (line 42).
+This causes any call to `client.Close()` to leak goroutines.
+
+## Etcd/6708
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6708]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#6708]:(etcd6708_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6708/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6708
+
+### Description
+
+Line 54, 49 double locking
+
+## Etcd/6857
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6857]|[pull request]|[patch]| Communication | Channel |
+
+[etcd#6857]:(etcd6857_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6857/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6857
+
+### Description
+
+Choosing a different case in a `select` statement (`n.stop`) will
+lead to goroutine leaks when sending over `n.status`.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------
+n.run() . .
+. . n.Stop()
+. . n.stop<-
+<-n.stop . .
+. . <-n.done
+close(n.done) . .
+return . .
+. . return
+. n.Status()
+. n.status<-
+----------------G2 leaks-------------------
+```
+
+## Etcd/6873
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6873]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#6873]:(etcd6873_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6873/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6873
+
+### Description
+
+This goroutine leak involves a goroutine acquiring a lock and being
+blocked over a channel operation with no partner, while another tries
+to acquire the same lock.
+
+### Example execution
+
+```go
+G1 G2 G3
+--------------------------------------------------------------
+newWatchBroadcasts()
+wbs.update()
+wbs.updatec <-
+return
+. <-wbs.updatec .
+. wbs.coalesce() .
+. . wbs.stop()
+. . wbs.mu.Lock()
+. . close(wbs.updatec)
+. . <-wbs.donec
+. wbs.mu.Lock() .
+---------------------G2,G3 leak--------------------------------
+```
+
+## Etcd/7492
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#7492]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#7492]:(etcd7492_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/7492/files
+[pull request]:https://github.com/etcd-io/etcd/pull/7492
+
+### Description
+
+This goroutine leak involves a goroutine acquiring a lock and being
+blocked over a channel operation with no partner, while another tries
+to acquire the same lock.
+
+### Example execution
+
+```go
+G2 G1
+---------------------------------------------------------------
+. stk.run()
+ts.assignSimpleTokenToUser() .
+t.simpleTokensMu.Lock() .
+t.simpleTokenKeeper.addSimpleToken() .
+tm.addSimpleTokenCh <- true .
+. <-tm.addSimpleTokenCh
+t.simpleTokensMu.Unlock() .
+ts.assignSimpleTokenToUser() .
+...
+t.simpleTokensMu.Lock()
+. <-tokenTicker.C
+tm.addSimpleTokenCh <- true .
+. tm.deleteTokenFunc()
+. t.simpleTokensMu.Lock()
+---------------------------G1,G2 leak--------------------------
+```
+
+## Etcd/7902
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#7902]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#7902]:(etcd7902_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/7902/files
+[pull request]:https://github.com/etcd-io/etcd/pull/7902
+
+### Description
+
+If the follower goroutine acquires `mu.Lock()` first and calls
+`rc.release()`, it will be blocked sending over `rcNextc`.
+Only the leader can `close(nextc)` to unblock the follower.
+However, in order to invoke `rc.release()`, the leader needs
+to acquire `mu.Lock()`.
+The fix is to remove the lock and unlock around `rc.release()`.
+
+### Example execution
+
+```go
+G1 G2 (leader) G3 (follower)
+---------------------------------------------------------------------
+runElectionFunc()
+doRounds()
+wg.Wait()
+. ...
+. mu.Lock()
+. rc.validate()
+. rcNextc = nextc
+. mu.Unlock() ...
+. . mu.Lock()
+. . rc.validate()
+. . mu.Unlock()
+. . mu.Lock()
+. . rc.release()
+. . <-rcNextc
+. mu.Lock()
+-------------------------G1,G2,G3 leak--------------------------
+```
+
+## Grpc/1275
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1275]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1275]:(grpc1275_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1275/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1275
+
+### Description
+
+Two goroutines are involved in this leak. The main goroutine
+is blocked at `case <- donec`, and is waiting for the second goroutine
+to close the channel.
+The second goroutine is created by the main goroutine. It is blocked
+when calling `stream.Read()`, which invokes `recvBufferRead.Read()`.
+The second goroutine is blocked at case `i := r.recv.get()`, and it is
+waiting for someone to send a message to this channel.
+It is the `client.CloseStream()` method called by the main goroutine that
+should send the message, but it is not. The patch is to send out this message.
+
+### Example execution
+
+```go
+G1 G2
+-----------------------------------------------------
+testInflightStreamClosing()
+. stream.Read()
+. io.ReadFull()
+. <-r.recv.get()
+CloseStream()
+<-donec
+---------------------G1, G2 leak---------------------
+```
+
+## Grpc/1424
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1424]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1424]:(grpc1424_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1424/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1424
+
+### Description
+
+The goroutine running `cc.lbWatcher` returns without
+draining the `done` channel.
+
+### Example execution
+
+```go
+G1 G2 G3
+-----------------------------------------------------------------
+DialContext() . .
+. cc.dopts.balancer.Notify() .
+. . cc.lbWatcher()
+. <-doneChan
+close()
+---------------------------G2 leaks-------------------------------
+```
+
+## Grpc/1460
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1460]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[grpc#1460]:(grpc1460_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1460/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1460
+
+### Description
+
+When gRPC keepalives are enabled (which isn't the case
+by default at this time) and PermitWithoutStream is false
+(the default), the client can leak goroutines when transitioning
+between having no active stream and having one active
+stream. The keepalive() goroutine is stuck at "<-t.awakenKeepalive",
+while the main goroutine is stuck in NewStream() on t.mu.Lock().
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------
+client.keepalive()
+. client.NewStream()
+t.mu.Lock()
+<-t.awakenKeepalive
+. t.mu.Lock()
+---------------G1,G2 leak-------------------
+```
+
+## Grpc/3017
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#3017]|[pull request]|[patch]| Resource | Missing unlock |
+
+[grpc#3017]:(grpc3017_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/3017/files
+[pull request]:https://github.com/grpc/grpc-go/pull/3017
+
+### Description
+
+Line 65 is an execution path with a missing unlock.
+
+### Example execution
+
+```go
+G1 G2 G3
+------------------------------------------------------------------------------------------------
+NewSubConn([1])
+ccc.mu.Lock() [L1]
+sc = 1
+ccc.subConnToAddr[1] = 1
+go func() [G2]
+<-done .
+. ccc.RemoveSubConn(1)
+. ccc.mu.Lock()
+. addr = 1
+. entry = &subConnCacheEntry_grpc3017{}
+. cc.subConnCache[1] = entry
+. timer = time.AfterFunc() [G3]
+. entry.cancel = func()
+. sc = ccc.NewSubConn([1])
+. ccc.mu.Lock() [L1]
+. entry.cancel()
+. !timer.Stop() [true]
+. entry.abortDeleting = true
+. . ccc.mu.Lock()
+. . <<<done>>>
+. ccc.RemoveSubConn(1)
+. ccc.mu.Lock() [L1]
+-------------------------------------------G1, G2 leak-----------------------------------------
+```
+
+## Grpc/660
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#660]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#660]:(grpc660_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/660/files
+[pull request]:https://github.com/grpc/grpc-go/pull/660
+
+### Description
+
+The parent function could return without draining the done channel.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+-------------------------------------------------------------
+doCloseLoopUnary()
+. bc.stop <- true
+<-bc.stop
+return
+. done <-
+----------------------G2 leak--------------------------------
+```
+
+## Grpc/795
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#795]|[pull request]|[patch]| Resource | Double locking |
+
+[grpc#795]:(grpc795_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/795/files
+[pull request]:https://github.com/grpc/grpc-go/pull/795
+
+### Description
+
+Line 20 is an execution path with a missing unlock.
+
+## Grpc/862
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#862]|[pull request]|[patch]| Communication | Channel & Context |
+
+[grpc#862]:(grpc862_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/862/files
+[pull request]:https://github.com/grpc/grpc-go/pull/862
+
+### Description
+
+When return value `conn` is `nil`, `cc(ClientConn)` is not closed.
+The goroutine executing resetAddrConn is leaked. The patch is to
+close `ClientConn` in `defer func()`.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------
+DialContext()
+. cc.resetAddrConn()
+. resetTransport()
+. <-ac.ctx.Done()
+--------------G2 leak------------------
+```
+
+## Hugo/3251
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#3251]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[hugo#3251]:(hugo3251_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/3251/files
+[pull request]:https://github.com/gohugoio/hugo/pull/3251
+
+### Description
+
+A goroutine can hold `Lock()` at line 20 then acquire `RLock()` at
+line 29. `RLock()` at line 29 will never be acquired because `Lock()`
+at line 20 will never be released.
+
+### Example execution
+
+```go
+G1 G2 G3
+------------------------------------------------------------------------------------------
+wg.Add(1) [W1: 1]
+go func() [G2]
+go func() [G3]
+. resGetRemote()
+. remoteURLLock.URLLock(url)
+. l.Lock() [L1]
+. l.m[url] = &sync.Mutex{} [L2]
+. l.m[url].Lock() [L2]
+. l.Unlock() [L1]
+. . resGetRemote()
+. . remoteURLLock.URLLock(url)
+. . l.Lock() [L1]
+. . l.m[url].Lock() [L2]
+. remoteURLLock.URLUnlock(url)
+. l.RLock() [L1]
+...
+wg.Wait() [W1]
+----------------------------------------G1,G2,G3 leak--------------------------------------
+```
+
+## Hugo/5379
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#5379]|[pull request]|[patch]| Resource | Double locking |
+
+[hugo#5379]:(hugo5379_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/5379/files
+[pull request]:https://github.com/gohugoio/hugo/pull/5379
+
+### Description
+
+A goroutine first acquire `contentInitMu` at line 99 then
+acquire the same `Mutex` at line 66
+
+## Istio/16224
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#16224]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[istio#16224]:(istio16224_test.go)
+[patch]:https://github.com/istio/istio/pull/16224/files
+[pull request]:https://github.com/istio/istio/pull/16224
+
+### Description
+
+A goroutine holds a `Mutex` at line 91 and is then blocked at line 93.
+Another goroutine attempts to acquire the same `Mutex` at line 101 to
+further drain the same channel at line 103.
+
+## Istio/17860
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#17860]|[pull request]|[patch]| Communication | Channel |
+
+[istio#17860]:(istio17860_test.go)
+[patch]:https://github.com/istio/istio/pull/17860/files
+[pull request]:https://github.com/istio/istio/pull/17860
+
+### Description
+
+`a.statusCh` can't be drained at line 70.
+
+## Istio/18454
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#18454]|[pull request]|[patch]| Communication | Channel & Context |
+
+[istio#18454]:(istio18454_test.go)
+[patch]:https://github.com/istio/istio/pull/18454/files
+[pull request]:https://github.com/istio/istio/pull/18454
+
+### Description
+
+`s.timer.Stop()` at lines 56 and 61 can be called concurrently
+(i.e. from their entry point at line 104 and line 66).
+See [Timer](https://golang.org/pkg/time/#Timer).
+
+## Kubernetes/10182
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#10182]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#10182]:(kubernetes10182_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/10182/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/10182
+
+### Description
+
+Goroutine 1 is blocked on a lock held by goroutine 3,
+while goroutine 3 is blocked on sending message to `ch`,
+which is read by goroutine 1.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------------------------------------------
+s.Start()
+s.syncBatch()
+. s.SetPodStatus()
+. s.podStatusesLock.Lock()
+<-s.podStatusChannel <===> s.podStatusChannel <- true
+. s.podStatusesLock.Unlock()
+. return
+s.DeletePodStatus() .
+. . s.podStatusesLock.Lock()
+. . s.podStatusChannel <- true
+s.podStatusesLock.Lock()
+-----------------------------G1,G3 leak-----------------------------------------
+```
+
+## Kubernetes/11298
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#11298]|[pull request]|[patch]| Communication | Channel & Condition Variable |
+
+[kubernetes#11298]:(kubernetes11298_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/11298/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/11298
+
+### Description
+
+`n.node` used the `n.lock` as the underlying locker. The service loop initially
+locked it, the `Notify` function tried to lock it before calling `n.node.Signal()`,
+leading to a goroutine leak. `n.cond.Signal()` at line 59 and line 81 are not
+guaranteed to unblock the `n.cond.Wait` at line 56.
+
+## Kubernetes/13135
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#13135]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[kubernetes#13135]:(kubernetes13135_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/13135/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/13135
+
+### Description
+
+```go
+G1 G2 G3
+----------------------------------------------------------------------------------
+NewCacher()
+watchCache.SetOnReplace()
+watchCache.SetOnEvent()
+. cacher.startCaching()
+. c.Lock()
+. c.reflector.ListAndWatch()
+. r.syncWith()
+. r.store.Replace()
+. w.Lock()
+. w.onReplace()
+. cacher.initOnce.Do()
+. cacher.Unlock()
+return cacher .
+. . c.watchCache.Add()
+. . w.processEvent()
+. . w.Lock()
+. cacher.startCaching() .
+. c.Lock() .
+...
+. c.Lock()
+. w.Lock()
+--------------------------------G2,G3 leak-----------------------------------------
+```
+
+## Kubernetes/1321
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#1321]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#1321]:(kubernetes1321_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/1321/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/1321
+
+### Description
+
+This is a lock-channel bug. The first goroutine invokes
+`distribute()`, which holds `m.lock.Lock()`, while blocking
+at sending message to `w.result`. The second goroutine
+invokes `stopWatching()` function, which can unblock the first
+goroutine by closing `w.result`. However, in order to close `w.result`,
+`stopWatching()` function needs to acquire `m.lock.Lock()`.
+
+The fix is to introduce another channel and put receive message
+from the second channel in the same `select` statement as the
+`w.result`. Close the second channel can unblock the first
+goroutine, while no need to hold `m.lock.Lock()`.
+
+### Example execution
+
+```go
+G1 G2
+----------------------------------------------
+testMuxWatcherClose()
+NewMux()
+. m.loop()
+. m.distribute()
+. m.lock.Lock()
+. w.result <- true
+w := m.Watch()
+w.Stop()
+mw.m.stopWatching()
+m.lock.Lock()
+---------------G1,G2 leak---------------------
+```
+
+## Kubernetes/25331
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#25331]|[pull request]|[patch]| Communication | Channel & Context |
+
+[kubernetes#25331]:(kubernetes25331_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/25331/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/25331
+
+### Description
+
+A potential goroutine leak occurs when an error has happened,
+blocking `resultChan`, while cancelling context in `Stop()`.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------
+wc.run()
+. wc.Stop()
+. wc.errChan <-
+. wc.cancel()
+<-wc.errChan
+wc.cancel()
+wc.resultChan <-
+-------------G1 leak----------------
+```
+
+## Kubernetes/26980
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#26980]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#26980]:(kubernetes26980_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/26980/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/26980
+
+### Description
+
+A goroutine holds a `Mutex` at line 24 and blocked at line 35.
+Another goroutine blocked at line 58 by acquiring the same `Mutex`.
+
+## Kubernetes/30872
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#30872]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[kubernetes#30872]:(kubernetes30872_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/30872/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/30872
+
+### Description
+
+The lock is acquired both at lines 92 and 157.
+
+## Kubernetes/38669
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#38669]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#38669]:(kubernetes38669_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/38669/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/38669
+
+### Description
+
+No sender for line 33.
+
+## Kubernetes/5316
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#5316]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#5316]:(kubernetes5316_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/5316/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/5316
+
+### Description
+
+If the main goroutine selects a case that doesn't consume
+the channels, the anonymous goroutine will be blocked on sending
+to channel.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------
+finishRequest()
+. fn()
+time.After()
+. errCh<-/ch<-
+--------------G2 leaks----------------
+```
+
+## Kubernetes/58107
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#58107]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[kubernetes#58107]:(kubernetes58107_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/58107/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/58107
+
+### Description
+
+The rules for read and write lock: allows concurrent read lock;
+write lock has higher priority than read lock.
+
+There are two queues (queue 1 and queue 2) involved in this bug,
+and the two queues are protected by the same read-write lock
+(`rq.workerLock.RLock()`). Before getting an element from queue 1 or
+queue 2, `rq.workerLock.RLock()` is acquired. If the queue is empty,
+`cond.Wait()` will be invoked. There is another goroutine (goroutine D),
+which will periodically invoke `rq.workerLock.Lock()`. Under the following
+situation, deadlock will happen. Queue 1 is empty, so that some goroutines
+hold `rq.workerLock.RLock()`, and block at `cond.Wait()`. Goroutine D is
+blocked when acquiring `rq.workerLock.Lock()`. Some goroutines try to process
+jobs in queue 2, but they are blocked when acquiring `rq.workerLock.RLock()`,
+since write lock has a higher priority.
+
+The fix is to not acquire `rq.workerLock.RLock()`, while pulling data
+from any queue. Therefore, when a goroutine is blocked at `cond.Wait()`,
+`rq.workerLock.RLock()` is not held.
+
+### Example execution
+
+```go
+G3 G4 G5
+--------------------------------------------------------------------
+. . Sync()
+rq.workerLock.RLock() . .
+q.cond.Wait() . .
+. . rq.workerLock.Lock()
+. rq.workerLock.RLock()
+. q.cond.L.Lock()
+-----------------------------G3,G4,G5 leak-----------------------------
+```
+
+## Kubernetes/62464
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#62464]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[kubernetes#62464]:(kubernetes62464_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/62464/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/62464
+
+### Description
+
+This is another example for recursive read lock bug. It has
+been noticed by the go developers that RLock should not be
+recursively used in the same thread.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------
+m.reconcileState()
+m.state.GetCPUSetOrDefault()
+s.RLock()
+s.GetCPUSet()
+. p.RemoveContainer()
+. s.GetDefaultCPUSet()
+. s.SetDefaultCPUSet()
+. s.Lock()
+s.RLock()
+---------------------G1,G2 leak--------------------------
+```
+
+## Kubernetes/6632
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#6632]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#6632]:(kubernetes6632_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/6632/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/6632
+
+### Description
+
+When `resetChan` is full, `WriteFrame` holds the lock and blocks
+on the channel. Then `monitor()` fails to close the `resetChan`
+because the lock is already held by `WriteFrame`.
+
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+----------------------------------------------------------------
+i.monitor()
+<-i.conn.closeChan
+. i.WriteFrame()
+. i.writeLock.Lock()
+. i.resetChan <-
+. . i.conn.closeChan<-
+i.writeLock.Lock()
+----------------------G1,G2 leak--------------------------------
+```
+
+## Kubernetes/70277
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#70277]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#70277]:kubernetes70277_test.go
+[patch]:https://github.com/kubernetes/kubernetes/pull/70277/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/70277
+
+### Description
+
+`wait.poller()` returns a function with type `WaitFunc`.
+The function creates a goroutine, and that goroutine only
+quits when either `after` or `done` is closed.
+
+The `doneCh` defined at line 70 is never closed.
+
+## Moby/17176
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#17176]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#17176]:(moby17176_test.go)
+[patch]:https://github.com/moby/moby/pull/17176/files
+[pull request]:https://github.com/moby/moby/pull/17176
+
+### Description
+
+`devices.nrDeletedDevices` takes `devices.Lock()` but does
+not release it (line 36) if there are no deleted devices. This will block
+other goroutines trying to acquire `devices.Lock()`.
+
+## Moby/21233
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#21233]|[pull request]|[patch]| Communication | Channel |
+
+[moby#21233]:(moby21233_test.go)
+[patch]:https://github.com/moby/moby/pull/21233/files
+[pull request]:https://github.com/moby/moby/pull/21233
+
+### Description
+
+This test was checking that it received every progress update that was
+produced. But delivery of these intermediate progress updates is not
+guaranteed. A new update can overwrite the previous one if the previous
+one hasn't been sent to the channel yet.
+
+The call to `t.Fatalf` terminated the current goroutine which was consuming
+the channel, which caused a deadlock and eventual test timeout rather
+than a proper failure message.
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------------------------
+testTransfer() . .
+tm.Transfer() . .
+t.Watch() . .
+. WriteProgress() .
+. ProgressChan<- .
+. . <-progressChan
+. ... ...
+. return .
+. <-progressChan
+<-watcher.running
+----------------------G1,G3 leak--------------------------
+```
+
+## Moby/25384
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#25384]|[pull request]|[patch]| Mixed | Misuse WaitGroup |
+
+[moby#25384]:(moby25384_test.go)
+[patch]:https://github.com/moby/moby/pull/25384/files
+[pull request]:https://github.com/moby/moby/pull/25384
+
+### Description
+
+When `n=1` (where `n` is `len(pm.plugins)`), the location of `group.Wait()` doesn’t matter.
+When `n > 1`, `group.Wait()` is invoked in each iteration. Whenever
+`group.Wait()` is invoked, it waits for `group.Done()` to be executed `n` times.
+However, `group.Done()` is only executed once in one iteration.
+
+Misuse of sync.WaitGroup
+
+## Moby/27782
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#27782]|[pull request]|[patch]| Communication | Channel & Condition Variable |
+
+[moby#27782]:(moby27782_test.go)
+[patch]:https://github.com/moby/moby/pull/27782/files
+[pull request]:https://github.com/moby/moby/pull/27782
+
+### Description
+
+### Example execution
+
+```go
+G1 G2 G3
+-----------------------------------------------------------------------
+InitializeStdio()
+startLogging()
+l.ReadLogs()
+NewLogWatcher()
+. l.readLogs()
+container.Reset() .
+LogDriver.Close() .
+r.Close() .
+close(w.closeNotifier) .
+. followLogs(logWatcher)
+. watchFile()
+. New()
+. NewEventWatcher()
+. NewWatcher()
+. . w.readEvents()
+. . event.ignoreLinux()
+. . return false
+. <-logWatcher.WatchClose() .
+. fileWatcher.Remove() .
+. w.cv.Wait() .
+. . w.Events <- event
+------------------------------G2,G3 leak-------------------------------
+```
+
+## Moby/28462
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#28462]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[moby#28462]:(moby28462_test.go)
+[patch]:https://github.com/moby/moby/pull/28462/files
+[pull request]:https://github.com/moby/moby/pull/28462
+
+### Description
+
+One goroutine may acquire a lock and try to send a message over channel `stop`,
+while the other will try to acquire the same lock. With the wrong ordering,
+both goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------
+monitor()
+handleProbeResult()
+. d.StateChanged()
+. c.Lock()
+. d.updateHealthMonitorElseBranch()
+. h.CloseMonitorChannel()
+. s.stop <- struct{}{}
+c.Lock()
+----------------------G1,G2 leak------------------------------
+```
+
+## Moby/30408
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#30408]|[pull request]|[patch]| Communication | Condition Variable |
+
+[moby#30408]:(moby30408_test.go)
+[patch]:https://github.com/moby/moby/pull/30408/files
+[pull request]:https://github.com/moby/moby/pull/30408
+
+### Description
+
+`Wait()` at line 22 has no corresponding `Signal()` or `Broadcast()`.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------
+testActive()
+. p.waitActive()
+. p.activateWait.L.Lock()
+. p.activateWait.Wait()
+<-done
+-----------------G1,G2 leak---------------
+```
+
+## Moby/33781
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#33781]|[pull request]|[patch]| Communication | Channel & Context |
+
+[moby#33781]:(moby33781_test.go)
+[patch]:https://github.com/moby/moby/pull/33781/files
+[pull request]:https://github.com/moby/moby/pull/33781
+
+### Description
+
+The goroutine created using an anonymous function is blocked
+sending a message over an unbuffered channel. However there
+exists a path in the parent goroutine where the parent function
+will return without draining the channel.
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------
+monitor() .
+<-time.After() .
+. .
+<-stop stop<-
+.
+cancelProbe()
+return
+. result<-
+----------------G3 leak------------------
+```
+
+## Moby/36114
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#36114]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#36114]:(moby36114_test.go)
+[patch]:https://github.com/moby/moby/pull/36114/files
+[pull request]:https://github.com/moby/moby/pull/36114
+
+### Description
+
+The lock for the struct `svm` has already been locked when calling
+`svm.hotRemoveVHDsAtStart()`.
+
+## Moby/4951
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#4951]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[moby#4951]:(moby4951_test.go)
+[patch]:https://github.com/moby/moby/pull/4951/files
+[pull request]:https://github.com/moby/moby/pull/4951
+
+### Description
+
+The root cause and patch is clearly explained in the commit
+description. The global lock is `devices.Lock()`, and the device
+lock is `baseInfo.lock.Lock()`. It is very likely that this bug
+can be reproduced.
+
+## Moby/7559
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#7559]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#7559]:(moby7559_test.go)
+[patch]:https://github.com/moby/moby/pull/7559/files
+[pull request]:https://github.com/moby/moby/pull/7559
+
+### Description
+
+Line 25 is missing a call to `.Unlock`.
+
+### Example execution
+
+```go
+G1
+---------------------------
+proxy.connTrackLock.Lock()
+if err != nil { continue }
+proxy.connTrackLock.Lock()
+-----------G1 leaks--------
+```
+
+## Serving/2137
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[serving#2137]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[serving#2137]:(serving2137_test.go)
+[patch]:https://github.com/knative/serving/pull/2137/files
+[pull request]:https://github.com/knative/serving/pull/2137
+
+### Description
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------------------------------------------------
+b.concurrentRequests(2) . .
+b.concurrentRequest() . .
+r.lock.Lock() . .
+. start.Done() .
+start.Wait() . .
+b.concurrentRequest() . .
+r.lock.Lock() . .
+. . start.Done()
+start.Wait() . .
+unlockAll(locks) . .
+unlock(lc) . .
+req.lock.Unlock() . .
+ok := <-req.accepted . .
+. b.Maybe() .
+. b.activeRequests <- t .
+. thunk() .
+. r.lock.Lock() .
+. . b.Maybe()
+. . b.activeRequests <- t
+----------------------------G1,G2,G3 leak-----------------------------------------
+```
+
+## Syncthing/4829
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#4829]|[pull request]|[patch]| Resource | Double locking |
+
+[syncthing#4829]:(syncthing4829_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/4829/files
+[pull request]:https://github.com/syncthing/syncthing/pull/4829
+
+### Description
+
+Double locking at line 17 and line 30.
+
+### Example execution
+
+```go
+G1
+---------------------------
+mapping.clearAddresses()
+m.mut.Lock() [L2]
+m.notify(...)
+m.mut.RLock() [L2]
+----------G1 leaks---------
+```
+
+## Syncthing/5795
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#5795]|[pull request]|[patch]| Communication | Channel |
+
+[syncthing#5795]:(syncthing5795_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/5795/files
+[pull request]:https://github.com/syncthing/syncthing/pull/5795
+
+### Description
+
+`<-c.dispatcherLoopStopped` at line 82 is blocking forever because
+`dispatcherLoop()` is blocking at line 72.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------
+c.Start()
+go c.dispatcherLoop() [G3]
+. select [<-c.inbox, <-c.closed]
+c.inbox <- <================> [<-c.inbox]
+<-c.dispatcherLoopStopped .
+. default
+. c.ccFn()/c.Close()
+. close(c.closed)
+. <-c.dispatcherLoopStopped
+---------------------G1,G2 leak-------------------------------
+``` \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
new file mode 100644
index 0000000000..4f5ef3b0fc
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
@@ -0,0 +1,145 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10214
+ * Buggy version: 7207111aa3a43df0552509365fdec741a53f873f
+ * fix commit-id: 27e863d90ab0660494778f1c35966cc5ddc38e32
+ * Flaky: 3/100
+ * Description: This goroutine leak is caused by different order when acquiring
+ * coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats()
+ * so that cockroachdb can unlock coalescedMu before locking raftMu.
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("Cockroach10214", Cockroach10214)
+}
+
+type Store_cockroach10214 struct {
+ coalescedMu struct {
+ sync.Mutex // L1
+ heartbeatResponses []int
+ }
+ mu struct {
+ replicas map[int]*Replica_cockroach10214
+ }
+}
+
+func (s *Store_cockroach10214) sendQueuedHeartbeats() {
+ s.coalescedMu.Lock() // L1 acquire
+	defer s.coalescedMu.Unlock() // L1 release
+ for i := 0; i < len(s.coalescedMu.heartbeatResponses); i++ {
+ s.sendQueuedHeartbeatsToNode() // L2
+ }
+}
+
+func (s *Store_cockroach10214) sendQueuedHeartbeatsToNode() {
+ for i := 0; i < len(s.mu.replicas); i++ {
+ r := s.mu.replicas[i]
+ r.reportUnreachable() // L2
+ }
+}
+
+type Replica_cockroach10214 struct {
+ raftMu sync.Mutex // L2
+ mu sync.Mutex // L3
+ store *Store_cockroach10214
+}
+
+func (r *Replica_cockroach10214) reportUnreachable() {
+ r.raftMu.Lock() // L2 acquire
+ time.Sleep(time.Millisecond)
+ defer r.raftMu.Unlock() // L2 release
+}
+
+func (r *Replica_cockroach10214) tick() {
+ r.raftMu.Lock() // L2 acquire
+ defer r.raftMu.Unlock() // L2 release
+ r.tickRaftMuLocked()
+}
+
+func (r *Replica_cockroach10214) tickRaftMuLocked() {
+ r.mu.Lock() // L3 acquire
+ defer r.mu.Unlock() // L3 release
+ if r.maybeQuiesceLocked() {
+ return
+ }
+}
+
+func (r *Replica_cockroach10214) maybeQuiesceLocked() bool {
+ for i := 0; i < 2; i++ {
+ if !r.maybeCoalesceHeartbeat() {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *Replica_cockroach10214) maybeCoalesceHeartbeat() bool {
+ msgtype := uintptr(unsafe.Pointer(r)) % 3
+ switch msgtype {
+ case 0, 1, 2:
+ r.store.coalescedMu.Lock() // L1 acquire
+ default:
+ return false
+ }
+ r.store.coalescedMu.Unlock() // L1 release
+ return true
+}
+
+func Cockroach10214() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ store := &Store_cockroach10214{}
+ responses := &store.coalescedMu.heartbeatResponses
+ *responses = append(*responses, 1, 2)
+ store.mu.replicas = make(map[int]*Replica_cockroach10214)
+
+ rp1 := &Replica_cockroach10214{ // L2,3[0]
+ store: store,
+ }
+ rp2 := &Replica_cockroach10214{ // L2,3[1]
+ store: store,
+ }
+ store.mu.replicas[0] = rp1
+ store.mu.replicas[1] = rp2
+
+ go store.sendQueuedHeartbeats() // G1
+ go rp1.tick() // G2
+ }()
+ }
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//------------------------------------------------------------------------------------
+// s.sendQueuedHeartbeats() .
+// s.coalescedMu.Lock() [L1] .
+// s.sendQueuedHeartbeatsToNode() .
+// s.mu.replicas[0].reportUnreachable() .
+// s.mu.replicas[0].raftMu.Lock() [L2] .
+// . s.mu.replicas[0].tick()
+// . s.mu.replicas[0].raftMu.Lock() [L2]
+// . s.mu.replicas[0].tickRaftMuLocked()
+// . s.mu.replicas[0].mu.Lock() [L3]
+// . s.mu.replicas[0].maybeQuiesceLocked()
+// . s.mu.replicas[0].maybeCoalesceHeartbeat()
+// . s.coalescedMu.Lock() [L1]
+//--------------------------------G1,G2 leak------------------------------------------ \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
new file mode 100644
index 0000000000..687baed25a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
@@ -0,0 +1,115 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func init() {
+ register("Cockroach1055", Cockroach1055)
+}
+
+type Stopper_cockroach1055 struct {
+ stopper chan struct{}
+ stop sync.WaitGroup
+ mu sync.Mutex
+ draining int32
+ drain sync.WaitGroup
+}
+
+func (s *Stopper_cockroach1055) AddWorker() {
+ s.stop.Add(1)
+}
+
+func (s *Stopper_cockroach1055) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach1055) SetStopped() {
+ if s != nil {
+ s.stop.Done()
+ }
+}
+
+func (s *Stopper_cockroach1055) Quiesce() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.draining = 1
+ s.drain.Wait()
+ s.draining = 0
+}
+
+func (s *Stopper_cockroach1055) Stop() {
+ s.mu.Lock() // L1
+ defer s.mu.Unlock()
+ atomic.StoreInt32(&s.draining, 1)
+ s.drain.Wait()
+ close(s.stopper)
+ s.stop.Wait()
+}
+
+func (s *Stopper_cockroach1055) StartTask() bool {
+ if atomic.LoadInt32(&s.draining) == 0 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.drain.Add(1)
+ return true
+ }
+ return false
+}
+
+func NewStopper_cockroach1055() *Stopper_cockroach1055 {
+ return &Stopper_cockroach1055{
+ stopper: make(chan struct{}),
+ }
+}
+
+func Cockroach1055() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i <= 1000; i++ {
+ go func() { // G1
+ var stoppers []*Stopper_cockroach1055
+ for i := 0; i < 2; i++ {
+ stoppers = append(stoppers, NewStopper_cockroach1055())
+ }
+
+ for i := range stoppers {
+ s := stoppers[i]
+ s.AddWorker()
+ go func() { // G2
+ s.StartTask()
+ <-s.ShouldStop()
+ s.SetStopped()
+ }()
+ }
+
+ done := make(chan struct{})
+ go func() { // G3
+ for _, s := range stoppers {
+ s.Quiesce()
+ }
+ for _, s := range stoppers {
+ s.Stop()
+ }
+ close(done)
+ }()
+
+ <-done
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go
new file mode 100644
index 0000000000..636f45b3e0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go
@@ -0,0 +1,98 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10790
+ * Buggy version: 96b5452557ebe26bd9d85fe7905155009204d893
+ * fix commit-id: f1a5c19125c65129b966fbdc0e6408e8df214aba
+ * Flaky: 28/100
+ * Description:
+ * It is possible that a message from ctxDone will make the function beginCmds
+ * returns without draining the channel ch, so that goroutines created by anonymous
+ * function will leak.
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach10790", Cockroach10790)
+}
+
+type Replica_cockroach10790 struct {
+ chans []chan bool
+}
+
+func (r *Replica_cockroach10790) beginCmds(ctx context.Context) {
+ ctxDone := ctx.Done()
+ for _, ch := range r.chans {
+ select {
+ case <-ch:
+ case <-ctxDone:
+ go func() { // G3
+ for _, ch := range r.chans {
+ <-ch
+ }
+ }()
+ }
+ }
+}
+
+func (r *Replica_cockroach10790) sendChans(ctx context.Context) {
+ for _, ch := range r.chans {
+ select {
+ case ch <- true:
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func NewReplica_cockroach10790() *Replica_cockroach10790 {
+ r := &Replica_cockroach10790{}
+ r.chans = append(r.chans, make(chan bool), make(chan bool))
+ return r
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2 G3 helper goroutine
+//--------------------------------------------------------------------------------------
+// . . r.sendChans()
+// r.beginCmds() . .
+// . . ch1 <-
+// <-ch1 <================================================> ch1 <-
+// . . select [ch2<-, <-ctx.Done()]
+// . cancel() .
+// . <<done>> [<-ctx.Done()] ==> return
+// . <<done>>
+// go func() [G3] .
+// . <-ch1
+// ------------------------------G3 leaks----------------------------------------------
+//
+
+func Cockroach10790() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ r := NewReplica_cockroach10790()
+ ctx, cancel := context.WithCancel(context.Background())
+ go r.sendChans(ctx) // helper goroutine
+ go r.beginCmds(ctx) // G1
+ go cancel() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go
new file mode 100644
index 0000000000..a0a9a79267
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go
@@ -0,0 +1,82 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13197
+ * Buggy version: fff27aedabafe20cef57f75905fe340cab48c2a4
+ * fix commit-id: 9bf770cd8f6eaff5441b80d3aec1a5614e8747e1
+ * Flaky: 100/100
+ * Description: One goroutine executing (*Tx).awaitDone() blocks
+ * waiting for a signal over context.Done() that never comes.
+ */
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach13197", Cockroach13197)
+}
+
+type DB_cockroach13197 struct{}
+
+func (db *DB_cockroach13197) begin(ctx context.Context) *Tx_cockroach13197 {
+ ctx, cancel := context.WithCancel(ctx)
+ tx := &Tx_cockroach13197{
+ cancel: cancel,
+ ctx: ctx,
+ }
+ go tx.awaitDone() // G2
+ return tx
+}
+
+type Tx_cockroach13197 struct {
+ cancel context.CancelFunc
+ ctx context.Context
+}
+
+func (tx *Tx_cockroach13197) awaitDone() {
+ <-tx.ctx.Done()
+}
+
+func (tx *Tx_cockroach13197) Rollback() {
+ tx.rollback()
+}
+
+func (tx *Tx_cockroach13197) rollback() {
+ tx.close()
+}
+
+func (tx *Tx_cockroach13197) close() {
+ tx.cancel()
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//--------------------------------
+// begin()
+// . awaitDone()
+// <<done>> .
+// <-tx.ctx.Done()
+//------------G2 leak-------------
+
+func Cockroach13197() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ db := &DB_cockroach13197{}
+ db.begin(context.Background()) // G1
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go
new file mode 100644
index 0000000000..5ef6fa1e28
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go
@@ -0,0 +1,66 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13755
+ * Buggy version: 7acb881bbb8f23e87b69fce9568d9a3316b5259c
+ * fix commit-id: ef906076adc1d0e3721944829cfedfed51810088
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach13755", Cockroach13755)
+}
+
+type Rows_cockroach13755 struct {
+ cancel context.CancelFunc
+}
+
+func (rs *Rows_cockroach13755) initContextClose(ctx context.Context) {
+ ctx, rs.cancel = context.WithCancel(ctx)
+ go rs.awaitDone(ctx)
+}
+
+func (rs *Rows_cockroach13755) awaitDone(ctx context.Context) {
+ <-ctx.Done()
+ rs.close(ctx.Err())
+}
+
+func (rs *Rows_cockroach13755) close(err error) {
+ rs.cancel()
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//----------------------------------------
+// initContextClose()
+// . awaitDone()
+// <<done>> .
+// <-tx.ctx.Done()
+//----------------G2 leak-----------------
+
+func Cockroach13755() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ rs := &Rows_cockroach13755{}
+ rs.initContextClose(context.Background())
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go
new file mode 100644
index 0000000000..108d7884a3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go
@@ -0,0 +1,167 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach1462", Cockroach1462)
+}
+
+type Stopper_cockroach1462 struct {
+ stopper chan struct{}
+ stopped chan struct{}
+ stop sync.WaitGroup
+ mu sync.Mutex
+ drain *sync.Cond
+ draining bool
+ numTasks int
+}
+
+func NewStopper_cockroach1462() *Stopper_cockroach1462 {
+ s := &Stopper_cockroach1462{
+ stopper: make(chan struct{}),
+ stopped: make(chan struct{}),
+ }
+ s.drain = sync.NewCond(&s.mu)
+ return s
+}
+
+func (s *Stopper_cockroach1462) RunWorker(f func()) {
+ s.AddWorker()
+ go func() { // G2, G3
+ defer s.SetStopped()
+ f()
+ }()
+}
+
+func (s *Stopper_cockroach1462) AddWorker() {
+ s.stop.Add(1)
+}
+func (s *Stopper_cockroach1462) StartTask() bool {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ if s.draining {
+ return false
+ }
+ s.numTasks++
+ return true
+}
+
+func (s *Stopper_cockroach1462) FinishTask() {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ s.numTasks--
+ s.drain.Broadcast()
+}
+func (s *Stopper_cockroach1462) SetStopped() {
+ if s != nil {
+ s.stop.Done()
+ }
+}
+func (s *Stopper_cockroach1462) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach1462) Quiesce() {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ s.draining = true
+ for s.numTasks > 0 {
+ // Unlock s.mu, wait for the signal, and lock s.mu.
+ s.drain.Wait()
+ }
+}
+
+func (s *Stopper_cockroach1462) Stop() {
+ s.Quiesce()
+ close(s.stopper)
+ s.stop.Wait()
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ close(s.stopped)
+}
+
+type interceptMessage_cockroach1462 int
+
+type localInterceptableTransport_cockroach1462 struct {
+ mu sync.Mutex
+ Events chan interceptMessage_cockroach1462
+ stopper *Stopper_cockroach1462
+}
+
+func (lt *localInterceptableTransport_cockroach1462) Close() {}
+
+type Transport_cockroach1462 interface {
+ Close()
+}
+
+func NewLocalInterceptableTransport_cockroach1462(stopper *Stopper_cockroach1462) Transport_cockroach1462 {
+ lt := &localInterceptableTransport_cockroach1462{
+ Events: make(chan interceptMessage_cockroach1462),
+ stopper: stopper,
+ }
+ lt.start()
+ return lt
+}
+
+func (lt *localInterceptableTransport_cockroach1462) start() {
+ lt.stopper.RunWorker(func() {
+ for {
+ select {
+ case <-lt.stopper.ShouldStop():
+ return
+ default:
+ lt.Events <- interceptMessage_cockroach1462(0)
+ }
+ }
+ })
+}
+
+func processEventsUntil_cockroach1462(ch <-chan interceptMessage_cockroach1462, stopper *Stopper_cockroach1462) {
+ for {
+ select {
+ case _, ok := <-ch:
+ runtime.Gosched()
+ if !ok {
+ return
+ }
+ case <-stopper.ShouldStop():
+ return
+ }
+ }
+}
+
+func Cockroach1462() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(2000 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i <= 1000; i++ {
+ go func() { // G1
+ stopper := NewStopper_cockroach1462()
+ transport := NewLocalInterceptableTransport_cockroach1462(stopper).(*localInterceptableTransport_cockroach1462)
+ stopper.RunWorker(func() {
+ processEventsUntil_cockroach1462(transport.Events, stopper)
+ })
+ stopper.Stop()
+ }()
+ }
+}
+
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go
new file mode 100644
index 0000000000..4cd14c7a5b
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/16167
+ * Buggy version: 36fa784aa846b46c29e077634c4e362635f6e74a
+ * fix commit-id: d064942b067ab84628f79cbfda001fa3138d8d6e
+ * Flaky: 1/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach16167", Cockroach16167)
+}
+
+type PreparedStatements_cockroach16167 struct {
+ session *Session_cockroach16167
+}
+
+func (ps PreparedStatements_cockroach16167) New(e *Executor_cockroach16167) {
+ e.Prepare(ps.session)
+}
+
+type Session_cockroach16167 struct {
+ PreparedStatements PreparedStatements_cockroach16167
+}
+
+func (s *Session_cockroach16167) resetForBatch(e *Executor_cockroach16167) {
+ e.getDatabaseCache()
+}
+
+type Executor_cockroach16167 struct {
+ systemConfigCond *sync.Cond
+ systemConfigMu sync.RWMutex // L1
+}
+
+func (e *Executor_cockroach16167) Start() {
+ e.updateSystemConfig()
+}
+
+func (e *Executor_cockroach16167) execParsed(session *Session_cockroach16167) {
+ e.systemConfigCond.L.Lock() // Same as e.systemConfigMu.RLock()
+ runtime.Gosched()
+ defer e.systemConfigCond.L.Unlock()
+ runTxnAttempt_cockroach16167(e, session)
+}
+
+func (e *Executor_cockroach16167) execStmtsInCurrentTxn(session *Session_cockroach16167) {
+ e.execStmtInOpenTxn(session)
+}
+
+func (e *Executor_cockroach16167) execStmtInOpenTxn(session *Session_cockroach16167) {
+ session.PreparedStatements.New(e)
+}
+
+func (e *Executor_cockroach16167) Prepare(session *Session_cockroach16167) {
+ session.resetForBatch(e)
+}
+
+func (e *Executor_cockroach16167) getDatabaseCache() {
+ e.systemConfigMu.RLock()
+ defer e.systemConfigMu.RUnlock()
+}
+
+func (e *Executor_cockroach16167) updateSystemConfig() {
+ e.systemConfigMu.Lock()
+ runtime.Gosched()
+ defer e.systemConfigMu.Unlock()
+}
+
+func runTxnAttempt_cockroach16167(e *Executor_cockroach16167, session *Session_cockroach16167) {
+ e.execStmtsInCurrentTxn(session)
+}
+
+func NewExectorAndSession_cockroach16167() (*Executor_cockroach16167, *Session_cockroach16167) {
+ session := &Session_cockroach16167{}
+ session.PreparedStatements = PreparedStatements_cockroach16167{session}
+ e := &Executor_cockroach16167{}
+ return e, session
+}
+
+func Cockroach16167() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ e, s := NewExectorAndSession_cockroach16167()
+ e.systemConfigCond = sync.NewCond(e.systemConfigMu.RLocker())
+ go e.Start() // G2
+ e.execParsed(s)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go
new file mode 100644
index 0000000000..17b0320330
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go
@@ -0,0 +1,60 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/18101
+ * Buggy version: f7a8e2f57b6bcf00b9abaf3da00598e4acd3a57f
+ * fix commit-id: 822bd176cc725c6b50905ea615023200b395e14f
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach18101", Cockroach18101)
+}
+
+const chanSize_cockroach18101 = 6
+
+func restore_cockroach18101(ctx context.Context) bool {
+ readyForImportCh := make(chan bool, chanSize_cockroach18101)
+ go func() { // G2
+ defer close(readyForImportCh)
+ splitAndScatter_cockroach18101(ctx, readyForImportCh)
+ }()
+ for readyForImportSpan := range readyForImportCh {
+ select {
+ case <-ctx.Done():
+ return readyForImportSpan
+ }
+ }
+ return true
+}
+
+func splitAndScatter_cockroach18101(ctx context.Context, readyForImportCh chan bool) {
+ for i := 0; i < chanSize_cockroach18101+2; i++ {
+ readyForImportCh <- (false || i != 0)
+ }
+}
+
+func Cockroach18101() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ ctx, cancel := context.WithCancel(context.Background())
+ go restore_cockroach18101(ctx) // G1
+ go cancel() // helper goroutine
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go
new file mode 100644
index 0000000000..a7544bc8a4
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go
@@ -0,0 +1,125 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach2448", Cockroach2448)
+}
+
+type Stopper_cockroach2448 struct {
+ Done chan bool
+}
+
+func (s *Stopper_cockroach2448) ShouldStop() <-chan bool {
+ return s.Done
+}
+
+type EventMembershipChangeCommitted_cockroach2448 struct {
+ Callback func()
+}
+
+type MultiRaft_cockroach2448 struct {
+ stopper *Stopper_cockroach2448
+ Events chan interface{}
+ callbackChan chan func()
+}
+
+// sendEvent can be invoked many times
+func (m *MultiRaft_cockroach2448) sendEvent(event interface{}) {
+ select {
+ case m.Events <- event: // Waiting for events consumption
+ case <-m.stopper.ShouldStop():
+ }
+}
+
+type state_cockroach2448 struct {
+ *MultiRaft_cockroach2448
+}
+
+func (s *state_cockroach2448) start() {
+ for {
+ select {
+ case <-s.stopper.ShouldStop():
+ return
+ case cb := <-s.callbackChan:
+ cb()
+ default:
+ s.handleWriteResponse()
+ time.Sleep(100 * time.Microsecond)
+ }
+ }
+}
+
+func (s *state_cockroach2448) handleWriteResponse() {
+ s.sendEvent(&EventMembershipChangeCommitted_cockroach2448{
+ Callback: func() {
+ select {
+ case s.callbackChan <- func() { // Waiting for callbackChan consumption
+ time.Sleep(time.Nanosecond)
+ }:
+ case <-s.stopper.ShouldStop():
+ }
+ },
+ })
+}
+
+type Store_cockroach2448 struct {
+ multiraft *MultiRaft_cockroach2448
+}
+
+func (s *Store_cockroach2448) processRaft() {
+ for {
+ select {
+ case e := <-s.multiraft.Events:
+ switch e := e.(type) {
+ case *EventMembershipChangeCommitted_cockroach2448:
+ callback := e.Callback
+ runtime.Gosched()
+ if callback != nil {
+ callback() // Waiting for callbackChan consumption
+ }
+ }
+ case <-s.multiraft.stopper.ShouldStop():
+ return
+ }
+ }
+}
+
+func NewStoreAndState_cockroach2448() (*Store_cockroach2448, *state_cockroach2448) {
+ stopper := &Stopper_cockroach2448{
+ Done: make(chan bool),
+ }
+ mltrft := &MultiRaft_cockroach2448{
+ stopper: stopper,
+ Events: make(chan interface{}),
+ callbackChan: make(chan func()),
+ }
+ st := &state_cockroach2448{mltrft}
+ s := &Store_cockroach2448{mltrft}
+ return s, st
+}
+
+func Cockroach2448() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ s, st := NewStoreAndState_cockroach2448()
+ go s.processRaft() // G1
+ go st.start() // G2
+ }()
+ }
+}
+
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go
new file mode 100644
index 0000000000..a916d3c928
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go
@@ -0,0 +1,78 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Cockroach24808", Cockroach24808)
+}
+
+type Compactor_cockroach24808 struct {
+ ch chan struct{}
+}
+
+type Stopper_cockroach24808 struct {
+ stop sync.WaitGroup
+ stopper chan struct{}
+}
+
+func (s *Stopper_cockroach24808) RunWorker(ctx context.Context, f func(context.Context)) {
+ s.stop.Add(1)
+ go func() {
+ defer s.stop.Done()
+ f(ctx)
+ }()
+}
+
+func (s *Stopper_cockroach24808) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach24808) Stop() {
+ close(s.stopper)
+}
+
+func (c *Compactor_cockroach24808) Start(ctx context.Context, stopper *Stopper_cockroach24808) {
+ c.ch <- struct{}{}
+ stopper.RunWorker(ctx, func(ctx context.Context) {
+ for {
+ select {
+ case <-stopper.ShouldStop():
+ return
+ case <-c.ch:
+ }
+ }
+ })
+}
+
+func Cockroach24808() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ stopper := &Stopper_cockroach24808{stopper: make(chan struct{})}
+ defer stopper.Stop()
+
+ compactor := &Compactor_cockroach24808{ch: make(chan struct{}, 1)}
+ compactor.ch <- struct{}{}
+
+ compactor.Start(context.Background(), stopper)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go
new file mode 100644
index 0000000000..b9259c9f91
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go
@@ -0,0 +1,92 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach25456", Cockroach25456)
+}
+
+type Stopper_cockroach25456 struct {
+ quiescer chan struct{}
+}
+
+func (s *Stopper_cockroach25456) ShouldQuiesce() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.quiescer
+}
+
+func NewStopper_cockroach25456() *Stopper_cockroach25456 {
+ return &Stopper_cockroach25456{quiescer: make(chan struct{})}
+}
+
+type Store_cockroach25456 struct {
+ stopper *Stopper_cockroach25456
+ consistencyQueue *consistencyQueue_cockroach25456
+}
+
+func (s *Store_cockroach25456) Stopper() *Stopper_cockroach25456 {
+ return s.stopper
+}
+
+type Replica_cockroach25456 struct {
+ store *Store_cockroach25456
+}
+
+func NewReplica_cockroach25456(store *Store_cockroach25456) *Replica_cockroach25456 {
+ return &Replica_cockroach25456{store: store}
+}
+
+type consistencyQueue_cockroach25456 struct{}
+
+func (q *consistencyQueue_cockroach25456) process(repl *Replica_cockroach25456) {
+ <-repl.store.Stopper().ShouldQuiesce()
+}
+
+func newConsistencyQueue_cockroach25456() *consistencyQueue_cockroach25456 {
+ return &consistencyQueue_cockroach25456{}
+}
+
+type testContext_cockroach25456 struct {
+ store *Store_cockroach25456
+ repl *Replica_cockroach25456
+}
+
+func (tc *testContext_cockroach25456) StartWithStoreConfig(stopper *Stopper_cockroach25456) {
+ if tc.store == nil {
+ tc.store = &Store_cockroach25456{
+ consistencyQueue: newConsistencyQueue_cockroach25456(),
+ }
+ }
+ tc.store.stopper = stopper
+ tc.repl = NewReplica_cockroach25456(tc.store)
+}
+
+func Cockroach25456() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ stopper := NewStopper_cockroach25456()
+ tc := testContext_cockroach25456{}
+ tc.StartWithStoreConfig(stopper)
+
+ for i := 0; i < 2; i++ {
+ tc.store.consistencyQueue.process(tc.repl)
+ }
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go
new file mode 100644
index 0000000000..f00a7bd462
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go
@@ -0,0 +1,124 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "sync/atomic"
+)
+
+func init() {
+ register("Cockroach35073", Cockroach35073)
+}
+
+type ConsumerStatus_cockroach35073 uint32
+
+const (
+ NeedMoreRows_cockroach35073 ConsumerStatus_cockroach35073 = iota
+ DrainRequested_cockroach35073
+ ConsumerClosed_cockroach35073
+)
+
+const rowChannelBufSize_cockroach35073 = 16
+const outboxBufRows_cockroach35073 = 16
+
+type rowSourceBase_cockroach35073 struct {
+ consumerStatus ConsumerStatus_cockroach35073
+}
+
+func (rb *rowSourceBase_cockroach35073) consumerClosed() {
+ atomic.StoreUint32((*uint32)(&rb.consumerStatus), uint32(ConsumerClosed_cockroach35073))
+}
+
+type RowChannelMsg_cockroach35073 int
+
+type RowChannel_cockroach35073 struct {
+ rowSourceBase_cockroach35073
+ dataChan chan RowChannelMsg_cockroach35073
+}
+
+func (rc *RowChannel_cockroach35073) ConsumerClosed() {
+ rc.consumerClosed()
+ select {
+ case <-rc.dataChan:
+ default:
+ }
+}
+
+func (rc *RowChannel_cockroach35073) Push() ConsumerStatus_cockroach35073 {
+ consumerStatus := ConsumerStatus_cockroach35073(
+ atomic.LoadUint32((*uint32)(&rc.consumerStatus)))
+ switch consumerStatus {
+ case NeedMoreRows_cockroach35073:
+ rc.dataChan <- RowChannelMsg_cockroach35073(0)
+ case DrainRequested_cockroach35073:
+ case ConsumerClosed_cockroach35073:
+ }
+ return consumerStatus
+}
+
+func (rc *RowChannel_cockroach35073) InitWithNumSenders() {
+ rc.initWithBufSizeAndNumSenders(rowChannelBufSize_cockroach35073)
+}
+
+func (rc *RowChannel_cockroach35073) initWithBufSizeAndNumSenders(chanBufSize int) {
+ rc.dataChan = make(chan RowChannelMsg_cockroach35073, chanBufSize)
+}
+
+type outbox_cockroach35073 struct {
+ RowChannel_cockroach35073
+}
+
+func (m *outbox_cockroach35073) init() {
+ m.RowChannel_cockroach35073.InitWithNumSenders()
+}
+
+func (m *outbox_cockroach35073) start(wg *sync.WaitGroup) {
+ if wg != nil {
+ wg.Add(1)
+ }
+ go m.run(wg)
+}
+
+func (m *outbox_cockroach35073) run(wg *sync.WaitGroup) {
+ if wg != nil {
+ wg.Done()
+ }
+}
+
+func Cockroach35073() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ outbox := &outbox_cockroach35073{}
+ outbox.init()
+
+ var wg sync.WaitGroup
+ for i := 0; i < outboxBufRows_cockroach35073; i++ {
+ outbox.Push()
+ }
+
+ var blockedPusherWg sync.WaitGroup
+ blockedPusherWg.Add(1)
+ go func() {
+ outbox.Push()
+ blockedPusherWg.Done()
+ }()
+
+ outbox.start(&wg)
+
+ wg.Wait()
+ outbox.RowChannel_cockroach35073.Push()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go
new file mode 100644
index 0000000000..9ddcda1b62
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go
@@ -0,0 +1,135 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() { // register is supplied by the test harness elsewhere in this package
+ register("Cockroach35931", Cockroach35931)
+}
+
+type RowReceiver_cockroach35931 interface {
+ Push()
+}
+
+type inboundStreamInfo_cockroach35931 struct {
+ receiver RowReceiver_cockroach35931
+}
+
+type RowChannel_cockroach35931 struct {
+ dataChan chan struct{}
+}
+
+func (rc *RowChannel_cockroach35931) Push() {
+ // The buffer size can be either 0 or 1 when this function is entered.
+ // We need context sensitivity or a path-condition on the buffer size
+ // to find this bug.
+ rc.dataChan <- struct{}{} // blocks forever once the buffer is full: the leak site
+}
+
+func (rc *RowChannel_cockroach35931) initWithBufSizeAndNumSenders(chanBufSize int) {
+ rc.dataChan = make(chan struct{}, chanBufSize)
+}
+
+type flowEntry_cockroach35931 struct {
+ flow *Flow_cockroach35931
+ inboundStreams map[int]*inboundStreamInfo_cockroach35931
+}
+
+type flowRegistry_cockroach35931 struct {
+ sync.Mutex
+ flows map[int]*flowEntry_cockroach35931
+}
+
+func (fr *flowRegistry_cockroach35931) getEntryLocked(id int) *flowEntry_cockroach35931 { // "Locked" suffix: caller is expected to hold fr.Mutex (not enforced here)
+ entry, ok := fr.flows[id]
+ if !ok {
+ entry = &flowEntry_cockroach35931{}
+ fr.flows[id] = entry
+ }
+ return entry
+}
+
+func (fr *flowRegistry_cockroach35931) cancelPendingStreamsLocked(id int) []RowReceiver_cockroach35931 {
+ entry := fr.flows[id]
+ pendingReceivers := make([]RowReceiver_cockroach35931, 0)
+ for _, is := range entry.inboundStreams {
+ pendingReceivers = append(pendingReceivers, is.receiver)
+ }
+ return pendingReceivers
+}
+
+type Flow_cockroach35931 struct {
+ id int
+ flowRegistry *flowRegistry_cockroach35931
+ inboundStreams map[int]*inboundStreamInfo_cockroach35931
+}
+
+func (f *Flow_cockroach35931) cancel() {
+ f.flowRegistry.Lock()
+ timedOutReceivers := f.flowRegistry.cancelPendingStreamsLocked(f.id)
+ f.flowRegistry.Unlock()
+
+ for _, receiver := range timedOutReceivers {
+ receiver.Push() // may block: left's 1-slot buffer is already full (see Cockroach35931)
+ }
+}
+
+func (fr *flowRegistry_cockroach35931) RegisterFlow(f *Flow_cockroach35931, inboundStreams map[int]*inboundStreamInfo_cockroach35931) {
+ entry := fr.getEntryLocked(f.id)
+ entry.flow = f
+ entry.inboundStreams = inboundStreams
+}
+
+func makeFlowRegistry_cockroach35931() *flowRegistry_cockroach35931 {
+ return &flowRegistry_cockroach35931{
+ flows: make(map[int]*flowEntry_cockroach35931),
+ }
+}
+
+func Cockroach35931() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ fr := makeFlowRegistry_cockroach35931()
+
+ left := &RowChannel_cockroach35931{}
+ left.initWithBufSizeAndNumSenders(1)
+ right := &RowChannel_cockroach35931{}
+ right.initWithBufSizeAndNumSenders(1)
+
+ inboundStreams := map[int]*inboundStreamInfo_cockroach35931{
+ 0: {
+ receiver: left,
+ },
+ 1: {
+ receiver: right,
+ },
+ }
+
+ left.Push() // fill left's single buffer slot
+
+ flow := &Flow_cockroach35931{
+ id: 0,
+ flowRegistry: fr,
+ inboundStreams: inboundStreams,
+ }
+
+ fr.RegisterFlow(flow, inboundStreams)
+
+ flow.cancel() // second Push on left blocks forever -> goroutine leak
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go
new file mode 100644
index 0000000000..e419cd2fc3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go
@@ -0,0 +1,122 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/3710
+ * Buggy version: 4afdd4860fd7c3bd9e92489f84a95e5cc7d11a0d
+ * fix commit-id: cb65190f9caaf464723e7d072b1f1b69a044ef7b
+ * Flaky: 2/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("Cockroach3710", Cockroach3710)
+}
+
+type Store_cockroach3710 struct {
+ raftLogQueue *baseQueue
+ replicas map[int]*Replica_cockroach3710
+
+ mu struct {
+ sync.RWMutex
+ }
+}
+
+func (s *Store_cockroach3710) ForceRaftLogScanAndProcess() { // holds RLock, then re-RLocks via MaybeAdd -> RaftStatus
+ s.mu.RLock()
+ runtime.Gosched() // yield to widen the racy interleaving window
+ for _, r := range s.replicas {
+ s.raftLogQueue.MaybeAdd(r)
+ }
+ s.mu.RUnlock()
+}
+
+func (s *Store_cockroach3710) RaftStatus() {
+ s.mu.RLock() // recursive read-lock: blocks if a writer queued between the two RLocks
+ defer s.mu.RUnlock()
+}
+
+func (s *Store_cockroach3710) processRaft() {
+ go func() {
+ for {
+ var replicas []*Replica_cockroach3710
+ s.mu.Lock() // queued writer makes G1's second RLock block -> deadlock
+ for _, r := range s.replicas {
+ replicas = append(replicas, r)
+ }
+ s.mu.Unlock()
+ break
+ }
+ }()
+}
+
+type Replica_cockroach3710 struct {
+ store *Store_cockroach3710
+}
+
+type baseQueue struct {
+ sync.Mutex
+ impl *raftLogQueue
+}
+
+func (bq *baseQueue) MaybeAdd(repl *Replica_cockroach3710) {
+ bq.Lock()
+ defer bq.Unlock()
+ bq.impl.shouldQueue(repl)
+}
+
+type raftLogQueue struct{}
+
+func (*raftLogQueue) shouldQueue(r *Replica_cockroach3710) {
+ getTruncatableIndexes(r)
+}
+
+func getTruncatableIndexes(r *Replica_cockroach3710) {
+ r.store.RaftStatus()
+}
+
+func NewStore_cockroach3710() *Store_cockroach3710 {
+ rlq := &raftLogQueue{}
+ bq := &baseQueue{impl: rlq}
+ store := &Store_cockroach3710{
+ raftLogQueue: bq,
+ replicas: make(map[int]*Replica_cockroach3710),
+ }
+ r1 := &Replica_cockroach3710{store}
+ r2 := &Replica_cockroach3710{store}
+
+ makeKey := func(r *Replica_cockroach3710) int { // pseudo-random small map key derived from the pointer value
+ return int((uintptr(unsafe.Pointer(r)) >> 1) % 7)
+ }
+ store.replicas[makeKey(r1)] = r1
+ store.replicas[makeKey(r2)] = r2
+
+ return store
+}
+
+func Cockroach3710() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 10000; i++ {
+ go func() {
+ store := NewStore_cockroach3710()
+ go store.ForceRaftLogScanAndProcess() // G1
+ go store.processRaft() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go
new file mode 100644
index 0000000000..33f7ba7a45
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go
@@ -0,0 +1,62 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Cockroach584", Cockroach584)
+}
+
+type gossip_cockroach584 struct {
+ mu sync.Mutex // L1
+ closed bool
+}
+
+func (g *gossip_cockroach584) bootstrap() {
+ for {
+ g.mu.Lock()
+ if g.closed {
+ // Missing g.mu.Unlock
+ break // exits the loop still holding L1
+ }
+ g.mu.Unlock()
+ }
+}
+
+func (g *gossip_cockroach584) manage() { // blocks forever on the L1 leaked by bootstrap
+ for {
+ g.mu.Lock()
+ if g.closed {
+ // Missing g.mu.Unlock
+ break
+ }
+ g.mu.Unlock()
+ }
+}
+
+func Cockroach584() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ for i := 0; i < yieldCount; i++ {
+ // Yield several times to allow the child goroutine to run.
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ g := &gossip_cockroach584{
+ closed: true, // forces the buggy break path on the first iteration
+ }
+ go func() { // G1
+ g.bootstrap()
+ g.manage()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go
new file mode 100644
index 0000000000..80f1dd504d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/6181
+ * Buggy version: c0a232b5521565904b851699853bdbd0c670cf1e
+ * fix commit-id: d5814e4886a776bf7789b3c51b31f5206480d184
+ * Flaky: 57/100
+ */
+package main
+
+import (
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach6181", Cockroach6181)
+}
+
+type testDescriptorDB_cockroach6181 struct {
+ cache *rangeDescriptorCache_cockroach6181
+}
+
+func initTestDescriptorDB_cockroach6181() *testDescriptorDB_cockroach6181 {
+ return &testDescriptorDB_cockroach6181{&rangeDescriptorCache_cockroach6181{}}
+}
+
+type rangeDescriptorCache_cockroach6181 struct {
+ rangeCacheMu sync.RWMutex
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) LookupRangeDescriptor() {
+ rdc.rangeCacheMu.RLock()
+ runtime.Gosched() // yield to widen the racy interleaving window
+ io.Discard.Write([]byte(rdc.String())) // String re-RLocks while RLock is already held
+ rdc.rangeCacheMu.RUnlock()
+ rdc.rangeCacheMu.Lock() // a writer queued here blocks another goroutine's recursive RLock -> deadlock
+ rdc.rangeCacheMu.Unlock()
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) String() string {
+ rdc.rangeCacheMu.RLock() // recursive read-lock (caller already holds RLock)
+ defer rdc.rangeCacheMu.RUnlock()
+ return rdc.stringLocked()
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) stringLocked() string {
+ return "something here"
+}
+
+func doLookupWithToken_cockroach6181(rc *rangeDescriptorCache_cockroach6181) {
+ rc.LookupRangeDescriptor()
+}
+
+func testRangeCacheCoalescedRequests_cockroach6181() {
+ db := initTestDescriptorDB_cockroach6181()
+ pauseLookupResumeAndAssert := func() {
+ var wg sync.WaitGroup
+ for i := 0; i < 3; i++ {
+ wg.Add(1)
+ go func() { // G2,G3,...
+ doLookupWithToken_cockroach6181(db.cache)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ }
+ pauseLookupResumeAndAssert()
+}
+
+func Cockroach6181() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testRangeCacheCoalescedRequests_cockroach6181() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go
new file mode 100644
index 0000000000..945308a76f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go
@@ -0,0 +1,183 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/7504
+ * Buggy version: bc963b438cdc3e0ad058a5282358e5aee0595e17
+ * fix commit-id: cab761b9f5ee5dee1448bc5d6b1d9f5a0ff0bad5
+ * Flaky: 1/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach7504", Cockroach7504)
+}
+
+func MakeCacheKey_cockroach7504(lease *LeaseState_cockroach7504) int {
+ return lease.id
+}
+
+type LeaseState_cockroach7504 struct {
+ mu sync.Mutex // L1
+ id int
+}
+type LeaseSet_cockroach7504 struct {
+ data []*LeaseState_cockroach7504
+}
+
+func (l *LeaseSet_cockroach7504) find(id int) *LeaseState_cockroach7504 {
+ return l.data[id]
+}
+
+func (l *LeaseSet_cockroach7504) remove(s *LeaseState_cockroach7504) {
+ for i := 0; i < len(l.data); i++ {
+ if s == l.data[i] {
+ l.data = append(l.data[:i], l.data[i+1:]...)
+ break
+ }
+ }
+}
+
+type tableState_cockroach7504 struct {
+ tableNameCache *tableNameCache_cockroach7504
+ mu sync.Mutex // L3
+ active *LeaseSet_cockroach7504
+}
+
+func (t *tableState_cockroach7504) release(lease *LeaseState_cockroach7504) { // acquires L3, L1, then L2 via removeLease: inverts get's L2->L1 order
+ t.mu.Lock() // L3
+ defer t.mu.Unlock() // L3
+
+ s := t.active.find(MakeCacheKey_cockroach7504(lease))
+ s.mu.Lock() // L1
+ runtime.Gosched() // yield to widen the racy interleaving window
+ defer s.mu.Unlock() // L1
+
+ t.removeLease(s)
+}
+func (t *tableState_cockroach7504) removeLease(lease *LeaseState_cockroach7504) {
+ t.active.remove(lease)
+ t.tableNameCache.remove(lease) // acquires/releases L2 while L1 (and L3) are still held
+}
+
+type tableNameCache_cockroach7504 struct {
+ mu sync.Mutex // L2
+ tables map[int]*LeaseState_cockroach7504
+}
+
+func (c *tableNameCache_cockroach7504) get(id int) { // acquires L2 then L1: inverts release's L1->L2 order
+ c.mu.Lock() // L2
+ defer c.mu.Unlock() // L2
+ lease, ok := c.tables[id]
+ if !ok {
+ return
+ }
+ if lease == nil {
+ panic("nil lease in name cache")
+ }
+ lease.mu.Lock() // L1
+ defer lease.mu.Unlock() // L1
+}
+
+func (c *tableNameCache_cockroach7504) remove(lease *LeaseState_cockroach7504) {
+ c.mu.Lock() // L2
+ runtime.Gosched() // yield to widen the racy interleaving window
+ defer c.mu.Unlock() // L2
+ key := MakeCacheKey_cockroach7504(lease)
+ existing, ok := c.tables[key]
+ if !ok {
+ return
+ }
+ if existing == lease {
+ delete(c.tables, key)
+ }
+}
+
+type LeaseManager_cockroach7504 struct {
+ _ [64]byte
+ tableNames *tableNameCache_cockroach7504
+ tables map[int]*tableState_cockroach7504
+}
+
+func (m *LeaseManager_cockroach7504) AcquireByName(id int) {
+ m.tableNames.get(id)
+}
+
+func (m *LeaseManager_cockroach7504) findTableState(lease *LeaseState_cockroach7504) *tableState_cockroach7504 {
+ existing, ok := m.tables[lease.id]
+ if !ok {
+ return nil
+ }
+ return existing
+}
+
+func (m *LeaseManager_cockroach7504) Release(lease *LeaseState_cockroach7504) {
+ t := m.findTableState(lease)
+ t.release(lease)
+}
+func NewLeaseManager_cockroach7504(tname *tableNameCache_cockroach7504, ts *tableState_cockroach7504) *LeaseManager_cockroach7504 {
+ mgr := &LeaseManager_cockroach7504{
+ tableNames: tname,
+ tables: make(map[int]*tableState_cockroach7504),
+ }
+ mgr.tables[0] = ts
+ return mgr
+}
+func NewLeaseSet_cockroach7504(n int) *LeaseSet_cockroach7504 {
+ lset := &LeaseSet_cockroach7504{}
+ for i := 0; i < n; i++ {
+ lease := new(LeaseState_cockroach7504)
+ lset.data = append(lset.data, lease)
+ }
+ return lset
+}
+
+func Cockroach7504() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func() {
+ leaseNum := 2
+ lset := NewLeaseSet_cockroach7504(leaseNum)
+
+ nc := &tableNameCache_cockroach7504{
+ tables: make(map[int]*LeaseState_cockroach7504),
+ }
+ for i := 0; i < leaseNum; i++ {
+ nc.tables[i] = lset.find(i)
+ }
+
+ ts := &tableState_cockroach7504{
+ tableNameCache: nc,
+ active: lset,
+ }
+
+ mgr := NewLeaseManager_cockroach7504(nc, ts)
+
+ // G1
+ go func() {
+ // lock L2-L1
+ mgr.AcquireByName(0)
+ }()
+
+ // G2
+ go func() {
+ // lock L1-L2
+ mgr.Release(lset.find(0))
+ }()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go
new file mode 100644
index 0000000000..e143a6670d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go
@@ -0,0 +1,77 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/9935
+ * Buggy version: 4df302cc3f03328395dc3fefbfba58b7718e4f2f
+ * fix commit-id: ed6a100ba38dd51b0888b9a3d3ac6bdbb26c528c
+ * Flaky: 100/100
+ * Description: This leak is caused by acquiring l.mu.Lock() twice. The fix is
+ * to release l.mu.Lock() before acquiring l.mu.Lock for the second time.
+ */
+package main
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach9935", Cockroach9935)
+}
+
+type loggingT_cockroach9935 struct {
+ mu sync.Mutex
+}
+
+func (l *loggingT_cockroach9935) outputLogEntry() {
+ l.mu.Lock()
+ if err := l.createFile(); err != nil {
+ l.exit(err) // exit re-locks l.mu while it is already held -> self-deadlock
+ }
+ l.mu.Unlock()
+}
+
+func (l *loggingT_cockroach9935) createFile() error { // fails pseudo-randomly (~75%) to trigger the exit path
+ if rand.Intn(8)%4 > 0 {
+ return errors.New("")
+ }
+ return nil
+}
+
+func (l *loggingT_cockroach9935) exit(err error) {
+ l.mu.Lock() // Blocked forever
+ defer l.mu.Unlock()
+}
+
+// Example of goroutine leak trace:
+//
+// G1
+//----------------------------
+// l.outputLogEntry()
+// l.mu.Lock()
+// l.createFile()
+// l.exit()
+// l.mu.Lock()
+//-----------G1 leaks---------
+
+func Cockroach9935() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ l := &loggingT_cockroach9935{}
+ go l.outputLogEntry() // G1
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go
new file mode 100644
index 0000000000..7d56642d5e
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go
@@ -0,0 +1,72 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd10492", Etcd10492)
+}
+
+type Checkpointer_etcd10492 func(ctx context.Context)
+
+type lessor_etcd10492 struct {
+ mu sync.RWMutex
+ cp Checkpointer_etcd10492
+ checkpointInterval time.Duration
+}
+
+func (le *lessor_etcd10492) Checkpoint() {
+ le.mu.Lock() // Lock acquired twice here
+ defer le.mu.Unlock()
+}
+
+func (le *lessor_etcd10492) SetCheckpointer(cp Checkpointer_etcd10492) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.cp = cp
+}
+
+func (le *lessor_etcd10492) Renew() { // holds le.mu while invoking le.cp, which re-locks it in Checkpoint
+ le.mu.Lock()
+ unlock := func() { le.mu.Unlock() }
+ defer func() { unlock() }()
+
+ if le.cp != nil {
+ le.cp(context.Background()) // self-deadlock: Checkpoint locks le.mu again
+ }
+}
+
+func Etcd10492() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ le := &lessor_etcd10492{
+ checkpointInterval: 0,
+ }
+ fakerCheckerpointer_etcd10492 := func(ctx context.Context) {
+ le.Checkpoint()
+ }
+ le.SetCheckpointer(fakerCheckerpointer_etcd10492)
+ le.mu.Lock()
+ le.mu.Unlock()
+ le.Renew() // leaks: Renew -> cp -> Checkpoint double-locks le.mu
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go
new file mode 100644
index 0000000000..868e926e66
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go
@@ -0,0 +1,126 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Etcd5509", Etcd5509)
+}
+
+var ErrConnClosed_etcd5509 error
+
+type Client_etcd5509 struct {
+ mu sync.RWMutex
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (c *Client_etcd5509) Close() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.cancel == nil {
+ return
+ }
+ c.cancel()
+ c.cancel = nil
+ c.mu.Unlock() // release so acquire() can observe the closed state...
+ c.mu.Lock() // ...re-lock for the deferred Unlock; blocks on the RLock leaked by acquire
+}
+
+type remoteClient_etcd5509 struct {
+ client *Client_etcd5509
+ mu sync.Mutex
+}
+
+func (r *remoteClient_etcd5509) acquire(ctx context.Context) error {
+ for {
+ r.client.mu.RLock()
+ closed := r.client.cancel == nil
+ r.mu.Lock()
+ r.mu.Unlock()
+ if closed {
+ return ErrConnClosed_etcd5509 // Missing RUnlock before return
+ }
+ r.client.mu.RUnlock()
+ }
+}
+
+type kv_etcd5509 struct {
+ rc *remoteClient_etcd5509
+}
+
+func (kv *kv_etcd5509) Get(ctx context.Context) error {
+ return kv.Do(ctx)
+}
+
+func (kv *kv_etcd5509) Do(ctx context.Context) error {
+ for {
+ err := kv.do(ctx)
+ if err == nil {
+ return nil
+ }
+ return err
+ }
+}
+
+func (kv *kv_etcd5509) do(ctx context.Context) error {
+ err := kv.getRemote(ctx)
+ return err
+}
+
+func (kv *kv_etcd5509) getRemote(ctx context.Context) error {
+ return kv.rc.acquire(ctx)
+}
+
+type KV interface {
+ Get(ctx context.Context) error
+ Do(ctx context.Context) error
+}
+
+func NewKV_etcd5509(c *Client_etcd5509) KV {
+ return &kv_etcd5509{rc: &remoteClient_etcd5509{
+ client: c,
+ }}
+}
+
+func Etcd5509() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ ctx, _ := context.WithCancel(context.TODO()) // cancel func deliberately dropped; reproduction is minimal
+ cli := &Client_etcd5509{
+ ctx: ctx,
+ }
+ kv := NewKV_etcd5509(cli)
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ err := kv.Get(context.TODO())
+ if err != nil && err != ErrConnClosed_etcd5509 {
+ io.Discard.Write([]byte("Expect ErrConnClosed"))
+ }
+ }()
+
+ runtime.Gosched()
+ cli.Close() // blocks re-locking: the Get goroutine returned holding an RLock
+
+ <-donec
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go
new file mode 100644
index 0000000000..afbbe35104
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go
@@ -0,0 +1,100 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Etcd6708", Etcd6708)
+}
+
+type EndpointSelectionMode_etcd6708 int
+
+const (
+ EndpointSelectionRandom_etcd6708 EndpointSelectionMode_etcd6708 = iota
+ EndpointSelectionPrioritizeLeader_etcd6708
+)
+
+type MembersAPI_etcd6708 interface {
+ Leader(ctx context.Context)
+}
+
+type Client_etcd6708 interface {
+ Sync(ctx context.Context)
+ SetEndpoints()
+ httpClient_etcd6708
+}
+
+type httpClient_etcd6708 interface {
+ Do(context.Context)
+}
+
+type httpClusterClient_etcd6708 struct {
+ sync.RWMutex
+ selectionMode EndpointSelectionMode_etcd6708
+}
+
+func (c *httpClusterClient_etcd6708) getLeaderEndpoint() {
+ mAPI := NewMembersAPI_etcd6708(c)
+ mAPI.Leader(context.Background()) // ends in c.Do, which RLocks the same RWMutex
+}
+
+func (c *httpClusterClient_etcd6708) SetEndpoints() {
+ switch c.selectionMode {
+ case EndpointSelectionRandom_etcd6708:
+ case EndpointSelectionPrioritizeLeader_etcd6708:
+ c.getLeaderEndpoint()
+ }
+}
+
+func (c *httpClusterClient_etcd6708) Do(ctx context.Context) {
+ c.RLock() // blocks forever when reached from Sync, which holds the write lock
+ c.RUnlock()
+}
+
+func (c *httpClusterClient_etcd6708) Sync(ctx context.Context) {
+ c.Lock() // write lock held across SetEndpoints -> getLeaderEndpoint -> Leader -> Do
+ defer c.Unlock()
+
+ c.SetEndpoints()
+}
+
+type httpMembersAPI_etcd6708 struct {
+ client httpClient_etcd6708
+}
+
+func (m *httpMembersAPI_etcd6708) Leader(ctx context.Context) {
+ m.client.Do(ctx)
+}
+
+func NewMembersAPI_etcd6708(c Client_etcd6708) MembersAPI_etcd6708 {
+ return &httpMembersAPI_etcd6708{
+ client: c,
+ }
+}
+
+func Etcd6708() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ hc := &httpClusterClient_etcd6708{
+ selectionMode: EndpointSelectionPrioritizeLeader_etcd6708, // selects the self-deadlocking path
+ }
+ hc.Sync(context.Background())
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go
new file mode 100644
index 0000000000..0798ab23d3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go
@@ -0,0 +1,81 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/pull/6857
+ * Buggy version: 7c8f13aed7fe251e7066ed6fc1a090699c2cae0e
+ * fix commit-id: 7afc490c95789c408fbc256d8e790273d331c984
+ * Flaky: 19/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Etcd6857", Etcd6857)
+}
+
+type Status_etcd6857 struct{}
+
+type node_etcd6857 struct {
+ status chan chan Status_etcd6857
+ stop chan struct{}
+ done chan struct{}
+}
+
+func (n *node_etcd6857) Status() Status_etcd6857 {
+ c := make(chan Status_etcd6857)
+ n.status <- c // blocks forever if run() has already been stopped
+ return <-c
+}
+
+func (n *node_etcd6857) run() {
+ for {
+ select {
+ case c := <-n.status:
+ c <- Status_etcd6857{}
+ case <-n.stop:
+ close(n.done) // after this, nothing ever drains n.status
+ return
+ }
+ }
+}
+
+func (n *node_etcd6857) Stop() {
+ select {
+ case n.stop <- struct{}{}:
+ case <-n.done:
+ return
+ }
+ <-n.done
+}
+
+func NewNode_etcd6857() *node_etcd6857 {
+ return &node_etcd6857{
+ status: make(chan chan Status_etcd6857),
+ stop: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+}
+
+func Etcd6857() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i <= 100; i++ {
+ go func() {
+ n := NewNode_etcd6857()
+ go n.run() // G1
+ go n.Status() // G2
+ go n.Stop() // G3
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go
new file mode 100644
index 0000000000..1846d0f260
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go
@@ -0,0 +1,98 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/commit/7618fdd1d642e47cac70c03f637b0fd798a53a6e
+ * Buggy version: 377f19b0031f9c0aafe2aec28b6f9019311f52f9
+ * fix commit-id: 7618fdd1d642e47cac70c03f637b0fd798a53a6e
+ * Flaky: 9/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd6873", Etcd6873)
+}
+
+type watchBroadcast_etcd6873 struct{}
+
+type watchBroadcasts_etcd6873 struct {
+ mu sync.Mutex
+ updatec chan *watchBroadcast_etcd6873
+ donec chan struct{}
+}
+
+func newWatchBroadcasts_etcd6873() *watchBroadcasts_etcd6873 {
+ wbs := &watchBroadcasts_etcd6873{
+ updatec: make(chan *watchBroadcast_etcd6873, 1),
+ donec: make(chan struct{}),
+ }
+ go func() { // G2
+ defer close(wbs.donec)
+ for wb := range wbs.updatec {
+ wbs.coalesce(wb) // needs wbs.mu, which stop() holds while waiting on donec
+ }
+ }()
+ return wbs
+}
+
+func (wbs *watchBroadcasts_etcd6873) coalesce(wb *watchBroadcast_etcd6873) {
+ wbs.mu.Lock()
+ wbs.mu.Unlock()
+}
+
+func (wbs *watchBroadcasts_etcd6873) stop() {
+ wbs.mu.Lock() // held across the wait below -> deadlocks with coalesce (see trace)
+ defer wbs.mu.Unlock()
+ close(wbs.updatec)
+ <-wbs.donec
+}
+
+func (wbs *watchBroadcasts_etcd6873) update(wb *watchBroadcast_etcd6873) {
+ select {
+ case wbs.updatec <- wb:
+ default:
+ }
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2 G3
+//---------------------------------------------------------
+// newWatchBroadcasts()
+// wbs.update()
+// wbs.updatec <-
+// return
+// <-wbs.updatec
+// wbs.coalesce()
+// wbs.stop()
+// wbs.mu.Lock()
+// close(wbs.updatec)
+// <-wbs.donec
+// wbs.mu.Lock()
+//---------------------G2,G3 leak-------------------------
+//
+
+func Etcd6873() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ wbs := newWatchBroadcasts_etcd6873() // G1
+ wbs.update(&watchBroadcast_etcd6873{})
+ go wbs.stop() // G3
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go
new file mode 100644
index 0000000000..3c8d58a221
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go
@@ -0,0 +1,163 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/pull/7492
+ * Buggy version: 51939650057d602bb5ab090633138fffe36854dc
+ * fix commit-id: 1b1fabef8ffec606909f01c3983300fff539f214
+ * Flaky: 40/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd7492", Etcd7492)
+}
+
+type TokenProvider_etcd7492 interface {
+ assign()
+ enable()
+ disable()
+}
+
+type simpleTokenTTLKeeper_etcd7492 struct {
+ tokens map[string]time.Time
+ addSimpleTokenCh chan struct{}
+ stopCh chan chan struct{}
+ deleteTokenFunc func(string)
+}
+
+type authStore_etcd7492 struct {
+ tokenProvider TokenProvider_etcd7492
+}
+
+func (as *authStore_etcd7492) Authenticate() {
+ as.tokenProvider.assign()
+}
+
+func NewSimpleTokenTTLKeeper_etcd7492(deletefunc func(string)) *simpleTokenTTLKeeper_etcd7492 {
+ stk := &simpleTokenTTLKeeper_etcd7492{
+ tokens: make(map[string]time.Time),
+ addSimpleTokenCh: make(chan struct{}, 1),
+ stopCh: make(chan chan struct{}),
+ deleteTokenFunc: deletefunc,
+ }
+ go stk.run() // G1
+ return stk
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) run() {
+ tokenTicker := time.NewTicker(time.Nanosecond)
+ defer tokenTicker.Stop()
+ for {
+ select {
+ case <-tm.addSimpleTokenCh:
+ runtime.Gosched() // yield to widen the racy interleaving window
+ /// Make tm.tokens not empty is enough
+ tm.tokens["1"] = time.Now()
+ case <-tokenTicker.C:
+ runtime.Gosched()
+ for t, _ := range tm.tokens {
+ tm.deleteTokenFunc(t) // needs simpleTokensMu, held by a blocked assigner -> deadlock
+ delete(tm.tokens, t)
+ }
+ case waitCh := <-tm.stopCh:
+ waitCh <- struct{}{}
+ return
+ }
+ }
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) addSimpleToken() {
+ tm.addSimpleTokenCh <- struct{}{} // blocking send (cap 1); may never be drained
+ runtime.Gosched()
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) stop() {
+ waitCh := make(chan struct{})
+ tm.stopCh <- waitCh
+ <-waitCh
+ close(tm.stopCh)
+}
+
+type tokenSimple_etcd7492 struct {
+ simpleTokenKeeper *simpleTokenTTLKeeper_etcd7492
+ simpleTokensMu sync.RWMutex
+}
+
+func (t *tokenSimple_etcd7492) assign() {
+ t.assignSimpleTokenToUser()
+}
+
+func (t *tokenSimple_etcd7492) assignSimpleTokenToUser() {
+ t.simpleTokensMu.Lock() // held across the blocking send below — the leak site
+ runtime.Gosched()
+ t.simpleTokenKeeper.addSimpleToken()
+ t.simpleTokensMu.Unlock()
+}
+func newDeleterFunc(t *tokenSimple_etcd7492) func(string) {
+ return func(tk string) {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ }
+}
+
+func (t *tokenSimple_etcd7492) enable() {
+ t.simpleTokenKeeper = NewSimpleTokenTTLKeeper_etcd7492(newDeleterFunc(t))
+}
+
+func (t *tokenSimple_etcd7492) disable() {
+ if t.simpleTokenKeeper != nil {
+ t.simpleTokenKeeper.stop()
+ t.simpleTokenKeeper = nil
+ }
+ t.simpleTokensMu.Lock()
+ t.simpleTokensMu.Unlock()
+}
+
+func newTokenProviderSimple_etcd7492() *tokenSimple_etcd7492 {
+ return &tokenSimple_etcd7492{}
+}
+
+func setupAuthStore_etcd7492() (store *authStore_etcd7492, teardownfunc func()) {
+ as := &authStore_etcd7492{
+ tokenProvider: newTokenProviderSimple_etcd7492(),
+ }
+ as.tokenProvider.enable()
+ tearDown := func() {
+ as.tokenProvider.disable()
+ }
+ return as, tearDown
+}
+
+func Etcd7492() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func() {
+ as, tearDown := setupAuthStore_etcd7492()
+ defer tearDown()
+ var wg sync.WaitGroup
+ wg.Add(3)
+ for i := 0; i < 3; i++ {
+ go func() { // G2
+ as.Authenticate()
+ defer wg.Done()
+ }()
+ }
+ wg.Wait()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go
new file mode 100644
index 0000000000..0a96d7f047
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/coreos/etcd/pull/7902
+ * Buggy version: dfdaf082c51ba14861267f632f6af795a27eb4ef
+ * fix commit-id: 87d99fe0387ee1df1cf1811d88d37331939ef4ae
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd7902", Etcd7902)
+}
+
+type roundClient_etcd7902 struct {
+ progress int
+ acquire func()
+ validate func()
+ release func()
+}
+
+func runElectionFunc_etcd7902() {
+ rcs := make([]roundClient_etcd7902, 3)
+ nextc := make(chan bool)
+ for i := range rcs {
+ var rcNextc chan bool
+ setRcNextc := func() {
+ rcNextc = nextc
+ }
+ rcs[i].acquire = func() {}
+ rcs[i].validate = func() {
+ setRcNextc()
+ }
+ rcs[i].release = func() {
+ if i == 0 { // Assume the first roundClient is the leader
+ close(nextc)
+ nextc = make(chan bool)
+ }
+ <-rcNextc // Follower is blocking here
+ }
+ }
+ doRounds_etcd7902(rcs, 100)
+}
+
+func doRounds_etcd7902(rcs []roundClient_etcd7902, rounds int) {
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+ wg.Add(len(rcs))
+ for i := range rcs {
+ go func(rc *roundClient_etcd7902) { // G2,G3
+ defer wg.Done()
+ for rc.progress < rounds || rounds <= 0 {
+ rc.acquire()
+ mu.Lock()
+ rc.validate()
+ mu.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ rc.progress++
+ mu.Lock()
+ rc.release()
+ mu.Unlock()
+ }
+ }(&rcs[i])
+ }
+ wg.Wait()
+}
+
+func Etcd7902() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go runElectionFunc_etcd7902() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go
new file mode 100644
index 0000000000..ec5491e438
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go
@@ -0,0 +1,111 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1275
+ * Buggy version: (missing)
+ * fix commit-id: 0669f3f89e0330e94bb13fa1ce8cc704aab50c9c
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "io"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Grpc1275", Grpc1275)
+}
+
+type recvBuffer_grpc1275 struct {
+ c chan bool
+}
+
+func (b *recvBuffer_grpc1275) get() <-chan bool {
+ return b.c
+}
+
+type recvBufferReader_grpc1275 struct {
+ recv *recvBuffer_grpc1275
+}
+
+func (r *recvBufferReader_grpc1275) Read(p []byte) (int, error) {
+ select {
+ case <-r.recv.get():
+ }
+ return 0, nil
+}
+
+type Stream_grpc1275 struct {
+ trReader io.Reader
+}
+
+func (s *Stream_grpc1275) Read(p []byte) (int, error) {
+ return io.ReadFull(s.trReader, p)
+}
+
+type http2Client_grpc1275 struct{}
+
+func (t *http2Client_grpc1275) CloseStream(s *Stream_grpc1275) {
+ // It is the client.CloseStream() method called by the
+ // main goroutine that should send the message, but it
+ // is not. The patch is to send out this message.
+}
+
+func (t *http2Client_grpc1275) NewStream() *Stream_grpc1275 {
+ return &Stream_grpc1275{
+ trReader: &recvBufferReader_grpc1275{
+ recv: &recvBuffer_grpc1275{
+ c: make(chan bool),
+ },
+ },
+ }
+}
+
+func testInflightStreamClosing_grpc1275() {
+ client := &http2Client_grpc1275{}
+ stream := client.NewStream()
+ donec := make(chan bool)
+ go func() { // G2
+ defer close(donec)
+ stream.Read([]byte{1})
+ }()
+
+ client.CloseStream(stream)
+
+ timeout := time.NewTimer(300 * time.Nanosecond)
+ select {
+ case <-donec:
+ if !timeout.Stop() {
+ <-timeout.C
+ }
+ case <-timeout.C:
+ }
+}
+
+///
+/// G1 G2
+/// testInflightStreamClosing()
+/// stream.Read()
+/// io.ReadFull()
+/// <- r.recv.get()
+/// CloseStream()
+/// <- donec
+/// ------------G1 timeout, G2 leak---------------------
+///
+
+func Grpc1275() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ testInflightStreamClosing_grpc1275() // G1
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
new file mode 100644
index 0000000000..777534a788
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1424
+ * Buggy version: 39c8c3866d926d95e11c03508bf83d00f2963f91
+ * fix commit-id: 64bd0b04a7bb1982078bae6a2ab34c226125fbc1
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc1424", Grpc1424)
+}
+
+type Balancer_grpc1424 interface {
+ Notify() <-chan bool
+}
+
+type roundRobin_grpc1424 struct {
+ mu sync.Mutex
+ addrCh chan bool
+}
+
+func (rr *roundRobin_grpc1424) Notify() <-chan bool {
+ return rr.addrCh
+}
+
+type addrConn_grpc1424 struct {
+ mu sync.Mutex
+}
+
+func (ac *addrConn_grpc1424) tearDown() {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+}
+
+type dialOption_grpc1424 struct {
+ balancer Balancer_grpc1424
+}
+
+type ClientConn_grpc1424 struct {
+ dopts dialOption_grpc1424
+ conns []*addrConn_grpc1424
+}
+
+func (cc *ClientConn_grpc1424) lbWatcher(doneChan chan bool) {
+ for addr := range cc.dopts.balancer.Notify() {
+ if addr {
+ // nop, make compiler happy
+ }
+ var (
+ del []*addrConn_grpc1424
+ )
+ for _, a := range cc.conns {
+ del = append(del, a)
+ }
+ for _, c := range del {
+ c.tearDown()
+ }
+ }
+}
+
+func NewClientConn_grpc1424() *ClientConn_grpc1424 {
+ cc := &ClientConn_grpc1424{
+ dopts: dialOption_grpc1424{
+ &roundRobin_grpc1424{addrCh: make(chan bool)},
+ },
+ }
+ return cc
+}
+
+func DialContext_grpc1424() {
+ cc := NewClientConn_grpc1424()
+ waitC := make(chan error, 1)
+ go func() { // G2
+ defer close(waitC)
+ ch := cc.dopts.balancer.Notify()
+ if ch != nil {
+ doneChan := make(chan bool)
+ go cc.lbWatcher(doneChan) // G3
+ <-doneChan
+ }
+ }()
+ /// close addrCh
+ close(cc.dopts.balancer.(*roundRobin_grpc1424).addrCh)
+}
+
+func Grpc1424() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go DialContext_grpc1424() // G1
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go
new file mode 100644
index 0000000000..bc658b408d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go
@@ -0,0 +1,84 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1460
+ * Buggy version: 7db1564ba1229bc42919bb1f6d9c4186f3aa8678
+ * fix commit-id: e605a1ecf24b634f94f4eefdab10a9ada98b70dd
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc1460", Grpc1460)
+}
+
+type Stream_grpc1460 struct{}
+
+type http2Client_grpc1460 struct {
+ mu sync.Mutex
+ awakenKeepalive chan struct{}
+ activeStream []*Stream_grpc1460
+}
+
+func (t *http2Client_grpc1460) keepalive() {
+ t.mu.Lock()
+ if len(t.activeStream) < 1 {
+ <-t.awakenKeepalive
+ runtime.Gosched()
+ t.mu.Unlock()
+ } else {
+ t.mu.Unlock()
+ }
+}
+
+func (t *http2Client_grpc1460) NewStream() {
+ t.mu.Lock()
+ runtime.Gosched()
+ t.activeStream = append(t.activeStream, &Stream_grpc1460{})
+ if len(t.activeStream) == 1 {
+ select {
+ case t.awakenKeepalive <- struct{}{}:
+ default:
+ }
+ }
+ t.mu.Unlock()
+}
+
+///
+/// G1 G2
+/// client.keepalive()
+/// client.NewStream()
+/// t.mu.Lock()
+/// <-t.awakenKeepalive
+/// t.mu.Lock()
+/// ---------------G1, G2 deadlock--------------
+///
+
+func Grpc1460() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ client := &http2Client_grpc1460{
+ awakenKeepalive: make(chan struct{}),
+ }
+ go client.keepalive() //G1
+ go client.NewStream() //G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go
new file mode 100644
index 0000000000..0523b9509f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go
@@ -0,0 +1,123 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+// This test case is a reproduction of grpc/3017.
+//
+// It is a goroutine leak that also simultaneously engages many GC assists.
+// Testing runtime behaviour when pivoting between regular and goroutine leak detection modes.
+
+func init() {
+ register("Grpc3017", Grpc3017)
+}
+
+type Address_grpc3017 int
+type SubConn_grpc3017 int
+
+type subConnCacheEntry_grpc3017 struct {
+ sc SubConn_grpc3017
+ cancel func()
+ abortDeleting bool
+}
+
+type lbCacheClientConn_grpc3017 struct {
+ mu sync.Mutex // L1
+ timeout time.Duration
+ subConnCache map[Address_grpc3017]*subConnCacheEntry_grpc3017
+ subConnToAddr map[SubConn_grpc3017]Address_grpc3017
+}
+
+func (ccc *lbCacheClientConn_grpc3017) NewSubConn(addrs []Address_grpc3017) SubConn_grpc3017 {
+ if len(addrs) != 1 {
+ return SubConn_grpc3017(1)
+ }
+ addrWithoutMD := addrs[0]
+ ccc.mu.Lock() // L1
+ defer ccc.mu.Unlock()
+ if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
+ entry.cancel()
+ delete(ccc.subConnCache, addrWithoutMD)
+ return entry.sc
+ }
+ scNew := SubConn_grpc3017(1)
+ ccc.subConnToAddr[scNew] = addrWithoutMD
+ return scNew
+}
+
+func (ccc *lbCacheClientConn_grpc3017) RemoveSubConn(sc SubConn_grpc3017) {
+ ccc.mu.Lock() // L1
+ defer ccc.mu.Unlock()
+ addr, ok := ccc.subConnToAddr[sc]
+ if !ok {
+ return
+ }
+
+ if entry, ok := ccc.subConnCache[addr]; ok {
+ if entry.sc != sc {
+ delete(ccc.subConnToAddr, sc)
+ }
+ return
+ }
+
+ entry := &subConnCacheEntry_grpc3017{
+ sc: sc,
+ }
+ ccc.subConnCache[addr] = entry
+
+ timer := time.AfterFunc(ccc.timeout, func() { // G3
+ runtime.Gosched()
+ ccc.mu.Lock() // L1
+ if entry.abortDeleting {
+ return // Missing unlock
+ }
+ delete(ccc.subConnToAddr, sc)
+ delete(ccc.subConnCache, addr)
+ ccc.mu.Unlock()
+ })
+
+ entry.cancel = func() {
+ if !timer.Stop() {
+ entry.abortDeleting = true
+ }
+ }
+}
+
+func Grpc3017() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { //G1
+ done := make(chan struct{})
+
+ ccc := &lbCacheClientConn_grpc3017{
+ timeout: time.Nanosecond,
+ subConnCache: make(map[Address_grpc3017]*subConnCacheEntry_grpc3017),
+ subConnToAddr: make(map[SubConn_grpc3017]Address_grpc3017),
+ }
+
+ sc := ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
+ go func() { // G2
+ for i := 0; i < 10000; i++ {
+ ccc.RemoveSubConn(sc)
+ sc = ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
+ }
+ close(done)
+ }()
+ <-done
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go
new file mode 100644
index 0000000000..5f6201ec80
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go
@@ -0,0 +1,65 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/660
+ * Buggy version: db85417dd0de6cc6f583672c6175a7237e5b5dd2
+ * fix commit-id: ceacfbcbc1514e4e677932fd55938ac455d182fb
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Grpc660", Grpc660)
+}
+
+type benchmarkClient_grpc660 struct {
+ stop chan bool
+}
+
+func (bc *benchmarkClient_grpc660) doCloseLoopUnary() {
+ for {
+ done := make(chan bool)
+ go func() { // G2
+ if rand.Intn(10) > 7 {
+ done <- false
+ return
+ }
+ done <- true
+ }()
+ select {
+ case <-bc.stop:
+ return
+ case <-done:
+ }
+ }
+}
+
+func Grpc660() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ bc := &benchmarkClient_grpc660{
+ stop: make(chan bool),
+ }
+ go bc.doCloseLoopUnary() // G1
+ go func() { // helper goroutine
+ bc.stop <- true
+ }()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go
new file mode 100644
index 0000000000..72005cc844
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go
@@ -0,0 +1,74 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc795", Grpc795)
+}
+
+type Server_grpc795 struct {
+ mu sync.Mutex
+ drain bool
+}
+
+func (s *Server_grpc795) GracefulStop() {
+ s.mu.Lock()
+ if s.drain {
+ s.mu.Lock()
+ return
+ }
+ s.drain = true
+ s.mu.Unlock()
+}
+func (s *Server_grpc795) Serve() {
+ s.mu.Lock()
+ s.mu.Unlock()
+}
+
+func NewServer_grpc795() *Server_grpc795 {
+ return &Server_grpc795{}
+}
+
+type test_grpc795 struct {
+ srv *Server_grpc795
+}
+
+func (te *test_grpc795) startServer() {
+ s := NewServer_grpc795()
+ te.srv = s
+ go s.Serve()
+}
+
+func newTest_grpc795() *test_grpc795 {
+ return &test_grpc795{}
+}
+
+func testServerGracefulStopIdempotent_grpc795() {
+ te := newTest_grpc795()
+
+ te.startServer()
+
+ for i := 0; i < 3; i++ {
+ te.srv.GracefulStop()
+ }
+}
+
+func Grpc795() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testServerGracefulStopIdempotent_grpc795()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go
new file mode 100644
index 0000000000..188b3b88ba
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/862
+ * Buggy version: d8f4ebe77f6b7b6403d7f98626de8a534f9b93a7
+ * fix commit-id: dd5645bebff44f6b88780bb949022a09eadd7dae
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Grpc862", Grpc862)
+}
+
+type ClientConn_grpc862 struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ conns []*addrConn_grpc862
+}
+
+func (cc *ClientConn_grpc862) Close() {
+ cc.cancel()
+ conns := cc.conns
+ cc.conns = nil
+ for _, ac := range conns {
+ ac.tearDown()
+ }
+}
+
+func (cc *ClientConn_grpc862) resetAddrConn() {
+ ac := &addrConn_grpc862{
+ cc: cc,
+ }
+ cc.conns = append(cc.conns, ac)
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ ac.resetTransport()
+}
+
+type addrConn_grpc862 struct {
+ cc *ClientConn_grpc862
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (ac *addrConn_grpc862) resetTransport() {
+ for retries := 1; ; retries++ {
+ _ = 2 * time.Nanosecond * time.Duration(retries)
+ timeout := 10 * time.Nanosecond
+ _, cancel := context.WithTimeout(ac.ctx, timeout)
+ _ = time.Now()
+ cancel()
+ <-ac.ctx.Done()
+ return
+ }
+}
+
+func (ac *addrConn_grpc862) tearDown() {
+ ac.cancel()
+}
+
+func DialContext_grpc862(ctx context.Context) (conn *ClientConn_grpc862) {
+ cc := &ClientConn_grpc862{}
+ cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ defer func() {
+ select {
+ case <-ctx.Done():
+ if conn != nil {
+ conn.Close()
+ }
+ conn = nil
+ default:
+ }
+ }()
+ go func() { // G2
+ cc.resetAddrConn()
+ }()
+ return conn
+}
+
+func Grpc862() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ go DialContext_grpc862(ctx) // G1
+ go cancel() // helper goroutine
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go
new file mode 100644
index 0000000000..3804692a8b
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go
@@ -0,0 +1,81 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Hugo3251", Hugo3251)
+}
+
+type remoteLock_hugo3251 struct {
+ sync.RWMutex // L1
+ m map[string]*sync.Mutex // L2
+}
+
+func (l *remoteLock_hugo3251) URLLock(url string) {
+ l.Lock() // L1
+ if _, ok := l.m[url]; !ok {
+ l.m[url] = &sync.Mutex{}
+ }
+ l.m[url].Lock() // L2
+ runtime.Gosched()
+ l.Unlock() // L1
+ // runtime.Gosched()
+}
+
+func (l *remoteLock_hugo3251) URLUnlock(url string) {
+ l.RLock() // L1
+ defer l.RUnlock() // L1
+ if um, ok := l.m[url]; ok {
+ um.Unlock() // L2
+ }
+}
+
+func resGetRemote_hugo3251(remoteURLLock *remoteLock_hugo3251, url string) error {
+ remoteURLLock.URLLock(url)
+ defer func() { remoteURLLock.URLUnlock(url) }()
+
+ return nil
+}
+
+func Hugo3251() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(time.Second)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 11; i++ {
+ go func() { // G1
+ url := "http://Foo.Bar/foo_Bar-Foo"
+ remoteURLLock := &remoteLock_hugo3251{m: make(map[string]*sync.Mutex)}
+ for range []bool{false, true} {
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(gor int) { // G2
+ defer wg.Done()
+ for j := 0; j < 200; j++ {
+ err := resGetRemote_hugo3251(remoteURLLock, url)
+ if err != nil {
+ fmt.Errorf("Error getting resource content: %s", err)
+ }
+ time.Sleep(300 * time.Nanosecond)
+ }
+ }(i)
+ }
+ wg.Wait()
+ }
+ }()
+ }
+} \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go
new file mode 100644
index 0000000000..6a1bbe9a3f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go
@@ -0,0 +1,317 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "log"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Hugo5379", Hugo5379)
+}
+
+type shortcodeHandler_hugo5379 struct {
+ p *PageWithoutContent_hugo5379
+ contentShortcodes map[int]func() error
+ contentShortcodesDelta map[int]func() error
+ init sync.Once // O1
+}
+
+func (s *shortcodeHandler_hugo5379) executeShortcodesForDelta(p *PageWithoutContent_hugo5379) error {
+ for k, _ := range s.contentShortcodesDelta {
+ render := s.contentShortcodesDelta[k]
+ if err := render(); err != nil {
+ continue
+ }
+ }
+ return nil
+}
+
+func (s *shortcodeHandler_hugo5379) updateDelta() {
+ s.init.Do(func() {
+ s.contentShortcodes = createShortcodeRenderers_hugo5379(s.p.withoutContent())
+ })
+
+ delta := make(map[int]func() error)
+
+ for k, v := range s.contentShortcodes {
+ if _, ok := delta[k]; !ok {
+ delta[k] = v
+ }
+ }
+
+ s.contentShortcodesDelta = delta
+}
+
+type Page_hugo5379 struct {
+ *pageInit_hugo5379
+ *pageContentInit_hugo5379
+ pageWithoutContent *PageWithoutContent_hugo5379
+ contentInit sync.Once // O2
+ contentInitMu sync.Mutex // L1
+ shortcodeState *shortcodeHandler_hugo5379
+}
+
+func (p *Page_hugo5379) WordCount() {
+ p.initContentPlainAndMeta()
+}
+
+func (p *Page_hugo5379) initContentPlainAndMeta() {
+ p.initContent()
+ p.initPlain(true)
+}
+
+func (p *Page_hugo5379) initPlain(lock bool) {
+ p.plainInit.Do(func() {
+ if lock {
+ /// Double locking here.
+ p.contentInitMu.Lock()
+ defer p.contentInitMu.Unlock()
+ }
+ })
+}
+
+func (p *Page_hugo5379) withoutContent() *PageWithoutContent_hugo5379 {
+ p.pageInit_hugo5379.withoutContentInit.Do(func() {
+ p.pageWithoutContent = &PageWithoutContent_hugo5379{Page_hugo5379: p}
+ })
+ return p.pageWithoutContent
+}
+
+func (p *Page_hugo5379) prepareForRender() error {
+ var err error
+ if err = handleShortcodes_hugo5379(p.withoutContent()); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (p *Page_hugo5379) setContentInit() {
+ p.shortcodeState.updateDelta()
+}
+
+func (p *Page_hugo5379) initContent() {
+ p.contentInit.Do(func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+ defer cancel()
+ c := make(chan error, 1)
+
+ go func() { // G2
+ var err error
+ p.contentInitMu.Lock() // first lock here
+ defer p.contentInitMu.Unlock()
+
+ err = p.prepareForRender()
+ if err != nil {
+ c <- err
+ return
+ }
+ c <- err
+ }()
+
+ select {
+ case <-ctx.Done():
+ case <-c:
+ }
+ })
+}
+
+type PageWithoutContent_hugo5379 struct {
+ *Page_hugo5379
+}
+
+type pageInit_hugo5379 struct {
+ withoutContentInit sync.Once
+}
+
+type pageContentInit_hugo5379 struct {
+ contentInit sync.Once // O3
+ plainInit sync.Once // O4
+}
+
+type HugoSites_hugo5379 struct {
+ Sites []*Site_hugo5379
+}
+
+func (h *HugoSites_hugo5379) render() {
+ for _, s := range h.Sites {
+ for _, s2 := range h.Sites {
+ s2.preparePagesForRender()
+ }
+ s.renderPages()
+ }
+}
+
+func (h *HugoSites_hugo5379) Build() {
+ h.render()
+}
+
+type Pages_hugo5379 []*Page_hugo5379
+
+type PageCollections_hugo5379 struct {
+ Pages Pages_hugo5379
+}
+
+type Site_hugo5379 struct {
+ *PageCollections_hugo5379
+}
+
+func (s *Site_hugo5379) preparePagesForRender() {
+ for _, p := range s.Pages {
+ p.setContentInit()
+ }
+}
+
+func (s *Site_hugo5379) renderForLayouts() {
+ /// Omit reflections
+ for _, p := range s.Pages {
+ p.WordCount()
+ }
+}
+
+func (s *Site_hugo5379) renderAndWritePage() {
+ s.renderForLayouts()
+}
+
+func (s *Site_hugo5379) renderPages() {
+ numWorkers := 2
+ wg := &sync.WaitGroup{}
+
+ for i := 0; i < numWorkers; i++ {
+ wg.Add(1)
+ go pageRenderer_hugo5379(s, wg) // G3
+ }
+
+ wg.Wait()
+}
+
+type sitesBuilder_hugo5379 struct {
+ H *HugoSites_hugo5379
+}
+
+func (s *sitesBuilder_hugo5379) Build() *sitesBuilder_hugo5379 {
+ return s.build()
+}
+
+func (s *sitesBuilder_hugo5379) build() *sitesBuilder_hugo5379 {
+ s.H.Build()
+ return s
+}
+
+func (s *sitesBuilder_hugo5379) CreateSitesE() error {
+ sites, err := NewHugoSites_hugo5379()
+ if err != nil {
+ return err
+ }
+ s.H = sites
+ return nil
+}
+
+func (s *sitesBuilder_hugo5379) CreateSites() *sitesBuilder_hugo5379 {
+ if err := s.CreateSitesE(); err != nil {
+ log.Fatalf("Failed to create sites: %s", err)
+ }
+ return s
+}
+
+func newHugoSites_hugo5379(sites ...*Site_hugo5379) (*HugoSites_hugo5379, error) {
+ h := &HugoSites_hugo5379{Sites: sites}
+ return h, nil
+}
+
+func newSite_hugo5379() *Site_hugo5379 {
+ c := &PageCollections_hugo5379{}
+ s := &Site_hugo5379{
+ PageCollections_hugo5379: c,
+ }
+ return s
+}
+
+func createSitesFromConfig_hugo5379() []*Site_hugo5379 {
+ var (
+ sites []*Site_hugo5379
+ )
+
+ var s *Site_hugo5379 = newSite_hugo5379()
+ sites = append(sites, s)
+ return sites
+}
+
+func NewHugoSites_hugo5379() (*HugoSites_hugo5379, error) {
+ sites := createSitesFromConfig_hugo5379()
+ return newHugoSites_hugo5379(sites...)
+}
+
+func prepareShortcodeForPage_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
+ m := make(map[int]func() error)
+ m[0] = func() error {
+ return renderShortcode_hugo5379(p)
+ }
+ return m
+}
+
+func renderShortcode_hugo5379(p *PageWithoutContent_hugo5379) error {
+ return renderShortcodeWithPage_hugo5379(p)
+}
+
+func renderShortcodeWithPage_hugo5379(p *PageWithoutContent_hugo5379) error {
+ /// Omit reflections
+ p.WordCount()
+ return nil
+}
+
+func createShortcodeRenderers_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
+ return prepareShortcodeForPage_hugo5379(p)
+}
+
+func newShortcodeHandler_hugo5379(p *Page_hugo5379) *shortcodeHandler_hugo5379 {
+ return &shortcodeHandler_hugo5379{
+ p: p.withoutContent(),
+ contentShortcodes: make(map[int]func() error),
+ contentShortcodesDelta: make(map[int]func() error),
+ }
+}
+
+func handleShortcodes_hugo5379(p *PageWithoutContent_hugo5379) error {
+ return p.shortcodeState.executeShortcodesForDelta(p)
+}
+
+func pageRenderer_hugo5379(s *Site_hugo5379, wg *sync.WaitGroup) {
+ defer wg.Done()
+ s.renderAndWritePage()
+}
+
+func Hugo5379() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ b := &sitesBuilder_hugo5379{}
+ s := b.CreateSites()
+ for _, site := range s.H.Sites {
+ p := &Page_hugo5379{
+ pageInit_hugo5379: &pageInit_hugo5379{},
+ pageContentInit_hugo5379: &pageContentInit_hugo5379{},
+ pageWithoutContent: &PageWithoutContent_hugo5379{},
+ contentInit: sync.Once{},
+ contentInitMu: sync.Mutex{},
+ shortcodeState: nil,
+ }
+ p.shortcodeState = newShortcodeHandler_hugo5379(p)
+ site.Pages = append(site.Pages, p)
+ }
+ s.Build()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go
new file mode 100644
index 0000000000..839051cc64
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go
@@ -0,0 +1,129 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio16224", Istio16224)
+}
+
+type ConfigStoreCache_istio16224 interface {
+ RegisterEventHandler(handler func())
+ Run()
+}
+
+type Event_istio16224 int
+
+type Handler_istio16224 func(Event_istio16224)
+
+type configstoreMonitor_istio16224 struct {
+ handlers []Handler_istio16224
+ eventCh chan Event_istio16224
+}
+
+func (m *configstoreMonitor_istio16224) Run(stop <-chan struct{}) {
+ for {
+ select {
+ case <-stop:
+ // This bug is not described, but is a true positive (in our eyes).
+ // In a real run, main exits while the goroutine is blocked here.
+ if _, ok := <-m.eventCh; ok {
+ close(m.eventCh)
+ }
+ return
+ case ce, ok := <-m.eventCh:
+ if ok {
+ m.processConfigEvent(ce)
+ }
+ }
+ }
+}
+
+func (m *configstoreMonitor_istio16224) processConfigEvent(ce Event_istio16224) {
+ m.applyHandlers(ce)
+}
+
+func (m *configstoreMonitor_istio16224) AppendEventHandler(h Handler_istio16224) {
+ m.handlers = append(m.handlers, h)
+}
+
+func (m *configstoreMonitor_istio16224) applyHandlers(e Event_istio16224) {
+ for _, f := range m.handlers {
+ f(e)
+ }
+}
+func (m *configstoreMonitor_istio16224) ScheduleProcessEvent(configEvent Event_istio16224) {
+ m.eventCh <- configEvent
+}
+
+type Monitor_istio16224 interface {
+ Run(<-chan struct{})
+ AppendEventHandler(Handler_istio16224)
+ ScheduleProcessEvent(Event_istio16224)
+}
+
+type controller_istio16224 struct {
+ monitor Monitor_istio16224
+}
+
+func (c *controller_istio16224) RegisterEventHandler(f func(Event_istio16224)) {
+ c.monitor.AppendEventHandler(f)
+}
+
+func (c *controller_istio16224) Run(stop <-chan struct{}) {
+ c.monitor.Run(stop)
+}
+
+func (c *controller_istio16224) Create() {
+ c.monitor.ScheduleProcessEvent(Event_istio16224(0))
+}
+
+func NewMonitor_istio16224() Monitor_istio16224 {
+ return NewBufferedMonitor_istio16224()
+}
+
+func NewBufferedMonitor_istio16224() Monitor_istio16224 {
+ return &configstoreMonitor_istio16224{
+ eventCh: make(chan Event_istio16224),
+ }
+}
+
+func Istio16224() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ controller := &controller_istio16224{monitor: NewMonitor_istio16224()}
+ done := make(chan bool)
+ lock := sync.Mutex{}
+ controller.RegisterEventHandler(func(event Event_istio16224) {
+ lock.Lock()
+ defer lock.Unlock()
+ done <- true
+ })
+
+ stop := make(chan struct{})
+ go controller.Run(stop)
+
+ controller.Create()
+
+ lock.Lock() // blocks
+ lock.Unlock()
+ <-done
+
+ close(stop)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go
new file mode 100644
index 0000000000..aa8317c6d5
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go
@@ -0,0 +1,144 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio17860", Istio17860)
+}
+
+type Proxy_istio17860 interface {
+ IsLive() bool
+}
+
+type TestProxy_istio17860 struct {
+ live func() bool
+}
+
+func (tp TestProxy_istio17860) IsLive() bool {
+ if tp.live == nil {
+ return true
+ }
+ return tp.live()
+}
+
+type Agent_istio17860 interface {
+ Run(ctx context.Context)
+ Restart()
+}
+
+type exitStatus_istio17860 int
+
+type agent_istio17860 struct {
+ proxy Proxy_istio17860
+ mu *sync.Mutex
+ statusCh chan exitStatus_istio17860
+ currentEpoch int
+ activeEpochs map[int]struct{}
+}
+
+func (a *agent_istio17860) Run(ctx context.Context) {
+ for {
+ select {
+ case status := <-a.statusCh:
+ a.mu.Lock()
+ delete(a.activeEpochs, int(status))
+ active := len(a.activeEpochs)
+ a.mu.Unlock()
+ if active == 0 {
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (a *agent_istio17860) Restart() {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.waitUntilLive()
+ a.currentEpoch++
+ a.activeEpochs[a.currentEpoch] = struct{}{}
+
+ go a.runWait(a.currentEpoch)
+}
+
+func (a *agent_istio17860) runWait(epoch int) {
+ a.statusCh <- exitStatus_istio17860(epoch)
+}
+
+func (a *agent_istio17860) waitUntilLive() {
+ if len(a.activeEpochs) == 0 {
+ return
+ }
+
+ interval := time.NewTicker(30 * time.Nanosecond)
+ timer := time.NewTimer(100 * time.Nanosecond)
+ defer func() {
+ interval.Stop()
+ timer.Stop()
+ }()
+
+ if a.proxy.IsLive() {
+ return
+ }
+
+ for {
+ select {
+ case <-timer.C:
+ return
+ case <-interval.C:
+ if a.proxy.IsLive() {
+ return
+ }
+ }
+ }
+}
+
+func NewAgent_istio17860(proxy Proxy_istio17860) Agent_istio17860 {
+ return &agent_istio17860{
+ proxy: proxy,
+ mu: &sync.Mutex{},
+ statusCh: make(chan exitStatus_istio17860),
+ activeEpochs: make(map[int]struct{}),
+ }
+}
+
+func Istio17860() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ neverLive := func() bool {
+ return false
+ }
+
+ a := NewAgent_istio17860(TestProxy_istio17860{live: neverLive})
+ go func() { a.Run(ctx) }()
+
+ a.Restart()
+ go a.Restart()
+
+ time.Sleep(200 * time.Nanosecond)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go
new file mode 100644
index 0000000000..b410c49032
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go
@@ -0,0 +1,154 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// Goroutine-leak reproduction benchmark modelled on istio issue 18454.
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio18454", Istio18454)
+}
+
+// eventChCap_istio18454 is the event channel capacity; Processor.Start
+// pre-fills the channel with exactly this many events.
+const eventChCap_istio18454 = 1024
+
+// Worker_istio18454 runs a single cancellable background function.
+type Worker_istio18454 struct {
+ ctx context.Context
+ ctxCancel context.CancelFunc
+}
+
+// Start runs setupFn synchronously (if non-nil), then launches runFn in a
+// new goroutine bound to the worker's context.
+func (w *Worker_istio18454) Start(setupFn func(), runFn func(c context.Context)) {
+ if setupFn != nil {
+ setupFn()
+ }
+ go func() {
+ runFn(w.ctx)
+ }()
+}
+
+// Stop cancels the worker's context, asking runFn to exit.
+func (w *Worker_istio18454) Stop() {
+ w.ctxCancel()
+}
+
+// Strategy_istio18454 debounces change notifications with a timer serviced
+// by a worker-run event loop.
+type Strategy_istio18454 struct {
+ timer *time.Timer
+ timerFrequency time.Duration
+ stateLock sync.Mutex
+ resetChan chan struct{}
+ worker *Worker_istio18454
+ startTimerFn func()
+}
+
+// OnChange starts the timer on first use; afterwards it asks the event loop
+// to reset it. NOTE(review): the send on resetChan happens after stateLock
+// is released — if the event loop has already exited (worker stopped) and
+// the channel buffer is full, this send blocks forever. That is the leak
+// this benchmark reproduces.
+func (s *Strategy_istio18454) OnChange() {
+ s.stateLock.Lock()
+ if s.timer != nil {
+ s.stateLock.Unlock()
+ s.resetChan <- struct{}{}
+ return
+ }
+ s.startTimerFn()
+ s.stateLock.Unlock()
+}
+
+// startTimer creates the timer and starts the event loop servicing timer
+// fires, reset requests, and context cancellation.
+func (s *Strategy_istio18454) startTimer() {
+ s.timer = time.NewTimer(s.timerFrequency)
+ eventLoop := func(ctx context.Context) {
+ for {
+ select {
+ case <-s.timer.C:
+ case <-s.resetChan:
+ // Standard drain-then-reset pattern for an active timer.
+ if !s.timer.Stop() {
+ <-s.timer.C
+ }
+ s.timer.Reset(s.timerFrequency)
+ case <-ctx.Done():
+ s.timer.Stop()
+ return
+ }
+ }
+ }
+ s.worker.Start(nil, eventLoop)
+}
+
+// Close stops the strategy's worker (and with it the event loop).
+func (s *Strategy_istio18454) Close() {
+ s.worker.Stop()
+}
+
+// Event_istio18454 is an opaque event consumed by the processor.
+type Event_istio18454 int
+
+// Processor_istio18454 drains eventCh, notifying the strategy per event.
+type Processor_istio18454 struct {
+ stateStrategy *Strategy_istio18454
+ worker *Worker_istio18454
+ eventCh chan Event_istio18454
+}
+
+// processEvent forwards a single event to the state strategy.
+func (p *Processor_istio18454) processEvent() {
+ p.stateStrategy.OnChange()
+}
+
+// Start fills eventCh to capacity, then launches the drain loop; the loop
+// closes the strategy when the processor's context is cancelled.
+func (p *Processor_istio18454) Start() {
+ setupFn := func() {
+ for i := 0; i < eventChCap_istio18454; i++ {
+ p.eventCh <- Event_istio18454(0)
+ }
+ }
+ runFn := func(ctx context.Context) {
+ defer func() {
+ p.stateStrategy.Close()
+ }()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-p.eventCh:
+ p.processEvent()
+ }
+ }
+ }
+ p.worker.Start(setupFn, runFn)
+}
+
+// Stop cancels the processor's worker.
+func (p *Processor_istio18454) Stop() {
+ p.worker.Stop()
+}
+
+// NewWorker_istio18454 returns a worker with a fresh cancellable context.
+func NewWorker_istio18454() *Worker_istio18454 {
+ worker := &Worker_istio18454{}
+ worker.ctx, worker.ctxCancel = context.WithCancel(context.Background())
+ return worker
+}
+
+// Istio18454 spawns 100 processor/strategy pairs and stops them, leaking
+// goroutines blocked in OnChange; the deferred func then writes the
+// goroutine-leak profile.
+func Istio18454() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ stateStrategy := &Strategy_istio18454{
+ timerFrequency: time.Nanosecond,
+ resetChan: make(chan struct{}, 1),
+ worker: NewWorker_istio18454(),
+ }
+ stateStrategy.startTimerFn = stateStrategy.startTimer
+
+ p := &Processor_istio18454{
+ stateStrategy: stateStrategy,
+ worker: NewWorker_istio18454(),
+ eventCh: make(chan Event_istio18454, eventChCap_istio18454),
+ }
+
+ p.Start()
+ defer p.Stop()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go
new file mode 100644
index 0000000000..0eca5f41fb
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go
@@ -0,0 +1,95 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/10182
+ * Buggy version: 4b990d128a17eea9058d28a3b3688ab8abafbd94
+ * fix commit-id: 64ad3e17ad15cd0f9a4fd86706eec1c572033254
+ * Flaky: 15/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes10182", Kubernetes10182)
+}
+
+// statusManager_kubernetes10182 models the buggy kubelet status manager:
+// SetPodStatus sends on an unbuffered channel while holding the write lock
+// that the channel's consumer also needs (see the trace below).
+type statusManager_kubernetes10182 struct {
+ podStatusesLock sync.RWMutex
+ podStatusChannel chan bool
+}
+
+// Start drains exactly two status updates in a background goroutine.
+func (s *statusManager_kubernetes10182) Start() {
+ go func() {
+ for i := 0; i < 2; i++ {
+ s.syncBatch()
+ }
+ }()
+}
+
+// syncBatch receives one update, then takes the status lock via
+// DeletePodStatus.
+func (s *statusManager_kubernetes10182) syncBatch() {
+ runtime.Gosched()
+ <-s.podStatusChannel
+ s.DeletePodStatus()
+}
+
+// DeletePodStatus briefly acquires the write lock.
+func (s *statusManager_kubernetes10182) DeletePodStatus() {
+ s.podStatusesLock.Lock()
+ defer s.podStatusesLock.Unlock()
+}
+
+// SetPodStatus sends on the unbuffered channel while the write lock is held
+// — the bug: the receiver (syncBatch) needs that lock to make progress.
+func (s *statusManager_kubernetes10182) SetPodStatus() {
+ s.podStatusesLock.Lock()
+ defer s.podStatusesLock.Unlock()
+ s.podStatusChannel <- true
+}
+
+// NewStatusManager_kubernetes10182 returns a manager with an unbuffered
+// status channel.
+func NewStatusManager_kubernetes10182() *statusManager_kubernetes10182 {
+ return &statusManager_kubernetes10182{
+ podStatusChannel: make(chan bool),
+ }
+}
+
+// Example of deadlock trace:
+//
+// G1 G2 G3
+// --------------------------------------------------------------------------------
+// s.Start()
+// s.syncBatch()
+// s.SetPodStatus()
+// <-s.podStatusChannel
+// s.podStatusesLock.Lock()
+// s.podStatusChannel <- true
+// s.podStatusesLock.Unlock()
+// return
+// s.DeletePodStatus()
+// s.podStatusesLock.Lock()
+// s.podStatusChannel <- true
+// s.podStatusesLock.Lock()
+// -----------------------------------G1,G3 leak-------------------------------------
+
+// Kubernetes10182 launches 1000 instances of the scenario above and then
+// writes the goroutine-leak profile.
+func Kubernetes10182() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ s := NewStatusManager_kubernetes10182()
+ go s.Start()
+ go s.SetPodStatus()
+ go s.SetPodStatus()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go
new file mode 100644
index 0000000000..36405f240a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go
@@ -0,0 +1,118 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// Goroutine-leak reproduction benchmark modelled on kubernetes issue 11298.
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes11298", Kubernetes11298)
+}
+
+// Signal_kubernetes11298 is a receive-only channel that is closed when the
+// associated work finishes.
+type Signal_kubernetes11298 <-chan struct{}
+
+// After_kubernetes11298 runs f in a new goroutine and returns a channel
+// closed once f returns.
+func After_kubernetes11298(f func()) Signal_kubernetes11298 {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ if f != nil {
+ f()
+ }
+ }()
+ return Signal_kubernetes11298(ch)
+}
+
+// Until_kubernetes11298 invokes f repeatedly, sleeping period between
+// calls, until stopCh is closed.
+func Until_kubernetes11298(f func(), period time.Duration, stopCh <-chan struct{}) {
+ if f == nil {
+ return
+ }
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ f()
+ select {
+ case <-stopCh:
+ case <-time.After(period):
+ }
+ }
+
+}
+
+// notifier_kubernetes11298 pairs a mutex with a condition variable built on
+// it (wired up in Notify_kubernetes11298).
+type notifier_kubernetes11298 struct {
+ lock sync.Mutex
+ cond *sync.Cond
+}
+
+// abort will be closed no matter what
+// NOTE(review): cond.Wait runs on a helper goroutine while serviceLoop
+// itself holds n.lock; a Signal delivered when no goroutine is yet waiting
+// is lost, leaving <-ch blocked forever — the leak this file reproduces.
+func (n *notifier_kubernetes11298) serviceLoop(abort <-chan struct{}) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ for {
+ select {
+ case <-abort:
+ return
+ default:
+ ch := After_kubernetes11298(func() {
+ n.cond.Wait()
+ })
+ select {
+ case <-abort:
+ n.cond.Signal()
+ <-ch
+ return
+ case <-ch:
+ }
+ }
+ }
+}
+
+// abort will be closed no matter what
+// Notify_kubernetes11298 runs a signalling loop and the service loop until
+// abort is closed.
+func Notify_kubernetes11298(abort <-chan struct{}) {
+ n := &notifier_kubernetes11298{}
+ n.cond = sync.NewCond(&n.lock)
+ finished := After_kubernetes11298(func() {
+ Until_kubernetes11298(func() {
+ for {
+ select {
+ case <-abort:
+ return
+ default:
+ func() {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ n.cond.Signal()
+ }()
+ }
+ }
+ }, 0, abort)
+ })
+ Until_kubernetes11298(func() { n.serviceLoop(finished) }, 0, abort)
+}
+// Kubernetes11298 runs 1000 notify scenarios and then writes the
+// goroutine-leak profile.
+func Kubernetes11298() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ done := make(chan struct{})
+ notifyDone := After_kubernetes11298(func() { Notify_kubernetes11298(done) })
+ go func() {
+ defer close(done)
+ time.Sleep(300 * time.Nanosecond)
+ }()
+ <-notifyDone
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go
new file mode 100644
index 0000000000..f6aa8b9ddf
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go
@@ -0,0 +1,166 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/13135
+ * Buggy version: 6ced66249d4fd2a81e86b4a71d8df0139fe5ceae
+ * fix commit-id: a12b7edc42c5c06a2e7d9f381975658692951d5a
+ * Flaky: 93/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes13135", Kubernetes13135)
+}
+
+var (
+ StopChannel_kubernetes13135 chan struct{}
+)
+
+// Util_kubernetes13135 calls f in a loop, sleeping period between calls,
+// until stopCh is closed.
+func Util_kubernetes13135(f func(), period time.Duration, stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ func() {
+ f()
+ }()
+ time.Sleep(period)
+ }
+}
+
+// Store_kubernetes13135 abstracts the watch cache used by the reflector.
+type Store_kubernetes13135 interface {
+ Add(obj interface{})
+ Replace(obj interface{})
+}
+
+type Reflector_kubernetes13135 struct {
+ store Store_kubernetes13135
+}
+
+// ListAndWatch seeds the store via syncWith and always reports success.
+func (r *Reflector_kubernetes13135) ListAndWatch(stopCh <-chan struct{}) error {
+ r.syncWith()
+ return nil
+}
+
+func NewReflector_kubernetes13135(store Store_kubernetes13135) *Reflector_kubernetes13135 {
+ return &Reflector_kubernetes13135{
+ store: store,
+ }
+}
+
+// syncWith triggers the store's Replace path (and thus onReplace).
+func (r *Reflector_kubernetes13135) syncWith() {
+ r.store.Replace(nil)
+}
+
+type Cacher_kubernetes13135 struct {
+ sync.Mutex
+ initialized sync.WaitGroup
+ initOnce sync.Once
+ watchCache *WatchCache_kubernetes13135
+ reflector *Reflector_kubernetes13135
+}
+
+func (c *Cacher_kubernetes13135) processEvent() {
+ c.Lock()
+ defer c.Unlock()
+}
+
+// startCaching takes c's lock and relies on the onReplace callback (wired
+// up in NewCacher_kubernetes13135) to unlock it — lock and unlock live in
+// different functions, which is the core of the original bug.
+func (c *Cacher_kubernetes13135) startCaching(stopChannel <-chan struct{}) {
+ c.Lock()
+ for {
+ err := c.reflector.ListAndWatch(stopChannel)
+ if err == nil {
+ break
+ }
+ }
+}
+
+type WatchCache_kubernetes13135 struct {
+ sync.RWMutex
+ onReplace func()
+ onEvent func()
+}
+
+func (w *WatchCache_kubernetes13135) SetOnEvent(onEvent func()) {
+ w.Lock()
+ defer w.Unlock()
+ w.onEvent = onEvent
+}
+
+func (w *WatchCache_kubernetes13135) SetOnReplace(onReplace func()) {
+ w.Lock()
+ defer w.Unlock()
+ w.onReplace = onReplace
+}
+
+// processEvent dispatches to the onEvent callback under the write lock.
+func (w *WatchCache_kubernetes13135) processEvent() {
+ w.Lock()
+ defer w.Unlock()
+ if w.onEvent != nil {
+ w.onEvent()
+ }
+}
+
+func (w *WatchCache_kubernetes13135) Add(obj interface{}) {
+ w.processEvent()
+}
+
+// Replace dispatches to the onReplace callback under the write lock.
+func (w *WatchCache_kubernetes13135) Replace(obj interface{}) {
+ w.Lock()
+ defer w.Unlock()
+ if w.onReplace != nil {
+ w.onReplace()
+ }
+}
+
+// NewCacher_kubernetes13135 wires the cacher: onReplace signals init done
+// and unlocks the cacher lock taken by startCaching (cross-goroutine
+// lock/unlock), then blocks until initialization completes.
+func NewCacher_kubernetes13135(stopCh <-chan struct{}) *Cacher_kubernetes13135 {
+ watchCache := &WatchCache_kubernetes13135{}
+ cacher := &Cacher_kubernetes13135{
+ initialized: sync.WaitGroup{},
+ watchCache: watchCache,
+ reflector: NewReflector_kubernetes13135(watchCache),
+ }
+ cacher.initialized.Add(1)
+ watchCache.SetOnReplace(func() {
+ cacher.initOnce.Do(func() { cacher.initialized.Done() })
+ cacher.Unlock()
+ })
+ watchCache.SetOnEvent(cacher.processEvent)
+ go Util_kubernetes13135(func() { cacher.startCaching(stopCh) }, 0, stopCh) // G2
+ cacher.initialized.Wait()
+ return cacher
+}
+
+// Kubernetes13135 runs 50 cacher scenarios and then writes the
+// goroutine-leak profile.
+func Kubernetes13135() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ StopChannel_kubernetes13135 = make(chan struct{})
+ for i := 0; i < 50; i++ {
+ go func() {
+ // Should create a local channel. Using a single global channel
+ // concurrently will cause a deadlock which does not actually exist
+ // in the original microbenchmark.
+ StopChannel_kubernetes13135 := make(chan struct{})
+
+ c := NewCacher_kubernetes13135(StopChannel_kubernetes13135) // G1
+ go c.watchCache.Add(nil) // G3
+ go close(StopChannel_kubernetes13135)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go
new file mode 100644
index 0000000000..6c0139a9d2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go
@@ -0,0 +1,100 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/1321
+ * Buggy version: 9cd0fc70f1ca852c903b18b0933991036b3b2fa1
+ * fix commit-id: 435e0b73bb99862f9dedf56a50260ff3dfef14ff
+ * Flaky: 1/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes1321", Kubernetes1321)
+}
+
+type muxWatcher_kubernetes1321 struct {
+ result chan struct{}
+ m *Mux_kubernetes1321
+ id int64
+}
+
+func (mw *muxWatcher_kubernetes1321) Stop() {
+ mw.m.stopWatching(mw.id)
+}
+
+type Mux_kubernetes1321 struct {
+ lock sync.Mutex
+ watchers map[int64]*muxWatcher_kubernetes1321
+}
+
+func NewMux_kubernetes1321() *Mux_kubernetes1321 {
+ m := &Mux_kubernetes1321{
+ watchers: map[int64]*muxWatcher_kubernetes1321{},
+ }
+ go m.loop() // G2
+ return m
+}
+
+func (m *Mux_kubernetes1321) Watch() *muxWatcher_kubernetes1321 {
+ mw := &muxWatcher_kubernetes1321{
+ result: make(chan struct{}),
+ m: m,
+ id: int64(len(m.watchers)),
+ }
+ m.watchers[mw.id] = mw
+ runtime.Gosched()
+ return mw
+}
+
+func (m *Mux_kubernetes1321) loop() {
+ for i := 0; i < 100; i++ {
+ m.distribute()
+ }
+}
+
+func (m *Mux_kubernetes1321) distribute() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ for _, w := range m.watchers {
+ w.result <- struct{}{}
+ runtime.Gosched()
+ }
+}
+
+func (m *Mux_kubernetes1321) stopWatching(id int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ w, ok := m.watchers[id]
+ if !ok {
+ return
+ }
+ delete(m.watchers, id)
+ close(w.result)
+}
+
+func testMuxWatcherClose_kubernetes1321() {
+ m := NewMux_kubernetes1321()
+ m.watchers[m.Watch().id].Stop()
+}
+
+func Kubernetes1321() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go testMuxWatcherClose_kubernetes1321() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go
new file mode 100644
index 0000000000..323cb236c0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go
@@ -0,0 +1,72 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/25331
+ * Buggy version: 5dd087040bb13434f1ddf2f0693d0203c30f28cb
+ * fix commit-id: 97f4647dc3d8cf46c2b66b89a31c758a6edfb57c
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "context"
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Kubernetes25331", Kubernetes25331)
+}
+
+type watchChan_kubernetes25331 struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ resultChan chan bool
+ errChan chan error
+}
+
+func (wc *watchChan_kubernetes25331) Stop() {
+ wc.errChan <- errors.New("Error")
+ wc.cancel()
+}
+
+func (wc *watchChan_kubernetes25331) run() {
+ select {
+ case err := <-wc.errChan:
+ errResult := len(err.Error()) != 0
+ wc.cancel() // Removed in fix
+ wc.resultChan <- errResult
+ case <-wc.ctx.Done():
+ }
+}
+
+func NewWatchChan_kubernetes25331() *watchChan_kubernetes25331 {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &watchChan_kubernetes25331{
+ ctx: ctx,
+ cancel: cancel,
+ resultChan: make(chan bool),
+ errChan: make(chan error),
+ }
+}
+
+func Kubernetes25331() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ wc := NewWatchChan_kubernetes25331()
+ go wc.run() // G1
+ go wc.Stop() // G2
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go
new file mode 100644
index 0000000000..38e53cf4ad
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes26980", Kubernetes26980)
+}
+
+type processorListener_kubernetes26980 struct {
+ lock sync.RWMutex
+ cond sync.Cond
+
+ pendingNotifications []interface{}
+}
+
+func (p *processorListener_kubernetes26980) add(notification interface{}) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.pendingNotifications = append(p.pendingNotifications, notification)
+ p.cond.Broadcast()
+}
+
+func (p *processorListener_kubernetes26980) pop(stopCh <-chan struct{}) {
+ p.lock.Lock()
+ runtime.Gosched()
+ defer p.lock.Unlock()
+ for {
+ for len(p.pendingNotifications) == 0 {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ p.cond.Wait()
+ }
+ select {
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+func newProcessListener_kubernetes26980() *processorListener_kubernetes26980 {
+ ret := &processorListener_kubernetes26980{
+ pendingNotifications: []interface{}{},
+ }
+ ret.cond.L = &ret.lock
+ return ret
+}
+func Kubernetes26980() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 3000; i++ {
+ go func() {
+ pl := newProcessListener_kubernetes26980()
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+ pl.add(1)
+ runtime.Gosched()
+ go pl.pop(stopCh)
+
+ resultCh := make(chan struct{})
+ go func() {
+ pl.lock.Lock()
+ close(resultCh)
+ }()
+ runtime.Gosched()
+ <-resultCh
+ pl.lock.Unlock()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go
new file mode 100644
index 0000000000..00cdcf2b67
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go
@@ -0,0 +1,223 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes30872", Kubernetes30872)
+}
+
+type PopProcessFunc_kubernetes30872 func()
+
+type ProcessFunc_kubernetes30872 func()
+
+func Util_kubernetes30872(f func(), stopCh <-chan struct{}) {
+ JitterUntil_kubernetes30872(f, stopCh)
+}
+
+func JitterUntil_kubernetes30872(f func(), stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ func() {
+ f()
+ }()
+ }
+}
+
+type Queue_kubernetes30872 interface {
+ HasSynced()
+ Pop(PopProcessFunc_kubernetes30872)
+}
+
+type Config_kubernetes30872 struct {
+ Queue Queue_kubernetes30872
+ Process ProcessFunc_kubernetes30872
+}
+
+type Controller_kubernetes30872 struct {
+ config Config_kubernetes30872
+}
+
+func (c *Controller_kubernetes30872) Run(stopCh <-chan struct{}) {
+ Util_kubernetes30872(c.processLoop, stopCh)
+}
+
+func (c *Controller_kubernetes30872) HasSynced() {
+ c.config.Queue.HasSynced()
+}
+
+func (c *Controller_kubernetes30872) processLoop() {
+ c.config.Queue.Pop(PopProcessFunc_kubernetes30872(c.config.Process))
+}
+
+type ControllerInterface_kubernetes30872 interface {
+ Run(<-chan struct{})
+ HasSynced()
+}
+
+type ResourceEventHandler_kubernetes30872 interface {
+ OnAdd()
+}
+
+type ResourceEventHandlerFuncs_kubernetes30872 struct {
+ AddFunc func()
+}
+
+func (r ResourceEventHandlerFuncs_kubernetes30872) OnAdd() {
+ if r.AddFunc != nil {
+ r.AddFunc()
+ }
+}
+
+type informer_kubernetes30872 struct {
+ controller ControllerInterface_kubernetes30872
+
+ stopChan chan struct{}
+}
+
+type federatedInformerImpl_kubernetes30872 struct {
+ sync.Mutex
+ clusterInformer informer_kubernetes30872
+}
+
+func (f *federatedInformerImpl_kubernetes30872) ClustersSynced() {
+ f.Lock() // L1
+ defer f.Unlock()
+ f.clusterInformer.controller.HasSynced()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) addCluster() {
+ f.Lock() // L1
+ defer f.Unlock()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) Start() {
+ f.Lock() // L1
+ defer f.Unlock()
+
+ f.clusterInformer.stopChan = make(chan struct{})
+ go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) // G2
+ runtime.Gosched()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) Stop() {
+ f.Lock() // L1
+ defer f.Unlock()
+ close(f.clusterInformer.stopChan)
+}
+
+type DelayingDeliverer_kubernetes30872 struct{}
+
+func (d *DelayingDeliverer_kubernetes30872) StartWithHandler(handler func()) {
+ go func() { // G4
+ handler()
+ }()
+}
+
+type FederationView_kubernetes30872 interface {
+ ClustersSynced()
+}
+
+type FederatedInformer_kubernetes30872 interface {
+ FederationView_kubernetes30872
+ Start()
+ Stop()
+}
+
+type NamespaceController_kubernetes30872 struct {
+ namespaceDeliverer *DelayingDeliverer_kubernetes30872
+ namespaceFederatedInformer FederatedInformer_kubernetes30872
+}
+
+func (nc *NamespaceController_kubernetes30872) isSynced() {
+ nc.namespaceFederatedInformer.ClustersSynced()
+}
+
+func (nc *NamespaceController_kubernetes30872) reconcileNamespace() {
+ nc.isSynced()
+}
+
+func (nc *NamespaceController_kubernetes30872) Run(stopChan <-chan struct{}) {
+ nc.namespaceFederatedInformer.Start()
+ go func() { // G3
+ <-stopChan
+ nc.namespaceFederatedInformer.Stop()
+ }()
+ nc.namespaceDeliverer.StartWithHandler(func() {
+ nc.reconcileNamespace()
+ })
+}
+
+type DeltaFIFO_kubernetes30872 struct {
+ lock sync.RWMutex
+}
+
+func (f *DeltaFIFO_kubernetes30872) HasSynced() {
+ f.lock.Lock() // L2
+ defer f.lock.Unlock()
+}
+
+func (f *DeltaFIFO_kubernetes30872) Pop(process PopProcessFunc_kubernetes30872) {
+ f.lock.Lock() // L2
+ defer f.lock.Unlock()
+ process()
+}
+
+func NewFederatedInformer_kubernetes30872() FederatedInformer_kubernetes30872 {
+ federatedInformer := &federatedInformerImpl_kubernetes30872{}
+ federatedInformer.clusterInformer.controller = NewInformer_kubernetes30872(
+ ResourceEventHandlerFuncs_kubernetes30872{
+ AddFunc: func() {
+ federatedInformer.addCluster()
+ },
+ })
+ return federatedInformer
+}
+
+func NewInformer_kubernetes30872(h ResourceEventHandler_kubernetes30872) *Controller_kubernetes30872 {
+ fifo := &DeltaFIFO_kubernetes30872{}
+ cfg := &Config_kubernetes30872{
+ Queue: fifo,
+ Process: func() {
+ h.OnAdd()
+ },
+ }
+ return &Controller_kubernetes30872{config: *cfg}
+}
+
+func NewNamespaceController_kubernetes30872() *NamespaceController_kubernetes30872 {
+ nc := &NamespaceController_kubernetes30872{}
+ nc.namespaceDeliverer = &DelayingDeliverer_kubernetes30872{}
+ nc.namespaceFederatedInformer = NewFederatedInformer_kubernetes30872()
+ return nc
+}
+
+func Kubernetes30872() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ namespaceController := NewNamespaceController_kubernetes30872()
+ stop := make(chan struct{})
+ namespaceController.Run(stop)
+ close(stop)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go
new file mode 100644
index 0000000000..27020d5804
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go
@@ -0,0 +1,83 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Kubernetes38669", Kubernetes38669)
+}
+
+type Event_kubernetes38669 int
+type watchCacheEvent_kubernetes38669 int
+
+type cacheWatcher_kubernetes38669 struct {
+ sync.Mutex
+ input chan watchCacheEvent_kubernetes38669
+ result chan Event_kubernetes38669
+ stopped bool
+}
+
+func (c *cacheWatcher_kubernetes38669) process(initEvents []watchCacheEvent_kubernetes38669) {
+ for _, event := range initEvents {
+ c.sendWatchCacheEvent(&event)
+ }
+ defer close(c.result)
+ defer c.Stop()
+ for {
+ _, ok := <-c.input
+ if !ok {
+ return
+ }
+ }
+}
+
+func (c *cacheWatcher_kubernetes38669) sendWatchCacheEvent(event *watchCacheEvent_kubernetes38669) {
+ c.result <- Event_kubernetes38669(*event)
+}
+
+func (c *cacheWatcher_kubernetes38669) Stop() {
+ c.stop()
+}
+
+func (c *cacheWatcher_kubernetes38669) stop() {
+ c.Lock()
+ defer c.Unlock()
+ if !c.stopped {
+ c.stopped = true
+ close(c.input)
+ }
+}
+
+func newCacheWatcher_kubernetes38669(chanSize int, initEvents []watchCacheEvent_kubernetes38669) *cacheWatcher_kubernetes38669 {
+ watcher := &cacheWatcher_kubernetes38669{
+ input: make(chan watchCacheEvent_kubernetes38669, chanSize),
+ result: make(chan Event_kubernetes38669, chanSize),
+ stopped: false,
+ }
+ go watcher.process(initEvents)
+ return watcher
+}
+
+func Kubernetes38669() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ initEvents := []watchCacheEvent_kubernetes38669{1, 2}
+ w := newCacheWatcher_kubernetes38669(0, initEvents)
+ w.Stop()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go
new file mode 100644
index 0000000000..fd51484a0f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go
@@ -0,0 +1,68 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/5316
+ * Buggy version: c868b0bbf09128960bc7c4ada1a77347a464d876
+ * fix commit-id: cc3a433a7abc89d2f766d4c87eaae9448e3dc091
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Kubernetes5316", Kubernetes5316)
+}
+
+func finishRequest_kubernetes5316(timeout time.Duration, fn func() error) {
+ ch := make(chan bool)
+ errCh := make(chan error)
+ go func() { // G2
+ if err := fn(); err != nil {
+ errCh <- err
+ } else {
+ ch <- true
+ }
+ }()
+
+ select {
+ case <-ch:
+ case <-errCh:
+ case <-time.After(timeout):
+ }
+}
+
+func Kubernetes5316() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Wait a bit because the child goroutine relies on timed operations.
+ time.Sleep(100 * time.Millisecond)
+
+ // Yield several times to allow the child goroutine to run
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ fn := func() error {
+ time.Sleep(2 * time.Millisecond)
+ if rand.Intn(10) > 5 {
+ return errors.New("Error")
+ }
+ return nil
+ }
+ go finishRequest_kubernetes5316(time.Microsecond, fn) // G1
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go
new file mode 100644
index 0000000000..0ca707e981
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Tag: Reproduce misbehavior
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/58107
+ * Buggy version: 2f17d782eb2772d6401da7ddced9ac90656a7a79
+ * fix commit-id: 010a127314a935d8d038f8dd4559fc5b249813e4
+ * Flaky: 53/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes58107", Kubernetes58107)
+}
+
+type RateLimitingInterface_kubernetes58107 interface {
+ Get()
+ Put()
+}
+
+type Type_kubernetes58107 struct {
+ cond *sync.Cond
+}
+
+func (q *Type_kubernetes58107) Get() {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ q.cond.Wait()
+}
+
+func (q *Type_kubernetes58107) Put() {
+ q.cond.Signal()
+}
+
+type ResourceQuotaController_kubernetes58107 struct {
+ workerLock sync.RWMutex
+ queue RateLimitingInterface_kubernetes58107
+ missingUsageQueue RateLimitingInterface_kubernetes58107
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) worker(queue RateLimitingInterface_kubernetes58107, _ string) {
+ workFunc := func() bool {
+ rq.workerLock.RLock()
+ defer rq.workerLock.RUnlock()
+ queue.Get()
+ return true
+ }
+ for {
+ if quit := workFunc(); quit {
+ return
+ }
+ }
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) Run() {
+ go rq.worker(rq.queue, "G1") // G3
+ go rq.worker(rq.missingUsageQueue, "G2") // G4
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) Sync() {
+ for i := 0; i < 100000; i++ {
+ rq.workerLock.Lock()
+ runtime.Gosched()
+ rq.workerLock.Unlock()
+ }
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) HelperSignals() {
+ for i := 0; i < 100000; i++ {
+ rq.queue.Put()
+ rq.missingUsageQueue.Put()
+ }
+}
+
+func startResourceQuotaController_kubernetes58107() {
+ resourceQuotaController := &ResourceQuotaController_kubernetes58107{
+ queue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
+ missingUsageQueue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
+ }
+
+ go resourceQuotaController.Run() // G2
+ go resourceQuotaController.Sync() // G5
+ resourceQuotaController.HelperSignals()
+}
+
+func Kubernetes58107() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(1000 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go startResourceQuotaController_kubernetes58107() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go
new file mode 100644
index 0000000000..0d07ebc4a9
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go
@@ -0,0 +1,120 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/62464
+ * Buggy version: a048ca888ad27367b1a7b7377c67658920adbf5d
+ * fix commit-id: c1b19fce903675b82e9fdd1befcc5f5d658bfe78
+ * Flaky: 8/100
+ */
+
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes62464", Kubernetes62464)
+}
+
+// State_kubernetes62464 abstracts CPU-set bookkeeping; implemented by
+// stateMemory_kubernetes62464 below.
+type State_kubernetes62464 interface {
+ GetCPUSetOrDefault()
+ GetCPUSet() bool
+ GetDefaultCPUSet()
+ SetDefaultCPUSet()
+}
+
+// stateMemory_kubernetes62464 guards its state with an embedded RWMutex.
+type stateMemory_kubernetes62464 struct {
+ sync.RWMutex
+}
+
+// GetCPUSetOrDefault takes the read lock and then calls methods that
+// take the read lock again. Per the sync.RWMutex contract, recursive
+// read locking deadlocks if a writer (SetDefaultCPUSet) queues between
+// the two acquisitions — the bug this test reproduces.
+func (s *stateMemory_kubernetes62464) GetCPUSetOrDefault() {
+ s.RLock()
+ defer s.RUnlock()
+ if ok := s.GetCPUSet(); ok {
+ return
+ }
+ s.GetDefaultCPUSet()
+}
+
+// GetCPUSet re-acquires the read lock; the Gosched widens the race
+// window with the writer. The random result just varies the call path.
+func (s *stateMemory_kubernetes62464) GetCPUSet() bool {
+ runtime.Gosched()
+ s.RLock()
+ defer s.RUnlock()
+
+ if rand.Intn(10) > 5 {
+ return true
+ }
+ return false
+}
+
+// GetDefaultCPUSet briefly holds the read lock (nested under the caller's).
+func (s *stateMemory_kubernetes62464) GetDefaultCPUSet() {
+ s.RLock()
+ defer s.RUnlock()
+}
+
+// SetDefaultCPUSet takes the write lock; a pending writer blocks new
+// readers, wedging the nested RLocks above.
+func (s *stateMemory_kubernetes62464) SetDefaultCPUSet() {
+ s.Lock()
+ runtime.Gosched()
+ defer s.Unlock()
+}
+
+type staticPolicy_kubernetes62464 struct{}
+
+// RemoveContainer does a read-locked call followed by a write-locked
+// call, supplying the writer that triggers the deadlock.
+func (p *staticPolicy_kubernetes62464) RemoveContainer(s State_kubernetes62464) {
+ s.GetDefaultCPUSet()
+ s.SetDefaultCPUSet()
+}
+
+type manager_kubernetes62464 struct {
+ state *stateMemory_kubernetes62464
+}
+
+// reconcileState drives the nested read-lock path (G1 below).
+func (m *manager_kubernetes62464) reconcileState() {
+ m.state.GetCPUSetOrDefault()
+}
+
+// NewPolicyAndManager_kubernetes62464 builds a policy/manager pair that
+// share a single stateMemory instance.
+func NewPolicyAndManager_kubernetes62464() (*staticPolicy_kubernetes62464, *manager_kubernetes62464) {
+ s := &stateMemory_kubernetes62464{}
+ m := &manager_kubernetes62464{s}
+ p := &staticPolicy_kubernetes62464{}
+ return p, m
+}
+
+///
+/// G1 G2
+/// m.reconcileState()
+/// m.state.GetCPUSetOrDefault()
+/// s.RLock()
+/// s.GetCPUSet()
+/// p.RemoveContainer()
+/// s.GetDefaultCPUSet()
+/// s.SetDefaultCPUSet()
+/// s.Lock()
+/// s.RLock()
+/// ---------------------G1,G2 deadlock---------------------
+///
+
+// Kubernetes62464 launches many G1/G2 pairs, then dumps the
+// goroutineleak profile after letting leaks settle.
+func Kubernetes62464() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ p, m := NewPolicyAndManager_kubernetes62464()
+ go m.reconcileState()
+ go p.RemoveContainer(m.state)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go
new file mode 100644
index 0000000000..a3af3b24ae
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go
@@ -0,0 +1,86 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/6632
+ * Buggy version: e597b41d939573502c8dda1dde7bf3439325fb5d
+ * fix commit-id: 82afb7ab1fe12cf2efceede2322d082eaf5d5adc
+ * Flaky: 4/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes6632", Kubernetes6632)
+}
+
+type Connection_kubernetes6632 struct {
+ closeChan chan bool
+}
+
+// idleAwareFramer_kubernetes6632 reproduces kubernetes#6632: WriteFrame
+// sends on an unbuffered channel while holding writeLock, and monitor
+// needs that same lock to tear the channel down.
+type idleAwareFramer_kubernetes6632 struct {
+ resetChan chan bool
+ writeLock sync.Mutex
+ conn *Connection_kubernetes6632
+}
+
+// monitor waits for the connection to close, then closes/clears
+// resetChan under writeLock. It blocks on Lock if WriteFrame is parked
+// mid-send holding the lock.
+func (i *idleAwareFramer_kubernetes6632) monitor() {
+ var resetChan = i.resetChan
+Loop:
+ for {
+ select {
+ case <-i.conn.closeChan:
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+ break Loop
+ }
+ }
+}
+
+// WriteFrame sends on the unbuffered resetChan while still holding
+// writeLock; with no receiver it blocks forever with the lock held
+// (the reproduced bug).
+func (i *idleAwareFramer_kubernetes6632) WriteFrame() {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return
+ }
+ i.resetChan <- true
+}
+
+// NewIdleAwareFramer_kubernetes6632 builds a framer with fresh
+// unbuffered channels.
+func NewIdleAwareFramer_kubernetes6632() *idleAwareFramer_kubernetes6632 {
+ return &idleAwareFramer_kubernetes6632{
+ resetChan: make(chan bool),
+ conn: &Connection_kubernetes6632{
+ closeChan: make(chan bool),
+ },
+ }
+}
+
+// Kubernetes6632 races monitor against WriteFrame many times, then
+// dumps the goroutineleak profile.
+func Kubernetes6632() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ i := NewIdleAwareFramer_kubernetes6632()
+
+ go func() { // helper goroutine
+ i.conn.closeChan <- true
+ }()
+ go i.monitor() // G1
+ go i.WriteFrame() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go
new file mode 100644
index 0000000000..ae02ec8304
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go
@@ -0,0 +1,97 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Kubernetes70277", Kubernetes70277)
+}
+
+// WaitFunc_kubernetes70277 returns a channel that pulses until done is
+// closed or the wait times out.
+type WaitFunc_kubernetes70277 func(done <-chan struct{}) <-chan struct{}
+
+// ConditionFunc_kubernetes70277 reports whether waiting is finished.
+type ConditionFunc_kubernetes70277 func() (done bool, err error)
+
+// WaitFor_kubernetes70277 polls fn on each pulse from wait, returning
+// on error, success, or channel close.
+func WaitFor_kubernetes70277(wait WaitFunc_kubernetes70277, fn ConditionFunc_kubernetes70277, done <-chan struct{}) error {
+ c := wait(done)
+ for {
+ _, open := <-c
+ ok, err := fn()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ if !open {
+ break
+ }
+ }
+ return nil
+}
+
+// poller_kubernetes70277 produces a WaitFunc that ticks every interval
+// until timeout fires or done closes; the producer goroutine closes ch
+// on exit.
+func poller_kubernetes70277(interval, timeout time.Duration) WaitFunc_kubernetes70277 {
+ return WaitFunc_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+
+ tick := time.NewTicker(interval)
+ defer tick.Stop()
+
+ var after <-chan time.Time
+ if timeout != 0 {
+ timer := time.NewTimer(timeout)
+ after = timer.C
+ defer timer.Stop()
+ }
+ for {
+ select {
+ case <-tick.C:
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+ case <-after:
+ return
+ case <-done:
+ return
+ }
+ }
+ }()
+
+ return ch
+ })
+}
+
+// Kubernetes70277 reproduces the leak: doneCh aliases stopCh, which is
+// closed only by this goroutine's own deferred close — so the final
+// receive below blocks the very goroutine whose return would unblock it.
+func Kubernetes70277() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+ waitFunc := poller_kubernetes70277(time.Millisecond, 80*time.Millisecond)
+ var doneCh <-chan struct{}
+
+ WaitFor_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
+ doneCh = done
+ return waitFunc(done)
+ }, func() (bool, error) {
+ time.Sleep(10 * time.Millisecond)
+ return true, nil
+ }, stopCh)
+
+ <-doneCh // block here
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/main.go b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go
new file mode 100644
index 0000000000..5787c1e2b2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go
@@ -0,0 +1,39 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+// The number of times the main (profiling) goroutine should yield
+// in order to allow the leaking goroutines to get stuck.
+const yieldCount = 10
+
+// cmds maps test-case names to their entry points, populated by the
+// per-file init functions via register.
+var cmds = map[string]func(){}
+
+// register records a named test case; duplicate names are programmer
+// error and panic at init time.
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+// registerInit runs f immediately at init time, but only when this
+// process was invoked for the named test case.
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+// main dispatches to the test case named on the command line.
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go
new file mode 100644
index 0000000000..884d077550
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go
@@ -0,0 +1,68 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/17176
+ * Buggy version: d295dc66521e2734390473ec1f1da8a73ad3288a
+ * fix commit-id: 2f16895ee94848e2d8ad72bc01968b4c88d84cb8
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby17176", Moby17176)
+}
+
+type DeviceSet_moby17176 struct {
+ sync.Mutex
+ nrDeletedDevices int
+}
+
+// cleanupDeletedDevices reproduces moby#17176: the early-return path
+// leaves the mutex held (see the marker comment), so any later Lock on
+// this DeviceSet blocks forever.
+func (devices *DeviceSet_moby17176) cleanupDeletedDevices() error {
+ devices.Lock()
+ if devices.nrDeletedDevices == 0 {
+ /// Missing devices.Unlock()
+ return nil
+ }
+ devices.Unlock()
+ return errors.New("Error")
+}
+
+// testDevmapperLockReleasedDeviceDeletion triggers the buggy early
+// return, then races a goroutine that needs the still-held lock; the
+// timeout lets the parent proceed while the child leaks.
+func testDevmapperLockReleasedDeviceDeletion_moby17176() {
+ ds := &DeviceSet_moby17176{
+ nrDeletedDevices: 0,
+ }
+ ds.cleanupDeletedDevices()
+ doneChan := make(chan bool)
+ go func() {
+ ds.Lock()
+ defer ds.Unlock()
+ doneChan <- true
+ }()
+
+ select {
+ case <-time.After(time.Millisecond):
+ case <-doneChan:
+ }
+}
+// Moby17176 runs many instances of the scenario, then dumps the
+// goroutineleak profile.
+func Moby17176() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go testDevmapperLockReleasedDeviceDeletion_moby17176()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go
new file mode 100644
index 0000000000..36017a9488
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go
@@ -0,0 +1,146 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/21233
+ * Buggy version: cc12d2bfaae135e63b1f962ad80e6943dd995337
+ * fix commit-id: 2f4aa9658408ac72a598363c6e22eadf93dbb8a7
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby21233", Moby21233)
+}
+
+type Progress_moby21233 struct{}
+
+type Output_moby21233 interface {
+ WriteProgress(Progress_moby21233) error
+}
+
+// chanOutput_moby21233 adapts a send-only channel to Output; each
+// WriteProgress is a blocking send on the unbuffered progress channel.
+type chanOutput_moby21233 chan<- Progress_moby21233
+
+type TransferManager_moby21233 struct {
+ mu sync.Mutex
+}
+
+type Transfer_moby21233 struct {
+ mu sync.Mutex
+}
+
+type Watcher_moby21233 struct {
+ signalChan chan struct{}
+ releaseChan chan struct{}
+ running chan struct{}
+}
+
+func ChanOutput_moby21233(progressChan chan<- Progress_moby21233) Output_moby21233 {
+ return chanOutput_moby21233(progressChan)
+}
+func (out chanOutput_moby21233) WriteProgress(p Progress_moby21233) error {
+ out <- p
+ return nil
+}
+func NewTransferManager_moby21233() *TransferManager_moby21233 {
+ return &TransferManager_moby21233{}
+}
+func NewTransfer_moby21233() *Transfer_moby21233 {
+ return &Transfer_moby21233{}
+}
+// Release closes the watcher's release channel and waits for its
+// goroutine (G2) to exit; it blocks forever if G2 is stuck writing
+// progress with no consumer.
+func (t *Transfer_moby21233) Release(watcher *Watcher_moby21233) {
+ t.mu.Lock()
+ t.mu.Unlock()
+ close(watcher.releaseChan)
+ <-watcher.running
+}
+// Watch spawns G2, which repeatedly writes progress; WriteProgress is a
+// blocking send, so if the consumer (G3) has exited, G2 leaks and
+// w.running is never closed.
+func (t *Transfer_moby21233) Watch(progressOutput Output_moby21233) *Watcher_moby21233 {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ lastProgress := Progress_moby21233{}
+ w := &Watcher_moby21233{
+ releaseChan: make(chan struct{}),
+ signalChan: make(chan struct{}),
+ running: make(chan struct{}),
+ }
+ go func() { // G2
+ defer func() {
+ close(w.running)
+ }()
+ done := false
+ for {
+ t.mu.Lock()
+ t.mu.Unlock()
+ if rand.Int31n(2) >= 1 {
+ progressOutput.WriteProgress(lastProgress)
+ }
+ if done {
+ return
+ }
+ select {
+ case <-w.signalChan:
+ case <-w.releaseChan:
+ done = true
+ }
+ }
+ }()
+ return w
+}
+// Transfer creates a transfer and immediately starts watching it.
+func (tm *TransferManager_moby21233) Transfer(progressOutput Output_moby21233) (*Transfer_moby21233, *Watcher_moby21233) {
+ tm.mu.Lock()
+ defer tm.mu.Unlock()
+ t := NewTransfer_moby21233()
+ return t, t.Watch(progressOutput)
+}
+
+// testTransfer_moby21233 reproduces moby#21233: the consumer G3 may
+// return early (random), leaving the unbuffered progressChan with no
+// reader, so producers (G2s) block in WriteProgress, Release blocks on
+// <-running, and this goroutine blocks on <-progressDone.
+func testTransfer_moby21233() { // G1
+ tm := NewTransferManager_moby21233()
+ progressChan := make(chan Progress_moby21233)
+ progressDone := make(chan struct{})
+ go func() { // G3
+ time.Sleep(1 * time.Millisecond)
+ for p := range progressChan { /// Chan consumer
+ if rand.Int31n(2) >= 1 {
+ return
+ }
+ _ = p
+ }
+ close(progressDone)
+ }()
+ time.Sleep(1 * time.Millisecond)
+ ids := []string{"id1", "id2", "id3"}
+ xrefs := make([]*Transfer_moby21233, len(ids))
+ watchers := make([]*Watcher_moby21233, len(ids))
+ for i := range ids {
+ xrefs[i], watchers[i] = tm.Transfer(ChanOutput_moby21233(progressChan)) /// Chan producer
+ time.Sleep(2 * time.Millisecond)
+ }
+
+ for i := range xrefs {
+ xrefs[i].Release(watchers[i])
+ }
+
+ close(progressChan)
+ <-progressDone
+}
+
+// Moby21233 runs many instances of the scenario, then dumps the
+// goroutineleak profile.
+func Moby21233() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testTransfer_moby21233() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go
new file mode 100644
index 0000000000..d653731f6c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go
@@ -0,0 +1,59 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/25384
+ * Buggy version: 58befe3081726ef74ea09198cd9488fb42c51f51
+ * fix commit-id: 42360d164b9f25fb4b150ef066fcf57fa39559a7
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+// NOTE(review): the file is named moby25384 but all identifiers here
+// use "25348" (digits transposed). They are internally consistent, so
+// behavior is unaffected — confirm intended name upstream.
+func init() {
+ register("Moby25348", Moby25348)
+}
+
+type plugin_moby25348 struct{}
+
+type Manager_moby25348 struct {
+ plugins []*plugin_moby25348
+}
+
+// init reproduces moby#25384: group.Wait() sits INSIDE the loop, so the
+// first iteration waits for len(plugins) Done calls when only one
+// worker has been started — the goroutine blocks in Wait forever.
+func (pm *Manager_moby25348) init() {
+ var group sync.WaitGroup
+ group.Add(len(pm.plugins))
+ for _, p := range pm.plugins {
+ go func(p *plugin_moby25348) {
+ defer group.Done()
+ }(p)
+ group.Wait() // Block here
+ }
+}
+
+// Moby25348 starts the buggy init and dumps the goroutineleak profile
+// after yielding to let the leak establish itself.
+func Moby25348() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ p1 := &plugin_moby25348{}
+ p2 := &plugin_moby25348{}
+ pm := &Manager_moby25348{
+ plugins: []*plugin_moby25348{p1, p2},
+ }
+ go pm.init()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
new file mode 100644
index 0000000000..7b3398fd38
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
@@ -0,0 +1,242 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/27782
+ * Buggy version: 18768fdc2e76ec6c600c8ab57d2d487ee7877794
+ * fix commit-id: a69a59ffc7e3d028a72d1195c2c1535f447eaa84
+ * Flaky: 2/100
+ */
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby27782", Moby27782)
+}
+
+type Event_moby27782 struct {
+ Op Op_moby27782
+}
+
+type Op_moby27782 uint32
+
+const (
+ Create_moby27782 Op_moby27782 = 1 << iota
+ Write_moby27782
+ Remove_moby27782
+ Rename_moby27782
+ Chmod_moby27782
+)
+
+func newEvent(op Op_moby27782) Event_moby27782 {
+ return Event_moby27782{op}
+}
+
+// ignoreLinux is the ONLY place w.cv is broadcast, and only for
+// non-Write events; readEvents emits only Write events, so waiters in
+// Remove are never woken — the core of moby#27782.
+func (e *Event_moby27782) ignoreLinux(w *Watcher_moby27782) bool {
+ if e.Op != Write_moby27782 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.cv.Broadcast()
+ return true
+ }
+ runtime.Gosched()
+ return false
+}
+
+type Watcher_moby27782 struct {
+ Events chan Event_moby27782
+ mu sync.Mutex // L1
+ cv *sync.Cond // C1
+ done chan struct{}
+}
+
+// NewWatcher_moby27782 builds a watcher and starts its event pump (G3).
+func NewWatcher_moby27782() *Watcher_moby27782 {
+ w := &Watcher_moby27782{
+ Events: make(chan Event_moby27782),
+ done: make(chan struct{}),
+ }
+ w.cv = sync.NewCond(&w.mu)
+ go w.readEvents() // G3
+ return w
+}
+
+// readEvents generates Write events until closed; sends race against
+// the done channel.
+func (w *Watcher_moby27782) readEvents() {
+ defer close(w.Events)
+ for {
+ if w.isClosed() {
+ return
+ }
+ event := newEvent(Write_moby27782) // MODIFY event
+ if !event.ignoreLinux(w) {
+ runtime.Gosched()
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+ }
+}
+
+// isClosed is a non-blocking probe of the done channel.
+func (w *Watcher_moby27782) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close signals shutdown by closing done (idempotent via the probe).
+func (w *Watcher_moby27782) Close() {
+ if w.isClosed() {
+ return
+ }
+ close(w.done)
+}
+
+// Remove waits on cv while holding w.mu; exists never changes and the
+// broadcast never comes (see ignoreLinux), so the caller leaks here.
+func (w *Watcher_moby27782) Remove() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ exists := true
+ for exists {
+ w.cv.Wait()
+ runtime.Gosched()
+ }
+}
+
+type FileWatcher_moby27782 interface {
+ Events() <-chan Event_moby27782
+ Remove()
+ Close()
+}
+
+func New_moby27782() FileWatcher_moby27782 {
+ return NewEventWatcher_moby27782()
+}
+
+func NewEventWatcher_moby27782() FileWatcher_moby27782 {
+ return &fsNotifyWatcher_moby27782{NewWatcher_moby27782()}
+}
+
+type fsNotifyWatcher_moby27782 struct {
+ *Watcher_moby27782
+}
+
+func (w *fsNotifyWatcher_moby27782) Events() <-chan Event_moby27782 {
+ return w.Watcher_moby27782.Events
+}
+
+func watchFile_moby27782() FileWatcher_moby27782 {
+ fileWatcher := New_moby27782()
+ return fileWatcher
+}
+
+type LogWatcher_moby27782 struct {
+ closeOnce sync.Once
+ closeNotifier chan struct{}
+}
+
+// Close closes the notifier exactly once.
+func (w *LogWatcher_moby27782) Close() {
+ w.closeOnce.Do(func() {
+ close(w.closeNotifier)
+ })
+}
+
+func (w *LogWatcher_moby27782) WatchClose() <-chan struct{} {
+ return w.closeNotifier
+}
+
+func NewLogWatcher_moby27782() *LogWatcher_moby27782 {
+ return &LogWatcher_moby27782{
+ closeNotifier: make(chan struct{}),
+ }
+}
+
+// followLogs_moby27782 waits for either an event or a close; on close
+// it calls Remove, which can block forever (see Remove above).
+func followLogs_moby27782(logWatcher *LogWatcher_moby27782) {
+ fileWatcher := watchFile_moby27782()
+ defer func() {
+ fileWatcher.Close()
+ }()
+ waitRead := func() {
+ runtime.Gosched()
+ select {
+ case <-fileWatcher.Events():
+ case <-logWatcher.WatchClose():
+ fileWatcher.Remove()
+ return
+ }
+ }
+ handleDecodeErr := func() {
+ waitRead()
+ }
+ handleDecodeErr()
+}
+
+type Container_moby27782 struct {
+ LogDriver *JSONFileLogger_moby27782
+}
+
+// InitializeStdio starts logging; startLogging always errors here, so
+// Reset (and thus LogWatcher.Close) always runs, racing followLogs.
+func (container *Container_moby27782) InitializeStdio() {
+ if err := container.startLogging(); err != nil {
+ container.Reset()
+ }
+}
+
+func (container *Container_moby27782) startLogging() error {
+ l := &JSONFileLogger_moby27782{
+ readers: make(map[*LogWatcher_moby27782]struct{}),
+ }
+ container.LogDriver = l
+ l.ReadLogs()
+ return errors.New("Some error")
+}
+
+func (container *Container_moby27782) Reset() {
+ if container.LogDriver != nil {
+ container.LogDriver.Close()
+ }
+}
+
+type JSONFileLogger_moby27782 struct {
+ readers map[*LogWatcher_moby27782]struct{}
+}
+
+// ReadLogs spawns the reader goroutine (G2) for a fresh watcher.
+func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 {
+ logWatcher := NewLogWatcher_moby27782()
+ go l.readLogs(logWatcher) // G2
+ return logWatcher
+}
+
+func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) {
+ l.readers[logWatcher] = struct{}{}
+ followLogs_moby27782(logWatcher)
+}
+
+// Close closes every registered reader's watcher.
+func (l *JSONFileLogger_moby27782) Close() {
+ for r := range l.readers {
+ r.Close()
+ delete(l.readers, r)
+ }
+}
+
+// Moby27782 runs many instances of the scenario, then dumps the
+// goroutineleak profile.
+func Moby27782() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 10000; i++ {
+ go (&Container_moby27782{}).InitializeStdio() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go
new file mode 100644
index 0000000000..56467e0b56
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go
@@ -0,0 +1,125 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/28462
+ * Buggy version: b184bdabf7a01c4b802304ac64ac133743c484be
+ * fix commit-id: 89b123473774248fc3a0356dd3ce5b116cc69b29
+ * Flaky: 69/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby28462", Moby28462)
+}
+
+type State_moby28462 struct {
+ Health *Health_moby28462
+}
+
+type Container_moby28462 struct {
+ sync.Mutex
+ State *State_moby28462
+}
+
+func (ctr *Container_moby28462) start() {
+ go ctr.waitExit()
+}
+func (ctr *Container_moby28462) waitExit() {
+
+}
+
+type Store_moby28462 struct {
+ ctr *Container_moby28462
+}
+
+func (s *Store_moby28462) Get() *Container_moby28462 {
+ return s.ctr
+}
+
+type Daemon_moby28462 struct {
+ containers Store_moby28462
+}
+
+// StateChanged reproduces moby#28462: while holding c's mutex it calls
+// CloseMonitorChannel, whose unbuffered send blocks until monitor
+// receives — but monitor may itself be blocked acquiring c's mutex in
+// handleProbeResult. Neither side can proceed.
+func (d *Daemon_moby28462) StateChanged() {
+ c := d.containers.Get()
+ c.Lock()
+ d.updateHealthMonitorElseBranch(c)
+ defer c.Unlock()
+}
+
+func (d *Daemon_moby28462) updateHealthMonitorIfBranch(c *Container_moby28462) {
+ h := c.State.Health
+ if stop := h.OpenMonitorChannel(); stop != nil {
+ go monitor_moby28462(c, stop)
+ }
+}
+// updateHealthMonitorElseBranch stops the monitor while the caller
+// still holds c's mutex.
+func (d *Daemon_moby28462) updateHealthMonitorElseBranch(c *Container_moby28462) {
+ h := c.State.Health
+ h.CloseMonitorChannel()
+}
+
+type Health_moby28462 struct {
+ stop chan struct{}
+}
+
+func (s *Health_moby28462) OpenMonitorChannel() chan struct{} {
+ return s.stop
+}
+
+// CloseMonitorChannel performs a blocking send on the unbuffered stop
+// channel; it parks until monitor is at its <-stop case.
+func (s *Health_moby28462) CloseMonitorChannel() {
+ if s.stop != nil {
+ s.stop <- struct{}{}
+ }
+}
+
+// monitor_moby28462 alternates between checking stop and running a
+// probe that needs c's mutex.
+func monitor_moby28462(c *Container_moby28462, stop chan struct{}) {
+ for {
+ select {
+ case <-stop:
+ return
+ default:
+ handleProbeResult_moby28462(c)
+ }
+ }
+}
+
+// handleProbeResult_moby28462 briefly takes c's mutex (the lock the
+// blocked StateChanged holds).
+func handleProbeResult_moby28462(c *Container_moby28462) {
+ runtime.Gosched()
+ c.Lock()
+ defer c.Unlock()
+}
+
+// NewDaemonAndContainer_moby28462 wires one container into one daemon.
+func NewDaemonAndContainer_moby28462() (*Daemon_moby28462, *Container_moby28462) {
+ c := &Container_moby28462{
+ State: &State_moby28462{&Health_moby28462{make(chan struct{})}},
+ }
+ d := &Daemon_moby28462{Store_moby28462{c}}
+ return d, c
+}
+
+// Moby28462 races monitor (G1) against StateChanged (G2) many times,
+// then dumps the goroutineleak profile.
+func Moby28462() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ d, c := NewDaemonAndContainer_moby28462()
+ go monitor_moby28462(c, c.State.Health.OpenMonitorChannel()) // G1
+ go d.StateChanged() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go
new file mode 100644
index 0000000000..561e2faf57
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go
@@ -0,0 +1,67 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Moby30408", Moby30408)
+}
+
+type Manifest_moby30408 struct {
+ Implements []string
+}
+
+type Plugin_moby30408 struct {
+ activateWait *sync.Cond
+ activateErr error
+ Manifest *Manifest_moby30408
+}
+
+// waitActive waits on activateWait until activated(); nothing in this
+// test ever sets Manifest or signals the cond, so the waiter leaks —
+// the bug reproduced from moby#30408.
+func (p *Plugin_moby30408) waitActive() error {
+ p.activateWait.L.Lock()
+ for !p.activated() {
+ p.activateWait.Wait()
+ }
+ p.activateWait.L.Unlock()
+ return p.activateErr
+}
+
+// activated reports whether a manifest has been attached.
+func (p *Plugin_moby30408) activated() bool {
+ return p.Manifest != nil
+}
+
+// testActive_moby30408 blocks on done, which is only closed after
+// waitActive returns — so G1 and G2 leak together.
+func testActive_moby30408(p *Plugin_moby30408) {
+ done := make(chan struct{})
+ go func() { // G2
+ p.waitActive()
+ close(done)
+ }()
+ <-done
+}
+
+// Moby30408 starts the scenario and dumps the goroutineleak profile
+// after yielding to let the leak establish itself.
+func Moby30408() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ p := &Plugin_moby30408{activateWait: sync.NewCond(&sync.Mutex{})}
+ p.activateErr = errors.New("some junk happened")
+
+ testActive_moby30408(p)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go
new file mode 100644
index 0000000000..4be50546e9
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/33781
+ * Buggy version: 33fd3817b0f5ca4b87f0a75c2bd583b4425d392b
+ * fix commit-id: 67297ba0051d39be544009ba76abea14bc0be8a4
+ * Flaky: 25/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Moby33781", Moby33781)
+}
+
+// monitor_moby33781 reproduces moby#33781: in the <-stop case it
+// returns without draining results (see marker comment), so the probe
+// goroutine G3 blocks forever on its unbuffered send.
+func monitor_moby33781(stop chan bool) {
+ probeInterval := time.Millisecond
+ probeTimeout := time.Millisecond
+ for {
+ select {
+ case <-stop:
+ return
+ case <-time.After(probeInterval):
+ results := make(chan bool)
+ ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
+ go func() { // G3
+ results <- true
+ close(results)
+ }()
+ select {
+ case <-stop:
+ // results should be drained here
+ cancelProbe()
+ return
+ case <-results:
+ cancelProbe()
+ case <-ctx.Done():
+ cancelProbe()
+ <-results
+ }
+ }
+ }
+}
+
+// Moby33781 races monitor (G1) against a delayed stop sender (G2) many
+// times, then dumps the goroutineleak profile.
+func Moby33781() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func(i int) {
+ stop := make(chan bool)
+ go monitor_moby33781(stop) // G1
+ go func() { // G2
+ time.Sleep(time.Duration(i) * time.Millisecond)
+ stop <- true
+ }()
+ }(i)
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go
new file mode 100644
index 0000000000..577c81651a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/36114
+ * Buggy version: 6d4d3c52ae7c3f910bfc7552a2a673a8338e5b9f
+ * fix commit-id: a44fcd3d27c06aaa60d8d1cbce169f0d982e74b1
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby36114", Moby36114)
+}
+
+type serviceVM_moby36114 struct {
+ sync.Mutex
+}
+
+// hotAddVHDsAtStart reproduces moby#36114: it calls
+// hotRemoveVHDsAtStart while already holding the (non-reentrant)
+// mutex, so the nested Lock self-deadlocks.
+func (svm *serviceVM_moby36114) hotAddVHDsAtStart() {
+ svm.Lock()
+ defer svm.Unlock()
+ svm.hotRemoveVHDsAtStart()
+}
+
+// hotRemoveVHDsAtStart re-acquires the same mutex.
+func (svm *serviceVM_moby36114) hotRemoveVHDsAtStart() {
+ svm.Lock()
+ defer svm.Unlock()
+}
+
+// Moby36114 runs many instances of the self-deadlock, then dumps the
+// goroutineleak profile.
+func Moby36114() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ s := &serviceVM_moby36114{}
+ go s.hotAddVHDsAtStart()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go
new file mode 100644
index 0000000000..7f0497648c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go
@@ -0,0 +1,101 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/4951
+ * Buggy version: 81f148be566ab2b17810ad4be61a5d8beac8330f
+ * fix commit-id: 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby4951", Moby4951)
+}
+
+type DeviceSet_moby4951 struct {
+ sync.Mutex
+ infos map[string]*DevInfo_moby4951
+ nrDeletedDevices int
+}
+
+// DeleteDevice reproduces moby#4951: it holds both devices' mutex and
+// info.lock, then removeDeviceAndWait drops and re-takes the devices
+// mutex. A second DeleteDevice on the same hash can take the devices
+// mutex in that window and block on info.lock — lock-order inversion,
+// both goroutines deadlock.
+func (devices *DeviceSet_moby4951) DeleteDevice(hash string) {
+ devices.Lock()
+ defer devices.Unlock()
+
+ info := devices.lookupDevice(hash)
+
+ info.lock.Lock()
+ runtime.Gosched()
+ defer info.lock.Unlock()
+
+ devices.deleteDevice(info)
+}
+
+// lookupDevice returns the registered info for hash, or nil.
+func (devices *DeviceSet_moby4951) lookupDevice(hash string) *DevInfo_moby4951 {
+ existing, ok := devices.infos[hash]
+ if !ok {
+ return nil
+ }
+ return existing
+}
+
+func (devices *DeviceSet_moby4951) deleteDevice(info *DevInfo_moby4951) {
+ devices.removeDeviceAndWait(info.Name())
+}
+
+// removeDeviceAndWait releases and re-acquires the devices mutex —
+// the unlock window that enables the deadlock above.
+func (devices *DeviceSet_moby4951) removeDeviceAndWait(devname string) {
+ /// remove devices by devname
+ devices.Unlock()
+ time.Sleep(300 * time.Nanosecond)
+ devices.Lock()
+}
+
+type DevInfo_moby4951 struct {
+ lock sync.Mutex
+ name string
+}
+
+func (info *DevInfo_moby4951) Name() string {
+ return info.name
+}
+
+// NewDeviceSet_moby4951 builds a set pre-populated with two devices.
+func NewDeviceSet_moby4951() *DeviceSet_moby4951 {
+ devices := &DeviceSet_moby4951{
+ infos: make(map[string]*DevInfo_moby4951),
+ }
+ info1 := &DevInfo_moby4951{
+ name: "info1",
+ }
+ info2 := &DevInfo_moby4951{
+ name: "info2",
+ }
+ devices.infos[info1.name] = info1
+ devices.infos[info2.name] = info2
+ return devices
+}
+
+// Moby4951 races two deletions of the same device, then dumps the
+// goroutineleak profile.
+func Moby4951() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ ds := NewDeviceSet_moby4951()
+ /// Delete devices by the same info
+ go ds.DeleteDevice("info1")
+ go ds.DeleteDevice("info1")
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go
new file mode 100644
index 0000000000..212f65b1f3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go
@@ -0,0 +1,55 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/7559
+ * Buggy version: 64579f51fcb439c36377c0068ccc9a007b368b5a
+ * fix commit-id: 6cbb8e070d6c3a66bf48fbe5cbf689557eee23db
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "net"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby7559", Moby7559)
+}
+
+type UDPProxy_moby7559 struct {
+ connTrackLock sync.Mutex
+}
+
+// Run reproduces moby#7559: when DialUDP errors, `continue` skips the
+// unlock (see marker comment), so the second iteration's Lock
+// self-deadlocks on the already-held mutex.
+func (proxy *UDPProxy_moby7559) Run() {
+ for i := 0; i < 2; i++ {
+ proxy.connTrackLock.Lock()
+ _, err := net.DialUDP("udp", nil, nil)
+ if err != nil {
+ continue
+ /// Missing unlock here
+ }
+ if i == 0 {
+ break
+ }
+ }
+ proxy.connTrackLock.Unlock()
+}
+
+// Moby7559 runs several proxies, then dumps the goroutineleak profile.
+func Moby7559() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 20; i++ {
+ go (&UDPProxy_moby7559{}).Run()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go
new file mode 100644
index 0000000000..49905315a0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go
@@ -0,0 +1,122 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Serving2137", Serving2137)
+}
+
+type token_serving2137 struct{}
+
+type request_serving2137 struct {
+ lock *sync.Mutex
+ accepted chan bool
+}
+
+type Breaker_serving2137 struct {
+ pendingRequests chan token_serving2137
+ activeRequests chan token_serving2137
+}
+
+func (b *Breaker_serving2137) Maybe(thunk func()) bool {
+ var t token_serving2137
+ select {
+ default:
+ // Pending request queue is full. Report failure.
+ return false
+ case b.pendingRequests <- t:
+ // Pending request has capacity.
+ // Wait for capacity in the active queue.
+ b.activeRequests <- t
+ // Defer releasing capacity in the active and pending request queue.
+ defer func() {
+ <-b.activeRequests
+ runtime.Gosched()
+ <-b.pendingRequests
+ }()
+ // Do the thing.
+ thunk()
+ // Report success
+ return true
+ }
+}
+
+func (b *Breaker_serving2137) concurrentRequest() request_serving2137 {
+ r := request_serving2137{lock: &sync.Mutex{}, accepted: make(chan bool, 1)}
+ r.lock.Lock()
+ var start sync.WaitGroup
+ start.Add(1)
+ go func() { // G2, G3
+ start.Done()
+ runtime.Gosched()
+ ok := b.Maybe(func() {
+ // Will block on locked mutex.
+ r.lock.Lock()
+ runtime.Gosched()
+ r.lock.Unlock()
+ })
+ r.accepted <- ok
+ }()
+ start.Wait() // Ensure that the go func has had a chance to execute.
+ return r
+}
+
+// Perform n requests against the breaker, returning a request struct for
+// each, carrying the mutex guarding it and a channel reporting acceptance.
+func (b *Breaker_serving2137) concurrentRequests(n int) []request_serving2137 {
+ requests := make([]request_serving2137, n)
+ for i := range requests {
+ requests[i] = b.concurrentRequest()
+ }
+ return requests
+}
+
+func NewBreaker_serving2137(queueDepth, maxConcurrency int32) *Breaker_serving2137 {
+ return &Breaker_serving2137{
+ pendingRequests: make(chan token_serving2137, queueDepth+maxConcurrency),
+ activeRequests: make(chan token_serving2137, maxConcurrency),
+ }
+}
+
+func unlock_serving2137(req request_serving2137) {
+ req.lock.Unlock()
+ runtime.Gosched()
+ // Verify that function has completed
+ ok := <-req.accepted
+ runtime.Gosched()
+ // Requeue for next usage
+ req.accepted <- ok
+}
+
+func unlockAll_serving2137(requests []request_serving2137) {
+ for _, lc := range requests {
+ unlock_serving2137(lc)
+ }
+}
+
+func Serving2137() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ b := NewBreaker_serving2137(1, 1)
+
+ locks := b.concurrentRequests(2) // G1
+ unlockAll_serving2137(locks)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go
new file mode 100644
index 0000000000..7967db7cfe
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Syncthing4829", Syncthing4829)
+}
+
+type Address_syncthing4829 int
+
+type Mapping_syncthing4829 struct {
+ mut sync.RWMutex // L2
+
+ extAddresses map[string]Address_syncthing4829
+}
+
+func (m *Mapping_syncthing4829) clearAddresses() {
+ m.mut.Lock() // L2
+ var removed []Address_syncthing4829
+ for id, addr := range m.extAddresses {
+ removed = append(removed, addr)
+ delete(m.extAddresses, id)
+ }
+ if len(removed) > 0 {
+ m.notify(nil, removed)
+ }
+ m.mut.Unlock() // L2
+}
+
+func (m *Mapping_syncthing4829) notify(added, remove []Address_syncthing4829) {
+ m.mut.RLock() // L2
+ m.mut.RUnlock() // L2
+}
+
+type Service_syncthing4829 struct {
+ mut sync.RWMutex // L1
+
+ mappings []*Mapping_syncthing4829
+}
+
+func (s *Service_syncthing4829) NewMapping() *Mapping_syncthing4829 {
+ mapping := &Mapping_syncthing4829{
+ extAddresses: make(map[string]Address_syncthing4829),
+ }
+ s.mut.Lock() // L1
+ s.mappings = append(s.mappings, mapping)
+ s.mut.Unlock() // L1
+ return mapping
+}
+
+func (s *Service_syncthing4829) RemoveMapping(mapping *Mapping_syncthing4829) {
+ s.mut.Lock() // L1
+ defer s.mut.Unlock() // L1
+ for _, existing := range s.mappings {
+ if existing == mapping {
+ mapping.clearAddresses()
+ }
+ }
+}
+
+func Syncthing4829() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ natSvc := &Service_syncthing4829{}
+ m := natSvc.NewMapping()
+ m.extAddresses["test"] = 0
+
+ natSvc.RemoveMapping(m)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go
new file mode 100644
index 0000000000..e25494a688
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go
@@ -0,0 +1,103 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Syncthing5795", Syncthing5795)
+}
+
+type message_syncthing5795 interface{}
+
+type ClusterConfig_syncthing5795 struct{}
+
+type Model_syncthing5795 interface {
+ ClusterConfig(message_syncthing5795)
+}
+
+type TestModel_syncthing5795 struct {
+ ccFn func()
+}
+
+func (t *TestModel_syncthing5795) ClusterConfig(msg message_syncthing5795) {
+ if t.ccFn != nil {
+ t.ccFn()
+ }
+}
+
+type Connection_syncthing5795 interface {
+ Start()
+ Close()
+}
+
+type rawConnection_syncthing5795 struct {
+ receiver Model_syncthing5795
+
+ inbox chan message_syncthing5795
+ dispatcherLoopStopped chan struct{}
+ closed chan struct{}
+ closeOnce sync.Once
+}
+
+func (c *rawConnection_syncthing5795) Start() {
+ go c.dispatcherLoop() // G2
+}
+
+func (c *rawConnection_syncthing5795) dispatcherLoop() {
+ defer close(c.dispatcherLoopStopped)
+ var msg message_syncthing5795
+ for {
+ select {
+ case msg = <-c.inbox:
+ case <-c.closed:
+ return
+ }
+ switch msg := msg.(type) {
+ case *ClusterConfig_syncthing5795:
+ c.receiver.ClusterConfig(msg)
+ default:
+ return
+ }
+ }
+}
+
+func (c *rawConnection_syncthing5795) Close() {
+ c.closeOnce.Do(func() {
+ close(c.closed)
+ <-c.dispatcherLoopStopped
+ })
+}
+
+func Syncthing5795() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ m := &TestModel_syncthing5795{}
+ c := &rawConnection_syncthing5795{
+ dispatcherLoopStopped: make(chan struct{}),
+ closed: make(chan struct{}),
+ inbox: make(chan message_syncthing5795),
+ receiver: m,
+ }
+ m.ccFn = c.Close
+
+ c.Start()
+ c.inbox <- &ClusterConfig_syncthing5795{}
+
+ <-c.dispatcherLoopStopped
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/main.go b/src/runtime/testdata/testgoroutineleakprofile/main.go
new file mode 100644
index 0000000000..5787c1e2b2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/main.go
@@ -0,0 +1,39 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+// The number of times the main (profiling) goroutine should yield
+// in order to allow the leaking goroutines to get stuck.
+const yieldCount = 10
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/simple.go b/src/runtime/testdata/testgoroutineleakprofile/simple.go
new file mode 100644
index 0000000000..b8172cd6df
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/simple.go
@@ -0,0 +1,253 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+// This is a set of micro-tests with obvious goroutine leaks that
+// ensures goroutine leak detection works.
+//
+// Tests in this file are not flaky if and only if run with GOMAXPROCS=1.
+// The main goroutine forcefully yields via `runtime.Gosched()` before
+// running the profiler. This moves them to the back of the run queue,
+// allowing the leaky goroutines to be scheduled beforehand and get stuck.
+
+func init() {
+ register("NilRecv", NilRecv)
+ register("NilSend", NilSend)
+ register("SelectNoCases", SelectNoCases)
+ register("ChanRecv", ChanRecv)
+ register("ChanSend", ChanSend)
+ register("Select", Select)
+ register("WaitGroup", WaitGroup)
+ register("MutexStack", MutexStack)
+ register("MutexHeap", MutexHeap)
+ register("RWMutexRLock", RWMutexRLock)
+ register("RWMutexLock", RWMutexLock)
+ register("Cond", Cond)
+ register("Mixed", Mixed)
+ register("NoLeakGlobal", NoLeakGlobal)
+}
+
+func NilRecv() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var c chan int
+ <-c
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func NilSend() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var c chan int
+ c <- 0
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func ChanRecv() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ <-make(chan int)
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func SelectNoCases() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ select {}
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func ChanSend() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ make(chan int) <- 0
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Select() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ select {
+ case make(chan int) <- 0:
+ case <-make(chan int):
+ }
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func WaitGroup() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ wg.Wait()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func MutexStack() {
+ prof := pprof.Lookup("goroutineleak")
+ for i := 0; i < 1000; i++ {
+ go func() {
+ var mu sync.Mutex
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ }
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func MutexHeap() {
+ prof := pprof.Lookup("goroutineleak")
+ for i := 0; i < 1000; i++ {
+ go func() {
+ mu := &sync.Mutex{}
+ go func() {
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ }()
+ }
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func RWMutexRLock() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ mu := &sync.RWMutex{}
+ mu.Lock()
+ mu.RLock()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func RWMutexLock() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ mu := &sync.RWMutex{}
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Cond() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ cond := sync.NewCond(&sync.Mutex{})
+ cond.L.Lock()
+ cond.Wait()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Mixed() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ ch := make(chan int)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ ch <- 0
+ wg.Done()
+ panic("should not be reached")
+ }()
+ wg.Wait()
+ <-ch
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+var ch = make(chan int)
+
+// No leak should be reported by this test
+func NoLeakGlobal() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ <-ch
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/stresstests.go b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go
new file mode 100644
index 0000000000..64b535f51c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+const spawnGCMaxDepth = 5
+
+func init() {
+ register("SpawnGC", SpawnGC)
+ register("DaisyChain", DaisyChain)
+}
+
+func spawnGC(i int) {
+ prof := pprof.Lookup("goroutineleak")
+ if i == 0 {
+ return
+ }
+ wg := &sync.WaitGroup{}
+ wg.Add(i + 1)
+ go func() {
+ wg.Done()
+ <-make(chan int)
+ }()
+ for j := 0; j < i; j++ {
+ go func() {
+ wg.Done()
+ spawnGC(i - 1)
+ }()
+ }
+ wg.Wait()
+ runtime.Gosched()
+ if i == spawnGCMaxDepth {
+ prof.WriteTo(os.Stdout, 2)
+ } else {
+ // We want to concurrently trigger the profile in order to concurrently run
+ // the GC, but we don't want to stream all the profiles to standard output.
+ //
+ // Only output the profile for the root call to spawnGC, and otherwise stream
+ // the profile outputs to /dev/null to avoid jumbling.
+ prof.WriteTo(io.Discard, 2)
+ }
+}
+
+// SpawnGC spawns a tree of goroutine leaks and calls the goroutine leak profiler
+// for each node in the tree. It is supposed to stress the goroutine leak profiler
+// under a heavily concurrent workload.
+func SpawnGC() {
+ spawnGC(spawnGCMaxDepth)
+}
+
+// DaisyChain spawns a daisy-chain of runnable goroutines.
+//
+// Each goroutine in the chain creates a new channel and goroutine.
+//
+// This illustrates a pathological worst case for the goroutine leak GC complexity,
+// as opposed to the regular GC, which is not negatively affected by this pattern.
+func DaisyChain() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(time.Second)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ var chain func(i int, ch chan struct{})
+ chain = func(i int, ch chan struct{}) {
+ if i <= 0 {
+ go func() {
+ time.Sleep(time.Hour)
+ ch <- struct{}{}
+ }()
+ return
+ }
+ ch2 := make(chan struct{})
+ go chain(i-1, ch2)
+ <-ch2
+ ch <- struct{}{}
+ }
+ // The channel buffer avoids goroutine leaks.
+ go chain(1000, make(chan struct{}, 1))
+}
diff --git a/src/runtime/pipe_unix_test.go b/src/runtime/testdata/testprog/pipe_unix.go
index 82a49df339..cee4da65f6 100644
--- a/src/runtime/pipe_unix_test.go
+++ b/src/runtime/testdata/testprog/pipe_unix.go
@@ -4,7 +4,7 @@
//go:build !windows
-package runtime_test
+package main
import "syscall"
diff --git a/src/runtime/pipe_windows_test.go b/src/runtime/testdata/testprog/pipe_windows.go
index ad84ec918a..597601a179 100644
--- a/src/runtime/pipe_windows_test.go
+++ b/src/runtime/testdata/testprog/pipe_windows.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package runtime_test
+package main
import "syscall"
diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go
new file mode 100644
index 0000000000..6d3f68a848
--- /dev/null
+++ b/src/runtime/testdata/testprog/schedmetrics.go
@@ -0,0 +1,267 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "log"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "runtime/metrics"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+func init() {
+ register("SchedMetrics", SchedMetrics)
+}
+
+// Tests runtime/metrics.Read for various scheduler metrics.
+//
+// Implemented in testprog to prevent other tests from polluting
+// the metrics.
+func SchedMetrics() {
+ const (
+ notInGo = iota
+ runnable
+ running
+ waiting
+ created
+ threads
+ numSamples
+ )
+ var s [numSamples]metrics.Sample
+ s[notInGo].Name = "/sched/goroutines/not-in-go:goroutines"
+ s[runnable].Name = "/sched/goroutines/runnable:goroutines"
+ s[running].Name = "/sched/goroutines/running:goroutines"
+ s[waiting].Name = "/sched/goroutines/waiting:goroutines"
+ s[created].Name = "/sched/goroutines-created:goroutines"
+ s[threads].Name = "/sched/threads/total:threads"
+
+ var failed bool
+ var out bytes.Buffer
+ logger := log.New(&out, "", 0)
+ indent := 0
+ logf := func(s string, a ...any) {
+ var prefix strings.Builder
+ for range indent {
+ prefix.WriteString("\t")
+ }
+ logger.Printf(prefix.String()+s, a...)
+ }
+ errorf := func(s string, a ...any) {
+ logf(s, a...)
+ failed = true
+ }
+ run := func(name string, f func()) {
+ logf("=== Checking %q", name)
+ indent++
+ f()
+ indent--
+ }
+ logMetrics := func(s []metrics.Sample) {
+ for i := range s {
+ logf("%s: %d", s[i].Name, s[i].Value.Uint64())
+ }
+ }
+
+ // generalSlack is the number of goroutines we allow ourselves to be
+ // off by in any given category, due to background system
+ // goroutines. This excludes GC goroutines.
+ generalSlack := uint64(4)
+
+ // waitingSlack is the max number of blocked goroutines controlled
+ // by the runtime that we'll allow for. This includes GC goroutines
+ // as well as finalizer and cleanup goroutines.
+ waitingSlack := generalSlack + uint64(2*runtime.GOMAXPROCS(-1))
+
+ // threadsSlack is the maximum number of threads left over
+ // from the runtime (sysmon, the template thread, etc.)
+ const threadsSlack = 4
+
+ // Make sure GC isn't running, since GC workers interfere with
+ // expected counts.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ runtime.GC()
+
+ check := func(s *metrics.Sample, min, max uint64) {
+ val := s.Value.Uint64()
+ if val < min {
+ errorf("%s too low; %d < %d", s.Name, val, min)
+ }
+ if val > max {
+ errorf("%s too high; %d > %d", s.Name, val, max)
+ }
+ }
+ checkEq := func(s *metrics.Sample, value uint64) {
+ check(s, value, value)
+ }
+ spinUntil := func(f func() bool) bool {
+ for {
+ if f() {
+ return true
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+ }
+
+ // Check base values.
+ run("base", func() {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+ metrics.Read(s[:])
+ logMetrics(s[:])
+ check(&s[notInGo], 0, generalSlack)
+ check(&s[runnable], 0, generalSlack)
+ checkEq(&s[running], 1)
+ check(&s[waiting], 0, waitingSlack)
+ })
+
+ metrics.Read(s[:])
+ createdAfterBase := s[created].Value.Uint64()
+
+ // Force Running count to be high. We'll use these goroutines
+ // for Runnable, too.
+ const count = 10
+ var ready, exit atomic.Uint32
+ for range count {
+ go func() {
+ ready.Add(1)
+ for exit.Load() == 0 {
+ // Spin to get us and keep us running, but check
+ // the exit condition so we exit out early if we're
+ // done.
+ start := time.Now()
+ for time.Since(start) < 10*time.Millisecond && exit.Load() == 0 {
+ }
+ runtime.Gosched()
+ }
+ }()
+ }
+ for ready.Load() < count {
+ runtime.Gosched()
+ }
+
+ // Be careful. We've entered a dangerous state for platforms
+ // that do not return back to the underlying system unless all
+ // goroutines are blocked, like js/wasm, since we have a bunch
+ // of runnable goroutines all spinning. We cannot write anything
+ // out.
+ if testenv.HasParallelism() {
+ run("created", func() {
+ metrics.Read(s[:])
+ logMetrics(s[:])
+ checkEq(&s[created], createdAfterBase+count)
+ })
+ run("running", func() {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(count + 4))
+ // It can take a little bit for the scheduler to
+ // distribute the goroutines to Ps, so retry until
+ // we see the count we expect or the test times out.
+ spinUntil(func() bool {
+ metrics.Read(s[:])
+ return s[running].Value.Uint64() >= count
+ })
+ logMetrics(s[:])
+ check(&s[running], count, count+4)
+ check(&s[threads], count, count+4+threadsSlack)
+ })
+
+ // Force runnable count to be high.
+ run("runnable", func() {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+ metrics.Read(s[:])
+ logMetrics(s[:])
+ checkEq(&s[running], 1)
+ check(&s[runnable], count-1, count+generalSlack)
+ })
+
+ // Done with the running/runnable goroutines.
+ exit.Store(1)
+ } else {
+ // Read metrics and then exit all the other goroutines,
+ // so that system calls may proceed.
+ metrics.Read(s[:])
+
+ // Done with the running/runnable goroutines.
+ exit.Store(1)
+
+ // Now we can check our invariants.
+ run("created", func() {
+ // Look for count-1 goroutines because we read metrics
+ // *before* the run goroutine was created for this sub-test.
+ checkEq(&s[created], createdAfterBase+count-1)
+ })
+ run("running", func() {
+ logMetrics(s[:])
+ checkEq(&s[running], 1)
+ checkEq(&s[threads], 1)
+ })
+ run("runnable", func() {
+ logMetrics(s[:])
+ check(&s[runnable], count-1, count+generalSlack)
+ })
+ }
+
+ // Force not-in-go count to be high. This is a little tricky since
+ // we try really hard not to let things block in system calls.
+ // We have to drop to the syscall package to do this reliably.
+ run("not-in-go", func() {
+ // Block a bunch of goroutines on an OS pipe.
+ pr, pw, err := pipe()
+ if err != nil {
+ switch runtime.GOOS {
+ case "js", "wasip1":
+ logf("creating pipe: %v", err)
+ return
+ }
+ panic(fmt.Sprintf("creating pipe: %v", err))
+ }
+ for i := 0; i < count; i++ {
+ go syscall.Read(pr, make([]byte, 1))
+ }
+
+ // Let the goroutines block.
+ spinUntil(func() bool {
+ metrics.Read(s[:])
+ return s[notInGo].Value.Uint64() >= count
+ })
+ logMetrics(s[:])
+ check(&s[notInGo], count, count+generalSlack)
+
+ syscall.Close(pw)
+ syscall.Close(pr)
+ })
+
+ run("waiting", func() {
+ // Force waiting count to be high.
+ const waitingCount = 1000
+ stop := make(chan bool)
+ for i := 0; i < waitingCount; i++ {
+ go func() { <-stop }()
+ }
+
+ // Let the goroutines block.
+ spinUntil(func() bool {
+ metrics.Read(s[:])
+ return s[waiting].Value.Uint64() >= waitingCount
+ })
+ logMetrics(s[:])
+ check(&s[waiting], waitingCount, waitingCount+waitingSlack)
+
+ close(stop)
+ })
+
+ if failed {
+ fmt.Fprintln(os.Stderr, out.String())
+ os.Exit(1)
+ } else {
+ fmt.Fprintln(os.Stderr, "OK")
+ }
+}
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 949d48c79a..8882c306ed 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -429,7 +429,7 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) {
// gp._defer for a defer corresponding to this function, but that
// is hard to do with defer records on the stack during a stack copy.)
// Note: the +1 is to offset the -1 that
- // stack.go:getStackMap does to back up a return
+ // (*stkframe).getStackMap does to back up a return
// address make sure the pc is in the CALL instruction.
} else {
frame.continpc = 0
@@ -1206,6 +1206,7 @@ var gStatusStrings = [...]string{
_Gwaiting: "waiting",
_Gdead: "dead",
_Gcopystack: "copystack",
+ _Gleaked: "leaked",
_Gpreempted: "preempted",
}
@@ -1226,7 +1227,7 @@ func goroutineheader(gp *g) {
}
// Override.
- if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
+ if (gpstatus == _Gwaiting || gpstatus == _Gleaked) && gp.waitreason != waitReasonZero {
status = gp.waitreason.String()
}
@@ -1245,6 +1246,9 @@ func goroutineheader(gp *g) {
}
}
print(" [", status)
+ if gpstatus == _Gleaked {
+ print(" (leaked)")
+ }
if isScan {
print(" (scan)")
}
diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go
index 03ec81fc02..8b5eafd170 100644
--- a/src/runtime/tracestatus.go
+++ b/src/runtime/tracestatus.go
@@ -122,7 +122,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus {
tgs = tracev2.GoRunning
case _Gsyscall:
tgs = tracev2.GoSyscall
- case _Gwaiting, _Gpreempted:
+ case _Gwaiting, _Gpreempted, _Gleaked:
// There are a number of cases where a G might end up in
// _Gwaiting but it's actually running in a non-preemptive
// state but needs to present itself as preempted to the