aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/testdata
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-10-03 10:11:20 -0400
committerCherry Mui <cherryyz@google.com>2025-10-03 10:11:21 -0400
commitfb1749a3fe6ac40c56127d8fcea2e8b13db820e8 (patch)
tree72dd38fc84286bf88311850a99e6becf2cbd99bf /src/runtime/testdata
parent703a5fbaad81f1285776bf6f2900506d3c751ea1 (diff)
parentadce7f196e6ac6d22e9bc851efea5f3ab650947c (diff)
downloadgo-fb1749a3fe6ac40c56127d8fcea2e8b13db820e8.tar.xz
[dev.simd] all: merge master (adce7f1) into dev.simd
Conflicts: - src/internal/goexperiment/flags.go - src/runtime/export_test.go Merge List: + 2025-10-03 adce7f196e cmd/link: support .def file with MSVC clang toolchain + 2025-10-03 d5b950399d cmd/cgo: fix unaligned arguments typedmemmove crash on iOS + 2025-10-02 53845004d6 net/http/httputil: deprecate ReverseProxy.Director + 2025-10-02 bbdff9e8e1 net/http: update bundled x/net/http2 and delete obsolete http2inTests + 2025-10-02 4008e07080 io/fs: move path name documentation up to the package doc comment + 2025-10-02 0e4e2e6832 runtime: skip TestGoroutineLeakProfile under mayMoreStackPreempt + 2025-10-02 f03c392295 runtime: fix aix/ppc64 library initialization + 2025-10-02 707454b41f cmd/go: update `go help mod edit` with the tool and ignore sections + 2025-10-02 8c68a1c1ab runtime,net/http/pprof: goroutine leak detection by using the garbage collector + 2025-10-02 84db201ae1 cmd/compile: propagate len([]T{}) to make builtin to allow stack allocation + 2025-10-02 5799c139a7 crypto/tls: rm marshalEncryptedClientHelloConfigList dead code + 2025-10-01 633dd1d475 encoding/json: fix Decoder.InputOffset regression in goexperiment.jsonv2 + 2025-10-01 8ad27fb656 doc/go_spec.html: update date + 2025-10-01 3f451f2c54 testing/synctest: fix inverted test failure message in TestContextAfterFunc + 2025-10-01 be0fed8a5f cmd/go/testdata/script/test_fuzz_fuzztime.txt: disable + 2025-09-30 eb1c7f6e69 runtime: move loong64 library entry point to os-agnostic file + 2025-09-30 c9257151e5 runtime: unify ppc64/ppc64le library entry point + 2025-09-30 4ff8a457db test/codegen: codify handling of floating point constants on arm64 + 2025-09-30 fcb893fc4b cmd/compile/internal/ssa: remove redundant "type:" prefix check + 2025-09-30 19cc1022ba mime: reduce allocs incurred by ParseMediaType + 2025-09-30 08afc50bea mime: extend "builtinTypes" to include a more complete list of common types + 2025-09-30 97da068774 cmd/compile: eliminate nil checks on .dict arg + 2025-09-30 300d9d2714 runtime: 
initialise debug settings much earlier in startup process + 2025-09-30 a846bb0aa5 errors: add AsType + 2025-09-30 7c8166d02d cmd/link/internal/arm64: support Mach-O ARM64_RELOC_SUBTRACTOR in internal linking + 2025-09-30 6e95748335 cmd/link/internal/arm64: support Mach-O ARM64_RELOC_POINTER_TO_GOT in internal linking + 2025-09-30 742f92063e cmd/compile, runtime: always enable Wasm signext and satconv features + 2025-09-30 db10db6be3 internal/poll: remove operation fields from FD + 2025-09-29 75c87df58e internal/poll: pass the I/O mode instead of an overlapped object in execIO + 2025-09-29 fc88e18b4a crypto/internal/fips140/entropy: add CPU jitter-based entropy source + 2025-09-29 db4fade759 crypto/internal/fips140/mlkem: make CAST conditional + 2025-09-29 db3cb3fd9a runtime: correct reference to getStackMap in comment + 2025-09-29 690fc2fb05 internal/poll: remove buf field from operation + 2025-09-29 eaf2345256 cmd/link: use a .def file to mark exported symbols on Windows + 2025-09-29 4b77733565 internal/syscall/windows: regenerate GetFileSizeEx + 2025-09-29 4e9006a716 crypto/tls: quote protocols in ALPN error message + 2025-09-29 047c2ab841 cmd/link: don't pass -Wl,-S on Solaris + 2025-09-29 ae8eba071b cmd/link: use correct length for pcln.cutab + 2025-09-29 fe3ba74b9e cmd/link: skip TestFlagW on platforms without DWARF symbol table + 2025-09-29 d42d56b764 encoding/xml: make use of reflect.TypeAssert + 2025-09-29 6d51f93257 runtime: jump instead of branch in netbsd/arm64 entry point + 2025-09-28 5500cbf0e4 debug/elf: prevent offset overflow + 2025-09-27 34e67623a8 all: fix typos + 2025-09-27 af6999e60d cmd/compile: implement jump table on loong64 + 2025-09-26 63cd912083 os/user: simplify go:build + 2025-09-26 53009b26dd runtime: use a smaller arena size on Wasm + 2025-09-26 3a5df9d2b2 net/http: add HTTP2Config.StrictMaxConcurrentRequests + 2025-09-26 16be34df02 net/http: add more tests of transport connection pool + 2025-09-26 3e4540b49d os/user: use getgrouplist 
on illumos && cgo + 2025-09-26 15fbe3480b internal/poll: simplify WriteMsg and ReadMsg on Windows + 2025-09-26 16ae11a9e1 runtime: move TestReadMetricsSched to testprog + 2025-09-26 459f3a3adc cmd/link: don't pass -Wl,-S on AIX + 2025-09-26 4631a2d3c6 cmd/link: skip TestFlagW on AIX + 2025-09-26 0f31d742cd cmd/compile: fix ICE with new(<untyped expr>) + 2025-09-26 7d7cd6e07b internal/poll: don't call SetFilePointerEx in Seek for overlapped handles + 2025-09-26 41cba31e66 mime/multipart: percent-encode CR and LF in header values to avoid CRLF injection + 2025-09-26 dd1d597c3a Revert "cmd/internal/obj/loong64: use the MOVVP instruction to optimize prologue" + 2025-09-26 45d6bc76af runtime: unify arm64 entry point code + 2025-09-25 fdea7da3e6 runtime: use common library entry point on windows amd64/386 + 2025-09-25 e8a4f508d1 lib/fips140: re-seal v1.0.0 + 2025-09-25 9b7a328089 crypto/internal/fips140: remove key import PCTs, make keygen PCTs fatal + 2025-09-25 7f9ab7203f crypto/internal/fips140: update frozen module version to "v1.0.0" + 2025-09-25 fb5719cbda crypto/internal/fips140/ecdsa: make TestingOnlyNewDRBG generic + 2025-09-25 56067e31f2 std: remove unused declarations Change-Id: Iecb28fd62c69fbed59da557f46d31bae55889e2c
Diffstat (limited to 'src/runtime/testdata')
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go277
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE21
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/README.md1847
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go145
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go115
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go98
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go82
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go66
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go167
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go108
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go60
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go125
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go78
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go92
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go124
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go135
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go122
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go62
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go183
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go77
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go72
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go126
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go100
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go81
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go98
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go163
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go111
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go105
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go84
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go123
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go65
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go74
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go105
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go81
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go317
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go129
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go144
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go154
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go95
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go118
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go166
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go100
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go72
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go223
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go83
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go68
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go108
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go120
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go86
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go97
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/main.go39
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go68
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go146
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go59
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go242
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go125
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go67
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go71
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go53
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go101
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go55
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go122
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go87
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go103
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/main.go39
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/simple.go253
-rw-r--r--src/runtime/testdata/testgoroutineleakprofile/stresstests.go89
-rw-r--r--src/runtime/testdata/testprog/pipe_unix.go15
-rw-r--r--src/runtime/testdata/testprog/pipe_windows.go13
-rw-r--r--src/runtime/testdata/testprog/schedmetrics.go267
73 files changed, 9727 insertions, 0 deletions
diff --git a/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go
new file mode 100644
index 0000000000..353e48ee70
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go
@@ -0,0 +1,277 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+// Common goroutine leak patterns. Extracted from:
+// "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach"
+// doi:10.1109/CGO57630.2024.10444835
+//
+// Tests in this file are not flaky iff the test is run with GOMAXPROCS=1.
+// The main goroutine forcefully yields via `runtime.Gosched()` before
+// running the profiler. This moves them to the back of the run queue,
+// allowing the leaky goroutines to be scheduled beforehand and get stuck.
+
+func init() {
+ register("NoCloseRange", NoCloseRange)
+ register("MethodContractViolation", MethodContractViolation)
+ register("DoubleSend", DoubleSend)
+ register("EarlyReturn", EarlyReturn)
+ register("NCastLeak", NCastLeak)
+ register("Timeout", Timeout)
+}
+
+// Incoming list of items and the number of workers.
+func noCloseRange(list []any, workers int) {
+ ch := make(chan any)
+
+ // Create each worker
+ for i := 0; i < workers; i++ {
+ go func() {
+
+ // Each worker waits for an item and processes it.
+ for item := range ch {
+ // Process each item
+ _ = item
+ }
+ }()
+ }
+
+ // Send each item to one of the workers.
+ for _, item := range list {
+ // Sending can leak if workers == 0 or if one of the workers panics
+ ch <- item
+ }
+ // The channel is never closed, so workers leak once there are no more
+ // items left to process.
+}
+
+func NoCloseRange() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go noCloseRange([]any{1, 2, 3}, 0)
+ go noCloseRange([]any{1, 2, 3}, 3)
+}
+
+// A worker processes items pushed to `ch` one by one in the background.
+// When the worker is no longer needed, it must be closed with `Stop`.
+//
+// Specifications:
+//
+// A worker may be started any number of times, but must be stopped only once.
+// Stopping a worker multiple times will lead to a close panic.
+// Any worker that is started must eventually be stopped.
+// Failing to stop a worker results in a goroutine leak
+type worker struct {
+ ch chan any
+ done chan any
+}
+
+// Start spawns a background goroutine that extracts items pushed to the queue.
+func (w worker) Start() {
+ go func() {
+
+ for {
+ select {
+ case <-w.ch: // Normal workflow
+ case <-w.done:
+ return // Shut down
+ }
+ }
+ }()
+}
+
+func (w worker) Stop() {
+ // Allows goroutine created by Start to terminate
+ close(w.done)
+}
+
+func (w worker) AddToQueue(item any) {
+ w.ch <- item
+}
+
+// worker limited in scope by workerLifecycle
+func workerLifecycle(items []any) {
+ // Create a new worker
+ w := worker{
+ ch: make(chan any),
+ done: make(chan any),
+ }
+ // Start worker
+ w.Start()
+
+ // Operate on worker
+ for _, item := range items {
+ w.AddToQueue(item)
+ }
+
+ runtime.Gosched()
+ // Exits without calling `Stop`. Goroutine created by `Start` eventually leaks.
+}
+
+func MethodContractViolation() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ workerLifecycle(make([]any, 10))
+ runtime.Gosched()
+}
+
+// doubleSend incoming channel must send a message (incoming error simulates an error generated internally).
+func doubleSend(ch chan any, err error) {
+ if err != nil {
+ // In case of an error, send nil.
+ ch <- nil
+ // Return is missing here.
+ }
+ // Otherwise, continue with normal behaviour
+ // This send is still executed in the error case, which may lead to a goroutine leak.
+ ch <- struct{}{}
+}
+
+func DoubleSend() {
+ prof := pprof.Lookup("goroutineleak")
+ ch := make(chan any)
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ doubleSend(ch, nil)
+ }()
+ <-ch
+
+ go func() {
+ doubleSend(ch, fmt.Errorf("error"))
+ }()
+ <-ch
+
+ ch1 := make(chan any, 1)
+ go func() {
+ doubleSend(ch1, fmt.Errorf("error"))
+ }()
+ <-ch1
+}
+
+// earlyReturn demonstrates a common pattern of goroutine leaks.
+// A return statement interrupts the evaluation of the parent goroutine before it can consume a message.
+// Incoming error simulates an error produced internally.
+func earlyReturn(err error) {
+ // Create a synchronous channel
+ ch := make(chan any)
+
+ go func() {
+
+ // Send something to the channel.
+ // Leaks if the parent goroutine terminates early.
+ ch <- struct{}{}
+ }()
+
+ if err != nil {
+ // Interrupt evaluation of parent early in case of error.
+ // Sender leaks.
+ return
+ }
+
+ // Only receive if there is no error.
+ <-ch
+}
+
+func EarlyReturn() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go earlyReturn(fmt.Errorf("error"))
+}
+
+// nCastLeak processes a number of items. First result to pass the post is retrieved from the channel queue.
+func nCastLeak(items []any) {
+ // Channel is synchronous.
+ ch := make(chan any)
+
+ // Iterate over every item
+ for range items {
+ go func() {
+
+ // Process item and send result to channel
+ ch <- struct{}{}
+ // Channel is synchronous: only one sender will synchronise
+ }()
+ }
+ // Retrieve first result. All other senders block.
+ // Receiver blocks if there are no senders.
+ <-ch
+}
+
+func NCastLeak() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ for i := 0; i < yieldCount; i++ {
+ // Yield enough times to allow all the leaky goroutines to
+ // reach the execution point.
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ nCastLeak(nil)
+ }()
+
+ go func() {
+ nCastLeak(make([]any, 5))
+ }()
+}
+
+// A context is provided to short-circuit evaluation, leading
+// the sender goroutine to leak.
+func timeout(ctx context.Context) {
+ ch := make(chan any)
+
+ go func() {
+ ch <- struct{}{}
+ }()
+
+ select {
+ case <-ch: // Receive message
+ // Sender is released
+ case <-ctx.Done(): // Context was cancelled or timed out
+ // Sender is leaked
+ }
+}
+
+func Timeout() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ runtime.Gosched()
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ for i := 0; i < 100; i++ {
+ go timeout(ctx)
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE
new file mode 100644
index 0000000000..f4b4b8abc4
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+Copyright © 2021 Institute of Computing Technology, University of New South Wales
+
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the “Software”),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md
new file mode 100644
index 0000000000..88c50e1e48
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md
@@ -0,0 +1,1847 @@
+# GoKer
+
+The following examples are obtained from the publication
+"GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs"
+(doi:10.1109/CGO51591.2021.9370317).
+
+**Authors**
+Ting Yuan (yuanting@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Guangwei Li (liguangwei@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Jie Lu† (lujie@ict.ac.an):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences;
+Chen Liu (liuchen17z@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China
+Lian Li (lianli@ict.ac.cn):
+ State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences,
+ University of Chinese Academy of Sciences, Beijing, China;
+Jingling Xue (jingling@cse.unsw.edu.au):
+ University of New South Wales, School of Computer Science and Engineering, Sydney, Australia
+
+White paper: https://lujie.ac.cn/files/papers/GoBench.pdf
+
+The examples have been modified in order to run the goroutine leak
+profiler. Buggy snippets are moved from within a unit test to separate
+applications. Each is then independently executed, possibly as multiple
+copies within the same application in order to exercise more interleavings.
+Concurrently, the main program sets up a waiting period (typically 1ms), followed
+by a goroutine leak profile request. Other modifications may involve injecting calls
+to `runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions
+in waiting periods when calling `time.Sleep`, in order to reduce overall testing time.
+
+The resulting goroutine leak profile is analyzed to ensure that no unexpected leaks occurred,
+and that the expected leaks did occur. If the leak is flaky, the only purpose of the expected
+leak list is to protect against unexpected leaks.
+
+The entries below document each of the corresponding leaks.
+
+## Cockroach/10214
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#10214]|[pull request]|[patch]| Resource | AB-BA leak |
+
+[cockroach#10214]:(cockroach10214_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/10214/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/10214
+
+### Description
+
+This goroutine leak is caused by different order when acquiring
+coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats()
+so that cockroachdb can unlock coalescedMu before locking raftMu.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------------------------------------------------
+s.sendQueuedHeartbeats() .
+s.coalescedMu.Lock() [L1] .
+s.sendQueuedHeartbeatsToNode() .
+s.mu.replicas[0].reportUnreachable() .
+s.mu.replicas[0].raftMu.Lock() [L2] .
+. s.mu.replicas[0].tick()
+. s.mu.replicas[0].raftMu.Lock() [L2]
+. s.mu.replicas[0].tickRaftMuLocked()
+. s.mu.replicas[0].mu.Lock() [L3]
+. s.mu.replicas[0].maybeQuiesceLocked()
+. s.mu.replicas[0].maybeCoalesceHeartbeat()
+. s.coalescedMu.Lock() [L1]
+--------------------------------G1,G2 leak------------------------------------------
+```
+
+## Cockroach/1055
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#1055]|[pull request]|[patch]| Mixed | Channel & WaitGroup |
+
+[cockroach#1055]:(cockroach1055_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/1055/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/1055
+
+### Description
+
+1. `Stop()` is called and blocked at `s.stop.Wait()` after acquiring the lock.
+2. `StartTask()` is called and attempts to acquire the lock. It is then blocked.
+3. `Stop()` never finishes since the task doesn't call SetStopped.
+
+### Example execution
+
+```go
+G1 G2.0 G2.1 G2.2 G3
+-------------------------------------------------------------------------------------------------------------------------------
+s[0].stop.Add(1) [1]
+go func() [G2.0]
+s[1].stop.Add(1) [1] .
+go func() [G2.1] .
+s[2].stop.Add(1) [1] . .
+go func() [G2.2] . .
+go func() [G3] . . .
+<-done . . . .
+. s[0].StartTask() . . .
+. s[0].draining == 0 . . .
+. . s[1].StartTask() . .
+. . s[1].draining == 0 . .
+. . . s[2].StartTask() .
+. . . s[2].draining == 0 .
+. . . . s[0].Quiesce()
+. . . . s[0].mu.Lock() [L1[0]]
+. s[0].mu.Lock() [L1[0]] . . .
+. s[0].drain.Add(1) [1] . . .
+. s[0].mu.Unlock() [L1[0]] . . .
+. <-s[0].ShouldStop() . . .
+. . . . s[0].draining = 1
+. . . . s[0].drain.Wait()
+. . s[0].mu.Lock() [L1[1]] . .
+. . s[1].drain.Add(1) [1] . .
+. . s[1].mu.Unlock() [L1[1]] . .
+. . <-s[1].ShouldStop() . .
+. . . s[2].mu.Lock() [L1[2]] .
+. . . s[2].drain.Add() [1] .
+. . . s[2].mu.Unlock() [L1[2]] .
+. . . <-s[2].ShouldStop() .
+----------------------------------------------------G1, G2.[0..2], G3 leak-----------------------------------------------------
+```
+
+## Cockroach/10790
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#10790]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#10790]:(cockroach10790_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/10790/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/10790
+
+### Description
+
+It is possible that a message from `ctxDone` will make `beginCmds`
+return without draining the channel `ch`, so that anonymous function
+goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+-----------------------------------------------------
+. . r.sendChans()
+r.beginCmds() . .
+. . ch1 <- true
+<- ch1 . .
+. . ch2 <- true
+...
+. cancel()
+<- ch1
+------------------G1 leak----------------------------
+```
+
+## Cockroach/13197
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#13197]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#13197]:(cockroach13197_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/13197/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/13197
+
+### Description
+
+One goroutine executing `(*Tx).awaitDone()` blocks,
+waiting for a signal from `context.Done()`.
+
+### Example execution
+
+```go
+G1 G2
+-------------------------------
+begin()
+. awaitDone()
+return .
+. <-tx.ctx.Done()
+-----------G2 leaks------------
+```
+
+## Cockroach/13755
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#13755]|[pull request]|[patch]| Communication | Channel & Context |
+
+[cockroach#13755]:(cockroach13755_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/13755/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/13755
+
+### Description
+
+The buggy code does not close the db query result (rows),
+so that one goroutine running `(*Rows).awaitDone` is blocked forever.
+The blocking goroutine is waiting for cancel signal from context.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------
+initContextClose()
+. awaitDone()
+return .
+. <-tx.ctx.Done()
+---------------G2 leaks----------------
+```
+
+## Cockroach/1462
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#1462]|[pull request]|[patch]| Mixed | Channel & WaitGroup |
+
+[cockroach#1462]:(cockroach1462_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/1462/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/1462
+
+### Description
+
+Executing `<-stopper.ShouldStop()` in `processEventsUntil` may cause
+goroutines created by `lt.RunWorker` in `lt.start` to be stuck sending
+a message over `lt.Events`. The main thread is then stuck at `s.stop.Wait()`,
+since the sender goroutines cannot call `s.stop.Done()`.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------------------------------------------------------------------
+NewLocalInterceptableTransport()
+lt.start()
+lt.stopper.RunWorker()
+s.AddWorker()
+s.stop.Add(1) [1]
+go func() [G2]
+stopper.RunWorker() .
+s.AddWorker() .
+s.stop.Add(1) [2] .
+go func() [G3] .
+s.Stop() . .
+s.Quiesce() . .
+. select [default] .
+. lt.Events <- interceptMessage(0) .
+close(s.stopper) . .
+. . select [<-stopper.ShouldStop()]
+. . <<<done>>>
+s.stop.Wait() .
+----------------------------------------------G1,G2 leak-----------------------------------------------
+```
+
+## Cockroach/16167
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#16167]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#16167]:(cockroach16167_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/16167/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/16167
+
+### Description
+
+This is another example of goroutine leaks caused by recursively
+acquiring `RWLock`.
+There are two lock variables (`systemConfigCond` and `systemConfigMu`)
+which refer to the same underlying lock. The leak involves two goroutines.
+The first acquires `systemConfigMu.Lock()`, then tries to acquire `systemConfigMu.RLock()`.
+The second acquires `systemConfigMu.Lock()`.
+If the second goroutine interleaves in between the two lock operations of the
+first goroutine, both goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------------------------------
+. e.Start()
+. e.updateSystemConfig()
+e.execParsed() .
+e.systemConfigCond.L.Lock() [L1] .
+. e.systemConfigMu.Lock() [L1]
+e.systemConfigMu.RLock() [L1] .
+------------------------G1,G2 leak-----------------------------
+```
+
+## Cockroach/18101
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#18101]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#18101]:(cockroach18101_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/18101/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/18101
+
+### Description
+
+The `context.Done()` signal short-circuits the reader goroutine, but not
+the senders, leading them to leak.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+--------------------------------------------------------------
+restore()
+. splitAndScatter()
+<-readyForImportCh .
+<-readyForImportCh <==> readyForImportCh<-
+...
+. . cancel()
+<<done>> . <<done>>
+ readyForImportCh<-
+-----------------------G2 leaks--------------------------------
+```
+
+## Cockroach/2448
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#2448]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#2448]:(cockroach2448_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/2448/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/2448
+
+### Description
+
+This bug is caused by two goroutines waiting for each other
+to unblock their channels:
+
+1) `MultiRaft` sends the commit event for the Membership change
+2) `store.processRaft` takes it and begins processing
+3) another command commits and triggers another `sendEvent`, but
+ this blocks since `store.processRaft` isn't ready for another
+ `select`. Consequently the main `MultiRaft` loop is waiting for
+ that as well.
+4) the `Membership` change was applied to the range, and the store
+ now tries to execute the callback
+5) the callback tries to write to `callbackChan`, but that is
+ consumed by the `MultiRaft` loop, which is currently waiting
+ for `store.processRaft` to consume from the events channel,
+ which it will only do after the callback has completed.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------------------
+s.processRaft() st.start()
+select .
+. select [default]
+. s.handleWriteResponse()
+. s.sendEvent()
+. select
+<-s.multiraft.Events <----> m.Events <- event
+. select [default]
+. s.handleWriteResponse()
+. s.sendEvent()
+. select [m.Events<-, <-s.stopper.ShouldStop()]
+callback() .
+select [
+ m.callbackChan<-,
+ <-s.stopper.ShouldStop()
+] .
+------------------------------G1,G2 leak----------------------------------
+```
+
+## Cockroach/24808
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#24808]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#24808]:(cockroach24808_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/24808/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/24808
+
+### Description
+
+When we `Start` the `Compactor`, it may already have received
+`Suggestions`, leaking the previously blocking write to a full channel.
+
+### Example execution
+
+```go
+G1
+------------------------------------------------
+...
+compactor.ch <-
+compactor.Start()
+compactor.ch <-
+--------------------G1 leaks--------------------
+```
+
+## Cockroach/25456
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#25456]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#25456]:(cockroach25456_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/25456/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/25456
+
+### Description
+
+When `CheckConsistency` (in the complete code) returns an error, the queue
+checks whether the store is draining to decide whether the error is worth
+logging. This check was incorrect and would block until the store actually
+started draining.
+
+### Example execution
+
+```go
+G1
+---------------------------------------
+...
+<-repl.store.Stopper().ShouldQuiesce()
+---------------G1 leaks----------------
+```
+
+## Cockroach/35073
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#35073]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#35073]:(cockroach35073_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/35073/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/35073
+
+### Description
+
+Previously, the outbox could fail during startup without closing its
+`RowChannel`. This could lead to goroutine leaks in rare cases due
+to channel communication mismatch.
+
+### Example execution
+
+## Cockroach/35931
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#35931]|[pull request]|[patch]| Communication | Channel |
+
+[cockroach#35931]:(cockroach35931_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/35931/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/35931
+
+### Description
+
+Previously, if a processor that reads from multiple inputs was waiting
+on one input to provide more data, and the other input was full, and
+both inputs were connected to inbound streams, it was possible to
+cause goroutine leaks during flow cancellation when trying to propagate
+the cancellation metadata messages into the flow. The cancellation method
+wrote metadata messages to each inbound stream one at a time, so if the
+first one was full, the canceller would block and never send a cancellation
+message to the second stream, which was the one actually being read from.
+
+### Example execution
+
+## Cockroach/3710
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#3710]|[pull request]|[patch]| Resource | RWR Deadlock |
+
+[cockroach#3710]:(cockroach3710_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/3710/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/3710
+
+### Description
+
+The goroutine leak is caused by acquiring a RLock twice in a call chain.
+`ForceRaftLogScanAndProcess(acquire s.mu.RLock())`
+`-> MaybeAdd()`
+`-> shouldQueue()`
+`-> getTruncatableIndexes()`
+`-> RaftStatus(acquire s.mu.RLock())`
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------------------------
+store.ForceRaftLogScanAndProcess()
+s.mu.RLock()
+s.raftLogQueue.MaybeAdd()
+bq.impl.shouldQueue()
+getTruncatableIndexes()
+r.store.RaftStatus()
+. store.processRaft()
+. s.mu.Lock()
+s.mu.RLock()
+----------------------G1,G2 leak-----------------------------
+```
+
+## Cockroach/584
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#584]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#584]:(cockroach584_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/584/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/584
+
+### Description
+
+Missing call to `mu.Unlock()` before the `break` in the loop.
+
+### Example execution
+
+```go
+G1
+---------------------------
+g.bootstrap()
+g.mu.Lock() [L1]
+if g.closed { ==> break
+g.manage()
+g.mu.Lock() [L1]
+----------G1 leaks---------
+```
+
+## Cockroach/6181
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#6181]|[pull request]|[patch]| Resource | RWR Deadlock |
+
+[cockroach#6181]:(cockroach6181_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/6181/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/6181
+
+### Description
+
+The same `RWMutex` may be recursively acquired for both reading and writing.
+
+### Example execution
+
+```go
+G1 G2 G3 ...
+-----------------------------------------------------------------------------------------------
+testRangeCacheCoalescedRquests()
+initTestDescriptorDB()
+pauseLookupResumeAndAssert()
+return
+. doLookupWithToken()
+. . doLookupWithToken()
+. rc.LookupRangeDescriptor() .
+. . rc.LookupRangeDescriptor()
+. rdc.rangeCacheMu.RLock() .
+. rdc.String() .
+. . rdc.rangeCacheMu.RLock()
+. . fmt.Printf()
+. . rdc.rangeCacheMu.RUnlock()
+. . rdc.rangeCacheMu.Lock()
+. rdc.rangeCacheMu.RLock() .
+-----------------------------------G2,G3,... leak----------------------------------------------
+```
+
+## Cockroach/7504
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#7504]|[pull request]|[patch]| Resource | AB-BA Deadlock |
+
+[cockroach#7504]:(cockroach7504_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/7504/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/7504
+
+### Description
+
+The locks are acquired as `leaseState` and `tableNameCache` in `Release()`, but
+as `tableNameCache` and `leaseState` in `AcquireByName`, leading to an AB-BA deadlock.
+
+### Example execution
+
+```go
+G1 G2
+-----------------------------------------------------
+mgr.AcquireByName() mgr.Release()
+m.tableNames.get(id) .
+c.mu.Lock() [L2] .
+. t.release(lease)
+. t.mu.Lock() [L3]
+. s.mu.Lock() [L1]
+lease.mu.Lock() [L1] .
+. t.removeLease(s)
+. t.tableNameCache.remove()
+. c.mu.Lock() [L2]
+---------------------G1, G2 leak---------------------
+```
+
+## Cockroach/9935
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[cockroach#9935]|[pull request]|[patch]| Resource | Double Locking |
+
+[cockroach#9935]:(cockroach9935_test.go)
+[patch]:https://github.com/cockroachdb/cockroach/pull/9935/files
+[pull request]:https://github.com/cockroachdb/cockroach/pull/9935
+
+### Description
+
+This bug is caused by acquiring `l.mu.Lock()` twice.
+
+### Example execution
+
+## Etcd/10492
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#10492]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#10492]:(etcd10492_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/10492/files
+[pull request]:https://github.com/etcd-io/etcd/pull/10492
+
+### Description
+
+A simple double locking case for lines 19, 31.
+
+## Etcd/5509
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#5509]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#5509]:(etcd5509_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/5509/files
+[pull request]:https://github.com/etcd-io/etcd/pull/5509
+
+### Description
+
+`r.acquire()` returns holding `r.client.mu.RLock()` on a failure path (line 42).
+This causes any call to `client.Close()` to leak goroutines.
+
+## Etcd/6708
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6708]|[pull request]|[patch]| Resource | Double locking |
+
+[etcd#6708]:(etcd6708_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6708/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6708
+
+### Description
+
+Double locking at lines 49 and 54.
+
+## Etcd/6857
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6857]|[pull request]|[patch]| Communication | Channel |
+
+[etcd#6857]:(etcd6857_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6857/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6857
+
+### Description
+
+Choosing a different case in a `select` statement (`n.stop`) will
+lead to goroutine leaks when sending over `n.status`.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------
+n.run() . .
+. . n.Stop()
+. . n.stop<-
+<-n.stop . .
+. . <-n.done
+close(n.done) . .
+return . .
+. . return
+. n.Status()
+. n.status<-
+----------------G2 leaks-------------------
+```
+
+## Etcd/6873
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#6873]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#6873]:(etcd6873_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/6873/files
+[pull request]:https://github.com/etcd-io/etcd/pull/6873
+
+### Description
+
+This goroutine leak involves a goroutine acquiring a lock and being
+blocked over a channel operation with no partner, while another tries
+to acquire the same lock.
+
+### Example execution
+
+```go
+G1 G2 G3
+--------------------------------------------------------------
+newWatchBroadcasts()
+wbs.update()
+wbs.updatec <-
+return
+. <-wbs.updatec .
+. wbs.coalesce() .
+. . wbs.stop()
+. . wbs.mu.Lock()
+. . close(wbs.updatec)
+. . <-wbs.donec
+. wbs.mu.Lock() .
+---------------------G2,G3 leak--------------------------------
+```
+
+## Etcd/7492
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#7492]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#7492]:(etcd7492_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/7492/files
+[pull request]:https://github.com/etcd-io/etcd/pull/7492
+
+### Description
+
+This goroutine leak involves a goroutine acquiring a lock and being
+blocked over a channel operation with no partner, while another tries
+to acquire the same lock.
+
+### Example execution
+
+```go
+G2 G1
+---------------------------------------------------------------
+. stk.run()
+ts.assignSimpleTokenToUser() .
+t.simpleTokensMu.Lock() .
+t.simpleTokenKeeper.addSimpleToken() .
+tm.addSimpleTokenCh <- true .
+. <-tm.addSimpleTokenCh
+t.simpleTokensMu.Unlock() .
+ts.assignSimpleTokenToUser() .
+...
+t.simpleTokensMu.Lock()
+. <-tokenTicker.C
+tm.addSimpleTokenCh <- true .
+. tm.deleteTokenFunc()
+. t.simpleTokensMu.Lock()
+---------------------------G1,G2 leak--------------------------
+```
+
+## Etcd/7902
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#7902]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#7902]:(etcd7902_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/7902/files
+[pull request]:https://github.com/etcd-io/etcd/pull/7902
+
+### Description
+
+If the follower goroutine acquires `mu.Lock()` first and calls
+`rc.release()`, it will be blocked sending over `rcNextc`.
+Only the leader can `close(nextc)` to unblock the follower.
+However, in order to invoke `rc.release()`, the leader needs
+to acquire `mu.Lock()`.
+The fix is to remove the lock and unlock around `rc.release()`.
+
+### Example execution
+
+```go
+G1 G2 (leader) G3 (follower)
+---------------------------------------------------------------------
+runElectionFunc()
+doRounds()
+wg.Wait()
+. ...
+. mu.Lock()
+. rc.validate()
+. rcNextc = nextc
+. mu.Unlock() ...
+. . mu.Lock()
+. . rc.validate()
+. . mu.Unlock()
+. . mu.Lock()
+. . rc.release()
+. . <-rcNextc
+. mu.Lock()
+-------------------------G1,G2,G3 leak--------------------------
+```
+
+## Grpc/1275
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1275]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1275]:(grpc1275_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1275/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1275
+
+### Description
+
+Two goroutines are involved in this leak. The main goroutine
+is blocked at `case <- donec`, and is waiting for the second goroutine
+to close the channel.
+The second goroutine is created by the main goroutine. It is blocked
+when calling `stream.Read()`, which invokes `recvBufferRead.Read()`.
+The second goroutine is blocked at case `i := r.recv.get()`, and it is
+waiting for someone to send a message to this channel.
+It is the `client.CloseStream()` method called by the main goroutine that
+should send the message, but it does not. The patch is to send out this message.
+
+### Example execution
+
+```go
+G1 G2
+-----------------------------------------------------
+testInflightStreamClosing()
+. stream.Read()
+. io.ReadFull()
+. <-r.recv.get()
+CloseStream()
+<-donec
+---------------------G1, G2 leak---------------------
+```
+
+## Grpc/1424
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1424]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1424]:(grpc1424_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1424/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1424
+
+### Description
+
+The goroutine running `cc.lbWatcher` returns without
+draining the `done` channel.
+
+### Example execution
+
+```go
+G1 G2 G3
+-----------------------------------------------------------------
+DialContext() . .
+. cc.dopts.balancer.Notify() .
+. . cc.lbWatcher()
+. <-doneChan
+close()
+---------------------------G2 leaks-------------------------------
+```
+
+## Grpc/1460
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1460]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[grpc#1460]:(grpc1460_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1460/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1460
+
+### Description
+
+When gRPC keepalives are enabled (which isn't the case
+by default at this time) and `PermitWithoutStream` is false
+(the default), the client can leak goroutines when transitioning
+between having no active stream and having one active stream.
+The `keepalive()` goroutine is stuck at `<-t.awakenKeepalive`,
+while the main goroutine is stuck in `NewStream()` on `t.mu.Lock()`.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------
+client.keepalive()
+. client.NewStream()
+t.mu.Lock()
+<-t.awakenKeepalive
+. t.mu.Lock()
+---------------G1,G2 leak-------------------
+```
+
+## Grpc/3017
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#3017]|[pull request]|[patch]| Resource | Missing unlock |
+
+[grpc#3017]:(grpc3017_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/3017/files
+[pull request]:https://github.com/grpc/grpc-go/pull/3017
+
+### Description
+
+Line 65 is an execution path with a missing unlock.
+
+### Example execution
+
+```go
+G1 G2 G3
+------------------------------------------------------------------------------------------------
+NewSubConn([1])
+ccc.mu.Lock() [L1]
+sc = 1
+ccc.subConnToAddr[1] = 1
+go func() [G2]
+<-done .
+. ccc.RemoveSubConn(1)
+. ccc.mu.Lock()
+. addr = 1
+. entry = &subConnCacheEntry_grpc3017{}
+. cc.subConnCache[1] = entry
+. timer = time.AfterFunc() [G3]
+. entry.cancel = func()
+. sc = ccc.NewSubConn([1])
+. ccc.mu.Lock() [L1]
+. entry.cancel()
+. !timer.Stop() [true]
+. entry.abortDeleting = true
+. . ccc.mu.Lock()
+. . <<<done>>>
+. ccc.RemoveSubConn(1)
+. ccc.mu.Lock() [L1]
+-------------------------------------------G1, G2 leak-----------------------------------------
+```
+
+## Grpc/660
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#660]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#660]:(grpc660_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/660/files
+[pull request]:https://github.com/grpc/grpc-go/pull/660
+
+### Description
+
+The parent function could return without draining the done channel.
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+-------------------------------------------------------------
+doCloseLoopUnary()
+. bc.stop <- true
+<-bc.stop
+return
+. done <-
+----------------------G2 leak--------------------------------
+```
+
+## Grpc/795
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#795]|[pull request]|[patch]| Resource | Double locking |
+
+[grpc#795]:(grpc795_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/795/files
+[pull request]:https://github.com/grpc/grpc-go/pull/795
+
+### Description
+
+Line 20 is an execution path with a missing unlock.
+
+## Grpc/862
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#862]|[pull request]|[patch]| Communication | Channel & Context |
+
+[grpc#862]:(grpc862_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/862/files
+[pull request]:https://github.com/grpc/grpc-go/pull/862
+
+### Description
+
+When return value `conn` is `nil`, `cc` (`ClientConn`) is not closed.
+The goroutine executing `resetAddrConn` is leaked. The patch is to
+close `ClientConn` in `defer func()`.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------
+DialContext()
+. cc.resetAddrConn()
+. resetTransport()
+. <-ac.ctx.Done()
+--------------G2 leak------------------
+```
+
+## Hugo/3251
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#3251]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[hugo#3251]:(hugo3251_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/3251/files
+[pull request]:https://github.com/gohugoio/hugo/pull/3251
+
+### Description
+
+A goroutine can hold `Lock()` at line 20 then acquire `RLock()` at
+line 29. `RLock()` at line 29 will never be acquired because `Lock()`
+at line 20 will never be released.
+
+### Example execution
+
+```go
+G1 G2 G3
+------------------------------------------------------------------------------------------
+wg.Add(1) [W1: 1]
+go func() [G2]
+go func() [G3]
+. resGetRemote()
+. remoteURLLock.URLLock(url)
+. l.Lock() [L1]
+. l.m[url] = &sync.Mutex{} [L2]
+. l.m[url].Lock() [L2]
+. l.Unlock() [L1]
+. . resGetRemote()
+. . remoteURLLock.URLLock(url)
+. . l.Lock() [L1]
+. . l.m[url].Lock() [L2]
+. remoteURLLock.URLUnlock(url)
+. l.RLock() [L1]
+...
+wg.Wait() [W1]
+----------------------------------------G1,G2,G3 leak--------------------------------------
+```
+
+## Hugo/5379
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#5379]|[pull request]|[patch]| Resource | Double locking |
+
+[hugo#5379]:(hugo5379_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/5379/files
+[pull request]:https://github.com/gohugoio/hugo/pull/5379
+
+### Description
+
+A goroutine first acquires `contentInitMu` at line 99, then
+acquires the same `Mutex` at line 66.
+
+## Istio/16224
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#16224]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[istio#16224]:(istio16224_test.go)
+[patch]:https://github.com/istio/istio/pull/16224/files
+[pull request]:https://github.com/istio/istio/pull/16224
+
+### Description
+
+A goroutine holds a `Mutex` at line 91 and is then blocked at line 93.
+Another goroutine attempts to acquire the same `Mutex` at line 101 to
+further drain the same channel at line 103.
+
+## Istio/17860
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#17860]|[pull request]|[patch]| Communication | Channel |
+
+[istio#17860]:(istio17860_test.go)
+[patch]:https://github.com/istio/istio/pull/17860/files
+[pull request]:https://github.com/istio/istio/pull/17860
+
+### Description
+
+`a.statusCh` can't be drained at line 70.
+
+## Istio/18454
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#18454]|[pull request]|[patch]| Communication | Channel & Context |
+
+[istio#18454]:(istio18454_test.go)
+[patch]:https://github.com/istio/istio/pull/18454/files
+[pull request]:https://github.com/istio/istio/pull/18454
+
+### Description
+
+`s.timer.Stop()` at lines 56 and 61 can be called concurrently
+(i.e. from their entry points at line 104 and line 66).
+See [Timer](https://golang.org/pkg/time/#Timer).
+
+## Kubernetes/10182
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#10182]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#10182]:(kubernetes10182_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/10182/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/10182
+
+### Description
+
+Goroutine 1 is blocked on a lock held by goroutine 3,
+while goroutine 3 is blocked on sending message to `ch`,
+which is read by goroutine 1.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------------------------------------------
+s.Start()
+s.syncBatch()
+. s.SetPodStatus()
+. s.podStatusesLock.Lock()
+<-s.podStatusChannel <===> s.podStatusChannel <- true
+. s.podStatusesLock.Unlock()
+. return
+s.DeletePodStatus() .
+. . s.podStatusesLock.Lock()
+. . s.podStatusChannel <- true
+s.podStatusesLock.Lock()
+-----------------------------G1,G3 leak-----------------------------------------
+```
+
+## Kubernetes/11298
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#11298]|[pull request]|[patch]| Communication | Channel & Condition Variable |
+
+[kubernetes#11298]:(kubernetes11298_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/11298/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/11298
+
+### Description
+
+`n.node` used `n.lock` as the underlying locker. The service loop initially
+locked it, and the `Notify` function tried to lock it before calling `n.node.Signal()`,
+leading to a goroutine leak. `n.cond.Signal()` at line 59 and line 81 are not
+guaranteed to unblock the `n.cond.Wait` at line 56.
+
+## Kubernetes/13135
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#13135]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[kubernetes#13135]:(kubernetes13135_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/13135/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/13135
+
+### Description
+
+```go
+G1 G2 G3
+----------------------------------------------------------------------------------
+NewCacher()
+watchCache.SetOnReplace()
+watchCache.SetOnEvent()
+. cacher.startCaching()
+. c.Lock()
+. c.reflector.ListAndWatch()
+. r.syncWith()
+. r.store.Replace()
+. w.Lock()
+. w.onReplace()
+. cacher.initOnce.Do()
+. cacher.Unlock()
+return cacher .
+. . c.watchCache.Add()
+. . w.processEvent()
+. . w.Lock()
+. cacher.startCaching() .
+. c.Lock() .
+...
+. c.Lock()
+. w.Lock()
+--------------------------------G2,G3 leak-----------------------------------------
+```
+
+## Kubernetes/1321
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#1321]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#1321]:(kubernetes1321_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/1321/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/1321
+
+### Description
+
+This is a lock-channel bug. The first goroutine invokes
+`distribute()`, which holds `m.lock.Lock()`, while blocking
+at sending message to `w.result`. The second goroutine
+invokes `stopWatching()` function, which can unblock the first
+goroutine by closing `w.result`. However, in order to close `w.result`,
+`stopWatching()` function needs to acquire `m.lock.Lock()`.
+
+The fix is to introduce another channel and put receive message
+from the second channel in the same `select` statement as the
+`w.result`. Close the second channel can unblock the first
+goroutine, while no need to hold `m.lock.Lock()`.
+
+### Example execution
+
+```go
+G1 G2
+----------------------------------------------
+testMuxWatcherClose()
+NewMux()
+. m.loop()
+. m.distribute()
+. m.lock.Lock()
+. w.result <- true
+w := m.Watch()
+w.Stop()
+mw.m.stopWatching()
+m.lock.Lock()
+---------------G1,G2 leak---------------------
+```
+
+## Kubernetes/25331
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#25331]|[pull request]|[patch]| Communication | Channel & Context |
+
+[kubernetes#25331]:(kubernetes25331_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/25331/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/25331
+
+### Description
+
+A potential goroutine leak occurs when an error has happened,
+blocking `resultChan`, while cancelling context in `Stop()`.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------
+wc.run()
+. wc.Stop()
+. wc.errChan <-
+. wc.cancel()
+<-wc.errChan
+wc.cancel()
+wc.resultChan <-
+-------------G1 leak----------------
+```
+
+## Kubernetes/26980
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#26980]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#26980]:(kubernetes26980_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/26980/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/26980
+
+### Description
+
+A goroutine holds a `Mutex` at line 24 and blocked at line 35.
+Another goroutine blocked at line 58 by acquiring the same `Mutex`.
+
+## Kubernetes/30872
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#30872]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[kubernetes#30872]:(kubernetes30872_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/30872/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/30872
+
+### Description
+
+The lock is acquired both at lines 92 and 157.
+
+## Kubernetes/38669
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#38669]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#38669]:(kubernetes38669_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/38669/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/38669
+
+### Description
+
+No sender for line 33.
+
+## Kubernetes/5316
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#5316]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#5316]:(kubernetes5316_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/5316/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/5316
+
+### Description
+
+If the main goroutine selects a case that doesn't consume
+the channels, the anonymous goroutine will be blocked on sending
+to the channel.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------
+finishRequest()
+. fn()
+time.After()
+. errCh<-/ch<-
+--------------G2 leaks----------------
+```
+
+## Kubernetes/58107
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#58107]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[kubernetes#58107]:(kubernetes58107_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/58107/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/58107
+
+### Description
+
+The rules for read and write lock: allows concurrent read lock;
+write lock has higher priority than read lock.
+
+There are two queues (queue 1 and queue 2) involved in this bug,
+and the two queues are protected by the same read-write lock
+(`rq.workerLock.RLock()`). Before getting an element from queue 1 or
+queue 2, `rq.workerLock.RLock()` is acquired. If the queue is empty,
+`cond.Wait()` will be invoked. There is another goroutine (goroutine D),
+which will periodically invoke `rq.workerLock.Lock()`. Under the following
+situation, deadlock will happen. Queue 1 is empty, so that some goroutines
+hold `rq.workerLock.RLock()`, and block at `cond.Wait()`. Goroutine D is
+blocked when acquiring `rq.workerLock.Lock()`. Some goroutines try to process
+jobs in queue 2, but they are blocked when acquiring `rq.workerLock.RLock()`,
+since write lock has a higher priority.
+
+The fix is to not acquire `rq.workerLock.RLock()` while pulling data
+from any queue. Therefore, when a goroutine is blocked at `cond.Wait()`,
+`rq.workerLock.RLock()` is not held.
+
+### Example execution
+
+```go
+G3 G4 G5
+--------------------------------------------------------------------
+. . Sync()
+rq.workerLock.RLock() . .
+q.cond.Wait() . .
+. . rq.workerLock.Lock()
+. rq.workerLock.RLock()
+. q.cond.L.Lock()
+-----------------------------G3,G4,G5 leak-----------------------------
+```
+
+## Kubernetes/62464
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#62464]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[kubernetes#62464]:(kubernetes62464_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/62464/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/62464
+
+### Description
+
+This is another example of a recursive read lock bug. It has
+been noted by the Go developers that `RLock` should not be
+acquired recursively in the same goroutine.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------
+m.reconcileState()
+m.state.GetCPUSetOrDefault()
+s.RLock()
+s.GetCPUSet()
+. p.RemoveContainer()
+. s.GetDefaultCPUSet()
+. s.SetDefaultCPUSet()
+. s.Lock()
+s.RLock()
+---------------------G1,G2 leak--------------------------
+```
+
+## Kubernetes/6632
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#6632]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#6632]:(kubernetes6632_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/6632/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/6632
+
+### Description
+
+When `resetChan` is full, `WriteFrame` holds the lock and blocks
+on the channel. Then `monitor()` fails to close the `resetChan`
+because the lock is already held by `WriteFrame`.
+
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+----------------------------------------------------------------
+i.monitor()
+<-i.conn.closeChan
+. i.WriteFrame()
+. i.writeLock.Lock()
+. i.resetChan <-
+. . i.conn.closeChan<-
+i.writeLock.Lock()
+----------------------G1,G2 leak--------------------------------
+```
+
+## Kubernetes/70277
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#70277]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#70277]:(kubernetes70277_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/70277/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/70277
+
+### Description
+
+`wait.poller()` returns a function of type `WaitFunc`.
+That function creates a goroutine, and the goroutine only
+quits when either `after` or `done` is closed.
+
+The `doneCh` defined at line 70 is never closed.
+
+## Moby/17176
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#17176]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#17176]:(moby17176_test.go)
+[patch]:https://github.com/moby/moby/pull/17176/files
+[pull request]:https://github.com/moby/moby/pull/17176
+
+### Description
+
+`devices.nrDeletedDevices` takes `devices.Lock()` but does
+not release it (line 36) if there are no deleted devices. This will block
+other goroutines trying to acquire `devices.Lock()`.
+
+## Moby/21233
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#21233]|[pull request]|[patch]| Communication | Channel |
+
+[moby#21233]:(moby21233_test.go)
+[patch]:https://github.com/moby/moby/pull/21233/files
+[pull request]:https://github.com/moby/moby/pull/21233
+
+### Description
+
+This test was checking that it received every progress update that was
+produced. But delivery of these intermediate progress updates is not
+guaranteed. A new update can overwrite the previous one if the previous
+one hasn't been sent to the channel yet.
+
+The call to `t.Fatalf` terminated the current goroutine which was consuming
+the channel, which caused a deadlock and eventual test timeout rather
+than a proper failure message.
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------------------------
+testTransfer() . .
+tm.Transfer() . .
+t.Watch() . .
+. WriteProgress() .
+. ProgressChan<- .
+. . <-progressChan
+. ... ...
+. return .
+. <-progressChan
+<-watcher.running
+----------------------G1,G3 leak--------------------------
+```
+
+## Moby/25384
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#25384]|[pull request]|[patch]| Mixed | Misuse WaitGroup |
+
+[moby#25384]:(moby25384_test.go)
+[patch]:https://github.com/moby/moby/pull/25384/files
+[pull request]:https://github.com/moby/moby/pull/25384
+
+### Description
+
+When `n=1` (where `n` is `len(pm.plugins)`), the location of `group.Wait()` doesn’t matter.
+When `n > 1`, `group.Wait()` is invoked in each iteration. Whenever
+`group.Wait()` is invoked, it waits for `group.Done()` to be executed `n` times.
+However, `group.Done()` is only executed once in one iteration.
+
+Misuse of sync.WaitGroup
+
+## Moby/27782
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#27782]|[pull request]|[patch]| Communication | Channel & Condition Variable |
+
+[moby#27782]:(moby27782_test.go)
+[patch]:https://github.com/moby/moby/pull/27782/files
+[pull request]:https://github.com/moby/moby/pull/27782
+
+### Description
+
+### Example execution
+
+```go
+G1 G2 G3
+-----------------------------------------------------------------------
+InitializeStdio()
+startLogging()
+l.ReadLogs()
+NewLogWatcher()
+. l.readLogs()
+container.Reset() .
+LogDriver.Close() .
+r.Close() .
+close(w.closeNotifier) .
+. followLogs(logWatcher)
+. watchFile()
+. New()
+. NewEventWatcher()
+. NewWatcher()
+. . w.readEvents()
+. . event.ignoreLinux()
+. . return false
+. <-logWatcher.WatchClose() .
+. fileWatcher.Remove() .
+. w.cv.Wait() .
+. . w.Events <- event
+------------------------------G2,G3 leak-------------------------------
+```
+
+## Moby/28462
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#28462]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[moby#28462]:(moby28462_test.go)
+[patch]:https://github.com/moby/moby/pull/28462/files
+[pull request]:https://github.com/moby/moby/pull/28462
+
+### Description
+
+One goroutine may acquire a lock and try to send a message over channel `stop`,
+while the other will try to acquire the same lock. With the wrong ordering,
+both goroutines will leak.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------
+monitor()
+handleProbeResult()
+. d.StateChanged()
+. c.Lock()
+. d.updateHealthMonitorElseBranch()
+. h.CloseMonitorChannel()
+. s.stop <- struct{}{}
+c.Lock()
+----------------------G1,G2 leak------------------------------
+```
+
+## Moby/30408
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#30408]|[pull request]|[patch]| Communication | Condition Variable |
+
+[moby#30408]:(moby30408_test.go)
+[patch]:https://github.com/moby/moby/pull/30408/files
+[pull request]:https://github.com/moby/moby/pull/30408
+
+### Description
+
+`Wait()` at line 22 has no corresponding `Signal()` or `Broadcast()`.
+
+### Example execution
+
+```go
+G1 G2
+------------------------------------------
+testActive()
+. p.waitActive()
+. p.activateWait.L.Lock()
+. p.activateWait.Wait()
+<-done
+-----------------G1,G2 leak---------------
+```
+
+## Moby/33781
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#33781]|[pull request]|[patch]| Communication | Channel & Context |
+
+[moby#33781]:(moby33781_test.go)
+[patch]:https://github.com/moby/moby/pull/33781/files
+[pull request]:https://github.com/moby/moby/pull/33781
+
+### Description
+
+The goroutine created using an anonymous function is blocked
+sending a message over an unbuffered channel. However there
+exists a path in the parent goroutine where the parent function
+will return without draining the channel.
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------
+monitor() .
+<-time.After() .
+. .
+<-stop stop<-
+.
+cancelProbe()
+return
+. result<-
+----------------G3 leak------------------
+```
+
+## Moby/36114
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#36114]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#36114]:(moby36114_test.go)
+[patch]:https://github.com/moby/moby/pull/36114/files
+[pull request]:https://github.com/moby/moby/pull/36114
+
+### Description
+
+The lock for the struct `svm` has already been acquired when calling
+`svm.hotRemoveVHDsAtStart()`, so acquiring it again self-deadlocks.
+
+## Moby/4951
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#4951]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[moby#4951]:(moby4951_test.go)
+[patch]:https://github.com/moby/moby/pull/4951/files
+[pull request]:https://github.com/moby/moby/pull/4951
+
+### Description
+
+The root cause and patch are clearly explained in the commit
+description. The global lock is `devices.Lock()`, and the device
+lock is `baseInfo.lock.Lock()`. It is very likely that this bug
+can be reproduced.
+
+## Moby/7559
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#7559]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#7559]:(moby7559_test.go)
+[patch]:https://github.com/moby/moby/pull/7559/files
+[pull request]:https://github.com/moby/moby/pull/7559
+
+### Description
+
+Line 25 is missing a call to `.Unlock`.
+
+### Example execution
+
+```go
+G1
+---------------------------
+proxy.connTrackLock.Lock()
+if err != nil { continue }
+proxy.connTrackLock.Lock()
+-----------G1 leaks--------
+```
+
+## Serving/2137
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[serving#2137]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[serving#2137]:(serving2137_test.go)
+[patch]:https://github.com/knative/serving/pull/2137/files
+[pull request]:https://github.com/knative/serving/pull/2137
+
+### Description
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------------------------------------------------
+b.concurrentRequests(2) . .
+b.concurrentRequest() . .
+r.lock.Lock() . .
+. start.Done() .
+start.Wait() . .
+b.concurrentRequest() . .
+r.lock.Lock() . .
+. . start.Done()
+start.Wait() . .
+unlockAll(locks) . .
+unlock(lc) . .
+req.lock.Unlock() . .
+ok := <-req.accepted . .
+. b.Maybe() .
+. b.activeRequests <- t .
+. thunk() .
+. r.lock.Lock() .
+. . b.Maybe()
+. . b.activeRequests <- t
+----------------------------G1,G2,G3 leak-----------------------------------------
+```
+
+## Syncthing/4829
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#4829]|[pull request]|[patch]| Resource | Double locking |
+
+[syncthing#4829]:(syncthing4829_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/4829/files
+[pull request]:https://github.com/syncthing/syncthing/pull/4829
+
+### Description
+
+Double locking at line 17 and line 30.
+
+### Example execution
+
+```go
+G1
+---------------------------
+mapping.clearAddresses()
+m.mut.Lock() [L2]
+m.notify(...)
+m.mut.RLock() [L2]
+----------G1 leaks---------
+```
+
+## Syncthing/5795
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#5795]|[pull request]|[patch]| Communication | Channel |
+
+[syncthing#5795]:(syncthing5795_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/5795/files
+[pull request]:https://github.com/syncthing/syncthing/pull/5795
+
+### Description
+
+`<-c.dispatcherLoopStopped` at line 82 is blocking forever because
+`dispatcherLoop()` is blocking at line 72.
+
+### Example execution
+
+```go
+G1 G2
+--------------------------------------------------------------
+c.Start()
+go c.dispatcherLoop() [G3]
+. select [<-c.inbox, <-c.closed]
+c.inbox <- <================> [<-c.inbox]
+<-c.dispatcherLoopStopped .
+. default
+. c.ccFn()/c.Close()
+. close(c.closed)
+. <-c.dispatcherLoopStopped
+---------------------G1,G2 leak-------------------------------
+``` \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
new file mode 100644
index 0000000000..4f5ef3b0fc
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
@@ -0,0 +1,145 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10214
+ * Buggy version: 7207111aa3a43df0552509365fdec741a53f873f
+ * fix commit-id: 27e863d90ab0660494778f1c35966cc5ddc38e32
+ * Flaky: 3/100
+ * Description: This goroutine leak is caused by acquiring coalescedMu.Lock()
+ * and raftMu.Lock() in different orders. The fix is to refactor sendQueuedHeartbeats()
+ * so that cockroachdb can unlock coalescedMu before locking raftMu.
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("Cockroach10214", Cockroach10214)
+}
+
+type Store_cockroach10214 struct {
+ coalescedMu struct {
+ sync.Mutex // L1
+ heartbeatResponses []int
+ }
+ mu struct {
+ replicas map[int]*Replica_cockroach10214
+ }
+}
+
+func (s *Store_cockroach10214) sendQueuedHeartbeats() {
+ s.coalescedMu.Lock() // L1 acquire
+	defer s.coalescedMu.Unlock() // L1 release
+ for i := 0; i < len(s.coalescedMu.heartbeatResponses); i++ {
+ s.sendQueuedHeartbeatsToNode() // L2
+ }
+}
+
+func (s *Store_cockroach10214) sendQueuedHeartbeatsToNode() {
+ for i := 0; i < len(s.mu.replicas); i++ {
+ r := s.mu.replicas[i]
+ r.reportUnreachable() // L2
+ }
+}
+
+type Replica_cockroach10214 struct {
+ raftMu sync.Mutex // L2
+ mu sync.Mutex // L3
+ store *Store_cockroach10214
+}
+
+func (r *Replica_cockroach10214) reportUnreachable() {
+ r.raftMu.Lock() // L2 acquire
+ time.Sleep(time.Millisecond)
+ defer r.raftMu.Unlock() // L2 release
+}
+
+func (r *Replica_cockroach10214) tick() {
+ r.raftMu.Lock() // L2 acquire
+ defer r.raftMu.Unlock() // L2 release
+ r.tickRaftMuLocked()
+}
+
+func (r *Replica_cockroach10214) tickRaftMuLocked() {
+ r.mu.Lock() // L3 acquire
+ defer r.mu.Unlock() // L3 release
+ if r.maybeQuiesceLocked() {
+ return
+ }
+}
+
+func (r *Replica_cockroach10214) maybeQuiesceLocked() bool {
+ for i := 0; i < 2; i++ {
+ if !r.maybeCoalesceHeartbeat() {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *Replica_cockroach10214) maybeCoalesceHeartbeat() bool {
+ msgtype := uintptr(unsafe.Pointer(r)) % 3
+ switch msgtype {
+ case 0, 1, 2:
+ r.store.coalescedMu.Lock() // L1 acquire
+ default:
+ return false
+ }
+ r.store.coalescedMu.Unlock() // L1 release
+ return true
+}
+
+func Cockroach10214() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ store := &Store_cockroach10214{}
+ responses := &store.coalescedMu.heartbeatResponses
+ *responses = append(*responses, 1, 2)
+ store.mu.replicas = make(map[int]*Replica_cockroach10214)
+
+ rp1 := &Replica_cockroach10214{ // L2,3[0]
+ store: store,
+ }
+ rp2 := &Replica_cockroach10214{ // L2,3[1]
+ store: store,
+ }
+ store.mu.replicas[0] = rp1
+ store.mu.replicas[1] = rp2
+
+ go store.sendQueuedHeartbeats() // G1
+ go rp1.tick() // G2
+ }()
+ }
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//------------------------------------------------------------------------------------
+// s.sendQueuedHeartbeats() .
+// s.coalescedMu.Lock() [L1] .
+// s.sendQueuedHeartbeatsToNode() .
+// s.mu.replicas[0].reportUnreachable() .
+// s.mu.replicas[0].raftMu.Lock() [L2] .
+// . s.mu.replicas[0].tick()
+// . s.mu.replicas[0].raftMu.Lock() [L2]
+// . s.mu.replicas[0].tickRaftMuLocked()
+// . s.mu.replicas[0].mu.Lock() [L3]
+// . s.mu.replicas[0].maybeQuiesceLocked()
+// . s.mu.replicas[0].maybeCoalesceHeartbeat()
+// . s.coalescedMu.Lock() [L1]
+//--------------------------------G1,G2 leak------------------------------------------ \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
new file mode 100644
index 0000000000..687baed25a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go
@@ -0,0 +1,115 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func init() {
+ register("Cockroach1055", Cockroach1055)
+}
+
+type Stopper_cockroach1055 struct {
+ stopper chan struct{}
+ stop sync.WaitGroup
+ mu sync.Mutex
+ draining int32
+ drain sync.WaitGroup
+}
+
+func (s *Stopper_cockroach1055) AddWorker() {
+ s.stop.Add(1)
+}
+
+func (s *Stopper_cockroach1055) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach1055) SetStopped() {
+ if s != nil {
+ s.stop.Done()
+ }
+}
+
+func (s *Stopper_cockroach1055) Quiesce() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.draining = 1
+ s.drain.Wait()
+ s.draining = 0
+}
+
+func (s *Stopper_cockroach1055) Stop() {
+ s.mu.Lock() // L1
+ defer s.mu.Unlock()
+ atomic.StoreInt32(&s.draining, 1)
+ s.drain.Wait()
+ close(s.stopper)
+ s.stop.Wait()
+}
+
+func (s *Stopper_cockroach1055) StartTask() bool {
+ if atomic.LoadInt32(&s.draining) == 0 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.drain.Add(1)
+ return true
+ }
+ return false
+}
+
+func NewStopper_cockroach1055() *Stopper_cockroach1055 {
+ return &Stopper_cockroach1055{
+ stopper: make(chan struct{}),
+ }
+}
+
+func Cockroach1055() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i <= 1000; i++ {
+ go func() { // G1
+ var stoppers []*Stopper_cockroach1055
+ for i := 0; i < 2; i++ {
+ stoppers = append(stoppers, NewStopper_cockroach1055())
+ }
+
+ for i := range stoppers {
+ s := stoppers[i]
+ s.AddWorker()
+ go func() { // G2
+ s.StartTask()
+ <-s.ShouldStop()
+ s.SetStopped()
+ }()
+ }
+
+ done := make(chan struct{})
+ go func() { // G3
+ for _, s := range stoppers {
+ s.Quiesce()
+ }
+ for _, s := range stoppers {
+ s.Stop()
+ }
+ close(done)
+ }()
+
+ <-done
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go
new file mode 100644
index 0000000000..636f45b3e0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go
@@ -0,0 +1,98 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10790
+ * Buggy version: 96b5452557ebe26bd9d85fe7905155009204d893
+ * fix commit-id: f1a5c19125c65129b966fbdc0e6408e8df214aba
+ * Flaky: 28/100
+ * Description:
+ * It is possible that a message from ctxDone will make the function beginCmds
+ * return without draining the channel ch, so that the goroutine created by the
+ * anonymous function will leak.
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach10790", Cockroach10790)
+}
+
+type Replica_cockroach10790 struct {
+ chans []chan bool
+}
+
+func (r *Replica_cockroach10790) beginCmds(ctx context.Context) {
+ ctxDone := ctx.Done()
+ for _, ch := range r.chans {
+ select {
+ case <-ch:
+ case <-ctxDone:
+ go func() { // G3
+ for _, ch := range r.chans {
+ <-ch
+ }
+ }()
+ }
+ }
+}
+
+func (r *Replica_cockroach10790) sendChans(ctx context.Context) {
+ for _, ch := range r.chans {
+ select {
+ case ch <- true:
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func NewReplica_cockroach10790() *Replica_cockroach10790 {
+ r := &Replica_cockroach10790{}
+ r.chans = append(r.chans, make(chan bool), make(chan bool))
+ return r
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2 G3 helper goroutine
+//--------------------------------------------------------------------------------------
+// . . r.sendChans()
+// r.beginCmds() . .
+// . . ch1 <-
+// <-ch1 <================================================> ch1 <-
+// . . select [ch2<-, <-ctx.Done()]
+// . cancel() .
+// . <<done>> [<-ctx.Done()] ==> return
+// . <<done>>
+// go func() [G3] .
+// . <-ch1
+// ------------------------------G3 leaks----------------------------------------------
+//
+
+func Cockroach10790() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ r := NewReplica_cockroach10790()
+ ctx, cancel := context.WithCancel(context.Background())
+ go r.sendChans(ctx) // helper goroutine
+ go r.beginCmds(ctx) // G1
+ go cancel() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go
new file mode 100644
index 0000000000..a0a9a79267
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go
@@ -0,0 +1,82 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13197
+ * Buggy version: fff27aedabafe20cef57f75905fe340cab48c2a4
+ * fix commit-id: 9bf770cd8f6eaff5441b80d3aec1a5614e8747e1
+ * Flaky: 100/100
+ * Description: One goroutine executing (*Tx).awaitDone() blocks
+ * waiting for a signal over context.Done() that never comes.
+ */
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach13197", Cockroach13197)
+}
+
+type DB_cockroach13197 struct{}
+
+func (db *DB_cockroach13197) begin(ctx context.Context) *Tx_cockroach13197 {
+ ctx, cancel := context.WithCancel(ctx)
+ tx := &Tx_cockroach13197{
+ cancel: cancel,
+ ctx: ctx,
+ }
+ go tx.awaitDone() // G2
+ return tx
+}
+
+type Tx_cockroach13197 struct {
+ cancel context.CancelFunc
+ ctx context.Context
+}
+
+func (tx *Tx_cockroach13197) awaitDone() {
+ <-tx.ctx.Done()
+}
+
+func (tx *Tx_cockroach13197) Rollback() {
+ tx.rollback()
+}
+
+func (tx *Tx_cockroach13197) rollback() {
+ tx.close()
+}
+
+func (tx *Tx_cockroach13197) close() {
+ tx.cancel()
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//--------------------------------
+// begin()
+// . awaitDone()
+// <<done>> .
+// <-tx.ctx.Done()
+//------------G2 leak-------------
+
+func Cockroach13197() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ db := &DB_cockroach13197{}
+ db.begin(context.Background()) // G1
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go
new file mode 100644
index 0000000000..5ef6fa1e28
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go
@@ -0,0 +1,66 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13755
+ * Buggy version: 7acb881bbb8f23e87b69fce9568d9a3316b5259c
+ * fix commit-id: ef906076adc1d0e3721944829cfedfed51810088
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach13755", Cockroach13755)
+}
+
+type Rows_cockroach13755 struct {
+ cancel context.CancelFunc
+}
+
+func (rs *Rows_cockroach13755) initContextClose(ctx context.Context) {
+ ctx, rs.cancel = context.WithCancel(ctx)
+ go rs.awaitDone(ctx)
+}
+
+func (rs *Rows_cockroach13755) awaitDone(ctx context.Context) {
+ <-ctx.Done()
+ rs.close(ctx.Err())
+}
+
+func (rs *Rows_cockroach13755) close(err error) {
+ rs.cancel()
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2
+//----------------------------------------
+// initContextClose()
+// . awaitDone()
+// <<done>> .
+// <-tx.ctx.Done()
+//----------------G2 leak-----------------
+
+func Cockroach13755() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ rs := &Rows_cockroach13755{}
+ rs.initContextClose(context.Background())
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go
new file mode 100644
index 0000000000..108d7884a3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go
@@ -0,0 +1,167 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach1462", Cockroach1462)
+}
+
+type Stopper_cockroach1462 struct {
+ stopper chan struct{}
+ stopped chan struct{}
+ stop sync.WaitGroup
+ mu sync.Mutex
+ drain *sync.Cond
+ draining bool
+ numTasks int
+}
+
+func NewStopper_cockroach1462() *Stopper_cockroach1462 {
+ s := &Stopper_cockroach1462{
+ stopper: make(chan struct{}),
+ stopped: make(chan struct{}),
+ }
+ s.drain = sync.NewCond(&s.mu)
+ return s
+}
+
+func (s *Stopper_cockroach1462) RunWorker(f func()) {
+ s.AddWorker()
+ go func() { // G2, G3
+ defer s.SetStopped()
+ f()
+ }()
+}
+
+func (s *Stopper_cockroach1462) AddWorker() {
+ s.stop.Add(1)
+}
+func (s *Stopper_cockroach1462) StartTask() bool {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ if s.draining {
+ return false
+ }
+ s.numTasks++
+ return true
+}
+
+func (s *Stopper_cockroach1462) FinishTask() {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ s.numTasks--
+ s.drain.Broadcast()
+}
+func (s *Stopper_cockroach1462) SetStopped() {
+ if s != nil {
+ s.stop.Done()
+ }
+}
+func (s *Stopper_cockroach1462) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach1462) Quiesce() {
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ s.draining = true
+ for s.numTasks > 0 {
+ // Unlock s.mu, wait for the signal, and lock s.mu.
+ s.drain.Wait()
+ }
+}
+
+func (s *Stopper_cockroach1462) Stop() {
+ s.Quiesce()
+ close(s.stopper)
+ s.stop.Wait()
+ s.mu.Lock()
+ runtime.Gosched()
+ defer s.mu.Unlock()
+ close(s.stopped)
+}
+
+type interceptMessage_cockroach1462 int
+
+type localInterceptableTransport_cockroach1462 struct {
+ mu sync.Mutex
+ Events chan interceptMessage_cockroach1462
+ stopper *Stopper_cockroach1462
+}
+
+func (lt *localInterceptableTransport_cockroach1462) Close() {}
+
+type Transport_cockroach1462 interface {
+ Close()
+}
+
+func NewLocalInterceptableTransport_cockroach1462(stopper *Stopper_cockroach1462) Transport_cockroach1462 {
+ lt := &localInterceptableTransport_cockroach1462{
+ Events: make(chan interceptMessage_cockroach1462),
+ stopper: stopper,
+ }
+ lt.start()
+ return lt
+}
+
+func (lt *localInterceptableTransport_cockroach1462) start() {
+ lt.stopper.RunWorker(func() {
+ for {
+ select {
+ case <-lt.stopper.ShouldStop():
+ return
+ default:
+ lt.Events <- interceptMessage_cockroach1462(0)
+ }
+ }
+ })
+}
+
+func processEventsUntil_cockroach1462(ch <-chan interceptMessage_cockroach1462, stopper *Stopper_cockroach1462) {
+ for {
+ select {
+ case _, ok := <-ch:
+ runtime.Gosched()
+ if !ok {
+ return
+ }
+ case <-stopper.ShouldStop():
+ return
+ }
+ }
+}
+
+func Cockroach1462() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(2000 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i <= 1000; i++ {
+ go func() { // G1
+ stopper := NewStopper_cockroach1462()
+ transport := NewLocalInterceptableTransport_cockroach1462(stopper).(*localInterceptableTransport_cockroach1462)
+ stopper.RunWorker(func() {
+ processEventsUntil_cockroach1462(transport.Events, stopper)
+ })
+ stopper.Stop()
+ }()
+ }
+}
+
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go
new file mode 100644
index 0000000000..4cd14c7a5b
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/16167
+ * Buggy version: 36fa784aa846b46c29e077634c4e362635f6e74a
+ * fix commit-id: d064942b067ab84628f79cbfda001fa3138d8d6e
+ * Flaky: 1/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach16167", Cockroach16167)
+}
+
+type PreparedStatements_cockroach16167 struct {
+ session *Session_cockroach16167
+}
+
+func (ps PreparedStatements_cockroach16167) New(e *Executor_cockroach16167) {
+ e.Prepare(ps.session)
+}
+
+type Session_cockroach16167 struct {
+ PreparedStatements PreparedStatements_cockroach16167
+}
+
+func (s *Session_cockroach16167) resetForBatch(e *Executor_cockroach16167) {
+ e.getDatabaseCache()
+}
+
+type Executor_cockroach16167 struct {
+ systemConfigCond *sync.Cond
+ systemConfigMu sync.RWMutex // L1
+}
+
+func (e *Executor_cockroach16167) Start() {
+ e.updateSystemConfig()
+}
+
+func (e *Executor_cockroach16167) execParsed(session *Session_cockroach16167) {
+ e.systemConfigCond.L.Lock() // Same as e.systemConfigMu.RLock()
+ runtime.Gosched()
+ defer e.systemConfigCond.L.Unlock()
+ runTxnAttempt_cockroach16167(e, session)
+}
+
+func (e *Executor_cockroach16167) execStmtsInCurrentTxn(session *Session_cockroach16167) {
+ e.execStmtInOpenTxn(session)
+}
+
+func (e *Executor_cockroach16167) execStmtInOpenTxn(session *Session_cockroach16167) {
+ session.PreparedStatements.New(e)
+}
+
+func (e *Executor_cockroach16167) Prepare(session *Session_cockroach16167) {
+ session.resetForBatch(e)
+}
+
+func (e *Executor_cockroach16167) getDatabaseCache() {
+ e.systemConfigMu.RLock()
+ defer e.systemConfigMu.RUnlock()
+}
+
+func (e *Executor_cockroach16167) updateSystemConfig() {
+ e.systemConfigMu.Lock()
+ runtime.Gosched()
+ defer e.systemConfigMu.Unlock()
+}
+
+func runTxnAttempt_cockroach16167(e *Executor_cockroach16167, session *Session_cockroach16167) {
+ e.execStmtsInCurrentTxn(session)
+}
+
+func NewExectorAndSession_cockroach16167() (*Executor_cockroach16167, *Session_cockroach16167) {
+ session := &Session_cockroach16167{}
+ session.PreparedStatements = PreparedStatements_cockroach16167{session}
+ e := &Executor_cockroach16167{}
+ return e, session
+}
+
+func Cockroach16167() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ e, s := NewExectorAndSession_cockroach16167()
+ e.systemConfigCond = sync.NewCond(e.systemConfigMu.RLocker())
+ go e.Start() // G2
+ e.execParsed(s)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go
new file mode 100644
index 0000000000..17b0320330
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go
@@ -0,0 +1,60 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/18101
+ * Buggy version: f7a8e2f57b6bcf00b9abaf3da00598e4acd3a57f
+ * fix commit-id: 822bd176cc725c6b50905ea615023200b395e14f
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach18101", Cockroach18101)
+}
+
+const chanSize_cockroach18101 = 6
+
+func restore_cockroach18101(ctx context.Context) bool {
+ readyForImportCh := make(chan bool, chanSize_cockroach18101)
+ go func() { // G2
+ defer close(readyForImportCh)
+ splitAndScatter_cockroach18101(ctx, readyForImportCh)
+ }()
+ for readyForImportSpan := range readyForImportCh {
+ select {
+ case <-ctx.Done():
+ return readyForImportSpan
+ }
+ }
+ return true
+}
+
+func splitAndScatter_cockroach18101(ctx context.Context, readyForImportCh chan bool) {
+ for i := 0; i < chanSize_cockroach18101+2; i++ {
+ readyForImportCh <- (false || i != 0)
+ }
+}
+
+func Cockroach18101() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ ctx, cancel := context.WithCancel(context.Background())
+ go restore_cockroach18101(ctx) // G1
+ go cancel() // helper goroutine
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go
new file mode 100644
index 0000000000..a7544bc8a4
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go
@@ -0,0 +1,125 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Cockroach2448", Cockroach2448)
+}
+
+type Stopper_cockroach2448 struct {
+ Done chan bool
+}
+
+func (s *Stopper_cockroach2448) ShouldStop() <-chan bool {
+ return s.Done
+}
+
+type EventMembershipChangeCommitted_cockroach2448 struct {
+ Callback func()
+}
+
+type MultiRaft_cockroach2448 struct {
+ stopper *Stopper_cockroach2448
+ Events chan interface{}
+ callbackChan chan func()
+}
+
+// sendEvent can be invoked many times
+func (m *MultiRaft_cockroach2448) sendEvent(event interface{}) {
+ select {
+ case m.Events <- event: // Waiting for events consumption
+ case <-m.stopper.ShouldStop():
+ }
+}
+
+type state_cockroach2448 struct {
+ *MultiRaft_cockroach2448
+}
+
+func (s *state_cockroach2448) start() {
+ for {
+ select {
+ case <-s.stopper.ShouldStop():
+ return
+ case cb := <-s.callbackChan:
+ cb()
+ default:
+ s.handleWriteResponse()
+ time.Sleep(100 * time.Microsecond)
+ }
+ }
+}
+
+func (s *state_cockroach2448) handleWriteResponse() {
+ s.sendEvent(&EventMembershipChangeCommitted_cockroach2448{
+ Callback: func() {
+ select {
+ case s.callbackChan <- func() { // Waiting for callbackChan consumption
+ time.Sleep(time.Nanosecond)
+ }:
+ case <-s.stopper.ShouldStop():
+ }
+ },
+ })
+}
+
+type Store_cockroach2448 struct {
+ multiraft *MultiRaft_cockroach2448
+}
+
+func (s *Store_cockroach2448) processRaft() {
+ for {
+ select {
+ case e := <-s.multiraft.Events:
+ switch e := e.(type) {
+ case *EventMembershipChangeCommitted_cockroach2448:
+ callback := e.Callback
+ runtime.Gosched()
+ if callback != nil {
+ callback() // Waiting for callbackChan consumption
+ }
+ }
+ case <-s.multiraft.stopper.ShouldStop():
+ return
+ }
+ }
+}
+
+func NewStoreAndState_cockroach2448() (*Store_cockroach2448, *state_cockroach2448) {
+ stopper := &Stopper_cockroach2448{
+ Done: make(chan bool),
+ }
+ mltrft := &MultiRaft_cockroach2448{
+ stopper: stopper,
+ Events: make(chan interface{}),
+ callbackChan: make(chan func()),
+ }
+ st := &state_cockroach2448{mltrft}
+ s := &Store_cockroach2448{mltrft}
+ return s, st
+}
+
+func Cockroach2448() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ s, st := NewStoreAndState_cockroach2448()
+ go s.processRaft() // G1
+ go st.start() // G2
+ }()
+ }
+}
+
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go
new file mode 100644
index 0000000000..a916d3c928
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go
@@ -0,0 +1,78 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Cockroach24808", Cockroach24808)
+}
+
+type Compactor_cockroach24808 struct {
+ ch chan struct{}
+}
+
+type Stopper_cockroach24808 struct {
+ stop sync.WaitGroup
+ stopper chan struct{}
+}
+
+func (s *Stopper_cockroach24808) RunWorker(ctx context.Context, f func(context.Context)) {
+ s.stop.Add(1)
+ go func() {
+ defer s.stop.Done()
+ f(ctx)
+ }()
+}
+
+func (s *Stopper_cockroach24808) ShouldStop() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.stopper
+}
+
+func (s *Stopper_cockroach24808) Stop() {
+ close(s.stopper)
+}
+
+func (c *Compactor_cockroach24808) Start(ctx context.Context, stopper *Stopper_cockroach24808) {
+ c.ch <- struct{}{}
+ stopper.RunWorker(ctx, func(ctx context.Context) {
+ for {
+ select {
+ case <-stopper.ShouldStop():
+ return
+ case <-c.ch:
+ }
+ }
+ })
+}
+
+func Cockroach24808() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ stopper := &Stopper_cockroach24808{stopper: make(chan struct{})}
+ defer stopper.Stop()
+
+ compactor := &Compactor_cockroach24808{ch: make(chan struct{}, 1)}
+ compactor.ch <- struct{}{}
+
+ compactor.Start(context.Background(), stopper)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go
new file mode 100644
index 0000000000..b9259c9f91
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go
@@ -0,0 +1,92 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Cockroach25456", Cockroach25456)
+}
+
+type Stopper_cockroach25456 struct {
+ quiescer chan struct{}
+}
+
+func (s *Stopper_cockroach25456) ShouldQuiesce() <-chan struct{} {
+ if s == nil {
+ return nil
+ }
+ return s.quiescer
+}
+
+func NewStopper_cockroach25456() *Stopper_cockroach25456 {
+ return &Stopper_cockroach25456{quiescer: make(chan struct{})}
+}
+
+type Store_cockroach25456 struct {
+ stopper *Stopper_cockroach25456
+ consistencyQueue *consistencyQueue_cockroach25456
+}
+
+func (s *Store_cockroach25456) Stopper() *Stopper_cockroach25456 {
+ return s.stopper
+}
+
+type Replica_cockroach25456 struct {
+ store *Store_cockroach25456
+}
+
+func NewReplica_cockroach25456(store *Store_cockroach25456) *Replica_cockroach25456 {
+ return &Replica_cockroach25456{store: store}
+}
+
+type consistencyQueue_cockroach25456 struct{}
+
+func (q *consistencyQueue_cockroach25456) process(repl *Replica_cockroach25456) {
+ <-repl.store.Stopper().ShouldQuiesce()
+}
+
+func newConsistencyQueue_cockroach25456() *consistencyQueue_cockroach25456 {
+ return &consistencyQueue_cockroach25456{}
+}
+
+type testContext_cockroach25456 struct {
+ store *Store_cockroach25456
+ repl *Replica_cockroach25456
+}
+
+func (tc *testContext_cockroach25456) StartWithStoreConfig(stopper *Stopper_cockroach25456) {
+ if tc.store == nil {
+ tc.store = &Store_cockroach25456{
+ consistencyQueue: newConsistencyQueue_cockroach25456(),
+ }
+ }
+ tc.store.stopper = stopper
+ tc.repl = NewReplica_cockroach25456(tc.store)
+}
+
+func Cockroach25456() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ stopper := NewStopper_cockroach25456()
+ tc := testContext_cockroach25456{}
+ tc.StartWithStoreConfig(stopper)
+
+ for i := 0; i < 2; i++ {
+ tc.store.consistencyQueue.process(tc.repl)
+ }
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go
new file mode 100644
index 0000000000..f00a7bd462
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go
@@ -0,0 +1,124 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "sync/atomic"
+)
+
+func init() {
+ register("Cockroach35073", Cockroach35073)
+}
+
+type ConsumerStatus_cockroach35073 uint32
+
+const (
+ NeedMoreRows_cockroach35073 ConsumerStatus_cockroach35073 = iota
+ DrainRequested_cockroach35073
+ ConsumerClosed_cockroach35073
+)
+
+const rowChannelBufSize_cockroach35073 = 16
+const outboxBufRows_cockroach35073 = 16
+
+type rowSourceBase_cockroach35073 struct {
+ consumerStatus ConsumerStatus_cockroach35073
+}
+
+func (rb *rowSourceBase_cockroach35073) consumerClosed() {
+ atomic.StoreUint32((*uint32)(&rb.consumerStatus), uint32(ConsumerClosed_cockroach35073))
+}
+
+type RowChannelMsg_cockroach35073 int
+
+type RowChannel_cockroach35073 struct {
+ rowSourceBase_cockroach35073
+ dataChan chan RowChannelMsg_cockroach35073
+}
+
+func (rc *RowChannel_cockroach35073) ConsumerClosed() {
+ rc.consumerClosed()
+ select {
+ case <-rc.dataChan:
+ default:
+ }
+}
+
+func (rc *RowChannel_cockroach35073) Push() ConsumerStatus_cockroach35073 {
+ consumerStatus := ConsumerStatus_cockroach35073(
+ atomic.LoadUint32((*uint32)(&rc.consumerStatus)))
+ switch consumerStatus {
+ case NeedMoreRows_cockroach35073:
+ rc.dataChan <- RowChannelMsg_cockroach35073(0)
+ case DrainRequested_cockroach35073:
+ case ConsumerClosed_cockroach35073:
+ }
+ return consumerStatus
+}
+
+func (rc *RowChannel_cockroach35073) InitWithNumSenders() {
+ rc.initWithBufSizeAndNumSenders(rowChannelBufSize_cockroach35073)
+}
+
+func (rc *RowChannel_cockroach35073) initWithBufSizeAndNumSenders(chanBufSize int) {
+ rc.dataChan = make(chan RowChannelMsg_cockroach35073, chanBufSize)
+}
+
+type outbox_cockroach35073 struct {
+ RowChannel_cockroach35073
+}
+
+func (m *outbox_cockroach35073) init() {
+ m.RowChannel_cockroach35073.InitWithNumSenders()
+}
+
+func (m *outbox_cockroach35073) start(wg *sync.WaitGroup) {
+ if wg != nil {
+ wg.Add(1)
+ }
+ go m.run(wg)
+}
+
+func (m *outbox_cockroach35073) run(wg *sync.WaitGroup) {
+ if wg != nil {
+ wg.Done()
+ }
+}
+
+func Cockroach35073() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ outbox := &outbox_cockroach35073{}
+ outbox.init()
+
+ var wg sync.WaitGroup
+ for i := 0; i < outboxBufRows_cockroach35073; i++ {
+ outbox.Push()
+ }
+
+ var blockedPusherWg sync.WaitGroup
+ blockedPusherWg.Add(1)
+ go func() {
+ outbox.Push()
+ blockedPusherWg.Done()
+ }()
+
+ outbox.start(&wg)
+
+ wg.Wait()
+ outbox.RowChannel_cockroach35073.Push()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go
new file mode 100644
index 0000000000..9ddcda1b62
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go
@@ -0,0 +1,135 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Cockroach35931", Cockroach35931)
+}
+
+type RowReceiver_cockroach35931 interface {
+ Push()
+}
+
+type inboundStreamInfo_cockroach35931 struct {
+ receiver RowReceiver_cockroach35931
+}
+
+type RowChannel_cockroach35931 struct {
+ dataChan chan struct{}
+}
+
+func (rc *RowChannel_cockroach35931) Push() {
+ // The buffer size can be either 0 or 1 when this function is entered.
+ // We need context sensitivity or a path-condition on the buffer size
+ // to find this bug.
+ rc.dataChan <- struct{}{}
+}
+
+func (rc *RowChannel_cockroach35931) initWithBufSizeAndNumSenders(chanBufSize int) {
+ rc.dataChan = make(chan struct{}, chanBufSize)
+}
+
+type flowEntry_cockroach35931 struct {
+ flow *Flow_cockroach35931
+ inboundStreams map[int]*inboundStreamInfo_cockroach35931
+}
+
+type flowRegistry_cockroach35931 struct {
+ sync.Mutex
+ flows map[int]*flowEntry_cockroach35931
+}
+
+func (fr *flowRegistry_cockroach35931) getEntryLocked(id int) *flowEntry_cockroach35931 {
+ entry, ok := fr.flows[id]
+ if !ok {
+ entry = &flowEntry_cockroach35931{}
+ fr.flows[id] = entry
+ }
+ return entry
+}
+
+func (fr *flowRegistry_cockroach35931) cancelPendingStreamsLocked(id int) []RowReceiver_cockroach35931 {
+ entry := fr.flows[id]
+ pendingReceivers := make([]RowReceiver_cockroach35931, 0)
+ for _, is := range entry.inboundStreams {
+ pendingReceivers = append(pendingReceivers, is.receiver)
+ }
+ return pendingReceivers
+}
+
+type Flow_cockroach35931 struct {
+ id int
+ flowRegistry *flowRegistry_cockroach35931
+ inboundStreams map[int]*inboundStreamInfo_cockroach35931
+}
+
+func (f *Flow_cockroach35931) cancel() {
+ f.flowRegistry.Lock()
+ timedOutReceivers := f.flowRegistry.cancelPendingStreamsLocked(f.id)
+ f.flowRegistry.Unlock()
+
+ for _, receiver := range timedOutReceivers {
+ receiver.Push()
+ }
+}
+
+func (fr *flowRegistry_cockroach35931) RegisterFlow(f *Flow_cockroach35931, inboundStreams map[int]*inboundStreamInfo_cockroach35931) {
+ entry := fr.getEntryLocked(f.id)
+ entry.flow = f
+ entry.inboundStreams = inboundStreams
+}
+
+func makeFlowRegistry_cockroach35931() *flowRegistry_cockroach35931 {
+ return &flowRegistry_cockroach35931{
+ flows: make(map[int]*flowEntry_cockroach35931),
+ }
+}
+
+func Cockroach35931() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ fr := makeFlowRegistry_cockroach35931()
+
+ left := &RowChannel_cockroach35931{}
+ left.initWithBufSizeAndNumSenders(1)
+ right := &RowChannel_cockroach35931{}
+ right.initWithBufSizeAndNumSenders(1)
+
+ inboundStreams := map[int]*inboundStreamInfo_cockroach35931{
+ 0: {
+ receiver: left,
+ },
+ 1: {
+ receiver: right,
+ },
+ }
+
+ left.Push()
+
+ flow := &Flow_cockroach35931{
+ id: 0,
+ flowRegistry: fr,
+ inboundStreams: inboundStreams,
+ }
+
+ fr.RegisterFlow(flow, inboundStreams)
+
+ flow.cancel()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go
new file mode 100644
index 0000000000..e419cd2fc3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go
@@ -0,0 +1,122 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/3710
+ * Buggy version: 4afdd4860fd7c3bd9e92489f84a95e5cc7d11a0d
+ * fix commit-id: cb65190f9caaf464723e7d072b1f1b69a044ef7b
+ * Flaky: 2/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("Cockroach3710", Cockroach3710)
+}
+
+type Store_cockroach3710 struct {
+ raftLogQueue *baseQueue
+ replicas map[int]*Replica_cockroach3710
+
+ mu struct {
+ sync.RWMutex
+ }
+}
+
+func (s *Store_cockroach3710) ForceRaftLogScanAndProcess() {
+ s.mu.RLock()
+ runtime.Gosched()
+ for _, r := range s.replicas {
+ s.raftLogQueue.MaybeAdd(r)
+ }
+ s.mu.RUnlock()
+}
+
+func (s *Store_cockroach3710) RaftStatus() {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+}
+
+func (s *Store_cockroach3710) processRaft() {
+ go func() {
+ for {
+ var replicas []*Replica_cockroach3710
+ s.mu.Lock()
+ for _, r := range s.replicas {
+ replicas = append(replicas, r)
+ }
+ s.mu.Unlock()
+ break
+ }
+ }()
+}
+
+type Replica_cockroach3710 struct {
+ store *Store_cockroach3710
+}
+
+type baseQueue struct {
+ sync.Mutex
+ impl *raftLogQueue
+}
+
+func (bq *baseQueue) MaybeAdd(repl *Replica_cockroach3710) {
+ bq.Lock()
+ defer bq.Unlock()
+ bq.impl.shouldQueue(repl)
+}
+
+type raftLogQueue struct{}
+
+func (*raftLogQueue) shouldQueue(r *Replica_cockroach3710) {
+ getTruncatableIndexes(r)
+}
+
+func getTruncatableIndexes(r *Replica_cockroach3710) {
+ r.store.RaftStatus()
+}
+
+func NewStore_cockroach3710() *Store_cockroach3710 {
+ rlq := &raftLogQueue{}
+ bq := &baseQueue{impl: rlq}
+ store := &Store_cockroach3710{
+ raftLogQueue: bq,
+ replicas: make(map[int]*Replica_cockroach3710),
+ }
+ r1 := &Replica_cockroach3710{store}
+ r2 := &Replica_cockroach3710{store}
+
+ makeKey := func(r *Replica_cockroach3710) int {
+ return int((uintptr(unsafe.Pointer(r)) >> 1) % 7)
+ }
+ store.replicas[makeKey(r1)] = r1
+ store.replicas[makeKey(r2)] = r2
+
+ return store
+}
+
+func Cockroach3710() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 10000; i++ {
+ go func() {
+ store := NewStore_cockroach3710()
+ go store.ForceRaftLogScanAndProcess() // G1
+ go store.processRaft() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go
new file mode 100644
index 0000000000..33f7ba7a45
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go
@@ -0,0 +1,62 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Cockroach584", Cockroach584)
+}
+
+type gossip_cockroach584 struct {
+ mu sync.Mutex // L1
+ closed bool
+}
+
+func (g *gossip_cockroach584) bootstrap() {
+ for {
+ g.mu.Lock()
+ if g.closed {
+ // Missing g.mu.Unlock
+ break
+ }
+ g.mu.Unlock()
+ }
+}
+
+func (g *gossip_cockroach584) manage() {
+ for {
+ g.mu.Lock()
+ if g.closed {
+ // Missing g.mu.Unlock
+ break
+ }
+ g.mu.Unlock()
+ }
+}
+
+func Cockroach584() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ for i := 0; i < yieldCount; i++ {
+ // Yield several times to allow the child goroutine to run.
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ g := &gossip_cockroach584{
+ closed: true,
+ }
+ go func() { // G1
+ g.bootstrap()
+ g.manage()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go
new file mode 100644
index 0000000000..80f1dd504d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/6181
+ * Buggy version: c0a232b5521565904b851699853bdbd0c670cf1e
+ * fix commit-id: d5814e4886a776bf7789b3c51b31f5206480d184
+ * Flaky: 57/100
+ */
+package main
+
+import (
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach6181", Cockroach6181)
+}
+
+type testDescriptorDB_cockroach6181 struct {
+ cache *rangeDescriptorCache_cockroach6181
+}
+
+func initTestDescriptorDB_cockroach6181() *testDescriptorDB_cockroach6181 {
+ return &testDescriptorDB_cockroach6181{&rangeDescriptorCache_cockroach6181{}}
+}
+
+type rangeDescriptorCache_cockroach6181 struct {
+ rangeCacheMu sync.RWMutex
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) LookupRangeDescriptor() {
+ rdc.rangeCacheMu.RLock()
+ runtime.Gosched()
+ io.Discard.Write([]byte(rdc.String()))
+ rdc.rangeCacheMu.RUnlock()
+ rdc.rangeCacheMu.Lock()
+ rdc.rangeCacheMu.Unlock()
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) String() string {
+ rdc.rangeCacheMu.RLock()
+ defer rdc.rangeCacheMu.RUnlock()
+ return rdc.stringLocked()
+}
+
+func (rdc *rangeDescriptorCache_cockroach6181) stringLocked() string {
+ return "something here"
+}
+
+func doLookupWithToken_cockroach6181(rc *rangeDescriptorCache_cockroach6181) {
+ rc.LookupRangeDescriptor()
+}
+
+func testRangeCacheCoalescedRequests_cockroach6181() {
+ db := initTestDescriptorDB_cockroach6181()
+ pauseLookupResumeAndAssert := func() {
+ var wg sync.WaitGroup
+ for i := 0; i < 3; i++ {
+ wg.Add(1)
+ go func() { // G2,G3,...
+ doLookupWithToken_cockroach6181(db.cache)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ }
+ pauseLookupResumeAndAssert()
+}
+
+func Cockroach6181() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testRangeCacheCoalescedRequests_cockroach6181() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go
new file mode 100644
index 0000000000..945308a76f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go
@@ -0,0 +1,183 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/7504
+ * Buggy version: bc963b438cdc3e0ad058a5282358e5aee0595e17
+ * fix commit-id: cab761b9f5ee5dee1448bc5d6b1d9f5a0ff0bad5
+ * Flaky: 1/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach7504", Cockroach7504)
+}
+
+func MakeCacheKey_cockroach7504(lease *LeaseState_cockroach7504) int {
+ return lease.id
+}
+
+type LeaseState_cockroach7504 struct {
+ mu sync.Mutex // L1
+ id int
+}
+type LeaseSet_cockroach7504 struct {
+ data []*LeaseState_cockroach7504
+}
+
+func (l *LeaseSet_cockroach7504) find(id int) *LeaseState_cockroach7504 {
+ return l.data[id]
+}
+
+func (l *LeaseSet_cockroach7504) remove(s *LeaseState_cockroach7504) {
+ for i := 0; i < len(l.data); i++ {
+ if s == l.data[i] {
+ l.data = append(l.data[:i], l.data[i+1:]...)
+ break
+ }
+ }
+}
+
+type tableState_cockroach7504 struct {
+ tableNameCache *tableNameCache_cockroach7504
+ mu sync.Mutex // L3
+ active *LeaseSet_cockroach7504
+}
+
+func (t *tableState_cockroach7504) release(lease *LeaseState_cockroach7504) {
+ t.mu.Lock() // L3
+ defer t.mu.Unlock() // L3
+
+ s := t.active.find(MakeCacheKey_cockroach7504(lease))
+ s.mu.Lock() // L1
+ runtime.Gosched()
+ defer s.mu.Unlock() // L1
+
+ t.removeLease(s)
+}
+func (t *tableState_cockroach7504) removeLease(lease *LeaseState_cockroach7504) {
+ t.active.remove(lease)
+ t.tableNameCache.remove(lease) // L1 acquire/release
+}
+
+type tableNameCache_cockroach7504 struct {
+ mu sync.Mutex // L2
+ tables map[int]*LeaseState_cockroach7504
+}
+
+func (c *tableNameCache_cockroach7504) get(id int) {
+ c.mu.Lock() // L2
+ defer c.mu.Unlock() // L2
+ lease, ok := c.tables[id]
+ if !ok {
+ return
+ }
+ if lease == nil {
+ panic("nil lease in name cache")
+ }
+ lease.mu.Lock() // L1
+ defer lease.mu.Unlock() // L1
+}
+
+func (c *tableNameCache_cockroach7504) remove(lease *LeaseState_cockroach7504) {
+ c.mu.Lock() // L2
+ runtime.Gosched()
+ defer c.mu.Unlock() // L2
+ key := MakeCacheKey_cockroach7504(lease)
+ existing, ok := c.tables[key]
+ if !ok {
+ return
+ }
+ if existing == lease {
+ delete(c.tables, key)
+ }
+}
+
+type LeaseManager_cockroach7504 struct {
+ _ [64]byte
+ tableNames *tableNameCache_cockroach7504
+ tables map[int]*tableState_cockroach7504
+}
+
+func (m *LeaseManager_cockroach7504) AcquireByName(id int) {
+ m.tableNames.get(id)
+}
+
+func (m *LeaseManager_cockroach7504) findTableState(lease *LeaseState_cockroach7504) *tableState_cockroach7504 {
+ existing, ok := m.tables[lease.id]
+ if !ok {
+ return nil
+ }
+ return existing
+}
+
+func (m *LeaseManager_cockroach7504) Release(lease *LeaseState_cockroach7504) {
+ t := m.findTableState(lease)
+ t.release(lease)
+}
+func NewLeaseManager_cockroach7504(tname *tableNameCache_cockroach7504, ts *tableState_cockroach7504) *LeaseManager_cockroach7504 {
+ mgr := &LeaseManager_cockroach7504{
+ tableNames: tname,
+ tables: make(map[int]*tableState_cockroach7504),
+ }
+ mgr.tables[0] = ts
+ return mgr
+}
+func NewLeaseSet_cockroach7504(n int) *LeaseSet_cockroach7504 {
+ lset := &LeaseSet_cockroach7504{}
+ for i := 0; i < n; i++ {
+ lease := new(LeaseState_cockroach7504)
+ lset.data = append(lset.data, lease)
+ }
+ return lset
+}
+
+func Cockroach7504() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func() {
+ leaseNum := 2
+ lset := NewLeaseSet_cockroach7504(leaseNum)
+
+ nc := &tableNameCache_cockroach7504{
+ tables: make(map[int]*LeaseState_cockroach7504),
+ }
+ for i := 0; i < leaseNum; i++ {
+ nc.tables[i] = lset.find(i)
+ }
+
+ ts := &tableState_cockroach7504{
+ tableNameCache: nc,
+ active: lset,
+ }
+
+ mgr := NewLeaseManager_cockroach7504(nc, ts)
+
+ // G1
+ go func() {
+ // lock L2-L1
+ mgr.AcquireByName(0)
+ }()
+
+ // G2
+ go func() {
+ // lock L1-L2
+ mgr.Release(lset.find(0))
+ }()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go
new file mode 100644
index 0000000000..e143a6670d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go
@@ -0,0 +1,77 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/9935
+ * Buggy version: 4df302cc3f03328395dc3fefbfba58b7718e4f2f
+ * fix commit-id: ed6a100ba38dd51b0888b9a3d3ac6bdbb26c528c
+ * Flaky: 100/100
+ * Description: This leak is caused by acquiring l.mu.Lock() twice. The fix is
+ * to release l.mu.Lock() before acquiring l.mu.Lock for the second time.
+ */
+package main
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Cockroach9935", Cockroach9935)
+}
+
+type loggingT_cockroach9935 struct {
+ mu sync.Mutex
+}
+
+func (l *loggingT_cockroach9935) outputLogEntry() {
+ l.mu.Lock()
+ if err := l.createFile(); err != nil {
+ l.exit(err)
+ }
+ l.mu.Unlock()
+}
+
+func (l *loggingT_cockroach9935) createFile() error {
+ if rand.Intn(8)%4 > 0 {
+ return errors.New("")
+ }
+ return nil
+}
+
+func (l *loggingT_cockroach9935) exit(err error) {
+ l.mu.Lock() // Blocked forever
+ defer l.mu.Unlock()
+}
+
+// Example of goroutine leak trace:
+//
+// G1
+//----------------------------
+// l.outputLogEntry()
+// l.mu.Lock()
+// l.createFile()
+// l.exit()
+// l.mu.Lock()
+//-----------G1 leaks---------
+
+func Cockroach9935() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ l := &loggingT_cockroach9935{}
+ go l.outputLogEntry() // G1
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go
new file mode 100644
index 0000000000..7d56642d5e
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go
@@ -0,0 +1,72 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd10492", Etcd10492)
+}
+
+type Checkpointer_etcd10492 func(ctx context.Context)
+
+type lessor_etcd10492 struct {
+ mu sync.RWMutex
+ cp Checkpointer_etcd10492
+ checkpointInterval time.Duration
+}
+
+func (le *lessor_etcd10492) Checkpoint() {
+ le.mu.Lock() // Lock acquired twice here
+ defer le.mu.Unlock()
+}
+
+func (le *lessor_etcd10492) SetCheckpointer(cp Checkpointer_etcd10492) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.cp = cp
+}
+
+func (le *lessor_etcd10492) Renew() {
+ le.mu.Lock()
+ unlock := func() { le.mu.Unlock() }
+ defer func() { unlock() }()
+
+ if le.cp != nil {
+ le.cp(context.Background())
+ }
+}
+
+func Etcd10492() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ le := &lessor_etcd10492{
+ checkpointInterval: 0,
+ }
+ fakerCheckerpointer_etcd10492 := func(ctx context.Context) {
+ le.Checkpoint()
+ }
+ le.SetCheckpointer(fakerCheckerpointer_etcd10492)
+ le.mu.Lock()
+ le.mu.Unlock()
+ le.Renew()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go
new file mode 100644
index 0000000000..868e926e66
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go
@@ -0,0 +1,126 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Etcd5509", Etcd5509)
+}
+
+var ErrConnClosed_etcd5509 error
+
+type Client_etcd5509 struct {
+ mu sync.RWMutex
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (c *Client_etcd5509) Close() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.cancel == nil {
+ return
+ }
+ c.cancel()
+ c.cancel = nil
+ c.mu.Unlock()
+ c.mu.Lock()
+}
+
+type remoteClient_etcd5509 struct {
+ client *Client_etcd5509
+ mu sync.Mutex
+}
+
+func (r *remoteClient_etcd5509) acquire(ctx context.Context) error {
+ for {
+ r.client.mu.RLock()
+ closed := r.client.cancel == nil
+ r.mu.Lock()
+ r.mu.Unlock()
+ if closed {
+ return ErrConnClosed_etcd5509 // Missing RUnlock before return
+ }
+ r.client.mu.RUnlock()
+ }
+}
+
+type kv_etcd5509 struct {
+ rc *remoteClient_etcd5509
+}
+
+func (kv *kv_etcd5509) Get(ctx context.Context) error {
+ return kv.Do(ctx)
+}
+
+func (kv *kv_etcd5509) Do(ctx context.Context) error {
+ for {
+ err := kv.do(ctx)
+ if err == nil {
+ return nil
+ }
+ return err
+ }
+}
+
+func (kv *kv_etcd5509) do(ctx context.Context) error {
+ err := kv.getRemote(ctx)
+ return err
+}
+
+func (kv *kv_etcd5509) getRemote(ctx context.Context) error {
+ return kv.rc.acquire(ctx)
+}
+
+type KV interface {
+ Get(ctx context.Context) error
+ Do(ctx context.Context) error
+}
+
+func NewKV_etcd5509(c *Client_etcd5509) KV {
+ return &kv_etcd5509{rc: &remoteClient_etcd5509{
+ client: c,
+ }}
+}
+
+func Etcd5509() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ ctx, _ := context.WithCancel(context.TODO())
+ cli := &Client_etcd5509{
+ ctx: ctx,
+ }
+ kv := NewKV_etcd5509(cli)
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ err := kv.Get(context.TODO())
+ if err != nil && err != ErrConnClosed_etcd5509 {
+ io.Discard.Write([]byte("Expect ErrConnClosed"))
+ }
+ }()
+
+ runtime.Gosched()
+ cli.Close()
+
+ <-donec
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go
new file mode 100644
index 0000000000..afbbe35104
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go
@@ -0,0 +1,100 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Etcd6708", Etcd6708)
+}
+
+type EndpointSelectionMode_etcd6708 int
+
+const (
+ EndpointSelectionRandom_etcd6708 EndpointSelectionMode_etcd6708 = iota
+ EndpointSelectionPrioritizeLeader_etcd6708
+)
+
+type MembersAPI_etcd6708 interface {
+ Leader(ctx context.Context)
+}
+
+type Client_etcd6708 interface {
+ Sync(ctx context.Context)
+ SetEndpoints()
+ httpClient_etcd6708
+}
+
+type httpClient_etcd6708 interface {
+ Do(context.Context)
+}
+
+type httpClusterClient_etcd6708 struct {
+ sync.RWMutex
+ selectionMode EndpointSelectionMode_etcd6708
+}
+
+func (c *httpClusterClient_etcd6708) getLeaderEndpoint() {
+ mAPI := NewMembersAPI_etcd6708(c)
+ mAPI.Leader(context.Background())
+}
+
+func (c *httpClusterClient_etcd6708) SetEndpoints() {
+ switch c.selectionMode {
+ case EndpointSelectionRandom_etcd6708:
+ case EndpointSelectionPrioritizeLeader_etcd6708:
+ c.getLeaderEndpoint()
+ }
+}
+
+func (c *httpClusterClient_etcd6708) Do(ctx context.Context) {
+ c.RLock()
+ c.RUnlock()
+}
+
+func (c *httpClusterClient_etcd6708) Sync(ctx context.Context) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.SetEndpoints()
+}
+
+type httpMembersAPI_etcd6708 struct {
+ client httpClient_etcd6708
+}
+
+func (m *httpMembersAPI_etcd6708) Leader(ctx context.Context) {
+ m.client.Do(ctx)
+}
+
+func NewMembersAPI_etcd6708(c Client_etcd6708) MembersAPI_etcd6708 {
+ return &httpMembersAPI_etcd6708{
+ client: c,
+ }
+}
+
+func Etcd6708() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ hc := &httpClusterClient_etcd6708{
+ selectionMode: EndpointSelectionPrioritizeLeader_etcd6708,
+ }
+ hc.Sync(context.Background())
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go
new file mode 100644
index 0000000000..0798ab23d3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go
@@ -0,0 +1,81 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/pull/6857
+ * Buggy version: 7c8f13aed7fe251e7066ed6fc1a090699c2cae0e
+ * fix commit-id: 7afc490c95789c408fbc256d8e790273d331c984
+ * Flaky: 19/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Etcd6857", Etcd6857)
+}
+
+type Status_etcd6857 struct{}
+
+type node_etcd6857 struct {
+ status chan chan Status_etcd6857
+ stop chan struct{}
+ done chan struct{}
+}
+
+func (n *node_etcd6857) Status() Status_etcd6857 {
+ c := make(chan Status_etcd6857)
+ n.status <- c
+ return <-c
+}
+
+func (n *node_etcd6857) run() {
+ for {
+ select {
+ case c := <-n.status:
+ c <- Status_etcd6857{}
+ case <-n.stop:
+ close(n.done)
+ return
+ }
+ }
+}
+
+func (n *node_etcd6857) Stop() {
+ select {
+ case n.stop <- struct{}{}:
+ case <-n.done:
+ return
+ }
+ <-n.done
+}
+
+func NewNode_etcd6857() *node_etcd6857 {
+ return &node_etcd6857{
+ status: make(chan chan Status_etcd6857),
+ stop: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+}
+
+func Etcd6857() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i <= 100; i++ {
+ go func() {
+ n := NewNode_etcd6857()
+ go n.run() // G1
+ go n.Status() // G2
+ go n.Stop() // G3
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go
new file mode 100644
index 0000000000..1846d0f260
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go
@@ -0,0 +1,98 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/commit/7618fdd1d642e47cac70c03f637b0fd798a53a6e
+ * Buggy version: 377f19b0031f9c0aafe2aec28b6f9019311f52f9
+ * fix commit-id: 7618fdd1d642e47cac70c03f637b0fd798a53a6e
+ * Flaky: 9/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd6873", Etcd6873)
+}
+
+type watchBroadcast_etcd6873 struct{}
+
+type watchBroadcasts_etcd6873 struct {
+ mu sync.Mutex
+ updatec chan *watchBroadcast_etcd6873
+ donec chan struct{}
+}
+
+func newWatchBroadcasts_etcd6873() *watchBroadcasts_etcd6873 {
+ wbs := &watchBroadcasts_etcd6873{
+ updatec: make(chan *watchBroadcast_etcd6873, 1),
+ donec: make(chan struct{}),
+ }
+ go func() { // G2
+ defer close(wbs.donec)
+ for wb := range wbs.updatec {
+ wbs.coalesce(wb)
+ }
+ }()
+ return wbs
+}
+
+func (wbs *watchBroadcasts_etcd6873) coalesce(wb *watchBroadcast_etcd6873) {
+ wbs.mu.Lock()
+ wbs.mu.Unlock()
+}
+
+func (wbs *watchBroadcasts_etcd6873) stop() {
+ wbs.mu.Lock()
+ defer wbs.mu.Unlock()
+ close(wbs.updatec)
+ <-wbs.donec
+}
+
+func (wbs *watchBroadcasts_etcd6873) update(wb *watchBroadcast_etcd6873) {
+ select {
+ case wbs.updatec <- wb:
+ default:
+ }
+}
+
+// Example of goroutine leak trace:
+//
+// G1 G2 G3
+//---------------------------------------------------------
+// newWatchBroadcasts()
+// wbs.update()
+// wbs.updatec <-
+// return
+// <-wbs.updatec
+// wbs.coalesce()
+// wbs.stop()
+// wbs.mu.Lock()
+// close(wbs.updatec)
+// <-wbs.donec
+// wbs.mu.Lock()
+//---------------------G2,G3 leak-------------------------
+//
+
+func Etcd6873() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ wbs := newWatchBroadcasts_etcd6873() // G1
+ wbs.update(&watchBroadcast_etcd6873{})
+ go wbs.stop() // G3
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go
new file mode 100644
index 0000000000..3c8d58a221
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go
@@ -0,0 +1,163 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/etcd-io/etcd/pull/7492
+ * Buggy version: 51939650057d602bb5ab090633138fffe36854dc
+ * fix commit-id: 1b1fabef8ffec606909f01c3983300fff539f214
+ * Flaky: 40/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd7492", Etcd7492)
+}
+
+type TokenProvider_etcd7492 interface {
+ assign()
+ enable()
+ disable()
+}
+
+type simpleTokenTTLKeeper_etcd7492 struct {
+ tokens map[string]time.Time
+ addSimpleTokenCh chan struct{}
+ stopCh chan chan struct{}
+ deleteTokenFunc func(string)
+}
+
+type authStore_etcd7492 struct {
+ tokenProvider TokenProvider_etcd7492
+}
+
+func (as *authStore_etcd7492) Authenticate() {
+ as.tokenProvider.assign()
+}
+
+func NewSimpleTokenTTLKeeper_etcd7492(deletefunc func(string)) *simpleTokenTTLKeeper_etcd7492 {
+ stk := &simpleTokenTTLKeeper_etcd7492{
+ tokens: make(map[string]time.Time),
+ addSimpleTokenCh: make(chan struct{}, 1),
+ stopCh: make(chan chan struct{}),
+ deleteTokenFunc: deletefunc,
+ }
+ go stk.run() // G1
+ return stk
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) run() {
+ tokenTicker := time.NewTicker(time.Nanosecond)
+ defer tokenTicker.Stop()
+ for {
+ select {
+ case <-tm.addSimpleTokenCh:
+ runtime.Gosched()
+ /// Making tm.tokens non-empty is enough
+ tm.tokens["1"] = time.Now()
+ case <-tokenTicker.C:
+ runtime.Gosched()
+ for t, _ := range tm.tokens {
+ tm.deleteTokenFunc(t)
+ delete(tm.tokens, t)
+ }
+ case waitCh := <-tm.stopCh:
+ waitCh <- struct{}{}
+ return
+ }
+ }
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) addSimpleToken() {
+ tm.addSimpleTokenCh <- struct{}{}
+ runtime.Gosched()
+}
+
+func (tm *simpleTokenTTLKeeper_etcd7492) stop() {
+ waitCh := make(chan struct{})
+ tm.stopCh <- waitCh
+ <-waitCh
+ close(tm.stopCh)
+}
+
+type tokenSimple_etcd7492 struct {
+ simpleTokenKeeper *simpleTokenTTLKeeper_etcd7492
+ simpleTokensMu sync.RWMutex
+}
+
+func (t *tokenSimple_etcd7492) assign() {
+ t.assignSimpleTokenToUser()
+}
+
+func (t *tokenSimple_etcd7492) assignSimpleTokenToUser() {
+ t.simpleTokensMu.Lock()
+ runtime.Gosched()
+ t.simpleTokenKeeper.addSimpleToken()
+ t.simpleTokensMu.Unlock()
+}
+func newDeleterFunc(t *tokenSimple_etcd7492) func(string) {
+ return func(tk string) {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ }
+}
+
+func (t *tokenSimple_etcd7492) enable() {
+ t.simpleTokenKeeper = NewSimpleTokenTTLKeeper_etcd7492(newDeleterFunc(t))
+}
+
+func (t *tokenSimple_etcd7492) disable() {
+ if t.simpleTokenKeeper != nil {
+ t.simpleTokenKeeper.stop()
+ t.simpleTokenKeeper = nil
+ }
+ t.simpleTokensMu.Lock()
+ t.simpleTokensMu.Unlock()
+}
+
+func newTokenProviderSimple_etcd7492() *tokenSimple_etcd7492 {
+ return &tokenSimple_etcd7492{}
+}
+
+func setupAuthStore_etcd7492() (store *authStore_etcd7492, teardownfunc func()) {
+ as := &authStore_etcd7492{
+ tokenProvider: newTokenProviderSimple_etcd7492(),
+ }
+ as.tokenProvider.enable()
+ tearDown := func() {
+ as.tokenProvider.disable()
+ }
+ return as, tearDown
+}
+
+func Etcd7492() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func() {
+ as, tearDown := setupAuthStore_etcd7492()
+ defer tearDown()
+ var wg sync.WaitGroup
+ wg.Add(3)
+ for i := 0; i < 3; i++ {
+ go func() { // G2
+ as.Authenticate()
+ defer wg.Done()
+ }()
+ }
+ wg.Wait()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go
new file mode 100644
index 0000000000..0a96d7f047
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: etcd
+ * Issue or PR : https://github.com/coreos/etcd/pull/7902
+ * Buggy version: dfdaf082c51ba14861267f632f6af795a27eb4ef
+ * fix commit-id: 87d99fe0387ee1df1cf1811d88d37331939ef4ae
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Etcd7902", Etcd7902)
+}
+
+type roundClient_etcd7902 struct {
+ progress int
+ acquire func()
+ validate func()
+ release func()
+}
+
+func runElectionFunc_etcd7902() {
+ rcs := make([]roundClient_etcd7902, 3)
+ nextc := make(chan bool)
+ for i := range rcs {
+ var rcNextc chan bool
+ setRcNextc := func() {
+ rcNextc = nextc
+ }
+ rcs[i].acquire = func() {}
+ rcs[i].validate = func() {
+ setRcNextc()
+ }
+ rcs[i].release = func() {
+ if i == 0 { // Assume the first roundClient is the leader
+ close(nextc)
+ nextc = make(chan bool)
+ }
+ <-rcNextc // Follower is blocking here
+ }
+ }
+ doRounds_etcd7902(rcs, 100)
+}
+
+func doRounds_etcd7902(rcs []roundClient_etcd7902, rounds int) {
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+ wg.Add(len(rcs))
+ for i := range rcs {
+ go func(rc *roundClient_etcd7902) { // G2,G3
+ defer wg.Done()
+ for rc.progress < rounds || rounds <= 0 {
+ rc.acquire()
+ mu.Lock()
+ rc.validate()
+ mu.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ rc.progress++
+ mu.Lock()
+ rc.release()
+ mu.Unlock()
+ }
+ }(&rcs[i])
+ }
+ wg.Wait()
+}
+
+func Etcd7902() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go runElectionFunc_etcd7902() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go
new file mode 100644
index 0000000000..ec5491e438
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go
@@ -0,0 +1,111 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1275
+ * Buggy version: (missing)
+ * fix commit-id: 0669f3f89e0330e94bb13fa1ce8cc704aab50c9c
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "io"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Grpc1275", Grpc1275)
+}
+
+type recvBuffer_grpc1275 struct {
+ c chan bool
+}
+
+func (b *recvBuffer_grpc1275) get() <-chan bool {
+ return b.c
+}
+
+type recvBufferReader_grpc1275 struct {
+ recv *recvBuffer_grpc1275
+}
+
+func (r *recvBufferReader_grpc1275) Read(p []byte) (int, error) {
+ select {
+ case <-r.recv.get():
+ }
+ return 0, nil
+}
+
+type Stream_grpc1275 struct {
+ trReader io.Reader
+}
+
+func (s *Stream_grpc1275) Read(p []byte) (int, error) {
+ return io.ReadFull(s.trReader, p)
+}
+
+type http2Client_grpc1275 struct{}
+
+func (t *http2Client_grpc1275) CloseStream(s *Stream_grpc1275) {
+ // It is the client.CloseStream() method called by the
+ // main goroutine that should send the message, but it
+ // is not. The patch is to send out this message.
+}
+
+func (t *http2Client_grpc1275) NewStream() *Stream_grpc1275 {
+ return &Stream_grpc1275{
+ trReader: &recvBufferReader_grpc1275{
+ recv: &recvBuffer_grpc1275{
+ c: make(chan bool),
+ },
+ },
+ }
+}
+
+func testInflightStreamClosing_grpc1275() {
+ client := &http2Client_grpc1275{}
+ stream := client.NewStream()
+ donec := make(chan bool)
+ go func() { // G2
+ defer close(donec)
+ stream.Read([]byte{1})
+ }()
+
+ client.CloseStream(stream)
+
+ timeout := time.NewTimer(300 * time.Nanosecond)
+ select {
+ case <-donec:
+ if !timeout.Stop() {
+ <-timeout.C
+ }
+ case <-timeout.C:
+ }
+}
+
+///
+/// G1 G2
+/// testInflightStreamClosing()
+/// stream.Read()
+/// io.ReadFull()
+/// <- r.recv.get()
+/// CloseStream()
+/// <- donec
+/// ------------G1 timeout, G2 leak---------------------
+///
+
+func Grpc1275() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ testInflightStreamClosing_grpc1275() // G1
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
new file mode 100644
index 0000000000..777534a788
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1424
+ * Buggy version: 39c8c3866d926d95e11c03508bf83d00f2963f91
+ * fix commit-id: 64bd0b04a7bb1982078bae6a2ab34c226125fbc1
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc1424", Grpc1424)
+}
+
+type Balancer_grpc1424 interface {
+ Notify() <-chan bool
+}
+
+type roundRobin_grpc1424 struct {
+ mu sync.Mutex
+ addrCh chan bool
+}
+
+func (rr *roundRobin_grpc1424) Notify() <-chan bool {
+ return rr.addrCh
+}
+
+type addrConn_grpc1424 struct {
+ mu sync.Mutex
+}
+
+func (ac *addrConn_grpc1424) tearDown() {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+}
+
+type dialOption_grpc1424 struct {
+ balancer Balancer_grpc1424
+}
+
+type ClientConn_grpc1424 struct {
+ dopts dialOption_grpc1424
+ conns []*addrConn_grpc1424
+}
+
+func (cc *ClientConn_grpc1424) lbWatcher(doneChan chan bool) {
+ for addr := range cc.dopts.balancer.Notify() {
+ if addr {
+ // nop, make compiler happy
+ }
+ var (
+ del []*addrConn_grpc1424
+ )
+ for _, a := range cc.conns {
+ del = append(del, a)
+ }
+ for _, c := range del {
+ c.tearDown()
+ }
+ }
+}
+
+func NewClientConn_grpc1424() *ClientConn_grpc1424 {
+ cc := &ClientConn_grpc1424{
+ dopts: dialOption_grpc1424{
+ &roundRobin_grpc1424{addrCh: make(chan bool)},
+ },
+ }
+ return cc
+}
+
+func DialContext_grpc1424() {
+ cc := NewClientConn_grpc1424()
+ waitC := make(chan error, 1)
+ go func() { // G2
+ defer close(waitC)
+ ch := cc.dopts.balancer.Notify()
+ if ch != nil {
+ doneChan := make(chan bool)
+ go cc.lbWatcher(doneChan) // G3
+ <-doneChan
+ }
+ }()
+ /// close addrCh
+ close(cc.dopts.balancer.(*roundRobin_grpc1424).addrCh)
+}
+
+func Grpc1424() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go DialContext_grpc1424() // G1
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go
new file mode 100644
index 0000000000..bc658b408d
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go
@@ -0,0 +1,84 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/1460
+ * Buggy version: 7db1564ba1229bc42919bb1f6d9c4186f3aa8678
+ * fix commit-id: e605a1ecf24b634f94f4eefdab10a9ada98b70dd
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc1460", Grpc1460)
+}
+
+type Stream_grpc1460 struct{}
+
+type http2Client_grpc1460 struct {
+ mu sync.Mutex
+ awakenKeepalive chan struct{}
+ activeStream []*Stream_grpc1460
+}
+
+func (t *http2Client_grpc1460) keepalive() {
+ t.mu.Lock()
+ if len(t.activeStream) < 1 {
+ <-t.awakenKeepalive
+ runtime.Gosched()
+ t.mu.Unlock()
+ } else {
+ t.mu.Unlock()
+ }
+}
+
+func (t *http2Client_grpc1460) NewStream() {
+ t.mu.Lock()
+ runtime.Gosched()
+ t.activeStream = append(t.activeStream, &Stream_grpc1460{})
+ if len(t.activeStream) == 1 {
+ select {
+ case t.awakenKeepalive <- struct{}{}:
+ default:
+ }
+ }
+ t.mu.Unlock()
+}
+
+///
+/// G1 G2
+/// client.keepalive()
+/// client.NewStream()
+/// t.mu.Lock()
+/// <-t.awakenKeepalive
+/// t.mu.Lock()
+/// ---------------G1, G2 deadlock--------------
+///
+
+func Grpc1460() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ client := &http2Client_grpc1460{
+ awakenKeepalive: make(chan struct{}),
+ }
+ go client.keepalive() //G1
+ go client.NewStream() //G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go
new file mode 100644
index 0000000000..0523b9509f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go
@@ -0,0 +1,123 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+// This test case is a reproduction of grpc/3017.
+//
+// It is a goroutine leak that also simultaneously engages many GC assists.
+// Testing runtime behaviour when pivoting between regular and goroutine leak detection modes.
+
+func init() {
+ register("Grpc3017", Grpc3017)
+}
+
+type Address_grpc3017 int
+type SubConn_grpc3017 int
+
+type subConnCacheEntry_grpc3017 struct {
+ sc SubConn_grpc3017
+ cancel func()
+ abortDeleting bool
+}
+
+type lbCacheClientConn_grpc3017 struct {
+ mu sync.Mutex // L1
+ timeout time.Duration
+ subConnCache map[Address_grpc3017]*subConnCacheEntry_grpc3017
+ subConnToAddr map[SubConn_grpc3017]Address_grpc3017
+}
+
+func (ccc *lbCacheClientConn_grpc3017) NewSubConn(addrs []Address_grpc3017) SubConn_grpc3017 {
+ if len(addrs) != 1 {
+ return SubConn_grpc3017(1)
+ }
+ addrWithoutMD := addrs[0]
+ ccc.mu.Lock() // L1
+ defer ccc.mu.Unlock()
+ if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
+ entry.cancel()
+ delete(ccc.subConnCache, addrWithoutMD)
+ return entry.sc
+ }
+ scNew := SubConn_grpc3017(1)
+ ccc.subConnToAddr[scNew] = addrWithoutMD
+ return scNew
+}
+
+func (ccc *lbCacheClientConn_grpc3017) RemoveSubConn(sc SubConn_grpc3017) {
+ ccc.mu.Lock() // L1
+ defer ccc.mu.Unlock()
+ addr, ok := ccc.subConnToAddr[sc]
+ if !ok {
+ return
+ }
+
+ if entry, ok := ccc.subConnCache[addr]; ok {
+ if entry.sc != sc {
+ delete(ccc.subConnToAddr, sc)
+ }
+ return
+ }
+
+ entry := &subConnCacheEntry_grpc3017{
+ sc: sc,
+ }
+ ccc.subConnCache[addr] = entry
+
+ timer := time.AfterFunc(ccc.timeout, func() { // G3
+ runtime.Gosched()
+ ccc.mu.Lock() // L1
+ if entry.abortDeleting {
+ return // Missing unlock
+ }
+ delete(ccc.subConnToAddr, sc)
+ delete(ccc.subConnCache, addr)
+ ccc.mu.Unlock()
+ })
+
+ entry.cancel = func() {
+ if !timer.Stop() {
+ entry.abortDeleting = true
+ }
+ }
+}
+
+func Grpc3017() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { //G1
+ done := make(chan struct{})
+
+ ccc := &lbCacheClientConn_grpc3017{
+ timeout: time.Nanosecond,
+ subConnCache: make(map[Address_grpc3017]*subConnCacheEntry_grpc3017),
+ subConnToAddr: make(map[SubConn_grpc3017]Address_grpc3017),
+ }
+
+ sc := ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
+ go func() { // G2
+ for i := 0; i < 10000; i++ {
+ ccc.RemoveSubConn(sc)
+ sc = ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)})
+ }
+ close(done)
+ }()
+ <-done
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go
new file mode 100644
index 0000000000..5f6201ec80
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go
@@ -0,0 +1,65 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/660
+ * Buggy version: db85417dd0de6cc6f583672c6175a7237e5b5dd2
+ * fix commit-id: ceacfbcbc1514e4e677932fd55938ac455d182fb
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Grpc660", Grpc660)
+}
+
+type benchmarkClient_grpc660 struct {
+ stop chan bool
+}
+
+func (bc *benchmarkClient_grpc660) doCloseLoopUnary() {
+ for {
+ done := make(chan bool)
+ go func() { // G2
+ if rand.Intn(10) > 7 {
+ done <- false
+ return
+ }
+ done <- true
+ }()
+ select {
+ case <-bc.stop:
+ return
+ case <-done:
+ }
+ }
+}
+
+func Grpc660() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ bc := &benchmarkClient_grpc660{
+ stop: make(chan bool),
+ }
+ go bc.doCloseLoopUnary() // G1
+ go func() { // helper goroutine
+ bc.stop <- true
+ }()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go
new file mode 100644
index 0000000000..72005cc844
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go
@@ -0,0 +1,74 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Grpc795", Grpc795)
+}
+
+type Server_grpc795 struct {
+ mu sync.Mutex
+ drain bool
+}
+
+func (s *Server_grpc795) GracefulStop() {
+ s.mu.Lock()
+ if s.drain {
+ s.mu.Lock()
+ return
+ }
+ s.drain = true
+ s.mu.Unlock()
+}
+func (s *Server_grpc795) Serve() {
+ s.mu.Lock()
+ s.mu.Unlock()
+}
+
+func NewServer_grpc795() *Server_grpc795 {
+ return &Server_grpc795{}
+}
+
+type test_grpc795 struct {
+ srv *Server_grpc795
+}
+
+func (te *test_grpc795) startServer() {
+ s := NewServer_grpc795()
+ te.srv = s
+ go s.Serve()
+}
+
+func newTest_grpc795() *test_grpc795 {
+ return &test_grpc795{}
+}
+
+func testServerGracefulStopIdempotent_grpc795() {
+ te := newTest_grpc795()
+
+ te.startServer()
+
+ for i := 0; i < 3; i++ {
+ te.srv.GracefulStop()
+ }
+}
+
+func Grpc795() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testServerGracefulStopIdempotent_grpc795()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go
new file mode 100644
index 0000000000..188b3b88ba
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: grpc-go
+ * Issue or PR : https://github.com/grpc/grpc-go/pull/862
+ * Buggy version: d8f4ebe77f6b7b6403d7f98626de8a534f9b93a7
+ * fix commit-id: dd5645bebff44f6b88780bb949022a09eadd7dae
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "context"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Grpc862", Grpc862)
+}
+
+type ClientConn_grpc862 struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ conns []*addrConn_grpc862
+}
+
+func (cc *ClientConn_grpc862) Close() {
+ cc.cancel()
+ conns := cc.conns
+ cc.conns = nil
+ for _, ac := range conns {
+ ac.tearDown()
+ }
+}
+
+func (cc *ClientConn_grpc862) resetAddrConn() {
+ ac := &addrConn_grpc862{
+ cc: cc,
+ }
+ cc.conns = append(cc.conns, ac)
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ ac.resetTransport()
+}
+
+type addrConn_grpc862 struct {
+ cc *ClientConn_grpc862
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (ac *addrConn_grpc862) resetTransport() {
+ for retries := 1; ; retries++ {
+ _ = 2 * time.Nanosecond * time.Duration(retries)
+ timeout := 10 * time.Nanosecond
+ _, cancel := context.WithTimeout(ac.ctx, timeout)
+ _ = time.Now()
+ cancel()
+ <-ac.ctx.Done()
+ return
+ }
+}
+
+func (ac *addrConn_grpc862) tearDown() {
+ ac.cancel()
+}
+
+func DialContext_grpc862(ctx context.Context) (conn *ClientConn_grpc862) {
+ cc := &ClientConn_grpc862{}
+ cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ defer func() {
+ select {
+ case <-ctx.Done():
+ if conn != nil {
+ conn.Close()
+ }
+ conn = nil
+ default:
+ }
+ }()
+ go func() { // G2
+ cc.resetAddrConn()
+ }()
+ return conn
+}
+
+func Grpc862() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ go DialContext_grpc862(ctx) // G1
+ go cancel() // helper goroutine
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go
new file mode 100644
index 0000000000..3804692a8b
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go
@@ -0,0 +1,81 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Hugo3251", Hugo3251)
+}
+
+type remoteLock_hugo3251 struct {
+ sync.RWMutex // L1
+ m map[string]*sync.Mutex // L2
+}
+
+func (l *remoteLock_hugo3251) URLLock(url string) {
+ l.Lock() // L1
+ if _, ok := l.m[url]; !ok {
+ l.m[url] = &sync.Mutex{}
+ }
+ l.m[url].Lock() // L2
+ runtime.Gosched()
+ l.Unlock() // L1
+ // runtime.Gosched()
+}
+
+func (l *remoteLock_hugo3251) URLUnlock(url string) {
+ l.RLock() // L1
+ defer l.RUnlock() // L1
+ if um, ok := l.m[url]; ok {
+ um.Unlock() // L2
+ }
+}
+
+func resGetRemote_hugo3251(remoteURLLock *remoteLock_hugo3251, url string) error {
+ remoteURLLock.URLLock(url)
+ defer func() { remoteURLLock.URLUnlock(url) }()
+
+ return nil
+}
+
+func Hugo3251() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(time.Second)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 11; i++ {
+ go func() { // G1
+ url := "http://Foo.Bar/foo_Bar-Foo"
+ remoteURLLock := &remoteLock_hugo3251{m: make(map[string]*sync.Mutex)}
+ for range []bool{false, true} {
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(gor int) { // G2
+ defer wg.Done()
+ for j := 0; j < 200; j++ {
+ err := resGetRemote_hugo3251(remoteURLLock, url)
+ if err != nil {
+ fmt.Errorf("Error getting resource content: %s", err)
+ }
+ time.Sleep(300 * time.Nanosecond)
+ }
+ }(i)
+ }
+ wg.Wait()
+ }
+ }()
+ }
+} \ No newline at end of file
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go
new file mode 100644
index 0000000000..6a1bbe9a3f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go
@@ -0,0 +1,317 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "log"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Hugo5379", Hugo5379)
+}
+
+type shortcodeHandler_hugo5379 struct {
+ p *PageWithoutContent_hugo5379
+ contentShortcodes map[int]func() error
+ contentShortcodesDelta map[int]func() error
+ init sync.Once // O1
+}
+
+func (s *shortcodeHandler_hugo5379) executeShortcodesForDelta(p *PageWithoutContent_hugo5379) error {
+ for k, _ := range s.contentShortcodesDelta {
+ render := s.contentShortcodesDelta[k]
+ if err := render(); err != nil {
+ continue
+ }
+ }
+ return nil
+}
+
+func (s *shortcodeHandler_hugo5379) updateDelta() {
+ s.init.Do(func() {
+ s.contentShortcodes = createShortcodeRenderers_hugo5379(s.p.withoutContent())
+ })
+
+ delta := make(map[int]func() error)
+
+ for k, v := range s.contentShortcodes {
+ if _, ok := delta[k]; !ok {
+ delta[k] = v
+ }
+ }
+
+ s.contentShortcodesDelta = delta
+}
+
+type Page_hugo5379 struct {
+ *pageInit_hugo5379
+ *pageContentInit_hugo5379
+ pageWithoutContent *PageWithoutContent_hugo5379
+ contentInit sync.Once // O2
+ contentInitMu sync.Mutex // L1
+ shortcodeState *shortcodeHandler_hugo5379
+}
+
+func (p *Page_hugo5379) WordCount() {
+ p.initContentPlainAndMeta()
+}
+
+func (p *Page_hugo5379) initContentPlainAndMeta() {
+ p.initContent()
+ p.initPlain(true)
+}
+
+func (p *Page_hugo5379) initPlain(lock bool) {
+ p.plainInit.Do(func() {
+ if lock {
+ /// Double locking here.
+ p.contentInitMu.Lock()
+ defer p.contentInitMu.Unlock()
+ }
+ })
+}
+
+func (p *Page_hugo5379) withoutContent() *PageWithoutContent_hugo5379 {
+ p.pageInit_hugo5379.withoutContentInit.Do(func() {
+ p.pageWithoutContent = &PageWithoutContent_hugo5379{Page_hugo5379: p}
+ })
+ return p.pageWithoutContent
+}
+
+func (p *Page_hugo5379) prepareForRender() error {
+ var err error
+ if err = handleShortcodes_hugo5379(p.withoutContent()); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (p *Page_hugo5379) setContentInit() {
+ p.shortcodeState.updateDelta()
+}
+
+func (p *Page_hugo5379) initContent() {
+ p.contentInit.Do(func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+ defer cancel()
+ c := make(chan error, 1)
+
+ go func() { // G2
+ var err error
+ p.contentInitMu.Lock() // first lock here
+ defer p.contentInitMu.Unlock()
+
+ err = p.prepareForRender()
+ if err != nil {
+ c <- err
+ return
+ }
+ c <- err
+ }()
+
+ select {
+ case <-ctx.Done():
+ case <-c:
+ }
+ })
+}
+
+type PageWithoutContent_hugo5379 struct {
+ *Page_hugo5379
+}
+
+type pageInit_hugo5379 struct {
+ withoutContentInit sync.Once
+}
+
+type pageContentInit_hugo5379 struct {
+ contentInit sync.Once // O3
+ plainInit sync.Once // O4
+}
+
+type HugoSites_hugo5379 struct {
+ Sites []*Site_hugo5379
+}
+
+func (h *HugoSites_hugo5379) render() {
+ for _, s := range h.Sites {
+ for _, s2 := range h.Sites {
+ s2.preparePagesForRender()
+ }
+ s.renderPages()
+ }
+}
+
+func (h *HugoSites_hugo5379) Build() {
+ h.render()
+}
+
+type Pages_hugo5379 []*Page_hugo5379
+
+type PageCollections_hugo5379 struct {
+ Pages Pages_hugo5379
+}
+
+type Site_hugo5379 struct {
+ *PageCollections_hugo5379
+}
+
+func (s *Site_hugo5379) preparePagesForRender() {
+ for _, p := range s.Pages {
+ p.setContentInit()
+ }
+}
+
+func (s *Site_hugo5379) renderForLayouts() {
+ /// Omit reflections
+ for _, p := range s.Pages {
+ p.WordCount()
+ }
+}
+
+func (s *Site_hugo5379) renderAndWritePage() {
+ s.renderForLayouts()
+}
+
+func (s *Site_hugo5379) renderPages() {
+ numWorkers := 2
+ wg := &sync.WaitGroup{}
+
+ for i := 0; i < numWorkers; i++ {
+ wg.Add(1)
+ go pageRenderer_hugo5379(s, wg) // G3
+ }
+
+ wg.Wait()
+}
+
+type sitesBuilder_hugo5379 struct {
+ H *HugoSites_hugo5379
+}
+
+func (s *sitesBuilder_hugo5379) Build() *sitesBuilder_hugo5379 {
+ return s.build()
+}
+
+func (s *sitesBuilder_hugo5379) build() *sitesBuilder_hugo5379 {
+ s.H.Build()
+ return s
+}
+
+func (s *sitesBuilder_hugo5379) CreateSitesE() error {
+ sites, err := NewHugoSites_hugo5379()
+ if err != nil {
+ return err
+ }
+ s.H = sites
+ return nil
+}
+
+func (s *sitesBuilder_hugo5379) CreateSites() *sitesBuilder_hugo5379 {
+ if err := s.CreateSitesE(); err != nil {
+ log.Fatalf("Failed to create sites: %s", err)
+ }
+ return s
+}
+
+func newHugoSites_hugo5379(sites ...*Site_hugo5379) (*HugoSites_hugo5379, error) {
+ h := &HugoSites_hugo5379{Sites: sites}
+ return h, nil
+}
+
+func newSite_hugo5379() *Site_hugo5379 {
+ c := &PageCollections_hugo5379{}
+ s := &Site_hugo5379{
+ PageCollections_hugo5379: c,
+ }
+ return s
+}
+
+func createSitesFromConfig_hugo5379() []*Site_hugo5379 {
+ var (
+ sites []*Site_hugo5379
+ )
+
+ var s *Site_hugo5379 = newSite_hugo5379()
+ sites = append(sites, s)
+ return sites
+}
+
+func NewHugoSites_hugo5379() (*HugoSites_hugo5379, error) {
+ sites := createSitesFromConfig_hugo5379()
+ return newHugoSites_hugo5379(sites...)
+}
+
+func prepareShortcodeForPage_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
+ m := make(map[int]func() error)
+ m[0] = func() error {
+ return renderShortcode_hugo5379(p)
+ }
+ return m
+}
+
+func renderShortcode_hugo5379(p *PageWithoutContent_hugo5379) error {
+ return renderShortcodeWithPage_hugo5379(p)
+}
+
+func renderShortcodeWithPage_hugo5379(p *PageWithoutContent_hugo5379) error {
+ /// Omit reflections
+ p.WordCount()
+ return nil
+}
+
+func createShortcodeRenderers_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error {
+ return prepareShortcodeForPage_hugo5379(p)
+}
+
+func newShortcodeHandler_hugo5379(p *Page_hugo5379) *shortcodeHandler_hugo5379 {
+ return &shortcodeHandler_hugo5379{
+ p: p.withoutContent(),
+ contentShortcodes: make(map[int]func() error),
+ contentShortcodesDelta: make(map[int]func() error),
+ }
+}
+
+func handleShortcodes_hugo5379(p *PageWithoutContent_hugo5379) error {
+ return p.shortcodeState.executeShortcodesForDelta(p)
+}
+
+func pageRenderer_hugo5379(s *Site_hugo5379, wg *sync.WaitGroup) {
+ defer wg.Done()
+ s.renderAndWritePage()
+}
+
+func Hugo5379() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ b := &sitesBuilder_hugo5379{}
+ s := b.CreateSites()
+ for _, site := range s.H.Sites {
+ p := &Page_hugo5379{
+ pageInit_hugo5379: &pageInit_hugo5379{},
+ pageContentInit_hugo5379: &pageContentInit_hugo5379{},
+ pageWithoutContent: &PageWithoutContent_hugo5379{},
+ contentInit: sync.Once{},
+ contentInitMu: sync.Mutex{},
+ shortcodeState: nil,
+ }
+ p.shortcodeState = newShortcodeHandler_hugo5379(p)
+ site.Pages = append(site.Pages, p)
+ }
+ s.Build()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go
new file mode 100644
index 0000000000..839051cc64
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go
@@ -0,0 +1,129 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio16224", Istio16224)
+}
+
+type ConfigStoreCache_istio16224 interface {
+ RegisterEventHandler(handler func())
+ Run()
+}
+
+type Event_istio16224 int
+
+type Handler_istio16224 func(Event_istio16224)
+
+type configstoreMonitor_istio16224 struct {
+ handlers []Handler_istio16224
+ eventCh chan Event_istio16224
+}
+
+func (m *configstoreMonitor_istio16224) Run(stop <-chan struct{}) {
+ for {
+ select {
+ case <-stop:
+ // This bug is not described, but is a true positive (in our eyes).
+ // In a real run, main exits while the goroutine is blocked here.
+ if _, ok := <-m.eventCh; ok {
+ close(m.eventCh)
+ }
+ return
+ case ce, ok := <-m.eventCh:
+ if ok {
+ m.processConfigEvent(ce)
+ }
+ }
+ }
+}
+
+func (m *configstoreMonitor_istio16224) processConfigEvent(ce Event_istio16224) {
+ m.applyHandlers(ce)
+}
+
+func (m *configstoreMonitor_istio16224) AppendEventHandler(h Handler_istio16224) {
+ m.handlers = append(m.handlers, h)
+}
+
+func (m *configstoreMonitor_istio16224) applyHandlers(e Event_istio16224) {
+ for _, f := range m.handlers {
+ f(e)
+ }
+}
+func (m *configstoreMonitor_istio16224) ScheduleProcessEvent(configEvent Event_istio16224) {
+ m.eventCh <- configEvent
+}
+
+type Monitor_istio16224 interface {
+ Run(<-chan struct{})
+ AppendEventHandler(Handler_istio16224)
+ ScheduleProcessEvent(Event_istio16224)
+}
+
+type controller_istio16224 struct {
+ monitor Monitor_istio16224
+}
+
+func (c *controller_istio16224) RegisterEventHandler(f func(Event_istio16224)) {
+ c.monitor.AppendEventHandler(f)
+}
+
+func (c *controller_istio16224) Run(stop <-chan struct{}) {
+ c.monitor.Run(stop)
+}
+
+func (c *controller_istio16224) Create() {
+ c.monitor.ScheduleProcessEvent(Event_istio16224(0))
+}
+
+func NewMonitor_istio16224() Monitor_istio16224 {
+ return NewBufferedMonitor_istio16224()
+}
+
+func NewBufferedMonitor_istio16224() Monitor_istio16224 {
+ return &configstoreMonitor_istio16224{
+ eventCh: make(chan Event_istio16224),
+ }
+}
+
+func Istio16224() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ controller := &controller_istio16224{monitor: NewMonitor_istio16224()}
+ done := make(chan bool)
+ lock := sync.Mutex{}
+ controller.RegisterEventHandler(func(event Event_istio16224) {
+ lock.Lock()
+ defer lock.Unlock()
+ done <- true
+ })
+
+ stop := make(chan struct{})
+ go controller.Run(stop)
+
+ controller.Create()
+
+ lock.Lock() // blocks
+ lock.Unlock()
+ <-done
+
+ close(stop)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go
new file mode 100644
index 0000000000..aa8317c6d5
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go
@@ -0,0 +1,144 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio17860", Istio17860)
+}
+
+type Proxy_istio17860 interface {
+ IsLive() bool
+}
+
+type TestProxy_istio17860 struct {
+ live func() bool
+}
+
+func (tp TestProxy_istio17860) IsLive() bool {
+ if tp.live == nil {
+ return true
+ }
+ return tp.live()
+}
+
+type Agent_istio17860 interface {
+ Run(ctx context.Context)
+ Restart()
+}
+
+type exitStatus_istio17860 int
+
+type agent_istio17860 struct {
+ proxy Proxy_istio17860
+ mu *sync.Mutex
+ statusCh chan exitStatus_istio17860
+ currentEpoch int
+ activeEpochs map[int]struct{}
+}
+
+func (a *agent_istio17860) Run(ctx context.Context) {
+ for {
+ select {
+ case status := <-a.statusCh:
+ a.mu.Lock()
+ delete(a.activeEpochs, int(status))
+ active := len(a.activeEpochs)
+ a.mu.Unlock()
+ if active == 0 {
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (a *agent_istio17860) Restart() {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.waitUntilLive()
+ a.currentEpoch++
+ a.activeEpochs[a.currentEpoch] = struct{}{}
+
+ go a.runWait(a.currentEpoch)
+}
+
+func (a *agent_istio17860) runWait(epoch int) {
+ a.statusCh <- exitStatus_istio17860(epoch)
+}
+
+func (a *agent_istio17860) waitUntilLive() {
+ if len(a.activeEpochs) == 0 {
+ return
+ }
+
+ interval := time.NewTicker(30 * time.Nanosecond)
+ timer := time.NewTimer(100 * time.Nanosecond)
+ defer func() {
+ interval.Stop()
+ timer.Stop()
+ }()
+
+ if a.proxy.IsLive() {
+ return
+ }
+
+ for {
+ select {
+ case <-timer.C:
+ return
+ case <-interval.C:
+ if a.proxy.IsLive() {
+ return
+ }
+ }
+ }
+}
+
+func NewAgent_istio17860(proxy Proxy_istio17860) Agent_istio17860 {
+ return &agent_istio17860{
+ proxy: proxy,
+ mu: &sync.Mutex{},
+ statusCh: make(chan exitStatus_istio17860),
+ activeEpochs: make(map[int]struct{}),
+ }
+}
+
+func Istio17860() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ neverLive := func() bool {
+ return false
+ }
+
+ a := NewAgent_istio17860(TestProxy_istio17860{live: neverLive})
+ go func() { a.Run(ctx) }()
+
+ a.Restart()
+ go a.Restart()
+
+ time.Sleep(200 * time.Nanosecond)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go
new file mode 100644
index 0000000000..b410c49032
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go
@@ -0,0 +1,154 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Istio18454", Istio18454)
+}
+
+const eventChCap_istio18454 = 1024
+
+type Worker_istio18454 struct {
+ ctx context.Context
+ ctxCancel context.CancelFunc
+}
+
+func (w *Worker_istio18454) Start(setupFn func(), runFn func(c context.Context)) {
+ if setupFn != nil {
+ setupFn()
+ }
+ go func() {
+ runFn(w.ctx)
+ }()
+}
+
+func (w *Worker_istio18454) Stop() {
+ w.ctxCancel()
+}
+
+type Strategy_istio18454 struct {
+ timer *time.Timer
+ timerFrequency time.Duration
+ stateLock sync.Mutex
+ resetChan chan struct{}
+ worker *Worker_istio18454
+ startTimerFn func()
+}
+
+func (s *Strategy_istio18454) OnChange() {
+ s.stateLock.Lock()
+ if s.timer != nil {
+ s.stateLock.Unlock()
+ s.resetChan <- struct{}{}
+ return
+ }
+ s.startTimerFn()
+ s.stateLock.Unlock()
+}
+
+func (s *Strategy_istio18454) startTimer() {
+ s.timer = time.NewTimer(s.timerFrequency)
+ eventLoop := func(ctx context.Context) {
+ for {
+ select {
+ case <-s.timer.C:
+ case <-s.resetChan:
+ if !s.timer.Stop() {
+ <-s.timer.C
+ }
+ s.timer.Reset(s.timerFrequency)
+ case <-ctx.Done():
+ s.timer.Stop()
+ return
+ }
+ }
+ }
+ s.worker.Start(nil, eventLoop)
+}
+
+func (s *Strategy_istio18454) Close() {
+ s.worker.Stop()
+}
+
+type Event_istio18454 int
+
+type Processor_istio18454 struct {
+ stateStrategy *Strategy_istio18454
+ worker *Worker_istio18454
+ eventCh chan Event_istio18454
+}
+
+func (p *Processor_istio18454) processEvent() {
+ p.stateStrategy.OnChange()
+}
+
+func (p *Processor_istio18454) Start() {
+ setupFn := func() {
+ for i := 0; i < eventChCap_istio18454; i++ {
+ p.eventCh <- Event_istio18454(0)
+ }
+ }
+ runFn := func(ctx context.Context) {
+ defer func() {
+ p.stateStrategy.Close()
+ }()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-p.eventCh:
+ p.processEvent()
+ }
+ }
+ }
+ p.worker.Start(setupFn, runFn)
+}
+
+func (p *Processor_istio18454) Stop() {
+ p.worker.Stop()
+}
+
+func NewWorker_istio18454() *Worker_istio18454 {
+ worker := &Worker_istio18454{}
+ worker.ctx, worker.ctxCancel = context.WithCancel(context.Background())
+ return worker
+}
+
+func Istio18454() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ stateStrategy := &Strategy_istio18454{
+ timerFrequency: time.Nanosecond,
+ resetChan: make(chan struct{}, 1),
+ worker: NewWorker_istio18454(),
+ }
+ stateStrategy.startTimerFn = stateStrategy.startTimer
+
+ p := &Processor_istio18454{
+ stateStrategy: stateStrategy,
+ worker: NewWorker_istio18454(),
+ eventCh: make(chan Event_istio18454, eventChCap_istio18454),
+ }
+
+ p.Start()
+ defer p.Stop()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go
new file mode 100644
index 0000000000..0eca5f41fb
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go
@@ -0,0 +1,95 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/10182
+ * Buggy version: 4b990d128a17eea9058d28a3b3688ab8abafbd94
+ * fix commit-id: 64ad3e17ad15cd0f9a4fd86706eec1c572033254
+ * Flaky: 15/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes10182", Kubernetes10182)
+}
+
+type statusManager_kubernetes10182 struct {
+ podStatusesLock sync.RWMutex
+ podStatusChannel chan bool
+}
+
+func (s *statusManager_kubernetes10182) Start() {
+ go func() {
+ for i := 0; i < 2; i++ {
+ s.syncBatch()
+ }
+ }()
+}
+
+func (s *statusManager_kubernetes10182) syncBatch() {
+ runtime.Gosched()
+ <-s.podStatusChannel
+ s.DeletePodStatus()
+}
+
+func (s *statusManager_kubernetes10182) DeletePodStatus() {
+ s.podStatusesLock.Lock()
+ defer s.podStatusesLock.Unlock()
+}
+
+func (s *statusManager_kubernetes10182) SetPodStatus() {
+ s.podStatusesLock.Lock()
+ defer s.podStatusesLock.Unlock()
+ s.podStatusChannel <- true
+}
+
+func NewStatusManager_kubernetes10182() *statusManager_kubernetes10182 {
+ return &statusManager_kubernetes10182{
+ podStatusChannel: make(chan bool),
+ }
+}
+
+// Example of deadlock trace:
+//
+// G1 G2 G3
+// --------------------------------------------------------------------------------
+// s.Start()
+// s.syncBatch()
+// s.SetPodStatus()
+// <-s.podStatusChannel
+// s.podStatusesLock.Lock()
+// s.podStatusChannel <- true
+// s.podStatusesLock.Unlock()
+// return
+// s.DeletePodStatus()
+// s.podStatusesLock.Lock()
+// s.podStatusChannel <- true
+// s.podStatusesLock.Lock()
+// -----------------------------------G1,G3 leak-------------------------------------
+
+func Kubernetes10182() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ s := NewStatusManager_kubernetes10182()
+ go s.Start()
+ go s.SetPodStatus()
+ go s.SetPodStatus()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go
new file mode 100644
index 0000000000..36405f240a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go
@@ -0,0 +1,118 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes11298", Kubernetes11298)
+}
+
+type Signal_kubernetes11298 <-chan struct{}
+
+func After_kubernetes11298(f func()) Signal_kubernetes11298 {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ if f != nil {
+ f()
+ }
+ }()
+ return Signal_kubernetes11298(ch)
+}
+
+func Until_kubernetes11298(f func(), period time.Duration, stopCh <-chan struct{}) {
+ if f == nil {
+ return
+ }
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ f()
+ select {
+ case <-stopCh:
+ case <-time.After(period):
+ }
+ }
+
+}
+
+type notifier_kubernetes11298 struct {
+ lock sync.Mutex
+ cond *sync.Cond
+}
+
+// abort will be closed no matter what
+func (n *notifier_kubernetes11298) serviceLoop(abort <-chan struct{}) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ for {
+ select {
+ case <-abort:
+ return
+ default:
+ ch := After_kubernetes11298(func() {
+ n.cond.Wait()
+ })
+ select {
+ case <-abort:
+ n.cond.Signal()
+ <-ch
+ return
+ case <-ch:
+ }
+ }
+ }
+}
+
+// abort will be closed no matter what
+func Notify_kubernetes11298(abort <-chan struct{}) {
+ n := &notifier_kubernetes11298{}
+ n.cond = sync.NewCond(&n.lock)
+ finished := After_kubernetes11298(func() {
+ Until_kubernetes11298(func() {
+ for {
+ select {
+ case <-abort:
+ return
+ default:
+ func() {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ n.cond.Signal()
+ }()
+ }
+ }
+ }, 0, abort)
+ })
+ Until_kubernetes11298(func() { n.serviceLoop(finished) }, 0, abort)
+}
+func Kubernetes11298() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ done := make(chan struct{})
+ notifyDone := After_kubernetes11298(func() { Notify_kubernetes11298(done) })
+ go func() {
+ defer close(done)
+ time.Sleep(300 * time.Nanosecond)
+ }()
+ <-notifyDone
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go
new file mode 100644
index 0000000000..f6aa8b9ddf
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go
@@ -0,0 +1,166 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/13135
+ * Buggy version: 6ced66249d4fd2a81e86b4a71d8df0139fe5ceae
+ * fix commit-id: a12b7edc42c5c06a2e7d9f381975658692951d5a
+ * Flaky: 93/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes13135", Kubernetes13135)
+}
+
+var (
+ StopChannel_kubernetes13135 chan struct{}
+)
+
+func Util_kubernetes13135(f func(), period time.Duration, stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ func() {
+ f()
+ }()
+ time.Sleep(period)
+ }
+}
+
+type Store_kubernetes13135 interface {
+ Add(obj interface{})
+ Replace(obj interface{})
+}
+
+type Reflector_kubernetes13135 struct {
+ store Store_kubernetes13135
+}
+
+func (r *Reflector_kubernetes13135) ListAndWatch(stopCh <-chan struct{}) error {
+ r.syncWith()
+ return nil
+}
+
+func NewReflector_kubernetes13135(store Store_kubernetes13135) *Reflector_kubernetes13135 {
+ return &Reflector_kubernetes13135{
+ store: store,
+ }
+}
+
+func (r *Reflector_kubernetes13135) syncWith() {
+ r.store.Replace(nil)
+}
+
+type Cacher_kubernetes13135 struct {
+ sync.Mutex
+ initialized sync.WaitGroup
+ initOnce sync.Once
+ watchCache *WatchCache_kubernetes13135
+ reflector *Reflector_kubernetes13135
+}
+
+func (c *Cacher_kubernetes13135) processEvent() {
+ c.Lock()
+ defer c.Unlock()
+}
+
+func (c *Cacher_kubernetes13135) startCaching(stopChannel <-chan struct{}) {
+ c.Lock()
+ for {
+ err := c.reflector.ListAndWatch(stopChannel)
+ if err == nil {
+ break
+ }
+ }
+}
+
+type WatchCache_kubernetes13135 struct {
+ sync.RWMutex
+ onReplace func()
+ onEvent func()
+}
+
+func (w *WatchCache_kubernetes13135) SetOnEvent(onEvent func()) {
+ w.Lock()
+ defer w.Unlock()
+ w.onEvent = onEvent
+}
+
+func (w *WatchCache_kubernetes13135) SetOnReplace(onReplace func()) {
+ w.Lock()
+ defer w.Unlock()
+ w.onReplace = onReplace
+}
+
+func (w *WatchCache_kubernetes13135) processEvent() {
+ w.Lock()
+ defer w.Unlock()
+ if w.onEvent != nil {
+ w.onEvent()
+ }
+}
+
+func (w *WatchCache_kubernetes13135) Add(obj interface{}) {
+ w.processEvent()
+}
+
+func (w *WatchCache_kubernetes13135) Replace(obj interface{}) {
+ w.Lock()
+ defer w.Unlock()
+ if w.onReplace != nil {
+ w.onReplace()
+ }
+}
+
+func NewCacher_kubernetes13135(stopCh <-chan struct{}) *Cacher_kubernetes13135 {
+ watchCache := &WatchCache_kubernetes13135{}
+ cacher := &Cacher_kubernetes13135{
+ initialized: sync.WaitGroup{},
+ watchCache: watchCache,
+ reflector: NewReflector_kubernetes13135(watchCache),
+ }
+ cacher.initialized.Add(1)
+ watchCache.SetOnReplace(func() {
+ cacher.initOnce.Do(func() { cacher.initialized.Done() })
+ cacher.Unlock()
+ })
+ watchCache.SetOnEvent(cacher.processEvent)
+ go Util_kubernetes13135(func() { cacher.startCaching(stopCh) }, 0, stopCh) // G2
+ cacher.initialized.Wait()
+ return cacher
+}
+
+func Kubernetes13135() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ StopChannel_kubernetes13135 = make(chan struct{})
+ for i := 0; i < 50; i++ {
+ go func() {
+ // Should create a local channel. Using a single global channel
+ // concurrently will cause a deadlock which does not actually exist
+ // in the original microbenchmark.
+ StopChannel_kubernetes13135 := make(chan struct{})
+
+ c := NewCacher_kubernetes13135(StopChannel_kubernetes13135) // G1
+ go c.watchCache.Add(nil) // G3
+ go close(StopChannel_kubernetes13135)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go
new file mode 100644
index 0000000000..6c0139a9d2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go
@@ -0,0 +1,100 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/1321
+ * Buggy version: 9cd0fc70f1ca852c903b18b0933991036b3b2fa1
+ * fix commit-id: 435e0b73bb99862f9dedf56a50260ff3dfef14ff
+ * Flaky: 1/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes1321", Kubernetes1321)
+}
+
+type muxWatcher_kubernetes1321 struct {
+ result chan struct{}
+ m *Mux_kubernetes1321
+ id int64
+}
+
+func (mw *muxWatcher_kubernetes1321) Stop() {
+ mw.m.stopWatching(mw.id)
+}
+
+type Mux_kubernetes1321 struct {
+ lock sync.Mutex
+ watchers map[int64]*muxWatcher_kubernetes1321
+}
+
+func NewMux_kubernetes1321() *Mux_kubernetes1321 {
+ m := &Mux_kubernetes1321{
+ watchers: map[int64]*muxWatcher_kubernetes1321{},
+ }
+ go m.loop() // G2
+ return m
+}
+
+func (m *Mux_kubernetes1321) Watch() *muxWatcher_kubernetes1321 {
+ mw := &muxWatcher_kubernetes1321{
+ result: make(chan struct{}),
+ m: m,
+ id: int64(len(m.watchers)),
+ }
+ m.watchers[mw.id] = mw
+ runtime.Gosched()
+ return mw
+}
+
+func (m *Mux_kubernetes1321) loop() {
+ for i := 0; i < 100; i++ {
+ m.distribute()
+ }
+}
+
+func (m *Mux_kubernetes1321) distribute() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ for _, w := range m.watchers {
+ w.result <- struct{}{}
+ runtime.Gosched()
+ }
+}
+
+func (m *Mux_kubernetes1321) stopWatching(id int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ w, ok := m.watchers[id]
+ if !ok {
+ return
+ }
+ delete(m.watchers, id)
+ close(w.result)
+}
+
+func testMuxWatcherClose_kubernetes1321() {
+ m := NewMux_kubernetes1321()
+ m.watchers[m.Watch().id].Stop()
+}
+
+func Kubernetes1321() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go testMuxWatcherClose_kubernetes1321() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go
new file mode 100644
index 0000000000..323cb236c0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go
@@ -0,0 +1,72 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/25331
+ * Buggy version: 5dd087040bb13434f1ddf2f0693d0203c30f28cb
+ * fix commit-id: 97f4647dc3d8cf46c2b66b89a31c758a6edfb57c
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "context"
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+)
+
+func init() {
+ register("Kubernetes25331", Kubernetes25331)
+}
+
+type watchChan_kubernetes25331 struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ resultChan chan bool
+ errChan chan error
+}
+
+func (wc *watchChan_kubernetes25331) Stop() {
+ wc.errChan <- errors.New("Error")
+ wc.cancel()
+}
+
+func (wc *watchChan_kubernetes25331) run() {
+ select {
+ case err := <-wc.errChan:
+ errResult := len(err.Error()) != 0
+ wc.cancel() // Removed in fix
+ wc.resultChan <- errResult
+ case <-wc.ctx.Done():
+ }
+}
+
+func NewWatchChan_kubernetes25331() *watchChan_kubernetes25331 {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &watchChan_kubernetes25331{
+ ctx: ctx,
+ cancel: cancel,
+ resultChan: make(chan bool),
+ errChan: make(chan error),
+ }
+}
+
+func Kubernetes25331() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ wc := NewWatchChan_kubernetes25331()
+ go wc.run() // G1
+ go wc.Stop() // G2
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go
new file mode 100644
index 0000000000..38e53cf4ad
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes26980", Kubernetes26980)
+}
+
+type processorListener_kubernetes26980 struct {
+ lock sync.RWMutex
+ cond sync.Cond
+
+ pendingNotifications []interface{}
+}
+
+func (p *processorListener_kubernetes26980) add(notification interface{}) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.pendingNotifications = append(p.pendingNotifications, notification)
+ p.cond.Broadcast()
+}
+
+func (p *processorListener_kubernetes26980) pop(stopCh <-chan struct{}) {
+ p.lock.Lock()
+ runtime.Gosched()
+ defer p.lock.Unlock()
+ for {
+ for len(p.pendingNotifications) == 0 {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ p.cond.Wait()
+ }
+ select {
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+func newProcessListener_kubernetes26980() *processorListener_kubernetes26980 {
+ ret := &processorListener_kubernetes26980{
+ pendingNotifications: []interface{}{},
+ }
+ ret.cond.L = &ret.lock
+ return ret
+}
+func Kubernetes26980() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 3000; i++ {
+ go func() {
+ pl := newProcessListener_kubernetes26980()
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+ pl.add(1)
+ runtime.Gosched()
+ go pl.pop(stopCh)
+
+ resultCh := make(chan struct{})
+ go func() {
+ pl.lock.Lock()
+ close(resultCh)
+ }()
+ runtime.Gosched()
+ <-resultCh
+ pl.lock.Unlock()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go
new file mode 100644
index 0000000000..00cdcf2b67
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go
@@ -0,0 +1,223 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes30872", Kubernetes30872)
+}
+
+type PopProcessFunc_kubernetes30872 func()
+
+type ProcessFunc_kubernetes30872 func()
+
+func Util_kubernetes30872(f func(), stopCh <-chan struct{}) {
+ JitterUntil_kubernetes30872(f, stopCh)
+}
+
+func JitterUntil_kubernetes30872(f func(), stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ }
+ func() {
+ f()
+ }()
+ }
+}
+
+type Queue_kubernetes30872 interface {
+ HasSynced()
+ Pop(PopProcessFunc_kubernetes30872)
+}
+
+type Config_kubernetes30872 struct {
+ Queue Queue_kubernetes30872
+ Process ProcessFunc_kubernetes30872
+}
+
+type Controller_kubernetes30872 struct {
+ config Config_kubernetes30872
+}
+
+func (c *Controller_kubernetes30872) Run(stopCh <-chan struct{}) {
+ Util_kubernetes30872(c.processLoop, stopCh)
+}
+
+func (c *Controller_kubernetes30872) HasSynced() {
+ c.config.Queue.HasSynced()
+}
+
+func (c *Controller_kubernetes30872) processLoop() {
+ c.config.Queue.Pop(PopProcessFunc_kubernetes30872(c.config.Process))
+}
+
+type ControllerInterface_kubernetes30872 interface {
+ Run(<-chan struct{})
+ HasSynced()
+}
+
+type ResourceEventHandler_kubernetes30872 interface {
+ OnAdd()
+}
+
+type ResourceEventHandlerFuncs_kubernetes30872 struct {
+ AddFunc func()
+}
+
+func (r ResourceEventHandlerFuncs_kubernetes30872) OnAdd() {
+ if r.AddFunc != nil {
+ r.AddFunc()
+ }
+}
+
+type informer_kubernetes30872 struct {
+ controller ControllerInterface_kubernetes30872
+
+ stopChan chan struct{}
+}
+
+type federatedInformerImpl_kubernetes30872 struct {
+ sync.Mutex
+ clusterInformer informer_kubernetes30872
+}
+
+func (f *federatedInformerImpl_kubernetes30872) ClustersSynced() {
+ f.Lock() // L1
+ defer f.Unlock()
+ f.clusterInformer.controller.HasSynced()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) addCluster() {
+ f.Lock() // L1
+ defer f.Unlock()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) Start() {
+ f.Lock() // L1
+ defer f.Unlock()
+
+ f.clusterInformer.stopChan = make(chan struct{})
+ go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) // G2
+ runtime.Gosched()
+}
+
+func (f *federatedInformerImpl_kubernetes30872) Stop() {
+ f.Lock() // L1
+ defer f.Unlock()
+ close(f.clusterInformer.stopChan)
+}
+
+type DelayingDeliverer_kubernetes30872 struct{}
+
+func (d *DelayingDeliverer_kubernetes30872) StartWithHandler(handler func()) {
+ go func() { // G4
+ handler()
+ }()
+}
+
+type FederationView_kubernetes30872 interface {
+ ClustersSynced()
+}
+
+type FederatedInformer_kubernetes30872 interface {
+ FederationView_kubernetes30872
+ Start()
+ Stop()
+}
+
+type NamespaceController_kubernetes30872 struct {
+ namespaceDeliverer *DelayingDeliverer_kubernetes30872
+ namespaceFederatedInformer FederatedInformer_kubernetes30872
+}
+
+func (nc *NamespaceController_kubernetes30872) isSynced() {
+ nc.namespaceFederatedInformer.ClustersSynced()
+}
+
+func (nc *NamespaceController_kubernetes30872) reconcileNamespace() {
+ nc.isSynced()
+}
+
+func (nc *NamespaceController_kubernetes30872) Run(stopChan <-chan struct{}) {
+ nc.namespaceFederatedInformer.Start()
+ go func() { // G3
+ <-stopChan
+ nc.namespaceFederatedInformer.Stop()
+ }()
+ nc.namespaceDeliverer.StartWithHandler(func() {
+ nc.reconcileNamespace()
+ })
+}
+
+type DeltaFIFO_kubernetes30872 struct {
+ lock sync.RWMutex
+}
+
+func (f *DeltaFIFO_kubernetes30872) HasSynced() {
+ f.lock.Lock() // L2
+ defer f.lock.Unlock()
+}
+
+func (f *DeltaFIFO_kubernetes30872) Pop(process PopProcessFunc_kubernetes30872) {
+ f.lock.Lock() // L2
+ defer f.lock.Unlock()
+ process()
+}
+
+func NewFederatedInformer_kubernetes30872() FederatedInformer_kubernetes30872 {
+ federatedInformer := &federatedInformerImpl_kubernetes30872{}
+ federatedInformer.clusterInformer.controller = NewInformer_kubernetes30872(
+ ResourceEventHandlerFuncs_kubernetes30872{
+ AddFunc: func() {
+ federatedInformer.addCluster()
+ },
+ })
+ return federatedInformer
+}
+
+func NewInformer_kubernetes30872(h ResourceEventHandler_kubernetes30872) *Controller_kubernetes30872 {
+ fifo := &DeltaFIFO_kubernetes30872{}
+ cfg := &Config_kubernetes30872{
+ Queue: fifo,
+ Process: func() {
+ h.OnAdd()
+ },
+ }
+ return &Controller_kubernetes30872{config: *cfg}
+}
+
+func NewNamespaceController_kubernetes30872() *NamespaceController_kubernetes30872 {
+ nc := &NamespaceController_kubernetes30872{}
+ nc.namespaceDeliverer = &DelayingDeliverer_kubernetes30872{}
+ nc.namespaceFederatedInformer = NewFederatedInformer_kubernetes30872()
+ return nc
+}
+
+func Kubernetes30872() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() { // G1
+ namespaceController := NewNamespaceController_kubernetes30872()
+ stop := make(chan struct{})
+ namespaceController.Run(stop)
+ close(stop)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go
new file mode 100644
index 0000000000..27020d5804
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go
@@ -0,0 +1,83 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Kubernetes38669", Kubernetes38669)
+}
+
+type Event_kubernetes38669 int
+type watchCacheEvent_kubernetes38669 int
+
+type cacheWatcher_kubernetes38669 struct {
+ sync.Mutex
+ input chan watchCacheEvent_kubernetes38669
+ result chan Event_kubernetes38669
+ stopped bool
+}
+
+func (c *cacheWatcher_kubernetes38669) process(initEvents []watchCacheEvent_kubernetes38669) {
+ for _, event := range initEvents {
+ c.sendWatchCacheEvent(&event)
+ }
+ defer close(c.result)
+ defer c.Stop()
+ for {
+ _, ok := <-c.input
+ if !ok {
+ return
+ }
+ }
+}
+
+func (c *cacheWatcher_kubernetes38669) sendWatchCacheEvent(event *watchCacheEvent_kubernetes38669) {
+ c.result <- Event_kubernetes38669(*event)
+}
+
+func (c *cacheWatcher_kubernetes38669) Stop() {
+ c.stop()
+}
+
+func (c *cacheWatcher_kubernetes38669) stop() {
+ c.Lock()
+ defer c.Unlock()
+ if !c.stopped {
+ c.stopped = true
+ close(c.input)
+ }
+}
+
+func newCacheWatcher_kubernetes38669(chanSize int, initEvents []watchCacheEvent_kubernetes38669) *cacheWatcher_kubernetes38669 {
+ watcher := &cacheWatcher_kubernetes38669{
+ input: make(chan watchCacheEvent_kubernetes38669, chanSize),
+ result: make(chan Event_kubernetes38669, chanSize),
+ stopped: false,
+ }
+ go watcher.process(initEvents)
+ return watcher
+}
+
+func Kubernetes38669() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ initEvents := []watchCacheEvent_kubernetes38669{1, 2}
+ w := newCacheWatcher_kubernetes38669(0, initEvents)
+ w.Stop()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go
new file mode 100644
index 0000000000..fd51484a0f
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go
@@ -0,0 +1,68 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/5316
+ * Buggy version: c868b0bbf09128960bc7c4ada1a77347a464d876
+ * fix commit-id: cc3a433a7abc89d2f766d4c87eaae9448e3dc091
+ * Flaky: 100/100
+ */
+
+package main
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Kubernetes5316", Kubernetes5316)
+}
+
+func finishRequest_kubernetes5316(timeout time.Duration, fn func() error) {
+ ch := make(chan bool)
+ errCh := make(chan error)
+ go func() { // G2
+ if err := fn(); err != nil {
+ errCh <- err
+ } else {
+ ch <- true
+ }
+ }()
+
+ select {
+ case <-ch:
+ case <-errCh:
+ case <-time.After(timeout):
+ }
+}
+
+func Kubernetes5316() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Wait a bit because the child goroutine relies on timed operations.
+ time.Sleep(100 * time.Millisecond)
+
+ // Yield several times to allow the child goroutine to run
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ fn := func() error {
+ time.Sleep(2 * time.Millisecond)
+ if rand.Intn(10) > 5 {
+ return errors.New("Error")
+ }
+ return nil
+ }
+ go finishRequest_kubernetes5316(time.Microsecond, fn) // G1
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go
new file mode 100644
index 0000000000..0ca707e981
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Tag: Reproduce misbehavior
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/58107
+ * Buggy version: 2f17d782eb2772d6401da7ddced9ac90656a7a79
+ * fix commit-id: 010a127314a935d8d038f8dd4559fc5b249813e4
+ * Flaky: 53/100
+ */
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes58107", Kubernetes58107)
+}
+
+type RateLimitingInterface_kubernetes58107 interface {
+ Get()
+ Put()
+}
+
+type Type_kubernetes58107 struct {
+ cond *sync.Cond
+}
+
+func (q *Type_kubernetes58107) Get() {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ q.cond.Wait()
+}
+
+func (q *Type_kubernetes58107) Put() {
+ q.cond.Signal()
+}
+
+type ResourceQuotaController_kubernetes58107 struct {
+ workerLock sync.RWMutex
+ queue RateLimitingInterface_kubernetes58107
+ missingUsageQueue RateLimitingInterface_kubernetes58107
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) worker(queue RateLimitingInterface_kubernetes58107, _ string) {
+ workFunc := func() bool {
+ rq.workerLock.RLock()
+ defer rq.workerLock.RUnlock()
+ queue.Get()
+ return true
+ }
+ for {
+ if quit := workFunc(); quit {
+ return
+ }
+ }
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) Run() {
+ go rq.worker(rq.queue, "G1") // G3
+ go rq.worker(rq.missingUsageQueue, "G2") // G4
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) Sync() {
+ for i := 0; i < 100000; i++ {
+ rq.workerLock.Lock()
+ runtime.Gosched()
+ rq.workerLock.Unlock()
+ }
+}
+
+func (rq *ResourceQuotaController_kubernetes58107) HelperSignals() {
+ for i := 0; i < 100000; i++ {
+ rq.queue.Put()
+ rq.missingUsageQueue.Put()
+ }
+}
+
+func startResourceQuotaController_kubernetes58107() {
+ resourceQuotaController := &ResourceQuotaController_kubernetes58107{
+ queue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
+ missingUsageQueue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})},
+ }
+
+ go resourceQuotaController.Run() // G2
+ go resourceQuotaController.Sync() // G5
+ resourceQuotaController.HelperSignals()
+}
+
+func Kubernetes58107() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(1000 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go startResourceQuotaController_kubernetes58107() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go
new file mode 100644
index 0000000000..0d07ebc4a9
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go
@@ -0,0 +1,120 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/62464
+ * Buggy version: a048ca888ad27367b1a7b7377c67658920adbf5d
+ * fix commit-id: c1b19fce903675b82e9fdd1befcc5f5d658bfe78
+ * Flaky: 8/100
+ */
+
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes62464", Kubernetes62464)
+}
+
+type State_kubernetes62464 interface {
+ GetCPUSetOrDefault()
+ GetCPUSet() bool
+ GetDefaultCPUSet()
+ SetDefaultCPUSet()
+}
+
+type stateMemory_kubernetes62464 struct {
+ sync.RWMutex
+}
+
+func (s *stateMemory_kubernetes62464) GetCPUSetOrDefault() {
+ s.RLock()
+ defer s.RUnlock()
+ if ok := s.GetCPUSet(); ok {
+ return
+ }
+ s.GetDefaultCPUSet()
+}
+
+func (s *stateMemory_kubernetes62464) GetCPUSet() bool {
+ runtime.Gosched()
+ s.RLock()
+ defer s.RUnlock()
+
+ if rand.Intn(10) > 5 {
+ return true
+ }
+ return false
+}
+
+func (s *stateMemory_kubernetes62464) GetDefaultCPUSet() {
+ s.RLock()
+ defer s.RUnlock()
+}
+
+func (s *stateMemory_kubernetes62464) SetDefaultCPUSet() {
+ s.Lock()
+ runtime.Gosched()
+ defer s.Unlock()
+}
+
+type staticPolicy_kubernetes62464 struct{}
+
+func (p *staticPolicy_kubernetes62464) RemoveContainer(s State_kubernetes62464) {
+ s.GetDefaultCPUSet()
+ s.SetDefaultCPUSet()
+}
+
+type manager_kubernetes62464 struct {
+ state *stateMemory_kubernetes62464
+}
+
+func (m *manager_kubernetes62464) reconcileState() {
+ m.state.GetCPUSetOrDefault()
+}
+
+func NewPolicyAndManager_kubernetes62464() (*staticPolicy_kubernetes62464, *manager_kubernetes62464) {
+ s := &stateMemory_kubernetes62464{}
+ m := &manager_kubernetes62464{s}
+ p := &staticPolicy_kubernetes62464{}
+ return p, m
+}
+
+///
+/// G1 G2
+/// m.reconcileState()
+/// m.state.GetCPUSetOrDefault()
+/// s.RLock()
+/// s.GetCPUSet()
+/// p.RemoveContainer()
+/// s.GetDefaultCPUSet()
+/// s.SetDefaultCPUSet()
+/// s.Lock()
+/// s.RLock()
+/// ---------------------G1,G2 deadlock---------------------
+///
+
+func Kubernetes62464() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ p, m := NewPolicyAndManager_kubernetes62464()
+ go m.reconcileState()
+ go p.RemoveContainer(m.state)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go
new file mode 100644
index 0000000000..a3af3b24ae
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go
@@ -0,0 +1,86 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: kubernetes
+ * Issue or PR : https://github.com/kubernetes/kubernetes/pull/6632
+ * Buggy version: e597b41d939573502c8dda1dde7bf3439325fb5d
+ * fix commit-id: 82afb7ab1fe12cf2efceede2322d082eaf5d5adc
+ * Flaky: 4/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Kubernetes6632", Kubernetes6632)
+}
+
+type Connection_kubernetes6632 struct {
+ closeChan chan bool
+}
+
+type idleAwareFramer_kubernetes6632 struct {
+ resetChan chan bool
+ writeLock sync.Mutex
+ conn *Connection_kubernetes6632
+}
+
+func (i *idleAwareFramer_kubernetes6632) monitor() {
+ var resetChan = i.resetChan
+Loop:
+ for {
+ select {
+ case <-i.conn.closeChan:
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+ break Loop
+ }
+ }
+}
+
+func (i *idleAwareFramer_kubernetes6632) WriteFrame() {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return
+ }
+ i.resetChan <- true
+}
+
+func NewIdleAwareFramer_kubernetes6632() *idleAwareFramer_kubernetes6632 {
+ return &idleAwareFramer_kubernetes6632{
+ resetChan: make(chan bool),
+ conn: &Connection_kubernetes6632{
+ closeChan: make(chan bool),
+ },
+ }
+}
+
+func Kubernetes6632() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ i := NewIdleAwareFramer_kubernetes6632()
+
+ go func() { // helper goroutine
+ i.conn.closeChan <- true
+ }()
+ go i.monitor() // G1
+ go i.WriteFrame() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go
new file mode 100644
index 0000000000..ae02ec8304
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go
@@ -0,0 +1,97 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Kubernetes70277", Kubernetes70277)
+}
+
+type WaitFunc_kubernetes70277 func(done <-chan struct{}) <-chan struct{}
+
+type ConditionFunc_kubernetes70277 func() (done bool, err error)
+
+func WaitFor_kubernetes70277(wait WaitFunc_kubernetes70277, fn ConditionFunc_kubernetes70277, done <-chan struct{}) error {
+ c := wait(done)
+ for {
+ _, open := <-c
+ ok, err := fn()
+ if err != nil {
+ return err
+ }
+ if ok {
+ return nil
+ }
+ if !open {
+ break
+ }
+ }
+ return nil
+}
+
+func poller_kubernetes70277(interval, timeout time.Duration) WaitFunc_kubernetes70277 {
+ return WaitFunc_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+
+ tick := time.NewTicker(interval)
+ defer tick.Stop()
+
+ var after <-chan time.Time
+ if timeout != 0 {
+ timer := time.NewTimer(timeout)
+ after = timer.C
+ defer timer.Stop()
+ }
+ for {
+ select {
+ case <-tick.C:
+ select {
+ case ch <- struct{}{}:
+ default:
+ }
+ case <-after:
+ return
+ case <-done:
+ return
+ }
+ }
+ }()
+
+ return ch
+ })
+}
+
+func Kubernetes70277() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 1000; i++ {
+ go func() {
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+ waitFunc := poller_kubernetes70277(time.Millisecond, 80*time.Millisecond)
+ var doneCh <-chan struct{}
+
+ WaitFor_kubernetes70277(func(done <-chan struct{}) <-chan struct{} {
+ doneCh = done
+ return waitFunc(done)
+ }, func() (bool, error) {
+ time.Sleep(10 * time.Millisecond)
+ return true, nil
+ }, stopCh)
+
+ <-doneCh // block here
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/main.go b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go
new file mode 100644
index 0000000000..5787c1e2b2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go
@@ -0,0 +1,39 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+// The number of times the main (profiling) goroutine should yield
+// in order to allow the leaking goroutines to get stuck.
+const yieldCount = 10
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go
new file mode 100644
index 0000000000..884d077550
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go
@@ -0,0 +1,68 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/17176
+ * Buggy version: d295dc66521e2734390473ec1f1da8a73ad3288a
+ * fix commit-id: 2f16895ee94848e2d8ad72bc01968b4c88d84cb8
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby17176", Moby17176)
+}
+
+type DeviceSet_moby17176 struct {
+ sync.Mutex
+ nrDeletedDevices int
+}
+
+func (devices *DeviceSet_moby17176) cleanupDeletedDevices() error {
+ devices.Lock()
+ if devices.nrDeletedDevices == 0 {
+ /// Missing devices.Unlock()
+ return nil
+ }
+ devices.Unlock()
+ return errors.New("Error")
+}
+
+func testDevmapperLockReleasedDeviceDeletion_moby17176() {
+ ds := &DeviceSet_moby17176{
+ nrDeletedDevices: 0,
+ }
+ ds.cleanupDeletedDevices()
+ doneChan := make(chan bool)
+ go func() {
+ ds.Lock()
+ defer ds.Unlock()
+ doneChan <- true
+ }()
+
+ select {
+ case <-time.After(time.Millisecond):
+ case <-doneChan:
+ }
+}
+func Moby17176() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go testDevmapperLockReleasedDeviceDeletion_moby17176()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go
new file mode 100644
index 0000000000..36017a9488
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go
@@ -0,0 +1,146 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/21233
+ * Buggy version: cc12d2bfaae135e63b1f962ad80e6943dd995337
+ * fix commit-id: 2f4aa9658408ac72a598363c6e22eadf93dbb8a7
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "math/rand"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby21233", Moby21233)
+}
+
+type Progress_moby21233 struct{}
+
+type Output_moby21233 interface {
+ WriteProgress(Progress_moby21233) error
+}
+
+type chanOutput_moby21233 chan<- Progress_moby21233
+
+type TransferManager_moby21233 struct {
+ mu sync.Mutex
+}
+
+type Transfer_moby21233 struct {
+ mu sync.Mutex
+}
+
+type Watcher_moby21233 struct {
+ signalChan chan struct{}
+ releaseChan chan struct{}
+ running chan struct{}
+}
+
+func ChanOutput_moby21233(progressChan chan<- Progress_moby21233) Output_moby21233 {
+ return chanOutput_moby21233(progressChan)
+}
+func (out chanOutput_moby21233) WriteProgress(p Progress_moby21233) error {
+ out <- p
+ return nil
+}
+func NewTransferManager_moby21233() *TransferManager_moby21233 {
+ return &TransferManager_moby21233{}
+}
+func NewTransfer_moby21233() *Transfer_moby21233 {
+ return &Transfer_moby21233{}
+}
+func (t *Transfer_moby21233) Release(watcher *Watcher_moby21233) {
+ t.mu.Lock()
+ t.mu.Unlock()
+ close(watcher.releaseChan)
+ <-watcher.running
+}
+func (t *Transfer_moby21233) Watch(progressOutput Output_moby21233) *Watcher_moby21233 {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ lastProgress := Progress_moby21233{}
+ w := &Watcher_moby21233{
+ releaseChan: make(chan struct{}),
+ signalChan: make(chan struct{}),
+ running: make(chan struct{}),
+ }
+ go func() { // G2
+ defer func() {
+ close(w.running)
+ }()
+ done := false
+ for {
+ t.mu.Lock()
+ t.mu.Unlock()
+ if rand.Int31n(2) >= 1 {
+ progressOutput.WriteProgress(lastProgress)
+ }
+ if done {
+ return
+ }
+ select {
+ case <-w.signalChan:
+ case <-w.releaseChan:
+ done = true
+ }
+ }
+ }()
+ return w
+}
+func (tm *TransferManager_moby21233) Transfer(progressOutput Output_moby21233) (*Transfer_moby21233, *Watcher_moby21233) {
+ tm.mu.Lock()
+ defer tm.mu.Unlock()
+ t := NewTransfer_moby21233()
+ return t, t.Watch(progressOutput)
+}
+
+func testTransfer_moby21233() { // G1
+ tm := NewTransferManager_moby21233()
+ progressChan := make(chan Progress_moby21233)
+ progressDone := make(chan struct{})
+ go func() { // G3
+ time.Sleep(1 * time.Millisecond)
+ for p := range progressChan { /// Chan consumer
+ if rand.Int31n(2) >= 1 {
+ return
+ }
+ _ = p
+ }
+ close(progressDone)
+ }()
+ time.Sleep(1 * time.Millisecond)
+ ids := []string{"id1", "id2", "id3"}
+ xrefs := make([]*Transfer_moby21233, len(ids))
+ watchers := make([]*Watcher_moby21233, len(ids))
+ for i := range ids {
+ xrefs[i], watchers[i] = tm.Transfer(ChanOutput_moby21233(progressChan)) /// Chan producer
+ time.Sleep(2 * time.Millisecond)
+ }
+
+ for i := range xrefs {
+ xrefs[i].Release(watchers[i])
+ }
+
+ close(progressChan)
+ <-progressDone
+}
+
+func Moby21233() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go testTransfer_moby21233() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go
new file mode 100644
index 0000000000..d653731f6c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go
@@ -0,0 +1,59 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/25384
+ * Buggy version: 58befe3081726ef74ea09198cd9488fb42c51f51
+ * fix commit-id: 42360d164b9f25fb4b150ef066fcf57fa39559a7
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Moby25348", Moby25348)
+}
+
+type plugin_moby25348 struct{}
+
+type Manager_moby25348 struct {
+ plugins []*plugin_moby25348
+}
+
+func (pm *Manager_moby25348) init() {
+ var group sync.WaitGroup
+ group.Add(len(pm.plugins))
+ for _, p := range pm.plugins {
+ go func(p *plugin_moby25348) {
+ defer group.Done()
+ }(p)
+ group.Wait() // Block here
+ }
+}
+
+func Moby25348() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() {
+ p1 := &plugin_moby25348{}
+ p2 := &plugin_moby25348{}
+ pm := &Manager_moby25348{
+ plugins: []*plugin_moby25348{p1, p2},
+ }
+ go pm.init()
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
new file mode 100644
index 0000000000..7b3398fd38
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go
@@ -0,0 +1,242 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/27782
+ * Buggy version: 18768fdc2e76ec6c600c8ab57d2d487ee7877794
+ * fix commit-id: a69a59ffc7e3d028a72d1195c2c1535f447eaa84
+ * Flaky: 2/100
+ */
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby27782", Moby27782)
+}
+
+type Event_moby27782 struct {
+ Op Op_moby27782
+}
+
+type Op_moby27782 uint32
+
+const (
+ Create_moby27782 Op_moby27782 = 1 << iota
+ Write_moby27782
+ Remove_moby27782
+ Rename_moby27782
+ Chmod_moby27782
+)
+
+func newEvent(op Op_moby27782) Event_moby27782 {
+ return Event_moby27782{op}
+}
+
+func (e *Event_moby27782) ignoreLinux(w *Watcher_moby27782) bool {
+ if e.Op != Write_moby27782 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.cv.Broadcast()
+ return true
+ }
+ runtime.Gosched()
+ return false
+}
+
+type Watcher_moby27782 struct {
+ Events chan Event_moby27782
+ mu sync.Mutex // L1
+ cv *sync.Cond // C1
+ done chan struct{}
+}
+
+func NewWatcher_moby27782() *Watcher_moby27782 {
+ w := &Watcher_moby27782{
+ Events: make(chan Event_moby27782),
+ done: make(chan struct{}),
+ }
+ w.cv = sync.NewCond(&w.mu)
+ go w.readEvents() // G3
+ return w
+}
+
+func (w *Watcher_moby27782) readEvents() {
+ defer close(w.Events)
+ for {
+ if w.isClosed() {
+ return
+ }
+ event := newEvent(Write_moby27782) // MODIFY event
+ if !event.ignoreLinux(w) {
+ runtime.Gosched()
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+ }
+}
+
+func (w *Watcher_moby27782) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+func (w *Watcher_moby27782) Close() {
+ if w.isClosed() {
+ return
+ }
+ close(w.done)
+}
+
+func (w *Watcher_moby27782) Remove() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ exists := true
+ for exists {
+ w.cv.Wait()
+ runtime.Gosched()
+ }
+}
+
+type FileWatcher_moby27782 interface {
+ Events() <-chan Event_moby27782
+ Remove()
+ Close()
+}
+
+func New_moby27782() FileWatcher_moby27782 {
+ return NewEventWatcher_moby27782()
+}
+
+func NewEventWatcher_moby27782() FileWatcher_moby27782 {
+ return &fsNotifyWatcher_moby27782{NewWatcher_moby27782()}
+}
+
+type fsNotifyWatcher_moby27782 struct {
+ *Watcher_moby27782
+}
+
+func (w *fsNotifyWatcher_moby27782) Events() <-chan Event_moby27782 {
+ return w.Watcher_moby27782.Events
+}
+
+func watchFile_moby27782() FileWatcher_moby27782 {
+ fileWatcher := New_moby27782()
+ return fileWatcher
+}
+
+type LogWatcher_moby27782 struct {
+ closeOnce sync.Once
+ closeNotifier chan struct{}
+}
+
+func (w *LogWatcher_moby27782) Close() {
+ w.closeOnce.Do(func() {
+ close(w.closeNotifier)
+ })
+}
+
+func (w *LogWatcher_moby27782) WatchClose() <-chan struct{} {
+ return w.closeNotifier
+}
+
+func NewLogWatcher_moby27782() *LogWatcher_moby27782 {
+ return &LogWatcher_moby27782{
+ closeNotifier: make(chan struct{}),
+ }
+}
+
+func followLogs_moby27782(logWatcher *LogWatcher_moby27782) {
+ fileWatcher := watchFile_moby27782()
+ defer func() {
+ fileWatcher.Close()
+ }()
+ waitRead := func() {
+ runtime.Gosched()
+ select {
+ case <-fileWatcher.Events():
+ case <-logWatcher.WatchClose():
+ fileWatcher.Remove()
+ return
+ }
+ }
+ handleDecodeErr := func() {
+ waitRead()
+ }
+ handleDecodeErr()
+}
+
+type Container_moby27782 struct {
+ LogDriver *JSONFileLogger_moby27782
+}
+
+func (container *Container_moby27782) InitializeStdio() {
+ if err := container.startLogging(); err != nil {
+ container.Reset()
+ }
+}
+
+func (container *Container_moby27782) startLogging() error {
+ l := &JSONFileLogger_moby27782{
+ readers: make(map[*LogWatcher_moby27782]struct{}),
+ }
+ container.LogDriver = l
+ l.ReadLogs()
+ return errors.New("Some error")
+}
+
+func (container *Container_moby27782) Reset() {
+ if container.LogDriver != nil {
+ container.LogDriver.Close()
+ }
+}
+
+type JSONFileLogger_moby27782 struct {
+ readers map[*LogWatcher_moby27782]struct{}
+}
+
+func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 {
+ logWatcher := NewLogWatcher_moby27782()
+ go l.readLogs(logWatcher) // G2
+ return logWatcher
+}
+
+func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) {
+ l.readers[logWatcher] = struct{}{}
+ followLogs_moby27782(logWatcher)
+}
+
+func (l *JSONFileLogger_moby27782) Close() {
+ for r := range l.readers {
+ r.Close()
+ delete(l.readers, r)
+ }
+}
+
+func Moby27782() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 10000; i++ {
+ go (&Container_moby27782{}).InitializeStdio() // G1
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go
new file mode 100644
index 0000000000..56467e0b56
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go
@@ -0,0 +1,125 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/28462
+ * Buggy version: b184bdabf7a01c4b802304ac64ac133743c484be
+ * fix commit-id: 89b123473774248fc3a0356dd3ce5b116cc69b29
+ * Flaky: 69/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby28462", Moby28462)
+}
+
+type State_moby28462 struct {
+ Health *Health_moby28462
+}
+
+type Container_moby28462 struct {
+ sync.Mutex
+ State *State_moby28462
+}
+
+func (ctr *Container_moby28462) start() {
+ go ctr.waitExit()
+}
+func (ctr *Container_moby28462) waitExit() {
+
+}
+
+type Store_moby28462 struct {
+ ctr *Container_moby28462
+}
+
+func (s *Store_moby28462) Get() *Container_moby28462 {
+ return s.ctr
+}
+
+type Daemon_moby28462 struct {
+ containers Store_moby28462
+}
+
+func (d *Daemon_moby28462) StateChanged() {
+ c := d.containers.Get()
+ c.Lock()
+ d.updateHealthMonitorElseBranch(c)
+ defer c.Unlock()
+}
+
+func (d *Daemon_moby28462) updateHealthMonitorIfBranch(c *Container_moby28462) {
+ h := c.State.Health
+ if stop := h.OpenMonitorChannel(); stop != nil {
+ go monitor_moby28462(c, stop)
+ }
+}
+func (d *Daemon_moby28462) updateHealthMonitorElseBranch(c *Container_moby28462) {
+ h := c.State.Health
+ h.CloseMonitorChannel()
+}
+
+type Health_moby28462 struct {
+ stop chan struct{}
+}
+
+func (s *Health_moby28462) OpenMonitorChannel() chan struct{} {
+ return s.stop
+}
+
+func (s *Health_moby28462) CloseMonitorChannel() {
+ if s.stop != nil {
+ s.stop <- struct{}{}
+ }
+}
+
+func monitor_moby28462(c *Container_moby28462, stop chan struct{}) {
+ for {
+ select {
+ case <-stop:
+ return
+ default:
+ handleProbeResult_moby28462(c)
+ }
+ }
+}
+
+func handleProbeResult_moby28462(c *Container_moby28462) {
+ runtime.Gosched()
+ c.Lock()
+ defer c.Unlock()
+}
+
+func NewDaemonAndContainer_moby28462() (*Daemon_moby28462, *Container_moby28462) {
+ c := &Container_moby28462{
+ State: &State_moby28462{&Health_moby28462{make(chan struct{})}},
+ }
+ d := &Daemon_moby28462{Store_moby28462{c}}
+ return d, c
+}
+
+func Moby28462() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ d, c := NewDaemonAndContainer_moby28462()
+ go monitor_moby28462(c, c.State.Health.OpenMonitorChannel()) // G1
+ go d.StateChanged() // G2
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go
new file mode 100644
index 0000000000..561e2faf57
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go
@@ -0,0 +1,67 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "errors"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Moby30408", Moby30408)
+}
+
+type Manifest_moby30408 struct {
+ Implements []string
+}
+
+type Plugin_moby30408 struct {
+ activateWait *sync.Cond
+ activateErr error
+ Manifest *Manifest_moby30408
+}
+
+func (p *Plugin_moby30408) waitActive() error {
+ p.activateWait.L.Lock()
+ for !p.activated() {
+ p.activateWait.Wait()
+ }
+ p.activateWait.L.Unlock()
+ return p.activateErr
+}
+
+func (p *Plugin_moby30408) activated() bool {
+ return p.Manifest != nil
+}
+
+func testActive_moby30408(p *Plugin_moby30408) {
+ done := make(chan struct{})
+ go func() { // G2
+ p.waitActive()
+ close(done)
+ }()
+ <-done
+}
+
+func Moby30408() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ p := &Plugin_moby30408{activateWait: sync.NewCond(&sync.Mutex{})}
+ p.activateErr = errors.New("some junk happened")
+
+ testActive_moby30408(p)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go
new file mode 100644
index 0000000000..4be50546e9
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/33781
+ * Buggy version: 33fd3817b0f5ca4b87f0a75c2bd583b4425d392b
+ * fix commit-id: 67297ba0051d39be544009ba76abea14bc0be8a4
+ * Flaky: 25/100
+ */
+
+package main
+
+import (
+ "context"
+ "os"
+ "runtime/pprof"
+ "time"
+)
+
+func init() {
+ register("Moby33781", Moby33781)
+}
+
+func monitor_moby33781(stop chan bool) {
+ probeInterval := time.Millisecond
+ probeTimeout := time.Millisecond
+ for {
+ select {
+ case <-stop:
+ return
+ case <-time.After(probeInterval):
+ results := make(chan bool)
+ ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
+ go func() { // G3
+ results <- true
+ close(results)
+ }()
+ select {
+ case <-stop:
+ // results should be drained here
+ cancelProbe()
+ return
+ case <-results:
+ cancelProbe()
+ case <-ctx.Done():
+ cancelProbe()
+ <-results
+ }
+ }
+ }
+}
+
+func Moby33781() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ for i := 0; i < 100; i++ {
+ go func(i int) {
+ stop := make(chan bool)
+ go monitor_moby33781(stop) // G1
+ go func() { // G2
+ time.Sleep(time.Duration(i) * time.Millisecond)
+ stop <- true
+ }()
+ }(i)
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go
new file mode 100644
index 0000000000..577c81651a
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/36114
+ * Buggy version: 6d4d3c52ae7c3f910bfc7552a2a673a8338e5b9f
+ * fix commit-id: a44fcd3d27c06aaa60d8d1cbce169f0d982e74b1
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby36114", Moby36114)
+}
+
+type serviceVM_moby36114 struct {
+ sync.Mutex
+}
+
+func (svm *serviceVM_moby36114) hotAddVHDsAtStart() {
+ svm.Lock()
+ defer svm.Unlock()
+ svm.hotRemoveVHDsAtStart()
+}
+
+func (svm *serviceVM_moby36114) hotRemoveVHDsAtStart() {
+ svm.Lock()
+ defer svm.Unlock()
+}
+
+func Moby36114() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ s := &serviceVM_moby36114{}
+ go s.hotAddVHDsAtStart()
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go
new file mode 100644
index 0000000000..7f0497648c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go
@@ -0,0 +1,101 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/4951
+ * Buggy version: 81f148be566ab2b17810ad4be61a5d8beac8330f
+ * fix commit-id: 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby4951", Moby4951)
+}
+
+type DeviceSet_moby4951 struct {
+ sync.Mutex
+ infos map[string]*DevInfo_moby4951
+ nrDeletedDevices int
+}
+
+func (devices *DeviceSet_moby4951) DeleteDevice(hash string) {
+ devices.Lock()
+ defer devices.Unlock()
+
+ info := devices.lookupDevice(hash)
+
+ info.lock.Lock()
+ runtime.Gosched()
+ defer info.lock.Unlock()
+
+ devices.deleteDevice(info)
+}
+
+func (devices *DeviceSet_moby4951) lookupDevice(hash string) *DevInfo_moby4951 {
+ existing, ok := devices.infos[hash]
+ if !ok {
+ return nil
+ }
+ return existing
+}
+
+func (devices *DeviceSet_moby4951) deleteDevice(info *DevInfo_moby4951) {
+ devices.removeDeviceAndWait(info.Name())
+}
+
+func (devices *DeviceSet_moby4951) removeDeviceAndWait(devname string) {
+ /// remove devices by devname
+ devices.Unlock()
+ time.Sleep(300 * time.Nanosecond)
+ devices.Lock()
+}
+
+type DevInfo_moby4951 struct {
+ lock sync.Mutex
+ name string
+}
+
+func (info *DevInfo_moby4951) Name() string {
+ return info.name
+}
+
+func NewDeviceSet_moby4951() *DeviceSet_moby4951 {
+ devices := &DeviceSet_moby4951{
+ infos: make(map[string]*DevInfo_moby4951),
+ }
+ info1 := &DevInfo_moby4951{
+ name: "info1",
+ }
+ info2 := &DevInfo_moby4951{
+ name: "info2",
+ }
+ devices.infos[info1.name] = info1
+ devices.infos[info2.name] = info2
+ return devices
+}
+
+func Moby4951() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() {
+ ds := NewDeviceSet_moby4951()
+ /// Delete devices by the same info
+ go ds.DeleteDevice("info1")
+ go ds.DeleteDevice("info1")
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go
new file mode 100644
index 0000000000..212f65b1f3
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go
@@ -0,0 +1,55 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: moby
+ * Issue or PR : https://github.com/moby/moby/pull/7559
+ * Buggy version: 64579f51fcb439c36377c0068ccc9a007b368b5a
+ * fix commit-id: 6cbb8e070d6c3a66bf48fbe5cbf689557eee23db
+ * Flaky: 100/100
+ */
+package main
+
+import (
+ "net"
+ "os"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Moby7559", Moby7559)
+}
+
+type UDPProxy_moby7559 struct {
+ connTrackLock sync.Mutex
+}
+
+func (proxy *UDPProxy_moby7559) Run() {
+ for i := 0; i < 2; i++ {
+ proxy.connTrackLock.Lock()
+ _, err := net.DialUDP("udp", nil, nil)
+ if err != nil {
+ continue
+ /// Missing unlock here
+ }
+ if i == 0 {
+ break
+ }
+ }
+ proxy.connTrackLock.Unlock()
+}
+
+func Moby7559() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 20; i++ {
+ go (&UDPProxy_moby7559{}).Run()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go
new file mode 100644
index 0000000000..49905315a0
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go
@@ -0,0 +1,122 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+func init() {
+ register("Serving2137", Serving2137)
+}
+
+type token_serving2137 struct{}
+
+type request_serving2137 struct {
+ lock *sync.Mutex
+ accepted chan bool
+}
+
+type Breaker_serving2137 struct {
+ pendingRequests chan token_serving2137
+ activeRequests chan token_serving2137
+}
+
+func (b *Breaker_serving2137) Maybe(thunk func()) bool {
+ var t token_serving2137
+ select {
+ default:
+ // Pending request queue is full. Report failure.
+ return false
+ case b.pendingRequests <- t:
+ // Pending request has capacity.
+ // Wait for capacity in the active queue.
+ b.activeRequests <- t
+ // Defer releasing capacity in the active and pending request queue.
+ defer func() {
+ <-b.activeRequests
+ runtime.Gosched()
+ <-b.pendingRequests
+ }()
+ // Do the thing.
+ thunk()
+ // Report success
+ return true
+ }
+}
+
+func (b *Breaker_serving2137) concurrentRequest() request_serving2137 {
+ r := request_serving2137{lock: &sync.Mutex{}, accepted: make(chan bool, 1)}
+ r.lock.Lock()
+ var start sync.WaitGroup
+ start.Add(1)
+ go func() { // G2, G3
+ start.Done()
+ runtime.Gosched()
+ ok := b.Maybe(func() {
+ // Will block on locked mutex.
+ r.lock.Lock()
+ runtime.Gosched()
+ r.lock.Unlock()
+ })
+ r.accepted <- ok
+ }()
+ start.Wait() // Ensure that the go func has had a chance to execute.
+ return r
+}
+
+// Perform n requests against the breaker, returning the request handles
+// (each holding a mutex and a channel reporting whether it was accepted).
+func (b *Breaker_serving2137) concurrentRequests(n int) []request_serving2137 {
+ requests := make([]request_serving2137, n)
+ for i := range requests {
+ requests[i] = b.concurrentRequest()
+ }
+ return requests
+}
+
+func NewBreaker_serving2137(queueDepth, maxConcurrency int32) *Breaker_serving2137 {
+ return &Breaker_serving2137{
+ pendingRequests: make(chan token_serving2137, queueDepth+maxConcurrency),
+ activeRequests: make(chan token_serving2137, maxConcurrency),
+ }
+}
+
+func unlock_serving2137(req request_serving2137) {
+ req.lock.Unlock()
+ runtime.Gosched()
+ // Verify that function has completed
+ ok := <-req.accepted
+ runtime.Gosched()
+ // Requeue for next usage
+ req.accepted <- ok
+}
+
+func unlockAll_serving2137(requests []request_serving2137) {
+ for _, lc := range requests {
+ unlock_serving2137(lc)
+ }
+}
+
+func Serving2137() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(100 * time.Millisecond)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ for i := 0; i < 1000; i++ {
+ go func() {
+ b := NewBreaker_serving2137(1, 1)
+
+ locks := b.concurrentRequests(2) // G1
+ unlockAll_serving2137(locks)
+ }()
+ }
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go
new file mode 100644
index 0000000000..7967db7cfe
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go
@@ -0,0 +1,87 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Syncthing4829", Syncthing4829)
+}
+
+type Address_syncthing4829 int
+
+type Mapping_syncthing4829 struct {
+ mut sync.RWMutex // L2
+
+ extAddresses map[string]Address_syncthing4829
+}
+
+func (m *Mapping_syncthing4829) clearAddresses() {
+ m.mut.Lock() // L2
+ var removed []Address_syncthing4829
+ for id, addr := range m.extAddresses {
+ removed = append(removed, addr)
+ delete(m.extAddresses, id)
+ }
+ if len(removed) > 0 {
+ m.notify(nil, removed)
+ }
+ m.mut.Unlock() // L2
+}
+
+func (m *Mapping_syncthing4829) notify(added, remove []Address_syncthing4829) {
+ m.mut.RLock() // L2
+ m.mut.RUnlock() // L2
+}
+
+type Service_syncthing4829 struct {
+ mut sync.RWMutex // L1
+
+ mappings []*Mapping_syncthing4829
+}
+
+func (s *Service_syncthing4829) NewMapping() *Mapping_syncthing4829 {
+ mapping := &Mapping_syncthing4829{
+ extAddresses: make(map[string]Address_syncthing4829),
+ }
+ s.mut.Lock() // L1
+ s.mappings = append(s.mappings, mapping)
+ s.mut.Unlock() // L1
+ return mapping
+}
+
+func (s *Service_syncthing4829) RemoveMapping(mapping *Mapping_syncthing4829) {
+ s.mut.Lock() // L1
+ defer s.mut.Unlock() // L1
+ for _, existing := range s.mappings {
+ if existing == mapping {
+ mapping.clearAddresses()
+ }
+ }
+}
+
+func Syncthing4829() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+
+ go func() { // G1
+ natSvc := &Service_syncthing4829{}
+ m := natSvc.NewMapping()
+ m.extAddresses["test"] = 0
+
+ natSvc.RemoveMapping(m)
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go
new file mode 100644
index 0000000000..e25494a688
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go
@@ -0,0 +1,103 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+func init() {
+ register("Syncthing5795", Syncthing5795)
+}
+
+type message_syncthing5795 interface{}
+
+type ClusterConfig_syncthing5795 struct{}
+
+type Model_syncthing5795 interface {
+ ClusterConfig(message_syncthing5795)
+}
+
+type TestModel_syncthing5795 struct {
+ ccFn func()
+}
+
+func (t *TestModel_syncthing5795) ClusterConfig(msg message_syncthing5795) {
+ if t.ccFn != nil {
+ t.ccFn()
+ }
+}
+
+type Connection_syncthing5795 interface {
+ Start()
+ Close()
+}
+
+type rawConnection_syncthing5795 struct {
+ receiver Model_syncthing5795
+
+ inbox chan message_syncthing5795
+ dispatcherLoopStopped chan struct{}
+ closed chan struct{}
+ closeOnce sync.Once
+}
+
+func (c *rawConnection_syncthing5795) Start() {
+ go c.dispatcherLoop() // G2
+}
+
+func (c *rawConnection_syncthing5795) dispatcherLoop() {
+ defer close(c.dispatcherLoopStopped)
+ var msg message_syncthing5795
+ for {
+ select {
+ case msg = <-c.inbox:
+ case <-c.closed:
+ return
+ }
+ switch msg := msg.(type) {
+ case *ClusterConfig_syncthing5795:
+ c.receiver.ClusterConfig(msg)
+ default:
+ return
+ }
+ }
+}
+
+func (c *rawConnection_syncthing5795) Close() {
+ c.closeOnce.Do(func() {
+ close(c.closed)
+ <-c.dispatcherLoopStopped
+ })
+}
+
+func Syncthing5795() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ go func() { // G1
+ m := &TestModel_syncthing5795{}
+ c := &rawConnection_syncthing5795{
+ dispatcherLoopStopped: make(chan struct{}),
+ closed: make(chan struct{}),
+ inbox: make(chan message_syncthing5795),
+ receiver: m,
+ }
+ m.ccFn = c.Close
+
+ c.Start()
+ c.inbox <- &ClusterConfig_syncthing5795{}
+
+ <-c.dispatcherLoopStopped
+ }()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/main.go b/src/runtime/testdata/testgoroutineleakprofile/main.go
new file mode 100644
index 0000000000..5787c1e2b2
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/main.go
@@ -0,0 +1,39 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+// The number of times the main (profiling) goroutine should yield
+// in order to allow the leaking goroutines to get stuck.
+const yieldCount = 10
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/simple.go b/src/runtime/testdata/testgoroutineleakprofile/simple.go
new file mode 100644
index 0000000000..b8172cd6df
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/simple.go
@@ -0,0 +1,253 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+)
+
+// This is a set of micro-tests with obvious goroutine leaks that
+// ensures goroutine leak detection works.
+//
+// Tests in this file are not flaky iff run with GOMAXPROCS=1.
+// The main goroutine forcefully yields via `runtime.Gosched()` before
+// running the profiler. This moves them to the back of the run queue,
+// allowing the leaky goroutines to be scheduled beforehand and get stuck.
+
+func init() {
+ register("NilRecv", NilRecv)
+ register("NilSend", NilSend)
+ register("SelectNoCases", SelectNoCases)
+ register("ChanRecv", ChanRecv)
+ register("ChanSend", ChanSend)
+ register("Select", Select)
+ register("WaitGroup", WaitGroup)
+ register("MutexStack", MutexStack)
+ register("MutexHeap", MutexHeap)
+ register("RWMutexRLock", RWMutexRLock)
+ register("RWMutexLock", RWMutexLock)
+ register("Cond", Cond)
+ register("Mixed", Mixed)
+ register("NoLeakGlobal", NoLeakGlobal)
+}
+
+func NilRecv() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var c chan int
+ <-c
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func NilSend() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var c chan int
+ c <- 0
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func ChanRecv() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ <-make(chan int)
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func SelectNoCases() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ select {}
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func ChanSend() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ make(chan int) <- 0
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Select() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ select {
+ case make(chan int) <- 0:
+ case <-make(chan int):
+ }
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func WaitGroup() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ wg.Wait()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func MutexStack() {
+ prof := pprof.Lookup("goroutineleak")
+ for i := 0; i < 1000; i++ {
+ go func() {
+ var mu sync.Mutex
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ }
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func MutexHeap() {
+ prof := pprof.Lookup("goroutineleak")
+ for i := 0; i < 1000; i++ {
+ go func() {
+ mu := &sync.Mutex{}
+ go func() {
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ }()
+ }
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func RWMutexRLock() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ mu := &sync.RWMutex{}
+ mu.Lock()
+ mu.RLock()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func RWMutexLock() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ mu := &sync.RWMutex{}
+ mu.Lock()
+ mu.Lock()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Cond() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ cond := sync.NewCond(&sync.Mutex{})
+ cond.L.Lock()
+ cond.Wait()
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+func Mixed() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ ch := make(chan int)
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ ch <- 0
+ wg.Done()
+ panic("should not be reached")
+ }()
+ wg.Wait()
+ <-ch
+ panic("should not be reached")
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
+
+var ch = make(chan int)
+
+// No leak should be reported by this test
+func NoLeakGlobal() {
+ prof := pprof.Lookup("goroutineleak")
+ go func() {
+ <-ch
+ }()
+ // Yield several times to allow the child goroutine to run.
+ for i := 0; i < yieldCount; i++ {
+ runtime.Gosched()
+ }
+ prof.WriteTo(os.Stdout, 2)
+}
diff --git a/src/runtime/testdata/testgoroutineleakprofile/stresstests.go b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go
new file mode 100644
index 0000000000..64b535f51c
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+const spawnGCMaxDepth = 5
+
+func init() {
+ register("SpawnGC", SpawnGC)
+ register("DaisyChain", DaisyChain)
+}
+
+func spawnGC(i int) {
+ prof := pprof.Lookup("goroutineleak")
+ if i == 0 {
+ return
+ }
+ wg := &sync.WaitGroup{}
+ wg.Add(i + 1)
+ go func() {
+ wg.Done()
+ <-make(chan int)
+ }()
+ for j := 0; j < i; j++ {
+ go func() {
+ wg.Done()
+ spawnGC(i - 1)
+ }()
+ }
+ wg.Wait()
+ runtime.Gosched()
+ if i == spawnGCMaxDepth {
+ prof.WriteTo(os.Stdout, 2)
+ } else {
+ // We want to concurrently trigger the profile in order to concurrently run
+ // the GC, but we don't want to stream all the profiles to standard output.
+ //
+ // Only output the profile for the root call to spawnGC, and otherwise stream
+ // the profile outputs to /dev/null to avoid jumbling.
+ prof.WriteTo(io.Discard, 2)
+ }
+}
+
+// SpawnGC spawns a tree of goroutine leaks and calls the goroutine leak profiler
+// for each node in the tree. It is supposed to stress the goroutine leak profiler
+// under a heavily concurrent workload.
+func SpawnGC() {
+ spawnGC(spawnGCMaxDepth)
+}
+
+// DaisyChain spawns a daisy-chain of runnable goroutines.
+//
+// Each goroutine in the chain creates a new channel and goroutine.
+//
+// This illustrates a pathological worst case for the goroutine leak GC complexity,
+// as opposed to the regular GC, which is not negatively affected by this pattern.
+func DaisyChain() {
+ prof := pprof.Lookup("goroutineleak")
+ defer func() {
+ time.Sleep(time.Second)
+ prof.WriteTo(os.Stdout, 2)
+ }()
+ var chain func(i int, ch chan struct{})
+ chain = func(i int, ch chan struct{}) {
+ if i <= 0 {
+ go func() {
+ time.Sleep(time.Hour)
+ ch <- struct{}{}
+ }()
+ return
+ }
+ ch2 := make(chan struct{})
+ go chain(i-1, ch2)
+ <-ch2
+ ch <- struct{}{}
+ }
+ // The channel buffer avoids goroutine leaks.
+ go chain(1000, make(chan struct{}, 1))
+}
diff --git a/src/runtime/testdata/testprog/pipe_unix.go b/src/runtime/testdata/testprog/pipe_unix.go
new file mode 100644
index 0000000000..cee4da65f6
--- /dev/null
+++ b/src/runtime/testdata/testprog/pipe_unix.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package main
+
+import "syscall"
+
+func pipe() (r, w int, err error) {
+ var p [2]int
+ err = syscall.Pipe(p[:])
+ return p[0], p[1], err
+}
diff --git a/src/runtime/testdata/testprog/pipe_windows.go b/src/runtime/testdata/testprog/pipe_windows.go
new file mode 100644
index 0000000000..597601a179
--- /dev/null
+++ b/src/runtime/testdata/testprog/pipe_windows.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "syscall"
+
+func pipe() (r, w syscall.Handle, err error) {
+ var p [2]syscall.Handle
+ err = syscall.Pipe(p[:])
+ return p[0], p[1], err
+}
diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go
new file mode 100644
index 0000000000..6d3f68a848
--- /dev/null
+++ b/src/runtime/testdata/testprog/schedmetrics.go
@@ -0,0 +1,267 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "log"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "runtime/metrics"
+ "strings"
+ "sync/atomic"
+ "syscall"
+ "time"
+)
+
+// Register the SchedMetrics entry point with the testprog harness so the
+// runtime tests can invoke it by name in a fresh process.
+func init() {
+	register("SchedMetrics", SchedMetrics)
+}
+
+// Tests runtime/metrics.Read for various scheduler metrics.
+//
+// Implemented in testprog to prevent other tests from polluting
+// the metrics.
+func SchedMetrics() {
+	// Indices into the sample array below; numSamples is its length.
+	const (
+		notInGo = iota
+		runnable
+		running
+		waiting
+		created
+		threads
+		numSamples
+	)
+	var s [numSamples]metrics.Sample
+	s[notInGo].Name = "/sched/goroutines/not-in-go:goroutines"
+	s[runnable].Name = "/sched/goroutines/runnable:goroutines"
+	s[running].Name = "/sched/goroutines/running:goroutines"
+	s[waiting].Name = "/sched/goroutines/waiting:goroutines"
+	s[created].Name = "/sched/goroutines-created:goroutines"
+	s[threads].Name = "/sched/threads/total:threads"
+
+	// All log output is buffered and only emitted (to stderr) if some
+	// check fails; see the end of the function.
+	var failed bool
+	var out bytes.Buffer
+	logger := log.New(&out, "", 0)
+	indent := 0
+	logf := func(s string, a ...any) {
+		var prefix strings.Builder
+		for range indent {
+			prefix.WriteString("\t")
+		}
+		logger.Printf(prefix.String()+s, a...)
+	}
+	// errorf logs like logf but also marks the whole test as failed.
+	errorf := func(s string, a ...any) {
+		logf(s, a...)
+		failed = true
+	}
+	// run executes a named sub-check with its log output indented.
+	run := func(name string, f func()) {
+		logf("=== Checking %q", name)
+		indent++
+		f()
+		indent--
+	}
+	logMetrics := func(s []metrics.Sample) {
+		for i := range s {
+			logf("%s: %d", s[i].Name, s[i].Value.Uint64())
+		}
+	}
+
+	// generalSlack is the number of goroutines we allow ourselves to be
+	// off by in any given category, for example due to background system
+	// goroutines. This excludes GC goroutines.
+	generalSlack := uint64(4)
+
+	// waitingSlack is the max number of blocked goroutines controlled
+	// by the runtime that we'll allow for. This includes GC goroutines
+	// as well as finalizer and cleanup goroutines.
+	waitingSlack := generalSlack + uint64(2*runtime.GOMAXPROCS(-1))
+
+	// threadsSlack is the maximum number of threads left over
+	// from the runtime (sysmon, the template thread, etc.)
+	const threadsSlack = 4
+
+	// Make sure GC isn't running, since GC workers interfere with
+	// expected counts.
+	defer debug.SetGCPercent(debug.SetGCPercent(-1))
+	runtime.GC()
+
+	// check flags an error if s's value lies outside [min, max].
+	check := func(s *metrics.Sample, min, max uint64) {
+		val := s.Value.Uint64()
+		if val < min {
+			errorf("%s too low; %d < %d", s.Name, val, min)
+		}
+		if val > max {
+			errorf("%s too high; %d > %d", s.Name, val, max)
+		}
+	}
+	checkEq := func(s *metrics.Sample, value uint64) {
+		check(s, value, value)
+	}
+	// spinUntil polls f until it reports success. It never returns false;
+	// if f never succeeds, we rely on the test harness timeout to kill
+	// this process.
+	spinUntil := func(f func() bool) bool {
+		for {
+			if f() {
+				return true
+			}
+			time.Sleep(50 * time.Millisecond)
+		}
+	}
+
+	// Check base values.
+	run("base", func() {
+		// Pin to one P so exactly one goroutine (us) can be running.
+		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+		metrics.Read(s[:])
+		logMetrics(s[:])
+		check(&s[notInGo], 0, generalSlack)
+		check(&s[runnable], 0, generalSlack)
+		checkEq(&s[running], 1)
+		check(&s[waiting], 0, waitingSlack)
+	})
+
+	// Baseline for the goroutines-created counter.
+	metrics.Read(s[:])
+	createdAfterBase := s[created].Value.Uint64()
+
+	// Force Running count to be high. We'll use these goroutines
+	// for Runnable, too.
+	const count = 10
+	var ready, exit atomic.Uint32
+	for range count {
+		go func() {
+			ready.Add(1)
+			for exit.Load() == 0 {
+				// Spin to get us and keep us running, but check
+				// the exit condition so we exit out early if we're
+				// done.
+				start := time.Now()
+				for time.Since(start) < 10*time.Millisecond && exit.Load() == 0 {
+				}
+				runtime.Gosched()
+			}
+		}()
+	}
+	for ready.Load() < count {
+		runtime.Gosched()
+	}
+
+	// Be careful. We've entered a dangerous state for platforms
+	// that do not return back to the underlying system unless all
+	// goroutines are blocked, like js/wasm, since we have a bunch
+	// of runnable goroutines all spinning. We cannot write anything
+	// out.
+	if testenv.HasParallelism() {
+		run("created", func() {
+			metrics.Read(s[:])
+			logMetrics(s[:])
+			checkEq(&s[created], createdAfterBase+count)
+		})
+		run("running", func() {
+			// Raise GOMAXPROCS so all spinners can run at once.
+			defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(count + 4))
+			// It can take a little bit for the scheduler to
+			// distribute the goroutines to Ps, so retry until
+			// we see the count we expect or the test times out.
+			spinUntil(func() bool {
+				metrics.Read(s[:])
+				return s[running].Value.Uint64() >= count
+			})
+			logMetrics(s[:])
+			check(&s[running], count, count+4)
+			check(&s[threads], count, count+4+threadsSlack)
+		})
+
+		// Force runnable count to be high.
+		run("runnable", func() {
+			// With a single P, all but one spinner must be runnable.
+			defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+			metrics.Read(s[:])
+			logMetrics(s[:])
+			checkEq(&s[running], 1)
+			check(&s[runnable], count-1, count+generalSlack)
+		})
+
+		// Done with the running/runnable goroutines.
+		exit.Store(1)
+	} else {
+		// Read metrics and then exit all the other goroutines,
+		// so that system calls may proceed.
+		metrics.Read(s[:])
+
+		// Done with the running/runnable goroutines.
+		exit.Store(1)
+
+		// Now we can check our invariants.
+		run("created", func() {
+			// Look for count-1 goroutines because we read metrics
+			// *before* run goroutine was created for this sub-test.
+			checkEq(&s[created], createdAfterBase+count-1)
+		})
+		run("running", func() {
+			logMetrics(s[:])
+			checkEq(&s[running], 1)
+			checkEq(&s[threads], 1)
+		})
+		run("runnable", func() {
+			logMetrics(s[:])
+			check(&s[runnable], count-1, count+generalSlack)
+		})
+	}
+
+	// Force not-in-go count to be high. This is a little tricky since
+	// we try really hard not to let things block in system calls.
+	// We have to drop to the syscall package to do this reliably.
+	run("not-in-go", func() {
+		// Block a bunch of goroutines on an OS pipe.
+		pr, pw, err := pipe()
+		if err != nil {
+			// Some platforms (js, wasip1) may not support pipes;
+			// treat that as a skip rather than a failure.
+			switch runtime.GOOS {
+			case "js", "wasip1":
+				logf("creating pipe: %v", err)
+				return
+			}
+			panic(fmt.Sprintf("creating pipe: %v", err))
+		}
+		// These reads block in the syscall until the write end is
+		// closed below; their results are irrelevant.
+		for i := 0; i < count; i++ {
+			go syscall.Read(pr, make([]byte, 1))
+		}
+
+		// Let the goroutines block.
+		spinUntil(func() bool {
+			metrics.Read(s[:])
+			return s[notInGo].Value.Uint64() >= count
+		})
+		logMetrics(s[:])
+		check(&s[notInGo], count, count+generalSlack)
+
+		// Unblock the readers and release both pipe ends.
+		syscall.Close(pw)
+		syscall.Close(pr)
+	})
+
+	run("waiting", func() {
+		// Force waiting count to be high.
+		const waitingCount = 1000
+		stop := make(chan bool)
+		for i := 0; i < waitingCount; i++ {
+			go func() { <-stop }()
+		}
+
+		// Let the goroutines block.
+		spinUntil(func() bool {
+			metrics.Read(s[:])
+			return s[waiting].Value.Uint64() >= waitingCount
+		})
+		logMetrics(s[:])
+		check(&s[waiting], waitingCount, waitingCount+waitingSlack)
+
+		close(stop)
+	})
+
+	// Only now is it safe to write output: dump the buffered log on
+	// failure, otherwise just report success.
+	if failed {
+		fmt.Fprintln(os.Stderr, out.String())
+		os.Exit(1)
+	} else {
+		fmt.Fprintln(os.Stderr, "OK")
+	}
+}