aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-12-08 17:41:04 -0500
committerCherry Mui <cherryyz@google.com>2025-12-08 17:41:05 -0500
commitc456ab7a308fa42c601aa07c04207e29f1993e93 (patch)
tree01de50eedd7b7e310c9968d466408533092c6fbb /src/runtime
parent1d8711e126ee1917128ddc1439718835f1c83fb8 (diff)
parenta33bbf1988685215cdf300feb47d2e356e459b3e (diff)
downloadgo-c456ab7a308fa42c601aa07c04207e29f1993e93.tar.xz
[dev.simd] all: merge master (a33bbf1) into dev.simd
Merge List: + 2025-12-08 a33bbf1988 weak: fix weak pointer test to correctly iterate over weak pointers after GC + 2025-12-08 a88a96330f cmd/cgo: use doc link for cgo.Handle + 2025-12-08 276cc4d3db cmd/link: fix AIX builds after recent linker changes + 2025-12-08 f2d96272cb runtime/trace: update TestSubscribers to dump traces + 2025-12-08 4837bcc92c internal/trace: skip tests for alloc/free experiment by default + 2025-12-08 b5f6816cea cmd/link: generate DWARF for moduledata + 2025-12-08 44a39c9dac runtime: only run TestNotInGoMetricCallback when building with cgo + 2025-12-08 3a6a034cd6 runtime: disable TestNotInGoMetricCallback on FreeBSD + race + 2025-12-08 4122d3e9ea runtime: use atomic C types with atomic C functions + 2025-12-08 34397865b1 runtime: deflake TestProfBufWakeup + 2025-12-08 d4972f6295 runtime: mark getfp as nosplit + 2025-12-05 0d0d5c9a82 test/codegen: test negation with add/sub on riscv64 + 2025-12-05 2e509e61ef cmd/go: convert some more tests to script tests + 2025-12-05 c270e71835 cmd/go/internal/vet: skip -fix on pkgs from vendor or non-main mod + 2025-12-05 745349712e runtime: don't count nGsyscallNoP for extra Ms in C + 2025-12-05 f3d572d96a cmd/go: fix race applying fixes in fix and vet -fix modes + 2025-12-05 76345533f7 runtime: expand Pinner documentation + 2025-12-05 b133524c0f cmd/go/testdata/script: skip vet_cache in short mode + 2025-12-05 96e142ba2b runtime: skip TestArenaCollision if we run out of hints + 2025-12-05 fe4952f116 runtime: relax threadsSlack in TestReadMetricsSched + 2025-12-05 8947f092a8 runtime: skip mayMoreStackMove in goroutine leak tests + 2025-12-05 44cb82449e runtime/race: set missing argument frame for ppc64x atomic And/Or wrappers + 2025-12-05 435e61c801 runtime: reject any goroutine leak test failure that failed to execute + 2025-12-05 54e5540014 runtime: print output in case of segfault in goroutine leak tests + 2025-12-05 9616c33295 runtime: don't specify GOEXPERIMENT=greenteagc in goroutine leak tests + 
2025-12-05 2244bd7eeb crypto/subtle: add speculation barrier after DIT + 2025-12-05 f84f8d86be cmd/compile: fix mis-infer bounds in slice len/cap calculations + 2025-12-05 a70addd3b3 all: fix some comment issues + 2025-12-05 93b49f773d internal/runtime/maps: clarify probeSeq doc comment + 2025-12-04 91267f0a70 all: update vendored x/tools + 2025-12-04 a753a9ed54 cmd/internal/fuzztest: move fuzz tests out of cmd/go test suite + 2025-12-04 1681c3b67f crypto: use rand.IsDefaultReader instead of comparing to boring.RandReader + 2025-12-04 7b67b68a0d cmd/compile: use isUnsignedPowerOfTwo rather than isPowerOfTwo for unsigneds + 2025-12-03 2b62144069 all: REVERSE MERGE dev.simd (9ac524a) into master Change-Id: Ia0cdf06cdde89b6a4db30ed15ed8e0bcbac6ae30
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/crash_test.go9
-rw-r--r--src/runtime/export_test.go7
-rw-r--r--src/runtime/goroutineleakprofile_test.go32
-rw-r--r--src/runtime/malloc_test.go18
-rw-r--r--src/runtime/metrics_cgo_test.go32
-rw-r--r--src/runtime/mgc.go2
-rw-r--r--src/runtime/os_wasm.go2
-rw-r--r--src/runtime/pinner.go22
-rw-r--r--src/runtime/proc.go54
-rw-r--r--src/runtime/profbuf_test.go16
-rw-r--r--src/runtime/race_ppc64le.s4
-rw-r--r--src/runtime/runtime2.go2
-rw-r--r--src/runtime/stubs_386.go2
-rw-r--r--src/runtime/stubs_arm.go2
-rw-r--r--src/runtime/stubs_loong64.go2
-rw-r--r--src/runtime/stubs_mips64x.go2
-rw-r--r--src/runtime/stubs_mipsx.go2
-rw-r--r--src/runtime/stubs_ppc64x.go2
-rw-r--r--src/runtime/stubs_riscv64.go2
-rw-r--r--src/runtime/stubs_s390x.go2
-rw-r--r--src/runtime/testdata/testprog/schedmetrics.go6
-rw-r--r--src/runtime/testdata/testprogcgo/notingo.go107
-rw-r--r--src/runtime/trace/subscribe_test.go30
23 files changed, 307 insertions, 52 deletions
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 00e67aeca0..91f9740616 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -97,6 +97,13 @@ func runTestProg(t *testing.T, binary, name string, env ...string) string {
func runBuiltTestProg(t *testing.T, exe, name string, env ...string) string {
t.Helper()
+ out, _ := runBuiltTestProgErr(t, exe, name, env...)
+ return out
+}
+
+func runBuiltTestProgErr(t *testing.T, exe, name string, env ...string) (string, error) {
+ t.Helper()
+
if *flagQuick {
t.Skip("-quick")
}
@@ -120,7 +127,7 @@ func runBuiltTestProg(t *testing.T, exe, name string, env ...string) string {
t.Fatalf("%v failed to start: %v", cmd, err)
}
}
- return string(out)
+ return string(out), err
}
var serializeBuild = make(chan bool, 2)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 26341c4300..4f6ef9a3f2 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -551,8 +551,11 @@ func MapNextArenaHint() (start, end uintptr, ok bool) {
return
}
-func GetNextArenaHint() uintptr {
- return mheap_.arenaHints.addr
+func NextArenaHint() (uintptr, bool) {
+ if mheap_.arenaHints == nil {
+ return 0, false
+ }
+ return mheap_.arenaHints.addr, true
}
type G = g
diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go
index f5d2dd6372..9ab92d17c4 100644
--- a/src/runtime/goroutineleakprofile_test.go
+++ b/src/runtime/goroutineleakprofile_test.go
@@ -14,10 +14,13 @@ import (
)
func TestGoroutineLeakProfile(t *testing.T) {
- if strings.Contains(os.Getenv("GOFLAGS"), "mayMoreStackPreempt") {
- // Some tests have false negatives under mayMoreStackPreempt. This may be a test-only issue,
- // but needs more investigation.
- testenv.SkipFlaky(t, 75729)
+ // Some tests have false negatives under mayMoreStackPreempt and mayMoreStackMove.
+ // This may be a test-only issue in that they're just sensitive to scheduling, but it
+ // needs more investigation.
+ for _, cfg := range []string{"mayMoreStackPreempt", "mayMoreStackMove"} {
+ if strings.Contains(os.Getenv("GOFLAGS"), cfg) {
+ testenv.SkipFlaky(t, 75729)
+ }
}
// Goroutine leak test case.
@@ -486,10 +489,7 @@ func TestGoroutineLeakProfile(t *testing.T) {
testCases := append(microTests, stressTestCases...)
testCases = append(testCases, patternTestCases...)
- // Test cases must not panic or cause fatal exceptions.
- failStates := regexp.MustCompile(`fatal|panic|DATA RACE`)
-
- testApp := func(exepath string, testCases []testCase) {
+ runTests := func(exepath string, testCases []testCase) {
// Build the test program once.
exe, err := buildTestProg(t, exepath)
@@ -503,7 +503,7 @@ func TestGoroutineLeakProfile(t *testing.T) {
cmdEnv := []string{
"GODEBUG=asyncpreemptoff=1",
- "GOEXPERIMENT=greenteagc,goroutineleakprofile",
+ "GOEXPERIMENT=goroutineleakprofile",
}
if tcase.simple {
@@ -515,14 +515,14 @@ func TestGoroutineLeakProfile(t *testing.T) {
var output string
for i := 0; i < tcase.repetitions; i++ {
// Run program for one repetition and get runOutput trace.
- runOutput := runBuiltTestProg(t, exe, tcase.name, cmdEnv...)
+ runOutput, err := runBuiltTestProgErr(t, exe, tcase.name, cmdEnv...)
if len(runOutput) == 0 {
t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name)
}
-
- // Zero tolerance policy for fatal exceptions, panics, or data races.
- if failStates.MatchString(runOutput) {
- t.Errorf("unexpected fatal exception or panic\noutput:\n%s\n\n", runOutput)
+ // Test cases must not end in a non-zero exit code, or otherwise experience a failure to
+ // actually execute.
+ if err != nil {
+ t.Errorf("unexpected failure\noutput:\n%s\n\n", runOutput)
}
output += runOutput + "\n\n"
@@ -598,6 +598,6 @@ func TestGoroutineLeakProfile(t *testing.T) {
}
}
- testApp("testgoroutineleakprofile", testCases)
- testApp("testgoroutineleakprofile/goker", gokerTestCases)
+ runTests("testgoroutineleakprofile", testCases)
+ runTests("testgoroutineleakprofile/goker", gokerTestCases)
}
diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go
index 97cf0eed54..b76b0a02ac 100644
--- a/src/runtime/malloc_test.go
+++ b/src/runtime/malloc_test.go
@@ -664,10 +664,24 @@ func TestArenaCollision(t *testing.T) {
}
t.Logf("reserved [%#x, %#x)", start, end)
disallowed = append(disallowed, [2]uintptr{start, end})
+
+ hint, ok := NextArenaHint()
+ if !ok {
+ // We're out of arena hints. There's not much we can do now except give up.
+ // This might happen for a number of reasons, like if there's just something
+ // else already mapped in the address space where we put our hints. This is
+ // a bit more common than it used to be thanks to heap base randomization.
+ t.Skip("ran out of arena hints")
+ }
+
// Allocate until the runtime tries to use the hint we
// just mapped over.
- hint := GetNextArenaHint()
- for GetNextArenaHint() == hint {
+ for {
+ if next, ok := NextArenaHint(); !ok {
+ t.Skip("ran out of arena hints")
+ } else if next != hint {
+ break
+ }
ac := new(acLink)
arenaCollisionSink = append(arenaCollisionSink, ac)
// The allocation must not have fallen into
diff --git a/src/runtime/metrics_cgo_test.go b/src/runtime/metrics_cgo_test.go
new file mode 100644
index 0000000000..6cc9d23195
--- /dev/null
+++ b/src/runtime/metrics_cgo_test.go
@@ -0,0 +1,32 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo
+
+package runtime_test
+
+import (
+ "internal/race"
+ "runtime"
+ "testing"
+)
+
+func TestNotInGoMetricCallback(t *testing.T) {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ t.Skip("unsupported on Windows and Plan9")
+ case "freebsd":
+ if race.Enabled {
+ t.Skipf("race + cgo freebsd not supported. See https://go.dev/issue/73788.")
+ }
+ }
+
+ // This test is run in a subprocess to prevent other tests from polluting the metrics
+ // and because we need to make some cgo callbacks.
+ output := runTestProg(t, "testprogcgo", "NotInGoMetricCallback")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
+ }
+}
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 32cd8cb0e8..a3bed4b3eb 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -5,7 +5,7 @@
// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple
-// GC thread to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
+// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
diff --git a/src/runtime/os_wasm.go b/src/runtime/os_wasm.go
index 15137cc13f..e7a9a13d29 100644
--- a/src/runtime/os_wasm.go
+++ b/src/runtime/os_wasm.go
@@ -142,6 +142,8 @@ func preemptM(mp *m) {
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
func setProcessCPUProfiler(hz int32) {}
diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go
index dad14a4d09..fa0a8f5c53 100644
--- a/src/runtime/pinner.go
+++ b/src/runtime/pinner.go
@@ -12,7 +12,25 @@ import (
// A Pinner is a set of Go objects each pinned to a fixed location in memory. The
// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned
-// objects. See their comments for more information.
+// objects.
+//
+// The purpose of a Pinner is two-fold.
+// First, it allows C code to safely use Go pointers that have not been passed
+// explicitly to the C code via a cgo call.
+// For example, for safely interacting with a pointer stored inside of a struct
+// whose pointer is passed to a C function.
+// Second, it allows C memory to safely retain that Go pointer even after the
+// cgo call returns, provided the object remains pinned.
+//
+// A Pinner arranges for its objects to be automatically unpinned some time after
+// it becomes unreachable, so its referents will not leak. However, this means the
+// Pinner itself must be kept alive across a cgo call, or as long as C retains a
+// reference to the pinned Go pointers.
+//
+// Reusing a Pinner is safe, and in fact encouraged, to avoid the cost of
+// initializing new Pinners on first use.
+//
+// The zero value of Pinner is ready to use.
type Pinner struct {
*pinner
}
@@ -26,6 +44,7 @@ type Pinner struct {
// are going to be accessed from C code.
//
// The argument must be a pointer of any type or an [unsafe.Pointer].
+//
// It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
func (p *Pinner) Pin(pointer any) {
if p.pinner == nil {
@@ -63,6 +82,7 @@ func (p *Pinner) Pin(pointer any) {
}
// Unpin unpins all pinned objects of the [Pinner].
+// It's safe and encouraged to reuse a Pinner after calling Unpin.
func (p *Pinner) Unpin() {
p.pinner.unpin()
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 16538098cf..52def488ff 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2433,7 +2433,7 @@ func needm(signal bool) {
sp := sys.GetCallerSP()
callbackUpdateSystemStack(mp, sp, signal)
- // Should mark we are already in Go now.
+ // We must mark that we are already in Go now.
// Otherwise, we may call needm again when we get a signal, before cgocallbackg1,
// which means the extram list may be empty, that will cause a deadlock.
mp.isExtraInC = false
@@ -2455,7 +2455,8 @@ func needm(signal bool) {
// mp.curg is now a real goroutine.
casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
sched.ngsys.Add(-1)
- sched.nGsyscallNoP.Add(1)
+ // N.B. We do not update nGsyscallNoP, because isExtraInC threads are not
+ // counted as real goroutines while they're in C.
if !signal {
if trace.ok() {
@@ -2590,7 +2591,7 @@ func dropm() {
casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
mp.curg.preemptStop = false
sched.ngsys.Add(1)
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(mp)
if !mp.isExtraInSig {
if trace.ok() {
@@ -4732,7 +4733,7 @@ func entersyscallHandleGCWait(trace traceLocker) {
if trace.ok() {
trace.ProcStop(pp)
}
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(gp.m) // We gave up our P voluntarily.
pp.gcStopTime = nanotime()
pp.syscalltick++
if sched.stopwait--; sched.stopwait == 0 {
@@ -4763,7 +4764,7 @@ func entersyscallblock() {
gp.m.syscalltick = gp.m.p.ptr().syscalltick
gp.m.p.ptr().syscalltick++
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(gp.m) // We're going to give up our P.
// Leave SP around for GC and traceback.
pc := sys.GetCallerPC()
@@ -5001,8 +5002,8 @@ func exitsyscallTryGetP(oldp *p) *p {
if oldp != nil {
if thread, ok := setBlockOnExitSyscall(oldp); ok {
thread.takeP()
+ addGSyscallNoP(thread.mp) // takeP does the opposite, but this is a net zero change.
thread.resume()
- sched.nGsyscallNoP.Add(-1) // takeP adds 1.
return oldp
}
}
@@ -5017,7 +5018,7 @@ func exitsyscallTryGetP(oldp *p) *p {
}
unlock(&sched.lock)
if pp != nil {
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(getg().m) // We got a P for ourselves.
return pp
}
}
@@ -5043,7 +5044,7 @@ func exitsyscallNoP(gp *g) {
trace.GoSysExit(true)
traceRelease(trace)
}
- sched.nGsyscallNoP.Add(-1)
+ decGSyscallNoP(getg().m)
dropg()
lock(&sched.lock)
var pp *p
@@ -5081,6 +5082,41 @@ func exitsyscallNoP(gp *g) {
schedule() // Never returns.
}
+// addGSyscallNoP must be called when a goroutine in a syscall loses its P.
+// This function updates all relevant accounting.
+//
+// nosplit because it's called on the syscall paths.
+//
+//go:nosplit
+func addGSyscallNoP(mp *m) {
+ // It's safe to read isExtraInC here because it's only mutated
+ // outside of _Gsyscall, and we know this thread is attached
+ // to a goroutine in _Gsyscall and blocked from exiting.
+ if !mp.isExtraInC {
+ // Increment nGsyscallNoP since we're taking away a P
+ // from a _Gsyscall goroutine, but only if isExtraInC
+ // is not set on the M. If it is, then this thread is
+ // back to being a full C thread, and will just inflate
+ // the count of not-in-go goroutines. See go.dev/issue/76435.
+ sched.nGsyscallNoP.Add(1)
+ }
+}
+
+// decGSyscallNoP must be called whenever a goroutine in a syscall without
+// a P exits the system call. This function updates all relevant accounting.
+//
+// nosplit because it's called from dropm.
+//
+//go:nosplit
+func decGSyscallNoP(mp *m) {
+ // Update nGsyscallNoP, but only if this is not a thread coming
+ // out of C. See the comment in addGSyscallNoP. This logic must match,
+ // to avoid unmatched increments and decrements.
+ if !mp.isExtraInC {
+ sched.nGsyscallNoP.Add(-1)
+ }
+}
+
// Called from syscall package before fork.
//
// syscall_runtime_BeforeFork is for package syscall,
@@ -6758,7 +6794,7 @@ func (s syscallingThread) releaseP(state uint32) {
trace.ProcSteal(s.pp)
traceRelease(trace)
}
- sched.nGsyscallNoP.Add(1)
+ addGSyscallNoP(s.mp)
s.pp.syscalltick++
}
diff --git a/src/runtime/profbuf_test.go b/src/runtime/profbuf_test.go
index 2f068ac386..470c23dd41 100644
--- a/src/runtime/profbuf_test.go
+++ b/src/runtime/profbuf_test.go
@@ -232,11 +232,14 @@ func TestProfBufWakeup(t *testing.T) {
// The reader shouldn't wake up for this
b.Write(nil, 1, []uint64{1, 2}, []uintptr{3, 4})
- // The reader should still be blocked
- //
- // TODO(nick): this is racy. We could Gosched and still have the reader
- // blocked in a buggy implementation because it just didn't get a chance
- // to run
+ // The reader should still be blocked. The awaitBlockedGoroutine here
+ // checks that and also gives a buggy implementation a chance to
+ // actually wake up (it calls Gosched) before the next write. There is a
+ // small chance that a buggy implementation would have woken up but
+ // doesn't get scheduled by the time we do the next write. In that case
+ // the reader will see a more-than-half-full buffer and the test will
+ // pass. But if the implementation is broken, this test should fail
+ // regularly, even if not 100% of the time.
awaitBlockedGoroutine(waitStatus, "TestProfBufWakeup.func1")
b.Write(nil, 1, []uint64{5, 6}, []uintptr{7, 8})
b.Close()
@@ -247,7 +250,8 @@ func TestProfBufWakeup(t *testing.T) {
// see also runtime/pprof tests
func awaitBlockedGoroutine(state, fName string) {
- re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime_test\.%s`, regexp.QuoteMeta(state), fName)
+ // NB: this matches [state] as well as [state, n minutes]
+ re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s.*\]:\n(?:.+\n\t.+\n)*runtime_test\.%s`, regexp.QuoteMeta(state), fName)
r := regexp.MustCompile(re)
buf := make([]byte, 64<<10)
diff --git a/src/runtime/race_ppc64le.s b/src/runtime/race_ppc64le.s
index b327e49a2f..41cd232392 100644
--- a/src/runtime/race_ppc64le.s
+++ b/src/runtime/race_ppc64le.s
@@ -329,11 +329,13 @@ TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
TEXT sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
GO_ARGS
MOVD $__tsan_go_atomic32_fetch_and(SB), R8
+ ADD $32, R1, R6
BR racecallatomic<>(SB)
TEXT sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
GO_ARGS
MOVD $__tsan_go_atomic64_fetch_and(SB), R8
+ ADD $32, R1, R6
BR racecallatomic<>(SB)
TEXT sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
@@ -352,11 +354,13 @@ TEXT sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
TEXT sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
GO_ARGS
MOVD $__tsan_go_atomic32_fetch_or(SB), R8
+ ADD $32, R1, R6
BR racecallatomic<>(SB)
TEXT sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
GO_ARGS
MOVD $__tsan_go_atomic64_fetch_or(SB), R8
+ ADD $32, R1, R6
BR racecallatomic<>(SB)
TEXT sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index cd75e2dd7c..fde378ff25 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -945,7 +945,7 @@ type schedt struct {
nmfreed int64 // cumulative number of freed m's
ngsys atomic.Int32 // number of system goroutines
- nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P
+ nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P but whose M is not isExtraInC
pidle puintptr // idle p's
npidle atomic.Int32
diff --git a/src/runtime/stubs_386.go b/src/runtime/stubs_386.go
index a1dd023974..7db27cce87 100644
--- a/src/runtime/stubs_386.go
+++ b/src/runtime/stubs_386.go
@@ -21,4 +21,6 @@ func asmcgocall_no_g(fn, arg unsafe.Pointer)
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_arm.go b/src/runtime/stubs_arm.go
index e19f1a87b2..49bfd9ef04 100644
--- a/src/runtime/stubs_arm.go
+++ b/src/runtime/stubs_arm.go
@@ -26,4 +26,6 @@ func asmcgocall_no_g(fn, arg unsafe.Pointer)
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_loong64.go b/src/runtime/stubs_loong64.go
index 4576089b0b..88d5985db0 100644
--- a/src/runtime/stubs_loong64.go
+++ b/src/runtime/stubs_loong64.go
@@ -19,4 +19,6 @@ func unspillArgs()
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_mips64x.go b/src/runtime/stubs_mips64x.go
index f0cf088620..fb5220b0de 100644
--- a/src/runtime/stubs_mips64x.go
+++ b/src/runtime/stubs_mips64x.go
@@ -17,4 +17,6 @@ func asmcgocall_no_g(fn, arg unsafe.Pointer)
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_mipsx.go b/src/runtime/stubs_mipsx.go
index 84ba147b85..175d4f9741 100644
--- a/src/runtime/stubs_mipsx.go
+++ b/src/runtime/stubs_mipsx.go
@@ -12,4 +12,6 @@ func save_g()
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_ppc64x.go b/src/runtime/stubs_ppc64x.go
index 36b01a72b1..dbc82c8453 100644
--- a/src/runtime/stubs_ppc64x.go
+++ b/src/runtime/stubs_ppc64x.go
@@ -23,4 +23,6 @@ func unspillArgs()
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_riscv64.go b/src/runtime/stubs_riscv64.go
index 61a6e33bd4..2306ba878b 100644
--- a/src/runtime/stubs_riscv64.go
+++ b/src/runtime/stubs_riscv64.go
@@ -22,4 +22,6 @@ func unspillArgs()
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/stubs_s390x.go b/src/runtime/stubs_s390x.go
index 6d704e8200..144e3cdf91 100644
--- a/src/runtime/stubs_s390x.go
+++ b/src/runtime/stubs_s390x.go
@@ -17,4 +17,6 @@ func unspillArgs()
// getfp returns the frame pointer register of its caller or 0 if not implemented.
// TODO: Make this a compiler intrinsic
+//
+//go:nosplit
func getfp() uintptr { return 0 }
diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go
index 8e8abc4484..7fad95a976 100644
--- a/src/runtime/testdata/testprog/schedmetrics.go
+++ b/src/runtime/testdata/testprog/schedmetrics.go
@@ -91,8 +91,10 @@ func SchedMetrics() {
// threads through frequent scheduling, like mayMoreStackPreempt.
// A slack of 5 is arbitrary but appears to be enough to cover
// the leftovers plus any inflation from scheduling-heavy build
- // modes.
- const threadsSlack = 5
+ // modes. We then also add initialGMP to this slack, since we're
+ // about to call runtime.GC, and in the worst case this will
+ // spin up GOMAXPROCS new threads to run those workers.
+ threadsSlack := 5 + uint64(initialGMP)
// Make sure GC isn't running, since GC workers interfere with
// expected counts.
diff --git a/src/runtime/testdata/testprogcgo/notingo.go b/src/runtime/testdata/testprogcgo/notingo.go
new file mode 100644
index 0000000000..5af4c00e1f
--- /dev/null
+++ b/src/runtime/testdata/testprogcgo/notingo.go
@@ -0,0 +1,107 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9 && !windows
+
+package main
+
+/*
+#include <stdatomic.h>
+#include <stddef.h>
+#include <pthread.h>
+
+extern void Ready();
+
+static _Atomic int spinning;
+static _Atomic int released;
+
+static void* enterGoThenSpinTwice(void* arg __attribute__ ((unused))) {
+ Ready();
+ atomic_fetch_add(&spinning, 1);
+ while(atomic_load(&released) == 0) {};
+
+ Ready();
+ atomic_fetch_add(&spinning, 1);
+ while(1) {};
+ return NULL;
+}
+
+static void SpinTwiceInNewCThread() {
+ pthread_t tid;
+ pthread_create(&tid, NULL, enterGoThenSpinTwice, NULL);
+}
+
+static int Spinning() {
+ return atomic_load(&spinning);
+}
+
+static void Release() {
+ atomic_store(&spinning, 0);
+ atomic_store(&released, 1);
+}
+*/
+import "C"
+
+import (
+ "os"
+ "runtime"
+ "runtime/metrics"
+)
+
+func init() {
+ register("NotInGoMetricCallback", NotInGoMetricCallback)
+}
+
+func NotInGoMetricCallback() {
+ const N = 10
+ s := []metrics.Sample{{Name: "/sched/goroutines/not-in-go:goroutines"}}
+
+ // Create N new C threads that have called into Go at least once.
+ for range N {
+ C.SpinTwiceInNewCThread()
+ }
+
+ // Synchronize with spinning threads twice.
+ //
+ // This helps catch bad accounting by taking at least a couple other
+ // codepaths which would cause the accounting to change.
+ for i := range 2 {
+ // Make sure they pass through Go.
+ // N.B. Ready is called twice by the new threads.
+ for j := range N {
+ <-readyCh
+ if j == 2 {
+ // Try to trigger an update in the immediate STW handoff case.
+ runtime.ReadMemStats(&m)
+ }
+ }
+
+ // Make sure they're back in C.
+ for C.Spinning() < N {
+ }
+
+ // Do something that stops the world to take all the Ps back.
+ runtime.ReadMemStats(&m)
+
+ if i == 0 {
+ C.Release()
+ }
+ }
+
+ // Read not-in-go.
+ metrics.Read(s)
+ if n := s[0].Value.Uint64(); n != 0 {
+ println("expected 0 not-in-go goroutines, found", n)
+ os.Exit(2)
+ }
+ println("OK")
+}
+
+var m runtime.MemStats
+var readyCh = make(chan bool)
+
+//export Ready
+func Ready() {
+ readyCh <- true
+}
diff --git a/src/runtime/trace/subscribe_test.go b/src/runtime/trace/subscribe_test.go
index 0e6c57cbc6..6378c3401a 100644
--- a/src/runtime/trace/subscribe_test.go
+++ b/src/runtime/trace/subscribe_test.go
@@ -16,11 +16,17 @@ import (
)
func TestSubscribers(t *testing.T) {
- validate := func(t *testing.T, source string, tr io.Reader) {
+ validate := func(t *testing.T, source string, tr *bytes.Buffer) {
+ defer func() {
+ if t.Failed() {
+ testtrace.Dump(t, "trace", tr.Bytes(), *dumpTraces)
+ }
+ }()
+
// Prepare to read the trace snapshot.
r, err := inttrace.NewReader(tr)
if err != nil {
- t.Fatalf("unexpected error creating trace reader for %s: %v", source, err)
+ t.Errorf("unexpected error creating trace reader for %s: %v", source, err)
return
}
@@ -38,26 +44,28 @@ func TestSubscribers(t *testing.T) {
break
}
if err != nil {
- t.Fatalf("unexpected error reading trace for %s: %v", source, err)
+ t.Errorf("unexpected error reading trace for %s: %v", source, err)
}
if err := v.Event(ev); err != nil {
- t.Fatalf("event validation failed: %s", err)
+ t.Errorf("event validation failed: %s", err)
}
if ev.Kind() == inttrace.EventSync {
syncs = append(syncs, evs)
}
evs++
}
- ends := []int{syncs[0], syncs[len(syncs)-1]}
- if wantEnds := []int{0, evs - 1}; !slices.Equal(wantEnds, ends) {
- t.Errorf("expected a sync event at each end of the trace, found sync events at %d instead of %d for %s",
- ends, wantEnds, source)
+ if !t.Failed() {
+ ends := []int{syncs[0], syncs[len(syncs)-1]}
+ if wantEnds := []int{0, evs - 1}; !slices.Equal(wantEnds, ends) {
+ t.Errorf("expected a sync event at each end of the trace, found sync events at %d instead of %d for %s",
+ ends, wantEnds, source)
+ }
}
}
- validateTraces := func(t *testing.T, tReader, frReader io.Reader) {
- validate(t, "tracer", tReader)
- validate(t, "flightRecorder", frReader)
+ validateTraces := func(t *testing.T, trace, frTrace *bytes.Buffer) {
+ validate(t, "tracer", trace)
+ validate(t, "flightRecorder", frTrace)
}
startFlightRecorder := func(t *testing.T) *trace.FlightRecorder {
fr := trace.NewFlightRecorder(trace.FlightRecorderConfig{})