aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/preempt.go  |  1
-rw-r--r--  src/runtime/runtime2.go | 39
-rw-r--r--  src/runtime/stack.go    | 23
3 files changed, 47 insertions, 16 deletions
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index c41c355835..364929f635 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -173,6 +173,7 @@ func suspendG(gp *g) suspendGState {
// _Gscan bit and thus own the stack.
gp.preemptStop = false
gp.preempt = false
+ gp.preemptRecent = true
gp.stackguard0 = gp.stack.lo + stackGuard
// The goroutine was already at a safe-point
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 05cf345baf..f42c940b8e 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -466,22 +466,29 @@ type g struct {
runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking
lockedm muintptr
fipsIndicator uint8
- sig uint32
- writebuf []byte
- sigcode0 uintptr
- sigcode1 uintptr
- sigpc uintptr
- parentGoid uint64 // goid of goroutine that created this goroutine
- gopc uintptr // pc of go statement that created this goroutine
- ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
- startpc uintptr // pc of goroutine function
- racectx uintptr
- waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
- cgoCtxt []uintptr // cgo traceback context
- labels unsafe.Pointer // profiler labels
- timer *timer // cached timer for time.Sleep
- sleepWhen int64 // when to sleep until
- selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
+
+ // preemptRecent is set when a goroutine is preempted. It is
+ // reset by code passing through the synchronous preemption
+ // path. It is used to avoid growing the stack when we were
+ just preempting; see issue 35470.
+ preemptRecent bool
+
+ sig uint32
+ writebuf []byte
+ sigcode0 uintptr
+ sigcode1 uintptr
+ sigpc uintptr
+ parentGoid uint64 // goid of goroutine that created this goroutine
+ gopc uintptr // pc of go statement that created this goroutine
+ ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
+ startpc uintptr // pc of goroutine function
+ racectx uintptr
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
+ cgoCtxt []uintptr // cgo traceback context
+ labels unsafe.Pointer // profiler labels
+ timer *timer // cached timer for time.Sleep
+ sleepWhen int64 // when to sleep until
+ selectDone atomic.Uint32 // are we participating in a select and did someone win the race?
// goroutineProfiled indicates the status of this goroutine's stack for the
// current in-progress goroutine profile
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 2fedaa9421..67a394cd12 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -1075,6 +1075,29 @@ func newstack() {
gopreempt_m(gp) // never return
}
+ if stackguard0 == gp.stack.lo+stackGuard && gp.preemptRecent {
+ // This case happens because of an interaction between synchronous
+ // and asynchronous preemption. First, we set the cooperative
+ // preemption signal (g.stackguard0 = stackPreempt), and as a
+ // result the function fails the stack check and enters its
+ // morestack path. If it gets suspended at that point, we might
+ // give up waiting for it and send an async preempt. That async
+ // preempt gets processed and clears the cooperative preemption
+ // signal (g.stackguard0 = g.stack.lo+stackGuard) and resumes
+ // the function. But even though the cooperative preemption
+ // signal is cleared, we're already on the morestack path and
+ // can't avoid calling morestack. See issue 35470.
+ //
+ // To avoid this problem, if we've been preempted recently,
+ // clear the "preempted recently" flag and resume the G.
+ // If we really did need more stack, the morestack check will
+ // immediately fail and we'll get back here to try again (with
+ // preemptRecent==false, so we don't take this case the
+ // second time).
+ gp.preemptRecent = false
+ gogo(&gp.sched) // never return
+ }
+
// Allocate a bigger segment and move the stack.
oldsize := gp.stack.hi - gp.stack.lo
newsize := oldsize * 2