diff options
| author | Keith Randall <khr@golang.org> | 2025-02-11 15:02:08 -0800 |
|---|---|---|
| committer | Keith Randall <khr@golang.org> | 2025-04-25 12:21:48 -0700 |
| commit | 3f3782feed6e0726ddb08afd32dad7d94fbb38c6 (patch) | |
| tree | 1fdceec931e4169b405a1909f624b7987b1adc6b /src/runtime | |
| parent | dc1e255104f3fee2589da61b7fa80627beb885f4 (diff) | |
| download | go-3f3782feed6e0726ddb08afd32dad7d94fbb38c6.tar.xz | |
cmd/compile: allow all of the preamble to be preemptible
We currently make some parts of the preamble unpreemptible because
it confuses morestack. See comments in the code.
Instead, have morestack handle those weird cases so we can
remove unpreemptible marks from most places.
This CL makes user functions preemptible everywhere if they have no
write barriers (at least, on x86). In cmd/go the fraction of functions
that need unpreemptible markings drops from 82% to 36%. Makes the cmd/go
binary 0.3% smaller.
Updates #35470
Change-Id: Ic83d5eabfd0f6d239a92e65684bcce7e67ff30bb
Reviewed-on: https://go-review.googlesource.com/c/go/+/648518
Auto-Submit: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime')
| -rw-r--r-- | src/runtime/preempt.go | 1 | ||||
| -rw-r--r-- | src/runtime/runtime2.go | 39 | ||||
| -rw-r--r-- | src/runtime/stack.go | 23 |
3 files changed, 47 insertions(+), 16 deletions(-)
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index c41c355835..364929f635 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -173,6 +173,7 @@ func suspendG(gp *g) suspendGState { // _Gscan bit and thus own the stack. gp.preemptStop = false gp.preempt = false + gp.preemptRecent = true gp.stackguard0 = gp.stack.lo + stackGuard // The goroutine was already at a safe-point diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 05cf345baf..f42c940b8e 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -466,22 +466,29 @@ type g struct { runnableTime int64 // the amount of time spent runnable, cleared when running, only used when tracking lockedm muintptr fipsIndicator uint8 - sig uint32 - writebuf []byte - sigcode0 uintptr - sigcode1 uintptr - sigpc uintptr - parentGoid uint64 // goid of goroutine that created this goroutine - gopc uintptr // pc of go statement that created this goroutine - ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors) - startpc uintptr // pc of goroutine function - racectx uintptr - waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order - cgoCtxt []uintptr // cgo traceback context - labels unsafe.Pointer // profiler labels - timer *timer // cached timer for time.Sleep - sleepWhen int64 // when to sleep until - selectDone atomic.Uint32 // are we participating in a select and did someone win the race? + + // preemptRecent is set when a goroutine is preempted. It is + // reset by code passing through the synchronous preemption + // path. It is used to avoid growing the stack when we were + // just preempting, see issue 35470. 
+ preemptRecent bool + + sig uint32 + writebuf []byte + sigcode0 uintptr + sigcode1 uintptr + sigpc uintptr + parentGoid uint64 // goid of goroutine that created this goroutine + gopc uintptr // pc of go statement that created this goroutine + ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors) + startpc uintptr // pc of goroutine function + racectx uintptr + waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order + cgoCtxt []uintptr // cgo traceback context + labels unsafe.Pointer // profiler labels + timer *timer // cached timer for time.Sleep + sleepWhen int64 // when to sleep until + selectDone atomic.Uint32 // are we participating in a select and did someone win the race? // goroutineProfiled indicates the status of this goroutine's stack for the // current in-progress goroutine profile diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 2fedaa9421..67a394cd12 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -1075,6 +1075,29 @@ func newstack() { gopreempt_m(gp) // never return } + if stackguard0 == gp.stack.lo+stackGuard && gp.preemptRecent { + // The case happens because of an interaction between synchronous + // and asynchronous preemption. First, we set the cooperative + // preemption signal (g.stackguard0 = stackPreempt), and as a + // result the function fails the stack check and enters its + // morestack path. If it gets suspended at that point, we might + // give up waiting for it and send an async preempt. That async + // preempt gets processed and clears the cooperative preemption + // signal (g.stackguard0 = g.stack.lo+stackGuard) and resumes + // the function. But even though the cooperative preemption + // signal is cleared, we're already on the morestack path and + // can't avoid calling morestack. See issue 35470. 
+ // + // To avoid this problem, if we've been preempted recently, + // clear the "preempted recently" flag and resume the G. + // If we really did need more stack, the morestack check will + // immediately fail and we'll get back here to try again (with + // preemptRecent==false, so we don't take this case the + // second time). + gp.preemptRecent = false + gogo(&gp.sched) // never return + } + // Allocate a bigger segment and move the stack. oldsize := gp.stack.hi - gp.stack.lo newsize := oldsize * 2 |
