diff options
| author | Russ Cox <rsc@golang.org> | 2024-02-14 20:36:47 -0500 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2024-03-13 21:36:04 +0000 |
| commit | 508bb17edd04479622fad263cd702deac1c49157 (patch) | |
| tree | e30588bcd75e92b7d3ecb59645e37a9356afa25f /src/runtime | |
| parent | 74a0e3160d969fac27a65cd79a76214f6d1abbf5 (diff) | |
| download | go-508bb17edd04479622fad263cd702deac1c49157.tar.xz | |
time: garbage collect unstopped Tickers and Timers
From the beginning of Go, the time package has had a gotcha:
if you use a select on <-time.After(1*time.Minute), even if the select
finishes immediately because some other case is ready, the underlying
timer from time.After keeps running until the minute is over. This
pins the timer in the timer heap, which keeps it from being garbage
collected and in extreme cases also slows down timer operations.
The lack of garbage collection is the more important problem.
The docs for After warn against this scenario and suggest using
NewTimer with a call to Stop after the select instead, purely to work
around this garbage collection problem.
Oddly, the docs for NewTimer and NewTicker do not mention this
problem, but they have the same issue: they cannot be collected until
either they are Stopped or, in the case of Timer, the timer expires.
(Tickers repeat, so they never expire.) People have built up a shared
knowledge that timers and tickers need to defer t.Stop even though the
docs do not mention this (it is somewhat implied by the After docs).
This CL fixes the garbage collection problem, so that a timer that is
unreferenced can be GC'ed immediately, even if it is still running.
The approach is to only insert the timer into the heap when some
channel operation is blocked on it; the last channel operation to stop
using the timer takes it back out of the heap. When a timer's channel
is no longer referenced, there are no channel operations blocked on
it, so it's not in the heap, so it can be GC'ed immediately.
This CL adds an undocumented GODEBUG setting, asynctimerchan=1,
that disables the change. The documentation is added in
CL 568341.
Fixes #8898.
Fixes #61542.
Change-Id: Ieb303b6de1fb3527d3256135151a9e983f3c27e6
Reviewed-on: https://go-review.googlesource.com/c/go/+/512355
Reviewed-by: Austin Clements <austin@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Russ Cox <rsc@golang.org>
Diffstat (limited to 'src/runtime')
| -rw-r--r-- | src/runtime/chan.go | 25 | ||||
| -rw-r--r-- | src/runtime/lockrank.go | 48 | ||||
| -rw-r--r-- | src/runtime/mgcscavenge.go | 2 | ||||
| -rw-r--r-- | src/runtime/mklockrank.go | 9 | ||||
| -rw-r--r-- | src/runtime/netpoll.go | 6 | ||||
| -rw-r--r-- | src/runtime/runtime1.go | 25 | ||||
| -rw-r--r-- | src/runtime/select.go | 11 | ||||
| -rw-r--r-- | src/runtime/time.go | 167 | ||||
| -rw-r--r-- | src/runtime/trace2.go | 2 |
9 files changed, 241 insertions, 54 deletions
diff --git a/src/runtime/chan.go b/src/runtime/chan.go index b14ebc31d2..ae26be3c42 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -36,6 +36,7 @@ type hchan struct { buf unsafe.Pointer // points to an array of dataqsiz elements elemsize uint16 closed uint32 + timer *timer // timer feeding this chan elemtype *_type // element type sendx uint // send index recvx uint // receive index @@ -426,12 +427,19 @@ func closechan(c *hchan) { } // empty reports whether a read from c would block (that is, the channel is -// empty). It uses a single atomic read of mutable state. +// empty). It is atomically correct and sequentially consistent at the moment +// it returns, but since the channel is unlocked, the channel may become +// non-empty immediately afterward. func empty(c *hchan) bool { // c.dataqsiz is immutable. if c.dataqsiz == 0 { return atomic.Loadp(unsafe.Pointer(&c.sendq.first)) == nil } + // c.timer is also immutable (it is set after make(chan) but before any channel operations). + // All timer channels have dataqsiz > 0. + if c.timer != nil { + c.timer.maybeRunChan() + } return atomic.Loaduint(&c.qcount) == 0 } @@ -470,6 +478,10 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) throw("unreachable") } + if c.timer != nil { + c.timer.maybeRunChan() + } + // Fast path: check for failed non-blocking operation without acquiring the lock. if !block && empty(c) { // After observing that the channel is not ready for receiving, we observe whether the @@ -570,11 +582,16 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) mysg.elem = ep mysg.waitlink = nil gp.waiting = mysg + mysg.g = gp mysg.isSelect = false mysg.c = c gp.param = nil c.recvq.enqueue(mysg) + if c.timer != nil { + blockTimerChan(c) + } + // Signal to anyone trying to shrink our stack that we're about // to park on a channel. 
The window between when this G's status // changes and when we set gp.activeStackChans is not safe for @@ -586,6 +603,9 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) if mysg != gp.waiting { throw("G waiting list is corrupted") } + if c.timer != nil { + unblockTimerChan(c) + } gp.waiting = nil gp.activeStackChans = false if mysg.releasetime > 0 { @@ -728,6 +748,9 @@ func chanlen(c *hchan) int { if c == nil { return 0 } + if c.timer != nil { + c.timer.maybeRunChan() + } return int(c.qcount) } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 199a466a59..a6f61240db 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -33,11 +33,11 @@ const ( lockRankSched lockRankAllg lockRankAllp + lockRankNotifyList + lockRankSudog lockRankTimers lockRankTimer lockRankNetpollInit - lockRankNotifyList - lockRankSudog lockRankRoot lockRankItab lockRankReflectOffs @@ -103,11 +103,11 @@ var lockNames = []string{ lockRankSched: "sched", lockRankAllg: "allg", lockRankAllp: "allp", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", lockRankTimers: "timers", lockRankTimer: "timer", lockRankNetpollInit: "netpollInit", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", lockRankRoot: "root", lockRankItab: "itab", lockRankReflectOffs: "reflectOffs", @@ -180,35 +180,35 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, 
lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, lockRankRoot: {}, lockRankItab: {}, lockRankReflectOffs: {lockRankItab}, lockRankUserArenaState: {}, lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, 
lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, 
lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, 
lockRankNetpollInit, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, 
lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, 
lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, 
lockRankTimer, lockRankNetpollInit, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, 
lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, 
lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, 
lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, 
lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, 
lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankPanic: {}, lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, lockRankRaceFini: {lockRankPanic}, diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go index 4672d55fd5..9c76f8dd23 100644 --- a/src/runtime/mgcscavenge.go +++ b/src/runtime/mgcscavenge.go @@ -361,7 +361,7 @@ func (s *scavengerState) init() { s.g = getg() s.timer = 
new(timer) - f := func(s any, _ uintptr) { + f := func(s any, _ uintptr, _ int64) { s.(*scavengerState).wake() } s.timer.init(f, s) diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 150393acce..75dc6f62cf 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -72,16 +72,17 @@ assistQueue, < SCHED # Below SCHED is the scheduler implementation. < allocmR, - execR -< sched; + execR; +allocmR, execR, hchan < sched; sched < allg, allp; -hchan, pollDesc, wakeableSleep < timers; -timers < timer < netpollInit; # Channels NONE < notifyList; hchan, notifyList < sudog; +hchan, pollDesc, wakeableSleep < timers; +timers < timer < netpollInit; + # Semaphores NONE < root; diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index 23e5d408fc..2c5c262c58 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -658,15 +658,15 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) { netpollAdjustWaiters(delta) } -func netpollDeadline(arg any, seq uintptr) { +func netpollDeadline(arg any, seq uintptr, delta int64) { netpolldeadlineimpl(arg.(*pollDesc), seq, true, true) } -func netpollReadDeadline(arg any, seq uintptr) { +func netpollReadDeadline(arg any, seq uintptr, delta int64) { netpolldeadlineimpl(arg.(*pollDesc), seq, true, false) } -func netpollWriteDeadline(arg any, seq uintptr) { +func netpollWriteDeadline(arg any, seq uintptr, delta int64) { netpolldeadlineimpl(arg.(*pollDesc), seq, false, true) } diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index afe1bdd298..8c8b20aa57 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -339,12 +339,25 @@ var debug struct { sbrk int32 panicnil atomic.Int32 + + // asynctimerchan controls whether timer channels + // behave asynchronously (as in Go 1.22 and earlier) + // instead of their Go 1.23+ synchronous behavior. 
+ // The value can change at any time (in response to os.Setenv("GODEBUG")) + // and affects all extant timer channels immediately. + // Programs wouldn't normally change over an execution, + // but allowing it is convenient for testing and for programs + // that do an os.Setenv in main.init or main.main. + asynctimerchan atomic.Int32 } var dbgvars = []*dbgVar{ + {name: "adaptivestackstart", value: &debug.adaptivestackstart}, {name: "allocfreetrace", value: &debug.allocfreetrace}, - {name: "clobberfree", value: &debug.clobberfree}, + {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, + {name: "asynctimerchan", atomic: &debug.asynctimerchan}, {name: "cgocheck", value: &debug.cgocheck}, + {name: "clobberfree", value: &debug.clobberfree}, {name: "disablethp", value: &debug.disablethp}, {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, {name: "efence", value: &debug.efence}, @@ -353,21 +366,19 @@ var dbgvars = []*dbgVar{ {name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff}, {name: "gcstoptheworld", value: &debug.gcstoptheworld}, {name: "gctrace", value: &debug.gctrace}, + {name: "harddecommit", value: &debug.harddecommit}, + {name: "inittrace", value: &debug.inittrace}, {name: "invalidptr", value: &debug.invalidptr}, {name: "madvdontneed", value: &debug.madvdontneed}, + {name: "panicnil", atomic: &debug.panicnil}, {name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks}, {name: "sbrk", value: &debug.sbrk}, {name: "scavtrace", value: &debug.scavtrace}, {name: "scheddetail", value: &debug.scheddetail}, {name: "schedtrace", value: &debug.schedtrace}, + {name: "traceadvanceperiod", value: &debug.traceadvanceperiod}, {name: "tracebackancestors", value: &debug.tracebackancestors}, - {name: "asyncpreemptoff", value: &debug.asyncpreemptoff}, - {name: "inittrace", value: &debug.inittrace}, - {name: "harddecommit", value: &debug.harddecommit}, - {name: "adaptivestackstart", value: &debug.adaptivestackstart}, {name: "tracefpunwindoff", 
value: &debug.tracefpunwindoff}, - {name: "panicnil", atomic: &debug.panicnil}, - {name: "traceadvanceperiod", value: &debug.traceadvanceperiod}, } func parsedebugvars() { diff --git a/src/runtime/select.go b/src/runtime/select.go index b3a3085cb0..17c49d7484 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -173,6 +173,10 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo continue } + if cas.c.timer != nil { + cas.c.timer.maybeRunChan() + } + j := cheaprandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) @@ -315,6 +319,10 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo } else { c.recvq.enqueue(sg) } + + if c.timer != nil { + blockTimerChan(c) + } } // wait for someone to wake us up @@ -351,6 +359,9 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo for _, casei := range lockorder { k = &scases[casei] + if k.c.timer != nil { + unblockTimerChan(k.c) + } if sg == sglist { // sg has already been dequeued by the G that woke us up. casi = int(casei) diff --git a/src/runtime/time.go b/src/runtime/time.go index adbe8ac126..4b179d84fc 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,15 +26,32 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock; can be read without lock - state uint8 // state bits + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) - // each time calling f(arg, now) in the timer goroutine, so f must be + // each time calling f(arg, seq, delay) in the timer goroutine, so f must be // a well-behaved function and not block. 
+ // + // The arg and seq are client-specified opaque arguments passed back to f. + // When used from package time, arg is a channel (for After, NewTicker) + // or the function to call (for AfterFunc) and seq is unused (0). + // When used from netpoll, arg and seq have meanings defined by netpoll + // and are completely opaque to this code; in that context, seq is a sequence + // number to recognize and squelch stale function invocations. + // + // The delay argument is nanotime() - t.when, meaning the delay in ns between + // when the timer should have gone off and now. Normally that amount is + // small enough not to matter, but for channel timers that are fed lazily, + // the delay can be arbitrarily long; package time subtracts it out to make + // it look like the send happened earlier than it actually did. + // (No one looked at the channel since then, or the send would have + // not happened so late, so no one can tell the difference.) when int64 period int64 - f func(any, uintptr) + f func(arg any, seq uintptr, delay int64) arg any seq uintptr @@ -58,7 +75,7 @@ type timer struct { // Any code that allocates a timer must call t.init before using it. // The arg and f can be set during init, or they can be nil in init // and set by a future call to t.modify. -func (t *timer) init(f func(any, uintptr), arg any) { +func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) { lockInit(&t.mu, lockRankTimer) t.f = f t.arg = arg @@ -130,6 +147,9 @@ const ( // Only set when timerHeaped is also set. // It is possible for timerModified and timerZombie to both // be set, meaning that the timer was modified and then stopped. + // A timer sending to a channel may be placed in timerZombie + // to take it out of the heap even though the timer is not stopped, + // as long as nothing is reading from the channel. 
timerZombie ) @@ -146,13 +166,16 @@ func (t *timer) trace1(op string) { if !timerDebug { return } - bits := [3]string{"h", "m", "z"} + bits := [4]string{"h", "m", "z", "c"} for i := range bits { if t.state&(1<<i) == 0 { bits[i] = "-" } } - print("T ", t, " ", bits[0], bits[1], bits[2], " ", op, "\n") + if !t.isChan { + bits[3] = "-" + } + print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n") } func (ts *timers) trace(op string) { @@ -171,6 +194,7 @@ func (t *timer) lock() { func (t *timer) unlock() { t.trace("unlock") // Let heap fast paths know whether t.whenHeap is accurate. + // Also let maybeRunChan know whether channel is in heap. t.astate.Store(t.state) unlock(&t.mu) } @@ -277,13 +301,20 @@ type timeTimer struct { // with the given parameters. // //go:linkname newTimer time.newTimer -func newTimer(when, period int64, f func(any, uintptr), arg any) *timeTimer { +func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer { t := new(timeTimer) t.timer.init(nil, nil) t.trace("new") if raceenabled { racerelease(unsafe.Pointer(&t.timer)) } + if c != nil { + t.isChan = true + c.timer = &t.timer + if c.dataqsiz == 0 { + throw("invalid timer channel: no capacity") + } + } t.modify(when, period, f, arg, 0) t.init = true return t @@ -312,7 +343,7 @@ func resetTimer(t *timeTimer, when, period int64) bool { // Go runtime. // Ready the goroutine arg. -func goroutineReady(arg any, seq uintptr) { +func goroutineReady(arg any, _ uintptr, _ int64) { goready(arg.(*g), 0) } @@ -348,6 +379,17 @@ func (ts *timers) addHeap(t *timer) { func (t *timer) stop() bool { t.lock() t.trace("stop") + if t.state&timerHeaped == 0 && t.isChan && t.when > 0 { + // If timer should have triggered already (but nothing looked at it yet), + // trigger now, so that a receive after the stop sees the "old" value + // that should be there. 
+ if now := nanotime(); t.when <= now { + systemstack(func() { + t.unlockAndRun(now) // resets t.when + }) + t.lock() + } + } if t.state&timerHeaped != 0 { t.state |= timerModified if t.state&timerZombie == 0 { @@ -390,7 +432,7 @@ func (ts *timers) deleteMin() { // This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset. // Reports whether the timer was modified before it was run. // If f == nil, then t.f, t.arg, and t.seq are not modified. -func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq uintptr) bool { +func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool { if when <= 0 { throw("timer when must be positive") } @@ -407,6 +449,20 @@ func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq ui t.seq = seq } + if t.state&timerHeaped == 0 && t.isChan && t.when > 0 { + // This is a timer for an unblocked channel. + // Perhaps it should have expired already. + if now := nanotime(); t.when <= now { + // The timer should have run already, + // but nothing has checked it yet. + // Run it now. + systemstack(func() { + t.unlockAndRun(now) // resets t.when + }) + t.lock() + } + } + wake := false pending := t.when > 0 t.when = when @@ -442,7 +498,7 @@ func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq ui // t must be locked. func (t *timer) needsAdd() bool { assertLockHeld(&t.mu) - need := t.state&timerHeaped == 0 && t.when > 0 + need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0) if need { t.trace("needsAdd+") } else { @@ -466,7 +522,7 @@ func (t *timer) needsAdd() bool { // too clever and respect the static ordering. // (If we don't, we have to change the static lock checking of t and ts.) 
// -// Concurrent calls to time.Timer.Reset +// Concurrent calls to time.Timer.Reset or blockTimerChan // may result in concurrent calls to t.maybeAdd, // so we cannot assume that t is not in a heap on entry to t.maybeAdd. func (t *timer) maybeAdd() { @@ -869,7 +925,7 @@ func (t *timer) unlockAndRun(now int64) { if ts != nil { ts.unlock() } - f(arg, seq) + f(arg, seq, delay) if ts != nil { ts.lock() } @@ -1052,3 +1108,88 @@ func (ts *timers) initHeap() { func badTimer() { throw("timer data corruption") } + +// Timer channels. + +// maybeRunChan checks whether the timer needs to run +// to send a value to its associated channel. If so, it does. +// The timer must not be locked. +func (t *timer) maybeRunChan() { + if t.astate.Load()&timerHeaped != 0 { + // If the timer is in the heap, the ordinary timer code + // is in charge of sending when appropriate. + return + } + + t.lock() + now := nanotime() + if t.state&timerHeaped != 0 || t.when == 0 || t.when > now { + t.trace("maybeRunChan-") + // Timer in the heap, or not running at all, or not triggered. + t.unlock() + return + } + t.trace("maybeRunChan+") + systemstack(func() { + t.unlockAndRun(now) + }) +} + +// blockTimerChan is called when a channel op has decided to block on c. +// The caller holds the channel lock for c and possibly other channels. +// blockTimerChan makes sure that c is in a timer heap, +// adding it if needed. +func blockTimerChan(c *hchan) { + t := c.timer + t.lock() + t.trace("blockTimerChan") + if !t.isChan { + badTimer() + } + + t.blocked++ + + // If this is the first enqueue after a recent dequeue, + // the timer may still be in the heap but marked as a zombie. + // Unmark it in this case, if the timer is still pending. + if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 { + t.state &^= timerZombie + t.ts.zombies.Add(-1) + } + + // t.maybeAdd must be called with t unlocked, + // because it needs to lock t.ts before t. 
+ // Then it will do nothing if t.needsAdd(state) is false. + // Check that now before the unlock, + // avoiding the extra lock-lock-unlock-unlock + // inside maybeAdd when t does not need to be added. + add := t.needsAdd() + t.unlock() + if add { + t.maybeAdd() + } +} + +// unblockTimerChan is called when a channel op that was blocked on c +// is no longer blocked. Every call to blockTimerChan must be paired with +// a call to unblockTimerChan. +// The caller holds the channel lock for c and possibly other channels. +// unblockTimerChan removes c from the timer heap when nothing is +// blocked on it anymore. +func unblockTimerChan(c *hchan) { + t := c.timer + t.lock() + t.trace("unblockTimerChan") + if !t.isChan || t.blocked == 0 { + badTimer() + } + t.blocked-- + if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 { + // Last goroutine that was blocked on this timer. + // Mark for removal from heap but do not clear t.when, + // so that we know what time it is still meant to trigger. + t.state |= timerZombie + t.ts.zombies.Add(1) + } + t.unlock() +} diff --git a/src/runtime/trace2.go b/src/runtime/trace2.go index 4639063811..94f15dffd5 100644 --- a/src/runtime/trace2.go +++ b/src/runtime/trace2.go @@ -955,7 +955,7 @@ func newWakeableSleep() *wakeableSleep { lockInit(&s.lock, lockRankWakeableSleep) s.wakeup = make(chan struct{}, 1) s.timer = new(timer) - f := func(s any, _ uintptr) { + f := func(s any, _ uintptr, _ int64) { s.(*wakeableSleep).wake() } s.timer.init(f, s) |
