diff options
| author | Michael Pratt <mpratt@google.com> | 2020-12-23 15:05:37 -0500 |
|---|---|---|
| committer | Michael Pratt <mpratt@google.com> | 2021-03-05 22:09:52 +0000 |
| commit | d85083911d6ea742901933a544467dad55bb381f (patch) | |
| tree | f8f826d01c3ff45af5908c8a1d71befe4439f97c /src/runtime/proc.go | |
| parent | 39bdd41d03725878f1fd6f8b500ba6700f03bdad (diff) | |
| download | go-d85083911d6ea742901933a544467dad55bb381f.tar.xz | |
runtime: encapsulate access to allgs
Correctly accessing allgs is a bit hairy. Some paths need to lock
allglock, some don't. Those that don't are safest using atomicAllG, but
usage is not consistent.
Rather than doing this ad-hoc, move all access* through forEachG /
forEachGRace, the locking and atomic versions, respectively. This will
make it easier to ensure safe access.
* markroot is the only exception, as it has a far-removed guarantee of
safe access via an atomic load of allglen far before actual use.
Change-Id: Ie1c7a8243e155ae2b4bc3143577380c695680e89
Reviewed-on: https://go-review.googlesource.com/c/go/+/279994
Trust: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Diffstat (limited to 'src/runtime/proc.go')
| -rw-r--r-- | src/runtime/proc.go | 40 |
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 19049d21f3..5f372bb063 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -541,6 +541,30 @@ func atomicAllGIndex(ptr **g, i uintptr) *g { return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize)) } +// forEachG calls fn on every G from allgs. +// +// forEachG takes a lock to exclude concurrent addition of new Gs. +func forEachG(fn func(gp *g)) { + lock(&allglock) + for _, gp := range allgs { + fn(gp) + } + unlock(&allglock) +} + +// forEachGRace calls fn on every G from allgs. +// +// forEachGRace avoids locking, but does not exclude addition of new Gs during +// execution, which may be missed. +func forEachGRace(fn func(gp *g)) { + ptr, length := atomicAllG() + for i := uintptr(0); i < length; i++ { + gp := atomicAllGIndex(ptr, i) + fn(gp) + } + return +} + const ( // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 
@@ -4969,11 +4993,9 @@ func checkdead() { } grunning := 0 - lock(&allglock) - for i := 0; i < len(allgs); i++ { - gp := allgs[i] + forEachG(func(gp *g) { if isSystemGoroutine(gp, false) { - continue + return } s := readgstatus(gp) switch s &^ _Gscan { @@ -4986,8 +5008,7 @@ func checkdead() { print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") throw("checkdead: runnable g") } - } - unlock(&allglock) + }) if grunning == 0 { // possible if main goroutine calls runtime·Goexit() unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang throw("no goroutines (main called runtime.Goexit) - deadlock!") @@ -5390,9 +5411,7 @@ func schedtrace(detailed bool) { print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") } - lock(&allglock) - for gi := 0; gi < len(allgs); gi++ { - gp := allgs[gi] + forEachG(func(gp *g) { mp := gp.m lockedm := gp.lockedm.ptr() id1 := int64(-1) @@ -5404,8 +5423,7 @@ func schedtrace(detailed bool) { id2 = lockedm.id } print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") - } - unlock(&allglock) + }) unlock(&sched.lock) } |
