about summary refs log tree commit diff
path: root/src/runtime/proc.go
diff options
context:
space:
mode:
author    Damien Neil <dneil@google.com>  2024-06-11 11:02:18 -0700
committer Damien Neil <dneil@google.com>  2024-11-19 19:40:40 +0000
commit    d90ce588eac7b9105c0ca556a7c6e975fd5c1eca (patch)
tree      4692a7f87738058c89bba874fe6d53b82786c44a /src/runtime/proc.go
parent    944df9a7516021f0405cd8adb1e6894ae9872cb5 (diff)
download  go-d90ce588eac7b9105c0ca556a7c6e975fd5c1eca.tar.xz
internal/synctest: new package for testing concurrent code
Add an internal (for now) implementation of testing/synctest. The synctest.Run function executes a tree of goroutines in an isolated environment using a fake clock. The synctest.Wait function allows a test to wait for all other goroutines within the test to reach a blocking point.

For #67434
For #69687

Change-Id: Icb39e54c54cece96517e58ef9cfb18bf68506cfc
Reviewed-on: https://go-review.googlesource.com/c/go/+/591997
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--  src/runtime/proc.go  |  49
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 17c375de1a..e7f44c5b6c 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -1229,6 +1229,12 @@ func casgstatus(gp *g, oldval, newval uint32) {
}
}
+ if gp.syncGroup != nil {
+ systemstack(func() {
+ gp.syncGroup.changegstatus(gp, oldval, newval)
+ })
+ }
+
if oldval == _Grunning {
// Track every gTrackingPeriod time a goroutine transitions out of running.
if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
@@ -1325,6 +1331,9 @@ func casgcopystack(gp *g) uint32 {
throw("copystack: bad status, not Gwaiting or Grunnable")
}
if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
+ if sg := gp.syncGroup; sg != nil {
+ sg.changegstatus(gp, oldstatus, _Gcopystack)
+ }
return oldstatus
}
}
@@ -1341,6 +1350,12 @@ func casGToPreemptScan(gp *g, old, new uint32) {
acquireLockRankAndM(lockRankGscan)
for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
}
+ // We never notify gp.syncGroup that the goroutine state has moved
+ // from _Grunning to _Gpreempted. We call syncGroup.changegstatus
+ // after status changes happen, but doing so here would violate the
+ // ordering between the gscan and synctest locks. syncGroup doesn't
+ // distinguish between _Grunning and _Gpreempted anyway, so not
+ // notifying it is fine.
}
// casGFromPreempted attempts to transition gp from _Gpreempted to
@@ -1351,7 +1366,13 @@ func casGFromPreempted(gp *g, old, new uint32) bool {
throw("bad g transition")
}
gp.waitreason = waitReasonPreempted
- return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
+ if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
+ return false
+ }
+ if sg := gp.syncGroup; sg != nil {
+ sg.changegstatus(gp, _Gpreempted, _Gwaiting)
+ }
+ return true
}
// stwReason is an enumeration of reasons the world is stopping.
@@ -4093,6 +4114,15 @@ func park_m(gp *g) {
trace := traceAcquire()
+ // If g is in a synctest group, we don't want to let the group
+ // become idle until after the waitunlockf (if any) has confirmed
+ // that the park is happening.
+ // We need to record gp.syncGroup here, since waitunlockf can change it.
+ sg := gp.syncGroup
+ if sg != nil {
+ sg.incActive()
+ }
+
if trace.ok() {
// Trace the event before the transition. It may take a
// stack trace, but we won't own the stack after the
@@ -4115,6 +4145,9 @@ func park_m(gp *g) {
if !ok {
trace := traceAcquire()
casgstatus(gp, _Gwaiting, _Grunnable)
+ if sg != nil {
+ sg.decActive()
+ }
if trace.ok() {
trace.GoUnpark(gp, 2)
traceRelease(trace)
@@ -4122,6 +4155,11 @@ func park_m(gp *g) {
execute(gp, true) // Schedule it back, never returns.
}
}
+
+ if sg != nil {
+ sg.decActive()
+ }
+
schedule()
}
@@ -4275,6 +4313,9 @@ func goyield_m(gp *g) {
// Finishes execution of the current goroutine.
func goexit1() {
if raceenabled {
+ if gp := getg(); gp.syncGroup != nil {
+ racereleasemergeg(gp, gp.syncGroup.raceaddr())
+ }
racegoend()
}
trace := traceAcquire()
@@ -4313,6 +4354,7 @@ func gdestroy(gp *g) {
gp.param = nil
gp.labels = nil
gp.timer = nil
+ gp.syncGroup = nil
if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
// Flush assist credit to the global pool. This gives
@@ -5059,7 +5101,8 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreaso
if isSystemGoroutine(newg, false) {
sched.ngsys.Add(1)
} else {
- // Only user goroutines inherit pprof labels.
+ // Only user goroutines inherit synctest groups and pprof labels.
+ newg.syncGroup = callergp.syncGroup
if mp.curg != nil {
newg.labels = mp.curg.labels
}
@@ -5086,7 +5129,6 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreaso
status = _Gwaiting
newg.waitreason = waitreason
}
- casgstatus(newg, _Gdead, status)
if pp.goidcache == pp.goidcacheend {
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
@@ -5096,6 +5138,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreaso
pp.goidcacheend = pp.goidcache + _GoidCacheBatch
}
newg.goid = pp.goidcache
+ casgstatus(newg, _Gdead, status)
pp.goidcache++
newg.trace.reset()
if trace.ok() {