author     Austin Clements <austin@google.com>  2015-04-22 12:18:01 -0400
committer  Austin Clements <austin@google.com>  2015-04-24 15:12:42 +0000
commit     e5e52f4f2c6f67e41b97fd539c1963f85536c985 (patch)
tree       02ae2dd6a1c79fdaeb93a21d809ed5f3b842db30 /src/runtime
parent     9406f68e6afd02fd588e8ca05ebb729a4f4b8d0d (diff)
download   go-e5e52f4f2c6f67e41b97fd539c1963f85536c985.tar.xz
runtime: factor checking if P run queue is empty
There are a variety of places where we check if a P's run queue is
empty. This test is about to get slightly more complicated, so factor
it out into a new function, runqempty. This function is inlinable, so
this has no effect on performance.

Change-Id: If4a0b01ffbd004937de90d8d686f6ded4aad2c6b
Reviewed-on: https://go-review.googlesource.com/9287
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/proc1.go  16
1 file changed, 11 insertions(+), 5 deletions(-)
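For context, the one-line test being factored out works because a P's local run queue is a fixed-size ring indexed by monotonically increasing head and tail counters: the ring is empty exactly when the two counters are equal. A minimal standalone sketch of that invariant (the runq type and its methods below are illustrative, not the runtime's own):

package main

import "fmt"

// runq is a minimal stand-in for a P's local run queue: a fixed-size
// ring indexed by monotonically increasing head and tail counters.
type runq struct {
	head, tail uint32
	buf        [256]int // stand-in for the ring of runnable Gs
}

// empty reports whether the queue holds no items. Because head and
// tail only ever increase, they are equal exactly when every pushed
// item has been popped.
func (q *runq) empty() bool {
	return q.head == q.tail
}

// push appends an item at the tail (no overflow handling; the real
// scheduler spills to the global queue when the ring fills).
func (q *runq) push(v int) {
	q.buf[q.tail%uint32(len(q.buf))] = v
	q.tail++
}

// pop removes an item from the head, if any.
func (q *runq) pop() (int, bool) {
	if q.empty() {
		return 0, false
	}
	v := q.buf[q.head%uint32(len(q.buf))]
	q.head++
	return v, true
}

func main() {
	var q runq
	fmt.Println(q.empty()) // true
	q.push(1)
	q.push(2)
	fmt.Println(q.empty()) // false
	q.pop()
	q.pop()
	fmt.Println(q.empty()) // true again
}

Because the counters only move forward and wrap via modular indexing, head == tail can only mean "no items", never "full": a full ring has tail == head + len(buf).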
diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go
index 0359b5571c..d37c4f1a5a 100644
--- a/src/runtime/proc1.go
+++ b/src/runtime/proc1.go
@@ -1115,7 +1115,7 @@ func startm(_p_ *p, spinning bool) {
 //go:nowritebarrier
 func handoffp(_p_ *p) {
 	// if it has local work, start it straight away
-	if _p_.runqhead != _p_.runqtail || sched.runqsize != 0 {
+	if !runqempty(_p_) || sched.runqsize != 0 {
 		startm(_p_, false)
 		return
 	}
@@ -1369,7 +1369,7 @@ stop:
 	// check all runqueues once again
 	for i := 0; i < int(gomaxprocs); i++ {
 		_p_ := allp[i]
-		if _p_ != nil && _p_.runqhead != _p_.runqtail {
+		if _p_ != nil && !runqempty(_p_) {
 			lock(&sched.lock)
 			_p_ = pidleget()
 			unlock(&sched.lock)
@@ -2657,7 +2657,7 @@ func procresize(nprocs int32) *p {
 			continue
 		}
 		p.status = _Pidle
-		if p.runqhead == p.runqtail {
+		if runqempty(p) {
 			pidleput(p)
 		} else {
 			p.m.set(mget())
@@ -2940,7 +2940,7 @@ func retake(now int64) uint32 {
 			// On the one hand we don't want to retake Ps if there is no other work to do,
 			// but on the other hand we want to retake them eventually
 			// because they can prevent the sysmon thread from deep sleep.
-			if _p_.runqhead == _p_.runqtail && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+			if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
 				continue
 			}
 			// Need to decrement number of idle locked M's
@@ -3220,6 +3220,12 @@ func pidleget() *p {
 	return _p_
 }
 
+// runqempty returns true if _p_ has no Gs on its local run queue.
+// Note that this test is generally racy.
+func runqempty(_p_ *p) bool {
+	return _p_.runqhead == _p_.runqtail
+}
+
 // Try to put g on local runnable queue.
 // If it's full, put onto global queue.
 // Executed only by the owner P.
@@ -3479,7 +3485,7 @@ func sync_runtime_canSpin(i int) bool {
 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
 		return false
 	}
-	if p := getg().m.p.ptr(); p.runqhead != p.runqtail {
+	if p := getg().m.p.ptr(); !runqempty(p) {
 		return false
 	}
 	return true
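The "generally racy" caveat in the new doc comment is load-bearing: runqempty compares runqhead and runqtail with two separate loads while other threads may be pushing or stealing, so the answer can be stale the instant it is returned. Every call site in this diff tolerates that: handoffp, findrunnable, retake, and sync_runtime_canSpin all treat the result as a scheduling hint, not ground truth. A hedched sketch of that consumer pattern (names and structure are illustrative, not the scheduler's; loads here are atomic so the sketch itself is race-free under Go's memory model, but the answer is still advisory):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// queue models just the two counters that runqempty compares.
type queue struct {
	head, tail uint32
}

// empty does two independent loads: either counter may advance
// between them, or immediately after. Callers must treat the
// result as advisory -- the property the doc comment warns about.
func (q *queue) empty() bool {
	return atomic.LoadUint32(&q.head) == atomic.LoadUint32(&q.tail)
}

func main() {
	var q queue
	done := make(chan struct{})

	// Producer: make work appear by advancing tail.
	go func() {
		for i := 0; i < 5; i++ {
			atomic.AddUint32(&q.tail, 1)
			time.Sleep(time.Millisecond)
		}
		close(done)
	}()

	// Consumer: use empty() only as a hint about whether to look
	// for work, the way handoffp or canSpin use runqempty.
	for taken := 0; ; {
		select {
		case <-done:
			fmt.Printf("took %d items; empty now = %v\n", taken, q.empty())
			return
		default:
			if !q.empty() {
				atomic.AddUint32(&q.head, 1) // "run" one item
				taken++
			}
		}
	}
}

A stale answer in either direction costs only a little wasted work (an unnecessary wakeup, or a briefly missed hand-off), which is why the unsynchronized test is acceptable everywhere it is used.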