diff options
| author | Dmitriy Vyukov <dvyukov@google.com> | 2013-06-27 21:03:35 +0400 |
|---|---|---|
| committer | Dmitriy Vyukov <dvyukov@google.com> | 2013-06-27 21:03:35 +0400 |
| commit | 7ebb187e8e5e588d8c594213ff5187917c4abb20 (patch) | |
| tree | 53e4affca48fbd6faed50a30a3e94572ddc77ea6 /src/pkg/runtime/proc.c | |
| parent | 15a1c3d1e46393a0b05ef5f518d1f4d0b7c638b8 (diff) | |
| download | go-7ebb187e8e5e588d8c594213ff5187917c4abb20.tar.xz | |
undo CL 9776044 / 1e280889f997
Failure on bot:
http://build.golang.org/log/f4c648906e1289ec2237c1d0880fb1a8b1852a08
««« original CL description
runtime: fix CPU underutilization
runtime.newproc/ready are deliberately sloppy about waking new M's;
they only ensure that there is at least 1 spinning M.
Currently, to compensate for that, schedule() checks whether the current P
has local work while there are no spinning M's, and if so wakes up another one.
This does not work if goroutines do not call schedule.
With this change a spinning M wakes up another M when it finds work to do.
It's also not ideal, but it fixes the underutilization.
A proper check would require knowing the exact number of runnable G's,
but that is too expensive to maintain.
Fixes #5586.
R=rsc
TBR=rsc
CC=gobot, golang-dev
https://golang.org/cl/9776044
»»»
R=golang-dev
CC=golang-dev
https://golang.org/cl/10692043
Diffstat (limited to 'src/pkg/runtime/proc.c')
| -rw-r--r-- | src/pkg/runtime/proc.c | 41 |
1 files changed, 14 insertions, 27 deletions
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c index 6dcf564cb0..e6844032a6 100644 --- a/src/pkg/runtime/proc.c +++ b/src/pkg/runtime/proc.c @@ -1018,7 +1018,7 @@ execute(G *gp) // Finds a runnable goroutine to execute. // Tries to steal from other P's, get g from global queue, poll network. static G* -findrunnable1(void) +findrunnable(void) { G *gp; P *p; @@ -1127,29 +1127,6 @@ stop: goto top; } -static G* -findrunnable(void) -{ - G *gp; - int32 nmspinning; - - gp = findrunnable1(); // blocks until work is available - if(m->spinning) { - m->spinning = false; - nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1); - if(nmspinning < 0) - runtime·throw("findrunnable: negative nmspinning"); - } else - nmspinning = runtime·atomicload(&runtime·sched.nmspinning); - - // M wakeup policy is deliberately somewhat conservative (see nmspinning handling), - // so see if we need to wakeup another P here. - if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0) - wakep(); - - return gp; -} - // Injects the list of runnable G's into the scheduler. // Can run concurrently with GC. static void @@ -1208,11 +1185,21 @@ top: runtime·throw("schedule: spinning with local work"); } if(gp == nil) - gp = findrunnable(); // blocks until work is available + gp = findrunnable(); + + if(m->spinning) { + m->spinning = false; + runtime·xadd(&runtime·sched.nmspinning, -1); + } + + // M wakeup policy is deliberately somewhat conservative (see nmspinning handling), + // so see if we need to wakeup another M here. + if (m->p->runqhead != m->p->runqtail && + runtime·atomicload(&runtime·sched.nmspinning) == 0 && + runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast atomic + wakep(); if(gp->lockedm) { - // Hands off own p to the locked m, - // then blocks waiting for a new p. startlockedm(gp); goto top; } |
