| author | Rick Hudson <rlh@golang.org> | 2016-04-27 18:19:16 -0400 |
|---|---|---|
| committer | Rick Hudson <rlh@golang.org> | 2016-04-27 18:46:52 -0400 |
| commit | 23aeb34df172b17b7bfaa85fb59ca64bef9073bb | |
| tree | a8ab866f1e50f0059856ce628f036d93ab620155 /src/runtime/proc.go | |
| parent | 1354b32cd70f2702381764fd595dd2faa996840c | |
| parent | d3c79d324acd7300b6f705e66af8ca711af00d9f | |
[dev.garbage] Merge remote-tracking branch 'origin/master' into HEAD
Change-Id: I282fd9ce9db435dfd35e882a9502ab1abc185297
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--  src/runtime/proc.go | 111
1 file changed, 71 insertions(+), 40 deletions(-)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 5145c84aea..ee732e3cf7 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -381,7 +381,7 @@ func badmcall2(fn func(*g)) {
 }
 
 func badreflectcall() {
-	panic("runtime: arg size to reflect.call more than 1GB")
+	panic(plainError("arg size to reflect.call more than 1GB"))
 }
 
 func lockedOSThread() bool {
@@ -402,6 +402,16 @@ func allgadd(gp *g) {
 	lock(&allglock)
 	allgs = append(allgs, gp)
 	allglen = uintptr(len(allgs))
+
+	// Grow GC rescan list if necessary.
+	if len(allgs) > cap(work.rescan.list) {
+		lock(&work.rescan.lock)
+		l := work.rescan.list
+		// Let append do the heavy lifting, but keep the
+		// length the same.
+		work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
+		unlock(&work.rescan.lock)
+	}
 	unlock(&allglock)
 }
@@ -435,9 +445,10 @@ func schedinit() {
 	tracebackinit()
 	moduledataverify()
 	stackinit()
-	itabsinit()
 	mallocinit()
 	mcommoninit(_g_.m)
+	typelinksinit()
+	itabsinit()
 	msigsave(_g_.m)
 	initSigmask = _g_.m.sigmask
@@ -449,6 +460,9 @@ func schedinit() {
 	sched.lastpoll = uint64(nanotime())
 	procs := int(ncpu)
+	if procs > _MaxGomaxprocs {
+		procs = _MaxGomaxprocs
+	}
 	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
 		if n > _MaxGomaxprocs {
 			n = _MaxGomaxprocs
@@ -639,17 +653,17 @@ func readgstatus(gp *g) uint32 {
 	return atomic.Load(&gp.atomicstatus)
 }
 
-// Ownership of gscanvalid:
+// Ownership of gcscanvalid:
 //
 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
-// then gp owns gp.gscanvalid, and other goroutines must not modify it.
+// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
 //
 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
-// in the status bit and then modify gscanvalid, and then unlock the scan state.
+// in the status bit and then modify gcscanvalid, and then unlock the scan state.
 //
 // Note that the first condition implies an exception to the second:
 // if a second goroutine changes gp's status to _Grunning|_Gscan,
-// that second goroutine still does not have the right to modify gscanvalid.
+// that second goroutine still does not have the right to modify gcscanvalid.
 
 // The Gscanstatuses are acting like locks and this releases them.
 // If it proves to be a performance hit we should be able to make these
@@ -677,9 +691,6 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
 		dumpgstatus(gp)
 		throw("casfrom_Gscanstatus: gp->status is not in scan state")
 	}
-	if newval == _Grunning {
-		gp.gcscanvalid = false
-	}
 }
 
 // This will return false if the gp is not in the expected status and the cas fails.
@@ -753,8 +764,9 @@ func casgstatus(gp *g, oldval, newval uint32) {
 			nextYield = nanotime() + yieldDelay/2
 		}
 	}
-	if newval == _Grunning {
-		gp.gcscanvalid = false
+	if newval == _Grunning && gp.gcscanvalid {
+		// Run queueRescan on the system stack so it has more space.
+		systemstack(func() { queueRescan(gp) })
 	}
 }
@@ -1404,6 +1416,8 @@ func newextram() {
 	gp.syscallpc = gp.sched.pc
 	gp.syscallsp = gp.sched.sp
 	gp.stktopsp = gp.sched.sp
+	gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
+	gp.gcRescan = -1
 	// malg returns status as Gidle, change to Gsyscall before adding to allg
 	// where GC will see it.
 	casgstatus(gp, _Gidle, _Gsyscall)
@@ -1792,23 +1806,7 @@ func execute(gp *g, inheritTime bool) {
 		// GoSysExit has to happen when we have a P, but before GoStart.
 		// So we emit it here.
 		if gp.syscallsp != 0 && gp.sysblocktraced {
-			// Since gp.sysblocktraced is true, we must emit an event.
-			// There is a race between the code that initializes sysexitseq
-			// and sysexitticks (in exitsyscall, which runs without a P,
-			// and therefore is not stopped with the rest of the world)
-			// and the code that initializes a new trace.
-			// The recorded sysexitseq and sysexitticks must therefore
-			// be treated as "best effort". If they are valid for this trace,
-			// then great, use them for greater accuracy.
-			// But if they're not valid for this trace, assume that the
-			// trace was started after the actual syscall exit (but before
-			// we actually managed to start the goroutine, aka right now),
-			// and assign a fresh time stamp to keep the log consistent.
-			seq, ts := gp.sysexitseq, gp.sysexitticks
-			if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 {
-				seq, ts = tracestamp()
-			}
-			traceGoSysExit(seq, ts)
+			traceGoSysExit(gp.sysexitticks)
 		}
 		traceGoStart()
 	}
@@ -2225,6 +2223,10 @@ func goexit0(gp *g) {
 	gp.waitreason = ""
 	gp.param = nil
 
+	// Note that gp's stack scan is now "valid" because it has no
+	// stack. We could dequeueRescan, but that takes a lock and
+	// isn't really necessary.
+	gp.gcscanvalid = true
 	dropg()
 
 	if _g_.m.locked&^_LockExternal != 0 {
@@ -2477,7 +2479,6 @@ func exitsyscall(dummy int32) {
 	}
 
 	_g_.sysexitticks = 0
-	_g_.sysexitseq = 0
 	if trace.enabled {
 		// Wait till traceGoSysBlock event is emitted.
 		// This ensures consistency of the trace (the goroutine is started after it is blocked).
@@ -2488,7 +2489,7 @@ func exitsyscall(dummy int32) {
 		// Tracing code can invoke write barriers that cannot run without a P.
 		// So instead we remember the syscall exit time and emit the event
 		// in execute when we have a P.
-		_g_.sysexitseq, _g_.sysexitticks = tracestamp()
+		_g_.sysexitticks = cputicks()
 	}
 
 	_g_.m.locks--
@@ -2536,7 +2537,7 @@ func exitsyscallfast() bool {
 				// Denote blocking of the new syscall.
 				traceGoSysBlock(_g_.m.p.ptr())
 				// Denote completion of the current syscall.
-				traceGoSysExit(tracestamp())
+				traceGoSysExit(0)
 			})
 		}
 		_g_.m.p.ptr().syscalltick++
@@ -2560,7 +2561,7 @@ func exitsyscallfast() bool {
 					osyield()
 				}
 			}
-			traceGoSysExit(tracestamp())
+			traceGoSysExit(0)
 		}
 	})
 	if ok {
@@ -2716,6 +2717,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 	if newg == nil {
 		newg = malg(_StackMin)
 		casgstatus(newg, _Gidle, _Gdead)
+		newg.gcRescan = -1
 		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
 	}
 	if newg.stack.hi == 0 {
@@ -2749,6 +2751,17 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 	if isSystemGoroutine(newg) {
 		atomic.Xadd(&sched.ngsys, +1)
 	}
+	// The stack is dirty from the argument frame, so queue it for
+	// scanning. Do this before setting it to runnable so we still
+	// own the G. If we're recycling a G, it may already be on the
+	// rescan list.
+	if newg.gcRescan == -1 {
+		queueRescan(newg)
+	} else {
+		// The recycled G is already on the rescan list. Just
+		// mark the stack dirty.
+		newg.gcscanvalid = false
+	}
 	casgstatus(newg, _Gdead, _Grunnable)
 
 	if _p_.goidcache == _p_.goidcacheend {
@@ -2811,8 +2824,13 @@ func gfput(_p_ *p, gp *g) {
 			_p_.gfreecnt--
 			gp = _p_.gfree
 			_p_.gfree = gp.schedlink.ptr()
-			gp.schedlink.set(sched.gfree)
-			sched.gfree = gp
+			if gp.stack.lo == 0 {
+				gp.schedlink.set(sched.gfreeNoStack)
+				sched.gfreeNoStack = gp
+			} else {
+				gp.schedlink.set(sched.gfreeStack)
+				sched.gfreeStack = gp
+			}
 			sched.ngfree++
 		}
 		unlock(&sched.gflock)
@@ -2824,12 +2842,20 @@ func gfget(_p_ *p) *g {
 retry:
 	gp := _p_.gfree
-	if gp == nil && sched.gfree != nil {
+	if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) {
 		lock(&sched.gflock)
-		for _p_.gfreecnt < 32 && sched.gfree != nil {
+		for _p_.gfreecnt < 32 {
+			if sched.gfreeStack != nil {
+				// Prefer Gs with stacks.
+				gp = sched.gfreeStack
+				sched.gfreeStack = gp.schedlink.ptr()
+			} else if sched.gfreeNoStack != nil {
+				gp = sched.gfreeNoStack
+				sched.gfreeNoStack = gp.schedlink.ptr()
+			} else {
+				break
+			}
 			_p_.gfreecnt++
-			gp = sched.gfree
-			sched.gfree = gp.schedlink.ptr()
 			sched.ngfree--
 			gp.schedlink.set(_p_.gfree)
 			_p_.gfree = gp
@@ -2866,8 +2892,13 @@ func gfpurge(_p_ *p) {
 		_p_.gfreecnt--
 		gp := _p_.gfree
 		_p_.gfree = gp.schedlink.ptr()
-		gp.schedlink.set(sched.gfree)
-		sched.gfree = gp
+		if gp.stack.lo == 0 {
+			gp.schedlink.set(sched.gfreeNoStack)
+			sched.gfreeNoStack = gp
+		} else {
+			gp.schedlink.set(sched.gfreeStack)
+			sched.gfreeStack = gp
+		}
 		sched.ngfree++
 	}
 	unlock(&sched.gflock)
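The subtlest line in this merge is the rescan-list growth in allgadd: `append(l[:cap(l)], 0)[:len(l)]` reslices the list out to its full capacity so that append is forced to reallocate and copy, then reslices the result back to the original length, so the list gains capacity without gaining elements. Below is a minimal standalone sketch of the same idiom; the helper name growKeepLen and the concrete sizes are illustrative only, not part of the runtime, which additionally guards the real list with work.rescan.lock.

```go
package main

import "fmt"

// growKeepLen returns s with capacity at least want, leaving its
// length and contents unchanged. It uses the same trick as allgadd:
// reslice to full capacity so append must reallocate, then reslice
// the result back to the original length.
func growKeepLen(s []uintptr, want int) []uintptr {
	for cap(s) < want {
		// len(s) in the slice expression is evaluated against the
		// old s, so the grown slice keeps the original length.
		s = append(s[:cap(s)], 0)[:len(s)]
	}
	return s
}

func main() {
	l := make([]uintptr, 2, 4)
	l[0], l[1] = 10, 20
	l = growKeepLen(l, 100)
	fmt.Println(len(l), cap(l), l) // 2, some cap >= 100, [10 20]
}
```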
