diff options
| author | Rick Hudson <rlh@golang.org> | 2016-04-27 18:19:16 -0400 |
|---|---|---|
| committer | Rick Hudson <rlh@golang.org> | 2016-04-27 18:46:52 -0400 |
| commit | 23aeb34df172b17b7bfaa85fb59ca64bef9073bb (patch) | |
| tree | a8ab866f1e50f0059856ce628f036d93ab620155 /src/runtime/runtime2.go | |
| parent | 1354b32cd70f2702381764fd595dd2faa996840c (diff) | |
| parent | d3c79d324acd7300b6f705e66af8ca711af00d9f (diff) | |
| download | go-23aeb34df172b17b7bfaa85fb59ca64bef9073bb.tar.xz | |
[dev.garbage] Merge remote-tracking branch 'origin/master' into HEAD
Change-Id: I282fd9ce9db435dfd35e882a9502ab1abc185297
Diffstat (limited to 'src/runtime/runtime2.go')
| -rw-r--r-- | src/runtime/runtime2.go | 144 |
1 file changed, 58 insertions, 86 deletions
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index e0137f7e97..d35b897c3e 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -10,9 +10,7 @@ import ( "unsafe" ) -/* - * defined constants - */ +// defined constants const ( // G status // @@ -99,6 +97,10 @@ const ( _Pdead ) +// Mutual exclusion locks. In the uncontended case, +// as fast as spin locks (just a few user-level instructions), +// but on the contention path they sleep in the kernel. +// A zeroed Mutex is unlocked (no need to initialize each lock). type mutex struct { // Futex-based impl treats it as uint32 key, // while sema-based impl as M* waitm. @@ -106,6 +108,26 @@ type mutex struct { key uintptr } +// sleep and wakeup on one-time events. +// before any calls to notesleep or notewakeup, +// must call noteclear to initialize the Note. +// then, exactly one thread can call notesleep +// and exactly one thread can call notewakeup (once). +// once notewakeup has been called, the notesleep +// will return. future notesleep will return immediately. +// subsequent noteclear must be called only after +// previous notesleep has returned, e.g. it's disallowed +// to call noteclear straight after notewakeup. +// +// notetsleep is like notesleep but wakes up after +// a given number of nanoseconds even if the event +// has not yet happened. if a goroutine uses notetsleep to +// wake up early, it must wait to call noteclear until it +// can be sure that no other goroutine is calling +// notewakeup. +// +// notesleep/notetsleep are generally called on g0, +// notetsleepg is similar to notetsleep but is called on user g. type note struct { // Futex-based impl treats it as uint32 key, // while sema-based impl as M* waitm. 
@@ -310,16 +332,17 @@ type g struct { waitsince int64 // approx time when the g become blocked waitreason string // if status==Gwaiting schedlink guintptr - preempt bool // preemption signal, duplicates stackguard0 = stackpreempt - paniconfault bool // panic (instead of crash) on unexpected fault address - preemptscan bool // preempted g does scan for gc - gcscandone bool // g has scanned stack; protected by _Gscan bit in status - gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan - throwsplit bool // must not split stack - raceignore int8 // ignore race detection events - sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine - sysexitticks int64 // cputicks when syscall has returned (for tracing) - sysexitseq uint64 // trace seq when syscall has returned (for tracing) + preempt bool // preemption signal, duplicates stackguard0 = stackpreempt + paniconfault bool // panic (instead of crash) on unexpected fault address + preemptscan bool // preempted g does scan for gc + gcscandone bool // g has scanned stack; protected by _Gscan bit in status + gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan + throwsplit bool // must not split stack + raceignore int8 // ignore race detection events + sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine + sysexitticks int64 // cputicks when syscall has returned (for tracing) + traceseq uint64 // trace event sequencer + tracelastp puintptr // last P emitted an event for this goroutine lockedm *m sig uint32 writebuf []byte @@ -331,7 +354,14 @@ type g struct { racectx uintptr waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order - // Per-G gcController state + // Per-G GC state + + // gcRescan is this G's index in work.rescan.list. 
If this is + // -1, this G is not on the rescan list. + // + // If gcphase != _GCoff and this G is visible to the garbage + // collector, writes to this are protected by work.rescan.lock. + gcRescan int32 // gcAssistBytes is this G's GC assist credit in terms of // bytes allocated. If this is positive, then the G has credit @@ -397,8 +427,8 @@ type m struct { waittraceskip int startingtrace bool syscalltick uint32 - //#ifdef GOOS_windows - thread uintptr // thread handle + thread uintptr // thread handle + // these are here because they are too large to be on the stack // of low-level NOSPLIT functions. libcall libcall @@ -406,7 +436,7 @@ type m struct { libcallsp uintptr libcallg guintptr syscall libcall // stores syscall parameters on windows - //#endif + mOS } @@ -500,9 +530,10 @@ type schedt struct { runqsize int32 // Global cache of dead G's. - gflock mutex - gfree *g - ngfree int32 + gflock mutex + gfreeStack *g + gfreeNoStack *g + ngfree int32 // Central cache of sudog structs. sudoglock mutex @@ -530,10 +561,10 @@ type schedt struct { totaltime int64 // ∫gomaxprocs dt up to procresizetime } -// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. +// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active. // External locks are not recursive; a second lock is silently ignored. -// The upper bits of m->locked record the nesting depth of calls to lockOSThread +// The upper bits of m.locked record the nesting depth of calls to lockOSThread // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal). // Internal locks can be recursive. For instance, a lock for cgo can occur while the main // goroutine is holding the lock during the initialization phase. 
@@ -603,13 +634,6 @@ type forcegcstate struct { idle uint32 } -/* - * known to compiler - */ -const ( - _Structrnd = sys.RegSize -) - // startup_random_data holds random bytes initialized at startup. These come from // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go). var startupRandomData []byte @@ -635,9 +659,7 @@ func extendRandom(r []byte, n int) { } } -/* - * deferred subroutine calls - */ +// deferred subroutine calls type _defer struct { siz int32 started bool @@ -648,9 +670,7 @@ type _defer struct { link *_defer } -/* - * panics - */ +// panics type _panic struct { argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink arg interface{} // argument to panic @@ -659,10 +679,7 @@ type _panic struct { aborted bool // the panic was aborted } -/* - * stack traces - */ - +// stack traces type stkframe struct { fn *_func // function being run pc uintptr // program counter within fn @@ -682,10 +699,8 @@ const ( _TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it ) -const ( - // The maximum number of frames we print for a traceback - _TracebackMaxFrames = 100 -) +// The maximum number of frames we print for a traceback +const _TracebackMaxFrames = 100 var ( emptystring string @@ -716,46 +731,3 @@ var ( islibrary bool // -buildmode=c-shared isarchive bool // -buildmode=c-archive ) - -/* - * mutual exclusion locks. in the uncontended case, - * as fast as spin locks (just a few user-level instructions), - * but on the contention path they sleep in the kernel. - * a zeroed Mutex is unlocked (no need to initialize each lock). - */ - -/* - * sleep and wakeup on one-time events. - * before any calls to notesleep or notewakeup, - * must call noteclear to initialize the Note. - * then, exactly one thread can call notesleep - * and exactly one thread can call notewakeup (once). - * once notewakeup has been called, the notesleep - * will return. 
future notesleep will return immediately. - * subsequent noteclear must be called only after - * previous notesleep has returned, e.g. it's disallowed - * to call noteclear straight after notewakeup. - * - * notetsleep is like notesleep but wakes up after - * a given number of nanoseconds even if the event - * has not yet happened. if a goroutine uses notetsleep to - * wake up early, it must wait to call noteclear until it - * can be sure that no other goroutine is calling - * notewakeup. - * - * notesleep/notetsleep are generally called on g0, - * notetsleepg is similar to notetsleep but is called on user g. - */ -// bool runtime·notetsleep(Note*, int64); // false - timeout -// bool runtime·notetsleepg(Note*, int64); // false - timeout - -/* - * Lock-free stack. - * Initialize uint64 head to 0, compare with 0 to test for emptiness. - * The stack does not keep pointers to nodes, - * so they can be garbage collected if there are no other pointers to nodes. - */ - -// for mmap, we only pass the lower 32 bits of file offset to the -// assembly routine; the higher bits (if required), should be provided -// by the assembly routine as 0. |
