From c7ac645d28b99f163c46e2b8622ca7b60dc212ce Mon Sep 17 00:00:00 2001
From: Tobias Klauser
Date: Thu, 20 Sep 2018 11:43:42 +0200
Subject: runtime: fix reference to sys{Fault,Free,Reserve,Unused,Used} in
 comments

Change-Id: Icbaedc49c810c63c51d56ae394d2f70e4d64b3e0
Reviewed-on: https://go-review.googlesource.com/136495
Run-TryBot: Tobias Klauser
TryBot-Result: Gobot Gobot
Reviewed-by: Ian Lance Taylor
---
 src/runtime/malloc.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 07e0a67240..c6c969a3bf 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -328,27 +328,27 @@ var physPageSize uintptr
 // may use larger alignment, so the caller must be careful to realign the
 // memory obtained by sysAlloc.
 //
-// SysUnused notifies the operating system that the contents
+// sysUnused notifies the operating system that the contents
 // of the memory region are no longer needed and can be reused
 // for other purposes.
-// SysUsed notifies the operating system that the contents
+// sysUsed notifies the operating system that the contents
 // of the memory region are needed again.
 //
-// SysFree returns it unconditionally; this is only used if
+// sysFree returns it unconditionally; this is only used if
 // an out-of-memory error has been detected midway through
-// an allocation. It is okay if SysFree is a no-op.
+// an allocation. It is okay if sysFree is a no-op.
 //
-// SysReserve reserves address space without allocating memory.
+// sysReserve reserves address space without allocating memory.
 // If the pointer passed to it is non-nil, the caller wants the
-// reservation there, but SysReserve can still choose another
+// reservation there, but sysReserve can still choose another
 // location if that one is unavailable.
-// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
+// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
 // may use larger alignment, so the caller must be careful to realign the
 // memory obtained by sysAlloc.
 //
-// SysMap maps previously reserved address space for use.
+// sysMap maps previously reserved address space for use.
 //
-// SysFault marks a (already sysAlloc'd) region to fault
+// sysFault marks a (already sysAlloc'd) region to fault
 // if accessed. Used only for debugging the runtime.
 func mallocinit() {
--
cgit v1.3
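
The comment block updated above documents the runtime's low-level memory
primitives: sysReserve claims address space without backing it with physical
memory, sysMap commits a reserved range for use, sysUnused and sysUsed pass
paging hints to the OS, and sysFree returns memory unconditionally. The real
implementations live in the platform-specific mem_*.go files; the sketch
below only illustrates the same reserve/commit pattern from ordinary user
code on a Unix-like system, with hypothetical helper names:

    package main

    import (
        "fmt"
        "syscall"
    )

    // reserve claims address space without committing memory, in the
    // spirit of sysReserve: a PROT_NONE mapping consumes no physical
    // pages and faults if touched.
    func reserve(n int) ([]byte, error) {
        return syscall.Mmap(-1, 0, n, syscall.PROT_NONE,
            syscall.MAP_ANON|syscall.MAP_PRIVATE)
    }

    // commit makes a previously reserved range usable, in the spirit
    // of sysMap.
    func commit(b []byte) error {
        return syscall.Mprotect(b, syscall.PROT_READ|syscall.PROT_WRITE)
    }

    func main() {
        b, err := reserve(1 << 20) // reserve 1 MiB of address space
        if err != nil {
            panic(err)
        }
        if err := commit(b); err != nil {
            panic(err)
        }
        b[0] = 42 // safe only after commit; a reserved-only page would fault
        fmt.Println(b[0])
        syscall.Munmap(b) // rough analogue of sysFree on Unix
    }
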
From 5a8c11ce3e7a87485defafb78c7bcf14f9d7b5a2 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Wed, 26 Sep 2018 16:39:02 -0400
Subject: runtime: rename _MSpan* constants to mSpan*

We already aliased mSpanInUse to _MSpanInUse. The dual constants are
getting annoying, so fix all of these to use the mSpan* naming
convention.

This was done automatically with:

  sed -i -re 's/_?MSpan(Dead|InUse|Manual|Free)/mSpan\1/g' *.go

plus deleting the existing definition of mSpanInUse.

Change-Id: I09979d9d491d06c10689cea625dc57faa9cc6767
Reviewed-on: https://go-review.googlesource.com/137875
Run-TryBot: Austin Clements
Reviewed-by: Brad Fitzpatrick
TryBot-Result: Gobot Gobot
---
 src/runtime/cgocheck.go         |  2 +-
 src/runtime/heapdump.go         |  8 +++---
 src/runtime/malloc.go           |  2 --
 src/runtime/mbitmap.go          |  4 +--
 src/runtime/mgcmark.go          |  6 ++--
 src/runtime/mgcsweep.go         |  2 +-
 src/runtime/mheap.go            | 64 ++++++++++++++++++++---------------------
 src/runtime/mstats.go           |  2 +-
 src/runtime/runtime-gdb_test.go |  2 +-
 src/runtime/stack.go            |  4 +--
 10 files changed, 47 insertions(+), 49 deletions(-)

diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 73cb6ecae2..ac57e0344e 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -126,7 +126,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
 	}

 	s := spanOfUnchecked(uintptr(src))
-	if s.state == _MSpanManual {
+	if s.state == mSpanManual {
 		// There are no heap bits for value stored on the stack.
 		// For a channel receive src might be on the stack of some
 		// other goroutine, so we can't unwind the stack even if
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 0fc02a8e80..e2c6f18714 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -430,7 +430,7 @@ func dumproots() {

 	// MSpan.types
 	for _, s := range mheap_.allspans {
-		if s.state == _MSpanInUse {
+		if s.state == mSpanInUse {
 			// Finalizers
 			for sp := s.specials; sp != nil; sp = sp.next {
 				if sp.kind != _KindSpecialFinalizer {
@@ -453,7 +453,7 @@ var freemark [_PageSize / 8]bool

 func dumpobjs() {
 	for _, s := range mheap_.allspans {
-		if s.state != _MSpanInUse {
+		if s.state != mSpanInUse {
 			continue
 		}
 		p := s.base()
@@ -616,7 +616,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
 func dumpmemprof() {
 	iterate_memprof(dumpmemprof_callback)
 	for _, s := range mheap_.allspans {
-		if s.state != _MSpanInUse {
+		if s.state != mSpanInUse {
 			continue
 		}
 		for sp := s.specials; sp != nil; sp = sp.next {
@@ -637,7 +637,7 @@ var dumphdr = []byte("go1.7 heap dump\n")
 func mdump() {
 	// make sure we're done sweeping
 	for _, s := range mheap_.allspans {
-		if s.state == _MSpanInUse {
+		if s.state == mSpanInUse {
 			s.ensureSwept()
 		}
 	}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c6c969a3bf..dd88d353dd 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -124,8 +124,6 @@ const (
 	// have the most objects per span.
 	maxObjsPerSpan = pageSize / 8

-	mSpanInUse = _MSpanInUse
-
 	concurrentSweep = _ConcurrentSweep

 	_PageSize = 1 << _PageShift
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index e217e7695f..553d83f7f2 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -365,7 +365,7 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
 	s = spanOf(p)
 	// If p is a bad pointer, it may not be in s's bounds.
 	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
-		if s == nil || s.state == _MSpanManual {
+		if s == nil || s.state == mSpanManual {
 			// If s is nil, the virtual address has never been part of the heap.
 			// This pointer may be to some mmap'd region, so we allow it.
 			// Pointers into stacks are also ok, the runtime manages these explicitly.
@@ -611,7 +611,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
 			}
 		}
 		return
-	} else if s.state != _MSpanInUse || dst < s.base() || s.limit <= dst {
+	} else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
 		// dst was heap memory at some point, but isn't now.
 		// It can't be a global. It must be either our stack,
 		// or in the case of direct channel sends, it could be
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 69ff895512..f713841cfa 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -1239,7 +1239,7 @@ func gcDumpObject(label string, obj, off uintptr) {
 	skipped := false

 	size := s.elemsize
-	if s.state == _MSpanManual && size == 0 {
+	if s.state == mSpanManual && size == 0 {
 		// We're printing something from a stack frame. We
 		// don't know how big it is, so just show up to an
 		// including off.
@@ -1335,7 +1335,7 @@ var useCheckmark = false
 func initCheckmarks() {
 	useCheckmark = true
 	for _, s := range mheap_.allspans {
-		if s.state == _MSpanInUse {
+		if s.state == mSpanInUse {
 			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
 		}
 	}
@@ -1344,7 +1344,7 @@ func clearCheckmarks() {
 	useCheckmark = false
 	for _, s := range mheap_.allspans {
-		if s.state == _MSpanInUse {
+		if s.state == mSpanInUse {
 			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
 		}
 	}
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 71f5c4b3a9..ecfdee59f4 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -159,7 +159,7 @@ func (s *mspan) ensureSwept() {
 	if atomic.Load(&s.sweepgen) == sg {
 		return
 	}
-	// The caller must be sure that the span is a MSpanInUse span.
+	// The caller must be sure that the span is a mSpanInUse span.
 	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
 		s.sweep(false)
 		return
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 00ecfa2d66..65d6b0c7d4 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -82,7 +82,7 @@ type mheap struct {
 	// accounting for current progress. If we could only adjust
 	// the slope, it would create a discontinuity in debt if any
 	// progress has already been made.
-	pagesInUse         uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
+	pagesInUse         uint64 // pages of spans in stats mSpanInUse; R/W with mheap.lock
 	pagesSwept         uint64 // pages swept this cycle; updated atomically
 	pagesSweptBasis    uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically
 	sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without
@@ -199,18 +199,18 @@ type arenaHint struct {

 // An MSpan is a run of pages.
 //
-// When a MSpan is in the heap free list, state == MSpanFree
+// When a MSpan is in the heap free list, state == mSpanFree
 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
 //
-// When a MSpan is allocated, state == MSpanInUse or MSpanManual
+// When a MSpan is allocated, state == mSpanInUse or mSpanManual
 // and heapmap(i) == span for all s->start <= i < s->start+s->npages.

 // Every MSpan is in one doubly-linked list,
 // either one of the MHeap's free lists or one of the
 // MCentral's span lists.

-// An MSpan representing actual memory has state _MSpanInUse,
-// _MSpanManual, or _MSpanFree. Transitions between these states are
+// An MSpan representing actual memory has state mSpanInUse,
+// mSpanManual, or mSpanFree. Transitions between these states are
 // constrained as follows:
 //
 // * A span may transition from free to in-use or manual during any GC
@@ -226,19 +226,19 @@ type arenaHint struct {
 type mSpanState uint8

 const (
-	_MSpanDead   mSpanState = iota
-	_MSpanInUse             // allocated for garbage collected heap
-	_MSpanManual            // allocated for manual management (e.g., stack allocator)
-	_MSpanFree
+	mSpanDead   mSpanState = iota
+	mSpanInUse              // allocated for garbage collected heap
+	mSpanManual             // allocated for manual management (e.g., stack allocator)
+	mSpanFree
 )

 // mSpanStateNames are the names of the span states, indexed by
 // mSpanState.
 var mSpanStateNames = []string{
-	"_MSpanDead",
-	"_MSpanInUse",
-	"_MSpanManual",
-	"_MSpanFree",
+	"mSpanDead",
+	"mSpanInUse",
+	"mSpanManual",
+	"mSpanFree",
 }

 // mSpanList heads a linked list of spans.
@@ -258,7 +258,7 @@ type mspan struct {
 	startAddr uintptr // address of first byte of span aka s.base()
 	npages    uintptr // number of pages in span

-	manualFreeList gclinkptr // list of free objects in _MSpanManual spans
+	manualFreeList gclinkptr // list of free objects in mSpanManual spans

 	// freeindex is the slot index between 0 and nelems at which to begin scanning
 	// for the next free object in this span.
@@ -458,7 +458,7 @@ func (i arenaIdx) l2() uint {
 }

 // inheap reports whether b is a pointer into a (potentially dead) heap object.
-// It returns false for pointers into _MSpanManual spans.
+// It returns false for pointers into mSpanManual spans.
 // Non-preemptible because it is used by write barriers.
 //go:nowritebarrier
 //go:nosplit
@@ -477,7 +477,7 @@ func inHeapOrStack(b uintptr) bool {
 		return false
 	}
 	switch s.state {
-	case mSpanInUse, _MSpanManual:
+	case mSpanInUse, mSpanManual:
 		return b < s.limit
 	default:
 		return false
@@ -696,7 +696,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
 		// able to map interior pointer to containing span.
 		atomic.Store(&s.sweepgen, h.sweepgen)
 		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
-		s.state = _MSpanInUse
+		s.state = mSpanInUse
 		s.allocCount = 0
 		s.spanclass = spanclass
 		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
@@ -788,7 +788,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 	lock(&h.lock)
 	s := h.allocSpanLocked(npage, stat)
 	if s != nil {
-		s.state = _MSpanManual
+		s.state = mSpanManual
 		s.manualFreeList = 0
 		s.allocCount = 0
 		s.spanclass = 0
@@ -829,7 +829,7 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {

 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
-// free list, but its state is still MSpanFree.
+// free list, but its state is still mSpanFree.
 func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 	var list *mSpanList
 	var s *mspan
@@ -857,7 +857,7 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {

 HaveSpan:
 	// Mark span in use.
-	if s.state != _MSpanFree {
+	if s.state != mSpanFree {
 		throw("MHeap_AllocLocked - MSpan not free")
 	}
 	if s.npages < npage {
@@ -878,10 +878,10 @@ HaveSpan:
 		h.setSpan(t.base(), t)
 		h.setSpan(t.base()+t.npages*pageSize-1, t)
 		t.needzero = s.needzero
-		s.state = _MSpanManual // prevent coalescing with s
-		t.state = _MSpanManual
+		s.state = mSpanManual // prevent coalescing with s
+		t.state = mSpanManual
 		h.freeSpanLocked(t, false, false, s.unusedsince)
-		s.state = _MSpanFree
+		s.state = mSpanFree
 	}

 	s.unusedsince = 0
@@ -930,7 +930,7 @@ func (h *mheap) grow(npage uintptr) bool {
 	s.init(uintptr(v), size/pageSize)
 	h.setSpans(s.base(), s.npages, s)
 	atomic.Store(&s.sweepgen, h.sweepgen)
-	s.state = _MSpanInUse
+	s.state = mSpanInUse
 	h.pagesInUse += uint64(s.npages)
 	h.freeSpanLocked(s, false, true, 0)
 	return true
@@ -986,11 +986,11 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 // s must be on a busy list (h.busy or h.busylarge) or unlinked.
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
-	case _MSpanManual:
+	case mSpanManual:
 		if s.allocCount != 0 {
 			throw("MHeap_FreeSpanLocked - invalid stack free")
 		}
-	case _MSpanInUse:
+	case mSpanInUse:
 		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
 			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
 			throw("MHeap_FreeSpanLocked - invalid free")
@@ -1006,7 +1006,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	if acctidle {
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
-	s.state = _MSpanFree
+	s.state = mSpanFree
 	if s.inList() {
 		h.busyList(s.npages).remove(s)
 	}
@@ -1020,7 +1020,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	s.npreleased = 0

 	// Coalesce with earlier, later spans.
-	if before := spanOf(s.base() - 1); before != nil && before.state == _MSpanFree {
+	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
 		// Now adjust s.
 		s.startAddr = before.startAddr
 		s.npages += before.npages
@@ -1035,12 +1035,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		} else {
 			h.freeList(before.npages).remove(before)
 		}
-		before.state = _MSpanDead
+		before.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(before))
 	}

 	// Now check to see if next (greater addresses) span is free and can be coalesced.
-	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == _MSpanFree {
+	if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
 		s.npages += after.npages
 		s.npreleased += after.npreleased
 		s.needzero |= after.needzero
@@ -1050,7 +1050,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		} else {
 			h.freeList(after.npages).remove(after)
 		}
-		after.state = _MSpanDead
+		after.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(after))
 	}

@@ -1187,7 +1187,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.spanclass = 0
 	span.incache = false
 	span.elemsize = 0
-	span.state = _MSpanDead
+	span.state = mSpanDead
 	span.unusedsince = 0
 	span.npreleased = 0
 	span.speciallock.key = 0
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index f67d05414d..1bd6566052 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -38,7 +38,7 @@ type mstats struct {
 	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
 	heap_sys      uint64 // virtual address space obtained from system for GC'd heap
 	heap_idle     uint64 // bytes in idle spans
-	heap_inuse    uint64 // bytes in _MSpanInUse spans
+	heap_inuse    uint64 // bytes in mSpanInUse spans
 	heap_released uint64 // bytes released to the os
 	heap_objects  uint64 // total number of allocated objects
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index d9c6f6d22a..1ed6b39303 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -484,7 +484,7 @@ func TestGdbConst(t *testing.T) {
 		"-ex", "print main.aConstant",
 		"-ex", "print main.largeConstant",
 		"-ex", "print main.minusOne",
-		"-ex", "print 'runtime._MSpanInUse'",
+		"-ex", "print 'runtime.mSpanInUse'",
 		"-ex", "print 'runtime._PageSize'",
 		filepath.Join(dir, "a.exe"),
 	}
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index c7bfc0434b..d56b864c5e 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -211,7 +211,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 // Adds stack x to the free pool. Must be called with stackpoolmu held.
 func stackpoolfree(x gclinkptr, order uint8) {
 	s := spanOfUnchecked(uintptr(x))
-	if s.state != _MSpanManual {
+	if s.state != mSpanManual {
 		throw("freeing stack not in a stack span")
 	}
 	if s.manualFreeList.ptr() == nil {
@@ -459,7 +459,7 @@ func stackfree(stk stack) {
 		}
 	} else {
 		s := spanOfUnchecked(uintptr(v))
-		if s.state != _MSpanManual {
+		if s.state != mSpanManual {
 			println(hex(s.base()), v)
 			throw("bad span state")
 		}
--
cgit v1.3
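
The new mSpanState constants and mSpanStateNames table follow a common Go
pattern for small enums: an unexported integer type, iota-numbered constants,
and a parallel name table for debug printing. A minimal, self-contained
sketch of the same pattern, with illustrative names rather than the
runtime's:

    package main

    import "fmt"

    // spanState mirrors the shape of the runtime's mSpanState.
    type spanState uint8

    const (
        stateDead   spanState = iota // no memory behind the span
        stateInUse                   // allocated for the GC'd heap
        stateManual                  // manually managed, e.g. stacks
        stateFree                    // sitting on a free list
    )

    // stateNames is indexed by the constant values, like mSpanStateNames.
    var stateNames = [...]string{
        stateDead:   "dead",
        stateInUse:  "inUse",
        stateManual: "manual",
        stateFree:   "free",
    }

    func (s spanState) String() string {
        if int(s) < len(stateNames) {
            return stateNames[s]
        }
        return fmt.Sprintf("spanState(%d)", s)
    }

    func main() {
        fmt.Println(stateManual) // prints "manual"
    }

Because the name table is indexed by the constants themselves, a mechanical
rename like the sed one-liner in the commit message only has to keep the two
adjacent declarations in sync.
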
From 01e6cfc2a0b2f9e363c6e305b14f3393d06b13b8 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Sun, 23 Sep 2018 19:12:15 -0400
Subject: runtime: don't call mcache.refill on systemstack

mcache.refill doesn't need to run on the system stack; it just needs
to be non-preemptible. Its only caller, mcache.nextFree, also needs
to be non-preemptible, so we can remove the unnecessary systemstack
switch.

Change-Id: Iba5b3f4444855f1dc134485ba588efff3b54c426
Reviewed-on: https://go-review.googlesource.com/138196
Run-TryBot: Austin Clements
Reviewed-by: Rick Hudson
TryBot-Result: Gobot Gobot
---
 src/runtime/malloc.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index dd88d353dd..5755c9e263 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -733,6 +733,9 @@ func nextFreeFast(s *mspan) gclinkptr {
 // weight allocation. If it is a heavy weight allocation the caller must
 // determine whether a new GC cycle needs to be started or if the GC is active
 // whether this goroutine needs to assist the GC.
+//
+// Must run in a non-preemptible context since otherwise the owner of
+// c could change.
 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
 	s = c.alloc[spc]
 	shouldhelpgc = false
@@ -743,9 +746,7 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo
 		println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
 		throw("s.allocCount != s.nelems && freeIndex == s.nelems")
 	}
-	systemstack(func() {
-		c.refill(spc)
-	})
+	c.refill(spc)
 	shouldhelpgc = true

 	s = c.alloc[spc]
--
cgit v1.3
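
In the runtime, nextFree is reached from mallocgc, which has already made
the goroutine non-preemptible (it acquires its M before allocating), so the
mcache owner c cannot change underneath it; that property, not running on
the system stack, is what refill needs. The benchmark below is only a rough
userspace proxy for the overhead removed here: it compares a direct call
against one wrapped in a function value passed to a helper, whereas the real
systemstack also performs a stack switch that this sketch cannot model. All
names are hypothetical; save it as e.g. refill_test.go and run
go test -bench=.

    package main

    import "testing"

    //go:noinline
    func refillStub() int { return 42 } // hypothetical stand-in for c.refill

    // runOn loosely imitates the shape of systemstack(func() { ... }),
    // minus the actual stack switch.
    //go:noinline
    func runOn(f func()) { f() }

    // BenchmarkDirect models the new code path: a plain call.
    func BenchmarkDirect(b *testing.B) {
        for i := 0; i < b.N; i++ {
            _ = refillStub()
        }
    }

    // BenchmarkWrapped models the old shape: work wrapped in a closure.
    func BenchmarkWrapped(b *testing.B) {
        for i := 0; i < b.N; i++ {
            runOn(func() { _ = refillStub() })
        }
    }
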
From 918ed88e475d39e40baf9cd6539b618e05d12e5e Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Mon, 13 Aug 2018 16:14:19 -0400
Subject: runtime: remove gcStart's mode argument

This argument is always gcBackgroundMode since only
debug.gcstoptheworld can trigger a STW GC at this point. Remove the
unnecessary argument.

Updates #26903. This is preparation for unifying STW GC and
concurrent GC.

Change-Id: Icb4ba8f10f80c2b69cf51a21e04fa2c761b71c94
Reviewed-on: https://go-review.googlesource.com/c/134775
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---
 src/runtime/malloc.go |  2 +-
 src/runtime/mgc.go    | 21 ++++++++++-----------
 src/runtime/proc.go   |  2 +-
 3 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 5755c9e263..c3fe1169dc 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -992,7 +992,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

 	if shouldhelpgc {
 		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
-			gcStart(gcBackgroundMode, t)
+			gcStart(t)
 		}
 	}

diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 83d9a49a46..25bb210475 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1063,7 +1063,7 @@ func GC() {
 	// We're now in sweep N or later. Trigger GC cycle N+1, which
 	// will first finish sweep N if necessary and then enter sweep
 	// termination N+1.
-	gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1})
+	gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})

 	// Wait for mark termination N+1 to complete.
 	gcWaitOnMark(n + 1)
@@ -1201,13 +1201,13 @@ func (t gcTrigger) test() bool {
 	return true
 }

-// gcStart transitions the GC from _GCoff to _GCmark (if
-// !mode.stwMark) or _GCmarktermination (if mode.stwMark) by
-// performing sweep termination and GC initialization.
+// gcStart starts the GC. It transitions from _GCoff to _GCmark (if
+// debug.gcstoptheworld == 0) or performs all of GC (if
+// debug.gcstoptheworld != 0).
 //
 // This may return without performing this transition in some cases,
 // such as when called on a system stack or with locks held.
-func gcStart(mode gcMode, trigger gcTrigger) {
+func gcStart(trigger gcTrigger) {
 	// Since this is called from malloc and malloc is called in
 	// the guts of a number of libraries that might be holding
 	// locks, don't attempt to start GC in non-preemptible or
@@ -1250,12 +1250,11 @@ func gcStart(trigger gcTrigger) {
 	// We do this after re-checking the transition condition so
 	// that multiple goroutines that detect the heap trigger don't
 	// start multiple STW GCs.
-	if mode == gcBackgroundMode {
-		if debug.gcstoptheworld == 1 {
-			mode = gcForceMode
-		} else if debug.gcstoptheworld == 2 {
-			mode = gcForceBlockMode
-		}
+	mode := gcBackgroundMode
+	if debug.gcstoptheworld == 1 {
+		mode = gcForceMode
+	} else if debug.gcstoptheworld == 2 {
+		mode = gcForceBlockMode
 	}

 	// Ok, we're doing it! Stop everybody else
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 73b4a1d9d6..ec73b4d918 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -254,7 +254,7 @@ func forcegchelper() {
 			println("GC forced")
 		}
 		// Time-triggered, fully concurrent.
-		gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
+		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
 	}
 }
--
cgit v1.3
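
The shape of this change is a standard refactoring: when every caller passes
the same argument, drop the parameter and derive the value inside the callee.
gcStart now computes the mode itself from debug.gcstoptheworld, which is
populated from GODEBUG=gcstoptheworld=1 (stop-the-world GC, concurrent
sweep) or =2 (stop-the-world GC and sweep). A simplified, runnable sketch of
the new control flow, using stand-ins for the runtime's internal type,
constants, and debug variable:

    package main

    import "fmt"

    type gcMode int

    const (
        gcBackgroundMode gcMode = iota // concurrent GC and sweep
        gcForceMode                    // stop-the-world GC, concurrent sweep
        gcForceBlockMode               // stop-the-world GC and sweep
    )

    // gcstoptheworld stands in for the runtime's debug.gcstoptheworld.
    var gcstoptheworld int

    // gcStart derives the mode itself instead of taking it as an
    // argument, since callers always passed gcBackgroundMode.
    func gcStart() {
        mode := gcBackgroundMode
        if gcstoptheworld == 1 {
            mode = gcForceMode
        } else if gcstoptheworld == 2 {
            mode = gcForceBlockMode
        }
        fmt.Println("starting GC with mode", mode)
    }

    func main() {
        gcStart() // mode 0: fully concurrent
        gcstoptheworld = 2
        gcStart() // mode 2: fully stop-the-world
    }
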