From 2517f4946b42b8deedb864c884f1b41311d45850 Mon Sep 17 00:00:00 2001
From: Michael Pratt
Date: Wed, 14 Oct 2020 17:18:27 -0400
Subject: runtime: remove debugCachedWork

debugCachedWork and all of its dependent fields and code were added to
aid in debugging issue #27993. Now that the source of the problem is
known and mitigated (via the extra work check after STW in gcMarkDone),
these extra checks are no longer required and simply make the code more
difficult to follow.

Remove it all.

Updates #27993

Change-Id: I594beedd5ca61733ba9cc9eaad8f80ea92df1a0d
Reviewed-on: https://go-review.googlesource.com/c/go/+/262350
Trust: Michael Pratt
Run-TryBot: Michael Pratt
TryBot-Result: Go Bot
Reviewed-by: Austin Clements
---
 src/runtime/mgcwork.go | 74 --------------------------------------------------
 1 file changed, 74 deletions(-)

(limited to 'src/runtime/mgcwork.go')

diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 46101657d5..51e0fe9219 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -22,13 +22,6 @@ const (
 	workbufAlloc = 32 << 10
 )
 
-// throwOnGCWork causes any operations that add pointers to a gcWork
-// buffer to throw.
-//
-// TODO(austin): This is a temporary debugging measure for issue
-// #27993. To be removed before release.
-var throwOnGCWork bool
-
 func init() {
 	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
 		throw("bad workbufAlloc")
@@ -93,17 +86,6 @@ type gcWork struct {
 	// termination check. Specifically, this indicates that this
 	// gcWork may have communicated work to another gcWork.
 	flushedWork bool
-
-	// pauseGen causes put operations to spin while pauseGen ==
-	// gcWorkPauseGen if debugCachedWork is true.
-	pauseGen uint32
-
-	// putGen is the pauseGen of the last putGen.
-	putGen uint32
-
-	// pauseStack is the stack at which this P was paused if
-	// debugCachedWork is true.
-	pauseStack [16]uintptr
 }
 
 // Most of the methods of gcWork are go:nowritebarrierrec because the
@@ -122,60 +104,10 @@ func (w *gcWork) init() {
 	w.wbuf2 = wbuf2
 }
 
-func (w *gcWork) checkPut(ptr uintptr, ptrs []uintptr) {
-	if debugCachedWork {
-		alreadyFailed := w.putGen == w.pauseGen
-		w.putGen = w.pauseGen
-		if !canPreemptM(getg().m) {
-			// If we were to spin, the runtime may
-			// deadlock. Since we can't be preempted, the
-			// spin could prevent gcMarkDone from
-			// finishing the ragged barrier, which is what
-			// releases us from the spin.
-			return
-		}
-		for atomic.Load(&gcWorkPauseGen) == w.pauseGen {
-		}
-		if throwOnGCWork {
-			printlock()
-			if alreadyFailed {
-				println("runtime: checkPut already failed at this generation")
-			}
-			println("runtime: late gcWork put")
-			if ptr != 0 {
-				gcDumpObject("ptr", ptr, ^uintptr(0))
-			}
-			for _, ptr := range ptrs {
-				gcDumpObject("ptrs", ptr, ^uintptr(0))
-			}
-			println("runtime: paused at")
-			for _, pc := range w.pauseStack {
-				if pc == 0 {
-					break
-				}
-				f := findfunc(pc)
-				if f.valid() {
-					// Obviously this doesn't
-					// relate to ancestor
-					// tracebacks, but this
-					// function prints what we
-					// want.
-					printAncestorTracebackFuncInfo(f, pc)
-				} else {
-					println("\tunknown PC ", hex(pc), "\n")
-				}
-			}
-			throw("throwOnGCWork")
-		}
-	}
-}
-
 // put enqueues a pointer for the garbage collector to trace.
 // obj must point to the beginning of a heap object or an oblet.
 //go:nowritebarrierrec
 func (w *gcWork) put(obj uintptr) {
-	w.checkPut(obj, nil)
-
 	flushed := false
 	wbuf := w.wbuf1
 	// Record that this may acquire the wbufSpans or heap lock to
@@ -214,8 +146,6 @@ func (w *gcWork) put(obj uintptr) {
 // otherwise it returns false and the caller needs to call put.
 //go:nowritebarrierrec
 func (w *gcWork) putFast(obj uintptr) bool {
-	w.checkPut(obj, nil)
-
 	wbuf := w.wbuf1
 	if wbuf == nil {
 		return false
@@ -237,8 +167,6 @@ func (w *gcWork) putBatch(obj []uintptr) {
 		return
 	}
 
-	w.checkPut(0, obj)
-
 	flushed := false
 	wbuf := w.wbuf1
 	if wbuf == nil {
@@ -360,12 +288,10 @@ func (w *gcWork) balance() {
 		return
 	}
 	if wbuf := w.wbuf2; wbuf.nobj != 0 {
-		w.checkPut(0, wbuf.obj[:wbuf.nobj])
 		putfull(wbuf)
 		w.flushedWork = true
 		w.wbuf2 = getempty()
 	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
-		w.checkPut(0, wbuf.obj[:wbuf.nobj])
 		w.wbuf1 = handoff(wbuf)
 		w.flushedWork = true // handoff did putfull
 	} else {
--
cgit v1.3
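
The mitigation the message above refers to (gcMarkDone re-checks every
gcWork cache after stopping the world and restarts the termination
attempt if work was flushed late) can be pictured with a standalone Go
sketch. This is a loose model for orientation only; worker, markDone,
and the int work items are invented names, not the runtime's code:

package main

import "fmt"

// worker loosely models a P's gcWork: a local cache of work plus a
// flag recording that work escaped to the global list.
type worker struct {
	buf         []int // locally cached work items
	flushedWork bool  // set when this cache published work globally
}

var global []int // global work list, shared by all workers

// flush publishes cached work and records that it did so, mirroring
// how gcWork sets flushedWork when it hands buffers to the global list.
func (w *worker) flush() {
	if len(w.buf) > 0 {
		global = append(global, w.buf...)
		w.buf = w.buf[:0]
		w.flushedWork = true
	}
}

// markDone is the termination check: flush everyone, then if any
// worker flushed since the last attempt, the attempt is invalid and
// the caller must re-run the cycle.
func markDone(workers []*worker) bool {
	for _, w := range workers {
		w.flush()
	}
	for _, w := range workers {
		if w.flushedWork {
			w.flushedWork = false
			return false // late work detected: retry
		}
	}
	return len(global) == 0
}

func main() {
	ws := []*worker{{buf: []int{1}}, {}}
	for !markDone(ws) {
		global = global[:0] // pretend the flushed work was drained
	}
	fmt.Println("mark termination reached")
}

With a check of this shape sitting at the termination point itself,
the per-put checkPut instrumentation deleted above becomes redundant,
which is why the commit can remove it wholesale.
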
From dc02578ac8bb27359c7d0451ca249e47bdef2a9e Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 29 Jul 2020 19:00:37 +0000
Subject: runtime: make the span allocation purpose more explicit

This change modifies mheap's span allocation API to have each caller
declare a purpose, defined as a new enum called spanAllocType.

The purpose behind this change is two-fold:
1. Tight control over who gets to allocate heap memory is, generally
   speaking, a good thing. Every codepath that allocates heap memory
   places additional implicit restrictions on the allocator. A notable
   example of a restriction is work bufs coming from heap memory: write
   barriers are not allowed in allocation paths because then we could
   have a situation where the allocator calls into the allocator.
2. Memory statistic updating is explicit. Instead of passing an opaque
   pointer for statistic updating, which places restrictions on how
   that statistic may be updated, we use the spanAllocType to determine
   which statistic to update and how.

We also take this opportunity to group all the statistic updating code
together, which should make the accounting code a little easier to
follow.

Change-Id: Ic0b0898959ba2a776f67122f0e36c9d7d60e3085
Reviewed-on: https://go-review.googlesource.com/c/go/+/246970
Trust: Michael Knyszek
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Reviewed-by: Michael Pratt
---
 src/runtime/mbitmap.go |  4 +--
 src/runtime/mgcwork.go |  4 +--
 src/runtime/mheap.go   | 78 +++++++++++++++++++++++++++++++++++++-------------
 src/runtime/stack.go   | 12 ++++----
 4 files changed, 68 insertions(+), 30 deletions(-)

(limited to 'src/runtime/mgcwork.go')

diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 51c3625c3d..fbfaae0f93 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -1868,12 +1868,12 @@ func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
 	bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
 	// Compute the number of pages needed for bitmapBytes.
 	pages := divRoundUp(bitmapBytes, pageSize)
-	s := mheap_.allocManual(pages, &memstats.gc_sys)
+	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
 	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
 	return s
 }
 func dematerializeGCProg(s *mspan) {
-	mheap_.freeManual(s, &memstats.gc_sys)
+	mheap_.freeManual(s, spanAllocPtrScalarBits)
 }
 
 func dumpGCProg(p *byte) {
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 51e0fe9219..b3a068661e 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -371,7 +371,7 @@ func getempty() *workbuf {
 	}
 	if s == nil {
 		systemstack(func() {
-			s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+			s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
 		})
 		if s == nil {
 			throw("out of memory")
@@ -473,7 +473,7 @@ func freeSomeWbufs(preemptible bool) bool {
 				break
 			}
 			work.wbufSpans.free.remove(span)
-			mheap_.freeManual(span, &memstats.gc_sys)
+			mheap_.freeManual(span, spanAllocWorkBuf)
 		}
 	})
 	more := !work.wbufSpans.free.isEmpty()
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 40fd58b0ef..df659e222b 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -861,6 +861,22 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
 	return nFreed
 }
 
+// spanAllocType represents the type of allocation to make, or
+// the type of allocation to be freed.
+type spanAllocType uint8
+
+const (
+	spanAllocHeap          spanAllocType = iota // heap span
+	spanAllocStack                              // stack span
+	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
+	spanAllocWorkBuf                            // work buf span
+)
+
+// manual returns true if the span allocation is manually managed.
+func (s spanAllocType) manual() bool {
+	return s != spanAllocHeap
+}
+
 // alloc allocates a new span of npage pages from the GC'd heap.
 //
 // spanclass indicates the span's size class and scannability.
@@ -877,7 +893,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
 		if h.sweepdone == 0 {
 			h.reclaim(npages)
 		}
-		s = h.allocSpan(npages, false, spanclass, &memstats.heap_inuse)
+		s = h.allocSpan(npages, spanAllocHeap, spanclass)
 	})
 
 	if s != nil {
@@ -902,9 +918,15 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
 // allocManual must be called on the system stack because it may
 // acquire the heap lock via allocSpan. See mheap for details.
 //
+// If new code is written to call allocManual, do NOT use an
+// existing spanAllocType value and instead declare a new one.
+//
 //go:systemstack
-func (h *mheap) allocManual(npages uintptr, stat *uint64) *mspan {
-	return h.allocSpan(npages, true, 0, stat)
+func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
+	if !typ.manual() {
+		throw("manual span allocation called with non-manually-managed type")
+	}
+	return h.allocSpan(npages, typ, 0)
 }
 
 // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
@@ -1066,7 +1088,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
 
 // allocSpan allocates an mspan which owns npages worth of memory.
 //
-// If manual == false, allocSpan allocates a heap span of class spanclass
+// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
 // and updates heap accounting. If manual == true, allocSpan allocates a
 // manually-managed span (spanclass is ignored), and the caller is
 // responsible for any accounting related to its use of the span. Either
@@ -1081,7 +1103,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
 // the heap lock and because it must block GC transitions.
 //
 //go:systemstack
-func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysStat *uint64) (s *mspan) {
+func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
 	// Function-global state.
 	gp := getg()
 	base, scav := uintptr(0), uintptr(0)
@@ -1143,12 +1165,10 @@ HaveSpan:
 		s.needzero = 1
 	}
 	nbytes := npages * pageSize
-	if manual {
+	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
 		s.limit = s.base() + s.npages*pageSize
-		// Manually managed memory doesn't count toward heap_sys.
-		mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1205,7 +1225,18 @@ HaveSpan:
 		mSysStatDec(&memstats.heap_released, scav)
 	}
 	// Update stats.
-	mSysStatInc(sysStat, nbytes)
+	switch typ {
+	case spanAllocHeap:
+		mSysStatInc(&memstats.heap_inuse, nbytes)
+	case spanAllocStack:
+		mSysStatInc(&memstats.stacks_inuse, nbytes)
+	case spanAllocPtrScalarBits, spanAllocWorkBuf:
+		mSysStatInc(&memstats.gc_sys, nbytes)
+	}
+	if typ.manual() {
+		// Manually managed memory doesn't count toward heap_sys.
+		mSysStatDec(&memstats.heap_sys, nbytes)
+	}
 	mSysStatDec(&memstats.heap_idle, nbytes)
 
 	// Publish the span in various locations.
@@ -1217,7 +1248,7 @@ HaveSpan:
 	// before that happens) or pageInUse is updated.
 	h.setSpans(s.base(), npages, s)
 
-	if !manual {
+	if !typ.manual() {
 		// Mark in-use span in arena page bitmap.
 		//
 		// This publishes the span to the page sweeper, so
@@ -1323,13 +1354,13 @@ func (h *mheap) freeSpan(s *mspan) {
 			bytes := s.npages << _PageShift
 			msanfree(base, bytes)
 		}
-		h.freeSpanLocked(s, true, true)
+		h.freeSpanLocked(s, spanAllocHeap)
 		unlock(&h.lock)
 	})
 }
 
 // freeManual frees a manually-managed span returned by allocManual.
-// stat must be the same as the stat passed to the allocManual that
+// typ must be the same as the spanAllocType passed to the allocManual that
 // allocated s.
 //
 // This must only be called when gcphase == _GCoff. See mSpanState for
@@ -1339,16 +1370,14 @@ func (h *mheap) freeSpan(s *mspan) {
 // the heap lock. See mheap for details.
 //
 //go:systemstack
-func (h *mheap) freeManual(s *mspan, stat *uint64) {
+func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
 	s.needzero = 1
 	lock(&h.lock)
-	mSysStatDec(stat, s.npages*pageSize)
-	mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
-	h.freeSpanLocked(s, false, true)
+	h.freeSpanLocked(s, typ)
 	unlock(&h.lock)
 }
 
-func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
+func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 	switch s.state.get() {
 	case mSpanManual:
 		if s.allocCount != 0 {
@@ -1368,12 +1397,21 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 		throw("mheap.freeSpanLocked - invalid span state")
 	}
 
-	if acctinuse {
+	// Update stats.
+	//
+	// Mirrors the code in allocSpan.
+	switch typ {
+	case spanAllocHeap:
 		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
+	case spanAllocStack:
+		mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize)
+	case spanAllocPtrScalarBits, spanAllocWorkBuf:
+		mSysStatDec(&memstats.gc_sys, s.npages*pageSize)
 	}
-	if acctidle {
-		mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
+	if typ.manual() {
+		mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
 	}
+	mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
 
 	// Mark the space as free.
 	h.pages.free(s.base(), s.npages)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 2afc2635aa..7b9dce5393 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -187,7 +187,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
 	if s == nil {
 		// no free stacks. Allocate another span worth.
-		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
+		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -251,7 +251,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
 		stackpool[order].item.span.remove(s)
 		s.manualFreeList = 0
 		osStackFree(s)
-		mheap_.freeManual(s, &memstats.stacks_inuse)
+		mheap_.freeManual(s, spanAllocStack)
 	}
 }
 
@@ -396,7 +396,7 @@ func stackalloc(n uint32) stack {
 
 		if s == nil {
 			// Allocate a new stack from the heap.
-			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
+			s = mheap_.allocManual(npage, spanAllocStack)
 			if s == nil {
 				throw("out of memory")
 			}
@@ -480,7 +480,7 @@ func stackfree(stk stack) {
 			// Free the stack immediately if we're
 			// sweeping.
 			osStackFree(s)
-			mheap_.freeManual(s, &memstats.stacks_inuse)
+			mheap_.freeManual(s, spanAllocStack)
 		} else {
 			// If the GC is running, we can't return a
 			// stack span to the heap because it could be
@@ -1193,7 +1193,7 @@ func freeStackSpans() {
 				list.remove(s)
 				s.manualFreeList = 0
 				osStackFree(s)
-				mheap_.freeManual(s, &memstats.stacks_inuse)
+				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
@@ -1207,7 +1207,7 @@ func freeStackSpans() {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
-			mheap_.freeManual(s, &memstats.stacks_inuse)
+			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
--
cgit v1.3
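
The accounting pattern this second commit introduces can be reduced to
a small standalone sketch: each caller names the purpose of its
allocation, and one switch maps that purpose to the statistic to
update, reused with a negated delta on the free path so alloc and free
cannot update different counters for the same span. The stats struct,
field names, and sizes below are invented for illustration and are not
the runtime's memstats:

package main

import "fmt"

// spanAllocType mirrors the enum added in the patch: every span
// allocation declares its purpose.
type spanAllocType uint8

const (
	spanAllocHeap spanAllocType = iota // ordinary GC'd heap span
	spanAllocStack
	spanAllocPtrScalarBits
	spanAllocWorkBuf
)

// manual reports whether the span is manually managed, i.e. anything
// other than an ordinary heap span; the same predicate as the patch's.
func (t spanAllocType) manual() bool { return t != spanAllocHeap }

// stats is an invented stand-in for the runtime's memstats fields.
type stats struct{ heapInuse, stacksInuse, gcSys int64 }

// account routes a size delta to the statistic chosen by typ. The
// alloc path calls it with +nbytes and the free path with -nbytes,
// so both sides share one switch, as allocSpan and freeSpanLocked do
// after the patch.
func (s *stats) account(typ spanAllocType, delta int64) {
	switch typ {
	case spanAllocHeap:
		s.heapInuse += delta
	case spanAllocStack:
		s.stacksInuse += delta
	case spanAllocPtrScalarBits, spanAllocWorkBuf:
		s.gcSys += delta
	}
}

func main() {
	var m stats
	m.account(spanAllocStack, 8192)   // allocate a stack span
	m.account(spanAllocWorkBuf, 4096) // allocate a work-buf span
	m.account(spanAllocStack, -8192)  // free the stack span
	fmt.Println(spanAllocWorkBuf.manual()) // true: not a heap span
	fmt.Printf("%+v\n", m)                 // {heapInuse:0 stacksInuse:0 gcSys:4096}
}

Compared with passing an opaque *uint64, the typed purpose also lets
allocManual reject non-manual allocations outright, as the patch's
typ.manual() check in allocManual does.
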