about summary refs log tree commit diff
path: root/src/runtime/mgc.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/mgc.go')
-rw-r--r--  src/runtime/mgc.go  84
1 file changed, 47 insertions, 37 deletions
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 1c184db10b..ae8338ac10 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -24,6 +24,10 @@
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
+// TODO(austin): The rest of this comment is woefully out of date and
+// needs to be rewritten. There is no distinct scan phase any more and
+// we allocate black during GC.
+//
// 0. Set phase = GCscan from GCoff.
// 1. Wait for all P's to acknowledge phase change.
// At this point all goroutines have passed through a GC safepoint and
@@ -244,7 +248,7 @@ var gcBlackenPromptly bool
const (
_GCoff = iota // GC not running; sweeping in background, write barrier disabled
- _GCmark // GC marking roots and workbufs, write barrier ENABLED
+ _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED
_GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)
@@ -304,7 +308,8 @@ type gcControllerState struct {
// scanWork is the total scan work performed this cycle. This
// is updated atomically during the cycle. Updates occur in
// bounded batches, since it is both written and read
- // throughout the cycle.
+ // throughout the cycle. At the end of the cycle, this is how
+ // much of the retained heap is scannable.
//
// Currently this is the bytes of heap scanned. For most uses,
// this is an opaque unit of work, but for estimation the
@@ -466,14 +471,18 @@ func (c *gcControllerState) startCycle() {
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
+//
+// TODO: Consider removing the periodic controller update altogether.
+// Since we switched to allocating black, in theory we shouldn't have
+// to change the assist ratio. However, this is still a useful hook
+// that we've found many uses for when experimenting.
func (c *gcControllerState) revise() {
// Compute the expected scan work remaining.
//
- // Note that the scannable heap size is likely to increase
- // during the GC cycle. This is why it's important to revise
- // the assist ratio throughout the cycle: if the scannable
- // heap size increases, the assist ratio based on the initial
- // scannable heap size may target too little scan work.
+ // Note that we currently count allocations during GC as both
+ // scannable heap (heap_scan) and scan work completed
+ // (scanWork), so this difference won't be changed by
+ // allocations during GC.
//
// This particular estimate is a strict upper bound on the
// possible remaining scan work for the current heap.
@@ -753,7 +762,7 @@ var work struct {
alldone note
// Number of roots of various root types. Set by gcMarkRootPrepare.
- nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
+ nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nRescanRoots int
// markrootDone indicates that roots have been marked at least
// once during the current GC cycle. This is checked by root
@@ -821,6 +830,14 @@ var work struct {
head, tail guintptr
}
+ // rescan is a list of G's that need to be rescanned during
+ // mark termination. A G adds itself to this list when it
+ // first invalidates its stack scan.
+ rescan struct {
+ lock mutex
+ list []guintptr
+ }
+
// Timing/utilization stats for this cycle.
stwprocs, maxprocs int32
tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
@@ -1069,13 +1086,6 @@ top:
// cached workbufs.
atomic.Xadd(&work.nwait, -1)
- // Rescan global data and BSS. There may still work
- // workers running at this point, so bump "jobs" down
- // before "next" so they won't try running root jobs
- // until we set next.
- atomic.Store(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots))
- atomic.Store(&work.markrootNext, fixedRootCount)
-
// GC is set up for mark 2. Let Gs blocked on the
// transition lock go while we flush caches.
semrelease(&work.markDoneSema)
@@ -1257,6 +1267,13 @@ func gcMarkTermination() {
// Free stack spans. This must be done between GC cycles.
systemstack(freeStackSpans)
+ // Best-effort remove stack barriers so they don't get in the
+ // way of things like GDB and perf.
+ lock(&allglock)
+ myallgs := allgs
+ unlock(&allglock)
+ gcTryRemoveAllStackBarriers(myallgs)
+
// Print gctrace before dropping worldsema. As soon as we drop
// worldsema another cycle could start and smash the stats
// we're trying to print.
@@ -1578,9 +1595,13 @@ func gcMark(start_time int64) {
work.markrootDone = true
for i := 0; i < int(gomaxprocs); i++ {
- if !allp[i].gcw.empty() {
+ gcw := &allp[i].gcw
+ if !gcw.empty() {
throw("P has cached GC work at end of mark termination")
}
+ if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
+ throw("P has unflushed stats at end of mark termination")
+ }
}
if trace.enabled {
@@ -1589,27 +1610,8 @@ func gcMark(start_time int64) {
cachestats()
- // Compute the reachable heap size at the beginning of the
- // cycle. This is approximately the marked heap size at the
- // end (which we know) minus the amount of marked heap that
- // was allocated after marking began (which we don't know, but
- // is approximately the amount of heap that was allocated
- // since marking began).
- allocatedDuringCycle := memstats.heap_live - work.initialHeapLive
- if memstats.heap_live < work.initialHeapLive {
- // This can happen if mCentral_UncacheSpan tightens
- // the heap_live approximation.
- allocatedDuringCycle = 0
- }
- if work.bytesMarked >= allocatedDuringCycle {
- memstats.heap_reachable = work.bytesMarked - allocatedDuringCycle
- } else {
- // This can happen if most of the allocation during
- // the cycle never became reachable from the heap.
- // Just set the reachable heap approximation to 0 and
- // let the heapminimum kick in below.
- memstats.heap_reachable = 0
- }
+ // Update the reachable heap stat.
+ memstats.heap_reachable = work.bytesMarked
// Trigger the next GC cycle when the allocated heap has grown
// by triggerRatio over the reachable heap size. Assume that
@@ -1735,14 +1737,22 @@ func gcCopySpans() {
func gcResetMarkState() {
// This may be called during a concurrent phase, so make sure
// allgs doesn't change.
+ if !(gcphase == _GCoff || gcphase == _GCmarktermination) {
+ // Accessing gcRescan is unsafe.
+ throw("bad GC phase")
+ }
lock(&allglock)
for _, gp := range allgs {
gp.gcscandone = false // set to true in gcphasework
gp.gcscanvalid = false // stack has not been scanned
+ gp.gcRescan = -1
gp.gcAssistBytes = 0
}
unlock(&allglock)
+ // Clear rescan list.
+ work.rescan.list = work.rescan.list[:0]
+
work.bytesMarked = 0
work.initialHeapLive = memstats.heap_live
work.markrootDone = false