diff options
| author | Austin Clements <austin@google.com> | 2018-08-16 12:32:46 -0400 |
|---|---|---|
| committer | Austin Clements <austin@google.com> | 2018-10-02 20:35:37 +0000 |
| commit | 550dfc8ae1651eb954274045e31f8ef2e95f6c6c (patch) | |
| tree | c8121c86c2cca3f6478c76887feaca7121005afc /src/runtime/mgcmark.go | |
| parent | 873bd47dfb34ba4416d4df30180905250b91f137 (diff) | |
| download | go-550dfc8ae1651eb954274045e31f8ef2e95f6c6c.tar.xz | |
runtime: eliminate work.markrootdone and second root marking pass
Before STW and concurrent GC were unified, there could be either one
or two root marking passes per GC cycle. There were several tasks we
had to make sure happened once and only once (whether that was at the
beginning of concurrent mark for concurrent GC or during mark
termination for STW GC). We kept track of this in work.markrootdone.
Now that STW and concurrent GC both use the concurrent marking code
and we've eliminated all work done by the second root marking pass, we
only ever need a single root marking pass. Hence, we can eliminate
work.markrootdone and all of the code that's conditional on it.
Updates #26903.
Change-Id: I654a0f5e21b9322279525560a31e64b8d33b790f
Reviewed-on: https://go-review.googlesource.com/c/134784
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime/mgcmark.go')
| -rw-r--r-- | src/runtime/mgcmark.go | 100 |
1 file changed, 34 insertions(+), 66 deletions(-)
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 78a597f007..0f220dd1b9 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -62,57 +62,41 @@ func gcMarkRootPrepare() { work.nDataRoots = 0 work.nBSSRoots = 0 - // Only scan globals once per cycle; preferably concurrently. - if !work.markrootDone { - for _, datap := range activeModules() { - nDataRoots := nBlocks(datap.edata - datap.data) - if nDataRoots > work.nDataRoots { - work.nDataRoots = nDataRoots - } + // Scan globals. + for _, datap := range activeModules() { + nDataRoots := nBlocks(datap.edata - datap.data) + if nDataRoots > work.nDataRoots { + work.nDataRoots = nDataRoots } + } - for _, datap := range activeModules() { - nBSSRoots := nBlocks(datap.ebss - datap.bss) - if nBSSRoots > work.nBSSRoots { - work.nBSSRoots = nBSSRoots - } + for _, datap := range activeModules() { + nBSSRoots := nBlocks(datap.ebss - datap.bss) + if nBSSRoots > work.nBSSRoots { + work.nBSSRoots = nBSSRoots } } - if !work.markrootDone { - // On the first markroot, we need to scan span roots. - // In concurrent GC, this happens during concurrent - // mark and we depend on addfinalizer to ensure the - // above invariants for objects that get finalizers - // after concurrent mark. In STW GC, this will happen - // during mark termination. - // - // We're only interested in scanning the in-use spans, - // which will all be swept at this point. More spans - // may be added to this list during concurrent GC, but - // we only care about spans that were allocated before - // this mark phase. - work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() - - // On the first markroot, we need to scan all Gs. Gs - // may be created after this point, but it's okay that - // we ignore them because they begin life without any - // roots, so there's nothing to scan, and any roots - // they create during the concurrent phase will be - // scanned during mark termination. 
During mark - // termination, allglen isn't changing, so we'll scan - // all Gs. - work.nStackRoots = int(atomic.Loaduintptr(&allglen)) - } else { - // We've already scanned span roots and kept the scan - // up-to-date during concurrent mark. - work.nSpanRoots = 0 + // Scan span roots for finalizer specials. + // + // We depend on addfinalizer to mark objects that get + // finalizers after root marking. + // + // We're only interested in scanning the in-use spans, + // which will all be swept at this point. More spans + // may be added to this list during concurrent GC, but + // we only care about spans that were allocated before + // this mark phase. + work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() - // The hybrid barrier ensures that stacks can't - // contain pointers to unmarked objects, so on the - // second markroot, there's no need to scan stacks. - work.nStackRoots = 0 - } + // Scan stacks. + // + // Gs may be created after this point, but it's okay that we + // ignore them because they begin life without any roots, so + // there's nothing to scan, and any roots they create during + // the concurrent phase will be scanned during mark + // termination. + work.nStackRoots = int(atomic.Loaduintptr(&allglen)) work.markrootNext = 0 work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) @@ -183,24 +167,15 @@ func markroot(gcw *gcWork, i uint32) { } case i == fixedRootFinalizers: - // Only do this once per GC cycle since we don't call - // queuefinalizer during marking. - if work.markrootDone { - break - } for fb := allfin; fb != nil; fb = fb.alllink { cnt := uintptr(atomic.Load(&fb.cnt)) scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw) } case i == fixedRootFreeGStacks: - // Only do this once per GC cycle; preferably - // concurrently. 
- if !work.markrootDone { - // Switch to the system stack so we can call - // stackfree. - systemstack(markrootFreeGStacks) - } + // Switch to the system stack so we can call + // stackfree. + systemstack(markrootFreeGStacks) case baseSpans <= i && i < baseStacks: // mark MSpan.specials @@ -324,10 +299,6 @@ func markrootSpans(gcw *gcWork, shard int) { // TODO(austin): There are several ideas for making this more // efficient in issue #11485. - if work.markrootDone { - throw("markrootSpans during second markroot") - } - sg := mheap_.sweepgen spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard) // Note that work.spans may not include spans that were @@ -719,11 +690,8 @@ func scanstack(gp *g, gcw *gcWork) { throw("can't scan gchelper stack") } - // Shrink the stack if not much of it is being used. During - // concurrent GC, we can do this during concurrent mark. - if !work.markrootDone { - shrinkstack(gp) - } + // Shrink the stack if not much of it is being used. + shrinkstack(gp) // Scan the saved context register. This is effectively a live // register that gets moved back and forth between the |
