aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mcache.go
diff options
context:
space:
mode:
authorMichael Anthony Knyszek <mknyszek@google.com>2021-03-31 22:55:06 +0000
committerMichael Knyszek <mknyszek@google.com>2021-04-13 23:42:29 +0000
commitf2d5bd1ad306e87804d600d92105dc37279af83f (patch)
tree3469c5e88ec761a320877fba1e3aed6e21f0ae7f /src/runtime/mcache.go
parent8c2a8b1771cd7ed2182f4d03b3c4bd09828315ce (diff)
downloadgo-f2d5bd1ad306e87804d600d92105dc37279af83f.tar.xz
runtime: move internal GC statistics from memstats to gcController
This change moves certain important but internal-only GC statistics from memstats into gcController. These statistics are mainly used in pacing the GC, so it makes sense to keep them in the pacer's state. This CL was mostly generated via rf ' ex . { memstats.gc_trigger -> gcController.trigger memstats.triggerRatio -> gcController.triggerRatio memstats.heap_marked -> gcController.heapMarked memstats.heap_live -> gcController.heapLive memstats.heap_scan -> gcController.heapScan } ' except for a few special cases, like updating names in comments and when these fields are used within gcControllerState methods (at which point they're accessed through the receiver). For #44167. Change-Id: I6bd1602585aeeb80818ded24c07d8e6fec992b93 Reviewed-on: https://go-review.googlesource.com/c/go/+/306598 Trust: Michael Knyszek <mknyszek@google.com> Run-TryBot: Michael Knyszek <mknyszek@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Michael Pratt <mpratt@google.com>
Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--src/runtime/mcache.go26
1 files changed, 13 insertions, 13 deletions
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index bb7475b6f3..2390be406f 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -178,9 +178,9 @@ func (c *mcache) refill(spc spanClass) {
atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))
memstats.heapStats.release()
- // Update heap_live with the same assumption.
+ // Update gcController.heapLive with the same assumption.
usedBytes := uintptr(s.allocCount) * s.elemsize
- atomic.Xadd64(&memstats.heap_live, int64(s.npages*pageSize)-int64(usedBytes))
+ atomic.Xadd64(&gcController.heapLive, int64(s.npages*pageSize)-int64(usedBytes))
// Flush tinyAllocs.
if spc == tinySpanClass {
@@ -190,15 +190,15 @@ func (c *mcache) refill(spc spanClass) {
// While we're here, flush scanAlloc, since we have to call
// revise anyway.
- atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
c.scanAlloc = 0
if trace.enabled {
- // heap_live changed.
+ // gcController.heapLive changed.
traceHeapAlloc()
}
if gcBlackenEnabled != 0 {
- // heap_live and heap_scan changed.
+ // gcController.heapLive and heapScan changed.
gcController.revise()
}
@@ -230,10 +230,10 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
atomic.Xadduintptr(&stats.largeAllocCount, 1)
memstats.heapStats.release()
- // Update heap_live and revise pacing if needed.
- atomic.Xadd64(&memstats.heap_live, int64(npages*pageSize))
+ // Update gcController.heapLive and revise pacing if needed.
+ atomic.Xadd64(&gcController.heapLive, int64(npages*pageSize))
if trace.enabled {
- // Trace that a heap alloc occurred because heap_live changed.
+ // Trace that a heap alloc occurred because gcController.heapLive changed.
traceHeapAlloc()
}
if gcBlackenEnabled != 0 {
@@ -250,7 +250,7 @@ func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) *mspan {
func (c *mcache) releaseAll() {
// Take this opportunity to flush scanAlloc.
- atomic.Xadd64(&memstats.heap_scan, int64(c.scanAlloc))
+ atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
c.scanAlloc = 0
sg := mheap_.sweepgen
@@ -263,14 +263,14 @@ func (c *mcache) releaseAll() {
atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
memstats.heapStats.release()
if s.sweepgen != sg+1 {
- // refill conservatively counted unallocated slots in heap_live.
+ // refill conservatively counted unallocated slots in gcController.heapLive.
// Undo this.
//
// If this span was cached before sweep, then
- // heap_live was totally recomputed since
+ // gcController.heapLive was totally recomputed since
// caching this span, so we don't do this for
// stale spans.
- atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
+ atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
}
// Release the span to the mcentral.
mheap_.central[i].mcentral.uncacheSpan(s)
@@ -283,7 +283,7 @@ func (c *mcache) releaseAll() {
atomic.Xadd64(&memstats.tinyallocs, int64(c.tinyAllocs))
c.tinyAllocs = 0
- // Updated heap_scan and possible heap_live.
+ // Updated heapScan and possible gcController.heapLive.
if gcBlackenEnabled != 0 {
gcController.revise()
}