path: root/src/runtime
author	Michael Anthony Knyszek <mknyszek@google.com>	2019-06-25 19:06:57 +0000
committer	Michael Knyszek <mknyszek@google.com>	2019-09-25 22:20:36 +0000
commit	eb96f8a57444d174bba500b3a5d2a8b21b7e6d1e (patch)
tree	a7aa47f303c321c955a96e29da433e2ec759f765 /src/runtime
parent	f18109d7e30c8d1a6e1c87ba3458499ac7ab2a79 (diff)
download	go-eb96f8a57444d174bba500b3a5d2a8b21b7e6d1e.tar.xz
runtime: scavenge on growth instead of inline with allocation
Inline scavenging causes significant performance regressions in tail
latency for k8s and has relatively little benefit for RSS footprint.

We disabled inline scavenging in Go 1.12.5 (CL 174102) as well, but we
thought other changes in Go 1.13 had mitigated the issues with inline
scavenging. Apparently we were wrong.

This CL switches back to only doing foreground scavenging on heap
growth, rather than doing it when allocation tries to allocate from
scavenged space.

Fixes #32828.

Change-Id: I1f5df44046091f0b4f89fec73c2cde98bf9448cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/183857
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
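As a rough illustration of the policy change, the sketch below contrasts
where the scavenge call sits in each scheme. This is not the runtime's
real API: heap, scavengeIfNeeded, and the byte accounting are hypothetical
simplifications standing in for mheap and scavengeIfNeededLocked.

package main

import "fmt"

const pageSize = 8192

// heap is a toy model; the real runtime type is mheap in mheap.go.
type heap struct {
	mapped    uintptr // virtual memory mapped for the heap
	scavenged uintptr // bytes of free memory returned to the OS
}

// scavengeIfNeeded stands in for scavengeIfNeededLocked: release up to
// nbytes of free memory back to the OS, if needed.
func (h *heap) scavengeIfNeeded(nbytes uintptr) {
	h.scavenged += nbytes // placeholder accounting
}

// Old policy (removed by this CL): scavenge inline on the allocation
// path, whenever an allocation is served from a scavenged span. This
// is a hot path, so the extra work shows up as tail latency.
func (h *heap) allocScavenged(npages uintptr) {
	h.scavengeIfNeeded(npages * pageSize)
}

// New policy: scavenge only when the heap grows, i.e. when new virtual
// memory is mapped. Growth is rare compared to allocation, so the cost
// stays off the hot path.
func (h *heap) grow(size uintptr) {
	h.mapped += size
	h.scavengeIfNeeded(size)
}

func main() {
	h := &heap{}
	h.grow(64 * pageSize)
	fmt.Printf("mapped %d bytes, scavenged %d bytes\n", h.mapped, h.scavenged)
}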
Diffstat (limited to 'src/runtime')
-rw-r--r--	src/runtime/mheap.go	14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 31e84e1eb8..9e8b89de12 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1226,16 +1226,6 @@ HaveSpan:
// heap_released since we already did so earlier.
sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
s.scavenged = false
-
- // Since we allocated out of a scavenged span, we just
- // grew the RSS. Mitigate this by scavenging enough free
- // space to make up for it but only if we need to.
- //
- // scavengeLocked may cause coalescing, so prevent
- // coalescing with s by temporarily changing its state.
- s.state = mSpanManual
- h.scavengeIfNeededLocked(s.npages * pageSize)
- s.state = mSpanFree
}
h.setSpans(s.base(), npage, s)
@@ -1311,6 +1301,10 @@ func (h *mheap) grow(npage uintptr) bool {
//
// h must be locked.
func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
+ // Scavenge some pages to make up for the virtual memory space
+ // we just allocated, but only if we need to.
+ h.scavengeIfNeededLocked(size)
+
s := (*mspan)(h.spanalloc.alloc())
s.init(uintptr(v), size/pageSize)
h.setSpans(s.base(), s.npages, s)
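For context on the hunk removed above: the old inline path temporarily
flipped the span's state so that any coalescing triggered by scavenging
could not merge away the span it was about to allocate from. Below is a
minimal sketch of that pattern; spanState, coalesce, and scavengeAround
are illustrative names, not the runtime's actual types or functions.

package main

import "fmt"

type spanState int

const (
	spanFree   spanState = iota // on a free list; eligible for merging
	spanManual                  // manually managed; coalescing skips it
)

type span struct {
	state spanState
}

// coalesce models a pass that merges adjacent free spans; spans in any
// other state are left alone.
func coalesce(spans []*span) {
	for _, s := range spans {
		if s.state != spanFree {
			continue
		}
		// ... merge s with free neighbors ...
	}
}

// scavengeAround models the removed trick: mark s as manual so the
// coalescing triggered by scavenging cannot merge it, scavenge, then
// restore the free state.
func scavengeAround(s *span, all []*span) {
	s.state = spanManual
	coalesce(all) // stands in for the scavenge pass
	s.state = spanFree
}

func main() {
	s := &span{state: spanFree}
	scavengeAround(s, []*span{s})
	fmt.Println("state after:", s.state)
}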