author    Michael Anthony Knyszek <mknyszek@google.com>  2018-10-05 18:11:02 +0000
committer Michael Knyszek <mknyszek@google.com>          2018-10-30 15:28:24 +0000
commit    db82a1bc12c54f1b6d32a3d41c4422605b16b7e8 (patch)
tree      5f607607b1637c6270c4b91809a2fc37acff4e51 /src/runtime
parent    61d40c8abc82fd845b5e4340bdf0495de0de7ecd (diff)
download  go-db82a1bc12c54f1b6d32a3d41c4422605b16b7e8.tar.xz
runtime: sysUsed spans after trimming
Currently, we mark a whole span as sysUsed before trimming, but this
unnecessarily tells the OS that the trimmed section from the span is
used when it may have been scavenged, if s was scavenged. Overall,
this just makes invocations of sysUsed a little more fine-grained.

It does come with the caveat that now heap_released needs to be managed
a little more carefully in allocSpanLocked. In this case, we choose to
(like before this change) negate any effect the span has on
heap_released before trimming, then add it back if the trimmed part is
scavengable.

For #14045.

Change-Id: Ifa384d989611398bfad3ca39d3bb595a5962a3ea
Reviewed-on: https://go-review.googlesource.com/c/140198
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
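
To make the accounting concrete, here is a minimal, self-contained sketch of the scheme the message describes. It is not the runtime's actual code: the pageSize and physPageSize values are arbitrary, and span, heapReleased, and allocTrim are simplified stand-ins for mspan, memstats.heap_released, and allocSpanLocked.

package main

import "fmt"

const (
	pageSize     = 8192  // runtime page size (8 KiB)
	physPageSize = 16384 // hypothetical physical page size
)

// span is a simplified stand-in for the runtime's mspan.
type span struct {
	base, npages uintptr
	scavenged    bool
}

// physPageBounds returns s's bounds rounded inward to physical page
// boundaries; only whole physical pages can be returned to the OS.
func (s *span) physPageBounds() (start, end uintptr) {
	start = (s.base + physPageSize - 1) &^ (physPageSize - 1)
	end = (s.base + s.npages*pageSize) &^ (physPageSize - 1)
	return
}

// released reports how many bytes of s were returned to the OS.
func (s *span) released() uintptr {
	if !s.scavenged {
		return 0
	}
	start, end := s.physPageBounds()
	if start >= end {
		return 0
	}
	return end - start
}

var heapReleased uintptr // stand-in for memstats.heap_released

// allocTrim models the trim path of allocSpanLocked after this change:
// subtract s's released bytes up front, split off the tail t, re-add
// t's bytes if t is still scavengable, and only then "sysUse" the
// pages actually kept in s.
func allocTrim(s *span, npage uintptr) (kept, tail *span) {
	heapReleased -= s.released()
	if s.npages > npage {
		t := &span{base: s.base + npage*pageSize, npages: s.npages - npage}
		if start, end := t.physPageBounds(); s.scavenged && start < end {
			heapReleased += end - start
			t.scavenged = true
		}
		s.npages = npage
		tail = t
	}
	if s.scavenged {
		// sysUsed(s.base, s.npages*pageSize) would go here, now
		// covering only the trimmed span rather than all of it.
		s.scavenged = false
	}
	return s, tail
}

func main() {
	s := &span{base: 0x100000, npages: 8, scavenged: true}
	heapReleased = s.released() // pretend all of s was scavenged earlier
	kept, tail := allocTrim(s, 2)
	// Prints: 2 6 true 49152 — only the 6-page tail still counts as released.
	fmt.Println(kept.npages, tail.npages, tail.scavenged, heapReleased)
}

The point of subtracting s.released() up front and re-adding the tail's scavengable bytes after the split is that heap_released only ever moves by amounts computed from the same physPageBounds rounding, so its increments and decrements stay matched.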
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/mheap.go  33
1 file changed, 20 insertions, 13 deletions
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index fbf517edfa..ddbc872080 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -884,19 +884,11 @@ HaveSpan:
if s.npages < npage {
throw("MHeap_AllocLocked - bad npages")
}
- if s.scavenged {
- // sysUsed all the pages that are actually available
- // in the span, but only drop heap_released by the
- // actual amount of pages released. This helps ensure
- // that heap_released only increments and decrements
- // by the same amounts. It's also fine, because any
- // of the pages outside start and end wouldn't have been
- // sysUnused in the first place.
- sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
- start, end := s.physPageBounds()
- memstats.heap_released -= uint64(end-start)
- s.scavenged = false
- }
+
+ // First, subtract any memory that was released back to
+ // the OS from s. We will re-scavenge the trimmed section
+ // if necessary.
+ memstats.heap_released -= uint64(s.released())
if s.npages > npage {
// Trim extra and put it back in the heap.
@@ -907,11 +899,26 @@ HaveSpan:
h.setSpan(t.base(), t)
h.setSpan(t.base()+t.npages*pageSize-1, t)
t.needzero = s.needzero
+ // If s was scavenged, then t may be scavenged.
+ start, end := t.physPageBounds()
+ if s.scavenged && start < end {
+ memstats.heap_released += uint64(end-start)
+ t.scavenged = true
+ }
s.state = mSpanManual // prevent coalescing with s
t.state = mSpanManual
h.freeSpanLocked(t, false, false, s.unusedsince)
s.state = mSpanFree
}
+ // "Unscavenge" s only AFTER splitting so that
+ // we only sysUsed whatever we actually need.
+ if s.scavenged {
+ // sysUsed all the pages that are actually available
+ // in the span. Note that we don't need to decrement
+ // heap_released since we already did so earlier.
+ sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
+ s.scavenged = false
+ }
s.unusedsince = 0
h.setSpans(s.base(), npage, s)
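
The start < end guard above matters when the trimmed tail covers no whole physical page: nothing in it could have been sysUnused, so it must not be counted in heap_released or marked scavenged. A tiny standalone sketch of that inward rounding, with physPageSize chosen arbitrarily for illustration:

package main

import "fmt"

const (
	pageSize     = 8192  // runtime page size
	physPageSize = 16384 // hypothetical physical page size
)

// physPageBounds rounds [base, base+npages*pageSize) inward to
// physical page boundaries, in the spirit of mspan.physPageBounds.
func physPageBounds(base, npages uintptr) (start, end uintptr) {
	start = (base + physPageSize - 1) &^ (physPageSize - 1)
	end = (base + npages*pageSize) &^ (physPageSize - 1)
	return
}

func main() {
	// A one-page tail starting mid-physical-page contains no whole
	// physical page, so start == end and it is not scavengable.
	start, end := physPageBounds(0x102000, 1)
	fmt.Printf("start=%#x end=%#x scavengable=%v\n", start, end, start < end)
}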