author     Michael Anthony Knyszek <mknyszek@google.com>   2021-10-04 19:52:48 +0000
committer  Michael Knyszek <mknyszek@google.com>           2021-11-04 20:01:11 +0000
commit     fc5e8cd6c9de00f8d7da645343934c548e62223e (patch)
tree       083c4a068fc0f971af4c6b31e3f8ea23b2a26c32 /src/runtime
parent     9b2dd1f7714b38f1bfe25676357b62c1bb4cad64 (diff)
download   go-fc5e8cd6c9de00f8d7da645343934c548e62223e.tar.xz
runtime: update and access scavengeGoal atomically
The first step toward acquiring the heap lock less frequently in the
scavenger.

Change-Id: Idc69fd8602be2c83268c155951230d60e20b42fe
Reviewed-on: https://go-review.googlesource.com/c/go/+/353973
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
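As context for the change above: the diff replaces plain reads and writes of mheap_.scavengeGoal with atomic.Store64/Load64 from the runtime's internal atomic package, which is not importable outside the runtime. The sketch below illustrates the store side of the same idiom with the public sync/atomic package; pacer, setGoal, and disable are hypothetical names invented for this illustration, not runtime APIs.

package main

import (
	"fmt"
	"sync/atomic"
)

// pacer is a hypothetical stand-in for mheap: it holds a 64-bit goal that
// the pacing code writes and other code reads without holding a lock.
type pacer struct {
	scavengeGoal uint64 // accessed atomically
}

// disable mirrors gcPaceScavenger's early-return cases: storing the maximum
// uint64 means retained memory can never exceed the goal, so the background
// scavenger stays parked.
func (p *pacer) disable() {
	atomic.StoreUint64(&p.scavengeGoal, ^uint64(0))
}

// setGoal publishes a new scavenging target atomically, so concurrent
// readers never observe a torn 64-bit value.
func (p *pacer) setGoal(goal uint64) {
	atomic.StoreUint64(&p.scavengeGoal, goal)
}

func main() {
	var p pacer
	p.disable()
	p.setGoal(512 << 20)
	fmt.Println(atomic.LoadUint64(&p.scavengeGoal))
}

Because scavengeGoal is a single 64-bit word, an atomic store is enough to publish a new goal without the heap lock, which is what makes the later lock-reduction work possible.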
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/mgcscavenge.go  8
-rw-r--r--  src/runtime/mheap.go        7
2 files changed, 9 insertions, 6 deletions
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index fb9b5c8694..4edeb8739e 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -125,7 +125,7 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
// information about the heap yet) so this is fine, and avoids a fault
// or garbage data later.
if lastHeapGoal == 0 {
- mheap_.scavengeGoal = ^uint64(0)
+ atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
return
}
// Compute our scavenging goal.
@@ -157,10 +157,10 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
// the background scavenger. We disable the background scavenger if there's
// less than one physical page of work to do because it's not worth it.
if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
- mheap_.scavengeGoal = ^uint64(0)
+ atomic.Store64(&mheap_.scavengeGoal, ^uint64(0))
return
}
- mheap_.scavengeGoal = retainedGoal
+ atomic.Store64(&mheap_.scavengeGoal, retainedGoal)
}
// Sleep/wait state of the background scavenger.
@@ -299,7 +299,7 @@ func bgscavenge(c chan int) {
lock(&mheap_.lock)
// If background scavenging is disabled or if there's no work to do just park.
- retained, goal := heapRetained(), mheap_.scavengeGoal
+ retained, goal := heapRetained(), atomic.Load64(&mheap_.scavengeGoal)
if retained <= goal {
unlock(&mheap_.lock)
return
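The bgscavenge hunk above shows the read side: the goal is loaded once and compared against heapRetained(). Below is a minimal, self-contained sketch of that check, again using sync/atomic in place of the runtime's internal package; shouldPark and the byte counts are hypothetical, introduced only for illustration.

package main

import (
	"fmt"
	"sync/atomic"
)

var scavengeGoal uint64 // accessed atomically, in the spirit of mheap.scavengeGoal

// shouldPark reports whether a background worker has nothing to do:
// if retained memory is at or below the goal, it can go back to sleep.
func shouldPark(retained uint64) bool {
	goal := atomic.LoadUint64(&scavengeGoal) // one load, never a torn read
	return retained <= goal
}

func main() {
	atomic.StoreUint64(&scavengeGoal, ^uint64(0)) // "disabled" sentinel
	fmt.Println(shouldPark(64 << 20))             // true: goal is effectively infinite
	atomic.StoreUint64(&scavengeGoal, 32<<20)
	fmt.Println(shouldPark(64 << 20)) // false: retained exceeds the goal
}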
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 057ab06b1d..f2f6e7f4cf 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -111,6 +111,8 @@ type mheap struct {
// scavengeGoal is the amount of total retained heap memory (measured by
// heapRetained) that the runtime will try to maintain by returning memory
// to the OS.
+ //
+ // Accessed atomically.
scavengeGoal uint64
// Page reclaimer state
@@ -1399,9 +1401,10 @@ func (h *mheap) grow(npage uintptr) bool {
// By scavenging inline we deal with the failure to allocate out of
// memory fragments by scavenging the memory fragments that are least
// likely to be re-used.
- if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
+ scavengeGoal := atomic.Load64(&h.scavengeGoal)
+ if retained := heapRetained(); retained+uint64(totalGrowth) > scavengeGoal {
todo := totalGrowth
- if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
+ if overage := uintptr(retained + uint64(totalGrowth) - scavengeGoal); todo > overage {
todo = overage
}
h.pages.scavenge(todo, false)
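In the grow hunk above, the goal is loaded into a local exactly once, so the threshold check and the overage computation use the same snapshot even if the pacer stores a new goal concurrently. The following sketch shows that pattern under the same assumptions as before; scavengeOverage and its arithmetic are illustrative, not the runtime's exact code.

package main

import (
	"fmt"
	"sync/atomic"
)

var scavengeGoal uint64 // updated atomically by a concurrent pacer

// scavengeOverage returns how many bytes the caller should scavenge after
// growing the heap by `growth` bytes with `retained` bytes currently
// retained. Loading the goal exactly once keeps the check and the
// subtraction consistent with each other.
func scavengeOverage(retained, growth uint64) uint64 {
	goal := atomic.LoadUint64(&scavengeGoal) // single snapshot of the goal
	if retained+growth <= goal {
		return 0 // still under the goal; nothing to scavenge
	}
	todo := growth
	if overage := retained + growth - goal; todo > overage {
		todo = overage // only scavenge the portion that exceeds the goal
	}
	return todo
}

func main() {
	atomic.StoreUint64(&scavengeGoal, 100<<20)
	fmt.Println(scavengeOverage(96<<20, 8<<20)>>20, "MiB") // 4 MiB over the goal
}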