diff options
author    Michael Pratt <mpratt@google.com>       2025-10-30 16:33:30 -0400
committer Gopher Robot <gobot@golang.org>         2025-11-12 08:26:55 -0800
commit    d3aeba1670e80095fdd6e313a776163f4c68dc6a (patch)
tree      704913a49171650bbf14c6b9afe1434e93112f7a /src/runtime
parent    8873e8bea29ac6de5fecee88b8b81239bd2eb179 (diff)
download  go-d3aeba1670e80095fdd6e313a776163f4c68dc6a.tar.xz
runtime: switch p.gcFractionalMarkTime to atomic.Int64
atomic.Int64 automatically maintains proper alignment, avoiding the need
to manually adjust alignment back and forth as fields above change.
Change-Id: I6a6a636c4c3c366353f6dc8ecac473c075dd5cd9
Reviewed-on: https://go-review.googlesource.com/c/go/+/716700
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Diffstat (limited to 'src/runtime')
 src/runtime/align_runtime_test.go | 1 -
 src/runtime/mgc.go                | 4 ++--
 src/runtime/mgcpacer.go           | 4 ++--
 src/runtime/runtime2.go           | 2 +-
 4 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go
index 4bcb49db2f..8b21934a75 100644
--- a/src/runtime/align_runtime_test.go
+++ b/src/runtime/align_runtime_test.go
@@ -14,7 +14,6 @@ import "unsafe"
 // operations (all the *64 operations in internal/runtime/atomic).
 var AtomicFields = []uintptr{
 	unsafe.Offsetof(m{}.procid),
-	unsafe.Offsetof(p{}.gcFractionalMarkTime),
 	unsafe.Offsetof(profBuf{}.overflow),
 	unsafe.Offsetof(profBuf{}.overflowTime),
 	unsafe.Offsetof(heapStatsDelta{}.tinyAllocCount),
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 22150dfd17..43afbc330b 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -316,7 +316,7 @@ func pollFractionalWorkerExit() bool {
 		return true
 	}
 	p := getg().m.p.ptr()
-	selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime)
+	selfTime := p.gcFractionalMarkTime.Load() + (now - p.gcMarkWorkerStartTime)
 	// Add some slack to the utilization goal so that the
 	// fractional worker isn't behind again the instant it exits.
 	return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
@@ -1858,7 +1858,7 @@ func gcBgMarkWorker(ready chan struct{}) {
 			pp.limiterEvent.stop(limiterEventIdleMarkWork, now)
 		}
 		if pp.gcMarkWorkerMode == gcMarkWorkerFractionalMode {
-			atomic.Xaddint64(&pp.gcFractionalMarkTime, duration)
+			pp.gcFractionalMarkTime.Add(duration)
 		}

 		// We'll releasem after this point and thus this P may run
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index bd454b5cea..32c1b941e5 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -427,7 +427,7 @@ func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger g
 	// Clear per-P state
 	for _, p := range allp {
 		p.gcAssistTime = 0
-		p.gcFractionalMarkTime = 0
+		p.gcFractionalMarkTime.Store(0)
 	}

 	if trigger.kind == gcTriggerTime {
@@ -830,7 +830,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
 		//
 		// This should be kept in sync with pollFractionalWorkerExit.
 		delta := now - c.markStartTime
-		if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+		if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal {
 			// Nope. No need to run a fractional worker.
 			gcBgMarkWorkerPool.push(&node.node)
 			return nil, now
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 2b79717703..85a9693ace 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -794,7 +794,7 @@ type p struct {

 	// Per-P GC state
 	gcAssistTime         int64 // Nanoseconds in assistAlloc
-	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
+	gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker

 	// limiterEvent tracks events for the GC CPU limiter.
 	limiterEvent limiterEvent
