diff options
Diffstat (limited to 'src/runtime/mgc.go')
| -rw-r--r-- | src/runtime/mgc.go | 53 |
1 file changed, 28 insertions, 25 deletions
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index b987bcc833..abebe10660 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -120,7 +120,10 @@ package runtime -import "unsafe" +import ( + "runtime/internal/atomic" + "unsafe" +) const ( _DebugGC = 0 @@ -236,7 +239,7 @@ const ( //go:nosplit func setGCPhase(x uint32) { - atomicstore(&gcphase, x) + atomic.Store(&gcphase, x) writeBarrierEnabled = gcphase == _GCmark || gcphase == _GCmarktermination } @@ -632,11 +635,11 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g { decIfPositive := func(ptr *int64) bool { if *ptr > 0 { - if xaddint64(ptr, -1) >= 0 { + if atomic.Xaddint64(ptr, -1) >= 0 { return true } // We lost a race - xaddint64(ptr, +1) + atomic.Xaddint64(ptr, +1) } return false } @@ -690,7 +693,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g { timeUsed := c.fractionalMarkTime + gcForcePreemptNS if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal { // Nope, we'd overshoot the utilization goal - xaddint64(&c.fractionalMarkWorkersNeeded, +1) + atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1) return nil } _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode @@ -983,7 +986,7 @@ func gcStart(mode gcMode, forceTrigger bool) { // black invariant. Enable mutator assists to // put back-pressure on fast allocating // mutators. - atomicstore(&gcBlackenEnabled, 1) + atomic.Store(&gcBlackenEnabled, 1) // Assists and workers can start the moment we start // the world. @@ -1031,8 +1034,8 @@ func gcMarkDone() { // // TODO(austin): Should dedicated workers keep an eye on this // and exit gcDrain promptly? - xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff) - xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff) + atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff) + atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff) if !gcBlackenPromptly { // Transition from mark 1 to mark 2. 
@@ -1049,14 +1052,14 @@ func gcMarkDone() { // Prevent completion of mark 2 until we've flushed // cached workbufs. - xadd(&work.nwait, -1) + atomic.Xadd(&work.nwait, -1) // Rescan global data and BSS. There may still work // workers running at this point, so bump "jobs" down // before "next" so they won't try running root jobs // until we set next. - atomicstore(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots)) - atomicstore(&work.markrootNext, fixedRootCount) + atomic.Store(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots)) + atomic.Store(&work.markrootNext, fixedRootCount) // GC is set up for mark 2. Let Gs blocked on the // transition lock go while we flush caches. @@ -1075,10 +1078,10 @@ func gcMarkDone() { }) // Now we can start up mark 2 workers. - xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff) - xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff) + atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff) + atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff) - incnwait := xadd(&work.nwait, +1) + incnwait := atomic.Xadd(&work.nwait, +1) if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { // This recursion is safe because the call // can't take this same "if" branch. @@ -1122,7 +1125,7 @@ func gcMarkDone() { func gcMarkTermination() { // World is stopped. // Start marktermination which includes enabling the write barrier. 
- atomicstore(&gcBlackenEnabled, 0) + atomic.Store(&gcBlackenEnabled, 0) gcBlackenPromptly = false setGCPhase(_GCmarktermination) @@ -1205,7 +1208,7 @@ func gcMarkTermination() { now, unixNow := nanotime(), unixnanotime() work.pauseNS += now - work.pauseStart work.tEnd = now - atomicstore64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user + atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS) memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow) memstats.pause_total_ns += uint64(work.pauseNS) @@ -1372,7 +1375,7 @@ func gcBgMarkWorker(p *p) { startTime := nanotime() - decnwait := xadd(&work.nwait, -1) + decnwait := atomic.Xadd(&work.nwait, -1) if decnwait == work.nproc { println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) throw("work.nwait was > work.nproc") @@ -1401,18 +1404,18 @@ func gcBgMarkWorker(p *p) { duration := nanotime() - startTime switch p.gcMarkWorkerMode { case gcMarkWorkerDedicatedMode: - xaddint64(&gcController.dedicatedMarkTime, duration) - xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1) + atomic.Xaddint64(&gcController.dedicatedMarkTime, duration) + atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1) case gcMarkWorkerFractionalMode: - xaddint64(&gcController.fractionalMarkTime, duration) - xaddint64(&gcController.fractionalMarkWorkersNeeded, 1) + atomic.Xaddint64(&gcController.fractionalMarkTime, duration) + atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1) case gcMarkWorkerIdleMode: - xaddint64(&gcController.idleMarkTime, duration) + atomic.Xaddint64(&gcController.idleMarkTime, duration) } // Was this the last worker and did we run out // of work? 
- incnwait := xadd(&work.nwait, +1) + incnwait := atomic.Xadd(&work.nwait, +1) if incnwait > work.nproc { println("runtime: p.gcMarkWorkerMode=", p.gcMarkWorkerMode, "work.nwait=", incnwait, "work.nproc=", work.nproc) @@ -1452,7 +1455,7 @@ func gcMarkWorkAvailable(p *p) bool { if p != nil && !p.gcw.empty() { return true } - if atomicload64(&work.full) != 0 { + if atomic.Load64(&work.full) != 0 { return true // global work available } if work.markrootNext < work.markrootJobs { @@ -1773,7 +1776,7 @@ func gchelper() { } nproc := work.nproc // work.nproc can change right after we increment work.ndone - if xadd(&work.ndone, +1) == nproc-1 { + if atomic.Xadd(&work.ndone, +1) == nproc-1 { notewakeup(&work.alldone) } _g_.m.traceback = 0 |
