author     Michael Matloob <matloob@golang.org>    2015-11-02 14:09:24 -0500
committer  Michael Matloob <matloob@golang.org>    2015-11-10 17:38:04 +0000
commit     67faca7d9c54b367aee5fdeef2d5dd609fcf99d0 (patch)
tree       5c6e8b4e243286311bbc4743d6a8e86f16dda85f /src/runtime/proc.go
parent     d33360571f46b46724b908a5603520dce1e8a81c (diff)
runtime: break atomics out into package runtime/internal/atomic

This change breaks out most of the atomics functions in the runtime into
package runtime/internal/atomic. It adds some basic support in the toolchain
for runtime packages, and also modifies linux/arm atomics to remove the
dependency on the runtime's mutex. The mutexes have been replaced with
spinlocks.

all trybots are happy!
In addition to the trybots, I've tested on the darwin/arm64 builder, on the
darwin/arm builder, and on a ppc64le machine.

Change-Id: I6698c8e3cf3834f55ce5824059f44d00dc8e3c2f
Reviewed-on: https://go-review.googlesource.com/14204
Run-TryBot: Michael Matloob <matloob@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
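The diff below mechanically converts call sites from the old runtime-private helpers (atomicload, atomicstore, cas, xadd, xchg64, ...) to the new runtime/internal/atomic package (atomic.Load, atomic.Store, atomic.Cas, atomic.Xadd, atomic.Xchg64, ...). As a rough illustration of the cas-lock pattern that several of these sites use (gp.stackLock, prof.lock), here is a minimal standalone sketch. It is not runtime code: it uses the public sync/atomic package, since runtime/internal/atomic is only importable from inside the runtime, and the spinLock/spinUnlock helper names are hypothetical, not part of this change.

// Minimal sketch (assumption: public sync/atomic as a stand-in for the
// runtime's internal atomics): a cas-based spin lock in the shape of the
// prof.lock / gp.stackLock pattern visible in the diff.
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

// spinLock spins until it wins the 0 -> 1 transition on the lock word,
// yielding between attempts (the runtime spins with osyield() instead).
func spinLock(l *uint32) {
	for !atomic.CompareAndSwapUint32(l, 0, 1) {
		runtime.Gosched()
	}
}

// spinUnlock releases the lock by storing 0 back into the lock word.
func spinUnlock(l *uint32) {
	atomic.StoreUint32(l, 0)
}

func main() {
	var lock uint32
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				spinLock(&lock)
				counter++
				spinUnlock(&lock)
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // always 8000: the spin lock serializes the increments
}

The runtime version yields with osyield() rather than runtime.Gosched(), but the shape is the same: a CAS from 0 to 1 to acquire, a plain atomic store of 0 to release.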
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--   src/runtime/proc.go   171
1 file changed, 87 insertions, 84 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index cc2134fc1b..4dba0cabe9 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
@@ -165,7 +168,7 @@ func forcegchelper() {
if forcegc.idle != 0 {
throw("forcegc: phase error")
}
- atomicstore(&forcegc.idle, 1)
+ atomic.Store(&forcegc.idle, 1)
goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
// this goroutine is explicitly resumed by sysmon
if debug.gctrace > 0 {
@@ -462,11 +465,11 @@ func ready(gp *g, traceskip int) {
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
casgstatus(gp, _Gwaiting, _Grunnable)
runqput(_g_.m.p.ptr(), gp, true)
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic
wakep()
}
_g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt
}
}
@@ -538,7 +541,7 @@ func freezetheworld() {
for i := 0; i < 5; i++ {
// this should tell the scheduler to not start any new goroutines
sched.stopwait = freezeStopWait
- atomicstore(&sched.gcwaiting, 1)
+ atomic.Store(&sched.gcwaiting, 1)
// this should stop running goroutines
if !preemptall() {
break // no running goroutines
@@ -562,7 +565,7 @@ func isscanstatus(status uint32) bool {
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
- return atomicload(&gp.atomicstatus)
+ return atomic.Load(&gp.atomicstatus)
}
// Ownership of gscanvalid:
@@ -595,11 +598,11 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
_Gscanrunning,
_Gscansyscall:
if newval == oldval&^_Gscan {
- success = cas(&gp.atomicstatus, oldval, newval)
+ success = atomic.Cas(&gp.atomicstatus, oldval, newval)
}
case _Gscanenqueue:
if newval == _Gwaiting {
- success = cas(&gp.atomicstatus, oldval, newval)
+ success = atomic.Cas(&gp.atomicstatus, oldval, newval)
}
}
if !success {
@@ -620,11 +623,11 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
_Gwaiting,
_Gsyscall:
if newval == oldval|_Gscan {
- return cas(&gp.atomicstatus, oldval, newval)
+ return atomic.Cas(&gp.atomicstatus, oldval, newval)
}
case _Grunning:
if newval == _Gscanrunning || newval == _Gscanenqueue {
- return cas(&gp.atomicstatus, oldval, newval)
+ return atomic.Cas(&gp.atomicstatus, oldval, newval)
}
}
print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
@@ -656,7 +659,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
- for !cas(&gp.atomicstatus, oldval, newval) {
+ for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
systemstack(func() {
throw("casgstatus: waiting for Gwaiting but is Grunnable")
@@ -687,7 +690,7 @@ func casgcopystack(gp *g) uint32 {
if oldstatus != _Gwaiting && oldstatus != _Grunnable {
throw("copystack: bad status, not Gwaiting or Grunnable")
}
- if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
+ if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
return oldstatus
}
}
@@ -733,11 +736,11 @@ func scang(gp *g) {
if !gp.gcscandone {
// Coordinate with traceback
// in sigprof.
- for !cas(&gp.stackLock, 0, 1) {
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
scanstack(gp)
- atomicstore(&gp.stackLock, 0)
+ atomic.Store(&gp.stackLock, 0)
gp.gcscandone = true
}
restartg(gp)
@@ -866,7 +869,7 @@ func stopTheWorldWithSema() {
lock(&sched.lock)
sched.stopwait = gomaxprocs
- atomicstore(&sched.gcwaiting, 1)
+ atomic.Store(&sched.gcwaiting, 1)
preemptall()
// stop current P
_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
@@ -875,7 +878,7 @@ func stopTheWorldWithSema() {
for i := 0; i < int(gomaxprocs); i++ {
p := allp[i]
s := p.status
- if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
+ if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
if trace.enabled {
traceGoSysBlock(p)
traceProcStop(p)
@@ -966,7 +969,7 @@ func startTheWorldWithSema() {
// Wakeup an additional proc in case we have excessive runnable goroutines
// in local queues or in the global queue. If we don't, the proc will park itself.
// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
wakep()
}
@@ -1073,7 +1076,7 @@ func forEachP(fn func(*p)) {
// Ask all Ps to run the safe point function.
for _, p := range allp[:gomaxprocs] {
if p != _p_ {
- atomicstore(&p.runSafePointFn, 1)
+ atomic.Store(&p.runSafePointFn, 1)
}
}
preemptall()
@@ -1085,7 +1088,7 @@ func forEachP(fn func(*p)) {
// Run safe point function for all idle Ps. sched.pidle will
// not change because we hold sched.lock.
for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
- if cas(&p.runSafePointFn, 1, 0) {
+ if atomic.Cas(&p.runSafePointFn, 1, 0) {
fn(p)
sched.safePointWait--
}
@@ -1102,7 +1105,7 @@ func forEachP(fn func(*p)) {
for i := 0; i < int(gomaxprocs); i++ {
p := allp[i]
s := p.status
- if s == _Psyscall && p.runSafePointFn == 1 && cas(&p.status, s, _Pidle) {
+ if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
if trace.enabled {
traceGoSysBlock(p)
traceProcStop(p)
@@ -1158,7 +1161,7 @@ func runSafePointFn() {
// Resolve the race between forEachP running the safe-point
// function on this P's behalf and this P running the
// safe-point function directly.
- if !cas(&p.runSafePointFn, 1, 0) {
+ if !atomic.Cas(&p.runSafePointFn, 1, 0) {
return
}
sched.safePointFn(p)
@@ -1318,7 +1321,7 @@ func newextram() {
mp.locked = _LockInternal
mp.lockedg = gp
gp.lockedm = mp
- gp.goid = int64(xadd64(&sched.goidgen, 1))
+ gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
if raceenabled {
gp.racectx = racegostart(funcPC(newextram))
}
@@ -1381,7 +1384,7 @@ func lockextra(nilokay bool) *m {
const locked = 1
for {
- old := atomicloaduintptr(&extram)
+ old := atomic.Loaduintptr(&extram)
if old == locked {
yield := osyield
yield()
@@ -1391,7 +1394,7 @@ func lockextra(nilokay bool) *m {
usleep(1)
continue
}
- if casuintptr(&extram, old, locked) {
+ if atomic.Casuintptr(&extram, old, locked) {
return (*m)(unsafe.Pointer(old))
}
yield := osyield
@@ -1402,7 +1405,7 @@ func lockextra(nilokay bool) *m {
//go:nosplit
func unlockextra(mp *m) {
- atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+ atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
@@ -1440,7 +1443,7 @@ func stopm() {
}
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
retry:
@@ -1466,7 +1469,7 @@ func mspinning() {
// Something (presumably the GC) was readied while the
// runtime was starting up this M, so the M is no
// longer spinning.
- if int32(xadd(&sched.nmspinning, -1)) < 0 {
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("mspinning: nmspinning underflowed")
}
} else {
@@ -1485,7 +1488,7 @@ func startm(_p_ *p, spinning bool) {
if _p_ == nil {
unlock(&sched.lock)
if spinning {
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
return
}
@@ -1525,7 +1528,7 @@ func handoffp(_p_ *p) {
}
// no local work, check that there are no spinning/idle M's,
// otherwise our help is not required
- if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
+ if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
startm(_p_, true)
return
}
@@ -1539,7 +1542,7 @@ func handoffp(_p_ *p) {
unlock(&sched.lock)
return
}
- if _p_.runSafePointFn != 0 && cas(&_p_.runSafePointFn, 1, 0) {
+ if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
sched.safePointFn(_p_)
sched.safePointWait--
if sched.safePointWait == 0 {
@@ -1553,7 +1556,7 @@ func handoffp(_p_ *p) {
}
// If this is the last running P and nobody is polling network,
// need to wakeup another M to poll network.
- if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
+ if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
unlock(&sched.lock)
startm(_p_, false)
return
@@ -1566,7 +1569,7 @@ func handoffp(_p_ *p) {
// Called when a G is made runnable (newproc, ready).
func wakep() {
// be conservative about spinning threads
- if !cas(&sched.nmspinning, 0, 1) {
+ if !atomic.Cas(&sched.nmspinning, 0, 1) {
return
}
startm(nil, true)
@@ -1630,7 +1633,7 @@ func gcstopm() {
}
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
_p_ := releasep()
lock(&sched.lock)
@@ -1749,12 +1752,12 @@ top:
// If number of spinning M's >= number of busy P's, block.
// This is necessary to prevent excessive CPU consumption
// when GOMAXPROCS>>1 but the program parallelism is low.
- if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
+ if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic
goto stop
}
if !_g_.m.spinning {
_g_.m.spinning = true
- xadd(&sched.nmspinning, 1)
+ atomic.Xadd(&sched.nmspinning, 1)
}
// random steal from other P's
for i := 0; i < int(4*gomaxprocs); i++ {
@@ -1805,7 +1808,7 @@ stop:
unlock(&sched.lock)
if _g_.m.spinning {
_g_.m.spinning = false
- xadd(&sched.nmspinning, -1)
+ atomic.Xadd(&sched.nmspinning, -1)
}
// check all runqueues once again
@@ -1824,7 +1827,7 @@ stop:
}
// poll network
- if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
+ if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
if _g_.m.p != 0 {
throw("findrunnable: netpoll with p")
}
@@ -1832,7 +1835,7 @@ stop:
throw("findrunnable: netpoll with spinning")
}
gp := netpoll(true) // block until new work is available
- atomicstore64(&sched.lastpoll, uint64(nanotime()))
+ atomic.Store64(&sched.lastpoll, uint64(nanotime()))
if gp != nil {
lock(&sched.lock)
_p_ = pidleget()
@@ -1859,17 +1862,17 @@ func resetspinning() {
var nmspinning uint32
if _g_.m.spinning {
_g_.m.spinning = false
- nmspinning = xadd(&sched.nmspinning, -1)
+ nmspinning = atomic.Xadd(&sched.nmspinning, -1)
if int32(nmspinning) < 0 {
throw("findrunnable: negative nmspinning")
}
} else {
- nmspinning = atomicload(&sched.nmspinning)
+ nmspinning = atomic.Load(&sched.nmspinning)
}
// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
// so see if we need to wakeup another P here.
- if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
+ if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
wakep()
}
}
@@ -2171,7 +2174,7 @@ func reentersyscall(pc, sp uintptr) {
save(pc, sp)
}
- if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
+ if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic
systemstack(entersyscall_sysmon)
save(pc, sp)
}
@@ -2186,7 +2189,7 @@ func reentersyscall(pc, sp uintptr) {
_g_.sysblocktraced = true
_g_.m.mcache = nil
_g_.m.p.ptr().m = 0
- atomicstore(&_g_.m.p.ptr().status, _Psyscall)
+ atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
if sched.gcwaiting != 0 {
systemstack(entersyscall_gcwait)
save(pc, sp)
@@ -2207,8 +2210,8 @@ func entersyscall(dummy int32) {
func entersyscall_sysmon() {
lock(&sched.lock)
- if atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
@@ -2219,7 +2222,7 @@ func entersyscall_gcwait() {
_p_ := _g_.m.p.ptr()
lock(&sched.lock)
- if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
if trace.enabled {
traceGoSysBlock(_p_)
traceProcStop(_p_)
@@ -2374,7 +2377,7 @@ func exitsyscallfast() bool {
}
// Try to re-acquire the last P.
- if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
+ if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
// There's a cpu for us, so we can run.
_g_.m.mcache = _g_.m.p.ptr().mcache
_g_.m.p.ptr().m.set(_g_.m)
@@ -2424,8 +2427,8 @@ func exitsyscallfast() bool {
func exitsyscallfast_pidle() bool {
lock(&sched.lock)
_p_ := pidleget()
- if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
@@ -2447,8 +2450,8 @@ func exitsyscall0(gp *g) {
_p_ := pidleget()
if _p_ == nil {
globrunqput(gp)
- } else if atomicload(&sched.sysmonwait) != 0 {
- atomicstore(&sched.sysmonwait, 0)
+ } else if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
@@ -2602,7 +2605,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
// At startup sched.goidgen=0, so main goroutine receives goid=1.
- _p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
+ _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
_p_.goidcache -= _GoidCacheBatch - 1
_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
}
@@ -2616,7 +2619,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
}
runqput(_p_, newg, true)
- if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
wakep()
}
_g_.m.locks--
@@ -2828,7 +2831,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
mp.mallocing++
// Coordinate with stack barrier insertion in scanstack.
- for !cas(&gp.stackLock, 0, 1) {
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
osyield()
}
@@ -2935,17 +2938,17 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
}
}
}
- atomicstore(&gp.stackLock, 0)
+ atomic.Store(&gp.stackLock, 0)
if prof.hz != 0 {
// Simple cas-lock to coordinate with setcpuprofilerate.
- for !cas(&prof.lock, 0, 1) {
+ for !atomic.Cas(&prof.lock, 0, 1) {
osyield()
}
if prof.hz != 0 {
cpuprof.add(stk[:n])
}
- atomicstore(&prof.lock, 0)
+ atomic.Store(&prof.lock, 0)
}
mp.mallocing--
}
@@ -2991,11 +2994,11 @@ func setcpuprofilerate_m(hz int32) {
// it would deadlock.
resetcpuprofiler(0)
- for !cas(&prof.lock, 0, 1) {
+ for !atomic.Cas(&prof.lock, 0, 1) {
osyield()
}
prof.hz = hz
- atomicstore(&prof.lock, 0)
+ atomic.Store(&prof.lock, 0)
lock(&sched.lock)
sched.profilehz = hz
@@ -3139,7 +3142,7 @@ func procresize(nprocs int32) *p {
}
}
var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
- atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
+ atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
return runnablePs
}
@@ -3317,10 +3320,10 @@ func sysmon() {
delay = 10 * 1000
}
usleep(delay)
- if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
+ if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
lock(&sched.lock)
- if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
- atomicstore(&sched.sysmonwait, 1)
+ if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
+ atomic.Store(&sched.sysmonwait, 1)
unlock(&sched.lock)
// Make wake-up period small enough
// for the sampling to be correct.
@@ -3330,7 +3333,7 @@ func sysmon() {
}
notetsleep(&sched.sysmonnote, maxsleep)
lock(&sched.lock)
- atomicstore(&sched.sysmonwait, 0)
+ atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
idle = 0
delay = 20
@@ -3338,11 +3341,11 @@ func sysmon() {
unlock(&sched.lock)
}
// poll network if not polled for more than 10ms
- lastpoll := int64(atomicload64(&sched.lastpoll))
+ lastpoll := int64(atomic.Load64(&sched.lastpoll))
now := nanotime()
unixnow := unixnanotime()
if lastpoll != 0 && lastpoll+10*1000*1000 < now {
- cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+ atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
gp := netpoll(false) // non-blocking - returns list of goroutines
if gp != nil {
// Need to decrement number of idle locked M's
@@ -3365,8 +3368,8 @@ func sysmon() {
idle++
}
// check if we need to force a GC
- lastgc := int64(atomicload64(&memstats.last_gc))
- if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 {
+ lastgc := int64(atomic.Load64(&memstats.last_gc))
+ if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
lock(&forcegc.lock)
forcegc.idle = 0
forcegc.g.schedlink = 0
@@ -3417,7 +3420,7 @@ func retake(now int64) uint32 {
// On the one hand we don't want to retake Ps if there is no other work to do,
// but on the other hand we want to retake them eventually
// because they can prevent the sysmon thread from deep sleep.
- if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
continue
}
// Need to decrement number of idle locked M's
@@ -3425,7 +3428,7 @@ func retake(now int64) uint32 {
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1)
- if cas(&_p_.status, s, _Pidle) {
+ if atomic.Cas(&_p_.status, s, _Pidle) {
if trace.enabled {
traceGoSysBlock(_p_)
traceProcStop(_p_)
@@ -3523,8 +3526,8 @@ func schedtrace(detailed bool) {
continue
}
mp := _p_.m.ptr()
- h := atomicload(&_p_.runqhead)
- t := atomicload(&_p_.runqtail)
+ h := atomic.Load(&_p_.runqhead)
+ t := atomic.Load(&_p_.runqtail)
if detailed {
id := int32(-1)
if mp != nil {
@@ -3697,7 +3700,7 @@ func pidleput(_p_ *p) {
}
_p_.link = sched.pidle
sched.pidle.set(_p_)
- xadd(&sched.npidle, 1) // TODO: fast atomic
+ atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
}
// Try get a p from _Pidle list.
@@ -3708,7 +3711,7 @@ func pidleget() *p {
_p_ := sched.pidle.ptr()
if _p_ != nil {
sched.pidle = _p_.link
- xadd(&sched.npidle, -1) // TODO: fast atomic
+ atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
}
return _p_
}
@@ -3754,11 +3757,11 @@ func runqput(_p_ *p, gp *g, next bool) {
}
retry:
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
t := _p_.runqtail
if t-h < uint32(len(_p_.runq)) {
_p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
return
}
if runqputslow(_p_, gp, h, t) {
@@ -3782,7 +3785,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
for i := uint32(0); i < n; i++ {
batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
}
- if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return false
}
batch[n] = gp
@@ -3823,13 +3826,13 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) {
}
for {
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
t := _p_.runqtail
if t == h {
return nil, false
}
gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
return gp, false
}
}
@@ -3841,8 +3844,8 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) {
// Can be executed by any P.
func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
for {
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
n := t - h
n = n - n/2
if n == 0 {
@@ -3873,7 +3876,7 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
batch[(batchHead+i)%uint32(len(batch))] = g
}
- if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
return n
}
}
@@ -3893,11 +3896,11 @@ func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
if n == 0 {
return gp
}
- h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
if t-h+n >= uint32(len(_p_.runq)) {
throw("runqsteal: runq overflow")
}
- atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
return gp
}