diff options
Diffstat (limited to 'src/runtime/parfor.go')
| -rw-r--r-- | src/runtime/parfor.go | 30 |
1 file changed, 16 insertions, 14 deletions
diff --git a/src/runtime/parfor.go b/src/runtime/parfor.go index c82beee3fd..2db43bd424 100644 --- a/src/runtime/parfor.go +++ b/src/runtime/parfor.go @@ -6,6 +6,8 @@ package runtime +import "runtime/internal/atomic" + // A parfor holds state for the parallel for operation. type parfor struct { body func(*parfor, uint32) // executed for each element @@ -82,7 +84,7 @@ func parforsetup(desc *parfor, nthr, n uint32, wait bool, body func(*parfor, uin func parfordo(desc *parfor) { // Obtain 0-based thread index. - tid := xadd(&desc.thrseq, 1) - 1 + tid := atomic.Xadd(&desc.thrseq, 1) - 1 if tid >= desc.nthr { print("tid=", tid, " nthr=", desc.nthr, "\n") throw("parfor: invalid tid") @@ -103,7 +105,7 @@ func parfordo(desc *parfor) { for { // While there is local work, // bump low index and execute the iteration. - pos := xadd64(mypos, 1) + pos := atomic.Xadd64(mypos, 1) begin := uint32(pos) - 1 end := uint32(pos >> 32) if begin < end { @@ -120,7 +122,7 @@ func parfordo(desc *parfor) { // increment the done counter... if try > desc.nthr*4 && !idle { idle = true - xadd(&desc.done, 1) + atomic.Xadd(&desc.done, 1) } // ...if all threads have incremented the counter, @@ -131,7 +133,7 @@ func parfordo(desc *parfor) { } if desc.done+extra == desc.nthr { if !idle { - xadd(&desc.done, 1) + atomic.Xadd(&desc.done, 1) } goto exit } @@ -145,7 +147,7 @@ func parfordo(desc *parfor) { victimpos := &desc.thr[victim].pos for { // See if it has any work. 
- pos := atomicload64(victimpos) + pos := atomic.Load64(victimpos) begin = uint32(pos) end = uint32(pos >> 32) if begin+1 >= end { @@ -154,12 +156,12 @@ func parfordo(desc *parfor) { break } if idle { - xadd(&desc.done, -1) + atomic.Xadd(&desc.done, -1) idle = false } begin2 := begin + (end-begin)/2 newpos := uint64(begin) | uint64(begin2)<<32 - if cas64(victimpos, pos, newpos) { + if atomic.Cas64(victimpos, pos, newpos) { begin = begin2 break } @@ -169,7 +171,7 @@ func parfordo(desc *parfor) { if idle { throw("parfor: should not be idle") } - atomicstore64(mypos, uint64(begin)|uint64(end)<<32) + atomic.Store64(mypos, uint64(begin)|uint64(end)<<32) me.nsteal++ me.nstealcnt += uint64(end) - uint64(begin) break @@ -185,7 +187,7 @@ func parfordo(desc *parfor) { // If a caller asked not to wait for the others, exit now // (assume that most work is already done at this point). if !idle { - xadd(&desc.done, 1) + atomic.Xadd(&desc.done, 1) } goto exit } else if try < 6*desc.nthr { @@ -199,11 +201,11 @@ func parfordo(desc *parfor) { } exit: - xadd64(&desc.nsteal, int64(me.nsteal)) - xadd64(&desc.nstealcnt, int64(me.nstealcnt)) - xadd64(&desc.nprocyield, int64(me.nprocyield)) - xadd64(&desc.nosyield, int64(me.nosyield)) - xadd64(&desc.nsleep, int64(me.nsleep)) + atomic.Xadd64(&desc.nsteal, int64(me.nsteal)) + atomic.Xadd64(&desc.nstealcnt, int64(me.nstealcnt)) + atomic.Xadd64(&desc.nprocyield, int64(me.nprocyield)) + atomic.Xadd64(&desc.nosyield, int64(me.nosyield)) + atomic.Xadd64(&desc.nsleep, int64(me.nsleep)) me.nsteal = 0 me.nstealcnt = 0 me.nprocyield = 0 |
