aboutsummaryrefslogtreecommitdiff
path: root/src/sync
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2015-09-23 10:03:54 +0200
committerDmitry Vyukov <dvyukov@google.com>2015-11-26 16:50:31 +0000
commit7b767f4e521c2481e08051c843badd0382fde3b0 (patch)
treeb00592a9d37d09c0ede70d403016eaed45f4ad5d /src/sync
parente9081b3c76f21efb0538cce54f04cf1a9a8cdb31 (diff)
downloadgo-7b767f4e521c2481e08051c843badd0382fde3b0.tar.xz
internal/race: add package
Factor out duplicated race thunks from the sync, syscall, net and fmt packages into a separate package and use it. Fixes #8593 Change-Id: I156869c50946277809f6b509463752e7f7d28cdb Reviewed-on: https://go-review.googlesource.com/14870 Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org> Run-TryBot: Dmitry Vyukov <dvyukov@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'src/sync')
-rw-r--r--src/sync/cond.go21
-rw-r--r--src/sync/export_test.go2
-rw-r--r--src/sync/mutex.go13
-rw-r--r--src/sync/pool.go5
-rw-r--r--src/sync/race.go42
-rw-r--r--src/sync/race0.go34
-rw-r--r--src/sync/rwmutex.go49
-rw-r--r--src/sync/waitgroup.go33
-rw-r--r--src/sync/waitgroup_test.go3
9 files changed, 65 insertions, 137 deletions
diff --git a/src/sync/cond.go b/src/sync/cond.go
index 9e6bc170f1..0aefcda908 100644
--- a/src/sync/cond.go
+++ b/src/sync/cond.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -51,12 +52,12 @@ func NewCond(l Locker) *Cond {
//
func (c *Cond) Wait() {
c.checker.check()
- if raceenabled {
- raceDisable()
+ if race.Enabled {
+ race.Disable()
}
atomic.AddUint32(&c.waiters, 1)
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
c.L.Unlock()
runtime_Syncsemacquire(&c.sema)
@@ -81,14 +82,14 @@ func (c *Cond) Broadcast() {
func (c *Cond) signalImpl(all bool) {
c.checker.check()
- if raceenabled {
- raceDisable()
+ if race.Enabled {
+ race.Disable()
}
for {
old := atomic.LoadUint32(&c.waiters)
if old == 0 {
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
return
}
@@ -97,8 +98,8 @@ func (c *Cond) signalImpl(all bool) {
new = 0
}
if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
runtime_Syncsemrelease(&c.sema, old-new)
return
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index 6f49b3bd8a..fa5983a2d1 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -7,5 +7,3 @@ package sync
// Export for testing.
var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease
-
-const RaceEnabled = raceenabled
diff --git a/src/sync/mutex.go b/src/sync/mutex.go
index 3f280ad719..eb526144c5 100644
--- a/src/sync/mutex.go
+++ b/src/sync/mutex.go
@@ -11,6 +11,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -41,8 +42,8 @@ const (
func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
- if raceenabled {
- raceAcquire(unsafe.Pointer(m))
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
}
return
}
@@ -85,8 +86,8 @@ func (m *Mutex) Lock() {
}
}
- if raceenabled {
- raceAcquire(unsafe.Pointer(m))
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
}
}
@@ -97,9 +98,9 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
- if raceenabled {
+ if race.Enabled {
_ = m.state
- raceRelease(unsafe.Pointer(m))
+ race.Release(unsafe.Pointer(m))
}
// Fast path: drop lock bit.
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 0cf0637024..381af0bead 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"runtime"
"sync/atomic"
"unsafe"
@@ -59,7 +60,7 @@ type poolLocal struct {
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
- if raceenabled {
+ if race.Enabled {
// Under race detector the Pool degenerates into no-op.
// It's conforming, simple and does not introduce excessive
// happens-before edges between unrelated goroutines.
@@ -91,7 +92,7 @@ func (p *Pool) Put(x interface{}) {
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
- if raceenabled {
+ if race.Enabled {
if p.New != nil {
return p.New()
}
diff --git a/src/sync/race.go b/src/sync/race.go
deleted file mode 100644
index fd0277dcc9..0000000000
--- a/src/sync/race.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build race
-
-package sync
-
-import (
- "runtime"
- "unsafe"
-)
-
-const raceenabled = true
-
-func raceAcquire(addr unsafe.Pointer) {
- runtime.RaceAcquire(addr)
-}
-
-func raceRelease(addr unsafe.Pointer) {
- runtime.RaceRelease(addr)
-}
-
-func raceReleaseMerge(addr unsafe.Pointer) {
- runtime.RaceReleaseMerge(addr)
-}
-
-func raceDisable() {
- runtime.RaceDisable()
-}
-
-func raceEnable() {
- runtime.RaceEnable()
-}
-
-func raceRead(addr unsafe.Pointer) {
- runtime.RaceRead(addr)
-}
-
-func raceWrite(addr unsafe.Pointer) {
- runtime.RaceWrite(addr)
-}
diff --git a/src/sync/race0.go b/src/sync/race0.go
deleted file mode 100644
index 65ada1c5d3..0000000000
--- a/src/sync/race0.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !race
-
-package sync
-
-import (
- "unsafe"
-)
-
-const raceenabled = false
-
-func raceAcquire(addr unsafe.Pointer) {
-}
-
-func raceRelease(addr unsafe.Pointer) {
-}
-
-func raceReleaseMerge(addr unsafe.Pointer) {
-}
-
-func raceDisable() {
-}
-
-func raceEnable() {
-}
-
-func raceRead(addr unsafe.Pointer) {
-}
-
-func raceWrite(addr unsafe.Pointer) {
-}
diff --git a/src/sync/rwmutex.go b/src/sync/rwmutex.go
index 0e8a58e5f0..d438c93c88 100644
--- a/src/sync/rwmutex.go
+++ b/src/sync/rwmutex.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceDisable()
+ race.Disable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem)
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(&rw.readerSem))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
}
}
@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
- raceDisable()
+ race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
- raceEnable()
+ race.Enable()
panic("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() {
runtime_Semrelease(&rw.writerSem)
}
}
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
}
@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() {
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceDisable()
+ race.Disable()
}
// First, resolve competition with other writers.
rw.w.Lock()
@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() {
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem)
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(&rw.readerSem))
- raceAcquire(unsafe.Pointer(&rw.writerSem))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
}
}
@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() {
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceRelease(unsafe.Pointer(&rw.readerSem))
- raceRelease(unsafe.Pointer(&rw.writerSem))
- raceDisable()
+ race.Release(unsafe.Pointer(&rw.readerSem))
+ race.Release(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
}
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
- raceEnable()
+ race.Enable()
panic("sync: Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() {
}
// Allow other writers to proceed.
rw.w.Unlock()
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
}
diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go
index de399e64eb..c77fec306c 100644
--- a/src/sync/waitgroup.go
+++ b/src/sync/waitgroup.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -46,24 +47,24 @@ func (wg *WaitGroup) state() *uint64 {
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
statep := wg.state()
- if raceenabled {
+ if race.Enabled {
_ = *statep // trigger nil deref early
if delta < 0 {
// Synchronize decrements with Wait.
- raceReleaseMerge(unsafe.Pointer(wg))
+ race.ReleaseMerge(unsafe.Pointer(wg))
}
- raceDisable()
- defer raceEnable()
+ race.Disable()
+ defer race.Enable()
}
state := atomic.AddUint64(statep, uint64(delta)<<32)
v := int32(state >> 32)
w := uint32(state)
- if raceenabled {
+ if race.Enabled {
if delta > 0 && v == int32(delta) {
// The first increment must be synchronized with Wait.
// Need to model this as a read, because there can be
// several concurrent wg.counter transitions from 0.
- raceRead(unsafe.Pointer(&wg.sema))
+ race.Read(unsafe.Pointer(&wg.sema))
}
}
if v < 0 {
@@ -98,9 +99,9 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
statep := wg.state()
- if raceenabled {
+ if race.Enabled {
_ = *statep // trigger nil deref early
- raceDisable()
+ race.Disable()
}
for {
state := atomic.LoadUint64(statep)
@@ -108,28 +109,28 @@ func (wg *WaitGroup) Wait() {
w := uint32(state)
if v == 0 {
// Counter is 0, no need to wait.
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(wg))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
}
return
}
// Increment waiters count.
if atomic.CompareAndSwapUint64(statep, state, state+1) {
- if raceenabled && w == 0 {
+ if race.Enabled && w == 0 {
// Wait must be synchronized with the first Add.
// Need to model this is as a write to race with the read in Add.
// As a consequence, can do the write only for the first waiter,
// otherwise concurrent Waits will race with each other.
- raceWrite(unsafe.Pointer(&wg.sema))
+ race.Write(unsafe.Pointer(&wg.sema))
}
runtime_Semacquire(&wg.sema)
if *statep != 0 {
panic("sync: WaitGroup is reused before previous Wait has returned")
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(wg))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
}
return
}
diff --git a/src/sync/waitgroup_test.go b/src/sync/waitgroup_test.go
index 3e3e3bf824..a581660940 100644
--- a/src/sync/waitgroup_test.go
+++ b/src/sync/waitgroup_test.go
@@ -5,6 +5,7 @@
package sync_test
import (
+ "internal/race"
"runtime"
. "sync"
"sync/atomic"
@@ -48,7 +49,7 @@ func TestWaitGroup(t *testing.T) {
}
func knownRacy(t *testing.T) {
- if RaceEnabled {
+ if race.Enabled {
t.Skip("skipping known-racy test under the race detector")
}
}