author		Diogo Pinela <diogoid7400@gmail.com>	2018-03-13 23:36:45 +0000
committer	Brad Fitzpatrick <bradfitz@golang.org>	2018-03-15 09:56:25 +0000
commit		9ff7df003d770b1dffc984e1861fd009fe08d579 (patch)
tree		ba4160e65355a5fafd03e33ef3ca18f8ea31710d /src
parent		672729ebbd15e0b0dfac1ba22e35e92557215e1a (diff)
download	go-9ff7df003d770b1dffc984e1861fd009fe08d579.tar.xz
sync: make WaitGroup more space-efficient
The struct stores its 64-bit state field in a 12-byte array to ensure
that it can be 64-bit-aligned. This leaves 4 spare bytes, which we can
reuse to store the sema field. (32-bit alignment is still guaranteed
because the array type was changed to [3]uint32.)

Fixes #19149.

Change-Id: I9bc20e69e45e0e07fbf496080f3650e8be0d6e8d
Reviewed-on: https://go-review.googlesource.com/100515
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
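The trick is easier to see in isolation. Below is a minimal, hypothetical sketch (not part of this change; the demo type and the split method are illustrative names) of how a [3]uint32 array always yields 8 aligned bytes for a uint64 plus one spare uint32, whichever 8-byte boundary the array lands on:

package main

import (
	"fmt"
	"unsafe"
)

// demo mirrors the patched WaitGroup layout: one [3]uint32 array
// holds both the 64-bit state word and the 32-bit semaphore word.
type demo struct {
	state1 [3]uint32
}

// split picks the layout at runtime: if the array is 8-byte-aligned,
// words 0-1 form the uint64 and word 2 is spare; otherwise words 1-2
// are the aligned pair and word 0 is spare.
func (d *demo) split() (statep *uint64, semap *uint32) {
	if uintptr(unsafe.Pointer(&d.state1))%8 == 0 {
		return (*uint64)(unsafe.Pointer(&d.state1)), &d.state1[2]
	}
	return (*uint64)(unsafe.Pointer(&d.state1[1])), &d.state1[0]
}

func main() {
	var d demo
	statep, semap := d.split()
	*statep = 1<<32 | 2 // e.g. counter 1, waiter count 2
	*semap = 7
	fmt.Println(*statep, *semap)
}

Either way, exactly 8 of the 12 bytes sit on an 8-byte boundary, so 64-bit atomics on statep stay safe even on 32-bit platforms that only 4-align the struct.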
Diffstat (limited to 'src')
-rw-r--r--	src/sync/waitgroup.go | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go
index 2fa7c3e07e..99dd400006 100644
--- a/src/sync/waitgroup.go
+++ b/src/sync/waitgroup.go
@@ -23,16 +23,17 @@ type WaitGroup struct {
 	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
 	// 64-bit atomic operations require 64-bit alignment, but 32-bit
 	// compilers do not ensure it. So we allocate 12 bytes and then use
-	// the aligned 8 bytes in them as state.
-	state1 [12]byte
-	sema   uint32
+	// the aligned 8 bytes in them as state, and the other 4 as storage
+	// for the sema.
+	state1 [3]uint32
 }
 
-func (wg *WaitGroup) state() *uint64 {
+// state returns pointers to the state and sema fields stored within wg.state1.
+func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
 	if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
-		return (*uint64)(unsafe.Pointer(&wg.state1))
+		return (*uint64)(unsafe.Pointer(&wg.state1)), &wg.state1[2]
 	} else {
-		return (*uint64)(unsafe.Pointer(&wg.state1[4]))
+		return (*uint64)(unsafe.Pointer(&wg.state1[1])), &wg.state1[0]
 	}
 }
@@ -50,7 +51,7 @@ func (wg *WaitGroup) state() *uint64 {
 // new Add calls must happen after all previous Wait calls have returned.
 // See the WaitGroup example.
 func (wg *WaitGroup) Add(delta int) {
-	statep := wg.state()
+	statep, semap := wg.state()
 	if race.Enabled {
 		_ = *statep // trigger nil deref early
 		if delta < 0 {
@@ -67,7 +68,7 @@ func (wg *WaitGroup) Add(delta int) {
 		// The first increment must be synchronized with Wait.
 		// Need to model this as a read, because there can be
 		// several concurrent wg.counter transitions from 0.
-		race.Read(unsafe.Pointer(&wg.sema))
+		race.Read(unsafe.Pointer(semap))
 	}
 	if v < 0 {
 		panic("sync: negative WaitGroup counter")
@@ -89,7 +90,7 @@ func (wg *WaitGroup) Add(delta int) {
 	// Reset waiters count to 0.
 	*statep = 0
 	for ; w != 0; w-- {
-		runtime_Semrelease(&wg.sema, false)
+		runtime_Semrelease(semap, false)
 	}
 }
 
@@ -100,7 +101,7 @@ func (wg *WaitGroup) Done() {
 
 // Wait blocks until the WaitGroup counter is zero.
 func (wg *WaitGroup) Wait() {
-	statep := wg.state()
+	statep, semap := wg.state()
 	if race.Enabled {
 		_ = *statep // trigger nil deref early
 		race.Disable()
@@ -124,9 +125,9 @@ func (wg *WaitGroup) Wait() {
 				// Need to model this as a write to race with the read in Add.
 				// As a consequence, can do the write only for the first waiter,
 				// otherwise concurrent Waits will race with each other.
-				race.Write(unsafe.Pointer(&wg.sema))
+				race.Write(unsafe.Pointer(semap))
 			}
-			runtime_Semacquire(&wg.sema)
+			runtime_Semacquire(semap)
 			if *statep != 0 {
 				panic("sync: WaitGroup is reused before previous Wait has returned")
 			}
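A quick way to observe the saving is to print the struct size (a hedged check, not part of this change; the value depends on the Go version and platform, and WaitGroup also contains a zero-size noCopy field):

package main

import (
	"fmt"
	"sync"
	"unsafe"
)

func main() {
	// Before this change the payload was [12]byte + uint32 = 16 bytes;
	// afterwards the single [3]uint32 array is 12 bytes.
	fmt.Println(unsafe.Sizeof(sync.WaitGroup{}))
}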