| field | value | date |
|---|---|---|
| author | Michael Matloob <matloob@golang.org> | 2015-11-02 14:09:24 -0500 |
| committer | Michael Matloob <matloob@golang.org> | 2015-11-10 17:38:04 +0000 |
| commit | 67faca7d9c54b367aee5fdeef2d5dd609fcf99d0 (patch) | |
| tree | 5c6e8b4e243286311bbc4743d6a8e86f16dda85f /src/runtime/hashmap.go | |
| parent | d33360571f46b46724b908a5603520dce1e8a81c (diff) | |
| download | go-67faca7d9c54b367aee5fdeef2d5dd609fcf99d0.tar.xz | |
runtime: break atomics out into package runtime/internal/atomic
This change breaks out most of the atomics functions in the runtime
into package runtime/internal/atomic. It adds some basic support
in the toolchain for runtime packages, and also modifies linux/arm
atomics to remove the dependency on the runtime's mutex. The mutexes
have been replaced with spinlocks.
All trybots are happy!
In addition to the trybots, I've tested on the darwin/arm64 builder,
on the darwin/arm builder, and on a ppc64le machine.
Change-Id: I6698c8e3cf3834f55ce5824059f44d00dc8e3c2f
Reviewed-on: https://go-review.googlesource.com/14204
Run-TryBot: Michael Matloob <matloob@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
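
The spinlock change mentioned in the commit message lands in the linux/arm atomic helpers rather than in this file, but the idea it refers to is a simple compare-and-swap loop. Below is a minimal sketch in ordinary Go using the public sync/atomic package; the runtime's real code lives in runtime/internal/atomic and has to cooperate with preemption and the scheduler, so treat this only as an illustration of the CAS-based idea, not the actual implementation.

```go
package main

import (
	"runtime"
	"sync/atomic"
)

// spinLock is a sketch of a CAS-based spinlock: 0 means unlocked, 1 means
// held. It only illustrates the idea; the runtime's linux/arm code is not
// written this way.
type spinLock struct {
	state uint32
}

// lock spins until it wins the 0 -> 1 transition.
func (l *spinLock) lock() {
	for !atomic.CompareAndSwapUint32(&l.state, 0, 1) {
		runtime.Gosched() // yield so the current holder can make progress
	}
}

// unlock releases the lock with an atomic store.
func (l *spinLock) unlock() {
	atomic.StoreUint32(&l.state, 0)
}

func main() {
	var l spinLock
	l.lock()
	// ... critical section ...
	l.unlock()
}
```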
Diffstat (limited to 'src/runtime/hashmap.go')
| -rw-r--r-- | src/runtime/hashmap.go | 19 |
1 file changed, 10 insertions, 9 deletions
```diff
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go
index d59ad297f5..667367891c 100644
--- a/src/runtime/hashmap.go
+++ b/src/runtime/hashmap.go
@@ -54,6 +54,7 @@ package runtime
 // before the table grows. Typical tables will be somewhat less loaded.
 
 import (
+	"runtime/internal/atomic"
 	"unsafe"
 )
 
@@ -280,7 +281,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
-		return atomicloadp(unsafe.Pointer(&zeroptr))
+		return atomic.Loadp(unsafe.Pointer(&zeroptr))
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -315,7 +316,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return atomicloadp(unsafe.Pointer(&zeroptr))
+			return atomic.Loadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -331,7 +332,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
-		return atomicloadp(unsafe.Pointer(&zeroptr)), false
+		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -366,7 +367,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return atomicloadp(unsafe.Pointer(&zeroptr)), false
+			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
 		}
 	}
 }
@@ -627,7 +628,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	// Remember we have an iterator.
 	// Can run concurrently with another hash_iter_init().
 	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-		atomicor8(&h.flags, iterator|oldIterator)
+		atomic.Or8(&h.flags, iterator|oldIterator)
 	}
 
 	mapiternext(it)
@@ -1024,14 +1025,14 @@ var zerosize uintptr = initialZeroSize
 // serve as the zero value for t.
 func mapzero(t *_type) {
 	// Is the type small enough for existing buffer?
-	cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
 	if t.size <= cursize {
 		return
 	}
 
 	// Allocate a new buffer.
 	lock(&zerolock)
-	cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
 	if cursize < t.size {
 		for cursize < t.size {
 			cursize *= 2
@@ -1040,8 +1041,8 @@ func mapzero(t *_type) {
 				throw("map element too large")
 			}
 		}
-		atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
-		atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
+		atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+		atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
 	}
 	unlock(&zerolock)
 }
```
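
The mapzero hunks at the end of the diff preserve a double-checked publication pattern across the rename: readers take a lock-free atomic load of zerosize, and growth happens under zerolock, with the new buffer published by an atomic store before the new size. Here is a user-space sketch of the same pattern written against sync/atomic and sync.Mutex purely for illustration; the runtime itself uses its own lock, persistentalloc, and the internal atomic package, and all names below are hypothetical.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// zeroBuf/zeroSize mimic the diff's zeroptr/zerosize pair: a shared zero
// buffer that only ever grows. Readers load it without locking; growth is
// serialized by zeroLock and published with atomic stores.
var (
	zeroLock sync.Mutex
	zeroBuf  atomic.Value // holds a []byte
	zeroSize atomic.Uint64
)

func init() {
	zeroBuf.Store(make([]byte, 1024))
	zeroSize.Store(1024)
}

// ensureZero grows the shared buffer (by doubling) until it is at least
// n bytes, echoing mapzero's lock-then-recheck structure.
func ensureZero(n uint64) []byte {
	if zeroSize.Load() >= n { // fast path: lock-free size check
		return zeroBuf.Load().([]byte)
	}
	zeroLock.Lock()
	defer zeroLock.Unlock()
	cur := zeroSize.Load() // re-check under the lock
	if cur < n {
		for cur < n {
			cur *= 2
		}
		zeroBuf.Store(make([]byte, cur)) // publish the buffer first...
		zeroSize.Store(cur)              // ...then the size, as the diff does
	}
	return zeroBuf.Load().([]byte)
}

func main() {
	fmt.Println(len(ensureZero(5000))) // prints 8192
}
```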
