diff options
Diffstat (limited to 'src/runtime')
| -rw-r--r-- | src/runtime/hashmap.go      | 65 |
| -rw-r--r-- | src/runtime/hashmap_fast.go | 33 |
| -rw-r--r-- | src/runtime/map_test.go     | 16 |
3 files changed, 54 insertions, 60 deletions
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go index 4f5d03d983..ff59faab5d 100644 --- a/src/runtime/hashmap.go +++ b/src/runtime/hashmap.go @@ -236,9 +236,6 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap { throw("need padding in bucket (value)") } - // make sure zeroptr is large enough - mapzero(t.elem) - // find size parameter which will hold the requested # of elements B := uint8(0) for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ { @@ -283,7 +280,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { msanread(key, t.key.size) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -321,7 +318,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -337,7 +334,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) msanread(key, t.key.size) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -375,7 +372,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } @@ -426,6 +423,22 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe } } +func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer { + v := mapaccess1(t, h, key) + if v == unsafe.Pointer(&zeroVal[0]) { + return zero + } + return v +} + +func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, 
bool) { + v := mapaccess1(t, h, key) + if v == unsafe.Pointer(&zeroVal[0]) { + return zero, false + } + return v, true +} + func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { if h == nil { panic(plainError("assignment to entry in nil map")) @@ -1044,39 +1057,5 @@ func reflect_ismapkey(t *_type) bool { return ismapkey(t) } -var zerolock mutex - -const initialZeroSize = 1024 - -var zeroinitial [initialZeroSize]byte - -// All accesses to zeroptr and zerosize must be atomic so that they -// can be accessed without locks in the common case. -var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial) -var zerosize uintptr = initialZeroSize - -// mapzero ensures that zeroptr points to a buffer large enough to -// serve as the zero value for t. -func mapzero(t *_type) { - // Is the type small enough for existing buffer? - cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize))) - if t.size <= cursize { - return - } - - // Allocate a new buffer. - lock(&zerolock) - cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize))) - if cursize < t.size { - for cursize < t.size { - cursize *= 2 - if cursize == 0 { - // need >2GB zero on 32-bit machine - throw("map element too large") - } - } - atomic.StorepNoWB(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys)) - atomic.StorepNoWB(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize)) - } - unlock(&zerolock) -} +const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go +var zeroVal [maxZero]byte diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go index 6a5484edee..8f9bb5a6fc 100644 --- a/src/runtime/hashmap_fast.go +++ b/src/runtime/hashmap_fast.go @@ -5,7 +5,6 @@ package runtime import ( - "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -16,7 +15,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32)) } if h == nil || h.count == 0 { - 
return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -50,7 +49,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -61,7 +60,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -95,7 +94,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } @@ -106,7 +105,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -140,7 +139,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -151,7 +150,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -185,7 +184,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key 
uint64) (unsafe.Pointer, bool) { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } @@ -196,7 +195,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -220,7 +219,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)) } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } // long key, try not to do more comparisons than necessary keymaybe := uintptr(bucketCnt) @@ -258,7 +257,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)) } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } dohash: hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) @@ -290,7 +289,7 @@ dohash: } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -301,7 +300,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -325,7 +324,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true } } - return 
atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } // long key, try not to do more comparisons than necessary keymaybe := uintptr(bucketCnt) @@ -361,7 +360,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } dohash: hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) @@ -393,7 +392,7 @@ dohash: } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go index 9d2894cb6f..496f8e8868 100644 --- a/src/runtime/map_test.go +++ b/src/runtime/map_test.go @@ -317,6 +317,22 @@ func TestBigItems(t *testing.T) { } } +func TestMapHugeZero(t *testing.T) { + type T [4000]byte + m := map[int]T{} + x := m[0] + if x != (T{}) { + t.Errorf("map value not zero") + } + y, ok := m[0] + if ok { + t.Errorf("map value should be missing") + } + if y != (T{}) { + t.Errorf("map value not zero") + } +} + type empty struct { } |
