author    Martin Möhrmann <moehrmann@google.com>  2018-01-27 12:48:15 +0100
committer Martin Möhrmann <moehrmann@google.com>  2018-02-17 14:57:32 +0000
commit    f4bb25c937cffb277e5ba87708d286ea7fd1b6ed (patch)
tree      4040b52ea7cbb64b3afbc94757cc449f059287bd /src/runtime/hashmap.go
parent    549cb18a9131221755694c0ccc610ae9a406129d (diff)
download  go-f4bb25c937cffb277e5ba87708d286ea7fd1b6ed.tar.xz
runtime: rename map implementation and test files to use a common prefix

Rename all map implementation and test files to use "map" as a file
name prefix instead of "hashmap" for the implementation and "map" for
the test file names.

Change-Id: I7b317c1f7a660b95c6d1f1a185866f2839e69446
Reviewed-on: https://go-review.googlesource.com/90336
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Diffstat (limited to 'src/runtime/hashmap.go')
-rw-r--r--  src/runtime/hashmap.go | 1249
1 file changed, 0 insertions, 1249 deletions
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go
deleted file mode 100644
index eddb045622..0000000000
--- a/src/runtime/hashmap.go
+++ /dev/null
@@ -1,1249 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/value pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
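
// A minimal standalone sketch of the two hash-bit roles described above,
// assuming a 64-bit platform and some 64-bit hash value; the real
// versions are bucketMask and tophash further down in this file.
func exampleBucketAndTophash(hash uint64, B uint8) (bucket uint64, top uint8) {
	bucket = hash & (1<<B - 1) // low-order B bits pick one of the 2^B buckets
	top = uint8(hash >> 56)    // high-order byte distinguishes entries within a bucket
	if top < 4 {               // 0..3 are reserved marker values (minTopHash below)
		top += 4
	}
	return bucket, top
}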
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and values)
-//  loadFactor  %overflow  bytes/entry  hitprobe  missprobe
-//        4.00       2.13        20.77      3.00       4.00
-//        4.50       4.05        17.30      3.25       4.50
-//        5.00       6.85        14.77      3.50       5.00
-//        5.50      10.55        12.94      3.75       5.50
-//        6.00      15.27        11.67      4.00       6.00
-//        6.50      20.90        10.79      4.25       6.50
-//        7.00      27.14        10.15      4.50       7.00
-//        7.50      34.03         9.73      4.75       7.50
-//        8.00      41.10         9.40      5.00       8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/value pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
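
// Worked example of the 6.5 load factor, written with the integer ratio
// 13/2 that the constants below encode: with B == 5 (32 buckets) the map
// grows once it would exceed 13*32/2 == 208 entries, i.e. an average of
// 6.5 entries per 8-slot bucket.
func growThreshold(B uint8) uintptr {
	return 13 * (uintptr(1) << B) / 2 // loadFactorNum * 2^B / loadFactorDen
}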
-
-import (
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const (
- // Maximum number of key/value pairs a bucket can hold.
- bucketCntBits = 3
- bucketCnt = 1 << bucketCntBits
-
- // Maximum average load of a bucket that triggers growth is 6.5.
-	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
- loadFactorNum = 13
- loadFactorDen = 2
-
- // Maximum key or value size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
- // Fast versions cannot handle big values - the cutoff size for
- // fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
- maxKeySize = 128
- maxValueSize = 128
-
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
- // even though pointers are 32 bit.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
-
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- empty = 0 // cell is empty
- evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
- evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- minTopHash = 4 // minimum tophash for a normal filled cell.
-
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- hashWriting = 4 // a goroutine is writing to the map
- sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*sys.PtrSize) - 1
-)
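
// A standalone sketch of the unsafe.Offsetof trick behind dataOffset:
// placing an int64 field right after a struct makes Offsetof of that
// field equal the struct's size rounded up to 64-bit alignment. The toy
// type here is hypothetical, standing in for bmap.
type toyHeader struct{ tophash [8]uint8 }

var toyDataOffset = unsafe.Offsetof(struct {
	h toyHeader
	v int64
}{}.v) // 8 here; padding would appear if the alignment rounding demanded it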
-
-// A header for a Go map.
-type hmap struct {
- // Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
- // ../reflect/type.go. Don't change this structure without also changing that code!
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // hash seed
-
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
-
- extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
- // If both key and value do not contain pointers and are inline, then we mark bucket
- // type as containing no pointers. This avoids scanning such maps.
- // However, bmap.overflow is a pointer. In order to keep overflow buckets
-	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
- // overflow and oldoverflow are only used if key and value do not contain pointers.
- // overflow contains overflow buckets for hmap.buckets.
- // oldoverflow contains overflow buckets for hmap.oldbuckets.
-	// The indirection allows storing a pointer to the slice in hiter.
- overflow *[]*bmap
- oldoverflow *[]*bmap
-
- // nextOverflow holds a pointer to a free overflow bucket.
- nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
- // tophash generally contains the top byte of the hash value
- // for each key in this bucket. If tophash[0] < minTopHash,
- // tophash[0] is a bucket evacuation state instead.
- tophash [bucketCnt]uint8
- // Followed by bucketCnt keys and then bucketCnt values.
- // NOTE: packing all the keys together and then all the values together makes the
- // code a bit more complicated than alternating key/value/key/value/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
-}
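
// Worked layout for the packing described above, taking map[int64]int8 on
// a 64-bit platform as the example (sizes assumed, not read from a real
// build):
//
//	tophash  [8]uint8    8 bytes
//	keys     [8]int64   64 bytes
//	values   [8]int8     8 bytes
//	overflow pointer     8 bytes  -> 88 bytes, no padding
//
// Interleaved key/value storage would instead pad every int8 value out to
// 8 bytes so the next int64 key stays aligned, wasting 7 bytes per slot.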
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
-// the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
- oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
- if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
- b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
- }
- return uintptr(1) << b
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
- return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (sys.PtrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- return top
-}
-
-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > empty && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
- return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
- // We trigger same-size map growth if there are
- // as many overflow buckets as buckets.
- // We need to be able to count to 1<<h.B.
- if h.B < 16 {
- h.noverflow++
- return
- }
- // Increment with probability 1/(1<<(h.B-15)).
- // When we reach 1<<15 - 1, we will have approximately
- // as many overflow buckets as buckets.
- mask := uint32(1)<<(h.B-15) - 1
- // Example: if h.B == 18, then mask == 7,
- // and fastrand & 7 == 0 with probability 1/8.
- if fastrand()&mask == 0 {
- h.noverflow++
- }
-}
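
// A standalone sketch of the probabilistic counting above, with an
// injected random source standing in for the runtime's fastrand: for
// B >= 16 each call increments with probability 1/2^(B-15), so after n
// overflow allocations the counter holds roughly n/2^(B-15).
func approxIncr(noverflow *uint16, B uint8, rnd func() uint32) {
	if B < 16 {
		*noverflow++ // few buckets: keep an exact count
		return
	}
	mask := uint32(1)<<(B-15) - 1
	if rnd()&mask == 0 { // true with probability 1/(mask+1)
		*noverflow++
	}
}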
-
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
- var ovf *bmap
- if h.extra != nil && h.extra.nextOverflow != nil {
- // We have preallocated overflow buckets available.
- // See makeBucketArray for more details.
- ovf = h.extra.nextOverflow
- if ovf.overflow(t) == nil {
- // We're not at the end of the preallocated overflow buckets. Bump the pointer.
- h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
- } else {
- // This is the last preallocated overflow bucket.
- // Reset the overflow pointer on this bucket,
- // which was set to a non-nil sentinel value.
- ovf.setoverflow(t, nil)
- h.extra.nextOverflow = nil
- }
- } else {
- ovf = (*bmap)(newobject(t.bucket))
- }
- h.incrnoverflow()
- if t.bucket.kind&kindNoPointers != 0 {
- h.createOverflow()
- *h.extra.overflow = append(*h.extra.overflow, ovf)
- }
- b.setoverflow(t, ovf)
- return ovf
-}
-
-func (h *hmap) createOverflow() {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- if h.extra.overflow == nil {
- h.extra.overflow = new([]*bmap)
- }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
- if int64(int(hint)) != hint {
- hint = 0
- }
- return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-func makemap_small() *hmap {
- h := new(hmap)
- h.hash0 = fastrand()
- return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-func makemap(t *maptype, hint int, h *hmap) *hmap {
- // The size of hmap should be 48 bytes on 64 bit
- // and 28 bytes on 32 bit platforms.
- if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
- println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
- throw("bad hmap size")
- }
-
- if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
- hint = 0
- }
-
- // initialize Hmap
- if h == nil {
- h = new(hmap)
- }
- h.hash0 = fastrand()
-
- // find size parameter which will hold the requested # of elements
- B := uint8(0)
- for overLoadFactor(hint, B) {
- B++
- }
- h.B = B
-
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
-	// If hint is large, zeroing this memory could take a while.
- if h.B != 0 {
- var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B)
- if nextOverflow != nil {
- h.extra = new(mapextra)
- h.extra.nextOverflow = nextOverflow
- }
- }
-
- return h
-}
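
// The B-selection loop above, isolated as a sketch (same 13/2 load
// factor, and hint > 8 standing in for count > bucketCnt): for
// hint == 100 it settles on B == 4, since 16 buckets hold up to
// 13*(16/2) == 104 entries before growing.
func pickB(hint int) (B uint8) {
	for hint > 8 && uintptr(hint) > 13*((uintptr(1)<<B)/2) {
		B++
	}
	return B
}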
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the value type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := funcPC(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
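
// At the source level (usage sketch): the one-result form of a map read
// compiles to mapaccess1 and always yields a usable value, while the
// comma-ok form compiles to mapaccess2 below.
//
//	m := map[string]int{"a": 1}
//	v := m["missing"]     // mapaccess1: v == 0, via the zero object
//	v, ok := m["missing"] // mapaccess2: v == 0, ok == false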
-
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := funcPC(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
- }
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v, true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapaccessK returns both the key and the value. Used by the map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return k, v
- }
- }
- }
- return nil, nil
-}
-
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
- v := mapaccess1(t, h, key)
- if v == unsafe.Pointer(&zeroVal[0]) {
- return zero
- }
- return v
-}
-
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
- v := mapaccess1(t, h, key)
- if v == unsafe.Pointer(&zeroVal[0]) {
- return zero, false
- }
- return v, true
-}
-
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := getcallerpc()
- pc := funcPC(mapassign)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled {
- msanread(key, t.key.size)
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
-
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
- // in which case we have not actually done a write.
- h.flags |= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := tophash(hash)
-
- var inserti *uint8
- var insertk unsafe.Pointer
- var val unsafe.Pointer
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == empty && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if !alg.equal(key, k) {
- continue
- }
- // already have a mapping for key. Update it.
- if t.needkeyupdate {
- typedmemmove(t.key, k, key)
- }
- val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if inserti == nil {
- // all current buckets are full, allocate a new one.
- newb := h.newoverflow(t, b)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- val = add(insertk, bucketCnt*uintptr(t.keysize))
- }
-
- // store new key/value at insert position
- if t.indirectkey {
- kmem := newobject(t.key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.indirectvalue {
- vmem := newobject(t.elem)
- *(*unsafe.Pointer)(val) = vmem
- }
- typedmemmove(t.key, insertk, key)
- *inserti = top
- h.count++
-
-done:
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
- if t.indirectvalue {
- val = *((*unsafe.Pointer)(val))
- }
- return val
-}
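
// At the source level (usage sketch): an assignment m[k] = v compiles to
// a mapassign call that returns the value slot, followed by a store of v
// into that slot, mirroring reflect_mapassign near the end of this file.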
-
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- pc := funcPC(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.key.size)
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
- }
-
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
-
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
- // in which case we have not actually done a write (delete).
- h.flags |= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !alg.equal(key, k2) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.indirectkey {
- *(*unsafe.Pointer)(k) = nil
- } else if t.key.kind&kindNoPointers == 0 {
- memclrHasPointers(k, t.key.size)
- }
- // Only clear value if there are pointers in it.
- if t.indirectvalue || t.elem.kind&kindNoPointers == 0 {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- *(*unsafe.Pointer)(v) = nil
- } else {
- memclrHasPointers(v, t.elem.size)
- }
- }
- b.tophash[i] = empty
- h.count--
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
-// by the compiler's order pass or on the heap by reflect_mapiterinit.
-// Both need to have a zeroed hiter since the struct contains pointers.
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
- if raceenabled && h != nil {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
- }
-
- if h == nil || h.count == 0 {
- return
- }
-
- if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
- throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
- }
- it.t = t
- it.h = h
-
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
- if t.bucket.kind&kindNoPointers != 0 {
- // Allocate the current slice and remember pointers to both current and old.
- // This preserves all relevant overflow buckets alive even if
- // the table grows and/or overflow buckets are added to the table
- // while we are iterating.
- h.createOverflow()
- it.overflow = h.extra.overflow
- it.oldoverflow = h.extra.oldoverflow
- }
-
- // decide where to start
- r := uintptr(fastrand())
- if h.B > 31-bucketCntBits {
- r += uintptr(fastrand()) << 31
- }
- it.startBucket = r & bucketMask(h.B)
- it.offset = uint8(r >> h.B & (bucketCnt - 1))
-
- // iterator state
- it.bucket = it.startBucket
-
- // Remember we have an iterator.
- // Can run concurrently with another mapiterinit().
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomic.Or8(&h.flags, iterator|oldIterator)
- }
-
- mapiternext(it)
-}
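
// Usage-level consequence of the random startBucket and offset chosen
// above: two ranges over the same unmodified map may visit keys in
// different orders, so programs must not rely on any particular order.
//
//	m := map[int]string{1: "a", 2: "b", 3: "c"}
//	for k, v := range m {
//		_, _ = k, v // visit order intentionally varies between ranges
//	}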
-
-func mapiternext(it *hiter) {
- h := it.h
- if raceenabled {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
- }
- if h.flags&hashWriting != 0 {
- throw("concurrent map iteration and map write")
- }
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
- alg := t.key.alg
-
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.value = nil
- return
- }
- if h.growing() && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & it.h.oldbucketmask()
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == bucketShift(it.B) {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < bucketCnt; i++ {
- offi := (i + it.offset) & (bucketCnt - 1)
- if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
- if checkBucket != noCheck && !h.sameSizeGrow() {
- // Special case: iterator was started during a grow to a larger size
- // and the grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- if t.reflexivekey || alg.equal(k, k) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := alg.hash(k, uintptr(h.hash0))
- if hash&bucketMask(it.B) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey || alg.equal(k, k)) {
- // This is the golden data, we can return it.
- // OR
- // key!=key, so the entry can't be deleted or updated, so we can just return it.
- // That's lucky for us because when key!=key we can't look it up successfully.
- it.key = k
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- it.value = v
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, rv := mapaccessK(t, h, k)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.value = rv
- }
- it.bucket = bucket
- if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
- it.bptr = b
- }
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- b = b.overflow(t)
- i = 0
- goto next
-}
-
-func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.bucket.size * nbuckets
- up := roundupsize(sz)
- if up != sz {
- nbuckets = up / t.bucket.size
- }
- }
- buckets = newarray(t.bucket, int(nbuckets))
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
- last.setoverflow(t, (*bmap)(buckets))
- }
- return buckets, nextOverflow
-}
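
// Worked numbers for the roundupsize reuse above, assuming a 144-byte
// bucket (e.g. map[int64]int64) and B == 5: base == 32 plus 2 estimated
// overflow buckets is 34*144 == 4896 bytes; if the allocator's size class
// rounds that up to 5376 bytes, nbuckets becomes 5376/144 == 37, turning
// the slack into three more preallocated overflow buckets for free.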
-
-func hashGrow(t *maptype, h *hmap) {
- // If we've hit the load factor, get bigger.
- // Otherwise, there are too many overflow buckets,
- // so keep the same number of buckets and "grow" laterally.
- bigger := uint8(1)
- if !overLoadFactor(h.count+1, h.B) {
- bigger = 0
- h.flags |= sameSizeGrow
- }
- oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
-
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B += bigger
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- h.noverflow = 0
-
- if h.extra != nil && h.extra.overflow != nil {
- // Promote current overflow buckets to the old generation.
- if h.extra.oldoverflow != nil {
- throw("oldoverflow is not nil")
- }
- h.extra.oldoverflow = h.extra.overflow
- h.extra.overflow = nil
- }
- if nextOverflow != nil {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- h.extra.nextOverflow = nextOverflow
- }
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
- return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
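
// Worked numbers: with B == 3 (8 buckets) the map grows once count
// exceeds 13*(8/2) == 52; and while count <= bucketCnt (8) it never
// grows at all, since such maps fit in a single bucket.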
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
- // If the threshold is too low, we do extraneous work.
- // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
- // "too many" means (approximately) as many overflow buckets as regular buckets.
- // See incrnoverflow for more details.
- if B > 15 {
- B = 15
- }
- // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
- return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
- return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
- return h.flags&sameSizeGrow != 0
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
- oldB := h.B
- if !h.sameSizeGrow() {
- oldB--
- }
- return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
- return h.noldbuckets() - 1
-}
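
// Since noldbuckets() is always a power of two, the modulo reduces to a
// mask. Example: after growing from B == 3 to B == 4, noldbuckets() == 8
// and the mask is 7, so new bucket 13 draws its entries from old bucket
// 13 & 7 == 5.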
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate(t, h, h.nevacuate)
- }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
- b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
- return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
- b *bmap // current destination bucket
- i int // key/val index into b
- k unsafe.Pointer // pointer to current key storage
- v unsafe.Pointer // pointer to current value storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.v = add(x.k, bucketCnt*uintptr(t.keysize))
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.v = add(y.k, bucketCnt*uintptr(t.keysize))
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- v := add(k, bucketCnt*uintptr(t.keysize))
- for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
- top := b.tophash[i]
- if top == empty {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/value to bucket x or bucket y).
- hash := t.key.alg.hash(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- useY = top & 1
- top = tophash(hash)
- } else {
- if hash&newbit != 0 {
- useY = 1
- }
- }
- }
-
- if evacuatedX+1 != evacuatedY {
- throw("bad evacuatedN")
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.indirectkey {
- *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
- } else {
- typedmemmove(t.key, dst.k, k) // copy value
- }
- if t.indirectvalue {
- *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
- } else {
- typedmemmove(t.elem, dst.v, v)
- }
- dst.i++
- // These updates might push these pointers past the end of the
- // key or value arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, uintptr(t.keysize))
- dst.v = add(dst.v, uintptr(t.valuesize))
- }
- }
- // Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
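
// A standalone sketch of the X/Y routing above (assumes a repeatable
// hash, i.e. the non-NaN path): when the table doubles, an entry in old
// bucket i moves to new bucket i (X) or i+newbit (Y) according to the
// hash bit that the doubling exposes.
func evacDirection(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // Y: second half of the grown table
	}
	return oldbucket // X: same index in the first half
}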
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
- h.nevacuate++
- // Experiments suggest that 1024 is overkill by at least an order of magnitude.
- // Put it in there as a safeguard anyway, to ensure O(1) behavior.
- stop := h.nevacuate + 1024
- if stop > newbit {
- stop = newbit
- }
- for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
- h.nevacuate++
- }
- if h.nevacuate == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- // Can discard old overflow buckets as well.
- // If they are still referenced by an iterator,
-	// then the iterator holds a pointer to the slice.
- if h.extra != nil {
- h.extra.oldoverflow = nil
- }
- h.flags &^= sameSizeGrow
- }
-}
-
-func ismapkey(t *_type) bool {
- return t.alg.hash != nil
-}
-
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
-	// Check invariants and reflect's math.
- if sz := unsafe.Sizeof(hmap{}); sz != t.hmap.size {
- println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
- throw("bad hmap size")
- }
- if !ismapkey(t.key) {
- throw("runtime.reflect_makemap: unsupported map key type")
- }
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
- throw("key size wrong")
- }
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
- throw("value size wrong")
- }
- if t.key.align > bucketCnt {
- throw("key align too big")
- }
- if t.elem.align > bucketCnt {
- throw("value align too big")
- }
- if t.key.size%uintptr(t.key.align) != 0 {
- throw("key size not a multiple of key align")
- }
- if t.elem.size%uintptr(t.elem.align) != 0 {
- throw("value size not a multiple of value align")
- }
- if bucketCnt < 8 {
- throw("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.key.align) != 0 {
- throw("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.elem.align) != 0 {
- throw("need padding in bucket (value)")
- }
-
- return makemap(t, cap, nil)
-}
-
-//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- val, ok := mapaccess2(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- val = nil
- }
- return val
-}
-
-//go:linkname reflect_mapassign reflect.mapassign
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
- p := mapassign(t, h, key)
- typedmemmove(t.elem, p, val)
-}
-
-//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
-}
-
-//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
- it := new(hiter)
- mapiterinit(t, h, it)
- return it
-}
-
-//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
- mapiternext(it)
-}
-
-//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
-}
-
-//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
- }
- return h.count
-}
-
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
-}
-
-const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
-var zeroVal [maxZero]byte