diff options
| author | Keith Randall <keithr@alum.mit.edu> | 2019-08-06 15:22:51 -0700 |
|---|---|---|
| committer | Keith Randall <khr@golang.org> | 2019-09-03 20:41:29 +0000 |
| commit | 36f30ba289e31df033d100b2adb4eaf557f05a34 (patch) | |
| tree | 17579106197e4c1d80b67cefdf9d8fdfd2ff2a2c /src/runtime/map.go | |
| parent | 671bcb59666c37cb32b154c36aa91b29fdbf0835 (diff) | |
| download | go-36f30ba289e31df033d100b2adb4eaf557f05a34.tar.xz | |
cmd/compile,runtime: generate hash functions only for types which are map keys
Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.
Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.
Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.
cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.
Speed on non-interface keys is unchanged. Speed on interface keys
is ~20% slower:
name old time/op new time/op delta
MapInterfaceString-8 23.0ns ±21% 27.6ns ±14% +20.01% (p=0.002 n=10+10)
MapInterfacePtr-8 19.4ns ±16% 23.7ns ± 7% +22.48% (p=0.000 n=10+8)
Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
Diffstat (limited to 'src/runtime/map.go')
| -rw-r--r-- | src/runtime/map.go | 57 |
1 file changed, 21 insertions, 36 deletions
diff --git a/src/runtime/map.go b/src/runtime/map.go index 4861cf08db..e456c32556 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -403,15 +403,14 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { } if h == nil || h.count == 0 { if t.hashMightPanic() { - t.key.alg.hash(key, 0) // see issue 23734 + t.hasher(key, 0) // see issue 23734 } return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") } - alg := t.key.alg - hash := alg.hash(key, uintptr(h.hash0)) + hash := t.hasher(key, uintptr(h.hash0)) m := bucketMask(h.B) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) if c := h.oldbuckets; c != nil { @@ -438,7 +437,7 @@ bucketloop: if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } - if alg.equal(key, k) { + if t.key.equal(key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) if t.indirectelem() { e = *((*unsafe.Pointer)(e)) @@ -462,15 +461,14 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) } if h == nil || h.count == 0 { if t.hashMightPanic() { - t.key.alg.hash(key, 0) // see issue 23734 + t.hasher(key, 0) // see issue 23734 } return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") } - alg := t.key.alg - hash := alg.hash(key, uintptr(h.hash0)) + hash := t.hasher(key, uintptr(h.hash0)) m := bucketMask(h.B) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) if c := h.oldbuckets; c != nil { @@ -497,7 +495,7 @@ bucketloop: if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } - if alg.equal(key, k) { + if t.key.equal(key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) if t.indirectelem() { e = *((*unsafe.Pointer)(e)) @@ -514,8 +512,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe if h == nil || h.count == 0 { return nil, 
nil } - alg := t.key.alg - hash := alg.hash(key, uintptr(h.hash0)) + hash := t.hasher(key, uintptr(h.hash0)) m := bucketMask(h.B) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) if c := h.oldbuckets; c != nil { @@ -542,7 +539,7 @@ bucketloop: if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } - if alg.equal(key, k) { + if t.key.equal(key, k) { e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) if t.indirectelem() { e = *((*unsafe.Pointer)(e)) @@ -587,10 +584,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { if h.flags&hashWriting != 0 { throw("concurrent map writes") } - alg := t.key.alg - hash := alg.hash(key, uintptr(h.hash0)) + hash := t.hasher(key, uintptr(h.hash0)) - // Set hashWriting after calling alg.hash, since alg.hash may panic, + // Set hashWriting after calling t.hasher, since t.hasher may panic, // in which case we have not actually done a write. h.flags ^= hashWriting @@ -627,7 +623,7 @@ bucketloop: if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } - if !alg.equal(key, k) { + if !t.key.equal(key, k) { continue } // already have a mapping for key. Update it. @@ -698,7 +694,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { } if h == nil || h.count == 0 { if t.hashMightPanic() { - t.key.alg.hash(key, 0) // see issue 23734 + t.hasher(key, 0) // see issue 23734 } return } @@ -706,10 +702,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { throw("concurrent map writes") } - alg := t.key.alg - hash := alg.hash(key, uintptr(h.hash0)) + hash := t.hasher(key, uintptr(h.hash0)) - // Set hashWriting after calling alg.hash, since alg.hash may panic, + // Set hashWriting after calling t.hasher, since t.hasher may panic, // in which case we have not actually done a write (delete). 
h.flags ^= hashWriting @@ -734,7 +729,7 @@ search: if t.indirectkey() { k2 = *((*unsafe.Pointer)(k2)) } - if !alg.equal(key, k2) { + if !t.key.equal(key, k2) { continue } // Only clear key if there are pointers in it. @@ -862,7 +857,6 @@ func mapiternext(it *hiter) { b := it.bptr i := it.i checkBucket := it.checkBucket - alg := t.key.alg next: if b == nil { @@ -916,10 +910,10 @@ next: // through the oldbucket, skipping any keys that will go // to the other new bucket (each oldbucket expands to two // buckets during a grow). - if t.reflexivekey() || alg.equal(k, k) { + if t.reflexivekey() || t.key.equal(k, k) { // If the item in the oldbucket is not destined for // the current new bucket in the iteration, skip it. - hash := alg.hash(k, uintptr(h.hash0)) + hash := t.hasher(k, uintptr(h.hash0)) if hash&bucketMask(it.B) != checkBucket { continue } @@ -937,7 +931,7 @@ next: } } if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || - !(t.reflexivekey() || alg.equal(k, k)) { + !(t.reflexivekey() || t.key.equal(k, k)) { // This is the golden data, we can return it. // OR // key!=key, so the entry can't be deleted or updated, so we can just return it. @@ -1174,8 +1168,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { if !h.sameSizeGrow() { // Compute hash to make our evacuation decision (whether we need // to send this key/elem to bucket x or bucket y). - hash := t.key.alg.hash(k2, uintptr(h.hash0)) - if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) { + hash := t.hasher(k2, uintptr(h.hash0)) + if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) { // If key != key (NaNs), then the hash could be (and probably // will be) entirely different from the old hash. Moreover, // it isn't reproducible. Reproducibility is required in the @@ -1269,16 +1263,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) { } } -func ismapkey(t *_type) bool { - return t.alg.hash != nil -} - // Reflect stubs. 
Called from ../reflect/asm_*.s //go:linkname reflect_makemap reflect.makemap func reflect_makemap(t *maptype, cap int) *hmap { // Check invariants and reflects math. - if !ismapkey(t.key) { + if t.key.equal == nil { throw("runtime.reflect_makemap: unsupported map key type") } if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) || @@ -1381,10 +1371,5 @@ func reflectlite_maplen(h *hmap) int { return h.count } -//go:linkname reflect_ismapkey reflect.ismapkey -func reflect_ismapkey(t *_type) bool { - return ismapkey(t) -} - const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize var zeroVal [maxZero]byte |
