| author | David Chase <drchase@google.com> | 2023-04-25 19:14:05 -0400 |
|---|---|---|
| committer | David Chase <drchase@google.com> | 2023-05-11 13:45:40 +0000 |
| commit | 2e93fe0a9f55aaa2a762e7fb454d76b2ee3a4e4f | |
| tree | 6ad00fee3adb58714aa001ece696eeebeb2b5087 /src/runtime/map_faststr.go | |
| parent | a2838ec5f20b56e94a18c873ab4b68397355e214 | |
| download | go-2e93fe0a9f55aaa2a762e7fb454d76b2ee3a4e4f.tar.xz | |
runtime: move per-type types to internal/abi
Change-Id: I1f031f0f83a94bebe41d3978a91a903dc5bcda66
Reviewed-on: https://go-review.googlesource.com/c/go/+/489276
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
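Within this file the change is a mechanical rename: the runtime's unexported `maptype` fields (`elemsize`, `bucketsize`, `hasher`, `bucket`, `elem`) become exported fields on the `internal/abi` map type (`ValueSize`, `BucketSize`, `Hasher`, `Bucket`, `Elem`). A minimal sketch of the field set the diff below relies on — names inferred from the renames in this file, not the complete `abi.MapType` definition:

```go
// Sketch only: the exported fields of internal/abi's map type that
// this diff depends on, inferred from the renames. The real MapType
// has additional fields (Key, KeySize, Flags, ...) omitted here.
package abi

import "unsafe"

type Type struct {
	Size_    uintptr // total size of the type
	PtrBytes uintptr // bytes of the prefix that can contain pointers
	// ... remaining descriptor fields omitted
}

type MapType struct {
	Type
	Elem   *Type // element (value) type; was t.elem
	Bucket *Type // internal bucket type; was t.bucket
	// Hasher hashes a key at p with the given seed; was t.hasher.
	Hasher     func(p unsafe.Pointer, seed uintptr) uintptr
	ValueSize  uint8  // size of an element slot; was t.elemsize
	BucketSize uint16 // size of a bucket; was t.bucketsize
}
```

The renames map one-to-one onto these fields; the diff changes no behavior, which is consistent with its balanced 34 insertions and 34 deletions.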
Diffstat (limited to 'src/runtime/map_faststr.go')
| -rw-r--r-- | src/runtime/map_faststr.go | 68 |
1 file changed, 34 insertions(+), 34 deletions(-)
```diff
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 03a4fac169..ef71da859a 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -36,7 +36,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 					continue
 				}
 				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
 				}
 			}
 			return unsafe.Pointer(&zeroVal[0])
@@ -52,7 +52,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 				continue
 			}
 			if k.str == key.str {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
 			}
 			// check first 4 bytes
 			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -71,21 +71,21 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 		if keymaybe != bucketCnt {
 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
 			if memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
 			}
 		}
 		return unsafe.Pointer(&zeroVal[0])
 	}
 dohash:
-	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
 	if c := h.oldbuckets; c != nil {
 		if !h.sameSizeGrow() {
 			// There used to be half as many buckets; mask down one more power of two.
 			m >>= 1
 		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
@@ -98,7 +98,7 @@ dohash:
 				continue
 			}
 			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
 			}
 		}
 	}
@@ -131,7 +131,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 					continue
 				}
 				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
 				}
 			}
 			return unsafe.Pointer(&zeroVal[0]), false
@@ -147,7 +147,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 				continue
 			}
 			if k.str == key.str {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
 			}
 			// check first 4 bytes
 			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -166,21 +166,21 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 		if keymaybe != bucketCnt {
 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
 			if memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
 			}
 		}
 		return unsafe.Pointer(&zeroVal[0]), false
 	}
 dohash:
-	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
 	if c := h.oldbuckets; c != nil {
 		if !h.sameSizeGrow() {
 			// There used to be half as many buckets; mask down one more power of two.
 			m >>= 1
 		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
 		if !evacuated(oldb) {
 			b = oldb
 		}
@@ -193,7 +193,7 @@ dohash:
 				continue
 			}
 			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
 			}
 		}
 	}
@@ -212,13 +212,13 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
 		fatal("concurrent map writes")
 	}
 	key := stringStructOf(&s)
-	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
 
 	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
-		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
 	}
 
 again:
@@ -226,7 +226,7 @@ again:
 	if h.growing() {
 		growWork_faststr(t, h, bucket)
 	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
 	top := tophash(hash)
 
 	var insertb *bmap
@@ -290,7 +290,7 @@ bucketloop:
 	h.count++
 
 done:
-	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
+	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
 	if h.flags&hashWriting == 0 {
 		fatal("concurrent map writes")
 	}
@@ -311,7 +311,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
 	}
 
 	key := stringStructOf(&ky)
-	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 
 	// Set hashWriting after calling t.hasher for consistency with mapdelete
 	h.flags ^= hashWriting
@@ -320,7 +320,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
 	if h.growing() {
 		growWork_faststr(t, h, bucket)
 	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
 	bOrig := b
 	top := tophash(hash)
 search:
@@ -335,11 +335,11 @@ search:
 			}
 			// Clear key's pointer.
 			k.str = nil
-			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
-			if t.elem.PtrBytes != 0 {
-				memclrHasPointers(e, t.elem.Size_)
+			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
+			if t.Elem.PtrBytes != 0 {
+				memclrHasPointers(e, t.Elem.Size_)
 			} else {
-				memclrNoHeapPointers(e, t.elem.Size_)
+				memclrNoHeapPointers(e, t.Elem.Size_)
 			}
 			b.tophash[i] = emptyOne
 			// If the bucket now ends in a bunch of emptyOne states,
@@ -400,7 +400,7 @@ func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
 }
 
 func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
-	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
 	newbit := h.noldbuckets()
 	if !evacuated(b) {
 		// TODO: reuse overflow buckets instead of using new ones, if there
@@ -409,7 +409,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 		// xy contains the x and y (low and high) evacuation destinations.
 		var xy [2]evacDst
 		x := &xy[0]
-		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
 		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
 
@@ -417,7 +417,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 			// Only calculate y pointers if we're growing bigger.
 			// Otherwise GC can see bad pointers.
 			y := &xy[1]
-			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
 			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
 		}
@@ -425,7 +425,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
 			e := add(k, bucketCnt*2*goarch.PtrSize)
-			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
+			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
 				top := b.tophash[i]
 				if isEmpty(top) {
 					b.tophash[i] = evacuatedEmpty
@@ -438,7 +438,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 				if !h.sameSizeGrow() {
 					// Compute hash to make our evacuation decision (whether we need
 					// to send this key/elem to bucket x or bucket y).
-					hash := t.hasher(k, uintptr(h.hash0))
+					hash := t.Hasher(k, uintptr(h.hash0))
 					if hash&newbit != 0 {
 						useY = 1
 					}
@@ -458,23 +458,23 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 				// Copy key.
 				*(*string)(dst.k) = *(*string)(k)
 
-				typedmemmove(t.elem, dst.e, e)
+				typedmemmove(t.Elem, dst.e, e)
 				dst.i++
 				// These updates might push these pointers past the end of the
 				// key or elem arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
 				dst.k = add(dst.k, 2*goarch.PtrSize)
-				dst.e = add(dst.e, uintptr(t.elemsize))
+				dst.e = add(dst.e, uintptr(t.ValueSize))
 			}
 		}
 		// Unlink the overflow buckets & clear key/elem to help GC.
-		if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
-			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
 			// Preserve b.tophash because the evacuation
 			// state is maintained there.
 			ptr := add(b, dataOffset)
-			n := uintptr(t.bucketsize) - dataOffset
+			n := uintptr(t.BucketSize) - dataOffset
 			memclrHasPointers(ptr, n)
 		}
 	}
```
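Most of the hunks touch the same address computation: element i of a bucket lives after the tophash array and the bucketCnt two-word string keys, at `dataOffset + bucketCnt*2*goarch.PtrSize + i*uintptr(t.ValueSize)`. A self-contained sketch of that layout arithmetic, using assumed 64-bit constants rather than the runtime's actual definitions:

```go
// Self-contained sketch of the bucket layout arithmetic used across
// this diff. The constants are assumptions for a 64-bit platform and
// a map[string]int64; the runtime derives them from the map's type.
package main

import "fmt"

const (
	ptrSize    = 8 // goarch.PtrSize on a 64-bit platform
	bucketCnt  = 8 // key/elem pairs per bucket
	dataOffset = 8 // the tophash array: bucketCnt bytes, 8-byte aligned
	valueSize  = 8 // t.ValueSize for an int64 element (assumed)
)

// elemOffset returns the byte offset of element i within a bucket:
// keys are packed first (bucketCnt string headers of 2 words each),
// then the elements, each valueSize bytes apart.
func elemOffset(i uintptr) uintptr {
	return dataOffset + bucketCnt*2*ptrSize + i*valueSize
}

func main() {
	for i := uintptr(0); i < bucketCnt; i++ {
		fmt.Printf("elem %d at byte offset %d\n", i, elemOffset(i))
	}
}
```

Under these assumptions the first element sits at byte 136 (8 + 8×16) and each subsequent element 8 bytes later, which matches the stride `add(e, uintptr(t.ValueSize))` that the evacuation loop advances by.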
