Diffstat (limited to 'src/runtime/map_fast32.go')
-rw-r--r--  src/runtime/map_fast32.go  68
1 file changed, 34 insertions(+), 34 deletions(-)
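
Taken together, the hunks below are a mechanical rename of the maptype fields used by the fast32 routines: hasher, bucketsize, elemsize, bucket, key, and elem become Hasher, BucketSize, ValueSize, Bucket, Key, and Elem (note that elemsize is renamed to ValueSize, not merely exported). A sketch of the struct these accesses imply, reconstructed from the hunks alone; the field order, integer widths, and declaration site are assumptions, not part of this diff:

package sketch // illustrative; the real declaration lives elsewhere in the runtime

import "unsafe"

type _type struct{ PtrBytes, Size_ uintptr } // stub carrying only the fields this diff touches

type maptype struct {
	Key        *_type                                // was: key
	Elem       *_type                                // was: elem
	Bucket     *_type                                // was: bucket (internal bucket representation)
	Hasher     func(unsafe.Pointer, uintptr) uintptr // was: hasher
	ValueSize  uint8                                 // was: elemsize; width assumed
	BucketSize uint16                                // was: bucketsize; width assumed
}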
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 01a10f520b..d10dca3e91 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -26,15 +26,15 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
@@ -43,7 +43,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
}
}
}
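
The returned address above, dataOffset+bucketCnt*4+i*uintptr(t.ValueSize), walks a bucket laid out as a tophash array, then eight 4-byte keys, then eight elements. A runnable sketch of that arithmetic, assuming an 8-byte element type; bmapFast32 is a simplified stand-in for the real bucket, and the names mirror the runtime's:

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8 // entries per bucket, as in the runtime

// bmapFast32 approximates the bucket layout of a map[uint32]T with an
// assumed 8-byte element type: tophash bytes, then keys, then elems.
type bmapFast32 struct {
	tophash [bucketCnt]uint8
	keys    [bucketCnt]uint32 // 4 bytes each, hence the add(k, 4) stride above
	elems   [bucketCnt]uint64 // stand-in for t.ValueSize == 8
	// the real bucket ends with an overflow pointer
}

func main() {
	dataOffset := unsafe.Offsetof(bmapFast32{}.keys) // start of the key array
	elemSize := unsafe.Sizeof(uint64(0))             // plays the role of t.ValueSize
	for i := uintptr(0); i < bucketCnt; i++ {
		// mirrors: dataOffset + bucketCnt*4 + i*uintptr(t.ValueSize)
		off := dataOffset + bucketCnt*4 + i*elemSize
		fmt.Printf("elem %d at byte offset %d (check: %d)\n",
			i, off, unsafe.Offsetof(bmapFast32{}.elems)+i*elemSize)
	}
}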
@@ -66,15 +66,15 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+ oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
if !evacuated(oldb) {
b = oldb
}
@@ -83,7 +83,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
}
}
}
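
The m >>= 1 in both access paths is the grow-time fallback: while the table is doubling, a key that has not yet been evacuated still lives in the old, half-size bucket array, at the index obtained by masking off one more bit of the hash. A standalone illustration, assuming growth from 4 to 8 buckets (the hash value is made up for the example):

package main

import "fmt"

func main() {
	const B = 3                  // log2 of the new bucket count: 8 buckets
	newMask := uintptr(1)<<B - 1 // 0b111, as returned by bucketMask(h.B)
	oldMask := newMask >> 1      // 0b011: the pre-growth table had 4 buckets

	hash := uintptr(0b10110)
	fmt.Printf("new bucket: %d\n", hash&newMask) // 6
	fmt.Printf("old bucket: %d\n", hash&oldMask) // 2, consulted if not yet evacuated
}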
@@ -101,13 +101,13 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
@@ -115,7 +115,7 @@ again:
if h.growing() {
growWork_fast32(t, h, bucket)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
@@ -172,7 +172,7 @@ bucketloop:
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
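
Both assign paths bracket the write with h.flags ^= hashWriting and re-check the bit before returning; since a racing writer toggles the same bit, interleaved writers leave it in an unexpected state and the fatal check fires. A minimal sketch of that discipline, using the flag's value from the runtime's map.go:

package main

import "fmt"

const hashWriting = 4 // flag bit, value as in the runtime's map.go

func main() {
	var flags uint8

	if flags&hashWriting != 0 {
		panic("concurrent map writes") // another writer is mid-flight
	}
	flags ^= hashWriting // set on entry (XOR, as in the hunks above)

	// ... the assignment itself would happen here ...

	if flags&hashWriting == 0 {
		panic("concurrent map writes") // a racing writer toggled the bit
	}
	flags &^= hashWriting // clear on the way out
	fmt.Println("write completed; flags:", flags)
}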
@@ -191,13 +191,13 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
}
again:
@@ -205,7 +205,7 @@ again:
if h.growing() {
growWork_fast32(t, h, bucket)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
var insertb *bmap
var inserti uintptr
@@ -262,7 +262,7 @@ bucketloop:
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
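
The noescape(unsafe.Pointer(&key)) wrapper that feeds t.Hasher in every path above keeps the address-of operation from forcing key onto the heap. A self-contained sketch of that helper; the body shown is the well-known runtime idiom, a uintptr round-trip that escape analysis cannot see through:

package main

import (
	"fmt"
	"unsafe"
)

// noescape hides p from escape analysis, so hashing &key does not
// heap-allocate key; the XOR with zero is a no-op at run time.
//
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

func main() {
	key := uint32(42)
	p := noescape(unsafe.Pointer(&key)) // key stays on the stack
	fmt.Println(*(*uint32)(p))
}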
@@ -282,7 +282,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
fatal("concurrent map writes")
}
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
@@ -291,7 +291,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if h.growing() {
growWork_fast32(t, h, bucket)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
bOrig := b
search:
for ; b != nil; b = b.overflow(t) {
@@ -302,16 +302,16 @@ search:
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.key.PtrBytes != 0 {
+ if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
}
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
- if t.elem.PtrBytes != 0 {
- memclrHasPointers(e, t.elem.Size_)
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
+ if t.Elem.PtrBytes != 0 {
+ memclrHasPointers(e, t.Elem.Size_)
} else {
- memclrNoHeapPointers(e, t.elem.Size_)
+ memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
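
The delete hunk above picks between two clearing routines based on t.Elem.PtrBytes: a slot whose type can hold pointers must be zeroed through the write-barrier-aware path so the GC never scans a stale pointer, while pointer-free types take the raw path. A sketch of that decision; typeInfo is a hypothetical stand-in for the *_type fields used above:

package main

import "fmt"

type typeInfo struct {
	PtrBytes uintptr // bytes of the type's prefix that can contain pointers
	Size     uintptr // total size of the type
}

func clearElem(t typeInfo) string {
	if t.PtrBytes != 0 {
		return fmt.Sprintf("memclrHasPointers(e, %d)", t.Size) // GC-aware zeroing
	}
	return fmt.Sprintf("memclrNoHeapPointers(e, %d)", t.Size) // plain memset
}

func main() {
	fmt.Println(clearElem(typeInfo{PtrBytes: 8, Size: 8}))  // e.g. a pointer element
	fmt.Println(clearElem(typeInfo{PtrBytes: 0, Size: 16})) // e.g. a [2]uint64 element
}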
@@ -372,7 +372,7 @@ func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
}
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
@@ -381,7 +381,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
// xy contains the x and y (low and high) evacuation destinations.
var xy [2]evacDst
x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.e = add(x.k, bucketCnt*4)
@@ -389,7 +389,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.e = add(y.k, bucketCnt*4)
}
@@ -397,7 +397,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
e := add(k, bucketCnt*4)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -410,7 +410,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
+ hash := t.Hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
@@ -428,30 +428,30 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if goarch.PtrSize == 4 && t.key.PtrBytes != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
*(*uint32)(dst.k) = *(*uint32)(k)
}
- typedmemmove(t.elem, dst.e, e)
+ typedmemmove(t.Elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 4)
- dst.e = add(dst.e, uintptr(t.elemsize))
+ dst.e = add(dst.e, uintptr(t.ValueSize))
}
}
// Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+ if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
+ b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
+ n := uintptr(t.BucketSize) - dataOffset
memclrHasPointers(ptr, n)
}
}
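
For reference, the x/y split that evacuate_fast32 performs: when the table doubles, every entry of an old bucket is rehashed, and the newly significant hash bit (newbit) routes it either to destination x (the same bucket index) or y (the index plus newbit). A standalone sketch, assuming growth from 4 to 8 buckets with made-up hash values:

package main

import "fmt"

func main() {
	const oldB = 2
	newbit := uintptr(1) << oldB // 4 = number of old buckets (h.noldbuckets())
	oldbucket := uintptr(1)      // the bucket being evacuated

	for _, hash := range []uintptr{0b1001, 0b1101} { // both land in old bucket 1
		useY := uintptr(0)
		if hash&newbit != 0 { // the newly significant bit picks the destination
			useY = 1
		}
		dst := oldbucket + useY*newbit // x keeps the index; y adds newbit
		fmt.Printf("hash %#b -> new bucket %d\n", hash, dst)
	}
}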