Diffstat (limited to 'src/runtime/map_faststr.go')
-rw-r--r--  src/runtime/map_faststr.go  47
1 file changed, 23 insertions(+), 24 deletions(-)
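
The change below is a mechanical rename in the map's string-key fast path: t.valuesize becomes t.elemsize, and the locals val/v/x.v/y.v/dst.v become elem/e/x.e/y.e/dst.e, with comments updated to match; behavior is unchanged. The offset expression that recurs throughout, dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize), walks past the tophash area and all eight 2-word string-key headers to reach elem slot i. Here is a minimal sketch of that layout, using an assumed bucket struct rather than the runtime's real bmap (which also ends in an overflow pointer, omitted here):

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8 // slots per bucket, as in the runtime

// bucket models a map[string]int bucket: 8 tophash bytes, then all keys,
// then all elems (arrays, not interleaved key/elem pairs).
type bucket struct {
	tophash [bucketCnt]uint8
	keys    [bucketCnt]string // each key is a 2-word string header
	elems   [bucketCnt]int
}

func main() {
	var b bucket
	ptrSize := unsafe.Sizeof(uintptr(0))
	dataOffset := unsafe.Offsetof(b.keys) // start of the key array
	elemSize := unsafe.Sizeof(b.elems[0])
	for i := uintptr(0); i < bucketCnt; i++ {
		// The expression from the diff: skip the tophash area, then all
		// bucketCnt string headers (2 words each), then i prior elems.
		off := dataOffset + bucketCnt*2*ptrSize + i*elemSize
		fmt.Println(off == unsafe.Offsetof(b.elems)+i*elemSize) // always true
	}
}
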
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 504a3a1d5f..069994f1b7 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -35,7 +35,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
@@ -51,7 +51,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -70,7 +70,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
@@ -97,7 +97,7 @@ dohash:
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
}
}
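
The hunks above pass through the long-key probe mapaccess1_faststr uses when keys exceed 2*sys.PtrSize bytes: pointer-equal string data means equal strings, mismatched first or last 4 bytes mean unequal strings, and a full memequal is spent on at most one remembered candidate (keymaybe), with dohash as the fallback. A safe-Go sketch of that screening, with a hypothetical lookupLong helper in place of the real bucket walk (it assumes len(key) >= 4, and falls back to a plain scan where the runtime jumps to dohash):

package main

import (
	"fmt"
	"unsafe"
)

// lookupLong finds key in keys, spending at most one full comparison on a
// screened candidate. Assumes len(key) >= 4; the runtime takes this path
// only for keys longer than 2*sys.PtrSize bytes.
func lookupLong(keys []string, key string) (int, bool) {
	keymaybe := -1
	for i, k := range keys {
		if len(k) != len(key) {
			continue
		}
		// Fast path: same backing array and same length => same string.
		if unsafe.StringData(k) == unsafe.StringData(key) {
			return i, true
		}
		// Cheap screens on the first and last 4 bytes.
		if k[:4] != key[:4] || k[len(k)-4:] != key[len(key)-4:] {
			continue
		}
		if keymaybe >= 0 {
			// Two survivors: the heuristic loses, so compare everything
			// fully (the runtime instead falls back to dohash).
			for j, kj := range keys {
				if kj == key {
					return j, true
				}
			}
			return -1, false
		}
		keymaybe = i
	}
	if keymaybe >= 0 && keys[keymaybe] == key {
		return keymaybe, true
	}
	return -1, false
}

func main() {
	keys := []string{"averylongkey1xyz", "averylongkey2xyz", "bverylongkey1xyz"}
	fmt.Println(lookupLong(keys, "averylongkey2xyz")) // 1 true
}
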
@@ -130,7 +130,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@@ -146,7 +146,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -165,7 +165,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@@ -192,7 +192,7 @@ dohash:
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
}
}
@@ -286,12 +286,12 @@ bucketloop:
h.count++
done:
- val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
- return val
+ return elem
}
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
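
The done: epilogue renamed above also shows the runtime's cheap concurrent-write detection: mapassign sets hashWriting in h.flags on entry (not part of this hunk), and at done: the flag must still be set, otherwise another writer raced in and cleared it. A toy reconstruction of that protocol, with an assumed assignLike function standing in for mapassign_faststr:

package main

import "fmt"

const hashWriting = 4 // same bit the runtime uses in h.flags

type hmap struct{ flags uint8 }

func assignLike(h *hmap, write func()) {
	if h.flags&hashWriting != 0 {
		panic("concurrent map writes") // another writer is mid-flight
	}
	h.flags |= hashWriting
	write()
	if h.flags&hashWriting == 0 {
		panic("concurrent map writes") // someone cleared our flag
	}
	h.flags &^= hashWriting
}

func main() {
	h := &hmap{}
	assignLike(h, func() {}) // a single well-behaved writer: no panic
	fmt.Println("ok")
}
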
@@ -331,11 +331,11 @@ search:
}
// Clear key's pointer.
k.str = nil
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
- memclrHasPointers(v, t.elem.size)
+ memclrHasPointers(e, t.elem.size)
} else {
- memclrNoHeapPointers(v, t.elem.size)
+ memclrNoHeapPointers(e, t.elem.size)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
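
The delete hunk nils the key's str pointer and clears the elem (memclrHasPointers when the elem type contains pointers, memclrNoHeapPointers otherwise) because a deleted slot keeps occupying its bucket array, and any pointers left behind would keep otherwise-dead objects reachable. A toy illustration with an assumed slot struct, not the runtime's types:

package main

import "fmt"

// slot stands in for one key/elem pair in a bucket; buckets are arrays, so
// a deleted entry's memory stays allocated until the bucket itself dies.
type slot struct {
	key  string
	elem *[]byte
}

// clearSlot is the analogue of the hunk above: drop the key's pointer and
// the elem so the GC can reclaim what they referenced; the runtime then
// marks the slot's tophash emptyOne.
func clearSlot(s *slot) {
	s.key = ""   // analogue of k.str = nil
	s.elem = nil // analogue of memclr*(e, t.elem.size)
}

func main() {
	big := make([]byte, 1<<20)
	s := slot{key: "k", elem: &big}
	clearSlot(&s)
	fmt.Println(s.key == "", s.elem == nil) // true true
}
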
@@ -402,7 +402,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.v = add(x.k, bucketCnt*2*sys.PtrSize)
+ x.e = add(x.k, bucketCnt*2*sys.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -410,13 +410,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.v = add(y.k, bucketCnt*2*sys.PtrSize)
+ y.e = add(y.k, bucketCnt*2*sys.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- v := add(k, bucketCnt*2*sys.PtrSize)
- for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
+ e := add(k, bucketCnt*2*sys.PtrSize)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -428,7 +428,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
- // to send this key/value to bucket x or bucket y).
+ // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
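
The evacuation decision above comes down to a single bit: when the table doubles, an entry in old bucket b stays at index b (the x half) or moves to b+newbit (the y half) according to the newly significant bit of its key's hash. A standalone sketch with an assumed evacTarget helper:

package main

import "fmt"

// evacTarget picks the destination bucket for one entry during growth:
// newbit is the old bucket count, so hash&newbit is the hash bit that the
// grown table starts using.
func evacTarget(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // the "y" half of the grown table
	}
	return oldbucket // the "x" half: same index as before
}

func main() {
	const newbit = 8 // old table had 8 buckets, the new one has 16
	for _, h := range []uintptr{0x03, 0x0b, 0x13, 0x1b} {
		old := h & (newbit - 1)
		fmt.Printf("hash %#04x: old bucket %d -> new bucket %d\n",
			h, old, evacTarget(h, old, newbit))
	}
}
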
@@ -442,25 +442,24 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
+ dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
*(*string)(dst.k) = *(*string)(k)
- typedmemmove(t.elem, dst.v, v)
+ typedmemmove(t.elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
- // key or value arrays. That's ok, as we have the overflow pointer
+ // key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 2*sys.PtrSize)
- dst.v = add(dst.v, uintptr(t.valuesize))
+ dst.e = add(dst.e, uintptr(t.elemsize))
}
}
- // Unlink the overflow buckets & clear key/value to help GC.
- // Unlink the overflow buckets & clear key/value to help GC.
+ // Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation