about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/map_noswiss.go28
-rw-r--r--src/runtime/map_noswiss_test.go4
2 files changed, 10 insertions, 22 deletions
diff --git a/src/runtime/map_noswiss.go b/src/runtime/map_noswiss.go
index d7b8a5fe11..327f0c81e8 100644
--- a/src/runtime/map_noswiss.go
+++ b/src/runtime/map_noswiss.go
@@ -123,6 +123,7 @@ type hmap struct {
buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
+ clearSeq uint64
extra *mapextra // optional fields
}
@@ -176,6 +177,7 @@ type hiter struct {
i uint8
bucket uintptr
checkBucket uintptr
+ clearSeq uint64
}
// bucketShift returns 1<<b, optimized for code generation.
@@ -887,10 +889,11 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
return
}
- if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
+ if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
}
it.h = h
+ it.clearSeq = h.clearSeq
// grab snapshot of bucket state
it.B = h.B
@@ -1022,8 +1025,9 @@ next:
}
}
}
- if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
+ if it.clearSeq == h.clearSeq &&
+ ((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
+ !(t.ReflexiveKey() || t.Key.Equal(k, k))) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -1079,28 +1083,12 @@ func mapclear(t *maptype, h *hmap) {
}
h.flags ^= hashWriting
-
- // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
- markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
- for i := uintptr(0); i <= mask; i++ {
- b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- b.tophash[i] = emptyRest
- }
- }
- }
- }
- markBucketsEmpty(h.buckets, bucketMask(h.B))
- if oldBuckets := h.oldbuckets; oldBuckets != nil {
- markBucketsEmpty(oldBuckets, h.oldbucketmask())
- }
-
h.flags &^= sameSizeGrow
h.oldbuckets = nil
h.nevacuate = 0
h.noverflow = 0
h.count = 0
+ h.clearSeq++
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go
index bda448471c..5af7b7b8c8 100644
--- a/src/runtime/map_noswiss_test.go
+++ b/src/runtime/map_noswiss_test.go
@@ -17,8 +17,8 @@ import (
func TestHmapSize(t *testing.T) {
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/reflectdata/map.go and must be in sync.
- // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*goarch.PtrSize)
+ // The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms.
+ var hmapSize = uintptr(2*8 + 5*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}