diff options
| author | Keith Randall <khr@golang.org> | 2014-12-19 20:44:18 -0800 |
|---|---|---|
| committer | Keith Randall <khr@golang.org> | 2014-12-22 22:25:48 +0000 |
| commit | fbc56cf05015899aba236d5a68096a770de3ad0a (patch) | |
| tree | 44d489f2452644d208643db8fc19fe566b15a409 /src/runtime/hashmap.go | |
| parent | aaa4bf3720bbf69e1ac65414448baf88b5e4cd83 (diff) | |
| download | go-fbc56cf05015899aba236d5a68096a770de3ad0a.tar.xz | |
runtime: hashmap: move overflow pointer to end of bucket
Pointers to zero-sized values may end up pointing to the next
object in memory, and possibly off the end of a span. This
can cause memory leaks and/or confuse the garbage collector.
By putting the overflow pointer at the end of the bucket, we
make sure that pointers to any zero-sized keys or values don't
accidentally point to the next object in memory.
Fixes #9384
Change-Id: I5d434df176984cb0210b4d0195dd106d6eb28f73
Reviewed-on: https://go-review.googlesource.com/1869
Reviewed-by: Russ Cox <rsc@golang.org>
Diffstat (limited to 'src/runtime/hashmap.go')
| -rw-r--r-- | src/runtime/hashmap.go | 33 |
1 file changed, 20 insertions, 13 deletions
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go index 0aa7c60af6..14557f8835 100644 --- a/src/runtime/hashmap.go +++ b/src/runtime/hashmap.go @@ -118,11 +118,11 @@ type hmap struct { // A bucket for a Go map. type bmap struct { tophash [bucketCnt]uint8 - overflow *bmap // Followed by bucketCnt keys and then bucketCnt values. // NOTE: packing all the keys together and then all the values together makes the // code a bit more complicated than alternating key/value/key/value/... but it allows // us to eliminate padding which would be needed for, e.g., map[int64]int8. + // Followed by an overflow pointer. } // A hash iteration structure. @@ -149,6 +149,13 @@ func evacuated(b *bmap) bool { return h > empty && h < minTopHash } +func (b *bmap) overflow(t *maptype) *bmap { + return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize) - ptrSize)) +} +func (b *bmap) setoverflow(t *maptype, ovf *bmap) { + *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize) - ptrSize)) = ovf +} + func makemap(t *maptype, hint int64) *hmap { if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) { gothrow("bad hmap size") @@ -275,7 +282,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { return v } } - b = b.overflow + b = b.overflow(t) if b == nil { return unsafe.Pointer(t.elem.zero) } @@ -323,7 +330,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) return v, true } } - b = b.overflow + b = b.overflow(t) if b == nil { return unsafe.Pointer(t.elem.zero), false } @@ -366,7 +373,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe return k, v } } - b = b.overflow + b = b.overflow(t) if b == nil { return nil, nil } @@ -437,10 +444,11 @@ again: memmove(v2, val, uintptr(t.elem.size)) return } - if b.overflow == nil { + ovf := b.overflow(t) + if ovf == nil { break } - b = b.overflow + b = ovf } // did not find mapping for key. Allocate new cell & add entry. 
@@ -455,7 +463,7 @@ again: memstats.next_gc = memstats.heap_alloc } newb := (*bmap)(newobject(t.bucket)) - b.overflow = newb + b.setoverflow(t, newb) inserti = &newb.tophash[0] insertk = add(unsafe.Pointer(newb), dataOffset) insertv = add(insertk, bucketCnt*uintptr(t.keysize)) @@ -525,7 +533,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { h.count-- return } - b = b.overflow + b = b.overflow(t) if b == nil { return } @@ -720,7 +728,7 @@ next: return } } - b = b.overflow + b = b.overflow(t) i = 0 goto next } @@ -778,7 +786,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { yk := add(unsafe.Pointer(y), dataOffset) xv := add(xk, bucketCnt*uintptr(t.keysize)) yv := add(yk, bucketCnt*uintptr(t.keysize)) - for ; b != nil; b = b.overflow { + for ; b != nil; b = b.overflow(t) { k := add(unsafe.Pointer(b), dataOffset) v := add(k, bucketCnt*uintptr(t.keysize)) for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) { @@ -828,7 +836,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { memstats.next_gc = memstats.heap_alloc } newx := (*bmap)(newobject(t.bucket)) - x.overflow = newx + x.setoverflow(t, newx) x = newx xi = 0 xk = add(unsafe.Pointer(x), dataOffset) @@ -855,7 +863,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { memstats.next_gc = memstats.heap_alloc } newy := (*bmap)(newobject(t.bucket)) - y.overflow = newy + y.setoverflow(t, newy) y = newy yi = 0 yk = add(unsafe.Pointer(y), dataOffset) @@ -881,7 +889,6 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { // Unlink the overflow buckets & clear key/value to help GC. if h.flags&oldIterator == 0 { b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) - b.overflow = nil memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) } } |
