about summary refs log tree commit diff
path: root/src/runtime/alg.go
diff options
context:
space:
mode:
author: Keith Randall <khr@golang.org> 2020-03-06 14:01:26 -0800
committer: Keith Randall <khr@golang.org> 2020-03-10 16:26:59 +0000
commit: 2b8e60d464515634462ca472ca09c791e2cbf6ae (patch)
tree: fd9fbce5eafa054a302e77e9df284f33460b7497 /src/runtime/alg.go
parent: 9f74f0afa6270e6735c1b6f59519cc88ff2ed1e4 (diff)
download: go-2b8e60d464515634462ca472ca09c791e2cbf6ae.tar.xz
runtime: make typehash match compiler generated hashes exactly
If typehash (used by reflect) does not match the built-in map's hash, then problems occur. If a map is built using reflect, and then assigned to a variable of map type, the hash function can change. That causes very bad things.

This issue is rare. MapOf consults a cache of all types that occur in the binary before making a new one. To make a true new map type (with a hash function derived from typehash) that map type must not occur in the binary anywhere. But to cause the bug, we need a variable of that type in order to assign to it. The only way to make that work is to use a named map type for the variable, so it is distinct from the unnamed version that MapOf looks for.

Fixes #37716

Change-Id: I3537bfceca8cbfa1af84202f432f3c06953fe0ed
Reviewed-on: https://go-review.googlesource.com/c/go/+/222357
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Diffstat (limited to 'src/runtime/alg.go')
-rw-r--r-- src/runtime/alg.go | 22
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 5a0656513d..0af48ab25c 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -158,6 +158,8 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
+// Note: this function must match the compiler generated
+// functions exactly. See issue 37716.
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
// Handle ptr sizes specially, see issue 37086.
@@ -195,12 +197,28 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return h
case kindStruct:
s := (*structtype)(unsafe.Pointer(t))
+ memStart := uintptr(0)
+ memEnd := uintptr(0)
for _, f := range s.fields {
- // TODO: maybe we could hash several contiguous fields all at once.
+ if memEnd > memStart && (f.name.isBlank() || f.offset() != memEnd || f.typ.tflag&tflagRegularMemory == 0) {
+ // flush any pending regular memory hashing
+ h = memhash(add(p, memStart), h, memEnd-memStart)
+ memStart = memEnd
+ }
if f.name.isBlank() {
continue
}
- h = typehash(f.typ, add(p, f.offset()), h)
+ if f.typ.tflag&tflagRegularMemory == 0 {
+ h = typehash(f.typ, add(p, f.offset()), h)
+ continue
+ }
+ if memStart == memEnd {
+ memStart = f.offset()
+ }
+ memEnd = f.offset() + f.typ.size
+ }
+ if memEnd > memStart {
+ h = memhash(add(p, memStart), h, memEnd-memStart)
}
return h
default: