about summary refs log tree commit diff
path: root/src/runtime/alg.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/alg.go')
-rw-r--r--  src/runtime/alg.go  22
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 5a0656513d..0af48ab25c 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -158,6 +158,8 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
+// Note: this function must match the compiler generated
+// functions exactly. See issue 37716.
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
// Handle ptr sizes specially, see issue 37086.
@@ -195,12 +197,28 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return h
case kindStruct:
s := (*structtype)(unsafe.Pointer(t))
+ memStart := uintptr(0)
+ memEnd := uintptr(0)
for _, f := range s.fields {
- // TODO: maybe we could hash several contiguous fields all at once.
+ if memEnd > memStart && (f.name.isBlank() || f.offset() != memEnd || f.typ.tflag&tflagRegularMemory == 0) {
+ // flush any pending regular memory hashing
+ h = memhash(add(p, memStart), h, memEnd-memStart)
+ memStart = memEnd
+ }
if f.name.isBlank() {
continue
}
- h = typehash(f.typ, add(p, f.offset()), h)
+ if f.typ.tflag&tflagRegularMemory == 0 {
+ h = typehash(f.typ, add(p, f.offset()), h)
+ continue
+ }
+ if memStart == memEnd {
+ memStart = f.offset()
+ }
+ memEnd = f.offset() + f.typ.size
+ }
+ if memEnd > memStart {
+ h = memhash(add(p, memStart), h, memEnd-memStart)
}
return h
default: