aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorJake Bailey <jacob.b.bailey@gmail.com>2025-09-09 22:22:17 -0700
committerGopher Robot <gobot@golang.org>2026-03-24 11:17:57 -0700
commit55600733988b0d3bb708be22b5cbecd8edd83380 (patch)
tree8966b0c85e05e748308009ce52618eda3cf10025 /src
parent3f057dcdbc86498e07a5744406fe92069221a92d (diff)
downloadgo-55600733988b0d3bb708be22b5cbecd8edd83380.tar.xz
internal/runtime/maps: add GOEXPERIMENT=mapsplitgroup for KKKKVVVV slot order
Map groups are currently: type group struct { ctrl uint64 slots [8]slot } type slot struct { key K elem E } If the element type is struct{}, the slot will be padded so that the address of the elem is unique rather than pointing outside the alloc. This has the effect of map[K]struct{} wasting space due to the extra byte and padding, making it no better than map[K]bool. This CL changes the group layout to instead place keys and elems together, as they used to be before swiss maps: type group struct { ctrl uint64 keys [8]K elems [8]V } This is an alternative to CL 701976, which I suspect will have better performance. Keys placed together should lead to better cache behavior, at the cost of more expensive elem lookups, since the elems are not a fixed offset from their keys. This change is locked behind GOEXPERIMENT=mapsplitgroup. Updates #70835 Updates #71368 Change-Id: Ide8d1406ae4ab636f86edc40e0640cc80653197c Reviewed-on: https://go-review.googlesource.com/c/go/+/711560 Reviewed-by: Michael Pratt <mpratt@google.com> Auto-Submit: Michael Pratt <mpratt@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Diffstat (limited to 'src')
-rw-r--r--src/cmd/compile/internal/reflectdata/map.go96
-rw-r--r--src/cmd/link/internal/ld/deadcode.go2
-rw-r--r--src/internal/abi/map.go24
-rw-r--r--src/internal/goexperiment/exp_mapsplitgroup_off.go8
-rw-r--r--src/internal/goexperiment/exp_mapsplitgroup_on.go8
-rw-r--r--src/internal/goexperiment/flags.go5
-rw-r--r--src/internal/runtime/maps/group.go28
-rw-r--r--src/internal/runtime/maps/runtime.go20
-rw-r--r--src/internal/runtime/maps/runtime_fast32.go26
-rw-r--r--src/internal/runtime/maps/runtime_fast64.go26
-rw-r--r--src/internal/runtime/maps/runtime_faststr.go33
-rw-r--r--src/internal/runtime/maps/table.go14
-rw-r--r--src/reflect/export_test.go3
-rw-r--r--src/reflect/map.go75
-rw-r--r--src/runtime/runtime-gdb.py15
15 files changed, 293 insertions, 90 deletions
diff --git a/src/cmd/compile/internal/reflectdata/map.go b/src/cmd/compile/internal/reflectdata/map.go
index a7268bc849..38eada4522 100644
--- a/src/cmd/compile/internal/reflectdata/map.go
+++ b/src/cmd/compile/internal/reflectdata/map.go
@@ -13,6 +13,7 @@ import (
"cmd/internal/objabi"
"cmd/internal/src"
"internal/abi"
+ "internal/buildcfg"
)
// MapGroupType makes the map slot group type given the type of the map.
@@ -26,14 +27,6 @@ func MapGroupType(t *types.Type) *types.Type {
// a correct GC program for it.
//
// Make sure this stays in sync with internal/runtime/maps/group.go.
- //
- // type group struct {
- // ctrl uint64
- // slots [abi.MapGroupSlots]struct {
- // key keyType
- // elem elemType
- // }
- // }
keytype := t.Key()
elemtype := t.Elem()
@@ -46,19 +39,48 @@ func MapGroupType(t *types.Type) *types.Type {
elemtype = types.NewPtr(elemtype)
}
- slotFields := []*types.Field{
- makefield("key", keytype),
- makefield("elem", elemtype),
- }
- slot := types.NewStruct(slotFields)
- slot.SetNoalg(true)
+ var fields []*types.Field
+ if buildcfg.Experiment.MapSplitGroup {
+ // Split layout (KKKKVVVV):
+ // type group struct {
+ // ctrl uint64
+ // keys [abi.MapGroupSlots]keyType
+ // elems [abi.MapGroupSlots]elemType
+ // }
+ keyArr := types.NewArray(keytype, abi.MapGroupSlots)
+ keyArr.SetNoalg(true)
- slotArr := types.NewArray(slot, abi.MapGroupSlots)
- slotArr.SetNoalg(true)
+ elemArr := types.NewArray(elemtype, abi.MapGroupSlots)
+ elemArr.SetNoalg(true)
- fields := []*types.Field{
- makefield("ctrl", types.Types[types.TUINT64]),
- makefield("slots", slotArr),
+ fields = []*types.Field{
+ makefield("ctrl", types.Types[types.TUINT64]),
+ makefield("keys", keyArr),
+ makefield("elems", elemArr),
+ }
+ } else {
+ // Interleaved slot layout (KVKVKVKV):
+ // type group struct {
+ // ctrl uint64
+ // slots [abi.MapGroupSlots]struct {
+ // key keyType
+ // elem elemType
+ // }
+ // }
+ slotFields := []*types.Field{
+ makefield("key", keytype),
+ makefield("elem", elemtype),
+ }
+ slot := types.NewStruct(slotFields)
+ slot.SetNoalg(true)
+
+ slotArr := types.NewArray(slot, abi.MapGroupSlots)
+ slotArr.SetNoalg(true)
+
+ fields = []*types.Field{
+ makefield("ctrl", types.Types[types.TUINT64]),
+ makefield("slots", slotArr),
+ }
}
group := types.NewStruct(fields)
@@ -269,13 +291,30 @@ func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
s3 := writeType(gtyp)
hasher := genhash(t.Key())
- slotTyp := gtyp.Field(1).Type.Elem()
- elemOff := slotTyp.Field(1).Offset
- if types.AlgType(t.Key()) == types.AMEM && t.Key().Size() == 8 && elemOff != 8 {
- base.Fatalf("runtime assumes elemOff for 8-byte keys is 8, got %d", elemOff)
- }
- if types.AlgType(t.Key()) == types.ASTRING && elemOff != int64(2*types.PtrSize) {
- base.Fatalf("runtime assumes elemOff for string keys is %d, got %d", 2*types.PtrSize, elemOff)
+ var keysOff int64
+ var keyStride int64
+ var elemsOff int64
+ var elemStride int64
+ var elemOff int64
+ if buildcfg.Experiment.MapSplitGroup {
+ // Split layout: field 1 is keys array, field 2 is elems array.
+ keysOff = gtyp.Field(1).Offset
+ keyStride = gtyp.Field(1).Type.Elem().Size()
+ elemsOff = gtyp.Field(2).Offset
+ elemStride = gtyp.Field(2).Type.Elem().Size()
+ } else {
+ // Interleaved layout: field 1 is slots array.
+ // KeysOff = offset of slots array (first key).
+ // KeyStride = ElemStride = slot stride.
+ // ElemsOff = offset of slots + offset of elem within slot.
+ keysOff = gtyp.Field(1).Offset
+ slotTyp := gtyp.Field(1).Type.Elem()
+ slotSize := slotTyp.Size()
+ elemOffInSlot := slotTyp.Field(1).Offset
+ keyStride = slotSize
+ elemsOff = keysOff + elemOffInSlot
+ elemStride = slotSize
+ elemOff = slotTyp.Field(1).Offset
}
c.Field("Key").WritePtr(s1)
@@ -283,7 +322,10 @@ func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
c.Field("Group").WritePtr(s3)
c.Field("Hasher").WritePtr(hasher)
c.Field("GroupSize").WriteUintptr(uint64(gtyp.Size()))
- c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size()))
+ c.Field("KeysOff").WriteUintptr(uint64(keysOff))
+ c.Field("KeyStride").WriteUintptr(uint64(keyStride))
+ c.Field("ElemsOff").WriteUintptr(uint64(elemsOff))
+ c.Field("ElemStride").WriteUintptr(uint64(elemStride))
c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
if needkeyupdate(t.Key()) {
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index 055b4efe5d..c2dead36bf 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -560,7 +560,7 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
case abi.Chan: // reflect.chanType
off += 2 * arch.PtrSize
case abi.Map:
- off += 7*arch.PtrSize + 4 // internal/abi.MapType
+ off += 10*arch.PtrSize + 4 // internal/abi.MapType
if arch.PtrSize == 8 {
off += 4 // padding for final uint32 field (Flags).
}
diff --git a/src/internal/abi/map.go b/src/internal/abi/map.go
index 4476dda5ca..520c615b2d 100644
--- a/src/internal/abi/map.go
+++ b/src/internal/abi/map.go
@@ -37,9 +37,27 @@ type MapType struct {
// function for hashing keys (ptr to key, seed) -> hash
Hasher func(unsafe.Pointer, uintptr) uintptr
GroupSize uintptr // == Group.Size_
- SlotSize uintptr // size of key/elem slot
- ElemOff uintptr // offset of elem in key/elem slot
- Flags uint32
+ // These fields describe how to access keys and elems within a group.
+ // The formulas key(i) = KeysOff + i*KeyStride and
+ // elem(i) = ElemsOff + i*ElemStride work for both group layouts:
+ //
+ // With GOEXPERIMENT=mapsplitgroup (split arrays KKKKVVVV):
+ // KeysOff = offset of keys array in group
+ // KeyStride = size of a single key
+ // ElemsOff = offset of elems array in group
+ // ElemStride = size of a single elem
+ //
+ // Without (interleaved slots KVKVKVKV):
+ // KeysOff = offset of slots array in group
+ // KeyStride = size of a key/elem slot (stride between keys)
+ // ElemsOff = offset of first elem (slots offset + elem offset within slot)
+ // ElemStride = size of a key/elem slot (stride between elems)
+ KeysOff uintptr
+ KeyStride uintptr
+ ElemsOff uintptr
+ ElemStride uintptr
+ ElemOff uintptr // offset of elem within a slot; meaningful only without GOEXPERIMENT=mapsplitgroup (zero in split layout)
+ Flags uint32
}
// Flag values
diff --git a/src/internal/goexperiment/exp_mapsplitgroup_off.go b/src/internal/goexperiment/exp_mapsplitgroup_off.go
new file mode 100644
index 0000000000..4a2d368aa3
--- /dev/null
+++ b/src/internal/goexperiment/exp_mapsplitgroup_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.mapsplitgroup
+
+package goexperiment
+
+const MapSplitGroup = false
+const MapSplitGroupInt = 0
diff --git a/src/internal/goexperiment/exp_mapsplitgroup_on.go b/src/internal/goexperiment/exp_mapsplitgroup_on.go
new file mode 100644
index 0000000000..fb93b35787
--- /dev/null
+++ b/src/internal/goexperiment/exp_mapsplitgroup_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.mapsplitgroup
+
+package goexperiment
+
+const MapSplitGroup = true
+const MapSplitGroupInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index 2cfb71578b..2962109770 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -128,4 +128,9 @@ type Flags struct {
// RuntimeSecret enables the runtime/secret package.
RuntimeSecret bool
+
+ // MapSplitGroup changes the internal representation of map groups
+ // from interleaved key/elem slots (KVKVKVKV) to split key and elem
+ // arrays (KKKKVVVV).
+ MapSplitGroup bool
}
diff --git a/src/internal/runtime/maps/group.go b/src/internal/runtime/maps/group.go
index a56d32aca0..8f6c38790f 100644
--- a/src/internal/runtime/maps/group.go
+++ b/src/internal/runtime/maps/group.go
@@ -242,25 +242,29 @@ func ctrlGroupMatchFull(g ctrlGroup) bitset {
// control word.
type groupReference struct {
// data points to the group, which is described by typ.Group and has
- // layout:
+ // layout depending on GOEXPERIMENT=mapsplitgroup:
//
+ // With mapsplitgroup (split arrays):
// type group struct {
// ctrls ctrlGroup
- // slots [abi.MapGroupSlots]slot
+ // keys [abi.MapGroupSlots]typ.Key
+ // elems [abi.MapGroupSlots]typ.Elem
// }
//
- // type slot struct {
- // key typ.Key
- // elem typ.Elem
+ // Without (interleaved slots):
+ // type group struct {
+ // ctrls ctrlGroup
+ // slots [abi.MapGroupSlots]struct {
+ // key typ.Key
+ // elem typ.Elem
+ // }
// }
+ //
+ // In both cases, key(i) and elem(i) use the same formula via
+ // typ.KeysOff/KeyStride and typ.ElemsOff/ElemStride.
data unsafe.Pointer // data *typ.Group
}
-const (
- ctrlGroupsSize = unsafe.Sizeof(ctrlGroup(0))
- groupSlotsOffset = ctrlGroupsSize
-)
-
// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
@@ -287,14 +291,14 @@ func (g *groupReference) ctrls() *ctrlGroup {
// key returns a pointer to the key at index i.
func (g *groupReference) key(typ *abi.MapType, i uintptr) unsafe.Pointer {
- offset := groupSlotsOffset + i*typ.SlotSize
+ offset := typ.KeysOff + i*typ.KeyStride
return unsafe.Pointer(uintptr(g.data) + offset)
}
// elem returns a pointer to the element at index i.
func (g *groupReference) elem(typ *abi.MapType, i uintptr) unsafe.Pointer {
- offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff
+ offset := typ.ElemsOff + i*typ.ElemStride
return unsafe.Pointer(uintptr(g.data) + offset)
}
diff --git a/src/internal/runtime/maps/runtime.go b/src/internal/runtime/maps/runtime.go
index 2c395d5c33..bd3ba6acc8 100644
--- a/src/internal/runtime/maps/runtime.go
+++ b/src/internal/runtime/maps/runtime.go
@@ -7,6 +7,7 @@ package maps
import (
"internal/abi"
"internal/asan"
+ "internal/goexperiment"
"internal/msan"
"internal/race"
"internal/runtime/sys"
@@ -124,7 +125,12 @@ func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Po
slotKey = *((*unsafe.Pointer)(slotKey))
}
if typ.Key.Equal(key, slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ var slotElem unsafe.Pointer
+ if goexperiment.MapSplitGroup {
+ slotElem = g.elem(typ, i)
+ } else {
+ slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ }
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
@@ -223,7 +229,11 @@ outer:
typedmemmove(typ.Key, slotKey, key)
}
- slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if goexperiment.MapSplitGroup {
+ slotElem = g.elem(typ, i)
+ } else {
+ slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ }
if typ.IndirectElem() {
slotElem = *((*unsafe.Pointer)(slotElem))
}
@@ -265,7 +275,11 @@ outer:
}
typedmemmove(typ.Key, slotKey, key)
- slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if goexperiment.MapSplitGroup {
+ slotElem = g.elem(typ, i)
+ } else {
+ slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ }
if typ.IndirectElem() {
emem := newobject(typ.Elem)
*(*unsafe.Pointer)(slotElem) = emem
diff --git a/src/internal/runtime/maps/runtime_fast32.go b/src/internal/runtime/maps/runtime_fast32.go
index da157c941d..dc3bd3fbd5 100644
--- a/src/internal/runtime/maps/runtime_fast32.go
+++ b/src/internal/runtime/maps/runtime_fast32.go
@@ -6,6 +6,7 @@ package maps
import (
"internal/abi"
+ "internal/goexperiment"
"internal/race"
"internal/runtime/sys"
"unsafe"
@@ -40,14 +41,24 @@ func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Poi
}
full := g.ctrls().matchFull()
slotKey := g.key(typ, 0)
- slotSize := typ.SlotSize
+ var keyStride uintptr
+ if goexperiment.MapSplitGroup {
+ keyStride = 4 // keys are contiguous in split layout
+ } else {
+ keyStride = typ.KeyStride // == SlotSize in interleaved layout
+ }
+ var i uintptr
for full != 0 {
if key == *(*uint32)(slotKey) && full.lowestSet() {
- slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
- return slotElem, true
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i), true
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + typ.ElemOff), true
+ }
}
- slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
+ slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
full = full.shiftOutLowest()
+ i++
}
return unsafe.Pointer(&zeroVal[0]), false
}
@@ -83,8 +94,11 @@ func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Poi
slotKey := g.key(typ, i)
if key == *(*uint32)(slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
- return slotElem, true
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i), true
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + typ.ElemOff), true
+ }
}
match = match.removeFirst()
}
diff --git a/src/internal/runtime/maps/runtime_fast64.go b/src/internal/runtime/maps/runtime_fast64.go
index 6241d4ac6a..dac89eea81 100644
--- a/src/internal/runtime/maps/runtime_fast64.go
+++ b/src/internal/runtime/maps/runtime_fast64.go
@@ -6,6 +6,7 @@ package maps
import (
"internal/abi"
+ "internal/goexperiment"
"internal/race"
"internal/runtime/sys"
"unsafe"
@@ -40,14 +41,24 @@ func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Poi
}
full := g.ctrls().matchFull()
slotKey := g.key(typ, 0)
- slotSize := typ.SlotSize
+ var keyStride uintptr
+ if goexperiment.MapSplitGroup {
+ keyStride = 8 // keys are contiguous in split layout
+ } else {
+ keyStride = typ.KeyStride // == SlotSize in interleaved layout
+ }
+ var i uintptr
for full != 0 {
if key == *(*uint64)(slotKey) && full.lowestSet() {
- slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
- return slotElem, true
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i), true
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + 8), true
+ }
}
- slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
+ slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
full = full.shiftOutLowest()
+ i++
}
return unsafe.Pointer(&zeroVal[0]), false
}
@@ -75,8 +86,11 @@ func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Poi
slotKey := g.key(typ, i)
if key == *(*uint64)(slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
- return slotElem, true
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i), true
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + 8), true
+ }
}
match = match.removeFirst()
}
diff --git a/src/internal/runtime/maps/runtime_faststr.go b/src/internal/runtime/maps/runtime_faststr.go
index 7778bd1881..a96f9feb04 100644
--- a/src/internal/runtime/maps/runtime_faststr.go
+++ b/src/internal/runtime/maps/runtime_faststr.go
@@ -7,6 +7,7 @@ package maps
import (
"internal/abi"
"internal/goarch"
+ "internal/goexperiment"
"internal/race"
"internal/runtime/sys"
"unsafe"
@@ -19,7 +20,12 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Poi
ctrls := *g.ctrls()
slotKey := g.key(typ, 0)
- slotSize := typ.SlotSize
+ var keyStride uintptr
+ if goexperiment.MapSplitGroup {
+ keyStride = 2 * goarch.PtrSize // keys are contiguous in split layout
+ } else {
+ keyStride = typ.KeyStride // == SlotSize in interleaved layout
+ }
// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight,
// where there are 8 keys to check, all of which don't quick-match the lookup key.
@@ -37,7 +43,7 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Poi
}
j = i
}
- slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
+ slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
ctrls >>= 8
}
if j == abi.MapGroupSlots {
@@ -47,7 +53,11 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Poi
// There's exactly one slot that passed the quick test. Do the single expensive comparison.
slotKey = g.key(typ, uintptr(j))
if key == *(*string)(slotKey) {
- return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, uintptr(j))
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
+ }
}
return nil
}
@@ -63,11 +73,15 @@ dohash:
ctrls = *g.ctrls()
slotKey = g.key(typ, 0)
- for range abi.MapGroupSlots {
+ for i := range uintptr(abi.MapGroupSlots) {
if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
- return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i)
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
+ }
}
- slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
+ slotKey = unsafe.Pointer(uintptr(slotKey) + keyStride)
ctrls >>= 8
}
return nil
@@ -154,8 +168,11 @@ func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Po
slotKey := g.key(typ, i)
if key == *(*string)(slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
- return slotElem, true
+ if goexperiment.MapSplitGroup {
+ return g.elem(typ, i), true
+ } else {
+ return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize), true
+ }
}
match = match.removeFirst()
}
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index 977f3091ad..7ef47df37f 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -6,6 +6,7 @@ package maps
import (
"internal/abi"
+ "internal/goexperiment"
"internal/runtime/math"
"unsafe"
)
@@ -615,9 +616,16 @@ func (t *table) Clear(typ *abi.MapType) {
// 4) But if a group is really large, do the test anyway, as
// clearing is expensive.
fullTest := uint64(t.used)*4 <= t.groups.lengthMask // less than ~0.25 entries per group -> >3/4 empty groups
- if typ.SlotSize > 32 {
- // For large slots, it is always worth doing the test first.
- fullTest = true
+ if goexperiment.MapSplitGroup {
+ if (typ.KeyStride + typ.ElemStride) > 32 {
+ // For large slots, it is always worth doing the test first.
+ fullTest = true
+ }
+ } else {
+ if typ.KeyStride > 32 { // KeyStride == SlotSize in interleaved layout
+ // For large slots, it is always worth doing the test first.
+ fullTest = true
+ }
}
if fullTest {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index fc209fdfba..51b57c780c 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -154,6 +154,5 @@ var InternalIsZero = isZero
var IsRegularMemory = isRegularMemory
func MapGroupOf(x, y Type) Type {
- grp, _ := groupAndSlotOf(x, y)
- return grp
+ return groupOf(x, y)
}
diff --git a/src/reflect/map.go b/src/reflect/map.go
index 9d25b1818c..c67c0668a1 100644
--- a/src/reflect/map.go
+++ b/src/reflect/map.go
@@ -6,6 +6,7 @@ package reflect
import (
"internal/abi"
+ "internal/goexperiment"
"internal/race"
"internal/runtime/maps"
"internal/runtime/sys"
@@ -50,7 +51,7 @@ func MapOf(key, elem Type) Type {
}
}
- group, slot := groupAndSlotOf(key, elem)
+ group := groupOf(key, elem)
// Make a map type.
// Note: flag values must match those used in the TMAP case
@@ -67,8 +68,25 @@ func MapOf(key, elem Type) Type {
return typehash(ktyp, p, seed)
}
mt.GroupSize = mt.Group.Size()
- mt.SlotSize = slot.Size()
- mt.ElemOff = slot.Field(1).Offset
+ if goexperiment.MapSplitGroup {
+ // Split layout: field 1 is keys array, field 2 is elems array.
+ mt.KeysOff = group.Field(1).Offset
+ mt.KeyStride = group.Field(1).Type.Elem().Size()
+ mt.ElemsOff = group.Field(2).Offset
+ mt.ElemStride = group.Field(2).Type.Elem().Size()
+ mt.ElemOff = 0
+ } else {
+ // Interleaved layout: field 1 is slots array.
+ // KeyStride = ElemStride = slot stride.
+ // ElemsOff = slots offset + elem offset within slot.
+ slot := group.Field(1).Type.Elem()
+ slotSize := slot.Size()
+ mt.KeysOff = group.Field(1).Offset
+ mt.KeyStride = slotSize
+ mt.ElemsOff = group.Field(1).Offset + slot.Field(1).Offset
+ mt.ElemStride = slotSize
+ mt.ElemOff = slot.Field(1).Offset
+ }
mt.Flags = 0
if needKeyUpdate(ktyp) {
mt.Flags |= abi.MapNeedKeyUpdate
@@ -88,15 +106,7 @@ func MapOf(key, elem Type) Type {
return ti.(Type)
}
-func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
- // type group struct {
- // ctrl uint64
- // slots [abi.MapGroupSlots]struct {
- // key keyType
- // elem elemType
- // }
- // }
-
+func groupOf(ktyp, etyp Type) Type {
if ktyp.Size() > abi.MapMaxKeyBytes {
ktyp = PointerTo(ktyp)
}
@@ -104,7 +114,39 @@ func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
etyp = PointerTo(etyp)
}
- fields := []StructField{
+ if goexperiment.MapSplitGroup {
+ // Split layout (KKKKVVVV):
+ // type group struct {
+ // ctrl uint64
+ // keys [abi.MapGroupSlots]keyType
+ // elems [abi.MapGroupSlots]elemType
+ // }
+ fields := []StructField{
+ {
+ Name: "Ctrl",
+ Type: TypeFor[uint64](),
+ },
+ {
+ Name: "Keys",
+ Type: ArrayOf(abi.MapGroupSlots, ktyp),
+ },
+ {
+ Name: "Elems",
+ Type: ArrayOf(abi.MapGroupSlots, etyp),
+ },
+ }
+ return StructOf(fields)
+ }
+
+ // Interleaved slot layout (KVKVKVKV):
+ // type group struct {
+ // ctrl uint64
+ // slots [abi.MapGroupSlots]struct {
+ // key keyType
+ // elem elemType
+ // }
+ // }
+ slotFields := []StructField{
{
Name: "Key",
Type: ktyp,
@@ -114,9 +156,9 @@ func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
Type: etyp,
},
}
- slot := StructOf(fields)
+ slot := StructOf(slotFields)
- fields = []StructField{
+ fields := []StructField{
{
Name: "Ctrl",
Type: TypeFor[uint64](),
@@ -126,8 +168,7 @@ func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
Type: ArrayOf(abi.MapGroupSlots, slot),
},
}
- group := StructOf(fields)
- return group, slot
+ return StructOf(fields)
}
var stringType = rtypeOf("")
diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py
index 9d2446a1eb..a56711e0bc 100644
--- a/src/runtime/runtime-gdb.py
+++ b/src/runtime/runtime-gdb.py
@@ -169,6 +169,10 @@ class MapTypePrinter:
cnt = 0
# Yield keys and elements in group.
# group is a value of type *group[K,V]
+ # The group layout depends on GOEXPERIMENT=mapsplitgroup:
+ # split: group.keys[i] / group.elems[i]
+ # interleaved: group.slots[i].key / group.slots[i].elem
+ # Detect which layout by trying the 'slots' field first and falling
+ # back to 'keys'/'elems' if the group type has no 'slots' field.
def group_slots(group):
ctrl = group['ctrl']
@@ -179,8 +183,15 @@ class MapTypePrinter:
continue
# Full
- yield str(cnt), group['slots'][i]['key']
- yield str(cnt+1), group['slots'][i]['elem']
+ # The group layout depends on GOEXPERIMENT=mapsplitgroup:
+ # split: group.keys[i] / group.elems[i]
+ # interleaved: group.slots[i].key / group.slots[i].elem
+ try:
+ yield str(cnt), group['slots'][i]['key']
+ yield str(cnt+1), group['slots'][i]['elem']
+ except gdb.error:
+ yield str(cnt), group['keys'][i]
+ yield str(cnt+1), group['elems'][i]
# The linker DWARF generation
# (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records