aboutsummaryrefslogtreecommitdiff
path: root/src/internal/runtime
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2024-08-14 11:21:28 -0400
committerGopher Robot <gobot@golang.org>2024-10-28 20:35:25 +0000
commit77e3d8cf13a31343ba98268c2dddf6bc41f6ce4c (patch)
treec1f21ea1356d5cccf04ae86aa4ad6539160106cb /src/internal/runtime
parentbb46b754bebb0e820d74fd9eb02635afbdf5a3bd (diff)
downloadgo-77e3d8cf13a31343ba98268c2dddf6bc41f6ce4c.tar.xz
internal/runtime/maps: small maps point directly to a group
If the map contains 8 or fewer entries, it is wasteful to have a directory that points to a table that points to a group. Add a special case that replaces the directory with a direct pointer to a group. We could theoretically do similar for single table maps (no directory, just point directly to a table), but that is left for later. For #54766. Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap Change-Id: I6fc04dfc11c31dadfe5b5d6481b4c4abd43d48ed Reviewed-on: https://go-review.googlesource.com/c/go/+/611188 Reviewed-by: Keith Randall <khr@golang.org> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Auto-Submit: Michael Pratt <mpratt@google.com> Reviewed-by: Keith Randall <khr@google.com>
Diffstat (limited to 'src/internal/runtime')
-rw-r--r--src/internal/runtime/maps/export_test.go41
-rw-r--r--src/internal/runtime/maps/map.go243
-rw-r--r--src/internal/runtime/maps/map_swiss_test.go44
-rw-r--r--src/internal/runtime/maps/map_test.go3
-rw-r--r--src/internal/runtime/maps/table.go68
5 files changed, 347 insertions, 52 deletions
diff --git a/src/internal/runtime/maps/export_test.go b/src/internal/runtime/maps/export_test.go
index 8f62739665..15c112e737 100644
--- a/src/internal/runtime/maps/export_test.go
+++ b/src/internal/runtime/maps/export_test.go
@@ -24,25 +24,47 @@ func NewTestMap[K comparable, V any](length uint64) (*Map, *abi.SwissMapType) {
}
func (m *Map) TableCount() int {
- return len(m.directory)
+ if m.dirLen <= 0 {
+ return 0
+ }
+ return m.dirLen
}
// Total group count, summed across all tables.
func (m *Map) GroupCount() uint64 {
+ if m.dirLen <= 0 {
+ if m.dirPtr == nil {
+ return 0
+ }
+ return 1
+ }
+
var n uint64
- for _, t := range m.directory {
+ var lastTab *table
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
+ if t == lastTab {
+ continue
+ }
+ lastTab = t
n += t.groups.lengthMask + 1
}
return n
}
-// Return a key from a group containing no empty slots, or nil if there are no
-// full groups.
+// Return a key from a group containing no empty slots.
//
-// Also returns nil if a group is full but contains entirely deleted slots.
+// Returns nil if there are no full groups.
+// Returns nil if a group is full but contains entirely deleted slots.
+// Returns nil if the map is small.
func (m *Map) KeyFromFullGroup() unsafe.Pointer {
+ if m.dirLen <= 0 {
+ return nil
+ }
+
var lastTab *table
- for _, t := range m.directory {
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
if t == lastTab {
continue
}
@@ -68,10 +90,15 @@ func (m *Map) KeyFromFullGroup() unsafe.Pointer {
return nil
}
+// Returns nil if the map is small.
func (m *Map) TableFor(key unsafe.Pointer) *table {
+ if m.dirLen <= 0 {
+ return nil
+ }
+
hash := m.typ.Hasher(key, m.seed)
idx := m.directoryIndex(hash)
- return m.directory[idx]
+ return m.directoryAt(idx)
}
func (t *table) GrowthLeft() uint64 {
diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go
index a26b3cd130..112fc08e0f 100644
--- a/src/internal/runtime/maps/map.go
+++ b/src/internal/runtime/maps/map.go
@@ -192,7 +192,7 @@ func h2(h uintptr) uintptr {
type Map struct {
// The number of filled slots (i.e. the number of elements in all
- // tables).
+ // tables). Excludes deleted slots.
used uint64
// Type of this map.
@@ -208,10 +208,27 @@ type Map struct {
// TODO(prattmic): Populate this on table initialization.
seed uintptr
- // The directory of tables. The length of this slice is
- // `1 << globalDepth`. Multiple entries may point to the same table.
- // See top-level comment for more details.
- directory []*table
+ // The directory of tables.
+ //
+ // Normally dirPtr points to an array of table pointers
+ //
+ // dirPtr *[dirLen]*table
+ //
+ // The length (dirLen) of this array is `1 << globalDepth`. Multiple
+ // entries may point to the same table. See top-level comment for more
+ // details.
+ //
+ // Small map optimization: if the map always contained
+ // abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
+ // single group. In that case dirPtr points directly to a single group.
+ //
+ // dirPtr *group
+ //
+ // In this case, dirLen is 0. used counts the number of used slots in
+ // the group. Note that small maps never have deleted slots (as there
+ // is no probe sequence to maintain).
+ dirPtr unsafe.Pointer
+ dirLen int
// The number of bits to use in table directory lookups.
globalDepth uint8
@@ -239,14 +256,31 @@ func NewMap(mt *abi.SwissMapType, capacity uint64) *Map {
//TODO
//seed: uintptr(rand()),
- directory: make([]*table, dirSize),
+ //directory: make([]*table, dirSize),
globalDepth: globalDepth,
}
- for i := range m.directory {
- // TODO: Think more about initial table capacity.
- m.directory[i] = newTable(mt, capacity/dirSize, i, globalDepth)
+ if capacity > abi.SwissMapGroupSlots {
+ directory := make([]*table, dirSize)
+
+ for i := range directory {
+ // TODO: Think more about initial table capacity.
+ directory[i] = newTable(mt, capacity/dirSize, i, globalDepth)
+ }
+
+ m.dirPtr = unsafe.Pointer(&directory[0])
+ m.dirLen = len(directory)
+ } else {
+ grp := newGroups(mt, 1)
+ m.dirPtr = grp.data
+ m.dirLen = 0
+
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+ g.ctrls().setEmpty()
}
return m
@@ -257,6 +291,9 @@ func (m *Map) Type() *abi.SwissMapType {
}
func (m *Map) directoryIndex(hash uintptr) uintptr {
+ if m.dirLen == 1 {
+ return 0
+ }
// TODO(prattmic): Store the shift as globalShift, as we need that more
// often than globalDepth.
if goarch.PtrSize == 4 {
@@ -265,12 +302,21 @@ func (m *Map) directoryIndex(hash uintptr) uintptr {
return hash >> (64 - m.globalDepth)
}
+func (m *Map) directoryAt(i uintptr) *table {
+ return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
+}
+
+func (m *Map) directorySet(i uintptr, nt *table) {
+ *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
+}
+
func (m *Map) replaceTable(nt *table) {
// The number of entries that reference the same table doubles for each
// time the globalDepth grows without the table splitting.
entries := 1 << (m.globalDepth - nt.localDepth)
for i := 0; i < entries; i++ {
- m.directory[nt.index+i] = nt
+ //m.directory[nt.index+i] = nt
+ m.directorySet(uintptr(nt.index+i), nt)
}
}
@@ -278,8 +324,9 @@ func (m *Map) installTableSplit(old, left, right *table) {
if old.localDepth == m.globalDepth {
// No room for another level in the directory. Grow the
// directory.
- newDir := make([]*table, len(m.directory)*2)
- for i, t := range m.directory {
+ newDir := make([]*table, m.dirLen*2)
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
newDir[2*i] = t
newDir[2*i+1] = t
// t may already exist in multiple indices. We should
@@ -291,7 +338,9 @@ func (m *Map) installTableSplit(old, left, right *table) {
}
}
m.globalDepth++
- m.directory = newDir
+ //m.directory = newDir
+ m.dirPtr = unsafe.Pointer(&newDir[0])
+ m.dirLen = len(newDir)
}
// N.B. left and right may still consume multiple indices if the
@@ -318,8 +367,33 @@ func (m *Map) Get(key unsafe.Pointer) (unsafe.Pointer, bool) {
func (m *Map) getWithKey(key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
hash := m.typ.Hasher(key, m.seed)
+ if m.dirLen == 0 {
+ return m.getWithKeySmall(hash, key)
+ }
+
idx := m.directoryIndex(hash)
- return m.directory[idx].getWithKey(hash, key)
+ return m.directoryAt(idx).getWithKey(hash, key)
+}
+
+func (m *Map) getWithKeySmall(hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+
+ match := g.ctrls().matchH2(h2(hash))
+
+ for match != 0 {
+ i := match.first()
+
+ slotKey := g.key(i)
+ if m.typ.Key.Equal(key, slotKey) {
+ return slotKey, g.elem(i), true
+ }
+ match = match.removeFirst()
+ }
+
+ return nil, nil, false
}
func (m *Map) Put(key, elem unsafe.Pointer) {
@@ -334,9 +408,21 @@ func (m *Map) Put(key, elem unsafe.Pointer) {
func (m *Map) PutSlot(key unsafe.Pointer) unsafe.Pointer {
hash := m.typ.Hasher(key, m.seed)
+ if m.dirLen == 0 {
+ if m.used < abi.SwissMapGroupSlots {
+ return m.putSlotSmall(hash, key)
+ }
+
+ // Can't fit another entry, grow to full size map.
+ //
+ // TODO(prattmic): If this is an update to an existing key then
+ // we actually don't need to grow.
+ m.growToTable()
+ }
+
for {
idx := m.directoryIndex(hash)
- elem, ok := m.directory[idx].PutSlot(m, hash, key)
+ elem, ok := m.directoryAt(idx).PutSlot(m, hash, key)
if !ok {
continue
}
@@ -344,17 +430,127 @@ func (m *Map) PutSlot(key unsafe.Pointer) unsafe.Pointer {
}
}
+func (m *Map) putSlotSmall(hash uintptr, key unsafe.Pointer) unsafe.Pointer {
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+
+ match := g.ctrls().matchH2(h2(hash))
+
+ // Look for an existing slot containing this key.
+ for match != 0 {
+ i := match.first()
+
+ slotKey := g.key(i)
+ if m.typ.Key.Equal(key, slotKey) {
+ if m.typ.NeedKeyUpdate() {
+ typedmemmove(m.typ.Key, slotKey, key)
+ }
+
+ slotElem := g.elem(i)
+
+ return slotElem
+ }
+ match = match.removeFirst()
+ }
+
+ // No need to look for deleted slots, small maps can't have them (see
+ // deleteSmall).
+ match = g.ctrls().matchEmpty()
+ if match == 0 {
+ panic("small map with no empty slot")
+ }
+
+ i := match.first()
+
+ slotKey := g.key(i)
+ typedmemmove(m.typ.Key, slotKey, key)
+ slotElem := g.elem(i)
+
+ g.ctrls().set(i, ctrl(h2(hash)))
+ m.used++
+
+ return slotElem
+}
+
+func (m *Map) growToTable() {
+ tab := newTable(m.typ, 2*abi.SwissMapGroupSlots, 0, 0)
+
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+
+ for i := uint32(0); i < abi.SwissMapGroupSlots; i++ {
+ if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
+ // Empty
+ continue
+ }
+ key := g.key(i)
+ elem := g.elem(i)
+ hash := tab.typ.Hasher(key, m.seed)
+ slotElem := tab.uncheckedPutSlot(hash, key)
+ typedmemmove(tab.typ.Elem, slotElem, elem)
+ tab.used++
+ }
+
+ directory := make([]*table, 1)
+
+ directory[0] = tab
+
+ m.dirPtr = unsafe.Pointer(&directory[0])
+ m.dirLen = len(directory)
+}
+
func (m *Map) Delete(key unsafe.Pointer) {
hash := m.typ.Hasher(key, m.seed)
+ if m.dirLen == 0 {
+ m.deleteSmall(hash, key)
+ return
+ }
+
idx := m.directoryIndex(hash)
- m.directory[idx].Delete(m, key)
+ m.directoryAt(idx).Delete(m, key)
+}
+
+func (m *Map) deleteSmall(hash uintptr, key unsafe.Pointer) {
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+
+ match := g.ctrls().matchH2(h2(hash))
+
+ for match != 0 {
+ i := match.first()
+ slotKey := g.key(i)
+ if m.typ.Key.Equal(key, slotKey) {
+ m.used--
+
+ typedmemclr(m.typ.Key, slotKey)
+ typedmemclr(m.typ.Elem, g.elem(i))
+
+ // We only have 1 group, so it is OK to immediately
+ // reuse deleted slots.
+ g.ctrls().set(i, ctrlEmpty)
+ return
+ }
+ match = match.removeFirst()
+ }
}
// Clear deletes all entries from the map resulting in an empty map.
func (m *Map) Clear() {
+ if m.dirLen == 0 {
+ m.clearSmall()
+ return
+ }
+
var lastTab *table
- for _, t := range m.directory {
+ for i := range m.dirLen {
+ t := m.directoryAt(uintptr(i))
if t == lastTab {
continue
}
@@ -365,3 +561,16 @@ func (m *Map) Clear() {
m.clearSeq++
// TODO: shrink directory?
}
+
+func (m *Map) clearSmall() {
+ g := groupReference{
+ typ: m.typ,
+ data: m.dirPtr,
+ }
+
+ typedmemclr(m.typ.Group, g.data)
+ g.ctrls().setEmpty()
+
+ m.used = 0
+ m.clearSeq++
+}
diff --git a/src/internal/runtime/maps/map_swiss_test.go b/src/internal/runtime/maps/map_swiss_test.go
index 21142759e8..7c6b426f6d 100644
--- a/src/internal/runtime/maps/map_swiss_test.go
+++ b/src/internal/runtime/maps/map_swiss_test.go
@@ -56,54 +56,52 @@ func TestTableGroupCount(t *testing.T) {
n: -(1 << 30),
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
- initialHint: mapCount{1, 1},
- after: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
+ initialHint: mapCount{0, 1},
+ after: mapCount{0, 1},
},
},
{
n: -1,
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
- initialHint: mapCount{1, 1},
- after: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
+ initialHint: mapCount{0, 1},
+ after: mapCount{0, 1},
},
},
{
n: 0,
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
- initialHint: mapCount{1, 1},
- after: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
+ initialHint: mapCount{0, 1},
+ after: mapCount{0, 1},
},
},
{
n: 1,
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
- initialHint: mapCount{1, 1},
- after: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
+ initialHint: mapCount{0, 1},
+ after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots,
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
- // TODO(go.dev/issue/54766): Initial capacity should round hint up to avoid grow.
- initialHint: mapCount{1, 1},
- // TODO(prattmic): small map optimization could store all 8 slots.
- after: mapCount{1, 2},
+ initialLit: mapCount{0, 1},
+ initialHint: mapCount{0, 1},
+ after: mapCount{0, 1},
},
},
{
n: abi.SwissMapGroupSlots + 1,
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
@@ -112,7 +110,7 @@ func TestTableGroupCount(t *testing.T) {
n: belowMax, // 1.5 group max = 2 groups @ 75%
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
@@ -121,7 +119,7 @@ func TestTableGroupCount(t *testing.T) {
n: atMax, // 2 groups at max
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
initialHint: mapCount{1, 2},
after: mapCount{1, 2},
},
@@ -130,7 +128,7 @@ func TestTableGroupCount(t *testing.T) {
n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
// TODO(go.dev/issue/54766): Initial capacity should round hint up to avoid grow.
initialHint: mapCount{1, 2},
after: mapCount{1, 4},
@@ -140,7 +138,7 @@ func TestTableGroupCount(t *testing.T) {
n: 2 * belowMax, // 3 * group max = 4 groups @75%
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
initialHint: mapCount{1, 4},
after: mapCount{1, 4},
},
@@ -149,7 +147,7 @@ func TestTableGroupCount(t *testing.T) {
n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
escape: mapCase{
// TODO(go.dev/issue/54766): empty maps
- initialLit: mapCount{1, 1},
+ initialLit: mapCount{0, 1},
// TODO(go.dev/issue/54766): Initial capacity should round hint up to avoid grow.
initialHint: mapCount{1, 4},
after: mapCount{1, 8},
diff --git a/src/internal/runtime/maps/map_test.go b/src/internal/runtime/maps/map_test.go
index 29806ee97b..4b39bf5ec7 100644
--- a/src/internal/runtime/maps/map_test.go
+++ b/src/internal/runtime/maps/map_test.go
@@ -223,7 +223,8 @@ func TestTablePutDelete(t *testing.T) {
// So first we must add to the table continuously until we happen to
// fill a group.
- m, _ := maps.NewTestMap[uint32, uint32](8)
+ // Avoid small maps, they have no tables.
+ m, _ := maps.NewTestMap[uint32, uint32](16)
key := uint32(0)
elem := uint32(256 + 0)
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index 232c077db3..86e5dce10d 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -463,7 +463,8 @@ type Iter struct {
dirIdx int
// tab is the table at dirIdx during the previous call to Next.
- tab *table
+ tab *table
+ groupSmall groupReference // only if small map at init
// entryIdx is the current entry index, prior to adjustment by entryOffset.
// The lower 3 bits of the index are the slot index, and the upper bits
@@ -473,16 +474,28 @@ type Iter struct {
// Init initializes Iter for iteration.
func (it *Iter) Init(typ *abi.SwissMapType, m *Map) {
+
it.typ = typ
if m == nil || m.used == 0 {
return
}
+ dirIdx := 0
+ var groupSmall groupReference
+ if m.dirLen <= 0 {
+ // Use dirIdx == -1 as sentinel for small maps.
+ dirIdx = -1
+ groupSmall.data = m.dirPtr
+ groupSmall.typ = typ
+ }
+
it.typ = m.typ
it.m = m
it.entryOffset = rand()
it.dirOffset = rand()
it.globalDepth = m.globalDepth
+ it.dirIdx = dirIdx
+ it.groupSmall = groupSmall
it.clearSeq = m.clearSeq
}
@@ -525,6 +538,53 @@ func (it *Iter) Next() {
return
}
+ if it.dirIdx < 0 {
+ // Map was small at Init.
+ g := it.groupSmall
+ for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ {
+ k := uint32(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots
+
+ if (g.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
+ // Empty or deleted.
+ continue
+ }
+
+ key := g.key(k)
+
+ // As below, if we have grown to a full map since Init,
+ // we continue to use the old group to decide the keys
+ // to return, but must look them up again in the new
+ // tables.
+ grown := it.m.dirLen > 0
+ var elem unsafe.Pointer
+ if grown {
+ var ok bool
+ newKey, newElem, ok := it.m.getWithKey(key)
+ if !ok {
+ // See comment below.
+ if it.clearSeq == it.m.clearSeq && !it.m.typ.Key.Equal(key, key) {
+ elem = g.elem(k)
+ } else {
+ continue
+ }
+ } else {
+ key = newKey
+ elem = newElem
+ }
+ } else {
+ elem = g.elem(k)
+ }
+
+ it.entryIdx++
+ it.key = key
+ it.elem = elem
+ return
+ }
+ it.key = nil
+ it.elem = nil
+ return
+ }
+
if it.globalDepth != it.m.globalDepth {
// Directory has grown since the last call to Next. Adjust our
// directory index.
@@ -564,15 +624,15 @@ func (it *Iter) Next() {
}
// Continue iteration until we find a full slot.
- for it.dirIdx < len(it.m.directory) {
+ for it.dirIdx < it.m.dirLen {
// TODO(prattmic): We currently look up the latest table on
// every call, even if it.tab is set because the inner loop
// checks if it.tab has grown by checking it.tab != newTab.
//
// We could avoid most of these lookups if we left a flag
// behind on the old table to denote that it is stale.
- dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(len(it.m.directory)-1))
- newTab := it.m.directory[dirIdx]
+ dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
+ newTab := it.m.directoryAt(uintptr(dirIdx))
if it.tab == nil {
if newTab.index != dirIdx {
// Normally we skip past all duplicates of the