aboutsummaryrefslogtreecommitdiff
path: root/src/internal/runtime/maps/table.go
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2024-08-14 11:21:28 -0400
committerGopher Robot <gobot@golang.org>2024-10-28 20:35:25 +0000
commit77e3d8cf13a31343ba98268c2dddf6bc41f6ce4c (patch)
treec1f21ea1356d5cccf04ae86aa4ad6539160106cb /src/internal/runtime/maps/table.go
parentbb46b754bebb0e820d74fd9eb02635afbdf5a3bd (diff)
downloadgo-77e3d8cf13a31343ba98268c2dddf6bc41f6ce4c.tar.xz
internal/runtime/maps: small maps point directly to a group
If the map contains 8 or fewer entries, it is wasteful to have a directory that points to a table that points to a group. Add a special case that replaces the directory with a direct pointer to a group. We could theoretically do similar for single table maps (no directory, just point directly to a table), but that is left for later. For #54766. Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap Change-Id: I6fc04dfc11c31dadfe5b5d6481b4c4abd43d48ed Reviewed-on: https://go-review.googlesource.com/c/go/+/611188 Reviewed-by: Keith Randall <khr@golang.org> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Auto-Submit: Michael Pratt <mpratt@google.com> Reviewed-by: Keith Randall <khr@google.com>
Diffstat (limited to 'src/internal/runtime/maps/table.go')
-rw-r--r--src/internal/runtime/maps/table.go68
1 files changed, 64 insertions, 4 deletions
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index 232c077db3..86e5dce10d 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -463,7 +463,8 @@ type Iter struct {
dirIdx int
// tab is the table at dirIdx during the previous call to Next.
- tab *table
+ tab *table
+ groupSmall groupReference // only if small map at init
// entryIdx is the current entry index, prior to adjustment by entryOffset.
// The lower 3 bits of the index are the slot index, and the upper bits
@@ -473,16 +474,28 @@ type Iter struct {
// Init initializes Iter for iteration.
func (it *Iter) Init(typ *abi.SwissMapType, m *Map) {
+
it.typ = typ
if m == nil || m.used == 0 {
return
}
+ dirIdx := 0
+ var groupSmall groupReference
+ if m.dirLen <= 0 {
+ // Use dirIdx == -1 as sentinel for small maps.
+ dirIdx = -1
+ groupSmall.data = m.dirPtr
+ groupSmall.typ = typ
+ }
+
it.typ = m.typ
it.m = m
it.entryOffset = rand()
it.dirOffset = rand()
it.globalDepth = m.globalDepth
+ it.dirIdx = dirIdx
+ it.groupSmall = groupSmall
it.clearSeq = m.clearSeq
}
@@ -525,6 +538,53 @@ func (it *Iter) Next() {
return
}
+ if it.dirIdx < 0 {
+ // Map was small at Init.
+ g := it.groupSmall
+ for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ {
+ k := uint32(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots
+
+ if (g.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
+ // Empty or deleted.
+ continue
+ }
+
+ key := g.key(k)
+
+ // As below, if we have grown to a full map since Init,
+ // we continue to use the old group to decide the keys
+ // to return, but must look them up again in the new
+ // tables.
+ grown := it.m.dirLen > 0
+ var elem unsafe.Pointer
+ if grown {
+ var ok bool
+ newKey, newElem, ok := it.m.getWithKey(key)
+ if !ok {
+ // See comment below.
+ if it.clearSeq == it.m.clearSeq && !it.m.typ.Key.Equal(key, key) {
+ elem = g.elem(k)
+ } else {
+ continue
+ }
+ } else {
+ key = newKey
+ elem = newElem
+ }
+ } else {
+ elem = g.elem(k)
+ }
+
+ it.entryIdx++
+ it.key = key
+ it.elem = elem
+ return
+ }
+ it.key = nil
+ it.elem = nil
+ return
+ }
+
if it.globalDepth != it.m.globalDepth {
// Directory has grown since the last call to Next. Adjust our
// directory index.
@@ -564,15 +624,15 @@ func (it *Iter) Next() {
}
// Continue iteration until we find a full slot.
- for it.dirIdx < len(it.m.directory) {
+ for it.dirIdx < it.m.dirLen {
// TODO(prattmic): We currently look up the latest table on
// every call, even if it.tab is set because the inner loop
// checks if it.tab has grown by checking it.tab != newTab.
//
// We could avoid most of these lookups if we left a flag
// behind on the old table to denote that it is stale.
- dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(len(it.m.directory)-1))
- newTab := it.m.directory[dirIdx]
+ dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
+ newTab := it.m.directoryAt(uintptr(dirIdx))
if it.tab == nil {
if newTab.index != dirIdx {
// Normally we skip past all duplicates of the