aboutsummaryrefslogtreecommitdiff
path: root/src/internal/runtime
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2025-08-20 17:33:14 -0700
committerKeith Randall <khr@golang.org>2025-09-10 15:06:16 -0700
commitde5d7eccb99088e3ab42c0d907da6852d8f9cebe (patch)
tree32579f70bd4ab2a3fe70fd307b5b2f2772e7042f /src/internal/runtime
parent8098b99547e2bb802c976a424e486ba94d3222a8 (diff)
downloadgo-de5d7eccb99088e3ab42c0d907da6852d8f9cebe.tar.xz
runtime/internal/maps: only conditionally clear groups when sparse
We only want to do the work of clearing slots if they are full. But we also don't want to do too much work to figure out whether a slot is full or not, especially if clearing a slot is cheap.

1) We decide group-by-group instead of slot-by-slot. If any slot in a group is full, we zero the whole group.
2) If groups are unlikely to be empty, don't bother testing for it.
3) If groups are 50%/50% likely to be empty, also don't bother testing, as it confuses the branch predictor. See #75097.
4) But if a group is really large, do the test anyway, as clearing is expensive.

Fixes #75097

Change-Id: I9191865dd3e0fe887751cffe6082ac27d8d8439c
Reviewed-on: https://go-review.googlesource.com/c/go/+/697876
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Keith Randall <khr@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Youlin Feng <fengyoulin@live.com>
Diffstat (limited to 'src/internal/runtime')
-rw-r--r--src/internal/runtime/maps/table.go33
1 file changed, 29 insertions, 4 deletions
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index a8160befd2..0535be0fc1 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -604,12 +604,37 @@ func (t *table) Clear(typ *abi.MapType) {
if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
return
}
- for i := uint64(0); i <= t.groups.lengthMask; i++ {
- g := t.groups.group(typ, i)
- if g.ctrls().anyFull() {
+ // We only want to do the work of clearing slots
+ // if they are full. But we also don't want to do too
+ // much work to figure out whether a slot is full or not,
+ // especially if clearing a slot is cheap.
+ // 1) We decide group-by-group instead of slot-by-slot.
+ // If any slot in a group is full, we zero the whole group.
+ // 2) If groups are unlikely to be empty, don't bother
+ // testing for it.
+ // 3) If groups are 50%/50% likely to be empty, also don't
+ // bother testing, as it confuses the branch predictor. See #75097.
+ // 4) But if a group is really large, do the test anyway, as
+ // clearing is expensive.
+ fullTest := uint64(t.used)*4 <= t.groups.lengthMask // less than ~0.25 entries per group -> >3/4 empty groups
+ if typ.SlotSize > 32 {
+ // For large slots, it is always worth doing the test first.
+ fullTest = true
+ }
+ if fullTest {
+ for i := uint64(0); i <= t.groups.lengthMask; i++ {
+ g := t.groups.group(typ, i)
+ if g.ctrls().anyFull() {
+ typedmemclr(typ.Group, g.data)
+ }
+ g.ctrls().setEmpty()
+ }
+ } else {
+ for i := uint64(0); i <= t.groups.lengthMask; i++ {
+ g := t.groups.group(typ, i)
typedmemclr(typ.Group, g.data)
+ g.ctrls().setEmpty()
}
- g.ctrls().setEmpty()
}
t.used = 0
t.growthLeft = mgl