about summary refs log tree commit diff
path: root/src/internal/runtime
diff options
context:
space:
mode:
authorKeith Randall <khr@golang.org>2024-11-14 16:58:07 -0800
committerKeith Randall <khr@golang.org>2024-11-17 20:17:05 +0000
commit63f762bcdea96889d8ffa406804665b84bda63ab (patch)
tree6ec8508a506b512742af1d3415116f90d67ad7be /src/internal/runtime
parent04807d3acf160b270fbec42b7b672d531dec06b7 (diff)
downloadgo-63f762bcdea96889d8ffa406804665b84bda63ab.tar.xz
internal/runtime/maps: eliminate a load from the hot path
typ.Group.Size involves two loads. Instead cache GroupSize as a separate field of the map type so we can get to it in just one load. Change-Id: I10ffdce1c7f75dcf448da14040fda78f0d75fd1d Reviewed-on: https://go-review.googlesource.com/c/go/+/627716 Reviewed-by: Cherry Mui <cherryyz@google.com> Reviewed-by: Michael Pratt <mpratt@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/internal/runtime')
-rw-r--r--src/internal/runtime/maps/group.go4
-rw-r--r--src/internal/runtime/maps/map.go2
-rw-r--r--src/internal/runtime/maps/map_test.go6
3 files changed, 6 insertions, 6 deletions
diff --git a/src/internal/runtime/maps/group.go b/src/internal/runtime/maps/group.go
index 35b39bbc37..aae667c8d8 100644
--- a/src/internal/runtime/maps/group.go
+++ b/src/internal/runtime/maps/group.go
@@ -53,7 +53,7 @@ func (b bitset) removeFirst() bitset {
// removeBelow removes all set bits below slot i (non-inclusive).
func (b bitset) removeBelow(i uintptr) bitset {
// Clear all bits below slot i's byte.
- mask := (uint64(1) << (8*uint64(i))) - 1
+ mask := (uint64(1) << (8 * uint64(i))) - 1
return b &^ bitset(mask)
}
@@ -239,7 +239,7 @@ func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference {
// TODO(prattmic): Do something here about truncation on cast to
// uintptr on 32-bit systems?
- offset := uintptr(i) * typ.Group.Size_
+ offset := uintptr(i) * typ.GroupSize
return groupReference{
data: unsafe.Pointer(uintptr(g.data) + offset),
diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go
index 86977bbc2d..969da13432 100644
--- a/src/internal/runtime/maps/map.go
+++ b/src/internal/runtime/maps/map.go
@@ -296,7 +296,7 @@ func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
if overflow {
return m // return an empty map.
} else {
- mem, overflow := math.MulUintptr(groups, mt.Group.Size_)
+ mem, overflow := math.MulUintptr(groups, mt.GroupSize)
if overflow || mem > maxAlloc {
return m // return an empty map.
}
diff --git a/src/internal/runtime/maps/map_test.go b/src/internal/runtime/maps/map_test.go
index 42db55c6a4..160450ebb2 100644
--- a/src/internal/runtime/maps/map_test.go
+++ b/src/internal/runtime/maps/map_test.go
@@ -517,8 +517,8 @@ func testTableIterationGrowDuplicate(t *testing.T, grow int) {
key := *(*uint32)(keyPtr)
elem := *(*uint64)(elemPtr)
- if elem != 256 + uint64(key) {
- t.Errorf("iteration got key %d elem %d want elem %d", key, elem, 256 + uint64(key))
+ if elem != 256+uint64(key) {
+ t.Errorf("iteration got key %d elem %d want elem %d", key, elem, 256+uint64(key))
}
if _, ok := got[key]; ok {
t.Errorf("iteration got key %d more than once", key)
@@ -623,7 +623,7 @@ func TestMapZeroSizeSlot(t *testing.T) {
tab := m.TableFor(typ, unsafe.Pointer(&key))
start := tab.GroupsStart()
length := tab.GroupsLength()
- end := unsafe.Pointer(uintptr(start) + length*typ.Group.Size() - 1) // inclusive to ensure we have a valid pointer
+ end := unsafe.Pointer(uintptr(start) + length*typ.GroupSize - 1) // inclusive to ensure we have a valid pointer
if uintptr(got) < uintptr(start) || uintptr(got) > uintptr(end) {
t.Errorf("elem address outside groups allocation; got %p want [%p, %p]", got, start, end)
}