path: root/src/runtime
author     Michael Pratt <mpratt@google.com>  2024-05-03 13:03:04 -0400
committer  Michael Pratt <mpratt@google.com>  2024-10-14 19:58:47 +0000
commit     c39bc22c141bc6990e4e2abf604dcf56669ff779 (patch)
tree       5384243aaaa9d9f9796674223cf186ee3ceb63a9 /src/runtime
parent     48849e0866f64a40d04a9151e44e5a73acdfc17b (diff)
download   go-c39bc22c141bc6990e4e2abf604dcf56669ff779.tar.xz
all: wire up swisstable maps
Use the new SwissTable-based map in internal/runtime/maps as the basis
for the runtime map when GOEXPERIMENT=swissmap.

Integration is complete enough to pass all.bash.

Notable missing features:

* Race integration / concurrent write detection
* Stack-allocated maps
* Specialized "fast" map variants
* Indirect key / elem

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64_power10,gotip-linux-amd64-longtest-swissmap
Change-Id: Ie97b656b6d8e05c0403311ae08fef9f51756a639
Reviewed-on: https://go-review.googlesource.com/c/go/+/594596
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
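For context, the swissmap and non-swissmap runtime map files are selected at build
time via GOEXPERIMENT, so the new path is exercised with something like
GOEXPERIMENT=swissmap ./all.bash (as the longtest-swissmap trybot name suggests).
A minimal sketch of the assumed gating for map_swiss.go, using the
goexperiment.swissmap build tag (the *_noswiss files carry the negated tag):

//go:build goexperiment.swissmap

// Sketch only: map_swiss.go is compiled when GOEXPERIMENT=swissmap is enabled,
// replacing the hmap-based implementation in map_noswiss.go and friends.
package runtime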
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/export_map_noswiss_test.go     6
-rw-r--r--  src/runtime/export_map_swiss_test.go      49
-rw-r--r--  src/runtime/export_test.go                 6
-rw-r--r--  src/runtime/map_fast32_swiss.go           12
-rw-r--r--  src/runtime/map_fast64_swiss.go           12
-rw-r--r--  src/runtime/map_faststr_swiss.go          10
-rw-r--r--  src/runtime/map_noswiss_test.go           26
-rw-r--r--  src/runtime/map_swiss.go                1642
-rw-r--r--  src/runtime/map_swiss_test.go            169
-rw-r--r--  src/runtime/map_test.go                   52
-rw-r--r--  src/runtime/runtime-gdb_test.go            8
11 files changed, 268 insertions, 1724 deletions
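Before the per-file diffs, a short standalone sketch of the hint rounding that the
new makemap/checkHint path in map_swiss.go performs: the requested capacity is
rounded up to a power of two and converted into a group count. This is a hedged
illustration, not runtime code; the 8-slot group size is an assumption standing in
for abi.SwissMapGroupSlots, and math/bits.Len64 stands in for sys.Len64.

package main

import (
	"fmt"
	"math/bits"
)

// alignUpPow2 mirrors the helper added in map_swiss.go: round n up to the
// next power of two, reporting whether the round-up overflows uint64.
func alignUpPow2(n uint64) (uint64, bool) {
	if n == 0 {
		return 0, false
	}
	v := uint64(1) << bits.Len64(n-1)
	if v == 0 {
		return 0, true
	}
	return v, false
}

func main() {
	const groupSlots = 8 // assumed stand-in for abi.SwissMapGroupSlots
	for _, hint := range []uint64{8, 9, 100, 1000} {
		capacity, overflow := alignUpPow2(hint)
		// e.g. hint=100 rounds up to capacity=128, i.e. 16 groups of 8 slots.
		fmt.Printf("hint=%d -> capacity=%d, groups=%d, overflow=%v\n",
			hint, capacity, capacity/groupSlots, overflow)
	}
}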
diff --git a/src/runtime/export_map_noswiss_test.go b/src/runtime/export_map_noswiss_test.go
index 9e6d81a77c..4638afa6b8 100644
--- a/src/runtime/export_map_noswiss_test.go
+++ b/src/runtime/export_map_noswiss_test.go
@@ -11,6 +11,12 @@ import (
"unsafe"
)
+const RuntimeHmapSize = unsafe.Sizeof(hmap{})
+
+func OverLoadFactor(count int, B uint8) bool {
+ return overLoadFactor(count, B)
+}
+
func MapBucketsCount(m map[int]int) int {
h := *(**hmap)(unsafe.Pointer(&m))
return 1 << h.B
diff --git a/src/runtime/export_map_swiss_test.go b/src/runtime/export_map_swiss_test.go
index ac0308fce0..55a7d6ff04 100644
--- a/src/runtime/export_map_swiss_test.go
+++ b/src/runtime/export_map_swiss_test.go
@@ -6,53 +6,6 @@
package runtime
-import (
- "internal/abi"
- "unsafe"
-)
-
-func MapBucketsCount(m map[int]int) int {
- h := *(**hmap)(unsafe.Pointer(&m))
- return 1 << h.B
-}
-
-func MapBucketsPointerIsNil(m map[int]int) bool {
- h := *(**hmap)(unsafe.Pointer(&m))
- return h.buckets == nil
-}
-
func MapTombstoneCheck(m map[int]int) {
- // Make sure emptyOne and emptyRest are distributed correctly.
- // We should have a series of filled and emptyOne cells, followed by
- // a series of emptyRest cells.
- h := *(**hmap)(unsafe.Pointer(&m))
- i := any(m)
- t := *(**maptype)(unsafe.Pointer(&i))
-
- for x := 0; x < 1<<h.B; x++ {
- b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
- n := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != emptyRest {
- n++
- }
- }
- }
- k := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < abi.SwissMapBucketCount; i++ {
- if k < n && b.tophash[i] == emptyRest {
- panic("early emptyRest")
- }
- if k >= n && b.tophash[i] != emptyRest {
- panic("late non-emptyRest")
- }
- if k == n-1 && b.tophash[i] == emptyOne {
- panic("last non-emptyRest entry is emptyOne")
- }
- k++
- }
- }
- }
+ // TODO
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 3bde1aea29..5c8d2b18c6 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -481,12 +481,6 @@ func (rw *RWMutex) Unlock() {
rw.rw.unlock()
}
-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
-
-func OverLoadFactor(count int, B uint8) bool {
- return overLoadFactor(count, B)
-}
-
func LockOSCounts() (external, internal uint32) {
gp := getg()
if gp.m.lockedExt+gp.m.lockedInt == 0 {
diff --git a/src/runtime/map_fast32_swiss.go b/src/runtime/map_fast32_swiss.go
index f2482393dd..5cc2376982 100644
--- a/src/runtime/map_fast32_swiss.go
+++ b/src/runtime/map_fast32_swiss.go
@@ -7,29 +7,31 @@
package runtime
import (
+ "internal/abi"
+ "internal/runtime/maps"
"unsafe"
)
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer {
throw("mapaccess1_fast32 unimplemented")
panic("unreachable")
}
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) {
throw("mapaccess2_fast32 unimplemented")
panic("unreachable")
}
-func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer {
throw("mapassign_fast32 unimplemented")
panic("unreachable")
}
-func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
throw("mapassign_fast32ptr unimplemented")
panic("unreachable")
}
-func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) {
throw("mapdelete_fast32 unimplemented")
}
diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64_swiss.go
index 07ed9934c3..bf892fe83f 100644
--- a/src/runtime/map_fast64_swiss.go
+++ b/src/runtime/map_fast64_swiss.go
@@ -7,29 +7,31 @@
package runtime
import (
+ "internal/abi"
+ "internal/runtime/maps"
"unsafe"
)
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer {
throw("mapaccess1_fast64 unimplemented")
panic("unreachable")
}
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) {
throw("mapaccess2_fast64 unimplemented")
panic("unreachable")
}
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer {
throw("mapassign_fast64 unimplemented")
panic("unreachable")
}
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
throw("mapassign_fast64ptr unimplemented")
panic("unreachable")
}
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
+func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) {
throw("mapdelete_fast64 unimplemented")
}
diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr_swiss.go
index 41090d7381..b0fb54315a 100644
--- a/src/runtime/map_faststr_swiss.go
+++ b/src/runtime/map_faststr_swiss.go
@@ -7,24 +7,26 @@
package runtime
import (
+ "internal/abi"
+ "internal/runtime/maps"
"unsafe"
)
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer {
throw("mapaccess1_faststr unimplemented")
panic("unreachable")
}
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool) {
throw("mapaccess2_faststr unimplemented")
panic("unreachable")
}
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
+func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer {
throw("mapassign_faststr unimplemented")
panic("unreachable")
}
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
+func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string) {
throw("mapdelete_faststr unimplemented")
}
diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go
index 72d7e6d362..bda448471c 100644
--- a/src/runtime/map_noswiss_test.go
+++ b/src/runtime/map_noswiss_test.go
@@ -8,11 +8,37 @@ package runtime_test
import (
"internal/abi"
+ "internal/goarch"
"runtime"
"slices"
"testing"
)
+func TestHmapSize(t *testing.T) {
+ // The structure of hmap is defined in runtime/map.go
+ // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
+ // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
+ var hmapSize = uintptr(8 + 5*goarch.PtrSize)
+ if runtime.RuntimeHmapSize != hmapSize {
+ t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
+ }
+}
+
+func TestLoadFactor(t *testing.T) {
+ for b := uint8(0); b < 20; b++ {
+ count := 13 * (1 << b) / 2 // 6.5
+ if b == 0 {
+ count = 8
+ }
+ if runtime.OverLoadFactor(count, b) {
+ t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
+ }
+ if !runtime.OverLoadFactor(count+1, b) {
+ t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
+ }
+ }
+}
+
func TestMapIterOrder(t *testing.T) {
sizes := []int{3, 7, 9, 15}
if abi.OldMapBucketCountBits >= 5 {
diff --git a/src/runtime/map_swiss.go b/src/runtime/map_swiss.go
index 590fccc407..bd0d05d092 100644
--- a/src/runtime/map_swiss.go
+++ b/src/runtime/map_swiss.go
@@ -6,292 +6,65 @@
package runtime
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-// loadFactor %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
import (
"internal/abi"
- "internal/goarch"
- "internal/runtime/atomic"
+ "internal/runtime/maps"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
)
-type maptype = abi.SwissMapType
-
const (
- // Maximum number of key/elem pairs a bucket can hold.
- bucketCntBits = abi.SwissMapBucketCountBits
-
- // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
- // Because of minimum alignment rules, bucketCnt is known to be at least 8.
- // Represent as loadFactorNum/loadFactorDen, to allow integer math.
- loadFactorDen = 2
- loadFactorNum = loadFactorDen * abi.SwissMapBucketCount * 13 / 16
-
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
- // even though pointers are 32 bit.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
-
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
- emptyOne = 1 // this cell is empty
- evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
- minTopHash = 5 // minimum tophash for a normal filled cell.
-
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- hashWriting = 4 // a goroutine is writing to the map
- sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*goarch.PtrSize) - 1
+ // TODO: remove? These are used by tests but not the actual map
+ loadFactorNum = 7
+ loadFactorDen = 8
)
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
- return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
- // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
- // Make sure this stays in sync with the compiler's definition.
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // hash seed
-
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
-
- extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
- // If both key and elem do not contain pointers and are inline, then we mark bucket
- // type as containing no pointers. This avoids scanning such maps.
- // However, bmap.overflow is a pointer. In order to keep overflow buckets
- // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
- // overflow and oldoverflow are only used if key and elem do not contain pointers.
- // overflow contains overflow buckets for hmap.buckets.
- // oldoverflow contains overflow buckets for hmap.oldbuckets.
- // The indirection allows to store a pointer to the slice in hiter.
- overflow *[]*bmap
- oldoverflow *[]*bmap
-
- // nextOverflow holds a pointer to a free overflow bucket.
- nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
- // tophash generally contains the top byte of the hash value
- // for each key in this bucket. If tophash[0] < minTopHash,
- // tophash[0] is a bucket evacuation state instead.
- tophash [abi.SwissMapBucketCount]uint8
- // Followed by bucketCnt keys and then bucketCnt elems.
- // NOTE: packing all the keys together and then all the elems together makes the
- // code a bit more complicated than alternating key/elem/key/elem/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
- elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
- oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
- // Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
- return bucketShift(b) - 1
-}
+type maptype = abi.SwissMapType
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (goarch.PtrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
+func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
+ if int64(int(hint)) != hint {
+ hint = 0
}
- return top
-}
-
-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
+ return makemap(t, int(hint), m)
}
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
- return add(unsafe.Pointer(b), dataOffset)
+// makemap_small implements Go map creation for make(map[k]v) and
+// make(map[k]v, hint) when hint is known to be at most bucketCnt
+// at compile time and the map needs to be allocated on the heap.
+func makemap_small() *maps.Map {
+ panic("unimplemented")
}
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
- // We trigger same-size map growth if there are
- // as many overflow buckets as buckets.
- // We need to be able to count to 1<<h.B.
- if h.B < 16 {
- h.noverflow++
- return
- }
- // Increment with probability 1/(1<<(h.B-15)).
- // When we reach 1<<15 - 1, we will have approximately
- // as many overflow buckets as buckets.
- mask := uint32(1)<<(h.B-15) - 1
- // Example: if h.B == 18, then mask == 7,
- // and rand() & 7 == 0 with probability 1/8.
- if uint32(rand())&mask == 0 {
- h.noverflow++
+// checkHint verifies that hint is reasonable, adjusting as necessary.
+func checkHint(t *abi.SwissMapType, hint int) uint64 {
+ if hint <= 0 {
+ return 0
}
-}
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
- var ovf *bmap
- if h.extra != nil && h.extra.nextOverflow != nil {
- // We have preallocated overflow buckets available.
- // See makeBucketArray for more details.
- ovf = h.extra.nextOverflow
- if ovf.overflow(t) == nil {
- // We're not at the end of the preallocated overflow buckets. Bump the pointer.
- h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
- } else {
- // This is the last preallocated overflow bucket.
- // Reset the overflow pointer on this bucket,
- // which was set to a non-nil sentinel value.
- ovf.setoverflow(t, nil)
- h.extra.nextOverflow = nil
+ capacity := uint64(hint)
+
+ // Ensure a groups allocation for a capacity this high doesn't exceed
+ // the maximum allocation size.
+ //
+ // TODO(prattmic): Once we split tables, a large hint will result in
+ // splitting the tables up front, which will use smaller individual
+ // allocations.
+ //
+ // TODO(prattmic): This logic is largely duplicated from maps.newTable
+ // / maps.(*table).reset.
+ capacity, overflow := alignUpPow2(capacity)
+ if !overflow {
+ groupCount := capacity / abi.SwissMapGroupSlots
+ mem, overflow := math.MulUintptr(uintptr(groupCount), t.Group.Size_)
+ if overflow || mem > maxAlloc {
+ return 0
}
} else {
- ovf = (*bmap)(newobject(t.Bucket))
- }
- h.incrnoverflow()
- if !t.Bucket.Pointers() {
- h.createOverflow()
- *h.extra.overflow = append(*h.extra.overflow, ovf)
- }
- b.setoverflow(t, ovf)
- return ovf
-}
-
-func (h *hmap) createOverflow() {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- if h.extra.overflow == nil {
- h.extra.overflow = new([]*bmap)
- }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
- if int64(int(hint)) != hint {
- hint = 0
+ return 0
}
- return makemap(t, int(hint), h)
-}
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-func makemap_small() *hmap {
- h := new(hmap)
- h.hash0 = uint32(rand())
- return h
+ return capacity
}
// makemap implements Go map creation for make(map[k]v, hint).
@@ -299,90 +72,27 @@ func makemap_small() *hmap {
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-func makemap(t *maptype, hint int, h *hmap) *hmap {
- mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
- if overflow || mem > maxAlloc {
- hint = 0
- }
-
- // initialize Hmap
- if h == nil {
- h = new(hmap)
- }
- h.hash0 = uint32(rand())
-
- // Find the size parameter B which will hold the requested # of elements.
- // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
- B := uint8(0)
- for overLoadFactor(hint, B) {
- B++
- }
- h.B = B
+func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
+ capacity := checkHint(t, hint)
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
- // If hint is large zeroing this memory could take a while.
- if h.B != 0 {
- var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
- if nextOverflow != nil {
- h.extra = new(mapextra)
- h.extra.nextOverflow = nextOverflow
- }
- }
-
- return h
+ // TODO: use existing m
+ return maps.NewTable(t, capacity)
}
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.Bucket.Size_ * nbuckets
- up := roundupsize(sz, !t.Bucket.Pointers())
- if up != sz {
- nbuckets = up / t.Bucket.Size_
- }
- }
-
- if dirtyalloc == nil {
- buckets = newarray(t.Bucket, int(nbuckets))
- } else {
- // dirtyalloc was previously generated by
- // the above newarray(t.Bucket, int(nbuckets))
- // but may not be empty.
- buckets = dirtyalloc
- size := t.Bucket.Size_ * nbuckets
- if t.Bucket.Pointers() {
- memclrHasPointers(buckets, size)
- } else {
- memclrNoHeapPointers(buckets, size)
- }
+// alignUpPow2 rounds n up to the next power of 2.
+//
+// Returns true if round up causes overflow.
+//
+// TODO(prattmic): deduplicate from internal/runtime/maps.
+func alignUpPow2(n uint64) (uint64, bool) {
+ if n == 0 {
+ return 0, false
}
-
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
- last.setoverflow(t, (*bmap)(buckets))
+ v := (uint64(1) << sys.Len64(n-1))
+ if v == 0 {
+ return 0, true
}
- return buckets, nextOverflow
+ return v, false
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
@@ -390,197 +100,89 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
+func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+ // TODO: concurrent checks.
+ if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
+ racereadpc(unsafe.Pointer(m), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
- if msanenabled && h != nil {
+ if msanenabled && m != nil {
msanread(key, t.Key.Size_)
}
- if asanenabled && h != nil {
+ if asanenabled && m != nil {
asanread(key, t.Key.Size_)
}
- if h == nil || h.count == 0 {
+
+ if m == nil || m.Used() == 0 {
if err := mapKeyError(t, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e
- }
- }
+
+ elem, ok := m.Get(key)
+ if !ok {
+ return unsafe.Pointer(&zeroVal[0])
}
- return unsafe.Pointer(&zeroVal[0])
+ return elem
}
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
+func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+ // TODO: concurrent checks.
+ if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
+ racereadpc(unsafe.Pointer(m), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
- if msanenabled && h != nil {
+ if msanenabled && m != nil {
msanread(key, t.Key.Size_)
}
- if asanenabled && h != nil {
+ if asanenabled && m != nil {
asanread(key, t.Key.Size_)
}
- if h == nil || h.count == 0 {
+
+ if m == nil || m.Used() == 0 {
if err := mapKeyError(t, key); err != nil {
panic(err) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e, true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-// returns both key and elem. Used by map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return k, e
- }
- }
+ elem, ok := m.Get(key)
+ if !ok {
+ return unsafe.Pointer(&zeroVal[0]), false
}
- return nil, nil
+ return elem, true
}
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
- e := mapaccess1(t, h, key)
+func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
+ e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero
}
return e
}
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
- e := mapaccess1(t, h, key)
+func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+ e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero, false
}
return e, true
}
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
+func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+ // TODO: concurrent checks.
+ if m == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapassign)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
+ racewritepc(unsafe.Pointer(m), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
if msanenabled {
@@ -589,762 +191,91 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if asanenabled {
asanread(key, t.Key.Size_)
}
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
- }
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- top := tophash(hash)
-
- var inserti *uint8
- var insertk unsafe.Pointer
- var elem unsafe.Pointer
-bucketloop:
- for {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if !t.Key.Equal(key, k) {
- continue
- }
- // already have a mapping for key. Update it.
- if t.NeedKeyUpdate() {
- typedmemmove(t.Key, k, key)
- }
- elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if inserti == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- newb := h.newoverflow(t, b)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- elem = add(insertk, abi.SwissMapBucketCount*uintptr(t.KeySize))
- }
-
- // store new key/elem at insert position
- if t.IndirectKey() {
- kmem := newobject(t.Key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.IndirectElem() {
- vmem := newobject(t.Elem)
- *(*unsafe.Pointer)(elem) = vmem
- }
- typedmemmove(t.Key, insertk, key)
- *inserti = top
- h.count++
-
-done:
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- if t.IndirectElem() {
- elem = *((*unsafe.Pointer)(elem))
- }
- return elem
+ return m.PutSlot(key)
}
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
+func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+ // TODO: concurrent checks.
+ if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
+ racewritepc(unsafe.Pointer(m), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
}
- if msanenabled && h != nil {
+ if msanenabled && m != nil {
msanread(key, t.Key.Size_)
}
- if asanenabled && h != nil {
+ if asanenabled && m != nil {
asanread(key, t.Key.Size_)
}
- if h == nil || h.count == 0 {
+
+ if m == nil || m.Used() == 0 {
if err := mapKeyError(t, key); err != nil {
panic(err) // see issue 23734
}
return
}
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write (delete).
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- bOrig := b
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break search
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- k2 := k
- if t.IndirectKey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !t.Key.Equal(key, k2) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.IndirectKey() {
- *(*unsafe.Pointer)(k) = nil
- } else if t.Key.Pointers() {
- memclrHasPointers(k, t.Key.Size_)
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- *(*unsafe.Pointer)(e) = nil
- } else if t.Elem.Pointers() {
- memclrHasPointers(e, t.Elem.Size_)
- } else {
- memclrNoHeapPointers(e, t.Elem.Size_)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- // It would be nice to make this a separate function, but
- // for loops are not currently inlineable.
- if i == abi.SwissMapBucketCount-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = abi.SwissMapBucketCount - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = uint32(rand())
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
+ m.Delete(key)
}
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
+// mapiterinit initializes the Iter struct used for ranging over maps.
+// The Iter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
- if raceenabled && h != nil {
+func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+ if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
- }
-
- it.t = t
- if h == nil || h.count == 0 {
- return
- }
-
- if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
- throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
- }
- it.h = h
-
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
- if !t.Bucket.Pointers() {
- // Allocate the current slice and remember pointers to both current and old.
- // This preserves all relevant overflow buckets alive even if
- // the table grows and/or overflow buckets are added to the table
- // while we are iterating.
- h.createOverflow()
- it.overflow = h.extra.overflow
- it.oldoverflow = h.extra.oldoverflow
- }
-
- // decide where to start
- r := uintptr(rand())
- it.startBucket = r & bucketMask(h.B)
- it.offset = uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
-
- // iterator state
- it.bucket = it.startBucket
-
- // Remember we have an iterator.
- // Can run concurrently with another mapiterinit().
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomic.Or8(&h.flags, iterator|oldIterator)
+ racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
- mapiternext(it)
+ it.Init(t, m)
+ it.Next()
}
-func mapiternext(it *hiter) {
- h := it.h
+func mapiternext(it *maps.Iter) {
+ // TODO: concurrent checks.
if raceenabled {
callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map iteration and map write")
+ racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext))
}
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.elem = nil
- return
- }
- if h.growing() && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & it.h.oldbucketmask()
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == bucketShift(it.B) {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < abi.SwissMapBucketCount; i++ {
- offi := (i + it.offset) & (abi.SwissMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
- // TODO: emptyRest is hard to use here, as we start iterating
- // in the middle of a bucket. It's feasible, just tricky.
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
- if checkBucket != noCheck && !h.sameSizeGrow() {
- // Special case: iterator was started during a grow to a larger size
- // and the grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- if t.ReflexiveKey() || t.Key.Equal(k, k) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := t.Hasher(k, uintptr(h.hash0))
- if hash&bucketMask(it.B) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
- // This is the golden data, we can return it.
- // OR
- // key!=key, so the entry can't be deleted or updated, so we can just return it.
- // That's lucky for us because when key!=key we can't look it up successfully.
- it.key = k
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- it.elem = e
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, re := mapaccessK(t, h, k)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.elem = re
- }
- it.bucket = bucket
- if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
- it.bptr = b
- }
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- b = b.overflow(t)
- i = 0
- goto next
+ it.Next()
}
// mapclear deletes all keys from a map.
-func mapclear(t *maptype, h *hmap) {
- if raceenabled && h != nil {
+func mapclear(t *abi.SwissMapType, m *maps.Map) {
+ // TODO: concurrent checks.
+ if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
+ racewritepc(unsafe.Pointer(m), callerpc, pc)
}
- if h == nil || h.count == 0 {
+ if m == nil || m.Used() == 0 {
return
}
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- h.flags ^= hashWriting
-
- // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
- markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
- for i := uintptr(0); i <= mask; i++ {
- b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- b.tophash[i] = emptyRest
- }
- }
- }
- }
- markBucketsEmpty(h.buckets, bucketMask(h.B))
- if oldBuckets := h.oldbuckets; oldBuckets != nil {
- markBucketsEmpty(oldBuckets, h.oldbucketmask())
- }
-
- h.flags &^= sameSizeGrow
- h.oldbuckets = nil
- h.nevacuate = 0
- h.noverflow = 0
- h.count = 0
-
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- h.hash0 = uint32(rand())
-
- // Keep the mapextra allocation but clear any extra information.
- if h.extra != nil {
- *h.extra = mapextra{}
- }
-
- // makeBucketArray clears the memory pointed to by h.buckets
- // and recovers any overflow buckets by generating them
- // as if h.buckets was newly alloced.
- _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
- if nextOverflow != nil {
- // If overflow buckets are created then h.extra
- // will have been allocated during initial bucket creation.
- h.extra.nextOverflow = nextOverflow
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func hashGrow(t *maptype, h *hmap) {
- // If we've hit the load factor, get bigger.
- // Otherwise, there are too many overflow buckets,
- // so keep the same number of buckets and "grow" laterally.
- bigger := uint8(1)
- if !overLoadFactor(h.count+1, h.B) {
- bigger = 0
- h.flags |= sameSizeGrow
- }
- oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B += bigger
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- h.noverflow = 0
-
- if h.extra != nil && h.extra.overflow != nil {
- // Promote current overflow buckets to the old generation.
- if h.extra.oldoverflow != nil {
- throw("oldoverflow is not nil")
- }
- h.extra.oldoverflow = h.extra.overflow
- h.extra.overflow = nil
- }
- if nextOverflow != nil {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- h.extra.nextOverflow = nextOverflow
- }
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
- return count > abi.SwissMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
- // If the threshold is too low, we do extraneous work.
- // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
- // "too many" means (approximately) as many overflow buckets as regular buckets.
- // See incrnoverflow for more details.
- if B > 15 {
- B = 15
- }
- // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
- return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
- return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
- return h.flags&sameSizeGrow != 0
-}
-
-//go:linkname sameSizeGrowForIssue69110Test
-func sameSizeGrowForIssue69110Test(h *hmap) bool {
- return h.sameSizeGrow()
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
- oldB := h.B
- if !h.sameSizeGrow() {
- oldB--
- }
- return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
- return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate(t, h, h.nevacuate)
- }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
- b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
- return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
- b *bmap // current destination bucket
- i int // key/elem index into b
- k unsafe.Pointer // pointer to current key storage
- e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, abi.SwissMapBucketCount*uintptr(t.KeySize))
- for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- k2 := k
- if t.IndirectKey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.Hasher(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- useY = top & 1
- top = tophash(hash)
- } else {
- if hash&newbit != 0 {
- useY = 1
- }
- }
- }
-
- if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
- throw("bad evacuatedN")
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
- dst := &xy[useY] // evacuation destination
-
- if dst.i == abi.SwissMapBucketCount {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
- }
- dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.IndirectKey() {
- *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
- } else {
- typedmemmove(t.Key, dst.k, k) // copy elem
- }
- if t.IndirectElem() {
- *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
- } else {
- typedmemmove(t.Elem, dst.e, e)
- }
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, uintptr(t.KeySize))
- dst.e = add(dst.e, uintptr(t.ValueSize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
- b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.BucketSize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
- h.nevacuate++
- // Experiments suggest that 1024 is overkill by at least an order of magnitude.
- // Put it in there as a safeguard anyway, to ensure O(1) behavior.
- stop := h.nevacuate + 1024
- if stop > newbit {
- stop = newbit
- }
- for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
- h.nevacuate++
- }
- if h.nevacuate == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- // Can discard old overflow buckets as well.
- // If they are still referenced by an iterator,
- // then the iterator holds a pointers to the slice.
- if h.extra != nil {
- h.extra.oldoverflow = nil
- }
- h.flags &^= sameSizeGrow
- }
+ m.Clear()
}
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
+func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
// Check invariants and reflects math.
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.Key.Size_ > abi.SwissMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
- t.Key.Size_ <= abi.SwissMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
- throw("key size wrong")
- }
- if t.Elem.Size_ > abi.SwissMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
- t.Elem.Size_ <= abi.SwissMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
- throw("elem size wrong")
- }
- if t.Key.Align_ > abi.SwissMapBucketCount {
- throw("key align too big")
- }
- if t.Elem.Align_ > abi.SwissMapBucketCount {
- throw("elem align too big")
- }
- if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
- throw("key size not a multiple of key align")
- }
- if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
- throw("elem size not a multiple of elem align")
- }
- if abi.SwissMapBucketCount < 8 {
- throw("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.Key.Align_) != 0 {
- throw("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.Elem.Align_) != 0 {
- throw("need padding in bucket (elem)")
- }
+ // TODO: other checks
return makemap(t, cap, nil)
}
//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- elem, ok := mapaccess2(t, h, key)
+func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+ elem, ok := mapaccess2(t, m, key)
if !ok {
// reflect wants nil for a missing element
elem = nil
@@ -1353,8 +284,8 @@ func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
}
//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
- elem, ok := mapaccess2_faststr(t, h, key)
+func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
+ elem, ok := mapaccess2_faststr(t, m, key)
if !ok {
// reflect wants nil for a missing element
elem = nil
@@ -1363,74 +294,74 @@ func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
}
//go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
- p := mapassign(t, h, key)
+func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
+ p := mapassign(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
- p := mapassign_faststr(t, h, key)
+func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
+ p := mapassign_faststr(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
+func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+ mapdelete(t, m, key)
}
//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
- mapdelete_faststr(t, h, key)
+func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
+ mapdelete_faststr(t, m, key)
}
//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
- mapiterinit(t, h, it)
+func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+ mapiterinit(t, m, it)
}
//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
+func reflect_mapiternext(it *maps.Iter) {
mapiternext(it)
}
//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
+func reflect_mapiterkey(it *maps.Iter) unsafe.Pointer {
+ return it.Key()
}
//go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
- return it.elem
+func reflect_mapiterelem(it *maps.Iter) unsafe.Pointer {
+ return it.Elem()
}
//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
- if h == nil {
+func reflect_maplen(m *maps.Map) int {
+ if m == nil {
return 0
}
if raceenabled {
callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+ racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
- return h.count
+ return int(m.Used())
}
//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *maptype, h *hmap) {
- mapclear(t, h)
+func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
+ mapclear(t, m)
}
//go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
- if h == nil {
+func reflectlite_maplen(m *maps.Map) int {
+ if m == nil {
return 0
}
if raceenabled {
callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+ racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
- return h.count
+ return int(m.Used())
}
// mapinitnoop is a no-op function known to the Go linker; if a given global
@@ -1445,169 +376,19 @@ func mapinitnoop()
//go:linkname mapclone maps.clone
func mapclone(m any) any {
e := efaceOf(&m)
- e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
+ e.data = unsafe.Pointer(mapclone2((*abi.SwissMapType)(unsafe.Pointer(e._type)), (*maps.Map)(e.data)))
return m
}
-// moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
-// and the pos at which the next key/value will be written; pos == bucketCnt means the next key/value needs to be written to an overflow bucket.
-func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
- for i := 0; i < abi.SwissMapBucketCount; i++ {
- if isEmpty(src.tophash[i]) {
- continue
- }
-
- for ; pos < abi.SwissMapBucketCount; pos++ {
- if isEmpty(dst.tophash[pos]) {
- break
- }
- }
-
- if pos == abi.SwissMapBucketCount {
- dst = h.newoverflow(t, dst)
- pos = 0
- }
-
- srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
- srcEle := add(unsafe.Pointer(src), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
- dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
- dstEle := add(unsafe.Pointer(dst), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
-
- dst.tophash[pos] = src.tophash[i]
- if t.IndirectKey() {
- srcK = *(*unsafe.Pointer)(srcK)
- if t.NeedKeyUpdate() {
- kStore := newobject(t.Key)
- typedmemmove(t.Key, kStore, srcK)
- srcK = kStore
- }
- // Note: if NeedKeyUpdate is false, then the memory
- // used to store the key is immutable, so we can share
- // it between the original map and its clone.
- *(*unsafe.Pointer)(dstK) = srcK
- } else {
- typedmemmove(t.Key, dstK, srcK)
- }
- if t.IndirectElem() {
- srcEle = *(*unsafe.Pointer)(srcEle)
- eStore := newobject(t.Elem)
- typedmemmove(t.Elem, eStore, srcEle)
- *(*unsafe.Pointer)(dstEle) = eStore
- } else {
- typedmemmove(t.Elem, dstEle, srcEle)
- }
- pos++
- h.count++
- }
- return dst, pos
-}
-
-func mapclone2(t *maptype, src *hmap) *hmap {
- hint := src.count
- if overLoadFactor(hint, src.B) {
- // Note: in rare cases (e.g. during a same-sized grow) the map
- // can be overloaded. Make sure we don't allocate a destination
- // bucket array larger than the source bucket array.
- // This will cause the cloned map to be overloaded also,
- // but that's better than crashing. See issue 69110.
- hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
- }
- dst := makemap(t, hint, nil)
- dst.hash0 = src.hash0
- dst.nevacuate = 0
- // flags do not need to be copied here, just like a new map has no flags.
-
- if src.count == 0 {
- return dst
- }
-
- if src.flags&hashWriting != 0 {
- fatal("concurrent map clone and map write")
- }
+func mapclone2(t *abi.SwissMapType, src *maps.Map) *maps.Map {
+ dst := makemap(t, int(src.Used()), nil)
- if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
- // Quick copy for small maps.
- dst.buckets = newobject(t.Bucket)
- dst.count = src.count
- typedmemmove(t.Bucket, dst.buckets, src.buckets)
- return dst
+ var iter maps.Iter
+ iter.Init(t, src)
+ for iter.Next(); iter.Key() != nil; iter.Next() {
+ dst.Put(iter.Key(), iter.Elem())
}
- if dst.B == 0 {
- dst.buckets = newobject(t.Bucket)
- }
- dstArraySize := int(bucketShift(dst.B))
- srcArraySize := int(bucketShift(src.B))
- for i := 0; i < dstArraySize; i++ {
- dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
- pos := 0
- for j := 0; j < srcArraySize; j += dstArraySize {
- srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
- for srcBmap != nil {
- dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
- srcBmap = srcBmap.overflow(t)
- }
- }
- }
-
- if src.oldbuckets == nil {
- return dst
- }
-
- oldB := src.B
- srcOldbuckets := src.oldbuckets
- if !src.sameSizeGrow() {
- oldB--
- }
- oldSrcArraySize := int(bucketShift(oldB))
-
- for i := 0; i < oldSrcArraySize; i++ {
- srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
- if evacuated(srcBmap) {
- continue
- }
-
- if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src
- dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
- for dstBmap.overflow(t) != nil {
- dstBmap = dstBmap.overflow(t)
- }
- pos := 0
- for srcBmap != nil {
- dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
- srcBmap = srcBmap.overflow(t)
- }
- continue
- }
-
- // oldB < dst.B, so a single source bucket may go to multiple destination buckets.
- // Process entries one at a time.
- for srcBmap != nil {
-	// move from old bucket to new bucket
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- if isEmpty(srcBmap.tophash[i]) {
- continue
- }
-
- if src.flags&hashWriting != 0 {
- fatal("concurrent map clone and map write")
- }
-
- srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- srcK = *((*unsafe.Pointer)(srcK))
- }
-
- srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- srcEle = *((*unsafe.Pointer)(srcEle))
- }
- dstEle := mapassign(t, dst, srcK)
- typedmemmove(t.Elem, dstEle, srcEle)
- }
- srcBmap = srcBmap.overflow(t)
- }
- }
return dst
}
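
The new mapclone2 above sizes the destination from src.Used() and then copies entries through a maps.Iter. A user-level sketch of the same clone-by-iteration pattern for one concrete map type (the helper name and types here are illustrative only, not part of the change):

// cloneIntMap mirrors the shape of the new mapclone2 for a concrete type:
// pre-size the destination from the source, then copy entries one by one.
func cloneIntMap(src map[int]string) map[int]string {
	dst := make(map[int]string, len(src)) // akin to makemap(t, int(src.Used()), nil)
	for k, v := range src {               // akin to iter.Init / Next / Key / Elem
		dst[k] = v // akin to dst.Put(key, elem)
	}
	return dst
}

Unlike the removed bucket-copying path, this per-entry approach does not try to preserve the source's bucket layout; it only preserves the contents.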
@@ -1615,127 +396,14 @@ func mapclone2(t *maptype, src *hmap) *hmap {
//
//go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer) {
- e := efaceOf(&m)
- t := (*maptype)(unsafe.Pointer(e._type))
- h := (*hmap)(e.data)
-
- if h == nil || h.count == 0 {
- return
- }
- s := (*slice)(p)
- r := int(rand())
- offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
- if h.B == 0 {
- copyKeys(t, h, (*bmap)(h.buckets), s, offset)
- return
- }
- arraySize := int(bucketShift(h.B))
- buckets := h.buckets
- for i := 0; i < arraySize; i++ {
- bucket := (i + r) & (arraySize - 1)
- b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
- copyKeys(t, h, b, s, offset)
- }
-
- if h.growing() {
- oldArraySize := int(h.noldbuckets())
- for i := 0; i < oldArraySize; i++ {
- bucket := (i + r) & (oldArraySize - 1)
- b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
- if evacuated(b) {
- continue
- }
- copyKeys(t, h, b, s, offset)
- }
- }
- return
-}
-
-func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
- for b != nil {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) {
- continue
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if s.len >= s.cap {
- fatal("concurrent map read and map write")
- }
- typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
- s.len++
- }
- b = b.overflow(t)
- }
+ // Currently unused in the maps package.
+ panic("unimplemented")
}
// values for implementing maps.values
//
//go:linkname values maps.values
func values(m any, p unsafe.Pointer) {
- e := efaceOf(&m)
- t := (*maptype)(unsafe.Pointer(e._type))
- h := (*hmap)(e.data)
- if h == nil || h.count == 0 {
- return
- }
- s := (*slice)(p)
- r := int(rand())
- offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
- if h.B == 0 {
- copyValues(t, h, (*bmap)(h.buckets), s, offset)
- return
- }
- arraySize := int(bucketShift(h.B))
- buckets := h.buckets
- for i := 0; i < arraySize; i++ {
- bucket := (i + r) & (arraySize - 1)
- b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
- copyValues(t, h, b, s, offset)
- }
-
- if h.growing() {
- oldArraySize := int(h.noldbuckets())
- for i := 0; i < oldArraySize; i++ {
- bucket := (i + r) & (oldArraySize - 1)
- b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
- if evacuated(b) {
- continue
- }
- copyValues(t, h, b, s, offset)
- }
- }
- return
-}
-
-func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
- for b != nil {
- for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
- offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) {
- continue
- }
-
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
-
- ele := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
- if t.IndirectElem() {
- ele = *((*unsafe.Pointer)(ele))
- }
- if s.len >= s.cap {
- fatal("concurrent map read and map write")
- }
- typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
- s.len++
- }
- b = b.overflow(t)
- }
+ // Currently unused in the maps package.
+ panic("unimplemented")
}
diff --git a/src/runtime/map_swiss_test.go b/src/runtime/map_swiss_test.go
index 78db4aa926..aa019e1c31 100644
--- a/src/runtime/map_swiss_test.go
+++ b/src/runtime/map_swiss_test.go
@@ -8,17 +8,41 @@ package runtime_test
import (
"internal/abi"
- "runtime"
+ "internal/goarch"
+ "internal/runtime/maps"
"slices"
"testing"
+ "unsafe"
)
+func TestHmapSize(t *testing.T) {
+ // The structure of Map is defined in internal/runtime/maps/map.go
+ // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync.
+ // The size of Map should be 72 bytes on 64 bit and 56 bytes on 32 bit platforms.
+ wantSize := uintptr(4*goarch.PtrSize + 5*8)
+ gotSize := unsafe.Sizeof(maps.Map{})
+ if gotSize != wantSize {
+ t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
+ }
+}
+
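
For reference, the wantSize expression in TestHmapSize works out as follows (a worked check of the arithmetic already in the test, not new behavior):

  64-bit platforms: 4*goarch.PtrSize + 5*8 = 4*8 + 40 = 72 bytes
  32-bit platforms: 4*goarch.PtrSize + 5*8 = 4*4 + 40 = 56 bytes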
+// See also reflect_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+ var m map[struct{}]struct{}
+ mTyp := abi.TypeOf(m)
+ mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
+
+	// internal/runtime/maps creates pointers to slots, even if the slots
+	// are size 0. The compiler should have reserved an extra word to
+	// ensure that pointers to the zero-size type at the end of the group
+	// are valid.
+ if mt.Group.Size() <= 8 {
+ t.Errorf("Group size got %d want >8", mt.Group.Size())
+ }
+}
+
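
The comment in TestGroupSizeZero relies on the rule that an interior pointer must not point one past the end of an allocation. A minimal sketch of the layout idea, using a hypothetical group struct rather than the runtime's actual type:

package main

import (
	"fmt"
	"unsafe"
)

// group is a hypothetical stand-in for a swissmap group whose key/elem
// slots are zero-size. The trailing pad word keeps a pointer to the last
// (zero-size) slot inside the allocation instead of one past its end.
type group struct {
	ctrl  uint64
	slots [8]struct{}
	_     uint64 // extra word reserved by the compiler (assumption for illustration)
}

func main() {
	fmt.Println(unsafe.Sizeof(group{})) // 16: ctrl word + pad word
}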
func TestMapIterOrder(t *testing.T) {
sizes := []int{3, 7, 9, 15}
- if abi.SwissMapBucketCountBits >= 5 {
- // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
- t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.SwissMapBucketCountBits)
- }
for _, n := range sizes {
for i := 0; i < 1000; i++ {
// Make m be {0: true, 1: true, ..., n-1: true}.
@@ -50,139 +74,6 @@ func TestMapIterOrder(t *testing.T) {
}
}
-const bs = abi.SwissMapBucketCount
-
-// belowOverflow should be a pretty-full pair of buckets;
-// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
-// that are 13/16 full each, which is the overflow boundary.
-// Adding one to that should ensure overflow to the next higher size.
-const (
- belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
- atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
-)
-
-var mapBucketTests = [...]struct {
- n int // n is the number of map elements
- noescape int // number of expected buckets for non-escaping map
- escape int // number of expected buckets for escaping map
-}{
- {-(1 << 30), 1, 1},
- {-1, 1, 1},
- {0, 1, 1},
- {1, 1, 1},
- {bs, 1, 1},
- {bs + 1, 2, 2},
- {belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
- {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
-
- {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
- {2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
-
- {4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
- {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
-}
-
func TestMapBuckets(t *testing.T) {
- // Test that maps of different sizes have the right number of buckets.
- // Non-escaping maps with small buckets (like map[int]int) never
- // have a nil bucket pointer due to starting with preallocated buckets
- // on the stack. Escaping maps start with a non-nil bucket pointer if
- // hint size is above bucketCnt and thereby have more than one bucket.
- // These tests depend on bucketCnt and loadFactor* in map.go.
- t.Run("mapliteral", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := map[int]int{}
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(map[int]int{})
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("nohint", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int)
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("makemap", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int, tt.n)
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int, tt.n))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("makemap64", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int, int64(tt.n))
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int, tt.n))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
+ t.Skipf("todo")
}
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index 7d884c4922..8a73b9ff6f 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -6,7 +6,7 @@ package runtime_test
import (
"fmt"
- "internal/goarch"
+ "internal/goexperiment"
"internal/testenv"
"math"
"os"
@@ -20,17 +20,6 @@ import (
"unsafe"
)
-func TestHmapSize(t *testing.T) {
- // The structure of hmap is defined in runtime/map.go
- // and in cmd/compile/internal/gc/reflect.go and must be in sync.
- // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*goarch.PtrSize)
- if runtime.RuntimeHmapSize != hmapSize {
- t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
- }
-
-}
-
// negative zero is a good test because:
// 1. 0 and -0 are equal, yet have distinct representations.
// 2. 0 is represented as all zeros, -0 isn't.
@@ -430,6 +419,12 @@ func TestEmptyKeyAndValue(t *testing.T) {
if len(a) != 1 {
t.Errorf("empty value insert problem")
}
+ if len(b) != 1 {
+ t.Errorf("empty key insert problem")
+ }
+ if len(c) != 1 {
+ t.Errorf("empty key+value insert problem")
+ }
if b[empty{}] != 1 {
t.Errorf("empty key returned wrong value")
}
@@ -668,33 +663,37 @@ func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) }
var testNonEscapingMapVariable int = 8
func TestNonEscapingMap(t *testing.T) {
+ if goexperiment.SwissMap {
+ t.Skip("TODO(go.dev/issue/54766): implement stack allocated maps")
+ }
+
n := testing.AllocsPerRun(1000, func() {
m := map[int]int{}
m[0] = 0
})
if n != 0 {
- t.Fatalf("mapliteral: want 0 allocs, got %v", n)
+ t.Errorf("mapliteral: want 0 allocs, got %v", n)
}
n = testing.AllocsPerRun(1000, func() {
m := make(map[int]int)
m[0] = 0
})
if n != 0 {
- t.Fatalf("no hint: want 0 allocs, got %v", n)
+ t.Errorf("no hint: want 0 allocs, got %v", n)
}
n = testing.AllocsPerRun(1000, func() {
m := make(map[int]int, 8)
m[0] = 0
})
if n != 0 {
- t.Fatalf("with small hint: want 0 allocs, got %v", n)
+ t.Errorf("with small hint: want 0 allocs, got %v", n)
}
n = testing.AllocsPerRun(1000, func() {
m := make(map[int]int, testNonEscapingMapVariable)
m[0] = 0
})
if n != 0 {
- t.Fatalf("with variable hint: want 0 allocs, got %v", n)
+ t.Errorf("with variable hint: want 0 allocs, got %v", n)
}
}
@@ -1246,22 +1245,11 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) {
})
}
-func TestLoadFactor(t *testing.T) {
- for b := uint8(0); b < 20; b++ {
- count := 13 * (1 << b) / 2 // 6.5
- if b == 0 {
- count = 8
- }
- if runtime.OverLoadFactor(count, b) {
- t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
- }
- if !runtime.OverLoadFactor(count+1, b) {
- t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
- }
+func TestMapKeys(t *testing.T) {
+ if goexperiment.SwissMap {
+ t.Skip("mapkeys not implemented for swissmaps")
}
-}
-func TestMapKeys(t *testing.T) {
type key struct {
s string
pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
@@ -1277,6 +1265,10 @@ func TestMapKeys(t *testing.T) {
}
func TestMapValues(t *testing.T) {
+ if goexperiment.SwissMap {
+ t.Skip("mapvalues not implemented for swissmaps")
+ }
+
type val struct {
s string
pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index f2a9bd055a..ef01d6a194 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -9,6 +9,7 @@ import (
"flag"
"fmt"
"internal/abi"
+ "internal/goexperiment"
"internal/testenv"
"os"
"os/exec"
@@ -185,6 +186,9 @@ func TestGdbPythonCgo(t *testing.T) {
}
func testGdbPython(t *testing.T, cgo bool) {
+ if goexperiment.SwissMap {
+ t.Skip("TODO(prattmic): swissmap DWARF")
+ }
if cgo {
testenv.MustHaveCGO(t)
}
@@ -527,6 +531,10 @@ func main() {
// TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
// See bug #17830.
func TestGdbAutotmpTypes(t *testing.T) {
+ if goexperiment.SwissMap {
+ t.Skip("TODO(prattmic): swissmap DWARF")
+ }
+
checkGdbEnvironment(t)
t.Parallel()
checkGdbVersion(t)