aboutsummaryrefslogtreecommitdiff
path: root/src/reflect
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2024-05-03 13:03:04 -0400
committerMichael Pratt <mpratt@google.com>2024-10-14 19:58:47 +0000
commitc39bc22c141bc6990e4e2abf604dcf56669ff779 (patch)
tree5384243aaaa9d9f9796674223cf186ee3ceb63a9 /src/reflect
parent48849e0866f64a40d04a9151e44e5a73acdfc17b (diff)
downloadgo-c39bc22c141bc6990e4e2abf604dcf56669ff779.tar.xz
all: wire up swisstable maps
Use the new SwissTable-based map in internal/runtime/maps as the basis for the runtime map when GOEXPERIMENT=swissmap. Integration is complete enough to pass all.bash. Notable missing features: * Race integration / concurrent write detection * Stack-allocated maps * Specialized "fast" map variants * Indirect key / elem For #54766. Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64_power10,gotip-linux-amd64-longtest-swissmap Change-Id: Ie97b656b6d8e05c0403311ae08fef9f51756a639 Reviewed-on: https://go-review.googlesource.com/c/go/+/594596 Reviewed-by: Keith Randall <khr@golang.org> Reviewed-by: Keith Randall <khr@google.com> Reviewed-by: Michael Knyszek <mknyszek@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/reflect')
-rw-r--r--src/reflect/all_test.go197
-rw-r--r--src/reflect/export_noswiss_test.go25
-rw-r--r--src/reflect/export_swiss_test.go12
-rw-r--r--src/reflect/export_test.go13
-rw-r--r--src/reflect/map_noswiss_test.go60
-rw-r--r--src/reflect/map_swiss.go181
-rw-r--r--src/reflect/map_swiss_test.go30
7 files changed, 247 insertions, 271 deletions
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index e1bd37a70a..b3f4545531 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -10,7 +10,6 @@ import (
"flag"
"fmt"
"go/token"
- "internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/testenv"
@@ -1134,13 +1133,15 @@ var deepEqualTests = []DeepEqualTest{
}
func TestDeepEqual(t *testing.T) {
- for _, test := range deepEqualTests {
- if test.b == (self{}) {
- test.b = test.a
- }
- if r := DeepEqual(test.a, test.b); r != test.eq {
- t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
- }
+ for i, test := range deepEqualTests {
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ if test.b == (self{}) {
+ test.b = test.a
+ }
+ if r := DeepEqual(test.a, test.b); r != test.eq {
+ t.Errorf("DeepEqual(%#v, %#v) = %v, want %v", test.a, test.b, r, test.eq)
+ }
+ })
}
}
@@ -1273,6 +1274,11 @@ var deepEqualPerfTests = []struct {
}
func TestDeepEqualAllocs(t *testing.T) {
+ // TODO(prattmic): maps on stack
+ if goexperiment.SwissMap {
+ t.Skipf("Maps on stack not yet implemented")
+ }
+
for _, tt := range deepEqualPerfTests {
t.Run(ValueOf(tt.x).Type().String(), func(t *testing.T) {
got := testing.AllocsPerRun(100, func() {
@@ -7171,60 +7177,61 @@ func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
}
-func TestGCBits(t *testing.T) {
- verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
+// Building blocks for types seen by the compiler (like [2]Xscalar).
+// The compiler will create the type structures for the derived types,
+// including their GC metadata.
+type Xscalar struct{ x uintptr }
+type Xptr struct{ x *byte }
+type Xptrscalar struct {
+ *byte
+ uintptr
+}
+type Xscalarptr struct {
+ uintptr
+ *byte
+}
+type Xbigptrscalar struct {
+ _ [100]*byte
+ _ [100]uintptr
+}
+
+var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
- // Building blocks for types seen by the compiler (like [2]Xscalar).
- // The compiler will create the type structures for the derived types,
- // including their GC metadata.
- type Xscalar struct{ x uintptr }
- type Xptr struct{ x *byte }
- type Xptrscalar struct {
+func init() {
+ // Building blocks for types constructed by reflect.
+ // This code is in a separate block so that code below
+ // cannot accidentally refer to these.
+ // The compiler must NOT see types derived from these
+ // (for example, [2]Scalar must NOT appear in the program),
+ // or else reflect will use it instead of having to construct one.
+ // The goal is to test the construction.
+ type Scalar struct{ x uintptr }
+ type Ptr struct{ x *byte }
+ type Ptrscalar struct {
*byte
uintptr
}
- type Xscalarptr struct {
+ type Scalarptr struct {
uintptr
*byte
}
- type Xbigptrscalar struct {
+ type Bigptrscalar struct {
_ [100]*byte
_ [100]uintptr
}
+ type Int64 int64
+ Tscalar = TypeOf(Scalar{})
+ Tint64 = TypeOf(Int64(0))
+ Tptr = TypeOf(Ptr{})
+ Tscalarptr = TypeOf(Scalarptr{})
+ Tptrscalar = TypeOf(Ptrscalar{})
+ Tbigptrscalar = TypeOf(Bigptrscalar{})
+}
- var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type
- {
- // Building blocks for types constructed by reflect.
- // This code is in a separate block so that code below
- // cannot accidentally refer to these.
- // The compiler must NOT see types derived from these
- // (for example, [2]Scalar must NOT appear in the program),
- // or else reflect will use it instead of having to construct one.
- // The goal is to test the construction.
- type Scalar struct{ x uintptr }
- type Ptr struct{ x *byte }
- type Ptrscalar struct {
- *byte
- uintptr
- }
- type Scalarptr struct {
- uintptr
- *byte
- }
- type Bigptrscalar struct {
- _ [100]*byte
- _ [100]uintptr
- }
- type Int64 int64
- Tscalar = TypeOf(Scalar{})
- Tint64 = TypeOf(Int64(0))
- Tptr = TypeOf(Ptr{})
- Tscalarptr = TypeOf(Scalarptr{})
- Tptrscalar = TypeOf(Ptrscalar{})
- Tbigptrscalar = TypeOf(Bigptrscalar{})
- }
+var empty = []byte{}
- empty := []byte{}
+func TestGCBits(t *testing.T) {
+ verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1})
verifyGCBits(t, TypeOf(Xscalar{}), empty)
verifyGCBits(t, Tscalar, empty)
@@ -7304,95 +7311,7 @@ func TestGCBits(t *testing.T) {
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
- if goexperiment.SwissMap {
- const bucketCount = abi.SwissMapBucketCount
-
- hdr := make([]byte, bucketCount/goarch.PtrSize)
-
- verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
- verifyGCBits(t, MapBucketOf(k, e), want)
- verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
- }
- verifyMapBucket(t,
- Tscalar, Tptr,
- map[Xscalar]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalarptr, Tptr,
- map[Xscalarptr]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t, Tint64, Tptr,
- map[int64]Xptr(nil),
- join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalar, Tscalar,
- map[Xscalar]Xscalar(nil),
- empty)
- verifyMapBucket(t,
- ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
- map[[2]Xscalarptr][3]Xptrscalar(nil),
- join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
- } else {
- const bucketCount = abi.OldMapBucketCount
-
- hdr := make([]byte, bucketCount/goarch.PtrSize)
-
- verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
- verifyGCBits(t, MapBucketOf(k, e), want)
- verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
- }
- verifyMapBucket(t,
- Tscalar, Tptr,
- map[Xscalar]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalarptr, Tptr,
- map[Xscalarptr]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t, Tint64, Tptr,
- map[int64]Xptr(nil),
- join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalar, Tscalar,
- map[Xscalar]Xscalar(nil),
- empty)
- verifyMapBucket(t,
- ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
- map[[2]Xscalarptr][3]Xptrscalar(nil),
- join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
- }
+ testGCBitsMap(t)
}
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go
new file mode 100644
index 0000000000..34e5e92055
--- /dev/null
+++ b/src/reflect/export_noswiss_test.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+package reflect
+
+import (
+ "internal/abi"
+ "unsafe"
+)
+
+func MapBucketOf(x, y Type) Type {
+ return toType(bucketOf(x.common(), y.common()))
+}
+
+func CachedBucketOf(m Type) Type {
+ t := m.(*rtype)
+ if Kind(t.t.Kind_&abi.KindMask) != Map {
+ panic("not map")
+ }
+ tt := (*mapType)(unsafe.Pointer(t))
+ return toType(tt.Bucket)
+}
diff --git a/src/reflect/export_swiss_test.go b/src/reflect/export_swiss_test.go
new file mode 100644
index 0000000000..ac3cd0adf7
--- /dev/null
+++ b/src/reflect/export_swiss_test.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package reflect
+
+func MapGroupOf(x, y Type) Type {
+ grp, _ := groupAndSlotOf(x, y)
+ return grp
+}
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index 30a0e823af..7ab3e957fc 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -91,19 +91,6 @@ var GCBits = gcbits
func gcbits(any) []byte // provided by runtime
-func MapBucketOf(x, y Type) Type {
- return toType(bucketOf(x.common(), y.common()))
-}
-
-func CachedBucketOf(m Type) Type {
- t := m.(*rtype)
- if Kind(t.t.Kind_&abi.KindMask) != Map {
- panic("not map")
- }
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.Bucket)
-}
-
type EmbedWithUnexpMeth struct{}
func (EmbedWithUnexpMeth) f() {}
diff --git a/src/reflect/map_noswiss_test.go b/src/reflect/map_noswiss_test.go
new file mode 100644
index 0000000000..52fcf89535
--- /dev/null
+++ b/src/reflect/map_noswiss_test.go
@@ -0,0 +1,60 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.swissmap
+
+package reflect_test
+
+import (
+ "internal/abi"
+ "internal/goarch"
+ . "reflect"
+ "testing"
+)
+
+func testGCBitsMap(t *testing.T) {
+ const bucketCount = abi.OldMapBucketCount
+
+ hdr := make([]byte, bucketCount/goarch.PtrSize)
+
+ verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
+ verifyGCBits(t, MapBucketOf(k, e), want)
+ verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+ }
+ verifyMapBucket(t,
+ Tscalar, Tptr,
+ map[Xscalar]Xptr(nil),
+ join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalarptr, Tptr,
+ map[Xscalarptr]Xptr(nil),
+ join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t, Tint64, Tptr,
+ map[int64]Xptr(nil),
+ join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalar, Tscalar,
+ map[Xscalar]Xscalar(nil),
+ empty)
+ verifyMapBucket(t,
+ ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
+ map[[2]Xscalarptr][3]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
+}
diff --git a/src/reflect/map_swiss.go b/src/reflect/map_swiss.go
index 8978b377c7..f6a56f7a65 100644
--- a/src/reflect/map_swiss.go
+++ b/src/reflect/map_swiss.go
@@ -8,7 +8,7 @@ package reflect
import (
"internal/abi"
- "internal/goarch"
+ "internal/runtime/maps"
"unsafe"
)
@@ -55,6 +55,8 @@ func MapOf(key, elem Type) Type {
}
}
+ group, slot := groupAndSlotOf(key, elem)
+
// Make a map type.
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
@@ -65,32 +67,19 @@ func MapOf(key, elem Type) Type {
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
mt.Key = ktyp
mt.Elem = etyp
- mt.Bucket = bucketOf(ktyp, etyp)
+ mt.Group = group.common()
mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
return typehash(ktyp, p, seed)
}
+ mt.SlotSize = slot.Size()
+ mt.ElemOff = slot.Field(1).Offset
mt.Flags = 0
- if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
- mt.KeySize = uint8(goarch.PtrSize)
- mt.Flags |= 1 // indirect key
- } else {
- mt.KeySize = uint8(ktyp.Size_)
- }
- if etyp.Size_ > abi.SwissMapMaxElemBytes {
- mt.ValueSize = uint8(goarch.PtrSize)
- mt.Flags |= 2 // indirect value
- } else {
- mt.ValueSize = uint8(etyp.Size_)
- }
- mt.BucketSize = uint16(mt.Bucket.Size_)
- if isReflexive(ktyp) {
- mt.Flags |= 4
- }
+ // TODO(prattmic): indirect key/elem flags
if needKeyUpdate(ktyp) {
- mt.Flags |= 8
+ mt.Flags |= abi.SwissMapNeedKeyUpdate
}
if hashMightPanic(ktyp) {
- mt.Flags |= 16
+ mt.Flags |= abi.SwissMapHashMightPanic
}
mt.PtrToThis = 0
@@ -98,67 +87,41 @@ func MapOf(key, elem Type) Type {
return ti.(Type)
}
-func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
- if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
- ktyp = ptrTo(ktyp)
- }
- if etyp.Size_ > abi.SwissMapMaxElemBytes {
- etyp = ptrTo(etyp)
- }
+func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
+ // TODO(prattmic): indirect key/elem flags
- // Prepare GC data if any.
- // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
- // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
- // Note that since the key and value are known to be <= 128 bytes,
- // they're guaranteed to have bitmaps instead of GC programs.
- var gcdata *byte
- var ptrdata uintptr
+ // type group struct {
+ // ctrl uint64
+ // slots [abi.SwissMapGroupSlots]struct {
+ // key keyType
+ // elem elemType
+ // }
+ // }
- size := abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
- if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
- panic("reflect: bad size computation in MapOf")
+ fields := []StructField{
+ {
+ Name: "Key",
+ Type: ktyp,
+ },
+ {
+ Name: "Elem",
+ Type: etyp,
+ },
}
+ slot := StructOf(fields)
- if ktyp.Pointers() || etyp.Pointers() {
- nptr := (abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
- n := (nptr + 7) / 8
-
- // Runtime needs pointer masks to be a multiple of uintptr in size.
- n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
- mask := make([]byte, n)
- base := uintptr(abi.SwissMapBucketCount / goarch.PtrSize)
-
- if ktyp.Pointers() {
- emitGCMask(mask, base, ktyp, abi.SwissMapBucketCount)
- }
- base += abi.SwissMapBucketCount * ktyp.Size_ / goarch.PtrSize
-
- if etyp.Pointers() {
- emitGCMask(mask, base, etyp, abi.SwissMapBucketCount)
- }
- base += abi.SwissMapBucketCount * etyp.Size_ / goarch.PtrSize
-
- word := base
- mask[word/8] |= 1 << (word % 8)
- gcdata = &mask[0]
- ptrdata = (word + 1) * goarch.PtrSize
-
- // overflow word must be last
- if ptrdata != size {
- panic("reflect: bad layout computation in MapOf")
- }
- }
-
- b := &abi.Type{
- Align_: goarch.PtrSize,
- Size_: size,
- Kind_: abi.Struct,
- PtrBytes: ptrdata,
- GCData: gcdata,
+ fields = []StructField{
+ {
+ Name: "Ctrl",
+ Type: TypeFor[uint64](),
+ },
+ {
+ Name: "Slots",
+ Type: ArrayOf(abi.SwissMapGroupSlots, slot),
+ },
}
- s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
- b.Str = resolveReflectName(newName(s, "", false, false))
- return b
+ group := StructOf(fields)
+ return group, slot
}
var stringType = rtypeOf("")
@@ -181,7 +144,8 @@ func (v Value) MapIndex(key Value) Value {
var e unsafe.Pointer
// TODO(#54766): temporarily disable specialized variants.
- if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ //if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if false {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
@@ -219,12 +183,12 @@ func (v Value) MapKeys() []Value {
if m != nil {
mlen = maplen(m)
}
- var it hiter
+ var it maps.Iter
mapiterinit(v.typ(), m, &it)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
- key := it.key
+ key := it.Key()
if key == nil {
// Someone deleted an entry from the map since we
// called maplen above. It's a data race, but nothing
@@ -237,45 +201,23 @@ func (v Value) MapKeys() []Value {
return a[:i]
}
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
- key unsafe.Pointer
- elem unsafe.Pointer
- t unsafe.Pointer
- h unsafe.Pointer
- buckets unsafe.Pointer
- bptr unsafe.Pointer
- overflow *[]unsafe.Pointer
- oldoverflow *[]unsafe.Pointer
- startBucket uintptr
- offset uint8
- wrapped bool
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-func (h *hiter) initialized() bool {
- return h.t != nil
-}
-
// A MapIter is an iterator for ranging over a map.
// See [Value.MapRange].
type MapIter struct {
m Value
- hiter hiter
+ hiter maps.Iter
}
+// TODO(prattmic): only for sharing the linkname declarations with old maps.
+// Remove with old maps.
+type hiter = maps.Iter
+
// Key returns the key of iter's current map entry.
func (iter *MapIter) Key() Value {
- if !iter.hiter.initialized() {
+ if !iter.hiter.Initialized() {
panic("MapIter.Key called before Next")
}
- iterkey := iter.hiter.key
+ iterkey := iter.hiter.Key()
if iterkey == nil {
panic("MapIter.Key called on exhausted iterator")
}
@@ -290,10 +232,10 @@ func (iter *MapIter) Key() Value {
// As in Go, the key must be assignable to v's type and
// must not be derived from an unexported field.
func (v Value) SetIterKey(iter *MapIter) {
- if !iter.hiter.initialized() {
+ if !iter.hiter.Initialized() {
panic("reflect: Value.SetIterKey called before Next")
}
- iterkey := iter.hiter.key
+ iterkey := iter.hiter.Key()
if iterkey == nil {
panic("reflect: Value.SetIterKey called on exhausted iterator")
}
@@ -315,10 +257,10 @@ func (v Value) SetIterKey(iter *MapIter) {
// Value returns the value of iter's current map entry.
func (iter *MapIter) Value() Value {
- if !iter.hiter.initialized() {
+ if !iter.hiter.Initialized() {
panic("MapIter.Value called before Next")
}
- iterelem := iter.hiter.elem
+ iterelem := iter.hiter.Elem()
if iterelem == nil {
panic("MapIter.Value called on exhausted iterator")
}
@@ -333,10 +275,10 @@ func (iter *MapIter) Value() Value {
// As in Go, the value must be assignable to v's type and
// must not be derived from an unexported field.
func (v Value) SetIterValue(iter *MapIter) {
- if !iter.hiter.initialized() {
+ if !iter.hiter.Initialized() {
panic("reflect: Value.SetIterValue called before Next")
}
- iterelem := iter.hiter.elem
+ iterelem := iter.hiter.Elem()
if iterelem == nil {
panic("reflect: Value.SetIterValue called on exhausted iterator")
}
@@ -363,15 +305,15 @@ func (iter *MapIter) Next() bool {
if !iter.m.IsValid() {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
- if !iter.hiter.initialized() {
+ if !iter.hiter.Initialized() {
mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
} else {
- if iter.hiter.key == nil {
+ if iter.hiter.Key() == nil {
panic("MapIter.Next called on exhausted iterator")
}
mapiternext(&iter.hiter)
}
- return iter.hiter.key != nil
+ return iter.hiter.Key() != nil
}
// Reset modifies iter to iterate over v.
@@ -383,7 +325,7 @@ func (iter *MapIter) Reset(v Value) {
v.mustBe(Map)
}
iter.m = v
- iter.hiter = hiter{}
+ iter.hiter = maps.Iter{}
}
// MapRange returns a range iterator for a map.
@@ -425,7 +367,8 @@ func (v Value) SetMapIndex(key, elem Value) {
tt := (*mapType)(unsafe.Pointer(v.typ()))
// TODO(#54766): temporarily disable specialized variants.
- if false && (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ //if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if false {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
diff --git a/src/reflect/map_swiss_test.go b/src/reflect/map_swiss_test.go
new file mode 100644
index 0000000000..621140aa60
--- /dev/null
+++ b/src/reflect/map_swiss_test.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.swissmap
+
+package reflect_test
+
+import (
+ "reflect"
+ "testing"
+)
+
+func testGCBitsMap(t *testing.T) {
+ // Unlike old maps, we don't manually construct GC data for swiss maps,
+ // instead using the public reflect API in groupAndSlotOf.
+}
+
+// See also runtime_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+ st := reflect.TypeFor[struct{}]()
+ grp := reflect.MapGroupOf(st, st)
+
+ // internal/runtime/maps may create pointers to slots, even if slots
+ // are size 0. We should have reserved an extra word to ensure that
+ // pointers to the zero-size type at the end of the group are valid.
+ if grp.Size() <= 8 {
+ t.Errorf("Group size got %d want >8", grp.Size())
+ }
+}