aboutsummaryrefslogtreecommitdiff
path: root/src/reflect
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2024-04-19 13:52:31 -0400
committerGopher Robot <gobot@golang.org>2024-08-02 16:41:53 +0000
commit4f7dc282c4bdfba4e63b39bbe9846c1469dc7ee5 (patch)
treeb4fa4f7de70120c25c408b85056ac30245cfaef9 /src/reflect
parent057b703407fa833193cbdc1f37179561c6c9da90 (diff)
downloadgo-4f7dc282c4bdfba4e63b39bbe9846c1469dc7ee5.tar.xz
all: split old and swiss map abi and compiler integration
The two map implementations are still identical, but now the compiler targets the appropriate ABI depending on GOEXPERIMENT.

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest,gotip-linux-amd64-longtest-swissmap
Change-Id: I8438f64f044ba9de30ddbf2b8ceb9b4edd2d5614
Reviewed-on: https://go-review.googlesource.com/c/go/+/580779
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Diffstat (limited to 'src/reflect')
-rw-r--r--src/reflect/all_test.go79
-rw-r--r--src/reflect/map_noswiss.go28
-rw-r--r--src/reflect/map_swiss.go28
3 files changed, 91 insertions, 44 deletions
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 277c703edd..2870a0adef 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -12,6 +12,7 @@ import (
"go/token"
"internal/abi"
"internal/goarch"
+ "internal/goexperiment"
"internal/testenv"
"io"
"math"
@@ -32,8 +33,6 @@ import (
"unsafe"
)
-const bucketCount = abi.MapBucketCount
-
var sink any
func TestBool(t *testing.T) {
@@ -7277,47 +7276,95 @@ func TestGCBits(t *testing.T) {
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
- hdr := make([]byte, bucketCount/goarch.PtrSize)
+ if goexperiment.SwissMap {
+ const bucketCount = abi.SwissMapBucketCount
- verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
- verifyGCBits(t, MapBucketOf(k, e), want)
- verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
- }
- verifyMapBucket(t,
+ hdr := make([]byte, bucketCount/goarch.PtrSize)
+
+ verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
+ verifyGCBits(t, MapBucketOf(k, e), want)
+ verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+ }
+ verifyMapBucket(t,
Tscalar, Tptr,
map[Xscalar]Xptr(nil),
join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
Tscalarptr, Tptr,
map[Xscalarptr]Xptr(nil),
join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t, Tint64, Tptr,
+ verifyMapBucket(t, Tint64, Tptr,
map[int64]Xptr(nil),
join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
Tscalar, Tscalar,
map[Xscalar]Xscalar(nil),
empty)
- verifyMapBucket(t,
+ verifyMapBucket(t,
ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
map[[2]Xscalarptr][3]Xptrscalar(nil),
join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
+ verifyMapBucket(t,
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
+ } else {
+ const bucketCount = abi.OldMapBucketCount
+
+ hdr := make([]byte, bucketCount/goarch.PtrSize)
+
+ verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
+ verifyGCBits(t, MapBucketOf(k, e), want)
+ verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
+ }
+ verifyMapBucket(t,
+ Tscalar, Tptr,
+ map[Xscalar]Xptr(nil),
+ join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalarptr, Tptr,
+ map[Xscalarptr]Xptr(nil),
+ join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t, Tint64, Tptr,
+ map[int64]Xptr(nil),
+ join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ Tscalar, Tscalar,
+ map[Xscalar]Xscalar(nil),
+ empty)
+ verifyMapBucket(t,
+ ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
+ map[[2]Xscalarptr][3]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
+ verifyMapBucket(t,
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
+ }
}
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
diff --git a/src/reflect/map_noswiss.go b/src/reflect/map_noswiss.go
index de92e76df0..5af50ac779 100644
--- a/src/reflect/map_noswiss.go
+++ b/src/reflect/map_noswiss.go
@@ -14,7 +14,7 @@ import (
// mapType represents a map type.
type mapType struct {
- abi.MapType
+ abi.OldMapType
}
func (t *rtype) Key() Type {
@@ -70,13 +70,13 @@ func MapOf(key, elem Type) Type {
return typehash(ktyp, p, seed)
}
mt.Flags = 0
- if ktyp.Size_ > abi.MapMaxKeyBytes {
+ if ktyp.Size_ > abi.OldMapMaxKeyBytes {
mt.KeySize = uint8(goarch.PtrSize)
mt.Flags |= 1 // indirect key
} else {
mt.KeySize = uint8(ktyp.Size_)
}
- if etyp.Size_ > abi.MapMaxElemBytes {
+ if etyp.Size_ > abi.OldMapMaxElemBytes {
mt.ValueSize = uint8(goarch.PtrSize)
mt.Flags |= 2 // indirect value
} else {
@@ -99,10 +99,10 @@ func MapOf(key, elem Type) Type {
}
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
- if ktyp.Size_ > abi.MapMaxKeyBytes {
+ if ktyp.Size_ > abi.OldMapMaxKeyBytes {
ktyp = ptrTo(ktyp)
}
- if etyp.Size_ > abi.MapMaxElemBytes {
+ if etyp.Size_ > abi.OldMapMaxElemBytes {
etyp = ptrTo(etyp)
}
@@ -114,29 +114,29 @@ func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
var gcdata *byte
var ptrdata uintptr
- size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+ size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.Pointers() || etyp.Pointers() {
- nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
+ nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
// Runtime needs pointer masks to be a multiple of uintptr in size.
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
mask := make([]byte, n)
- base := uintptr(abi.MapBucketCount / goarch.PtrSize)
+ base := uintptr(abi.OldMapBucketCount / goarch.PtrSize)
if ktyp.Pointers() {
- emitGCMask(mask, base, ktyp, abi.MapBucketCount)
+ emitGCMask(mask, base, ktyp, abi.OldMapBucketCount)
}
- base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
+ base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize
if etyp.Pointers() {
- emitGCMask(mask, base, etyp, abi.MapBucketCount)
+ emitGCMask(mask, base, etyp, abi.OldMapBucketCount)
}
- base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
+ base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
@@ -180,7 +180,7 @@ func (v Value) MapIndex(key Value) Value {
// of unexported fields.
var e unsafe.Pointer
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
@@ -423,7 +423,7 @@ func (v Value) SetMapIndex(key, elem Value) {
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ()))
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
diff --git a/src/reflect/map_swiss.go b/src/reflect/map_swiss.go
index a42a9d6f89..39f799c9b2 100644
--- a/src/reflect/map_swiss.go
+++ b/src/reflect/map_swiss.go
@@ -14,7 +14,7 @@ import (
// mapType represents a map type.
type mapType struct {
- abi.MapType
+ abi.SwissMapType
}
func (t *rtype) Key() Type {
@@ -70,13 +70,13 @@ func MapOf(key, elem Type) Type {
return typehash(ktyp, p, seed)
}
mt.Flags = 0
- if ktyp.Size_ > abi.MapMaxKeyBytes {
+ if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
mt.KeySize = uint8(goarch.PtrSize)
mt.Flags |= 1 // indirect key
} else {
mt.KeySize = uint8(ktyp.Size_)
}
- if etyp.Size_ > abi.MapMaxElemBytes {
+ if etyp.Size_ > abi.SwissMapMaxElemBytes {
mt.ValueSize = uint8(goarch.PtrSize)
mt.Flags |= 2 // indirect value
} else {
@@ -99,10 +99,10 @@ func MapOf(key, elem Type) Type {
}
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
- if ktyp.Size_ > abi.MapMaxKeyBytes {
+ if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
ktyp = ptrTo(ktyp)
}
- if etyp.Size_ > abi.MapMaxElemBytes {
+ if etyp.Size_ > abi.SwissMapMaxElemBytes {
etyp = ptrTo(etyp)
}
@@ -114,29 +114,29 @@ func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
var gcdata *byte
var ptrdata uintptr
- size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
+ size := abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.Pointers() || etyp.Pointers() {
- nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
+ nptr := (abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
// Runtime needs pointer masks to be a multiple of uintptr in size.
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
mask := make([]byte, n)
- base := uintptr(abi.MapBucketCount / goarch.PtrSize)
+ base := uintptr(abi.SwissMapBucketCount / goarch.PtrSize)
if ktyp.Pointers() {
- emitGCMask(mask, base, ktyp, abi.MapBucketCount)
+ emitGCMask(mask, base, ktyp, abi.SwissMapBucketCount)
}
- base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
+ base += abi.SwissMapBucketCount * ktyp.Size_ / goarch.PtrSize
if etyp.Pointers() {
- emitGCMask(mask, base, etyp, abi.MapBucketCount)
+ emitGCMask(mask, base, etyp, abi.SwissMapBucketCount)
}
- base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
+ base += abi.SwissMapBucketCount * etyp.Size_ / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
@@ -180,7 +180,7 @@ func (v Value) MapIndex(key Value) Value {
// of unexported fields.
var e unsafe.Pointer
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
@@ -423,7 +423,7 @@ func (v Value) SetMapIndex(key, elem Value) {
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ()))
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)