aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/compile/internal
diff options
context:
space:
mode:
authorMichael Pratt <mpratt@google.com>2024-05-03 13:03:04 -0400
committerMichael Pratt <mpratt@google.com>2024-10-14 19:58:47 +0000
commitc39bc22c141bc6990e4e2abf604dcf56669ff779 (patch)
tree5384243aaaa9d9f9796674223cf186ee3ceb63a9 /src/cmd/compile/internal
parent48849e0866f64a40d04a9151e44e5a73acdfc17b (diff)
downloadgo-c39bc22c141bc6990e4e2abf604dcf56669ff779.tar.xz
all: wire up swisstable maps
Use the new SwissTable-based map in internal/runtime/maps as the basis for the runtime map when GOEXPERIMENT=swissmap. Integration is complete enough to pass all.bash. Notable missing features: * Race integration / concurrent write detection * Stack-allocated maps * Specialized "fast" map variants * Indirect key / elem For #54766. Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64_power10,gotip-linux-amd64-longtest-swissmap Change-Id: Ie97b656b6d8e05c0403311ae08fef9f51756a639 Reviewed-on: https://go-review.googlesource.com/c/go/+/594596 Reviewed-by: Keith Randall <khr@golang.org> Reviewed-by: Keith Randall <khr@google.com> Reviewed-by: Michael Knyszek <mknyszek@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/cmd/compile/internal')
-rw-r--r--src/cmd/compile/internal/gc/main.go7
-rw-r--r--src/cmd/compile/internal/ir/symtab.go9
-rw-r--r--src/cmd/compile/internal/reflectdata/map_swiss.go303
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go11
-rw-r--r--src/cmd/compile/internal/test/inl_test.go15
-rw-r--r--src/cmd/compile/internal/types/fmt.go4
-rw-r--r--src/cmd/compile/internal/types/type.go12
-rw-r--r--src/cmd/compile/internal/walk/builtin.go60
-rw-r--r--src/cmd/compile/internal/walk/range.go11
9 files changed, 162 insertions, 270 deletions
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 174c609e44..c922fa9a9a 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -104,6 +104,13 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
ir.Pkgs.Runtime.Prefix = "runtime"
+ if buildcfg.Experiment.SwissMap {
+ // Pseudo-package that contains the compiler's builtin
+ // declarations for maps.
+ ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
+ ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
+ }
+
// pseudo-packages used in symbol tables
ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
ir.Pkgs.Itab.Prefix = "go:itab"
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index a2a263d3ce..3cdef10230 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -73,8 +73,9 @@ type symsStruct struct {
// Pkgs holds known packages.
var Pkgs struct {
- Go *types.Pkg
- Itab *types.Pkg
- Runtime *types.Pkg
- Coverage *types.Pkg
+ Go *types.Pkg
+ Itab *types.Pkg
+ Runtime *types.Pkg
+ InternalMaps *types.Pkg
+ Coverage *types.Pkg
}
diff --git a/src/cmd/compile/internal/reflectdata/map_swiss.go b/src/cmd/compile/internal/reflectdata/map_swiss.go
index 4fed93517e..2525c0cf2c 100644
--- a/src/cmd/compile/internal/reflectdata/map_swiss.go
+++ b/src/cmd/compile/internal/reflectdata/map_swiss.go
@@ -6,7 +6,6 @@ package reflectdata
import (
"internal/abi"
-
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/rttype"
@@ -16,161 +15,100 @@ import (
"cmd/internal/src"
)
-// SwissMapBucketType makes the map bucket type given the type of the map.
-func SwissMapBucketType(t *types.Type) *types.Type {
- // Builds a type representing a Bucket structure for
- // the given map type. This type is not visible to users -
- // we include only enough information to generate a correct GC
- // program for it.
- // Make sure this stays in sync with runtime/map.go.
- //
- // A "bucket" is a "struct" {
- // tophash [abi.SwissMapBucketCount]uint8
- // keys [abi.SwissMapBucketCount]keyType
- // elems [abi.SwissMapBucketCount]elemType
- // overflow *bucket
- // }
- if t.MapType().SwissBucket != nil {
- return t.MapType().SwissBucket
+// SwissMapGroupType makes the map slot group type given the type of the map.
+func SwissMapGroupType(t *types.Type) *types.Type {
+ if t.MapType().SwissGroup != nil {
+ return t.MapType().SwissGroup
}
- keytype := t.Key()
- elemtype := t.Elem()
- types.CalcSize(keytype)
- types.CalcSize(elemtype)
- if keytype.Size() > abi.SwissMapMaxKeyBytes {
- keytype = types.NewPtr(keytype)
- }
- if elemtype.Size() > abi.SwissMapMaxElemBytes {
- elemtype = types.NewPtr(elemtype)
+ // Builds a type representing a group structure for the given map type.
+ // This type is not visible to users, we include it so we can generate
+ // a correct GC program for it.
+ //
+ // Make sure this stays in sync with internal/runtime/maps/group.go.
+ //
+ // type group struct {
+ // ctrl uint64
+ // slots [abi.SwissMapGroupSlots]struct {
+ // key keyType
+ // elem elemType
+ // }
+ // }
+ slotFields := []*types.Field{
+ makefield("key", t.Key()),
+ makefield("elem", t.Elem()),
}
+ slot := types.NewStruct(slotFields)
+ slot.SetNoalg(true)
- field := make([]*types.Field, 0, 5)
-
- // The first field is: uint8 topbits[BUCKETSIZE].
- arr := types.NewArray(types.Types[types.TUINT8], abi.SwissMapBucketCount)
- field = append(field, makefield("topbits", arr))
-
- arr = types.NewArray(keytype, abi.SwissMapBucketCount)
- arr.SetNoalg(true)
- keys := makefield("keys", arr)
- field = append(field, keys)
+ slotArr := types.NewArray(slot, abi.SwissMapGroupSlots)
+ slotArr.SetNoalg(true)
- arr = types.NewArray(elemtype, abi.SwissMapBucketCount)
- arr.SetNoalg(true)
- elems := makefield("elems", arr)
- field = append(field, elems)
-
- // If keys and elems have no pointers, the map implementation
- // can keep a list of overflow pointers on the side so that
- // buckets can be marked as having no pointers.
- // Arrange for the bucket to have no pointers by changing
- // the type of the overflow field to uintptr in this case.
- // See comment on hmap.overflow in runtime/map.go.
- otyp := types.Types[types.TUNSAFEPTR]
- if !elemtype.HasPointers() && !keytype.HasPointers() {
- otyp = types.Types[types.TUINTPTR]
+ fields := []*types.Field{
+ makefield("ctrl", types.Types[types.TUINT64]),
+ makefield("slots", slotArr),
}
- overflow := makefield("overflow", otyp)
- field = append(field, overflow)
- // link up fields
- bucket := types.NewStruct(field[:])
- bucket.SetNoalg(true)
- types.CalcSize(bucket)
+ group := types.NewStruct(fields)
+ group.SetNoalg(true)
+ types.CalcSize(group)
// Check invariants that map code depends on.
if !types.IsComparable(t.Key()) {
base.Fatalf("unsupported map key type for %v", t)
}
- if abi.SwissMapBucketCount < 8 {
- base.Fatalf("bucket size %d too small for proper alignment %d", abi.SwissMapBucketCount, 8)
- }
- if uint8(keytype.Alignment()) > abi.SwissMapBucketCount {
- base.Fatalf("key align too big for %v", t)
- }
- if uint8(elemtype.Alignment()) > abi.SwissMapBucketCount {
- base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.SwissMapBucketCount)
- }
- if keytype.Size() > abi.SwissMapMaxKeyBytes {
- base.Fatalf("key size too large for %v", t)
- }
- if elemtype.Size() > abi.SwissMapMaxElemBytes {
- base.Fatalf("elem size too large for %v", t)
- }
- if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
- base.Fatalf("key indirect incorrect for %v", t)
- }
- if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
- base.Fatalf("elem indirect incorrect for %v", t)
- }
- if keytype.Size()%keytype.Alignment() != 0 {
- base.Fatalf("key size not a multiple of key align for %v", t)
- }
- if elemtype.Size()%elemtype.Alignment() != 0 {
- base.Fatalf("elem size not a multiple of elem align for %v", t)
- }
- if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
- base.Fatalf("bucket align not multiple of key align %v", t)
- }
- if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
- base.Fatalf("bucket align not multiple of elem align %v", t)
+ if group.Size() <= 8 {
+ // internal/runtime/maps creates pointers to slots, even if
+ // both key and elem are size zero. In this case, each slot is
+ // size 0, but group should still reserve a word of padding at
+ // the end to ensure pointers are valid.
+ base.Fatalf("bad group size for %v", t)
}
- if keys.Offset%keytype.Alignment() != 0 {
- base.Fatalf("bad alignment of keys in bmap for %v", t)
- }
- if elems.Offset%elemtype.Alignment() != 0 {
- base.Fatalf("bad alignment of elems in bmap for %v", t)
- }
-
- // Double-check that overflow field is final memory in struct,
- // with no padding at end.
- if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
- base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
- t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
- }
-
- t.MapType().SwissBucket = bucket
- bucket.StructType().Map = t
- return bucket
+ t.MapType().SwissGroup = group
+ group.StructType().Map = t
+ return group
}
var swissHmapType *types.Type
-// SwissMapType returns a type interchangeable with runtime.hmap.
-// Make sure this stays in sync with runtime/map.go.
+// SwissMapType returns a type interchangeable with internal/runtime/maps.Map.
+// Make sure this stays in sync with internal/runtime/maps/map.go.
func SwissMapType() *types.Type {
if swissHmapType != nil {
return swissHmapType
}
// build a struct:
- // type hmap struct {
- // count int
- // flags uint8
- // B uint8
- // noverflow uint16
- // hash0 uint32
- // buckets unsafe.Pointer
- // oldbuckets unsafe.Pointer
- // nevacuate uintptr
- // extra unsafe.Pointer // *mapextra
+ // type table struct {
+ // used uint64
+ // typ unsafe.Pointer // *abi.SwissMapType
+ // seed uintptr
+ //
+ // // From groups.
+ // groups_typ unsafe.Pointer // *abi.SwissMapType
+ // groups_data unsafe.Pointer
+ // groups_lengthMask uint64
+ //
+ // capacity uint64
+ // growthLeft uint64
+ //
+ // clearSeq uint64
// }
- // must match runtime/map.go:hmap.
+ // must match internal/runtime/maps/map.go:Map.
fields := []*types.Field{
- makefield("count", types.Types[types.TINT]),
- makefield("flags", types.Types[types.TUINT8]),
- makefield("B", types.Types[types.TUINT8]),
- makefield("noverflow", types.Types[types.TUINT16]),
- makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
- makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
- makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
- makefield("nevacuate", types.Types[types.TUINTPTR]),
- makefield("extra", types.Types[types.TUNSAFEPTR]),
+ makefield("used", types.Types[types.TUINT64]),
+ makefield("typ", types.Types[types.TUNSAFEPTR]),
+ makefield("seed", types.Types[types.TUINTPTR]),
+ makefield("groups_typ", types.Types[types.TUNSAFEPTR]),
+ makefield("groups_data", types.Types[types.TUNSAFEPTR]),
+ makefield("groups_lengthMask", types.Types[types.TUINT64]),
+ makefield("capacity", types.Types[types.TUINT64]),
+ makefield("growthLeft", types.Types[types.TUINT64]),
+ makefield("clearSeq", types.Types[types.TUINT64]),
}
- n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("table"))
hmap := types.NewNamed(n)
n.SetType(hmap)
n.SetTypecheck(1)
@@ -178,10 +116,10 @@ func SwissMapType() *types.Type {
hmap.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hmap)
- // The size of hmap should be 48 bytes on 64 bit
- // and 28 bytes on 32 bit platforms.
- if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
- base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
+ // The size of Map should be 72 bytes on 64 bit
+ // and 56 bytes on 32 bit platforms.
+ if size := int64(5*8 + 4*types.PtrSize); hmap.Size() != size {
+ base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", hmap.Size(), size)
}
swissHmapType = hmap
@@ -200,52 +138,54 @@ func SwissMapIterType() *types.Type {
hmap := SwissMapType()
// build a struct:
- // type hiter struct {
- // key unsafe.Pointer // *Key
- // elem unsafe.Pointer // *Elem
- // t unsafe.Pointer // *SwissMapType
- // h *hmap
- // buckets unsafe.Pointer
- // bptr unsafe.Pointer // *bmap
- // overflow unsafe.Pointer // *[]*bmap
- // oldoverflow unsafe.Pointer // *[]*bmap
- // startBucket uintptr
- // offset uint8
- // wrapped bool
- // B uint8
- // i uint8
- // bucket uintptr
- // checkBucket uintptr
+ // type Iter struct {
+ // key unsafe.Pointer // *Key
+ // elem unsafe.Pointer // *Elem
+ // typ unsafe.Pointer // *SwissMapType
+ // m *Map
+ //
+ // // From groups.
+ // groups_typ unsafe.Pointer // *abi.SwissMapType
+ // groups_data unsafe.Pointer
+ // groups_lengthMask uint64
+ //
+ // clearSeq uint64
+ //
+ // offset uint64
+ // groupIdx uint64
+ // slotIdx uint32
+ //
+ // // 4 bytes of padding on 64-bit arches.
// }
- // must match runtime/map.go:hiter.
+ // must match internal/runtime/maps/table.go:Iter.
fields := []*types.Field{
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
- makefield("t", types.Types[types.TUNSAFEPTR]),
- makefield("h", types.NewPtr(hmap)),
- makefield("buckets", types.Types[types.TUNSAFEPTR]),
- makefield("bptr", types.Types[types.TUNSAFEPTR]),
- makefield("overflow", types.Types[types.TUNSAFEPTR]),
- makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
- makefield("startBucket", types.Types[types.TUINTPTR]),
- makefield("offset", types.Types[types.TUINT8]),
- makefield("wrapped", types.Types[types.TBOOL]),
- makefield("B", types.Types[types.TUINT8]),
- makefield("i", types.Types[types.TUINT8]),
- makefield("bucket", types.Types[types.TUINTPTR]),
- makefield("checkBucket", types.Types[types.TUINTPTR]),
+ makefield("typ", types.Types[types.TUNSAFEPTR]),
+ makefield("m", types.NewPtr(hmap)),
+ makefield("groups_typ", types.Types[types.TUNSAFEPTR]),
+ makefield("groups_data", types.Types[types.TUNSAFEPTR]),
+ makefield("groups_lengthMask", types.Types[types.TUINT64]),
+ makefield("clearSeq", types.Types[types.TUINT64]),
+ makefield("offset", types.Types[types.TUINT64]),
+ makefield("groupIdx", types.Types[types.TUINT64]),
+ makefield("slotIdx", types.Types[types.TUINT32]),
}
// build iterator struct holding the above fields
- n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Iter"))
hiter := types.NewNamed(n)
n.SetType(hiter)
n.SetTypecheck(1)
hiter.SetUnderlying(types.NewStruct(fields))
types.CalcSize(hiter)
- if hiter.Size() != int64(12*types.PtrSize) {
- base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
+ want := 6*types.PtrSize + 4*8 + 1*4
+ if types.PtrSize == 8 {
+ want += 4 // trailing padding
+ }
+ if hiter.Size() != int64(want) {
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), want)
}
swissHiterType = hiter
@@ -254,40 +194,27 @@ func SwissMapIterType() *types.Type {
func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
// internal/abi.SwissMapType
+ gtyp := SwissMapGroupType(t)
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
- s3 := writeType(SwissMapBucketType(t))
+ s3 := writeType(gtyp)
hasher := genhash(t.Key())
+ slotTyp := gtyp.Field(1).Type.Elem()
+ elemOff := slotTyp.Field(1).Offset
+
c.Field("Key").WritePtr(s1)
c.Field("Elem").WritePtr(s2)
- c.Field("Bucket").WritePtr(s3)
+ c.Field("Group").WritePtr(s3)
c.Field("Hasher").WritePtr(hasher)
+ c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size()))
+ c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
- // Note: flags must match maptype accessors in ../../../../runtime/type.go
- // and maptype builder in ../../../../reflect/type.go:MapOf.
- if t.Key().Size() > abi.SwissMapMaxKeyBytes {
- c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
- flags |= 1 // indirect key
- } else {
- c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
- }
-
- if t.Elem().Size() > abi.SwissMapMaxElemBytes {
- c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
- flags |= 2 // indirect value
- } else {
- c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
- }
- c.Field("BucketSize").WriteUint16(uint16(SwissMapBucketType(t).Size()))
- if types.IsReflexive(t.Key()) {
- flags |= 4 // reflexive key
- }
if needkeyupdate(t.Key()) {
- flags |= 8 // need key update
+ flags |= abi.SwissMapNeedKeyUpdate
}
if hashMightPanic(t.Key()) {
- flags |= 16 // hash might panic
+ flags |= abi.SwissMapHashMightPanic
}
c.Field("Flags").WriteUint32(flags)
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index d086b74e82..6a65bb0235 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -89,7 +89,7 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
if buildcfg.Experiment.SwissMap {
- _ = types.NewPtr(reflectdata.SwissMapType()) // *runtime.hmap
+ _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
} else {
_ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
}
@@ -5480,8 +5480,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
- // length is stored in the first word for map/chan
- s.vars[n] = s.load(lenType, x)
+ if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() {
+ // Length (Map.used) is stored as uint64 in the first word;
+ // load at that type, then convert to int.
+ loadType := reflectdata.SwissMapType().Field(0).Type // uint64
+ load := s.load(loadType, x)
+ s.vars[n] = s.conv(nil, load, loadType, lenType)
+ } else {
+ // length is stored in the first word for map/chan
+ s.vars[n] = s.load(lenType, x)
+ }
case ir.OCAP:
// capacity is stored in the second word for chan
sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 5a8a156f02..758479b622 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -39,10 +39,7 @@ func TestIntendedInlining(t *testing.T) {
"adjustpointer",
"alignDown",
"alignUp",
- "bucketMask",
- "bucketShift",
"chanbuf",
- "evacuated",
"fastlog2",
"float64bits",
"funcspdelta",
@@ -62,9 +59,6 @@ func TestIntendedInlining(t *testing.T) {
"stringStructOf",
"subtract1",
"subtractb",
- "tophash",
- "(*bmap).keys",
- "(*bmap).overflow",
"(*waitq).enqueue",
"funcInfo.entry",
@@ -236,6 +230,15 @@ func TestIntendedInlining(t *testing.T) {
},
}
+ if !goexperiment.SwissMap {
+ // Maps
+ want["runtime"] = append(want["runtime"], "bucketMask")
+ want["runtime"] = append(want["runtime"], "bucketShift")
+ want["runtime"] = append(want["runtime"], "evacuated")
+ want["runtime"] = append(want["runtime"], "tophash")
+ want["runtime"] = append(want["runtime"], "(*bmap).keys")
+ want["runtime"] = append(want["runtime"], "(*bmap).overflow")
+ }
if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
// nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
// We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index 96c63528ec..0dba510ac4 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -474,8 +474,10 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
// Format the bucket struct for map[x]y as map.bucket[x]y.
// This avoids a recursive print that generates very long names.
switch t {
- case mt.OldBucket, mt.SwissBucket:
+ case mt.OldBucket:
b.WriteString("map.bucket[")
+ case mt.SwissGroup:
+ b.WriteString("map.group[")
default:
base.Fatalf("unknown internal map type")
}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 9bb3a70b3e..9d3dde8c13 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -291,7 +291,7 @@ type Map struct {
OldBucket *Type // internal struct type representing a hash bucket
// GOEXPERIMENT=swissmap fields
- SwissBucket *Type // internal struct type representing a hash bucket
+ SwissGroup *Type // internal struct type representing a slot group
}
// MapType returns t's extra map-specific fields.
@@ -1192,15 +1192,9 @@ func (t *Type) cmp(x *Type) Cmp {
// to the fallthrough
} else if x.StructType().Map == nil {
return CMPgt // nil > non-nil
- } else if t.StructType().Map.MapType().SwissBucket == t {
- // Both have non-nil Map
- // Special case for Maps which include a recursive type where the recursion is not broken with a named type
- if x.StructType().Map.MapType().SwissBucket != x {
- return CMPlt // bucket maps are least
- }
+ } else {
+ // TODO: I am confused by the purpose of the OldBucket stuff below.
return t.StructType().Map.cmp(x.StructType().Map)
- } else if x.StructType().Map.MapType().SwissBucket == x {
- return CMPgt // bucket maps are least
} // If t != t.Map.SwissBucket, fall through to general case
} else {
if t.StructType().Map == nil {
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 19ec8d30fa..51c5e0b94b 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -1,4 +1,4 @@
-// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -332,62 +332,8 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// h = &hv
h = stackTempAddr(init, hmapType)
- // Allocate one bucket pointed to by hmap.buckets on stack if hint
- // is not larger than BUCKETSIZE. In case hint is larger than
- // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
- // Maximum key and elem size is 128 bytes, larger objects
- // are stored with an indirection. So max bucket size is 2048+eps.
- if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
-
- // In case hint is larger than BUCKETSIZE runtime.makemap
- // will allocate the buckets on the heap, see #20184
- //
- // if hint <= BUCKETSIZE {
- // var bv bmap
- // b = &bv
- // h.buckets = b
- // }
-
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapBucketCount)), nil, nil)
- nif.Likely = true
-
- // var bv bmap
- // b = &bv
- b := stackTempAddr(&nif.Body, reflectdata.SwissMapBucketType(t))
-
- // h.buckets = b
- bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
- nif.Body.Append(na)
- appendWalkStmt(init, nif)
- }
- }
-
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
- // Handling make(map[any]any) and
- // make(map[any]any, hint) where hint <= BUCKETSIZE
- // special allows for faster map initialization and
- // improves binary size by using calls with fewer arguments.
- // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
- // and no buckets will be allocated by makemap. Therefore,
- // no buckets need to be allocated in this code path.
- if n.Esc() == ir.EscNone {
- // Only need to initialize h.hash0 since
- // hmap h has been allocated on the stack already.
- // h.hash0 = rand32()
- rand := mkcall("rand32", types.Types[types.TUINT32], init)
- hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
- return typecheck.ConvNop(h, t)
- }
- // Call runtime.makehmap to allocate an
- // hmap on the heap and initialize hmap's hash0 field.
- fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init)
- }
-
- if n.Esc() != ir.EscNone {
+ // TODO(go.dev/issue/54766): Stack allocated table/groups.
+ } else {
h = typecheck.NodNil()
}
// Map initialization with a variable or large hint is
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 93898b3a66..27e71425c1 100644
--- a/src/cmd/compile/internal/walk/range.go
+++ b/src/cmd/compile/internal/walk/range.go
@@ -5,6 +5,7 @@
package walk
import (
+ "internal/buildcfg"
"unicode/utf8"
"cmd/compile/internal/base"
@@ -242,8 +243,14 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
th := hit.Type()
// depends on layout of iterator struct.
// See cmd/compile/internal/reflectdata/reflect.go:MapIterType
- keysym := th.Field(0).Sym
- elemsym := th.Field(1).Sym // ditto
+ var keysym, elemsym *types.Sym
+ if buildcfg.Experiment.SwissMap {
+ keysym = th.Field(0).Sym
+ elemsym = th.Field(1).Sym // ditto
+ } else {
+ keysym = th.Field(0).Sym
+ elemsym = th.Field(1).Sym // ditto
+ }
fn := typecheck.LookupRuntime("mapiterinit", t.Key(), t.Elem(), th)
init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))